id (stringlengths 1-265) | text (stringlengths 6-5.19M) | dataset_id (stringclasses, 7 values) |
---|---|---|
1627321 | # Copyright 2015 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from os_win import exceptions
from os_win.tests.unit import test_base
from os_win.utils import _wqlutils
from os_win.utils.compute import vmutils
from os_win.utils.metrics import metricsutils
from os_win import utilsfactory
class MetricsUtilsTestCase(test_base.OsWinBaseTestCase):
"""Unit tests for the Hyper-V MetricsUtils class."""
_FAKE_RET_VAL = 0
_FAKE_PORT = "fake's port name"
_autospec_classes = [
vmutils.VMUtils,
]
def setUp(self):
super(MetricsUtilsTestCase, self).setUp()
mock.patch.object(utilsfactory, 'get_vmutils',
mock.Mock(return_value=vmutils.VMUtils)).start()
self.utils = metricsutils.MetricsUtils()
self.utils._conn_attr = mock.MagicMock()
def test_cache_metrics_defs(self):
mock_metric_def = mock.Mock(ElementName=mock.sentinel.elementname)
self.utils._conn.CIM_BaseMetricDefinition.return_value = [
mock_metric_def]
self.utils._cache_metrics_defs()
expected_cache_metrics = {mock.sentinel.elementname: mock_metric_def}
self.assertEqual(expected_cache_metrics, self.utils._metrics_defs_obj)
@mock.patch.object(metricsutils.MetricsUtils, '_enable_metrics')
@mock.patch.object(metricsutils.MetricsUtils, '_get_vm_resources')
@mock.patch.object(metricsutils.MetricsUtils, '_get_vm')
def test_enable_vm_metrics_collection(
self, mock_get_vm, mock_get_vm_resources, mock_enable_metrics):
mock_vm = mock_get_vm.return_value
mock_disk = mock.MagicMock()
mock_dvd = mock.MagicMock(
ResourceSubType=self.utils._DVD_DISK_RES_SUB_TYPE)
mock_get_vm_resources.return_value = [mock_disk, mock_dvd]
self.utils.enable_vm_metrics_collection(mock.sentinel.vm_name)
metrics_names = [self.utils._CPU_METRICS,
self.utils._MEMORY_METRICS]
mock_enable_metrics.assert_has_calls(
[mock.call(mock_disk), mock.call(mock_vm, metrics_names)])
@mock.patch.object(metricsutils.MetricsUtils, '_enable_metrics')
def test_enable_disk_metrics_collection(self, mock_enable_metrics):
mock_get_disk = (
self.utils._vmutils._get_mounted_disk_resource_from_path)
self.utils.enable_disk_metrics_collection(
mock.sentinel.disk_path,
mock.sentinel.is_physical,
mock.sentinel.serial)
mock_get_disk.assert_called_once_with(
mock.sentinel.disk_path,
is_physical=mock.sentinel.is_physical,
serial=mock.sentinel.serial)
mock_enable_metrics.assert_called_once_with(mock_get_disk.return_value)
@mock.patch.object(metricsutils.MetricsUtils, '_enable_metrics')
@mock.patch.object(metricsutils.MetricsUtils, '_get_switch_port')
def test_enable_switch_port_metrics_collection(self, mock_get_port,
mock_enable_metrics):
self.utils.enable_port_metrics_collection(mock.sentinel.port_name)
mock_get_port.assert_called_once_with(mock.sentinel.port_name)
metrics = [self.utils._NET_IN_METRICS,
self.utils._NET_OUT_METRICS]
mock_enable_metrics.assert_called_once_with(
mock_get_port.return_value, metrics)
def _check_enable_metrics(self, metrics=None, definition=None):
mock_element = mock.MagicMock()
self.utils._metrics_svc.ControlMetrics.return_value = [0]
self.utils._enable_metrics(mock_element, metrics)
self.utils._metrics_svc.ControlMetrics.assert_called_once_with(
Subject=mock_element.path_.return_value,
Definition=definition,
MetricCollectionEnabled=self.utils._METRICS_ENABLED)
def test_enable_metrics_no_metrics(self):
self._check_enable_metrics()
def test_enable_metrics(self):
metrics_name = self.utils._CPU_METRICS
metrics_def = mock.MagicMock()
self.utils._metrics_defs_obj = {metrics_name: metrics_def}
self._check_enable_metrics([metrics_name, mock.sentinel.metrics_name],
metrics_def.path_.return_value)
def test_enable_metrics_exception(self):
metric_name = self.utils._CPU_METRICS
metric_def = mock.MagicMock()
self.utils._metrics_defs_obj = {metric_name: metric_def}
self.utils._metrics_svc.ControlMetrics.return_value = [1]
self.assertRaises(exceptions.OSWinException,
self.utils._enable_metrics,
mock.MagicMock(),
[metric_name])
@mock.patch.object(metricsutils.MetricsUtils, '_get_metrics')
@mock.patch.object(metricsutils.MetricsUtils, '_get_vm_resources')
@mock.patch.object(metricsutils.MetricsUtils, '_get_vm')
def test_get_cpu_metrics(self, mock_get_vm, mock_get_vm_resources,
mock_get_metrics):
fake_cpu_count = 2
fake_uptime = 1000
fake_cpu_metrics_val = 2000
self.utils._metrics_defs_obj = {
self.utils._CPU_METRICS: mock.sentinel.metrics}
mock_vm = mock_get_vm.return_value
mock_vm.OnTimeInMilliseconds = fake_uptime
mock_cpu = mock.MagicMock(VirtualQuantity=fake_cpu_count)
mock_get_vm_resources.return_value = [mock_cpu]
mock_metric = mock.MagicMock(MetricValue=fake_cpu_metrics_val)
mock_get_metrics.return_value = [mock_metric]
cpu_metrics = self.utils.get_cpu_metrics(mock.sentinel.vm_name)
self.assertEqual(3, len(cpu_metrics))
self.assertEqual(fake_cpu_metrics_val, cpu_metrics[0])
self.assertEqual(fake_cpu_count, cpu_metrics[1])
self.assertEqual(fake_uptime, cpu_metrics[2])
mock_get_vm.assert_called_once_with(mock.sentinel.vm_name)
mock_get_vm_resources.assert_called_once_with(
mock.sentinel.vm_name, self.utils._PROCESSOR_SETTING_DATA_CLASS)
mock_get_metrics.assert_called_once_with(mock_vm,
mock.sentinel.metrics)
@mock.patch.object(metricsutils.MetricsUtils, '_get_metrics')
@mock.patch.object(metricsutils.MetricsUtils, '_get_vm')
def test_get_memory_metrics(self, mock_get_vm, mock_get_metrics):
mock_vm = mock_get_vm.return_value
self.utils._metrics_defs_obj = {
self.utils._MEMORY_METRICS: mock.sentinel.metrics}
metrics_memory = mock.MagicMock()
metrics_memory.MetricValue = 3
mock_get_metrics.return_value = [metrics_memory]
response = self.utils.get_memory_metrics(mock.sentinel.vm_name)
self.assertEqual(3, response)
mock_get_vm.assert_called_once_with(mock.sentinel.vm_name)
mock_get_metrics.assert_called_once_with(mock_vm,
mock.sentinel.metrics)
@mock.patch.object(_wqlutils, 'get_element_associated_class')
@mock.patch.object(metricsutils.MetricsUtils,
'_sum_metrics_values_by_defs')
@mock.patch.object(metricsutils.MetricsUtils,
'_get_metrics_value_instances')
@mock.patch.object(metricsutils.MetricsUtils, '_get_vm_resources')
def test_get_vnic_metrics(self, mock_get_vm_resources,
mock_get_value_instances, mock_sum_by_defs,
mock_get_element_associated_class):
fake_rx_mb = 1000
fake_tx_mb = 2000
self.utils._metrics_defs_obj = {
self.utils._NET_IN_METRICS: mock.sentinel.net_in_metrics,
self.utils._NET_OUT_METRICS: mock.sentinel.net_out_metrics}
mock_port = mock.MagicMock(Parent=mock.sentinel.vnic_path)
mock_vnic = mock.MagicMock(ElementName=mock.sentinel.element_name,
Address=mock.sentinel.address)
mock_vnic.path_.return_value = mock.sentinel.vnic_path
mock_get_vm_resources.side_effect = [[mock_port], [mock_vnic]]
mock_sum_by_defs.return_value = [fake_rx_mb, fake_tx_mb]
vnic_metrics = list(
self.utils.get_vnic_metrics(mock.sentinel.vm_name))
self.assertEqual(1, len(vnic_metrics))
self.assertEqual(fake_rx_mb, vnic_metrics[0]['rx_mb'])
self.assertEqual(fake_tx_mb, vnic_metrics[0]['tx_mb'])
self.assertEqual(mock.sentinel.element_name,
vnic_metrics[0]['element_name'])
self.assertEqual(mock.sentinel.address, vnic_metrics[0]['address'])
mock_get_vm_resources.assert_has_calls([
mock.call(mock.sentinel.vm_name, self.utils._PORT_ALLOC_SET_DATA),
mock.call(mock.sentinel.vm_name,
self.utils._SYNTH_ETH_PORT_SET_DATA)])
mock_get_value_instances.assert_called_once_with(
mock_get_element_associated_class.return_value,
self.utils._BASE_METRICS_VALUE)
mock_sum_by_defs.assert_called_once_with(
mock_get_value_instances.return_value,
[mock.sentinel.net_in_metrics, mock.sentinel.net_out_metrics])
@mock.patch.object(metricsutils.MetricsUtils, '_get_metrics_values')
@mock.patch.object(metricsutils.MetricsUtils, '_get_vm_resources')
def test_get_disk_metrics(self, mock_get_vm_resources,
mock_get_metrics_values):
fake_read_mb = 1000
fake_write_mb = 2000
self.utils._metrics_defs_obj = {
self.utils._DISK_RD_METRICS: mock.sentinel.disk_rd_metrics,
self.utils._DISK_WR_METRICS: mock.sentinel.disk_wr_metrics}
mock_disk = mock.MagicMock(HostResource=[mock.sentinel.host_resource],
InstanceID=mock.sentinel.instance_id)
mock_get_vm_resources.return_value = [mock_disk]
mock_get_metrics_values.return_value = [fake_read_mb, fake_write_mb]
disk_metrics = list(
self.utils.get_disk_metrics(mock.sentinel.vm_name))
self.assertEqual(1, len(disk_metrics))
self.assertEqual(fake_read_mb, disk_metrics[0]['read_mb'])
self.assertEqual(fake_write_mb, disk_metrics[0]['write_mb'])
self.assertEqual(mock.sentinel.instance_id,
disk_metrics[0]['instance_id'])
self.assertEqual(mock.sentinel.host_resource,
disk_metrics[0]['host_resource'])
mock_get_vm_resources.assert_called_once_with(
mock.sentinel.vm_name,
self.utils._STORAGE_ALLOC_SETTING_DATA_CLASS)
metrics = [mock.sentinel.disk_rd_metrics,
mock.sentinel.disk_wr_metrics]
mock_get_metrics_values.assert_called_once_with(mock_disk, metrics)
@mock.patch.object(metricsutils.MetricsUtils, '_get_metrics_values')
@mock.patch.object(metricsutils.MetricsUtils, '_get_vm_resources')
def test_get_disk_latency_metrics(self, mock_get_vm_resources,
mock_get_metrics_values):
self.utils._metrics_defs_obj = {
self.utils._DISK_LATENCY_METRICS: mock.sentinel.metrics}
mock_disk = mock.MagicMock(HostResource=[mock.sentinel.host_resource],
InstanceID=mock.sentinel.instance_id)
mock_get_vm_resources.return_value = [mock_disk]
mock_get_metrics_values.return_value = [mock.sentinel.latency]
disk_metrics = list(
self.utils.get_disk_latency_metrics(mock.sentinel.vm_name))
self.assertEqual(1, len(disk_metrics))
self.assertEqual(mock.sentinel.latency,
disk_metrics[0]['disk_latency'])
self.assertEqual(mock.sentinel.instance_id,
disk_metrics[0]['instance_id'])
mock_get_vm_resources.assert_called_once_with(
mock.sentinel.vm_name,
self.utils._STORAGE_ALLOC_SETTING_DATA_CLASS)
mock_get_metrics_values.assert_called_once_with(
mock_disk, [mock.sentinel.metrics])
@mock.patch.object(metricsutils.MetricsUtils, '_get_metrics_values')
@mock.patch.object(metricsutils.MetricsUtils, '_get_vm_resources')
def test_get_disk_iops_metrics(self, mock_get_vm_resources,
mock_get_metrics_values):
self.utils._metrics_defs_obj = {
self.utils._DISK_IOPS_METRICS: mock.sentinel.metrics}
mock_disk = mock.MagicMock(HostResource=[mock.sentinel.host_resource],
InstanceID=mock.sentinel.instance_id)
mock_get_vm_resources.return_value = [mock_disk]
mock_get_metrics_values.return_value = [mock.sentinel.iops]
disk_metrics = list(
self.utils.get_disk_iops_count(mock.sentinel.vm_name))
self.assertEqual(1, len(disk_metrics))
self.assertEqual(mock.sentinel.iops,
disk_metrics[0]['iops_count'])
self.assertEqual(mock.sentinel.instance_id,
disk_metrics[0]['instance_id'])
mock_get_vm_resources.assert_called_once_with(
mock.sentinel.vm_name,
self.utils._STORAGE_ALLOC_SETTING_DATA_CLASS)
mock_get_metrics_values.assert_called_once_with(
mock_disk, [mock.sentinel.metrics])
def test_sum_metrics_values(self):
mock_metric = mock.MagicMock(MetricValue='100')
result = self.utils._sum_metrics_values([mock_metric] * 2)
self.assertEqual(200, result)
def test_sum_metrics_values_by_defs(self):
mock_metric = mock.MagicMock(MetricDefinitionId=mock.sentinel.def_id,
MetricValue='100')
mock_metric_useless = mock.MagicMock(MetricValue='200')
mock_metric_def = mock.MagicMock(Id=mock.sentinel.def_id)
result = self.utils._sum_metrics_values_by_defs(
[mock_metric, mock_metric_useless], [None, mock_metric_def])
self.assertEqual([0, 100], result)
def test_get_metrics_value_instances(self):
FAKE_CLASS_NAME = "FAKE_CLASS"
mock_el_metric = mock.MagicMock()
mock_el_metric_2 = mock.MagicMock()
mock_el_metric_2.path.return_value = mock.Mock(Class=FAKE_CLASS_NAME)
self.utils._conn.Msvm_MetricForME.side_effect = [
[], [mock.Mock(Dependent=mock_el_metric_2)]]
returned = self.utils._get_metrics_value_instances(
[mock_el_metric, mock_el_metric_2], FAKE_CLASS_NAME)
expected_return = [mock_el_metric_2]
self.assertEqual(expected_return, returned)
@mock.patch.object(metricsutils.MetricsUtils,
'_sum_metrics_values_by_defs')
def test_get_metrics_values(self, mock_sum_by_defs):
mock_element = mock.MagicMock()
self.utils._conn.Msvm_MetricForME.return_value = [
mock.Mock(Dependent=mock.sentinel.metric),
mock.Mock(Dependent=mock.sentinel.another_metric)]
resulted_metrics_sum = self.utils._get_metrics_values(
mock_element, mock.sentinel.metrics_defs)
self.utils._conn.Msvm_MetricForME.assert_called_once_with(
Antecedent=mock_element.path_.return_value)
mock_sum_by_defs.assert_called_once_with(
[mock.sentinel.metric, mock.sentinel.another_metric],
mock.sentinel.metrics_defs)
expected_metrics_sum = mock_sum_by_defs.return_value
self.assertEqual(expected_metrics_sum, resulted_metrics_sum)
@mock.patch.object(metricsutils.MetricsUtils, '_filter_metrics')
def test_get_metrics(self, mock_filter_metrics):
mock_metric = mock.MagicMock()
mock_element = mock.MagicMock()
self.utils._conn.Msvm_MetricForME.return_value = [mock_metric]
result = self.utils._get_metrics(mock_element,
mock.sentinel.metrics_def)
self.assertEqual(mock_filter_metrics.return_value, result)
self.utils._conn.Msvm_MetricForME.assert_called_once_with(
Antecedent=mock_element.path_.return_value)
mock_filter_metrics.assert_called_once_with(
[mock_metric.Dependent],
mock.sentinel.metrics_def)
def test_filter_metrics(self):
mock_metric = mock.MagicMock(MetricDefinitionId=mock.sentinel.def_id)
mock_bad_metric = mock.MagicMock()
mock_metric_def = mock.MagicMock(Id=mock.sentinel.def_id)
result = self.utils._filter_metrics([mock_bad_metric, mock_metric],
mock_metric_def)
self.assertEqual([mock_metric], result)
@mock.patch.object(_wqlutils, 'get_element_associated_class')
@mock.patch.object(metricsutils.MetricsUtils, '_get_vm_setting_data')
def test_get_vm_resources(self, mock_get_vm_setting_data,
mock_get_element_associated_class):
result = self.utils._get_vm_resources(mock.sentinel.vm_name,
mock.sentinel.resource_class)
mock_get_vm_setting_data.assert_called_once_with(mock.sentinel.vm_name)
vm_setting_data = mock_get_vm_setting_data.return_value
mock_get_element_associated_class.assert_called_once_with(
self.utils._conn, mock.sentinel.resource_class,
element_instance_id=vm_setting_data.InstanceID)
self.assertEqual(mock_get_element_associated_class.return_value,
result)
@mock.patch.object(metricsutils.MetricsUtils, '_unique_result')
def test_get_vm(self, mock_unique_result):
result = self.utils._get_vm(mock.sentinel.vm_name)
self.assertEqual(mock_unique_result.return_value, result)
conn_class = self.utils._conn.Msvm_ComputerSystem
conn_class.assert_called_once_with(ElementName=mock.sentinel.vm_name)
mock_unique_result.assert_called_once_with(conn_class.return_value,
mock.sentinel.vm_name)
@mock.patch.object(metricsutils.MetricsUtils, '_unique_result')
def test_get_switch_port(self, mock_unique_result):
result = self.utils._get_switch_port(mock.sentinel.port_name)
self.assertEqual(mock_unique_result.return_value, result)
conn_class = self.utils._conn.Msvm_EthernetPortAllocationSettingData
conn_class.assert_called_once_with(ElementName=mock.sentinel.port_name)
mock_unique_result.assert_called_once_with(conn_class.return_value,
mock.sentinel.port_name)
@mock.patch.object(metricsutils.MetricsUtils, '_unique_result')
def test_get_vm_setting_data(self, mock_unique_result):
result = self.utils._get_vm_setting_data(mock.sentinel.vm_name)
self.assertEqual(mock_unique_result.return_value, result)
conn_class = self.utils._conn.Msvm_VirtualSystemSettingData
conn_class.assert_called_once_with(ElementName=mock.sentinel.vm_name)
mock_unique_result.assert_called_once_with(conn_class.return_value,
mock.sentinel.vm_name)
def test_unique_result_not_found(self):
self.assertRaises(exceptions.NotFound,
self.utils._unique_result,
[], mock.sentinel.resource_name)
def test_unique_result_duplicate(self):
self.assertRaises(exceptions.OSWinException,
self.utils._unique_result,
[mock.ANY, mock.ANY], mock.sentinel.resource_name)
def test_unique_result(self):
result = self.utils._unique_result([mock.sentinel.obj],
mock.sentinel.resource_name)
self.assertEqual(mock.sentinel.obj, result)
| StarcoderdataPython |
1682861 | <reponame>huntc/conductr-cli<filename>typesafe_conductr_cli/test/test_conduct_run.py
from unittest import TestCase
from unittest.mock import patch, MagicMock
from typesafe_conductr_cli.test.cli_test_case import CliTestCase, strip_margin
from typesafe_conductr_cli import conduct_run
class TestConductRunCommand(TestCase, CliTestCase):
@property
def default_response(self):
return strip_margin("""|{
| "bundleId": "45e0c477d3e5ea92aa8d85c0d8f3e25c"
|}
|""")
default_args = {
'ip': '127.0.0.1',
'port': 9005,
'verbose': False,
'long_ids': False,
'cli_parameters': '',
'bundle': '45e0c477d3e5ea92aa8d85c0d8f3e25c',
'scale': 3
}
default_url = 'http://127.0.0.1:9005/bundles/45e0c477d3e5ea92aa8d85c0d8f3e25c?scale=3'
output_template = """|Bundle run request sent.
|Stop bundle with: conduct stop{params} {bundle_id}
|Print ConductR info with: conduct info{params}
|"""
def default_output(self, params='', bundle_id='45e0c47'):
return strip_margin(self.output_template.format(**{'params': params, 'bundle_id': bundle_id}))
def test_success(self):
http_method = self.respond_with(200, self.default_response)
stdout = MagicMock()
with patch('requests.put', http_method), patch('sys.stdout', stdout):
conduct_run.run(MagicMock(**self.default_args))
http_method.assert_called_with(self.default_url)
self.assertEqual(self.default_output(), self.output(stdout))
def test_success_verbose(self):
http_method = self.respond_with(200, self.default_response)
stdout = MagicMock()
with patch('requests.put', http_method), patch('sys.stdout', stdout):
args = self.default_args.copy()
args.update({'verbose': True})
conduct_run.run(MagicMock(**args))
http_method.assert_called_with(self.default_url)
self.assertEqual(self.default_response + self.default_output(), self.output(stdout))
def test_success_long_ids(self):
http_method = self.respond_with(200, self.default_response)
stdout = MagicMock()
with patch('requests.put', http_method), patch('sys.stdout', stdout):
args = self.default_args.copy()
args.update({'long_ids': True})
conduct_run.run(MagicMock(**args))
http_method.assert_called_with(self.default_url)
self.assertEqual(self.default_output(bundle_id='45e0c477d3e5ea92aa8d85c0d8f3e25c'), self.output(stdout))
def test_success_with_configuration(self):
http_method = self.respond_with(200, self.default_response)
stdout = MagicMock()
cli_parameters = ' --ip 127.0.1.1 --port 9006'
with patch('requests.put', http_method), patch('sys.stdout', stdout):
args = self.default_args.copy()
args.update({'cli_parameters': cli_parameters})
conduct_run.run(MagicMock(**args))
http_method.assert_called_with(self.default_url)
self.assertEqual(
self.default_output(params=cli_parameters),
self.output(stdout))
def test_failure(self):
http_method = self.respond_with(404)
stderr = MagicMock()
with patch('requests.put', http_method), patch('sys.stderr', stderr):
conduct_run.run(MagicMock(**self.default_args))
http_method.assert_called_with(self.default_url)
self.assertEqual(
strip_margin("""|ERROR: 404 Not Found
|"""),
self.output(stderr))
def test_failure_invalid_address(self):
http_method = self.raise_connection_error('test reason')
stderr = MagicMock()
with patch('requests.put', http_method), patch('sys.stderr', stderr):
conduct_run.run(MagicMock(**self.default_args))
http_method.assert_called_with(self.default_url)
self.assertEqual(
self.default_connection_error.format(self.default_args['ip'], self.default_args['port']),
self.output(stderr))
| StarcoderdataPython |
134589 | # Copyright (c) "Neo4j"
# Neo4j Sweden AB [http://neo4j.com]
#
# This file is part of Neo4j.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ssl
import pytest
from neo4j import (
BoltDriver,
GraphDatabase,
Neo4jDriver,
TRUST_ALL_CERTIFICATES,
TRUST_SYSTEM_CA_SIGNED_CERTIFICATES,
TrustAll,
TrustCustomCAs,
TrustSystemCAs,
)
from neo4j.api import (
READ_ACCESS,
WRITE_ACCESS,
)
from neo4j.exceptions import ConfigurationError
from ..._async_compat import mark_sync_test
@pytest.mark.parametrize("protocol", ("bolt://", "bolt+s://", "bolt+ssc://"))
@pytest.mark.parametrize("host", ("localhost", "127.0.0.1",
"[::1]", "[0:0:0:0:0:0:0:1]"))
@pytest.mark.parametrize("port", (":1234", "", ":7687"))
@pytest.mark.parametrize("params", ("", "?routing_context=test"))
@pytest.mark.parametrize("auth_token", (("test", "test"), None))
@mark_sync_test
def test_direct_driver_constructor(protocol, host, port, params, auth_token):
uri = protocol + host + port + params
if params:
with pytest.warns(DeprecationWarning, match="routing context"):
driver = GraphDatabase.driver(uri, auth=auth_token)
else:
driver = GraphDatabase.driver(uri, auth=auth_token)
assert isinstance(driver, BoltDriver)
driver.close()
@pytest.mark.parametrize("protocol",
("neo4j://", "neo4j+s://", "neo4j+ssc://"))
@pytest.mark.parametrize("host", ("localhost", "127.0.0.1",
"[::1]", "[0:0:0:0:0:0:0:1]"))
@pytest.mark.parametrize("port", (":1234", "", ":7687"))
@pytest.mark.parametrize("params", ("", "?routing_context=test"))
@pytest.mark.parametrize("auth_token", (("<PASSWORD>", "test"), None))
@mark_sync_test
def test_routing_driver_constructor(protocol, host, port, params, auth_token):
uri = protocol + host + port + params
driver = GraphDatabase.driver(uri, auth=auth_token)
assert isinstance(driver, Neo4jDriver)
driver.close()
@pytest.mark.parametrize("test_uri", (
"bolt+ssc://127.0.0.1:9001",
"bolt+s://127.0.0.1:9001",
"bolt://127.0.0.1:9001",
"neo4j+ssc://127.0.0.1:9001",
"neo4j+s://127.0.0.1:9001",
"neo4j://127.0.0.1:9001",
))
@pytest.mark.parametrize(
("test_config", "expected_failure", "expected_failure_message"),
(
({"encrypted": False}, ConfigurationError, "The config settings"),
({"encrypted": True}, ConfigurationError, "The config settings"),
(
{"encrypted": True, "trust": TRUST_ALL_CERTIFICATES},
ConfigurationError, "The config settings"
),
(
{"trust": TRUST_ALL_CERTIFICATES},
ConfigurationError, "The config settings"
),
(
{"trust": TRUST_SYSTEM_CA_SIGNED_CERTIFICATES},
ConfigurationError, "The config settings"
),
(
{"encrypted": True, "trusted_certificates": TrustAll()},
ConfigurationError, "The config settings"
),
(
{"trusted_certificates": TrustAll()},
ConfigurationError, "The config settings"
),
(
{"trusted_certificates": TrustSystemCAs()},
ConfigurationError, "The config settings"
),
(
{"trusted_certificates": TrustCustomCAs("foo", "bar")},
ConfigurationError, "The config settings"
),
(
{"ssl_context": None},
ConfigurationError, "The config settings"
),
(
{"ssl_context": ssl.SSLContext(ssl.PROTOCOL_TLSv1)},
ConfigurationError, "The config settings"
),
)
)
@mark_sync_test
def test_driver_config_error(
test_uri, test_config, expected_failure, expected_failure_message
):
if "+" in test_uri:
# `+s` and `+ssc` are short hand syntax for not having to configure the
# encryption behavior of the driver. Specifying both is invalid.
with pytest.raises(expected_failure, match=expected_failure_message):
GraphDatabase.driver(test_uri, **test_config)
else:
driver = GraphDatabase.driver(test_uri, **test_config)
driver.close()
@pytest.mark.parametrize("test_uri", (
"http://localhost:9001",
"ftp://localhost:9001",
"x://localhost:9001",
))
def test_invalid_protocol(test_uri):
with pytest.raises(ConfigurationError, match="scheme"):
GraphDatabase.driver(test_uri)
@pytest.mark.parametrize(
("test_config", "expected_failure", "expected_failure_message"),
(
({"trust": 1}, ConfigurationError, "The config setting `trust`"),
({"trust": True}, ConfigurationError, "The config setting `trust`"),
({"trust": None}, ConfigurationError, "The config setting `trust`"),
)
)
def test_driver_trust_config_error(
test_config, expected_failure, expected_failure_message
):
with pytest.raises(expected_failure, match=expected_failure_message):
GraphDatabase.driver("bolt://127.0.0.1:9001", **test_config)
@pytest.mark.parametrize("uri", (
"bolt://127.0.0.1:9000",
"neo4j://127.0.0.1:9000",
))
@mark_sync_test
def test_driver_opens_write_session_by_default(uri, mocker):
driver = GraphDatabase.driver(uri)
from neo4j import Transaction
# we set a specific db, because else the driver would try to fetch a RT
# to get hold of the actual home database (which won't work in this
# unittest)
with driver.session(database="foobar") as session:
acquire_mock = mocker.patch.object(session._pool, "acquire",
autospec=True)
tx_begin_mock = mocker.patch.object(Transaction, "_begin",
autospec=True)
tx = session.begin_transaction()
acquire_mock.assert_called_once_with(
access_mode=WRITE_ACCESS,
timeout=mocker.ANY,
database=mocker.ANY,
bookmarks=mocker.ANY
)
tx_begin_mock.assert_called_once_with(
tx,
mocker.ANY,
mocker.ANY,
mocker.ANY,
WRITE_ACCESS,
mocker.ANY,
mocker.ANY
)
driver.close()
@pytest.mark.parametrize("uri", (
"bolt://127.0.0.1:9000",
"neo4j://127.0.0.1:9000",
))
@mark_sync_test
def test_verify_connectivity(uri, mocker):
driver = GraphDatabase.driver(uri)
pool_mock = mocker.patch.object(driver, "_pool", autospec=True)
try:
ret = driver.verify_connectivity()
finally:
driver.close()
assert ret is None
pool_mock.acquire.assert_called_once()
assert pool_mock.acquire.call_args.kwargs["lifeness_check_timeout"] == 0
pool_mock.release.assert_called_once()
@pytest.mark.parametrize("uri", (
"bolt://127.0.0.1:9000",
"neo4j://127.0.0.1:9000",
))
@pytest.mark.parametrize("kwargs", (
{"access_mode": WRITE_ACCESS},
{"access_mode": READ_ACCESS},
{"fetch_size": 69},
))
@mark_sync_test
def test_verify_connectivity_parameters_are_deprecated(uri, kwargs,
mocker):
driver = GraphDatabase.driver(uri)
mocker.patch.object(driver, "_pool", autospec=True)
try:
with pytest.warns(DeprecationWarning, match="configuration"):
driver.verify_connectivity(**kwargs)
finally:
driver.close()
| StarcoderdataPython |
117448 | <gh_stars>0
def harshadNum(num):
    # A Harshad number is evenly divisible by the sum of its own digits.
    digitSum = sum([int(k) for k in str(num)])
    return num % digitSum == 0
n = int(input())
# Search upward from n for the first Harshad number.
while not harshadNum(n):
    n += 1
print(n) | StarcoderdataPython |
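A quick, illustrative check of the Harshad predicate defined in the sample above; the values 18 and 19 are arbitrary examples, not part of the original snippet:

```python
# Sanity check of the Harshad predicate (illustrative values).
assert harshadNum(18)       # 1 + 8 = 9, and 18 % 9 == 0
assert not harshadNum(19)   # 1 + 9 = 10, and 19 % 10 != 0
```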
3338161 | class Matrix(object):
def __init__(self, matrix_string):
self.values = self._decode(matrix_string)
def _decode(self, matrix_string):
decoded_rows = matrix_string.split("\n")
return [list(map(int, row.split())) for row in decoded_rows]
def row(self, index):
return self.values[index]
def column(self, index):
return [row[index] for row in self.values]
| StarcoderdataPython |
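A minimal usage sketch for the Matrix exercise above; the input string and expected values are invented for illustration:

```python
# Illustrative use of the Matrix class defined above.
m = Matrix("1 2 3\n4 5 6\n7 8 9")
assert m.row(1) == [4, 5, 6]      # rows come back as zero-indexed lists of ints
assert m.column(2) == [3, 6, 9]   # columns are collected across all rows
```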
3280503 | # coding: utf-8
"""
Hydrogen Integration API
The Hydrogen Integration API # noqa: E501
OpenAPI spec version: 1.3.1
Contact: <EMAIL>
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from integration_api.configuration import Configuration
class CardTokenResponseVO(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'apple_payload': 'ApplePayload',
'google_payload': 'GooglePayload',
'message': 'str',
'nucleus_card_id': 'str',
'samsung_payload': 'SamsungPayload',
'vendor_name': 'str',
'vendor_response': 'object',
'wallet': 'str'
}
attribute_map = {
'apple_payload': 'apple_payload',
'google_payload': 'google_payload',
'message': 'message',
'nucleus_card_id': 'nucleus_card_id',
'samsung_payload': 'samsung_payload',
'vendor_name': 'vendor_name',
'vendor_response': 'vendor_response',
'wallet': 'wallet'
}
def __init__(self, apple_payload=None, google_payload=None, message=None, nucleus_card_id=None, samsung_payload=None, vendor_name=None, vendor_response=None, wallet=None, _configuration=None): # noqa: E501
"""CardTokenResponseVO - a model defined in Swagger""" # noqa: E501
if _configuration is None:
_configuration = Configuration()
self._configuration = _configuration
self._apple_payload = None
self._google_payload = None
self._message = None
self._nucleus_card_id = None
self._samsung_payload = None
self._vendor_name = None
self._vendor_response = None
self._wallet = None
self.discriminator = None
if apple_payload is not None:
self.apple_payload = apple_payload
if google_payload is not None:
self.google_payload = google_payload
if message is not None:
self.message = message
if nucleus_card_id is not None:
self.nucleus_card_id = nucleus_card_id
if samsung_payload is not None:
self.samsung_payload = samsung_payload
if vendor_name is not None:
self.vendor_name = vendor_name
if vendor_response is not None:
self.vendor_response = vendor_response
if wallet is not None:
self.wallet = wallet
@property
def apple_payload(self):
"""Gets the apple_payload of this CardTokenResponseVO. # noqa: E501
:return: The apple_payload of this CardTokenResponseVO. # noqa: E501
:rtype: ApplePayload
"""
return self._apple_payload
@apple_payload.setter
def apple_payload(self, apple_payload):
"""Sets the apple_payload of this CardTokenResponseVO.
:param apple_payload: The apple_payload of this CardTokenResponseVO. # noqa: E501
:type: ApplePayload
"""
self._apple_payload = apple_payload
@property
def google_payload(self):
"""Gets the google_payload of this CardTokenResponseVO. # noqa: E501
:return: The google_payload of this CardTokenResponseVO. # noqa: E501
:rtype: GooglePayload
"""
return self._google_payload
@google_payload.setter
def google_payload(self, google_payload):
"""Sets the google_payload of this CardTokenResponseVO.
:param google_payload: The google_payload of this CardTokenResponseVO. # noqa: E501
:type: GooglePayload
"""
self._google_payload = google_payload
@property
def message(self):
"""Gets the message of this CardTokenResponseVO. # noqa: E501
:return: The message of this CardTokenResponseVO. # noqa: E501
:rtype: str
"""
return self._message
@message.setter
def message(self, message):
"""Sets the message of this CardTokenResponseVO.
:param message: The message of this CardTokenResponseVO. # noqa: E501
:type: str
"""
self._message = message
@property
def nucleus_card_id(self):
"""Gets the nucleus_card_id of this CardTokenResponseVO. # noqa: E501
:return: The nucleus_card_id of this CardTokenResponseVO. # noqa: E501
:rtype: str
"""
return self._nucleus_card_id
@nucleus_card_id.setter
def nucleus_card_id(self, nucleus_card_id):
"""Sets the nucleus_card_id of this CardTokenResponseVO.
:param nucleus_card_id: The nucleus_card_id of this CardTokenResponseVO. # noqa: E501
:type: str
"""
self._nucleus_card_id = nucleus_card_id
@property
def samsung_payload(self):
"""Gets the samsung_payload of this CardTokenResponseVO. # noqa: E501
:return: The samsung_payload of this CardTokenResponseVO. # noqa: E501
:rtype: SamsungPayload
"""
return self._samsung_payload
@samsung_payload.setter
def samsung_payload(self, samsung_payload):
"""Sets the samsung_payload of this CardTokenResponseVO.
:param samsung_payload: The samsung_payload of this CardTokenResponseVO. # noqa: E501
:type: SamsungPayload
"""
self._samsung_payload = samsung_payload
@property
def vendor_name(self):
"""Gets the vendor_name of this CardTokenResponseVO. # noqa: E501
:return: The vendor_name of this CardTokenResponseVO. # noqa: E501
:rtype: str
"""
return self._vendor_name
@vendor_name.setter
def vendor_name(self, vendor_name):
"""Sets the vendor_name of this CardTokenResponseVO.
:param vendor_name: The vendor_name of this CardTokenResponseVO. # noqa: E501
:type: str
"""
self._vendor_name = vendor_name
@property
def vendor_response(self):
"""Gets the vendor_response of this CardTokenResponseVO. # noqa: E501
:return: The vendor_response of this CardTokenResponseVO. # noqa: E501
:rtype: object
"""
return self._vendor_response
@vendor_response.setter
def vendor_response(self, vendor_response):
"""Sets the vendor_response of this CardTokenResponseVO.
:param vendor_response: The vendor_response of this CardTokenResponseVO. # noqa: E501
:type: object
"""
self._vendor_response = vendor_response
@property
def wallet(self):
"""Gets the wallet of this CardTokenResponseVO. # noqa: E501
:return: The wallet of this CardTokenResponseVO. # noqa: E501
:rtype: str
"""
return self._wallet
@wallet.setter
def wallet(self, wallet):
"""Sets the wallet of this CardTokenResponseVO.
:param wallet: The wallet of this CardTokenResponseVO. # noqa: E501
:type: str
"""
allowed_values = ["google", "apple", "samsung", "other"] # noqa: E501
if (self._configuration.client_side_validation and
wallet not in allowed_values):
raise ValueError(
"Invalid value for `wallet` ({0}), must be one of {1}" # noqa: E501
.format(wallet, allowed_values)
)
self._wallet = wallet
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(CardTokenResponseVO, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, CardTokenResponseVO):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, CardTokenResponseVO):
return True
return self.to_dict() != other.to_dict()
| StarcoderdataPython |
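A hedged round-trip sketch for the generated CardTokenResponseVO model above. The payload values are invented, and it assumes the surrounding integration_api package (including its Configuration class) is importable with its default client-side validation:

```python
# Hypothetical round trip through the generated model (field values are invented).
payload = {'message': 'ok', 'wallet': 'google', 'nucleus_card_id': 'card_123'}
vo = CardTokenResponseVO.from_dictionary(payload)
print(vo.wallet)     # 'google' passes the allowed-values check in the setter
print(vo.to_dict())  # serializes the populated attributes back to a plain dict
```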
1666413 | #!/usr/bin/env python
"""
Configure folder for RCC drift correction testing.
Note: This test takes approximately 20-30 minutes.
Hazen 09/18
"""
import numpy
import storm_analysis.simulator.emitters_in_clusters as emittersInClusters
import storm_analysis.simulator.emitters_on_lines as emittersOnLines
import storm_analysis.diagnostics.rcc.settings as settings
def configure():
# Create localizations in clusters file.
#
print("Creating clustered localizations file.")
emittersInClusters.emittersInClusters("cluster_list.hdf5",
50,
200,
1.0,
sx = settings.x_size,
sy = settings.y_size)
# Create localizations on lines file.
#
print("Creating lines localizations file.")
emittersOnLines.emittersOnLines("lines_list.hdf5",
50,
100000,
sx = settings.x_size,
sy = settings.y_size)
# Create drift file. This is used in the simulations to displace
# the localizations.
#
dx = settings.x_drift/settings.n_frames
drift_x = numpy.arange(0.0, settings.x_drift + 0.5 * dx, dx)
dy = settings.y_drift/settings.n_frames
drift_y = numpy.arange(0.0, settings.y_drift + 0.5 * dy, dy)
dz = settings.z_drift/settings.n_frames
drift_z = numpy.arange(0.0, settings.z_drift + 0.5 * dz, dz)
drift_data = numpy.zeros((drift_x.size, 3))
drift_data[:,0] = drift_x
drift_data[:,1] = drift_y
numpy.savetxt("drift_xy.txt", drift_data)
drift_data[:,2] = drift_z
numpy.savetxt("drift_xyz.txt", drift_data)
if (__name__ == "__main__"):
configure()
| StarcoderdataPython |
190701 | # Copyright (c) 2021 <NAME>. All Rights Reserved.
from maya.app.general.mayaMixin import MayaQWidgetDockableMixin
import maya.OpenMaya as om
import pymel.core as pm
import piper_config as pcfg
import piper.core.util as pcu
from piper.ui.widget import manager
from piper.ui.switcher import Switcher
from piper.mayapy.pipe.store import store
import piper.mayapy.rig as rig
import piper.mayapy.rig.space as space
import piper.mayapy.rig.control as control
import piper.mayapy.rig.switcher as switcher
import piper.mayapy.pipernode as pipernode
class MayaSwitcher(MayaQWidgetDockableMixin, Switcher):
def __init__(self, maya_store=None, *args, **kwargs):
super(MayaSwitcher, self).__init__(dcc_store=maya_store, *args, **kwargs)
manager.register(self)
self.callback = om.MEventMessage.addEventCallback('SelectionChanged', self.onSelectionChanged)
self.selected = None
self.inners = []
self.pivots = []
self.rests = []
self.onSelectionChanged() # called to load/update switcher on startup
def restorePrevious(self):
"""
Restores the previous settings the window had when it was closed.
"""
# all controls state
rigs = pm.ls(type='piperRig')
states = [attr.get() for r in rigs for attr in r.listAttr(v=True, k=True, st='*' + pcfg.visibility_suffix)]
state = not all(states)
self.all_controls_button.setChecked(state)
# all bendy controls state
bendy_controls = control.getAllBendy()
state = not all([ctrl.visibility.get() for ctrl in bendy_controls])
self.bendy_button.setChecked(state)
# joints state
state = not all([joint.visibility.get() for joint in pm.ls(type='joint') if joint not in bendy_controls])
self.joints_button.setChecked(state)
# hide/play state
controls = control.getAll()
state = all([ctrl.hideOnPlayback.get() for ctrl in controls]) if controls else False
self.hide_play_button.setChecked(state)
# inner controls state
controls = control.getAllInner()
state = not all([ctrl.visibility.get() for ctrl in controls])
self.all_inner_button.setChecked(state)
super(MayaSwitcher, self).restorePrevious()
def onSelectionChanged(self, *args):
"""
Main update function called every time user selects a node.
"""
# don't update list if user does not want to update.
if not self.self_update.isChecked():
return
# declare iterables to add to
names = []
inners_state = []
self.inners.clear()
spaces = pcu.OrderedSet(['local'])
switchers = set()
self.pivots = pcu.OrderedSet()
self.rests = pcu.OrderedSet()
self.selected = pm.selected()
# gather info we need
for node in self.selected:
# skip over is node is not a transform, for example: don't need verts, faces, etc.
if not isinstance(node, pm.nodetypes.Transform):
continue
node_name = node.name()
names.append(node_name)
# get all the space attributes from the node
node_spaces = space.getAll(node)
spaces.update(node_spaces)
# get fk/ik switcher controls
fk_ik_switcher = switcher.get(node, error=False, name=True)
if fk_ik_switcher:
switchers.add(fk_ik_switcher)
# grabs children of node and node to see if any are dynamic pivots
for child in node.getChildren() + [node]:
child_name = child.name(stripNamespace=True)
if child_name.endswith(pcfg.dynamic_pivot_suffix + pcfg.control_suffix):
self.pivots.add(child_name)
self.rests.add(child.attr(pcfg.dynamic_pivot_rest).get())
# adding inner controls for visibility toggle
if pcfg.inner_suffix in child_name and child != node:
visibility = child.visibility.get()
inners_state.append(visibility)
self.inners.append(child)
# update window title with selected and lists widgets with info we gathered
text = ' - ' + ', '.join(names) if names else ''
self.setWindowTitle('Switcher' + text)
self.updateList(self.spaces_list, spaces, minimum=1)
self.updateList(self.switcher_list, switchers)
self.updateList(self.pivots_list, self.pivots)
self.updateList(self.rest_list, self.rests)
inner_state = not all(inners_state)
self.selected_inner_button.setChecked(inner_state)
def onSpacePressed(self, item):
"""
Called when a space item is clicked. Attempts to match all selected items to that space.
Args:
item (QtWidgets.QListWidgetItem): Space pressed that will match selected objects if they have the space.
"""
t = self.translate.isChecked()
r = self.rotate.isChecked()
o = self.orient.isChecked()
s = self.scale.isChecked()
k = self.keyframe_box.isChecked()
# parsing name since None == local space in space.switch argument
name = item.text()
if name == 'local':
name = None
pm.undoInfo(openChunk=True)
[space.switch(n, name, t=t, r=r, o=o, s=s, key=k) for n in self.selected if name is None or n.hasAttr(name)]
pm.undoInfo(closeChunk=True)
def onSwitcherPressed(self, item):
"""
Called when a FK/IK switcher item is clicked.
Args:
item (QtWidgets.QListWidgetItem): FK/IK switcher clicked that will have its FK or IK matched.
"""
key = self.keyframe_box.isChecked()
match = self.match_only.isChecked()
pm.undoInfo(openChunk=True)
space.switchFKIK(pm.PyNode(item.text()), key=key, match_only=match)
pm.undoInfo(closeChunk=True)
def onPivotPressed(self, item):
"""
Called when dynamic pivot item is clicked. Will set dynamic pivot transforms to 0.
Args:
item (QtWidgets.QListWidgetItem): Dynamic pivot to reset when clicked.
"""
pm.undoInfo(openChunk=True)
space.resetDynamicPivot(pm.PyNode(item.text()), key=self.keyframe_box.isChecked())
pm.undoInfo(closeChunk=True)
def onPivotRestPressed(self, item):
"""
Called when dynamic pivot item is clicked. Will move dynamic pivot to its rest position.
Args:
item (QtWidgets.QListWidgetItem): Dynamic pivot to move to rest position when clicked.
"""
pm.undoInfo(openChunk=True)
index = self.rest_list.indexFromItem(item)
pivot_item = self.pivots_list.item(index.row())
space.resetDynamicPivot(pm.PyNode(pivot_item.text()), key=self.keyframe_box.isChecked(), rest=True)
pm.undoInfo(closeChunk=True)
def onSelectAllPressed(self):
"""
Selects all the controls.
"""
controls = control.getAll()
pm.select(controls)
def onAllControlsPressed(self):
"""
Toggles between showing/hiding all controls.
"""
rigs = pm.ls(type='piperRig')
state = not self.all_controls_button.isChecked()
pm.undoInfo(openChunk=True)
[attr.set(state) for r in rigs for attr in r.listAttr(ud=True, v=True, k=True, st='*' + pcfg.visibility_suffix)]
pm.undoInfo(closeChunk=True)
def onInnerControlsPressed(self):
"""
Toggles between showing/hiding all inner controls.
"""
controls = control.getAllInner()
state = not self.all_inner_button.isChecked()
pm.undoInfo(openChunk=True)
[ctrl.visibility.set(state) for ctrl in controls]
pm.undoInfo(closeChunk=True)
def onSelectedInnerPressed(self):
"""
Toggles between showing/hiding selected controls' inner control.
"""
state = not self.selected_inner_button.isChecked()
pm.undoInfo(openChunk=True)
[ctrl.visibility.set(state) for ctrl in self.inners]
pm.undoInfo(closeChunk=True)
def onBendyPressed(self):
"""
Toggles between showing/hiding all bendy controls.
"""
controls = control.getAllBendy()
state = not self.bendy_button.isChecked()
pm.undoInfo(openChunk=True)
[ctrl.visibility.set(state) for ctrl in controls]
pm.undoInfo(closeChunk=True)
def onJointsPressed(self):
"""
Toggles between showing/hiding all joints in scene.
"""
joints = pm.ls(type='joint')
bendy_controls = control.getAllBendy()
joints = filter(lambda i: i not in bendy_controls, joints)
state = not self.joints_button.isChecked()
pm.undoInfo(openChunk=True)
[joint.visibility.set(state) for joint in joints]
pm.undoInfo(closeChunk=True)
def onHideOnPlayPressed(self):
"""
Toggles between showing/hiding controls during playback.
"""
controls = control.getAll()
state = self.hide_play_button.isChecked()
pm.undoInfo(openChunk=True)
[ctrl.hideOnPlayback.set(state) for ctrl in controls]
pm.undoInfo(closeChunk=True)
def onResetPressed(self):
"""
Sets the selected controls to zero/bind pose. If no controls selected, zeroes out all controls in scene.
"""
pm.undoInfo(openChunk=True)
rig.zeroOut()
pm.undoInfo(closeChunk=True)
def onRigPressed(self):
"""
Selects the rigs associated with current selection. If nothing selected, selects all piperRigs in scene.
"""
rigs = pipernode.get('piperRig')
pm.select(rigs)
def dockCloseEventTriggered(self):
self.onClosedPressed()
manager.unregister(self)
om.MMessage.removeCallback(self.callback)
super(MayaSwitcher, self).dockCloseEventTriggered()
def show():
"""
Convenience method for showing MayaSwitcher.
Returns:
(MayaSwitcher): QtWidget being shown.
"""
gui = MayaSwitcher(maya_store=store)
gui.show(dockable=True)
return gui
| StarcoderdataPython |
3393036 | <reponame>serik1987/corefacility<filename>corefacility/core/test/entity_set/token/base_test_class.py
from time import sleep
from datetime import timedelta
from parameterized import parameterized
from core.entity.entity_exceptions import EntityNotFoundException
from ..base_test_class import BaseTestClass
from ..entity_set_objects.user_set_object import UserSetObject
def token_reading_provider():
return [
(0, 0),
(1, 0),
(2, 3),
]
class TokenTest(BaseTestClass):
"""
The base class for testing all internally issued tokens (i.e., authentications, cookies etc.)
"""
_token_set_class = None
_token_class = None
_token_object_class = None
_user_set_object = None
_token_set_object = None
@classmethod
def setUpTestData(cls):
super().setUpTestData()
cls._user_set_object = UserSetObject()
cls._authentication_set_object = cls._token_object_class(cls._user_set_object)
@parameterized.expand(token_reading_provider())
def test_authentication_retrieve(self, auth_index, user_index):
auth_id = self._authentication_set_object[auth_index].id
desired_user = self._user_set_object[user_index]
auth_set = self._token_set_class()
with self.assertLessQueries(1):
auth = auth_set.get(auth_id)
self.assertTokenUser(auth.user, desired_user)
def assertTokenUser(self, actual_user, desired_user):
self.assertEquals(actual_user.id, desired_user.id, "The desired user was not loaded correctly")
self.assertEquals(actual_user.login, desired_user.login, "The desired user login was not loaded correctly")
self.assertEquals(actual_user.name, desired_user.name, "The desired user name was not loaded correctly")
self.assertEquals(actual_user.surname, desired_user.surname,
"The desired user surname was not loaded correctly")
del BaseTestClass
| StarcoderdataPython |
1722514 | import re
PARTITION_METADATA_PATTERN = re.compile('.*CONST (:.*) \[')
KEY_WITH_METADATA_43_TO_5 = ["parrangestart", "parrangeend", "parrangeevery", "parlistvalues"]
KEY_WITH_METADATA = ["version", "parrangestartincl"] + KEY_WITH_METADATA_43_TO_5
# These columns contain CONST or other nodes that may contain :location fields.
# The :location fields will be stripped out before the comparison.
KEYS_WITH_LOCATIONS = ["parrangestart", "parrangeend", "parrangeevery", "parlistvalues"]
class PartitionComparator(object):
def _parse_value(self, partition_info_dict, column_list):
"""
input like:
(({CONST :consttype 1042 :constlen -1 :constbyval false :constisnull false :constvalue 5 [ 0 0 0 5 77 ]}
{CONST :consttype 23 :constlen 4 :constbyval true :constisnull false :constvalue 4 [ 1 0 0 0 0 0 0 0 ]}))
break into chunks, sort the chunks by alpha,
:returns a list of all text chunks split by a space
"""
result = dict(partition_info_dict)
# Strip out any :location fields, they are not relevant for the comparison.
for column_key in KEYS_WITH_LOCATIONS:
if isinstance(result[column_key], str):
result[column_key] = re.sub(' :location -?[0-9]+', '', result[column_key])
for column_key in column_list:
value = result[column_key]
# removing enclosing parenthesis and curly braces '(({' and '}))'
value = value[3:-3]
chunks = value.split("} {")
alpha_chunks = sorted(chunks)
result[column_key] = []
for chunk in alpha_chunks:
result[column_key].extend(chunk.split(" "))
return result
class Version4toXPartitionComparator(PartitionComparator):
def _parse_value(self, partition_info_dict):
return super(Version4toXPartitionComparator, self)._parse_value(partition_info_dict,
KEY_WITH_METADATA_43_TO_5)
def _remove_key_and_value(self, list_info, key_to_remove):
while key_to_remove in list_info:
index = list_info.index(key_to_remove)
# delete also removes the corresponding value
del list_info[index:index+2]
def is_same(self, source_partition_info, dest_partition_info):
"""
TODO: compare attributes not used for list or range, e.g., parisdefault
"""
src_dict = self._parse_value(source_partition_info)
dest_dict = self._parse_value(dest_partition_info)
for key in KEY_WITH_METADATA_43_TO_5:
version_info = dest_dict[key]
# greenplum 5 added a 'consttypmod' attribute.
# remove it so as to compare all other attributes
self._remove_key_and_value(version_info, ':consttypmod')
if src_dict[key] != dest_dict[key]:
return False
return True
class SameVersionPartitionComparator(PartitionComparator):
ALLOW_UNORDERED_COLUMNS_LIST = ['parlistvalues']
def _parse_value(self, partition_info_dict):
return super(SameVersionPartitionComparator, self)._parse_value(partition_info_dict,
self.ALLOW_UNORDERED_COLUMNS_LIST)
def is_same(self, source_partition_info, dest_partition_info):
src_dict = self._parse_value(source_partition_info)
dest_dict = self._parse_value(dest_partition_info)
for key in KEY_WITH_METADATA:
if src_dict[key] != dest_dict[key]:
return False
return True
class PartitionComparatorFactory:
def get(self, source, dest):
source_ver = source['version']
dest_ver = dest['version']
if source_ver == dest_ver:
return SameVersionPartitionComparator()
elif source_ver == '4.3' and dest_ver != '4.3':
return Version4toXPartitionComparator()
raise Exception("No comparator defined for source "
"and dest versions\n: %s \n\n %s" % (source, dest))
| StarcoderdataPython |
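A hypothetical sketch of how the comparator factory above might be exercised. The CONST node string is a simplified stand-in for real Greenplum catalog output, not actual metadata:

```python
# Hypothetical usage; the metadata strings are simplified stand-ins for catalog text.
node = '(({CONST :consttype 23 :constlen 4 :constvalue 1 [ 1 0 0 0 ]}))'
src = {'version': '4.3', 'parrangestartincl': '1', 'parrangestart': node,
       'parrangeend': node, 'parrangeevery': node, 'parlistvalues': node}
dest = dict(src)
comparator = PartitionComparatorFactory().get(src, dest)  # same versions -> SameVersionPartitionComparator
print(comparator.is_same(src, dest))                      # True, the metadata is identical
```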
189743 | from pyrt.point import Point3
from pyrt.vec3 import Vec3
def test_point_point_addition():
assert isinstance(Point3(0, 0, 0) + Point3(0, 0, 0), Vec3)
def test_point_vec_addition():
assert isinstance(Point3(0, 0, 0) + Vec3(0, 0, 0), Point3)
assert isinstance(Vec3(0, 0, 0) + Point3(0, 0, 0), Point3)
| StarcoderdataPython |
8473 | # -*- coding: utf-8 -*-
"""
mundiapi
This file was automatically generated by APIMATIC v2.0 ( https://apimatic.io ).
"""
class UpdatePlanRequest(object):
"""Implementation of the 'UpdatePlanRequest' model.
Request for updating a plan
Attributes:
name (string): Plan's name
description (string): Description
installments (list of int): Number os installments
statement_descriptor (string): Text that will be shown on the credit
card's statement
currency (string): Currency
interval (string): Interval
interval_count (int): Interval count
payment_methods (list of string): Payment methods accepted by the
plan
billing_type (string): Billing type
status (string): Plan status
shippable (bool): Indicates if the plan is shippable
billing_days (list of int): Billing days accepted by the plan
metadata (dict<object, string>): Metadata
minimum_price (int): Minimum price
trial_period_days (int): Number of trial period in days, where the
customer will not be charged
"""
# Create a mapping from Model property names to API property names
_names = {
"name":'name',
"description":'description',
"installments":'installments',
"statement_descriptor":'statement_descriptor',
"currency":'currency',
"interval":'interval',
"interval_count":'interval_count',
"payment_methods":'payment_methods',
"billing_type":'billing_type',
"status":'status',
"shippable":'shippable',
"billing_days":'billing_days',
"metadata":'metadata',
"minimum_price":'minimum_price',
"trial_period_days":'trial_period_days'
}
def __init__(self,
name=None,
description=None,
installments=None,
statement_descriptor=None,
currency=None,
interval=None,
interval_count=None,
payment_methods=None,
billing_type=None,
status=None,
shippable=None,
billing_days=None,
metadata=None,
minimum_price=None,
trial_period_days=None):
"""Constructor for the UpdatePlanRequest class"""
# Initialize members of the class
self.name = name
self.description = description
self.installments = installments
self.statement_descriptor = statement_descriptor
self.currency = currency
self.interval = interval
self.interval_count = interval_count
self.payment_methods = payment_methods
self.billing_type = billing_type
self.status = status
self.shippable = shippable
self.billing_days = billing_days
self.metadata = metadata
self.minimum_price = minimum_price
self.trial_period_days = trial_period_days
@classmethod
def from_dictionary(cls,
dictionary):
"""Creates an instance of this model from a dictionary
Args:
dictionary (dictionary): A dictionary representation of the object as
obtained from the deserialization of the server's response. The keys
MUST match property names in the API description.
Returns:
object: An instance of this structure class.
"""
if dictionary is None:
return None
# Extract variables from the dictionary
name = dictionary.get('name')
description = dictionary.get('description')
installments = dictionary.get('installments')
statement_descriptor = dictionary.get('statement_descriptor')
currency = dictionary.get('currency')
interval = dictionary.get('interval')
interval_count = dictionary.get('interval_count')
payment_methods = dictionary.get('payment_methods')
billing_type = dictionary.get('billing_type')
status = dictionary.get('status')
shippable = dictionary.get('shippable')
billing_days = dictionary.get('billing_days')
metadata = dictionary.get('metadata')
minimum_price = dictionary.get('minimum_price')
trial_period_days = dictionary.get('trial_period_days')
# Return an object of this model
return cls(name,
description,
installments,
statement_descriptor,
currency,
interval,
interval_count,
payment_methods,
billing_type,
status,
shippable,
billing_days,
metadata,
minimum_price,
trial_period_days)
| StarcoderdataPython |
3341633 | """ Functions to easily download census data for a given geography. """
import os
import pandas as pd
from census import Census
from itertools import chain
from functools import reduce
from dotenv import load_dotenv
from dotenv import find_dotenv
def census_key():
""" Retrieve the Census API key.
:return: the US Census API key as a string. """
key = os.environ.get('CENSUS_API_KEY')
if key is not None:
return key
load_dotenv(find_dotenv())
return os.environ.get('CENSUS_API_KEY')
def columns_for_table(table, line_nums):
""" For a given table and line numbers, returns a list of census
columns.
:param table: a string representing the table name
:param line_nums: a list of integers representing the line
numbers in the table to create columns for.
:return: a list of strings that represent the census columns. """
line_ends = ['{}E'.format(str(l).zfill(3)) for l in line_nums]
return ['{}_{}'.format(table, l) for l in line_ends]
def pull_census_columns(cols, census_api=None):
""" Pull a given list of census columns.
:param cols: a list of columns to pull
:param census_api: a Census API object
:return: a DataFrame where the geography is the index and
the columns are the columns. """
if census_api is None:
census_api = Census(census_key())
def pull_chunk(col_chunk):
# Census API only allows 50 columns per call
print('Pulling columns {}'.format(col_chunk))
df = pd.DataFrame(census_api.acs5.zipcode(['NAME'] + col_chunk, '*'))
df['NAME'] = df['NAME'].map(lambda x: x[-5:])
return (df.rename(columns={'NAME': 'zipcode'})
.set_index('zipcode')[col_chunk]
.apply(pd.to_numeric, errors='coerce'))
chunk_fn = lambda c, n: (c[i:i+n] for i in range(0, len(c), n))
return reduce(lambda x, y: x.join(y),
(pull_chunk(chunk) for chunk in chunk_fn(cols, 49)))
def income_variables():
""" Return the list of income variables to pull from.
:return: A DataFrame with the income variables as the columns
and the geography as the index. """
return columns_for_table('B19001', range(2, 18))
def educational_attainment():
""" Return the list of educational attainment variables to pull from.
:return: A DataFrame with the educational variables as the
columns and the geography as the index. """
return columns_for_table('B15003', range(16, 26))
def age_variables():
""" Return the list of age variables to pull from.
:return: A list of column names to pull. """
return columns_for_table('B01001', range(2, 50))
def population_variables():
""" Return the list of population variables to pull from.
:return: A list of columns names to pull. """
return columns_for_table('B00001', range(1, 2))
def census_pipeline(col_pipeline):
""" Given a list of functions, calls them all and joins them to get
a census dataset.
:param col_pipeline: a list of functions that return the list of
columns needed to be pulled.
:return: a DataFrame with all the variables as columns and zipcode
as the index. """
cols = set(chain.from_iterable(f() for f in col_pipeline))
return pull_census_columns(list(cols))
if __name__ == '__main__':
# Example use: build a dataset by pulling income, education, age,
# and population by zipcode and save it out.
df = census_pipeline([income_variables,
educational_attainment,
age_variables,
population_variables])
df.to_csv('census_dataset.csv')
| StarcoderdataPython |
138704 | <filename>subproc_env.py
#!/usr/bin/env python3
import numpy as np
import time
import gym
from typing import Callable
from subproc import subproc
from multi_env import MultiEnv
from phonebot.sim.pybullet.simulator import PybulletPhonebotEnv, PybulletSimulatorSettings
@subproc
class PybulletPhonebotSubprocEnv(PybulletPhonebotEnv):
pass
def main():
# env = PybulletPhonebotSubprocEnv()
action_size = 8
num_env = 4
def get_env(index: int):
# env = PybulletPhonebotEnv(sim_settings=PybulletSimulatorSettings(
# render=False, random_orientation=True))
env = PybulletPhonebotSubprocEnv(
PybulletSimulatorSettings(render=False, random_orientation=True))
env.set_seed(index)
env.reset()
return env
env = MultiEnv(get_env, num_env)
while True:
print(env.sense())
res = env.step([np.zeros(action_size) for _ in range(num_env)])
print(res[0], res[1], res[2], res[3])
time.sleep(0.1)
break
if __name__ == '__main__':
main()
| StarcoderdataPython |
3218976 | # Copyright 2021, UChicago Argonne, LLC
# All Rights Reserved
# Software Name: repast4py
# By: Argonne National Laboratory
# License: BSD-3 - https://github.com/Repast/repast4py/blob/master/LICENSE.txt
from typing import List
from pathlib import Path
# class Timer:
# def __init__(self):
# self.times = {}
# def start_timer(self, name):
# if not name in self.times:
# self.times[name] = [time.time(), 0, []]
# else:
# self.times[name][0] = time.time()
# def stop_timer(self, name):
# t = time.time()
# data = self.times[name]
# data[1] = t
# data[2].append(data[1] - data[0])
# def print_times(self):
# comm = MPI.COMM_WORLD
# all_timings = comm.gather(self.times)
# rank = MPI.COMM_WORLD.Get_rank()
# if rank == 0:
# print('{:<6s}{:<16s}{:>12s}{:>12s}{:>12s}{:>12s}{:>12s}'.
# format('rank', 'timer_name', 'sum','min', 'max', 'mean', 'std'))
# for r, timings in enumerate(all_timings):
# for k, v in timings.items():
# mean = np.mean(v[2])
# sm = np.sum(v[2])
# mn = np.min(v[2])
# mx = np.max(v[2])
# std = np.std(v[2])
# print('{:<6d}{:<16s}{:>12.4f}{:>12.4f}{:>12.4f}{:>12.4f}{:>12.4f}'.
# format(r, k,
# sm, mn, mx, mean, std))
def is_empty(lst: List[List]) -> bool:
"""Returns whether or not the specified list of lists
is empty.
Args:
lst: the list of lists
Returns:
True if the list is empty or all of its nested lists are empty, otherwise False.
"""
for nl in lst:
if len(nl) > 0:
return False
return True
def find_free_filename(file_path: str) -> Path:
"""Given a file path, check if that file exists,
and if so, repeatedly add a numeric infix to that
file path until the file does not exist.
For example, if output/counts.csv, exists check
if counts_1.csv, counts_2.csv, and so on exists until
finding one that doesn't exist.
Args:
file_path: the path to the file to check
Return:
the path to the unused file
"""
op = Path(file_path)
p = Path(file_path)
suffix = p.suffix
infix = 1
while (p.exists()):
p = op.with_name(f'{op.stem}_{infix}{suffix}')
infix += 1
return p
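# Illustrative usage (added; not part of the original module):
if __name__ == '__main__':
    assert is_empty([[], []])
    assert not is_empty([[], [1]])
    # If 'output/counts.csv' already exists, this yields 'output/counts_1.csv',
    # then 'output/counts_2.csv', and so on; otherwise the path is returned as-is.
    print(find_free_filename('output/counts.csv'))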
| StarcoderdataPython |
3397639 | num = list()
pares = list()
impares = list()
while True:
num.append(int(input('Digite um número: ')))
continuar = ' '
while continuar not in 'SN':
continuar = str(input('Quer continuar? [S/N] ')).strip().upper()[0]
if continuar in 'N':
break
for i, v in enumerate(num):
if v % 2 == 0:
pares.append(v)
elif v % 2 == 1:
impares.append(v)
print('=*=' * 30)
print(f'A lista completa é {num}')
print(f'Os números pares são {pares}')
print(f'Os números impares são {impares}')
| StarcoderdataPython |
117236 | <filename>ChunkProcessingContinuation/dataframe_continue.py
import pandas as pd
class Continue:
def __init__(
self,
path_old=None,
path_new=None,
path_init=None,
chunk_path=None,
chunk_processing=None,
chunksize=None
):
self.path_old = path_old
self.path_new = path_new
self.path_init = path_init
self.chunk_path = chunk_path
self.chunk_processing = chunk_processing
self.chunksize = chunksize
def reader(self, path):
if path.endswith(".csv"):
df = pd.read_csv(path)
else:
if path.endswith(".xls") or path.endswith(".xlsx"):
df = pd.read_excel(path)
return df
def get_leftover(self):
df_old = self.reader(self.path_old)
df_new = self.reader(self.path_new)
df_iterator = df_old[df_new.shape[0]:]
df_iterator.to_csv(self.path_init, index=False)
df_iterator = pd.read_csv(self.path_init, iterator=True, chunksize=self.chunksize)
return df_iterator, df_new
def odd(self, num):
if num%2 == 1:
return 1
else:
return 0
def main_process(self):
df_iterator, df_new = self.get_leftover()
chunk_list = [df_new]
i = 1
for data_chunk in df_iterator:
processed_chunk = self.chunk_processing(data_chunk)
chunk_list.append(processed_chunk)
if len(chunk_list)>1:
processed_chunk = pd.concat(chunk_list)
processed_chunk.to_excel(self.chunk_path.format("checkpoint_chunk_"+str(self.odd(i))), index=False)
i += 1
return "Done"
if __name__ == "__main__":
main_process() | StarcoderdataPython |
194894 | import sys
from termcolor import cprint
from colorama import init
from pyfiglet import figlet_format
import pyperclip
cprint(figlet_format('Geometry', font='small'), 'blue', attrs=['bold', 'blink'])
cprint('==============================================', 'white', attrs=['blink'])
cprint('Scientific Calculator v.0.0.0', 'blue', attrs=['bold'])
cprint('==============================================', 'white', attrs=['blink'])
print() | StarcoderdataPython |
3209906 | <gh_stars>1-10
# Generated by Django 4.0.2 on 2022-03-13 20:11
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0037_alter_moveoutnotice_reason_and_more'),
]
operations = [
migrations.AlterModelOptions(
name='contact',
options={'verbose_name_plural': 'Visitor Contact'},
),
migrations.AlterModelOptions(
name='moveoutnotice',
options={'verbose_name': "Tenant's To Move Out Notice", 'verbose_name_plural': "Tenant's To Move Out Notice"},
),
migrations.AddField(
model_name='contact',
name='ref_code',
field=models.CharField(default=27838872278, max_length=15),
preserve_default=False,
),
]
| StarcoderdataPython |
1786355 | <filename>src/texttospeechservice/src/util/locale_manager.py
import csv
class LocaleManager:
""" Utility class to handle locale data and transform languages to a given default locale.
This class handles the locales supported by our tts service. It should be used to check
if the locale provided by the user is supported. In case that a user does not send a locale,
it can transform a language to a default locale for the language (e.g. 'en' to 'en-UK')
"""
def __init__(self, locale_mappings_file):
# this may as well be a singleton, since we should only read
# the mappings once. However, due to time constraints and since
# this is only used once in the app it suffices for now
self.mappings = {}
with open(locale_mappings_file, 'r', encoding='utf-8') as f:
reader = csv.DictReader(f)
for row in reader:
self.mappings[row['language']] = row['locale']
def is_locale(self, l):
return l in self.mappings.values()
def try_get_locale(self, language, default=None):
return self.mappings[language] if language in self.mappings else default
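# Minimal usage sketch (added; not part of the original service). The CSV path
# and its contents are assumptions: a file with 'language' and 'locale'
# columns, e.g. a row "en,en-UK".
if __name__ == '__main__':
    manager = LocaleManager('locale_mappings.csv')  # hypothetical mappings file
    print(manager.is_locale('en-UK'))
    print(manager.try_get_locale('en', default='en-UK'))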
| StarcoderdataPython |
55727 | <gh_stars>10-100
#!/usr/bin/env python
# encoding: utf-8
"""
Implements the Newman-Ziff algorithm for Monte Carlo simulation of percolation
This module implements the Newman-Ziff algorithm for Monte Carlo simulation of
Bernoulli percolation on arbitrary graphs.
The :mod:`percolate` module provides these high-level functions from the
:mod:`percolate.percolate` module:
.. autosummary::
percolate.sample_states
percolate.single_run_arrays
percolate.microcanonical_averages
percolate.microcanonical_averages_arrays
percolate.canonical_averages
percolate.spanning_1d_chain
percolate.spanning_2d_grid
percolate.statistics
See Also
--------
percolate.percolate : low-level functions
Notes
-----
Currently, the module only implements bond percolation.
Spanning cluster detection is implemented, but wrapping detection is not.
The elementary unit of computation is the *sample state*:
This is one particular realization with a given number of edges---one member of
the *microcanonical ensemble*.
As Newman & Ziff suggest [1]_, the module evolves a sample state by
successively adding edges, in a random but predetermined order.
This is implemented as a generator function :func:`sample_states` to iterate
over.
Each step of the iteration adds one edge.
A collection of sample states (realizations) evolved in parallel form a
*microcanonical ensemble* at each iteration step.
A microcanonical ensemble is hence a collection of different sample states
(realizations) but with the same number of edges (*occupation number*).
The :func:`microcanonical_averages` generator function evolves a microcanonical
ensemble.
At each step, it calculates the cluster statistics over all realizations in the
ensemble.
The :func:`microcanonical_averages_arrays` helper function collects these
statistics over all iteration steps into single numpy arrays.
Finally, the :func:`canonical_averages` function calculates the statistics of
the *canonical ensemble* from the microcanonical ensembles.
References
----------
.. [1] Newman, <NAME>. & <NAME>. Fast monte carlo algorithm for site
or bond percolation. Physical Review E 64, 016706+ (2001),
`10.1103/physreve.64.016706`__
__ http://dx.doi.org/10.1103/physreve.64.016706
.. todo::
`Implement site percolation`__
__ https://github.com/andsor/pypercolate/issues/5
.. todo::
`Implement wrapping detection`__
__ https://github.com/andsor/pypercolate/issues/6
"""
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from future.builtins import (ascii, bytes, chr, dict, filter, hex, input,
int, map, next, oct, open, pow, range, round,
str, super, zip)
from percolate.percolate import (
sample_states,
single_run_arrays,
microcanonical_averages,
microcanonical_averages_arrays,
canonical_averages,
spanning_1d_chain,
spanning_2d_grid,
statistics,
)
import pkg_resources
__version__ = pkg_resources.get_distribution(__name__).version
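# Hedged usage sketch (added; not part of the original package). The calls
# below are assumptions inferred only from the docstring above: build a
# spanning 2D grid and collect single-run cluster statistics while edges are
# added one by one. The argument meaning and keyword name are assumed.
if __name__ == '__main__':
    grid = spanning_2d_grid(16)          # assumed: argument is the grid side length
    run = single_run_arrays(graph=grid)  # assumed keyword argument name
    print(type(run))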
| StarcoderdataPython |
1667915 | <gh_stars>0
from gpiozero import Button
button=Button(4)
button.wait_for_release()
print ("Door opened")
| StarcoderdataPython |
3257402 | <reponame>Riteme/test<gh_stars>1-10
#!/usr/bin/env pypy
from sys import *
from random import *
n = int(argv[1])
print n
for v in xrange(2, n + 1):
u = randint(max(1, v - 10), v - 1)
print u, v
| StarcoderdataPython |
1614423 | <reponame>pbx/Mercurial<gh_stars>10-100
from AAAPT.runner import register_tests
TESTS_CLIENT = 'Mercurial.tests.shglib.test_client'
TESTS_PARSING = 'Mercurial.tests.shglib.test_parsing'
TESTS_LOG_SUPPORT = 'Mercurial.tests.shglib.test_log_support'
TESTS_ALL_CLIENT = [TESTS_CLIENT]
TESTS_ALL_PARSING = [TESTS_PARSING]
TESTS_ALL_SUPPORT = [TESTS_LOG_SUPPORT]
test_suites = {
'client': TESTS_ALL_CLIENT,
'parsing': TESTS_ALL_PARSING,
'support': TESTS_ALL_SUPPORT,
}
register_tests(test_suites)
| StarcoderdataPython |
47889 | def sommig(n):
result = 0
while(n>=1):
result += n
n-=1
return result
print(sommig(3))
print(sommig(8))
print(sommig(17))
print(sommig(33)) | StarcoderdataPython |
1604072 | <reponame>Darij/Darij-code-challange-fs-dario
import json
class Suggester:
"""Object to query and return results as suggestions
"""
def __init__(self, qFilter, rFilter, sFilter):
self.matches = []
self.q_filter = qFilter
self.rate_filter = rFilter
self.skill_filter = sFilter
self.num_filters = 0
with open('generated.json', 'r') as file:
self.data = json.load(file)
if qFilter:
self.num_filters += 1
if rFilter:
self.num_filters += 1
if sFilter:
self.num_filters += 1
def _get_comparer(self, sfilter, list_field):
"""If field comparer is a list,
extract element that resembles
filter the most by comparing
length differences.
Arguments:
sfilter {[string]} -- [filter from self to use]
list_field {[list]} -- [field in matched query]
Returns:
            The element of the list that best matches the filter, or None
"""
field_comp = None
curr_lenght = 20 # Initial dummy value
for value in list_field:
if sfilter in value:
temp_lenght = len(value) - len(sfilter)
if temp_lenght < curr_lenght:
curr_lenght = temp_lenght
field_comp = value
return field_comp
def _sum_confidence(self, len_filter, len_field):
""" Calculate percentage of how much one word matches another
and add it total confidence
Divide by number of filter used
Arguments:
len_filter {[int]} -- length of filter
len_field {[int]} -- length of field
"""
confidence = len_filter / len_field
confidence = confidence / self.num_filters
return confidence
def _compareStrings(self, ufilter, field):
"""Procedure to compare filter
and string field
Arguments:
skill {[string]}
ufield {[string]} -- Using filter
"""
confidence = 0
filter_length = 0
field_length = len(field)
if isinstance(field, list):
field = self._get_comparer(ufilter, field)
if field is not None:
field_length = len(field)
if field is not None and ufilter in field:
filter_length = len(ufilter)
confidence = self._sum_confidence(filter_length, field_length)
return confidence
def _calculate_Confidence(self, entry):
"""Calculate the % of match confidence
by comparing the similarity between
matched field and filters
Arguments:
entry {[json object]} -- [object with employee data]
"""
for field in entry['matched_fields']:
confidence = 0
field_length = len(field)
if self.rate_filter and self.rate_filter in field:
filter_length = len(self.rate_filter)
confidence += self._sum_confidence(filter_length, field_length)
if self.skill_filter:
confidence += self._compareStrings(self.skill_filter, field)
if self.q_filter:
confidence += self._compareStrings(self.q_filter, field)
entry['score'] += confidence
def _values_match(self, query, data):
"""Compares the type of data in
filter and json field
before comparing value
Arguments:
query {[string]} -- query filter
data {[json]}
Returns:
[boolean] -- [if values match or not]
"""
if isinstance(data, list) and query in data:
return True
elif type(query) == type(data) and query in data:
return True
else:
return False
def _sortResults(self):
def by_score(e):
return e['score']
for entry in self.matches:
self._calculate_Confidence(entry)
self.matches.sort(key=by_score, reverse=True)
def search(self):
"""Query data, sorts
records if matched with filters
"""
for entry in self.data:
matched_fields = []
entry['matched'] = False
if self.rate_filter is not None:
# fix $sign bug
if self.rate_filter in entry['min_rate']:
matched_fields.append(entry['min_rate'])
entry['matched'] = True
if self.skill_filter is not None:
if self.skill_filter in entry['verified_skills']:
if entry['verified_skills'] not in matched_fields:
matched_fields.append(entry['verified_skills'])
entry['matched'] = True
if self.q_filter is not None:
for field in entry:
if self._values_match(self.q_filter, entry[field]):
if entry[field] not in matched_fields:
matched_fields.append(entry[field])
entry['matched'] = True
if entry['matched']:
entry['matched_fields'] = matched_fields
self.matches.append(entry)
self._sortResults()
| StarcoderdataPython |
3316832 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
from habitat.core.logging import HabitatLogger
baselines_logger = HabitatLogger(
name="habitat_baselines",
level=int(os.environ.get("HABITAT_BASELINES_LOG", logging.ERROR)),
format_str="[%(levelname)s,%(name)s] %(asctime)-15s %(filename)s:%(lineno)d %(message)s",
)
| StarcoderdataPython |
1647799 | <gh_stars>0
#################################################################
# #
# ASCII-Chess, written by <NAME> in 2021 #
# #
#################################################################
from time import sleep
import urllib3
# Class used to communicate with a remote player
class Network:
    http: urllib3.PoolManager
host: str = 'chess.spiffindustries.com'
filename: str = ''
url: str
player: int
msg: str
lastId: int = None
# Constructor: establish connection string.
def __init__(self):
self.http = urllib3.PoolManager()
if self.host.endswith('/'):
self.url = self.host + self.filename
else:
self.url = self.host + '/' + self.filename
# Assign sides randomly at the beginning of the game
def newGame(self) -> int:
r = self.http.request('GET', self.url + '?newGame')
msg = r.data.decode("utf-8")
if msg == 'white':
self.player = 1
elif msg == 'black':
self.player = 2
else:
raise Exception('Initiating new game failed')
return self.player
def readyToStart(self):
print('Waiting for opponent to join...')
while True:
r = self.http.request('GET', self.url + '?readyToStart')
msg = r.data.decode("utf-8")
if msg == 'WAIT':
sleep(1)
else:
break
return True
# Transmit last move to remote player
def sendMove(self, row1: int, col1: int, row2: int, col2: int) -> str:
r = self.http.request('GET', self.url + '?player=' + str(self.player) +
'&row1=' + str(row1) + '&col1=' + str(col1) +
'&row2=' + str(row2) + '&col2=' + str(col2))
msg = r.data.decode('utf-8')
return msg
# Receiving move transmission from remote player
def receiveMove(self, lastId: int) -> str:
while True:
r = self.http.request('GET', self.url + '?id=' + str(lastId))
msg = r.data.decode('utf-8')
if msg == 'WAIT':
sleep(1)
else:
break
return msg
| StarcoderdataPython |
85145 | import argparse
import asyncio
import logging
import math
import os
import cv2
import numpy
from aiortc import RTCPeerConnection
from aiortc.mediastreams import VideoFrame, VideoStreamTrack
from signaling import CopyAndPasteSignaling
BLUE = (255, 0, 0)
GREEN = (0, 255, 0)
RED = (0, 0, 255)
OUTPUT_PATH = os.path.join(os.path.dirname(__file__), 'output.png')
def frame_from_bgr(data_bgr):
data_yuv = cv2.cvtColor(data_bgr, cv2.COLOR_BGR2YUV_YV12)
return VideoFrame(width=data_bgr.shape[1], height=data_bgr.shape[0], data=data_yuv.tobytes())
def frame_to_bgr(frame):
data_flat = numpy.frombuffer(frame.data, numpy.uint8)
data_yuv = data_flat.reshape((math.ceil(frame.height * 12 / 8), frame.width))
return cv2.cvtColor(data_yuv, cv2.COLOR_YUV2BGR_YV12)
class ColorVideoStreamTrack(VideoStreamTrack):
def __init__(self, width, height, color):
data_bgr = numpy.zeros((height, width, 3), numpy.uint8)
data_bgr[:, :] = color
self.frame = frame_from_bgr(data_bgr=data_bgr)
async def recv(self):
return self.frame
class CombinedVideoStreamTrack(VideoStreamTrack):
def __init__(self, tracks):
self.tracks = tracks
async def recv(self):
coros = [track.recv() for track in self.tracks]
frames = await asyncio.gather(*coros)
data_bgrs = [frame_to_bgr(frame) for frame in frames]
data_bgr = numpy.hstack(data_bgrs)
return frame_from_bgr(data_bgr)
async def run_answer(pc, signaling):
remote_track = None
@pc.on('track')
def on_track(track):
nonlocal remote_track
assert track.kind == 'video'
remote_track = track
# receive offer
offer = await signaling.receive()
await pc.setRemoteDescription(offer)
# send answer
await pc.setLocalDescription(await pc.createAnswer())
await signaling.send(pc.localDescription)
print('Receiving video, press CTRL-C to stop')
while True:
frame = await remote_track.recv()
data_bgr = frame_to_bgr(frame)
cv2.imwrite(OUTPUT_PATH, data_bgr)
async def run_offer(pc, signaling):
# add video track
width = 320
height = 240
local_video = CombinedVideoStreamTrack(tracks=[
ColorVideoStreamTrack(width=width, height=height, color=BLUE),
ColorVideoStreamTrack(width=width, height=height, color=GREEN),
ColorVideoStreamTrack(width=width, height=height, color=RED),
])
pc.addTrack(local_video)
# send offer
await pc.setLocalDescription(await pc.createOffer())
await signaling.send(pc.localDescription)
# receive answer
answer = await signaling.receive()
await pc.setRemoteDescription(answer)
print('Sending video for 10s')
await asyncio.sleep(10)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Video stream with copy-and-paste signaling')
parser.add_argument('role', choices=['offer', 'answer'])
parser.add_argument('--verbose', '-v', action='count')
args = parser.parse_args()
if args.verbose:
logging.basicConfig(level=logging.DEBUG)
pc = RTCPeerConnection()
signaling = CopyAndPasteSignaling()
if args.role == 'offer':
coro = run_offer(pc, signaling)
else:
coro = run_answer(pc, signaling)
# run event loop
loop = asyncio.get_event_loop()
try:
loop.run_until_complete(coro)
except KeyboardInterrupt:
pass
finally:
loop.run_until_complete(pc.close())
| StarcoderdataPython |
1776853 | translate = {
'English': {
'main_title': 'Sahm',
'sub_title': 'Intelligent financial system',
'Insert_Data': 'Insert Data',
'Symbol': 'Symbol',
'Prediction_days': 'Prediction days',
'Date_Range': 'Date Range',
'Select_a_range_of_Date': 'Select a range of Date',
'From': 'From',
'until_now': 'until now',
'Period': 'Period',
'Current_price': 'Current price',
'SVM_Accuracy': 'SVM Accuracy',
'SVM_Prediction': 'SVM Prediction',
'LR_Accuracy': 'LR Accuracy',
'LR_Prediction': 'LR Prediction',
'TrendLines': 'TrendLines',
'SMA': 'SMA',
'MACD': 'MACD',
'Month': 'Month',
'Months': 'Months',
'Languages': 'Languages',
'RSI': 'RSI',
'Deep_Learning': 'Deep Learning',
'Deep_Learning_Prediction': 'Deep Learning Prediction'
},
'Persian': {
'main_title': 'سهم',
'sub_title': 'سامانه هوشمند مالی',
'Insert_Data': 'اضافه کردن اطلاعات',
'Symbol': 'نماد',
'Prediction_days': 'روز های پیشبینی',
'Date_Range': 'محدوده زمانی',
'Select_a_range_of_Date': 'محدوده ای از تاریخ را انتخاب کنید',
'From': 'از',
'until_now': 'تا امروز',
'Period': 'دوره',
'Current_price': 'قیمت فعلی',
'SVM_Accuracy': 'SVM دقت',
'SVM_Prediction': 'SVM پیشبینی',
'LR_Accuracy': 'LR دقت',
'LR_Prediction': 'LR پیشبینی',
'TrendLines': 'خطوط روند',
'SMA': 'میانگین متحرک',
'MACD': 'مکدی',
'Month': 'ماه',
'Months': 'ماه ها',
'Languages': 'زبان ها',
'RSI': 'آراسآی',
'Deep_Learning': 'یادگیری عمیق',
'Deep_Learning_Prediction': 'پیشبینی یادگیری عمیق'
}
} | StarcoderdataPython |
30186 | import newspaper
from newspaper import Article
def getarticle(url):
articleurl = url
article = Article(articleurl)
try:
article.download()
article.parse()
alltext = article.text
return alltext
except:
return "this website is not available"
| StarcoderdataPython |
3389774 | <filename>modelscript/metamodels/megamodels/__init__.py
# # coding=utf-8
#
METAMODEL=None # Will be filled by
def _setMetaModel(m2):
global METAMODEL
METAMODEL=m2 | StarcoderdataPython |
1710070 | import sys
sys.path.insert(0, './src/uri/basic')
import unittest
from basic_1007 import SimpleDiference
class TestSimpleDiference(unittest.TestCase):
def setUp(self) -> None:
self.test = SimpleDiference()
def test(self):
self.assertEqual(self.test.solution(5, 6, 7, 8), 'DIFERENCA = -26')
self.assertEqual(self.test.solution(0, 0, 7, 8), 'DIFERENCA = -56')
self.assertEqual(self.test.solution(5, 6, -7, 8), 'DIFERENCA = 86')
if __name__ == '__main__':
unittest.main()
| StarcoderdataPython |
4809476 | <reponame>JeyDi/E-Grid<gh_stars>1-10
import csv
import urllib.parse
import names
import requests
import yaml
with open('../maps/azuremaps_config.yml', encoding="utf8") as file:
configs = yaml.load(file, Loader=yaml.FullLoader)
url = "https://atlas.microsoft.com/search/address/json?"
i = 900
addresses = {}
with open("hosp_and_others.csv", "rt", encoding="utf8") as inf, \
open("../data/h_addresses.csv", "wt", encoding="utf8") as addf, \
open("../data/hospitals.csv", "wt", encoding="utf8") as outf:
reader = csv.reader(inf)
awriter = csv.writer(addf)
hwriter = csv.writer(outf)
hospitals = {}
params = {'subscription-key': configs['subscription_key'], "api-version": "1.0"}
for line in reader:
if not line[5] or line[5][0] not in ["3"]:
continue
name = line[6].title()
if "osp" not in name.lower():
continue
if name in hospitals:
continue
hospitals[name] = []
address = line[11].title()
zipcode = line[13]
city = line[14].title()
try:
params['query'] = ", ".join([name, address, zipcode, city])
print(", ".join([name, address, zipcode, city]))
encoded = urllib.parse.urlencode(params)
request_url = url + encoded
response = requests.get(request_url)
results = response.json()['results']
latitude = results[0]['position']['lat']
longitude = results[0]['position']['lon']
i +=1
hospitals[name] = [name, i, names.get_full_name(), "Hospital"]
awriter.writerow([i, address.replace("\"", "").replace(",", ""), city, "Lombardia", zipcode, "Italy", latitude, longitude])
hwriter.writerow(hospitals[name])
except Exception as message:
print(f"Impossibile to get information because: {message}")
| StarcoderdataPython |
81405 | """ct.py: Constant time(ish) functions"""
# WARNING: Pure Python is not amenable to the implementation of truly
# constant time cryptography. For more information, please see the
# "Security Notice" section in python/README.md.
def select(subject, result_if_one, result_if_zero):
# type: (int, int, int) -> int
"""Perform a constant time(-ish) branch operation"""
return (~(subject - 1) & result_if_one) | ((subject - 1) & result_if_zero)
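# Illustrative check (added; not part of the original module): with subject 1
# the first result is selected, with subject 0 the second, without an if/else
# branch on the (potentially secret) subject bit.
if __name__ == "__main__":
    assert select(1, 0xAA, 0x55) == 0xAA
    assert select(0, 0xAA, 0x55) == 0x55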
| StarcoderdataPython |
157548 | # -*- coding: utf-8 -*-
__author__ = 'Администратор'
from model.contact import Contact
from model.group import Group
from fixture.orm import ORMFixture
import random
def test_add_contact_in_group(app, db):
db = ORMFixture(host="127.0.0.1", name = "addressbook", user="root", password="")
if len(db.get_contact_list()) == 0:
app.contact.create(Contact(last="test"))
if len(db.get_group_list()) == 0:
app.group.create(Group(name="test"))
old_contacts = db.get_contact_list()
contact = random.choice(old_contacts)
old_groups = db.get_group_list()
group = random.choice(old_groups)
app.contact.add_contact_to_group(contact.id, group)
app.contact.show_contacts_in_group(group)
list_contacts = app.contact.get_contact_list()
def clean(contact):
return Contact(id = contact.id, first = contact.first.strip(), last = contact.last.strip())
db_list = map(clean, db.get_contacts_in_group(group))
assert sorted(list_contacts, key = Contact.id_or_max) == sorted(db_list, key = Contact.id_or_max)
| StarcoderdataPython |
36268 | <gh_stars>1-10
import datetime
from typing import List, Union
import numpy as np
from sqlalchemy.dialects.postgresql import insert
from sqlalchemy.types import Date, Float, Integer, Numeric, String
import omicron
from omicron import db
from omicron.client.quotes_fetcher import get_valuation
class Valuation(db.Model):
__tablename__ = "valuation"
id = db.Column(Integer, primary_key=True)
code = db.Column(String, nullable=False)
pe = db.Column(Float)
turnover = db.Column(Float)
pb = db.Column(Float)
ps = db.Column(Float)
pcf = db.Column(Float)
capital = db.Column(Numeric)
market_cap = db.Column(Numeric)
circulating_cap = db.Column(Numeric)
circulating_market_cap = db.Column(Numeric)
pe_lyr = db.Column(Float)
frame = db.Column(Date, nullable=False)
types = {
"code": "O",
"pe": "f4",
"turnover": "f4",
"pb": "f4",
"ps": "f4",
"pcf": "f4",
"capital": "f4",
"market_cap": "f4",
"circulating_cap": "f4",
"circulating_market_cap": "f4",
"pe_lyr": "f4",
"frame": "O",
}
@classmethod
async def get(
cls,
codes: Union[List[str], str],
frame: datetime.date,
fields: List[str] = None,
n: int = 1,
) -> np.array:
"""获取一支或者多支证券的直到`date`的`n`条数据
尽管本函数提供了同时查询多支证券、多个日期市值数据的能力,但为后续处理方便,建议一次仅
查询多支证券同一日的数据;或者一支证券多日的数据。
请调用者保证截止`date`日,证券已存在`n`条市值数据。否则,每次调用都会产生一次无效的数据
库查询:函数在查询数据库后,得不到满足条件的n条数据(无论此前数据是否存到本地,都不满足
此条件),查询无效,还要再次请求上游服务器,但上游服务器的答复数据很可能就是在数据库中
已存在的数据。
无论查询条件如果,返回数据均为numpy structured array。证券代码和日期为该array的index,
记录按date字段升序排列。有多只证券的,证券之间顺序由上游服务器决定。
Args:
codes (Union[List[str], str]): [description]
frame (datetime.date): [description]
fields (List[str]): if None, then returns all columns/fields from
database/remote
n (int):
Returns:
np.array: 返回数据为numpy structured array数组,包含以下字段:
"code", "pe","turnover","pb","ps","pcf","capital","market_cap",
"circulating_cap","circulating_market_cap","pe_lyr", "date",
"""
if omicron.has_db():
fields = fields or [
"code",
"pe",
"turnover",
"pb",
"ps",
"pcf",
"capital",
"market_cap",
"circulating_cap",
"circulating_market_cap",
"pe_lyr",
"frame",
]
if isinstance(codes, str):
codes = [codes]
            # By selecting the fields to query explicitly (even if it is all of them), we avoid instantiating Valuation objects
query = (
cls.select(*fields).where(cls.code.in_(codes)).where(cls.frame <= frame)
)
query = query.order_by(cls.frame.desc()).limit(len(codes) * n)
records = await query.gino.all()
if records and len(records) == n * len(codes) and records[0].frame == frame:
return cls.to_numpy(records, fields)[::-1]
# if no db connection, or no result from database, then try remote fetch
return await get_valuation(codes, frame, fields, n)
@classmethod
def to_numpy(cls, records: List, fields: List[str]) -> np.array:
"""将数据库返回的查询结果转换为numpy structured array
Args:
records (List): [description]
keys (List[str]): [description]
Returns:
np.array: [description]
"""
dtypes = [(name, cls.types[name]) for name in fields]
return np.array(
[tuple(rec[name] for name in fields) for rec in records], dtype=dtypes
)
@classmethod
async def save(cls, recs: np.array):
data = [dict(zip(recs.dtype.names, x)) for x in recs]
qs = insert(cls.__table__).values(data)
return await (
qs.on_conflict_do_update(
index_elements=[cls.code, cls.frame],
set_={col: qs.excluded[col] for col in recs.dtype.names},
)
.returning(cls.id)
.gino.all()
)
@classmethod
async def get_circulating_cap(cls, code: str, frame: datetime.date, n: int):
fields = ["frame", "circulating_cap"]
return await cls.get(code, frame, fields, n)
@classmethod
async def truncate(cls):
"""truncate table in database."""
# db.bind: `https://python-gino.org/docs/en/master/explanation/engine.html`
await db.bind.status(f"truncate table {cls.__tablename__}")
| StarcoderdataPython |
1631659 | <reponame>guanidene/pySudokuSolver
# -*- coding: UTF-8 -*-
"""This is a very important module contributing to both gui and logic.
The attribute LabelRestrictionsCount of the class SudokuCell is a very
important attribute in the working of this app."""
__author__ = u"पुष्पक दगड़े (<NAME>)"
import sys
from PyQt4 import QtGui
class SudokuCell(QtGui.QLabel):
"""Instances of this class will act as (graphical) containers for
labels. Scrolling on an instance will rotate through its permissible
labels"""
# Define constants common to all the instances of the class here.
Labels = ('', '1', '2', '3', '4', '5', '6', '7', '8', '9')
# You can modify the tuple 'Labels' in such a way that -
# 1. Its consists of exactly 10 elements.
# 2. Its first element is necessarily a '' (an empty string).
# 3. All other 9 elements are also strings.
# Ex. Labels = ('','A','B','C','D','E','F','G','H','I') will work.
# Font size, relative to cell size, when mouse pointer
# enters & leaves the cell respectively
FONT_SIZE_FACTOR_MOUSE_OVER = 1.0
FONT_SIZE_FACTOR_NORMAL = 0.8
def __init__(self, *args, **kwargs):
"""
"""
"""The attribute LabelsRestrictionsCount is a very important attribute
of this class. It is a dictionary object whose keys (string) are the
items of the tuple SudokuCell.Labels and their values (int) represent
the number of restrictions on them. These values are initialized to 0.
This attribute helps the method setSudokuCellLabel of the class
SudokuPanel to decide whether or not a Label can be set to a
sudokucell depending on the value corresponding to the key-Label
        (in the dict LabelRestrictionsCount). If this value is greater than
0, then it means that there is a restriction on setting the Label to
the sudokucell (because some other sudokucell in its row, col or box
has the same label and setting the same label more than once in
a row,col or box is not permissible according to the rules of the game)
and so the method setSudokuCellLabel will not allow setting this
label on the sudokucell.
The first key of this dict '' (an empty string) is a special key,
whose corresponding value needs to be kept 0 always. This is because
there can be any number of 'empty' sudokucells in a row, col or box
ie there is no restriction on keeping a sudokucell empty.
Thus, this attribute (of every sudokucell) in conjunction with
        the setSudokuCellLabel method helps in following the game rules tightly.
Note: The values of the dict LabelRestrictionsCount attribute
does not always represent the 'exact' no of restrictions, many a
times it represents more than the actual number of restrictions (but
never less), but this does not affect the working!
(The reason for this is documentated in the setSudokuCellLabel method
of the class SudokuPanel.)
"""
super(self.__class__, self).__init__(*args, **kwargs)
self.LabelRestrictionsCount = dict(zip(SudokuCell.Labels,
[0] * len(SudokuCell.Labels)))
# The above statement is equivalent to -
# self.LabelRestrictionsCount = {'':0,'1':0,'2':0,'3':0,'4':0,
# '5':0,'6':0,'7':0,'8':0,'9':0}
# Although the former has lesser clarity, you won't have to change
# it even if you change SudokuCell.Labels which you will have to
# change in the later case.
def savePosition(self):
"""Get the x and y positions of the cell w.r.t the entire grid"""
# Created a separate method for this task, since objectName is not
# availabe during initialization of the QLabel
# assuming name of SudokuCell class instances is of format label_x_y
# where, x=row index of SudokuCell &
# y=col index of SudokuCell
self.posx = int(self.objectName()[-3])
self.posy = int(self.objectName()[-1])
# Note:
# The following could be a more better way to do the above task:
# One can get the position of a wiget in its parent layout; this can be
# used to directly get the x and y positions; This will help avoid
# setting objectNames for every label individually.
def isEmpty(self):
"""Return 1 if label is '' (empty string) else return 0."""
return self.text() == ''
def isFilled(self):
"""Return not self.isEmpty()."""
return not self.text() == ''
def isLabelPermissible(self, label):
"""Return 1 if value of the key 'label' in the dict
LabelsRestrictionsCount is 0 else return 0. If 'label'
is not a key in the dict LabelRestrictionsCount then
return 0."""
label = str(label).strip()
try:
return self.LabelRestrictionsCount[label] == 0
except Exception:
# will get an exception if label not in self.LabelRestrictionsCount
            print >> sys.stderr, \
                '[Got label "%s" which is not in SudokuCell.Labels]' % label
return 0
def labelsPermissible(self):
"""Return a tuple of labels whose corresponding
values in the dict LabelRestrictionsCount is 0."""
# Note: This will always return '' (empty string)
# as its 1st permissible label.
return tuple([label for label in self.LabelRestrictionsCount
if self.LabelRestrictionsCount[label] == 0])
def resizeEvent(self, evt):
"""Scale the font according to the (new) size of the widget."""
font = self.font()
font.setPixelSize(SudokuCell.FONT_SIZE_FACTOR_NORMAL *
min(self.height(), self.width()))
self.setFont(font)
def wheelEvent(self, event):
"""
Set labels to the sudokucell through the list
of its permissible labels in a rotating fashion.
If the scroll event is a scroll up event, then set the next label
from the list of permissible labels else set the previous label
in the list if the scroll event is a scroll down event.
"""
labels = list(SudokuCell.Labels)
index = labels.index(self.text())
labels = labels[index + 1:] + labels[:index]
# If wheelup, try increment, else if wheeldown, try decrement
if event.delta() < 0:
labels.reverse()
for key in labels:
if self.LabelRestrictionsCount[key] == 0:
self.parent().setSudokuCellLabel(self, key)
break
def enterEvent(self, event):
"""Increase the font size of the sudokucell & make it bold when
mouse enters."""
font = self.font()
font.setPixelSize(SudokuCell.FONT_SIZE_FACTOR_MOUSE_OVER *
min(self.height(), self.width()))
font.setBold(True)
self.setFont(font)
def leaveEvent(self, event):
"""Restore the font size of the sudokucell when mouse leaves."""
font = self.font()
font.setPixelSize(SudokuCell.FONT_SIZE_FACTOR_NORMAL *
min(self.height(), self.width()))
font.setBold(False)
self.setFont(font)
| StarcoderdataPython |
3260594 | <reponame>jefalexa/custom_modules<filename>src/jefalexaudf/__init__.py
import pandas as pd
import numpy as np
import datetime as dt
import os, sys
import re
import logging
import ipywidgets as widgets
from ipywidgets import interact, interact_manual, Button, Box, Layout, interactive, fixed
from IPython.display import clear_output, display
dir_home_options = ["/home/jovyan/work/", "/Users/jefalexa/"]
for dir_home in dir_home_options:
    if re.match(dir_home, os.getcwd()):
        break
dir_clipboard = os.path.join(dir_home, "Box Sync/data_repo/interim/clipboard")
def check_match(x, y, Match_Case=True):
'''Check if variable (x) matches regex pattern (y). Return True, False or N/A'''
try:
if Match_Case:
pattern = re.compile(y)
else:
pattern = re.compile(y, flags=re.IGNORECASE)
return(bool(re.search(pattern=pattern, string=x)))
except:
return("N/A")
def usd_to_float(test_string):
'''Turn a string representing a dollar amount into a float. '''
pattern = re.compile("(\$)|(USD \$)|(USD)")
try:
split = re.split(pattern, test_string)
return(float(split[-1]))
except:
return(0)
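# Examples (added for illustration):
# usd_to_float("$12.50") -> 12.5, usd_to_float("USD 99") -> 99.0,
# and anything that cannot be parsed (e.g. "$1,234.50") returns 0.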
def get_fy_info(date, calendar_fy, field=''):
'''Returns the fiscal calendar information for a given date
INPUTS:
date='%Y-%m-%d'
calendar_fy=DataFrame with Fiscal Information, generaly saved in Interim folder
field=If a valid field from the DF is listed, return just the value, if not, return the entire DF
'''
f1 = calendar_fy['Fiscal Week Start Date'] <= date
f2 = calendar_fy['Fiscal Week End Date'] >= date
if field in calendar_fy.columns:
return(calendar_fy.loc[f1&f2, field].to_list()[0])
else:
return(calendar_fy.loc[f1&f2, :])
def local_find(working_dir, x=".*"):
pattern = re.compile(x)
file_list = []
try:
for file in os.listdir(working_dir):
if re.match(pattern=pattern, string=file):
file_list.append(file)
return(file_list)
except:
file_list = []
return(file_list)
def local_find_recent(working_dir, x=".*"):
pattern = re.compile(x)
file_list = []
fts_min = 0
try:
for file in os.listdir(working_dir):
if re.match(pattern=pattern, string=file):
f2 = os.path.join(working_dir, file)
fts = os.stat(f2).st_mtime
fdt = dt.datetime.fromtimestamp(fts)
if ((fts_min < fts) | (fts_min == 0)):
file_list = [file, f2, fdt]
fts_min = fts
return(file_list)
except:
print("Error")
file_list = []
return(file_list)
def local_find_to_df(working_dir, x=".*"):
pattern = re.compile(x)
file_list = []
try:
for file in os.listdir(working_dir):
if re.match(pattern=pattern, string=file):
f2 = os.path.join(working_dir, file)
fsize = os.stat(f2).st_size
fts = os.stat(f2).st_mtime
fdt = dt.datetime.fromtimestamp(fts)
#print(file, fsize, fdt)
file_list.append([file, fsize, fdt])
return(pd.DataFrame(columns=['Filename', 'Size', 'Modified Time'], data=file_list))
except:
print("Error")
file_list = []
return(pd.DataFrame(columns=['Filename', 'Size', 'Modified Time'], data=file_list))
def local_find_dir(working_dir):
'''Returns a list of root directories in a given directory'''
directory_list = []
for name in os.listdir(working_dir):
if os.path.isdir(os.path.join(working_dir, name)):
directory_list.append(os.path.join(working_dir, name))
return(directory_list)
def interactive_file_saveloc(dir_list, search_pattern):
output_file = ""
def test01(dir_input=dir_list, search_pattern=fixed(search_pattern)):
file_df = local_find_to_df(dir_input, search_pattern).sort_values(by='Modified Time', ascending=False)
file_list = file_df['Filename'].tolist()
file_list.insert(0, "")
interact(test02, file_picker="{}".format(dt.datetime.strftime(dt.datetime.now(), '%m%d%Y_%H%M')), dir_input=fixed(dir_input), file_df=fixed(file_df))
def test02(file_picker, dir_input, file_df):
global interactive_file_saveloc_output
interactive_file_saveloc_output = [file_picker, os.path.join(dir_input, file_picker), dir_input]
if len(file_picker) > 0:
print(output_file)
return(file_df.loc[file_df['Filename'].apply(lambda x: check_match(x, file_picker)) == True ] )
else:
return(file_df)
interact(test01, dir_input=dir_list, search_pattern=fixed(search_pattern))
def interactive_table_frame(df):
col_list = df.select_dtypes('object').columns
val_list = df.select_dtypes('float').columns
def itf01(Filter1_Name, Filter2_Name, col_list, val_list):
l1 = df[Filter1_Name].sort_values().unique().tolist()
l1.insert(0, 'ANY')
l1.insert(1, '')
l2 = df[Filter2_Name].sort_values().unique().tolist()
l2.insert(0, 'ANY')
interact(test02, Filter1_Value='ANY', Filter2_Value='ANY', SortBy=df.columns, Ascending=[True, False], Clipboard=[False, True], Filter1_Name=fixed(Filter1_Name), Filter2_Name=fixed(Filter2_Name))
def test02(Filter1_Value, Filter2_Value, SortBy, Ascending, Clipboard, Filter1_Name, Filter2_Name):
try:
if Filter1_Value == 'ANY':
pdata1 = df
else:
#pattern = re.compile(r"{}".format(Filter1_Value))
pdata1 = df.loc[df[Filter1_Name].apply(lambda x: check_match(x, Filter1_Value)) == True]
if Filter2_Value == 'ANY':
pdata2 = pdata1
else:
#pattern = re.compile(r"{}".format(Filter2_Value))
pdata2 = pdata1.loc[pdata1[Filter2_Name].apply(lambda x: check_match(x, Filter2_Value)) == True]
pdata3 = pdata2.sort_values(SortBy, ascending=Ascending)
if Clipboard:
pdata3.to_clipboard(index=False)
global interactive_table_frame_output
interactive_table_frame_output = pdata3
return(pdata3)
except:
print("Make a selection")
interact(itf01, Filter1_Name=col_list, Filter2_Name=col_list, col_list=fixed(col_list), val_list=fixed(val_list))
def interactive_tabs(df):
global tab_contents
global tab
#tab_contents = df.columns.sort_values()
tab_contents = df.columns
children = []
for name in tab_contents:
try:
l1 = df[name].dropna().sort_values().unique().tolist()
l1.insert(0, '')
if df[name].dtype == (float or int):
f1 = widgets.HBox([widgets.Label(name), widgets.FloatRangeSlider(value=[df[name].min(), df[name].max()], min=df[name].min(), max=df[name].max(), step=1, disabled=False, continuous_update=False, orientation='horizontal', readout=True, readout_format='.0f', ) ])
else:
if len(l1) <= 20:
f1 = widgets.HBox([widgets.Label(name), widgets.SelectMultiple(options=l1, disabled=False) ])
else:
#f1 = widgets.Text(value='.*',placeholder='.*',description='Filter: ',disabled=False)
f1 = widgets.HBox([widgets.Label(name), widgets.Text(value='.*',placeholder='.*',disabled=False) ])
children.append(f1)
except:
print("Error on {}".format(name))
tab = widgets.Tab()
tab.children = children
for i in range(len(children)):
tab.set_title(i, tab_contents[i])
return(tab)
def interactive_tabs_display(df1):
index_num = 0
total_len = len(df1)
for index_num in range(0, len(tab_contents)):
tname = tab_contents[index_num]
tval = tab.children[index_num].children[1].value
if tval:
vt = type(tval)
if vt == type(tuple()):
if df1[tname].dtype == (float or int):
if ((tab.children[index_num].children[1].min == tval[0]) & (tab.children[index_num].children[1].max == tval[1])):
continue
else:
f1 = df1[tname] >= tval[0]
f2 = df1[tname] <= tval[1]
df1 = df1.loc[f1&f2, :]
print("____________\n{} Min: {} - Max: {}".format(tname, tval[0], tval[1]))
print("Matched {} entries".format(len(df1)))
else:
if tval == ('',):
continue
else:
f1 = df1[tname].isin(tval)
df1 = df1.loc[f1, :]
print("____________\n{} {}".format(tname, tval))
print("Matched {} entries".format(len(df1)))
else:
if tval == '.*':
continue
else:
Match_Case = True
df1 = df1.loc[df1[tname].apply(lambda x: check_match(x, tval, Match_Case)) == True]
print("____________\n{}: '{}' Matched:\n".format(tname, tval), df1[tname].value_counts())
print("____________\n", "Matched {} of {} entries".format(len(df1), total_len))
return(df1)
def to_myclip(df):
date_str = dt.datetime.strftime(dt.datetime.now(), '%m-%d-%y_%H%M%S')
file_name = "clipboard_{}.csv".format(date_str)
file = os.path.join(dir_clipboard, file_name)
df.to_csv(file)
print("Saved: {}".format(file))
def read_myclip():
file = local_find_recent(dir_clipboard, x=".*.csv")[1]
df = pd.read_csv(file, index_col=0)
return(df)
class file_picker():
'''
    Create a file_picker object, e.g. mypicker01 = file_picker(dir_list=['./', '../'], search_pattern=".*"), then call mypicker01.select() to pick files from a set of directories.
    Then reference the file name, full file path and directory as mypicker01.file_name, mypicker01.file_path, mypicker01.file_dir, or all three as mypicker01.interactive_file_picker_output.
'''
def __init__(self, dir_list=['./', '../'], search_pattern=".*"):
self.file = ""
self.interactive_file_picker_output = []
self.file_name = ""
self.file_path = ""
self.file_dir = ""
self.dir_list = []
self.search_pattern = ""
self.dir_list = dir_list
self.search_pattern = search_pattern
def select(self):
dir_list = self.dir_list
search_pattern = self.search_pattern
def ifp_sub01(dir_input, search_pattern):
file_list = self.__local_find_to_df(dir_input, search_pattern).sort_values(by='Modified Time', ascending=False)['Filename'].tolist()
file_list.insert(0, "")
interact(ifp_sub02, dir_input=fixed(dir_input), file_picker=file_list, search_pattern=fixed(search_pattern))
def ifp_sub02(dir_input, file_picker, search_pattern):
self.file = os.path.join(dir_input, file_picker)
if len(file_picker) > 0:
file_path = os.path.join(dir_input, self.file)
if os.path.isdir(file_path):
print("'{}' added to directory list. Reload select function.".format(file_path))
self.dir_list.append(file_path)
else:
self.interactive_file_picker_output = [file_picker, self.file, dir_input]
self.file_name, self.file_path, self.file_dir = [file_picker, self.file, dir_input]
print(" File: {}\n Path: {}\n Size: {}\n Modified: {}".format(file_picker, self.file, os.stat(self.file).st_size, dt.datetime.strftime(dt.datetime.fromtimestamp(os.stat(self.file).st_mtime), '%m-%d-%y %H:%M')))
else:
return(self.__local_find_to_df(dir_input, search_pattern).sort_values(by='Modified Time', ascending=False))
interact(ifp_sub01, dir_input=dir_list, search_pattern=fixed(search_pattern))
def __local_find_to_df(self, working_dir, x=".*"):
pattern = re.compile(x)
file_list = []
try:
for file in os.listdir(working_dir):
if re.match(pattern=pattern, string=file):
f2 = os.path.join(working_dir, file)
fsize = os.stat(f2).st_size
fts = os.stat(f2).st_mtime
fdt = dt.datetime.fromtimestamp(fts)
#print(file, fsize, fdt)
file_list.append([file, fsize, fdt])
return(pd.DataFrame(columns=['Filename', 'Size', 'Modified Time'], data=file_list))
except:
print("Error")
file_list = []
return(pd.DataFrame(columns=['Filename', 'Size', 'Modified Time'], data=file_list))
class interactive_tabs():
def __init__(self, df):
self.tab_contents = []
self.tab = widgets.Tab()
self.df = df
self.cols = df.columns
def select_columns(self):
f1 = widgets.HBox([widgets.Label("Columns"), widgets.SelectMultiple(options=self.df.columns, value=tuple(self.df.columns), disabled=False) ])
def handle_col_change(change):
self.cols = list(f1.children[1].value)
button = widgets.Button(description="Apply")
output = widgets.Output()
with output:
display(self.select())
def on_button_clicked(b):
with output:
self.cols = list(f1.children[1].value)
clear_output(wait=True)
display(self.select())
f1.children[1].observe(on_button_clicked, names='value')
display(f1, output)
def select(self):
self.tab_contents = self.cols
children = []
for name in self.tab_contents:
try:
l1 = self.df[name].dropna().sort_values().unique().tolist()
l1.insert(0, '')
if self.df[name].dtype == (float or int):
f1 = widgets.HBox([widgets.Label(name), widgets.FloatRangeSlider(value=[self.df[name].min(), self.df[name].max()], min=self.df[name].min(), max=self.df[name].max(), step=1, disabled=False, continuous_update=False, orientation='horizontal', readout=True, readout_format='.0f', ) ])
else:
if len(l1) <= 30:
f1 = widgets.HBox([widgets.Label(name), widgets.SelectMultiple(options=l1, disabled=False) ])
else:
f1 = widgets.HBox([widgets.Label(name), widgets.Text(value='.*',placeholder='.*',disabled=False) ])
children.append(f1)
except:
print("Error on {}".format(name))
self.tab.children = children
for i in range(len(children)):
self.tab.set_title(i, self.tab_contents[i])
display(self.tab)
def display(self):
index_num = 0
df1 = self.df[self.cols]
total_len = len(df1)
for index_num in range(0, len(self.tab_contents)):
tname = self.tab_contents[index_num]
tval = self.tab.children[index_num].children[1].value
if tval:
vt = type(tval)
if vt == type(tuple()):
if df1[tname].dtype == (float or int):
if ((self.tab.children[index_num].children[1].min == tval[0]) & (self.tab.children[index_num].children[1].max == tval[1])):
df1 = df1
else:
f1 = df1[tname] >= tval[0]
f2 = df1[tname] <= tval[1]
df1 = df1.loc[f1&f2, :]
print("____________\n{} Min: {} - Max: {}".format(tname, tval[0], tval[1]))
print("Matched {} entries".format(len(df1)))
else:
if tval == ('',):
continue
else:
f1 = df1[tname].isin(tval)
df1 = df1.loc[f1, :]
print("____________\n{} {}".format(tname, tval))
print("Matched {} entries".format(len(df1)))
else:
if tval == '.*':
df1 = df1
else:
Match_Case = True
df1 = df1.loc[df1[tname].apply(lambda x: check_match(x, tval, Match_Case)) == True]
print("____________\n{}: '{}' Matched:\n".format(tname, tval), df1[tname].value_counts())
print("____________\n", "Matched {} of {} entries".format(len(df1), total_len))
return(df1)
def datetime_from_exceldate(excel_date):
if type(excel_date) == int:
excel_date = excel_date
elif type(excel_date) == float:
if excel_date > 0:
excel_date = int(excel_date)
else:
return("NA")
elif type(excel_date) == str:
if excel_date.isnumeric():
excel_date = int(excel_date)
else:
return("Error")
else:
return("NA")
return(dt.datetime.fromordinal(dt.datetime(1900, 1, 1).toordinal() + excel_date - 2))
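# Example (added for illustration): datetime_from_exceldate(43831) returns
# datetime(2020, 1, 1); the "- 2" offsets Excel's 1-based serial dates and its
# fictitious 1900-02-29.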
def datetime_plus_years(date, years):
try:
return(date + dt.timedelta(days=years*365))
except:
return("NA")
def datetime_from_string(x):
try:
return(dt.datetime.strptime(x, "%m/%d/%y"))
except:
return("NA")
def datetime_to_year(date):
try:
return(date.year)
except:
return("NA")
def datetime_to_string(date):
try:
return(dt.datetime.strftime(date, "%m/%d/%y"))
except:
return("NA")
| StarcoderdataPython |
3367633 | <gh_stars>1-10
import os
from modelmapper.ui import get_user_choice, get_user_input, YES_NO_CHOICES, split_user_input
from modelmapper.misc import _validate_file_has_start_and_end_lines, load_toml, write_settings
from modelmapper.base import OVERRIDES_FILE_NAME
TRAINING_CSV_MSG = """
Please provide the relative path to the training csv files.
example: training1.csv
example2: "training1.csv","training2.csv"
"""
def _is_valid_path(user_input, setup_dir):
full_path = os.path.join(setup_dir, user_input)
return os.path.exists(full_path)
def _validate_csv_path(user_input, setup_dir):
user_inputs = split_user_input(user_input)
for _inp in user_inputs:
if not _is_valid_path(_inp, setup_dir):
return False
return True
def initialize(path):
"""
Initialize a ModelMapper setup for a model
This creates the setup template that you can use to train your model.
"""
identifier = os.path.basename(path)
setup_dir = os.path.dirname(path)
setup_path = os.path.join(setup_dir, f'{identifier}_setup.toml')
current_dir = os.path.dirname(os.path.abspath(__file__))
template_setup_path = os.path.join(current_dir, '../modelmapper/templates/setup_template.toml')
settings = load_toml(template_setup_path)['settings']
overrides_file_name = OVERRIDES_FILE_NAME.format(identifier)
overrides_path = os.path.join(setup_dir, overrides_file_name)
if os.path.exists(setup_path):
get_user_choice(f'{setup_path} already exists. Do you want to overwrite it?', choices=YES_NO_CHOICES)
if os.path.exists(overrides_path):
get_user_choice(f'{overrides_path} already exists. Do you want to overwrite it?', choices=YES_NO_CHOICES)
with open(overrides_path, 'w') as the_file:
the_file.write('# Overrides filse. You can add your overrides for any fields here.')
output_model_file = get_user_input('Please provide the relative path to the existing ORM model file.',
validate_func=_is_valid_path, setup_dir=setup_dir)
settings['output_model_file'] = output_model_file
output_model_path = os.path.join(setup_dir, output_model_file)
if not _validate_file_has_start_and_end_lines(user_input=None, path=output_model_path, identifier=identifier):
get_user_input(f'Please add the lines in a proper place in {output_model_file} code and enter continue',
_validate_file_has_start_and_end_lines, path=output_model_path, identifier=identifier)
training_csvs = get_user_input(TRAINING_CSV_MSG,
validate_func=_validate_csv_path, setup_dir=setup_dir)
settings['training_csvs'] = split_user_input(training_csvs)
write_settings(setup_path, settings)
print(f'{setup_path} is written.')
print(f'Please verify the generated settings and then train the model by running:\nmodelmapper run {identifier}_setup.toml') # NOQA
| StarcoderdataPython |
3258042 | <gh_stars>1-10
"""Tests for our `watches cluster_health` subcommand."""
import json
from subprocess import PIPE, Popen as popen
from secure_support import TestSecureSupport
from watches.util import ESClientProducer
from six import string_types
class TestClusterHealth(TestSecureSupport):
username_password = ['--username', 'kirk', '--password', '<PASSWORD>']
def test_returns_json(self):
cmd = self.appendSecurityCommands(['watches', 'cluster_health'])
output = popen(cmd, stdout=PIPE).communicate()[0].decode('ascii')
self.assertTrue(output.count("\n") > 1)
o = json.loads(output)
self.assertTrue(len(o) == 15)
self.assertTrue('status' in o)
self.assertTrue('cluster_name' in o)
self.assertTrue('number_of_nodes' in o)
def test_returns_single_line_json(self):
cmd = self.appendSecurityCommands(['watches', 'cluster_health', '-l'])
output = popen(cmd, stdout=PIPE).communicate()[0].decode('ascii')
self.assertTrue(output.count("\n") == 1)
self.assertTrue('status' in output)
self.assertTrue('cluster_name' in output)
self.assertTrue('number_of_nodes' in output)
# Test unbuffered output, see #20
# In fact we only test that the code can pass through this without issues
# but we do not test the effect of the buffer size. This is at least useful
# when testing against different versions of Python ('cos it depends on low level API).
def test_returns_single_line_unbuffered_json(self):
cmd = self.appendSecurityCommands(['watches', 'cluster_health', '-lb'])
output = popen(cmd, stdout=PIPE).communicate()[0].decode('ascii')
self.assertTrue(output.count("\n") == 1)
self.assertTrue('status' in output)
self.assertTrue('cluster_name' in output)
self.assertTrue('number_of_nodes' in output)
def test_returns_cluster_health_with_sniffing(self):
cmd = self.appendSecurityCommands(['watches', 'cluster_health', '--sniff'])
output = popen(cmd, stdout=PIPE).communicate()[0].decode('ascii')
o = json.loads(output)
self.assertTrue(len(o) == 15)
self.assertTrue('status' in o)
self.assertTrue('cluster_name' in o)
self.assertTrue('number_of_nodes' in o)
def test_returns_cluster_health_with_verbose(self):
cmd = self.appendSecurityCommands(['watches', 'cluster_health', '--verbose'])
output = popen(cmd, stdout=PIPE).communicate()[0].decode('ascii')
# Unless we index some data to cluster we can only check the indices field is present
self.assertTrue('Supplied options' in output)
def test_returns_cluster_health_with_timestamp(self):
cmd = self.appendSecurityCommands(['watches', 'cluster_health', '--timestamp'])
output = popen(cmd, stdout=PIPE).communicate()[0].decode('ascii')
# Unless we index some data to cluster we can only check the indices field is present
o = json.loads(output)
self.assertTrue(len(o) == 16)
self.assertTrue('timestamp' in o)
def test_returns_cluster_health(self):
cmd = self.appendSecurityCommands(['watches', 'cluster_health'])
output = popen(cmd, stdout=PIPE).communicate()[0].decode('ascii')
o = json.loads(output)
self.assertTrue('cluster_name' in o)
self.assertTrue('number_of_nodes' in o)
self.assertTrue('number_of_data_nodes' in o)
self.assertTrue('active_primary_shards' in o)
self.assertTrue('active_shards' in o)
self.assertTrue('relocating_shards' in o)
self.assertTrue('initializing_shards' in o)
self.assertTrue('unassigned_shards' in o)
# These are not found in object unless we explicitly use option
self.assertTrue('indices' not in o)
self.assertTrue('shards' not in o)
self.assertTrue('timestamp' not in o)
def test_returns_cluster_health_with_indices(self):
cmd = self.appendSecurityCommands(['watches', 'cluster_health', '--level=indices'])
output = popen(cmd, stdout=PIPE).communicate()[0].decode('ascii')
o = json.loads(output)
self.assertTrue('indices' in o)
self.assertTrue('shards' not in o)
def test_returns_cluster_health_with_shards(self):
es = ESClientProducer.create_client(
self.options_from_list(self.appendSecurityCommands([]))
)
es.create(index='i', doc_type='t', id='1', body={}, ignore=409, refresh=True)
cmd = self.appendSecurityCommands(['watches', 'cluster_health', '--level=shards'])
output = popen(cmd, stdout=PIPE).communicate()[0].decode('ascii')
o = json.loads(output)
self.assertTrue('indices' in o)
self.assertTrue('shards' in o['indices']['i'])
def test_returns_cluster_health_listed_indices(self):
es = ESClientProducer.create_client(
self.options_from_list(self.appendSecurityCommands([]))
)
# make sure we have at least two indices in cluster
es.create(index='i', doc_type='t', id='1', body={}, ignore=409, refresh=True)
es.indices.delete(index='i2', ignore=404)
es.create(index='i2', doc_type='t', id='1', body={'settings':{'number_of_shards':5}}, ignore=409, refresh=True)
        # First, we check that the number of active primary shards for index "i2" is 5 and then...
cmd = self.appendSecurityCommands(['watches', 'cluster_health', '--index=i2'])
output = popen(cmd, stdout=PIPE).communicate()[0].decode('ascii')
o = json.loads(output)
self.assertTrue('active_primary_shards' in o)
self.assertTrue(o['active_primary_shards'] == 5)
        # ... next, we test that the number of active primary shards for the whole cluster is higher.
cmd = self.appendSecurityCommands(['watches', 'cluster_health'])
output = popen(cmd, stdout=PIPE).communicate()[0].decode('ascii')
o = json.loads(output)
self.assertTrue('active_primary_shards' in o)
self.assertTrue(o['active_primary_shards'] > 5)
def test_returns_cluster_health_with_shards_filtered(self):
es = ESClientProducer.create_client(
self.options_from_list(self.appendSecurityCommands([]))
)
es.create(index='i', doc_type='t', id='1', body={}, ignore=409, refresh=True)
cmd = self.appendSecurityCommands(['watches', 'cluster_health', '--level=shards',
'-f status', '-f *.*.status', '-f indices.*.shards.*.status'])
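        # Illustrative reading of the -f filters above: each keeps only matching
        # dotted paths ('*' matches a single path segment), so together they retain
        # the top-level status, each index's status, and each shard's status.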
output = popen(cmd, stdout=PIPE).communicate()[0].decode('ascii')
o = json.loads(output)
self.assertTrue(len(o) == 2)
self.assertTrue('status' in o)
self.assertTrue('indices' in o)
self.assertTrue(len(o['indices']['i']) == 2)
self.assertTrue('status' in o['indices']['i'])
self.assertTrue('shards' in o['indices']['i'])
self.assertTrue(len(o['indices']['i']['shards']['0']) == 1)
self.assertTrue('status' in o['indices']['i']['shards']['0'])
def test_ca_cert_only(self):
cmd = self.appendOnlyCAcert(['watches', 'cluster_health'])
cmd.extend(TestClusterHealth.username_password)
output = popen(cmd, stdout=PIPE).communicate()[0].decode('ascii')
o = json.loads(output)
self.assertTrue(len(o) == 15)
self.assertTrue('status' in o)
self.assertTrue('cluster_name' in o)
self.assertTrue('number_of_nodes' in o)
def test_ca_cert_only_and_headers(self):
cmd = self.appendOnlyCAcert(['watches', 'cluster_health'])
cmd.extend(TestClusterHealth.username_password)
cmd.extend(['--header', 'X-Foo: foo', '--header', 'X-Bar: bar'])
output = popen(cmd, stdout=PIPE).communicate()[0].decode('ascii')
o = json.loads(output)
self.assertTrue(len(o) == 15)
self.assertTrue('status' in o)
self.assertTrue('cluster_name' in o)
self.assertTrue('number_of_nodes' in o)
# negative tests to see if we get Usage: message for bogus arguments
def test_username_no_password(self):
cmd = self.appendOnlyCAcert(['watches', 'cluster_health'])
cmd.extend(['--username', 'junk'])
output = popen(cmd, stderr=PIPE).communicate()[1].decode('ascii')
self.assertTrue('Usage:' in output)
def test_password_no_username(self):
cmd = self.appendOnlyCAcert(['watches', 'cluster_health'])
cmd.extend(['--password', '<PASSWORD>'])
output = popen(cmd, stderr=PIPE).communicate()[1].decode('ascii')
self.assertTrue('Usage:' in output)
def test_cert_no_key(self):
cmd = self.appendOnlyCAcert(['watches', 'cluster_health'])
cmd.extend(['--cert', './junk'])
output = popen(cmd, stderr=PIPE).communicate()[1].decode('ascii')
self.assertTrue('Usage:' in output)
def test_key_no_cert(self):
cmd = self.appendOnlyCAcert(['watches', 'cluster_health'])
cmd.extend(['--key', './junk'])
output = popen(cmd, stderr=PIPE).communicate()[1].decode('ascii')
self.assertTrue('Usage:' in output)
def test_bogus_transform_value(self):
cmd = self.appendSecurityCommands(['watches', 'cluster_health', '--transform=bogus'])
output, errout = popen(cmd, stdout=PIPE, stderr=PIPE).communicate()
self.assertRegexpMatches(errout.decode('ascii'), '(?ms)ERROR:.*RuntimeError: Unsupported transform type')
def test_returns_cluster_health_nested(self):
es = ESClientProducer.create_client(
self.options_from_list(self.appendSecurityCommands([]))
)
es.create(index='i', doc_type='t', id='1', body={}, ignore=409, refresh=True)
cmd = self.appendSecurityCommands(['watches', 'cluster_health', '--transform=nested', '--level=shards'])
output = popen(cmd, stdout=PIPE).communicate()[0].decode('ascii')
o = json.loads(output)
# Indices is an array
self.assertTrue('indices' in o)
indices = o['indices']
self.assertTrue(isinstance(indices, list))
self.assertTrue(len(indices) > 0)
for index in indices:
# Each item in indices array must be dictionary
self.assertTrue(isinstance(index, dict))
# Each item must contain 'index' field which is expected to hold index name (thus string type)
self.assertTrue('index' in index)
self.assertTrue(isinstance(index['index'], string_types))
            # Each index must contain a 'shards' array
self.assertTrue('shards' in index)
shards = index['shards']
self.assertTrue(isinstance(shards, list))
for shard in shards:
# Each item in shards array must be dictionary
self.assertTrue(isinstance(shard, dict))
self.assertTrue('shard' in shard)
# shard id is int type, not string
self.assertTrue(isinstance(shard['shard'], int))
| StarcoderdataPython |
97440 | """clean-room implementation of a mysql client supporting both *connect* and *quit* operations"""
import datetime
import socket
import struct
import sys
import time
from hashlib import sha1
from . import compat
import tornado.gen
import tornado.iostream
def _sxor(lhs, rhs):
if sys.version_info > (3, 0):
return b''.join(compat.bchr(a ^ b) for a, b in zip(lhs, rhs))
else:
return b''.join(compat.bchr(ord(a) ^ ord(b)) for a, b in zip(lhs, rhs))
def _stupid_hash_password(salt, password):
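    # mysql_native_password scramble: SHA1(password) XOR SHA1(salt + SHA1(SHA1(password)))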
    password = password.encode('utf-8')
salt = salt.encode('utf-8')
return _sxor(
sha1(password).digest(),
sha1(
salt + sha1(sha1(password).digest()).digest()
).digest()
)
def _read_lenc(buf, offset=0):
first = struct.unpack('B', buf[offset:offset + 1])[0]
if first < 0xfb:
return first, offset + 1
elif first == 0xfc:
return struct.unpack('<H', buf[offset + 1:offset + 3])[0], offset + 3
elif first == 0xfd:
return struct.unpack('<I', buf[offset + 1:offset + 4] + b'\0')[0], offset + 4
elif first == 0xfe:
return struct.unpack('<Q', buf[offset + 1:offset + 9])[0], offset + 9
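# Length-encoded integer layout handled above (illustrative): a leading byte below
# 0xfb is the value itself, while 0xfc, 0xfd and 0xfe prefix 2-, 3- and 8-byte
# little-endian integers, e.g. _read_lenc(b'\xfc\x10\x27') returns (10000, 3).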
class MySQLResponse(object):
def __init__(self, packet_contents):
self.packet = packet_contents
self.header = struct.unpack('B', packet_contents[0])[0]
self.message = ''
# per-type response parsing
if self.header == 0x00:
self.response_type = 'OK'
offset = 1
self.rows_affected, offset = _read_lenc(packet_contents, offset)
self.last_insert_id, offset = _read_lenc(packet_contents, offset)
self.status_flags, self.warnings = struct.unpack('<HH', packet_contents[offset:offset + 4])
self.message = packet_contents[offset + 4:]
elif self.header == 0x0a:
self.response_type = 'CONN 10'
sve = packet_contents.index('\0')
self.server_version = packet_contents[1:sve]
sve += 1
self.connection_id, pd_low, cf_low = struct.unpack(
'<I8sx2s',
packet_contents[sve:sve + 15]
)
self.character_set, self.status_flags, cf_high = struct.unpack(
'BH2s',
packet_contents[sve + 15:sve + 21]
)
self.capability_flags = struct.unpack('<I', cf_low + cf_high)[0]
pd_len = struct.unpack('B', packet_contents[sve + 20:sve + 21])[0]
# skip 10 bytes for REASONS
pd_end = sve + 31 + max(13, pd_len - 8)
pd_high = packet_contents[sve + 31:pd_end - 1]
self.plugin_data = pd_low + pd_high
self.auth_method = packet_contents[pd_end:-1]
elif self.header == 0xfe:
self.response_type = 'EOF'
elif self.header == 0xff:
self.response_type = 'ERR'
self.error_code, _, self.sql_state = struct.unpack(
'Hc5s',
packet_contents[1:9]
)
self.message = packet_contents[9:]
else:
self.response_type = self.header
if self.header > 0xf0:
self.OK = False
else:
self.OK = True
def __repr__(self):
return '%s(%s)<%s>' % (self.__class__.__name__, self.response_type, self.message)
class MySQLClient(object):
def __init__(self, host='127.0.0.1', port=3306, global_timeout=0, io_loop=None, timeout_callback=None):
self.host = host
self.port = port
self.global_timeout = global_timeout
self.timeout_callback = timeout_callback
if io_loop is None:
io_loop = tornado.ioloop.IOLoop.current()
self.io_loop = io_loop
self.socket = None
self.stream = None
self.start = 0
self.timeout = None
self.connected = False
self.sequence = 1
@tornado.gen.coroutine
def _connect_socket(self):
self.start = time.time()
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
self.stream = tornado.iostream.IOStream(s, io_loop=self.io_loop)
if self.global_timeout:
self.timeout = self.io_loop.add_timeout(datetime.timedelta(seconds=self.global_timeout), self._timed_out)
yield tornado.gen.Task(self.stream.connect, (self.host, self.port))
self.connected = True
def _timed_out(self):
now = time.time()
try:
self.stream.close()
except Exception:
pass
if self.timeout_callback is not None:
self.timeout_callback(now - self.start)
@tornado.gen.coroutine
def connect(self, username, password):
yield self._connect_socket()
connection_response = yield self.read_response()
assert connection_response.header == 0x0a
connection_packet = struct.pack(
'<IIB23x',
0x200 | 0x400 | 0x8000 | 0x80000, # connection flags
1024, # max packet size
0x21, # char set == utf8
)
connection_packet += username.encode('utf8') + '\0'
auth_response = _stupid_hash_password(password=password, salt=connection_response.plugin_data)
connection_packet += struct.pack('B', len(auth_response))
connection_packet += auth_response
connection_packet += 'mysql_native_password\0'
yield self.write(self._pack_packet(connection_packet))
resp = yield self.read_response()
raise tornado.gen.Return(resp)
def _pack_packet(self, contents):
size = len(contents)
packet_size = struct.pack('<i', size)[:3]
sequence_number = struct.pack('B', self.sequence)
self.sequence += 1
packet = packet_size + sequence_number + contents
return packet
def write(self, bytez):
return tornado.gen.Task(self.stream.write, bytez)
def read_bytes(self, byte_count):
return tornado.gen.Task(self.stream.read_bytes, byte_count)
@tornado.gen.coroutine
def quit(self):
assert self.connected
packet_contents = struct.pack('B', 0x01)
self.sequence = 0
yield self.write(self._pack_packet(packet_contents))
try:
self.stream.close()
except Exception:
pass
@tornado.gen.coroutine
def read_response(self):
packet_length = yield self.read_bytes(3)
packet_length = struct.unpack('<I', packet_length + struct.pack('B', 0x00))[0]
sequence_number = yield self.read_bytes(1)
sequence_number = struct.unpack('B', sequence_number)[0]
packet = yield self.read_bytes(packet_length)
raise tornado.gen.Return(MySQLResponse(packet))
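# Minimal usage sketch (not part of the original module): connect and then quit.
# Host, port, username and password below are placeholders, not values taken
# from the original source.
if __name__ == '__main__':
    import tornado.ioloop

    @tornado.gen.coroutine
    def _demo():
        client = MySQLClient(host='127.0.0.1', port=3306, global_timeout=5)
        # handshake + authentication; returns a MySQLResponse (OK or ERR packet)
        response = yield client.connect('example_user', 'example_password')
        print(response)
        # send COM_QUIT and close the stream
        yield client.quit()

    tornado.ioloop.IOLoop.current().run_sync(_demo)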
| StarcoderdataPython |
4833304 | <reponame>lanthias/bson-numpy
import argparse
import collections
import math
import string
import sys
import timeit
from functools import partial
import pymongo
import numpy as np
from bson import BSON, CodecOptions, Int64, ObjectId
from bson.raw_bson import RawBSONDocument
try:
import bsonnumpy
except (ImportError, OSError) as exc:
print(exc)
bsonnumpy = None
try:
import monary
except (ImportError, OSError) as exc:
monary = None
assert pymongo.has_c()
# Use large document in tests? If SMALL, no, if LARGE, then yes.
SMALL = False
LARGE = True
db = None
raw_bson = None
large_doc_keys = None
collection_names = {LARGE: "large", SMALL: "small"}
dtypes = {}
raw_bsons = {}
def _setup():
global db
global raw_bson
global large_doc_keys
db = pymongo.MongoClient().bsonnumpy_test
small = db[collection_names[SMALL]]
small.drop()
print("%d small docs, %d bytes each with 3 keys" % (
N_SMALL_DOCS,
len(BSON.encode({'_id': ObjectId(), 'x': 1, 'y': math.pi}))))
small.insert_many([
collections.OrderedDict([('x', 1), ('y', math.pi)])
for _ in range(N_SMALL_DOCS)])
dtypes[SMALL] = np.dtype([('x', np.int64), ('y', np.float64)])
large = db[collection_names[LARGE]]
large.drop()
# 2600 keys: 'a', 'aa', 'aaa', .., 'zz..z'
large_doc_keys = [c * i for c in string.ascii_lowercase
for i in range(1, 101)]
large_doc = collections.OrderedDict([(k, math.pi) for k in large_doc_keys])
print("%d large docs, %dk each with %d keys" % (
N_LARGE_DOCS, len(BSON.encode(large_doc)) // 1024, len(large_doc_keys)))
large.insert_many([large_doc.copy() for _ in range(N_LARGE_DOCS)])
dtypes[LARGE] = np.dtype([(k, np.float64) for k in large_doc_keys])
# Ignore for now that the first batch defaults to 101 documents.
raw_bson_docs_small = [{'x': 1, 'y': math.pi} for _ in range(N_SMALL_DOCS)]
raw_bson_small = BSON.encode({'ok': 1,
'cursor': {
'id': Int64(1234),
'ns': 'db.collection',
'firstBatch': raw_bson_docs_small}})
raw_bson_docs_large = [large_doc.copy() for _ in range(N_LARGE_DOCS)]
raw_bson_large = BSON.encode({'ok': 1,
'cursor': {
'id': Int64(1234),
'ns': 'db.collection',
'firstBatch': raw_bson_docs_large}})
raw_bsons[SMALL] = raw_bson_small
raw_bsons[LARGE] = raw_bson_large
def _teardown():
db.collection.drop()
bench_fns = collections.OrderedDict()
def bench(name):
def assign_name(fn):
bench_fns[name] = fn
return fn
return assign_name
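# The decorator above registers each benchmark under its display name, e.g.
# @bench('conventional-to-ndarray') stores the wrapped function in
# bench_fns['conventional-to-ndarray'] while leaving the function itself unchanged.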
@bench('conventional-to-ndarray')
def conventional_func(use_large):
collection = db[collection_names[use_large]]
cursor = collection.find()
dtype = dtypes[use_large]
if use_large:
np.array([tuple(doc[k] for k in large_doc_keys) for doc in cursor],
dtype=dtype)
else:
np.array([(doc['x'], doc['y']) for doc in cursor], dtype=dtype)
@bench('raw-bson-to-ndarray')
def bson_numpy_func(use_large):
raw_coll = db.get_collection(
collection_names[use_large],
codec_options=CodecOptions(document_class=RawBSONDocument))
cursor = raw_coll.find()
dtype = dtypes[use_large]
bsonnumpy.sequence_to_ndarray(
(doc.raw for doc in cursor), dtype, raw_coll.count())
@bench('raw-batches-to-ndarray')
def raw_bson_func(use_large):
c = db[collection_names[use_large]]
if not hasattr(c, 'find_raw_batches'):
print("Wrong PyMongo: no 'find_raw_batches' feature")
return
dtype = dtypes[use_large]
bsonnumpy.sequence_to_ndarray(c.find_raw_batches(), dtype, c.count())
@bench('monary')
def monary_func(use_large):
# Monary doesn't allow > 1024 keys, and it's too slow to benchmark anyway.
if use_large:
return
m = monary.Monary()
dtype = dtypes[use_large]
m.query(db.name, collection_names[use_large], {}, dtype.names,
["float64"] * len(dtype.names))
@bench('parse-dtype')
def raw_bson_func(use_large):
dtype = dtypes[use_large]
bsonnumpy.sequence_to_ndarray([], dtype, 0)
@bench('decoded-cmd-reply')
def bson_func(use_large):
for _ in BSON(raw_bsons[use_large]).decode()['cursor']['firstBatch']:
pass
@bench('raw-cmd-reply')
def raw_bson_func(use_large):
options = CodecOptions(document_class=RawBSONDocument)
for _ in BSON(raw_bsons[use_large]).decode(options)['cursor']['firstBatch']:
pass
parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter,
epilog="""
Available benchmark functions:
%s
""" % ("\n ".join(bench_fns.keys()),))
parser.add_argument('--large', action='store_true',
help='only test with large documents')
parser.add_argument('--small', action='store_true',
help='only test with small documents')
parser.add_argument('--test', action='store_true',
help='quick test of benchmark.py')
parser.add_argument('funcs', nargs='*', default=bench_fns.keys())
options = parser.parse_args()
if options.test:
N_LARGE_DOCS = 2
N_SMALL_DOCS = 2
N_TRIALS = 1
else:
N_LARGE_DOCS = 1000
N_SMALL_DOCS = 100000
N_TRIALS = 5
# Run tests with both small and large documents.
sizes = [SMALL, LARGE]
if options.large and not options.small:
sizes.remove(SMALL)
if options.small and not options.large:
sizes.remove(LARGE)
for name in options.funcs:
if name not in bench_fns:
sys.stderr.write("Unknown function \"%s\"\n" % name)
sys.stderr.write("Available functions:\n%s\n" % ("\n".join(bench_fns)))
sys.exit(1)
_setup()
print()
print("%25s: %7s %7s" % ("BENCH", "SMALL", "LARGE"))
for name, fn in bench_fns.items():
if name in options.funcs:
sys.stdout.write("%25s: " % name)
sys.stdout.flush()
# Test with small and large documents.
for size in (SMALL, LARGE):
if size not in sizes:
sys.stdout.write("%7s" % "-")
else:
timer = timeit.Timer(partial(fn, size))
duration = min(timer.repeat(3, N_TRIALS)) / float(N_TRIALS)
sys.stdout.write("%7.2f " % duration)
sys.stdout.flush()
sys.stdout.write("\n")
_teardown()
| StarcoderdataPython |
1771863 | <reponame>Guzhongren/picuture2thumbnail
# -*- coding: utf-8 -*-
# Author:Guzhongren
# created: 2017-05-08
import os
filetype=[".JPG",".tif"]
folder_path = unicode(r"C:\geocon\解译样本(只提取jpg图片缩略图)", "utf8").encode("gbk")
jpg_count=0
tif_count=0
for parent, dirnames, filenames in os.walk(folder_path):
for filename in filenames:
if filetype[0] in filename:
jpg_count=jpg_count+1
elif filetype[1] in filename:
tif_count= tif_count+1
else:
            pass  # neither a JPG nor a TIF file
print(u" jpg:"+ str(jpg_count)+u" tif:"+ str(tif_count) + u" sum:"+str(jpg_count+tif_count))
| StarcoderdataPython |
48946 | <reponame>DavidBuchanan314/rc4<filename>rc4/__init__.py
from .rc4 import RC4
| StarcoderdataPython |
3390323 | # coding: utf-8
"""
Copyright 2016 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
ref: https://github.com/swagger-api/swagger-codegen
"""
from __future__ import absolute_import
import os
import sys
import unittest
import swagger_client
from swagger_client.rest import ApiException
from swagger_client.apis.snapshot_api import SnapshotApi
class TestSnapshotApi(unittest.TestCase):
""" SnapshotApi unit test stubs """
def setUp(self):
self.api = swagger_client.apis.snapshot_api.SnapshotApi()
def tearDown(self):
pass
def test_create_snapshot_aliase(self):
"""
Test case for create_snapshot_aliase
"""
pass
def test_create_snapshot_changelist(self):
"""
Test case for create_snapshot_changelist
"""
pass
def test_create_snapshot_repstate(self):
"""
Test case for create_snapshot_repstate
"""
pass
def test_create_snapshot_schedule(self):
"""
Test case for create_snapshot_schedule
"""
pass
def test_create_snapshot_snapshot(self):
"""
Test case for create_snapshot_snapshot
"""
pass
def test_delete_snapshot_aliase(self):
"""
Test case for delete_snapshot_aliase
"""
pass
def test_delete_snapshot_aliases(self):
"""
Test case for delete_snapshot_aliases
"""
pass
def test_delete_snapshot_changelist(self):
"""
Test case for delete_snapshot_changelist
"""
pass
def test_delete_snapshot_repstate(self):
"""
Test case for delete_snapshot_repstate
"""
pass
def test_delete_snapshot_schedule(self):
"""
Test case for delete_snapshot_schedule
"""
pass
def test_delete_snapshot_schedules(self):
"""
Test case for delete_snapshot_schedules
"""
pass
def test_delete_snapshot_snapshot(self):
"""
Test case for delete_snapshot_snapshot
"""
pass
def test_delete_snapshot_snapshots(self):
"""
Test case for delete_snapshot_snapshots
"""
pass
def test_get_snapshot_aliase(self):
"""
Test case for get_snapshot_aliase
"""
pass
def test_get_snapshot_changelist(self):
"""
Test case for get_snapshot_changelist
"""
pass
def test_get_snapshot_license(self):
"""
Test case for get_snapshot_license
"""
pass
def test_get_snapshot_pending(self):
"""
Test case for get_snapshot_pending
"""
pass
def test_get_snapshot_repstate(self):
"""
Test case for get_snapshot_repstate
"""
pass
def test_get_snapshot_schedule(self):
"""
Test case for get_snapshot_schedule
"""
pass
def test_get_snapshot_settings(self):
"""
Test case for get_snapshot_settings
"""
pass
def test_get_snapshot_snapshot(self):
"""
Test case for get_snapshot_snapshot
"""
pass
def test_get_snapshot_snapshots_summary(self):
"""
Test case for get_snapshot_snapshots_summary
"""
pass
def test_list_snapshot_aliases(self):
"""
Test case for list_snapshot_aliases
"""
pass
def test_list_snapshot_changelists(self):
"""
Test case for list_snapshot_changelists
"""
pass
def test_list_snapshot_repstates(self):
"""
Test case for list_snapshot_repstates
"""
pass
def test_list_snapshot_schedules(self):
"""
Test case for list_snapshot_schedules
"""
pass
def test_list_snapshot_snapshots(self):
"""
Test case for list_snapshot_snapshots
"""
pass
def test_update_snapshot_aliase(self):
"""
Test case for update_snapshot_aliase
"""
pass
def test_update_snapshot_schedule(self):
"""
Test case for update_snapshot_schedule
"""
pass
def test_update_snapshot_settings(self):
"""
Test case for update_snapshot_settings
"""
pass
def test_update_snapshot_snapshot(self):
"""
Test case for update_snapshot_snapshot
"""
pass
if __name__ == '__main__':
unittest.main() | StarcoderdataPython |
3377461 | <filename>process.py
from flask import Flask, render_template, request, jsonify, redirect, url_for
app = Flask(__name__)
@app.route('/')
def index():
message = 'Login'
return render_template('index.html', **locals())
@app.route('/form')
def form():
Description = 'Flask Ajax Practice'
return render_template('form.html', **locals())
@app.route('/process', methods=['POST'])
def process():
    # Receive the form data
email = request.form['email']
name = request.form['name']
    # After confirming the data is present, return it to the frontend
if name and email:
newName = name + ' submit success !'
newEmail = email + ' submit success !'
return jsonify({'name': newName, 'email': newEmail})
return jsonify({'error': 'Missing data!'})
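# Illustrative behaviour of /process: POSTing non-empty form fields 'name' and
# 'email' returns JSON such as {"name": "... submit success !", "email": "...
# submit success !"}; empty values fall through to {"error": "Missing data!"}.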
@app.route("/login", methods=['GET', 'POST'])
def login():
if request.method == "POST":
username = request.form.get('name')
password = request.form.get('pwd')
        # If the username and password are correct, redirect to /form
if username == "abc" and password == "<PASSWORD>":
return redirect(url_for('form'))
else:
message = 'Login Failure ! Please try again.'
return render_template('index.html', **locals())
else:
message = 'Login Failure ! Please try again.'
return render_template('index.html', **locals())
if __name__ == '__main__':
app.run(debug=True)
| StarcoderdataPython |
1679646 | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: pyatv/mrp/protobuf/AudioFadeResponseMessage.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from pyatv.mrp.protobuf import ProtocolMessage_pb2 as pyatv_dot_mrp_dot_protobuf_dot_ProtocolMessage__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='pyatv/mrp/protobuf/AudioFadeResponseMessage.proto',
package='',
syntax='proto2',
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n1pyatv/mrp/protobuf/AudioFadeResponseMessage.proto\x1a(pyatv/mrp/protobuf/ProtocolMessage.proto\"0\n\x18\x41udioFadeResponseMessage\x12\x14\n\x0c\x66\x61\x64\x65\x44uration\x18\x01 \x01(\x03:M\n\x18\x61udioFadeResponseMessage\x12\x10.ProtocolMessage\x18Y \x01(\x0b\x32\x19.AudioFadeResponseMessage'
,
dependencies=[pyatv_dot_mrp_dot_protobuf_dot_ProtocolMessage__pb2.DESCRIPTOR,])
AUDIOFADERESPONSEMESSAGE_FIELD_NUMBER = 89
audioFadeResponseMessage = _descriptor.FieldDescriptor(
name='audioFadeResponseMessage', full_name='audioFadeResponseMessage', index=0,
number=89, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=True, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key)
_AUDIOFADERESPONSEMESSAGE = _descriptor.Descriptor(
name='AudioFadeResponseMessage',
full_name='AudioFadeResponseMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='fadeDuration', full_name='AudioFadeResponseMessage.fadeDuration', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=95,
serialized_end=143,
)
DESCRIPTOR.message_types_by_name['AudioFadeResponseMessage'] = _AUDIOFADERESPONSEMESSAGE
DESCRIPTOR.extensions_by_name['audioFadeResponseMessage'] = audioFadeResponseMessage
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
AudioFadeResponseMessage = _reflection.GeneratedProtocolMessageType('AudioFadeResponseMessage', (_message.Message,), {
'DESCRIPTOR' : _AUDIOFADERESPONSEMESSAGE,
'__module__' : 'pyatv.mrp.protobuf.AudioFadeResponseMessage_pb2'
# @@protoc_insertion_point(class_scope:AudioFadeResponseMessage)
})
_sym_db.RegisterMessage(AudioFadeResponseMessage)
audioFadeResponseMessage.message_type = _AUDIOFADERESPONSEMESSAGE
pyatv_dot_mrp_dot_protobuf_dot_ProtocolMessage__pb2.ProtocolMessage.RegisterExtension(audioFadeResponseMessage)
# @@protoc_insertion_point(module_scope)
| StarcoderdataPython |
3238976 | <reponame>xuyu92327/waveform-analysis
import tables
import h5py
import numpy as np
class AnswerData(tables.IsDescription):
EventID = tables.Int64Col(pos=0)
ChannelID = tables.Int16Col(pos=1)
PETime = tables.Int16Col(pos=2)
Weight = tables.Float32Col(pos=3)
# Create the output file and the group
h5file = tables.open_file("result/Total.h5", mode="w", title="OneTonDetector")
# Create tables
AnswerTable = h5file.create_table("/", "Answer", AnswerData, "Answer")
answer = AnswerTable.row
for i in range(1,1798):
print(i)
with h5py.File("result/"+f"{i}"+"-pgan.h5") as ipt:
hg=ipt["Answer"][()]
# Write data
for j in range(len(hg)):
answer['EventID'] = hg[j,0]
answer['ChannelID'] = hg[j,1]
answer['PETime'] = hg[j,2]
answer['Weight'] = hg[j,3]
answer.append()
# Flush into the output file
AnswerTable.flush()
h5file.close() | StarcoderdataPython |
102438 | '''
'''
#
# Adapted from MATLAB code written by <NAME> (see Nishimoto, et al., 2011).
# <NAME> (Jan, 2016)
#
# Updates:
# <NAME> (Apr, 2020)
#
import itertools
from PIL import Image
import numpy as np
from moten.utils import (DotDict,
iterator_func,
log_compress,
sqrt_sum_squares,
pointwise_square,
)
##############################
#
##############################
def raw_project_stimulus(stimulus,
filters,
vhsize=(),
dtype='float32'):
'''Obtain responses to the stimuli from all filter quadrature-pairs.
Parameters
----------
stimulus : np.ndarray, (nimages, vdim, hdim) or (nimages, npixels)
The movie frames.
If `stimulus` is two-dimensional with shape (nimages, npixels), then
`vhsize=(vdim,hdim)` is required and `npixels == vdim*hdim`.
Returns
-------
output_sin : np.ndarray, (nimages, nfilters)
output_cos : np.ndarray, (nimages, nfilters)
'''
# parameters
if stimulus.ndim == 3:
nimages, vdim, hdim = stimulus.shape
stimulus = stimulus.reshape(stimulus.shape[0], -1)
vhsize = (vdim, hdim)
# checks for 2D stimuli
assert stimulus.ndim == 2 # (nimages, pixels)
    assert isinstance(vhsize, tuple) and len(vhsize) == 2 # (vdim, hdim)
assert np.product(vhsize) == stimulus.shape[1] # hdim*vdim == pixels
# Compute responses
nfilters = len(filters)
nimages = stimulus.shape[0]
sin_responses = np.zeros((nimages, nfilters), dtype=dtype)
cos_responses = np.zeros((nimages, nfilters), dtype=dtype)
for gaborid, gabor_parameters in iterator_func(enumerate(filters),
'project_stimulus',
total=len(filters)):
sgabor0, sgabor90, tgabor0, tgabor90 = mk_3d_gabor(vhsize, **gabor_parameters)
channel_sin, channel_cos = dotdelay_frames(sgabor0, sgabor90,
tgabor0, tgabor90,
stimulus)
sin_responses[:, gaborid] = channel_sin
cos_responses[:, gaborid] = channel_cos
return sin_responses, cos_responses
def project_stimulus(stimulus,
filters,
quadrature_combination=sqrt_sum_squares,
output_nonlinearity=log_compress,
vhsize=(),
dtype='float32'):
'''Compute the motion energy filter responses to the stimuli.
Parameters
----------
stimulus : np.ndarray, (nimages, vdim, hdim) or (nimages, npixels)
The movie frames.
If `stimulus` is two-dimensional with shape (nimages, npixels), then
`vhsize=(vdim,hdim)` is required and `npixels == vdim*hdim`.
Returns
-------
filter_responses : np.ndarray, (nimages, nfilters)
'''
# parameters
if stimulus.ndim == 3:
nimages, vdim, hdim = stimulus.shape
stimulus = stimulus.reshape(stimulus.shape[0], -1)
vhsize = (vdim, hdim)
# checks for 2D stimuli
assert stimulus.ndim == 2 # (nimages, pixels)
    assert isinstance(vhsize, tuple) and len(vhsize) == 2 # (vdim, hdim)
assert np.product(vhsize) == stimulus.shape[1] # hdim*vdim == pixels
# Compute responses
nfilters = len(filters)
nimages = stimulus.shape[0]
filter_responses = np.zeros((nimages, nfilters), dtype=dtype)
for gaborid, gabor_parameters in iterator_func(enumerate(filters),
'project_stimulus',
total=len(filters)):
sgabor0, sgabor90, tgabor0, tgabor90 = mk_3d_gabor(vhsize, **gabor_parameters)
channel_sin, channel_cos = dotdelay_frames(sgabor0, sgabor90,
tgabor0, tgabor90,
stimulus)
channel_response = quadrature_combination(channel_sin, channel_cos)
channel_response = output_nonlinearity(channel_response)
filter_responses[:, gaborid] = channel_response
return filter_responses
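# Illustrative call (shapes are hypothetical): for a (300, 96, 96) luminance movie
# and a list `filters` of gabor-parameter dicts (each accepted by mk_3d_gabor),
# project_stimulus(movie, filters) returns a (300, len(filters)) response array.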
##############################
# core functionality
##############################
def mk_3d_gabor(vhsize,
stimulus_fps,
aspect_ratio='auto',
filter_temporal_width='auto',
centerh=0.5,
centerv=0.5,
direction=45.0,
spatial_freq=16.0,
spatial_env=0.3,
temporal_freq=2.0,
temporal_env=0.3,
spatial_phase_offset=0.0,
):
'''Make a motion energy filter.
A motion energy filter is a 3D gabor with
two spatial and one temporal dimension.
Each dimension is defined by two sine waves which
differ in phase by 90 degrees. The sine waves are
then multiplied by a gaussian.
Parameters
----------
vhsize : tuple of ints, (vdim, hdim)
Size of the stimulus in pixels (vdim, hdim)
`vdim` : vertical dimension
`hdim` : horizontal dimension
stimulus_fps : scalar, [Hz]
Stimulus playback speed in frames per second.
centerv : scalar
Vertical filter position from top of frame (min=0, max=1.0).
centerh : scalar
Horizontal filter position from left of frame (min=0, max=aspect_ratio).
direction : scalar, [degrees]
Direction of filter motion. Degree position corresponds
to standard unit-circle coordinates (i.e. 0=right, 180=left).
spatial_freq : float, [cycles-per-image]
Spatial frequency of the filter.
temporal_freq : float, [Hz]
Temporal frequency of the filter
filter_temporal_width : int
Temporal window of the motion energy filter (e.g. 10).
Defaults to approximately 0.666[secs] (`floor(stimulus_fps*(2/3))`).
aspect_ratio : optional, 'auto' or float-like,
Defaults to stimulus aspect ratio: hdim/vdim
Useful for preserving the spatial gabors circular even
when images have non-square aspect ratios. For example,
a 16:9 image would have `aspect_ratio`=16/9.
spatial_env : float
Spatial envelope (s.d. of the gaussian)
temporal_env : float
Temporal envelope (s.d. of gaussian)
    spatial_phase_offset : float, [degrees]
Phase offset for the spatial sinusoid
Returns
-------
spatial_gabor_sin : 2D np.ndarray, (vdim, hdim)
spatial_gabor_cos : 2D np.ndarray, (vdim, hdim)
Spatial gabor quadrature pair. ``spatial_gabor_cos`` has
a 90 degree phase offset relative to ``spatial_gabor_sin``
temporal_gabor_sin : 1D np.ndarray, (`filter_temporal_width`,)
temporal_gabor_cos : 1D np.ndarray, (`filter_temporal_width`,)
Temporal gabor quadrature pair. ``temporal_gabor_cos`` has
a 90 degree phase offset relative to ``temporal_gabor_sin``
Notes
-----
Same method as Nishimoto, et al., 2011.
'''
vdim, hdim = vhsize
if aspect_ratio == 'auto':
aspect_ratio = hdim/float(vdim)
if filter_temporal_width == 'auto':
filter_temporal_width = int(stimulus_fps*(2/3.))
# cast filter width to integer frames
assert np.allclose(filter_temporal_width, int(filter_temporal_width))
filter_temporal_width = int(filter_temporal_width)
dh = np.linspace(0, aspect_ratio, hdim, endpoint=True)
dv = np.linspace(0, 1, vdim, endpoint=True)
dt = np.linspace(0, 1, filter_temporal_width, endpoint=False)
# AN: Actually, `dt` should include endpoint.
# Currently, the center of the filter width is +(1./fps)/2.
# However, this would break backwards compatibility.
# TODO: Allow for `dt_endpoint` as an argument
# and set default to False.
ihs, ivs = np.meshgrid(dh,dv)
fh = -spatial_freq*np.cos(direction/180.*np.pi)*2*np.pi
fv = spatial_freq*np.sin(direction/180.*np.pi)*2*np.pi
# normalize temporal frequency to wavelet size
ft = np.real(temporal_freq*(filter_temporal_width/float(stimulus_fps)))*2*np.pi
# spatial filters
spatial_gaussian = np.exp(-((ihs - centerh)**2 + (ivs - centerv)**2)/(2*spatial_env**2))
spatial_grating_sin = np.sin((ihs - centerh)*fh + (ivs - centerv)*fv + spatial_phase_offset)
spatial_grating_cos = np.cos((ihs - centerh)*fh + (ivs - centerv)*fv + spatial_phase_offset)
spatial_gabor_sin = spatial_gaussian * spatial_grating_sin
spatial_gabor_cos = spatial_gaussian * spatial_grating_cos
##############################
temporal_gaussian = np.exp(-(dt - 0.5)**2/(2*temporal_env**2))
temporal_grating_sin = np.sin((dt - 0.5)*ft)
temporal_grating_cos = np.cos((dt - 0.5)*ft)
temporal_gabor_sin = temporal_gaussian*temporal_grating_sin
temporal_gabor_cos = temporal_gaussian*temporal_grating_cos
return spatial_gabor_sin, spatial_gabor_cos, temporal_gabor_sin, temporal_gabor_cos
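# Illustrative shapes: mk_3d_gabor((576, 1024), stimulus_fps=24) returns two
# (576, 1024) spatial arrays plus two 1D temporal arrays whose length is the
# default filter_temporal_width, int(stimulus_fps * (2/3)).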
def generate_3dgabor_array(vhsize=(576,1024),
stimulus_fps=24,
aspect_ratio='auto',
filter_temporal_width='auto',
centerh=0.5,
centerv=0.5,
direction=45.0,
spatial_freq=16.0,
spatial_env=0.3,
temporal_freq=2.0,
temporal_env=0.3,
phase_offset=0.0):
'''
'''
vdim, hdim = vhsize
if aspect_ratio == 'auto':
aspect_ratio = hdim/float(vdim)
if filter_temporal_width == 'auto':
filter_temporal_width = int(stimulus_fps*(2/3.))
gabor_components = mk_3d_gabor(vhsize,
stimulus_fps=stimulus_fps,
aspect_ratio=aspect_ratio,
filter_temporal_width=filter_temporal_width,
centerh=centerh,
centerv=centerv,
direction=direction,
spatial_freq=spatial_freq,
spatial_env=spatial_env,
temporal_freq=temporal_freq,
temporal_env=temporal_env,
phase_offset=phase_offset,
)
gabor_video = mk_spatiotemporal_gabor(*gabor_components)
return gabor_video
def dotspatial_frames(spatial_gabor_sin, spatial_gabor_cos,
stimulus,
masklimit=0.001):
'''Dot the spatial gabor filters filter with the stimulus
Parameters
----------
spatial_gabor_sin : np.array, (vdim,hdim)
spatial_gabor_cos : np.array, (vdim,hdim)
Spatial gabor quadrature pair
stimulus : 2D np.array (nimages, vdim*hdim)
The movie frames with the spatial dimension collapsed.
masklimit : float-like
Threshold to find the non-zero filter region
Returns
-------
channel_sin : np.ndarray, (nimages, )
channel_cos : np.ndarray, (nimages, )
The filter response to each stimulus
The quadrature pair can be combined: (x^2 + y^2)^0.5
'''
gabors = np.asarray([spatial_gabor_sin.ravel(),
spatial_gabor_cos.ravel()])
# dot the gabors with the stimulus
mask = np.abs(gabors).sum(0) > masklimit
gabor_prod = (gabors[:,mask].squeeze() @ stimulus.T[mask].squeeze()).T
gabor_sin, gabor_cos = gabor_prod[:,0], gabor_prod[:,1]
return gabor_sin, gabor_cos
def dotdelay_frames(spatial_gabor_sin, spatial_gabor_cos,
temporal_gabor_sin, temporal_gabor_cos,
stimulus,
masklimit=0.001):
'''Convolve the motion energy filter with a stimulus
Parameters
----------
spatial_gabor_sin : np.array, (vdim,hdim)
spatial_gabor_cos : np.array, (vdim,hdim)
Spatial gabor quadrature pair
temporal_gabor_sin : np.array, (temporal_filter_width,)
temporal_gabor_cos : np.array, (temporal_filter_width,)
Temporal gabor quadrature pair
stimulus : 2D np.array (nimages, vdim*hdim)
The movie frames with the spatial dimension collapsed.
Returns
-------
channel_sin : np.ndarray, (nimages, )
channel_cos : np.ndarray, (nimages, )
The filter response to the stimulus at each time point
The quadrature pair can be combined: (x^2 + y^2)^0.5
'''
gabor_sin, gabor_cos = dotspatial_frames(spatial_gabor_sin, spatial_gabor_cos,
stimulus, masklimit=masklimit)
gabor_prod = np.c_[gabor_sin, gabor_cos]
temporal_gabors = np.asarray([temporal_gabor_sin,
temporal_gabor_cos])
# dot the product with the temporal gabors
outs = gabor_prod[:, [0]] @ temporal_gabors[[1]] + gabor_prod[:, [1]] @ temporal_gabors[[0]]
outc = -gabor_prod[:, [0]] @ temporal_gabors[[0]] + gabor_prod[:, [1]] @ temporal_gabors[[1]]
# sum across delays
nouts = np.zeros_like(outs)
noutc = np.zeros_like(outc)
tdxc = int(np.ceil(outs.shape[1]/2.0))
delays = np.arange(outs.shape[1])-tdxc +1
for ddx, num in enumerate(delays):
if num == 0:
nouts[:, ddx] = outs[:,ddx]
noutc[:, ddx] = outc[:,ddx]
elif num > 0:
nouts[num:, ddx] = outs[:-num,ddx]
noutc[num:, ddx] = outc[:-num,ddx]
elif num < 0:
nouts[:num, ddx] = outs[abs(num):,ddx]
noutc[:num, ddx] = outc[abs(num):,ddx]
channel_sin = nouts.sum(-1)
channel_cos = noutc.sum(-1)
return channel_sin, channel_cos
def mk_spatiotemporal_gabor(spatial_gabor_sin, spatial_gabor_cos,
temporal_gabor_sin, temporal_gabor_cos):
'''Make 3D motion energy filter defined by the spatial and temporal gabors.
Takes the output of :func:`mk_3d_gabor` and constructs the 3D filter.
This is useful for visualization.
Parameters
----------
spatial_gabor_sin : np.array, (vdim,hdim)
spatial_gabor_cos : np.array, (vdim,hdim)
Spatial gabor quadrature pair
temporal_gabor_sin : np.array, (filter_temporal_width,)
temporal_gabor_cos : np.array, (filter_temporal_width,)
Temporal gabor quadrature pair
Returns
-------
motion_energy_filter : np.array, (vdim, hdim, filter_temporal_width)
The motion energy filter
'''
a = -spatial_gabor_sin.ravel()[...,None] @ temporal_gabor_sin[...,None].T
b = spatial_gabor_cos.ravel()[...,None] @ temporal_gabor_cos[...,None].T
x,y = spatial_gabor_sin.shape
t = temporal_gabor_sin.shape[0]
return (a+b).reshape(x,y,t)
def compute_spatial_gabor_responses(stimulus,
aspect_ratio='auto',
spatial_frequencies=[0,2,4,8,16,32],
quadrature_combination=sqrt_sum_squares,
output_nonlinearity=log_compress,
dtype=np.float64,
dozscore=True):
"""Compute the spatial gabor filters' response to each stimulus.
Parameters
----------
stimulus : 3D np.array (n, vdim, hdim)
The stimulus frames.
spatial_frequencies : array-like
The spatial frequencies to compute. The spatial envelope is determined by this.
quadrature_combination : function, optional
Specifies how to combine the channel reponses quadratures.
The function must take the sin and cos as arguments in order.
Defaults to: (sin^2 + cos^2)^1/2
output_nonlinearity : function, optional
Passes the channels (after `quadrature_combination`) through a
non-linearity. The function input is the (`n`,`nfilters`) array.
Defaults to: ln(x + 1e-05)
dozscore : bool, optional
Whether to z-score the channel responses in time
dtype : np.dtype
Defaults to np.float64
Returns
-------
filter_responses : np.array, (n, nfilters)
"""
nimages, vdim, hdim = stimulus.shape
vhsize = (vdim, hdim)
if aspect_ratio == 'auto':
aspect_ratio = hdim/float(vdim)
stimulus = stimulus.reshape(stimulus.shape[0], -1)
parameter_names, gabor_parameters = mk_moten_pyramid_params(
1., # fps
filter_temporal_width=1.,
aspect_ratio=aspect_ratio,
temporal_frequencies=[0.],
spatial_directions=[0.],
spatial_frequencies=spatial_frequencies,
)
ngabors = gabor_parameters.shape[0]
filters = [{name : gabor_parameters[idx, pdx] for pdx, name \
in enumerate(parameter_names)} \
for idx in range(ngabors)]
info = 'Computing responses for #%i filters across #%i images (aspect_ratio=%0.03f)'
print(info%(len(gabor_parameters), nimages, aspect_ratio))
channels = np.zeros((nimages, len(gabor_parameters)), dtype=dtype)
for idx, gabor_param_dict in iterator_func(enumerate(filters),
'%s.compute_spatial_gabor_responses'%__name__,
total=len(gabor_parameters)):
sgabor_sin, sgabor_cos, _, _ = mk_3d_gabor(vhsize,
**gabor_param_dict)
channel_sin, channel_cos = dotspatial_frames(sgabor_sin, sgabor_cos, stimulus)
channel = quadrature_combination(channel_sin, channel_cos)
channels[:, idx] = channel
channels = output_nonlinearity(channels)
if dozscore:
from scipy.stats import zscore
channels = zscore(channels)
return channels
def compute_filter_responses(stimulus,
stimulus_fps,
aspect_ratio='auto',
filter_temporal_width='auto',
quadrature_combination=sqrt_sum_squares,
output_nonlinearity=log_compress,
dozscore=True,
dtype=np.float64,
pyramid_parameters={}):
"""Compute the motion energy filters' response to the stimuli.
Parameters
----------
stimulus : 3D np.array (n, vdim, hdim)
The movie frames.
stimulus_fps : scalar
The temporal frequency of the stimulus
aspect_ratio : bool, or scalar
Defaults to hdim/vdim. Otherwise, pass as scalar
filter_temporal_width : int, None
The number of frames in one filter.
Defaults to approximately 0.666[secs] (floor(stimulus_fps*(2/3))).
quadrature_combination : function, optional
Specifies how to combine the channel reponses quadratures.
The function must take the sin and cos as arguments in order.
Defaults to: (sin^2 + cos^2)^1/2
output_nonlinearity : function, optional
Passes the channels (after `quadrature_combination`) through a
non-linearity. The function input is the (`n`,`nfilters`) array.
Defaults to: ln(x + 1e-05)
dozscore : bool, optional
Whether to z-score the channel responses in time
dtype : np.dtype
Defaults to np.float64
pyramid_parameters: dict
See :func:`mk_moten_pyramid_params` for details on parameters
specifiying a motion energy pyramid.
Returns
-------
filter_responses : np.array, (n, nfilters)
"""
nimages, vdim, hdim = stimulus.shape
stimulus = stimulus.reshape(stimulus.shape[0], -1)
vhsize = (vdim, hdim)
if aspect_ratio == 'auto':
aspect_ratio = hdim/float(vdim)
if filter_temporal_width == 'auto':
filter_temporal_width = int(stimulus_fps*(2./3.))
# pass parameters
pkwargs = dict(aspect_ratio=aspect_ratio,
filter_temporal_width=filter_temporal_width)
pkwargs.update(**pyramid_parameters)
parameter_names, gabor_parameters = mk_moten_pyramid_params(stimulus_fps,
**pkwargs)
ngabors = gabor_parameters.shape[0]
filters = [{name : gabor_parameters[idx, pdx] for pdx, name \
in enumerate(parameter_names)} \
for idx in range(ngabors)]
info = 'Computing responses for #%i filters across #%i images (aspect_ratio=%0.03f)'
print(info%(len(gabor_parameters), nimages, aspect_ratio))
channels = np.zeros((nimages, len(gabor_parameters)), dtype=dtype)
for idx, gabor_param_dict in iterator_func(enumerate(filters),
'%s.compute_filter_responses'%__name__,
total=len(filters)):
gabor = mk_3d_gabor(vhsize,
**gabor_param_dict)
gabor0, gabor90, tgabor0, tgabor90 = gabor
channel_sin, channel_cos = dotdelay_frames(gabor0, gabor90,
tgabor0, tgabor90,
stimulus,
)
channel = quadrature_combination(channel_sin, channel_cos)
channels[:,idx] = channel
channels = output_nonlinearity(channels)
if dozscore:
from scipy.stats import zscore
channels = zscore(channels)
return channels
def mk_moten_pyramid_params(stimulus_fps,
filter_temporal_width='auto',
aspect_ratio='auto',
temporal_frequencies=[0,2,4],
spatial_frequencies=[0,2,4,8,16,32],
spatial_directions=[0,45,90,135,180,225,270,315],
sf_gauss_ratio=0.6,
max_spatial_env=0.3,
gabor_spacing=3.5,
tf_gauss_ratio=10.,
max_temp_env=0.3,
spatial_phase_offset=0.0,
include_edges=False,
):
"""Parametrize a motion energy pyramid that tiles the stimulus.
Parameters
----------
stimulus_fps : scalar, [Hz]
Stimulus playback speed in frames per second.
spatial_frequencies : array-like, [cycles-per-image]
Spatial frequencies for the filters
spatial_directions : array-like, [degrees]
Direction of filter motion. Degree position corresponds
to standard unit-circle coordinates (i.e. 0=right, 180=left).
temporal_frequencies : array-like, [Hz]
Temporal frequencies of the filters
filter_temporal_width : int
Temporal window of the motion energy filter (e.g. 10).
Defaults to approximately 0.666[secs] (`floor(stimulus_fps*(2/3))`).
aspect_ratio : optional, 'auto' or float-like,
Defaults to stimulus aspect ratio: hdim/vdim
Useful for preserving the spatial gabors circular even
when images have non-square aspect ratios. For example,
a 16:9 image would have `aspect_ratio`=16/9.
sf_gauss_ratio : scalar
The ratio of spatial frequency to gaussian s.d.
This controls the number of cycles in a filter
max_spatial_env : scalar
Defines the maximum s.d. of the gaussian
gabor_spacing : scalar
Defines the spacing between spatial gabors
(in s.d. units)
tf_gauss_ratio : scalar
The ratio of temporal frequency to gaussian s.d.
This controls the number of temporal cycles
max_temp_env : scalar
Defines the maximum s.d. of the temporal gaussian
include_edges : bool
Determines whether to include filters at the edge
of the image which might be partially outside the
stimulus field-of-view
Returns
-------
parameter_names : list of strings
The name of the parameters
gabor_parameters : 2D np.ndarray, (nfilters, 11)
Parameters that define the motion energy filter
Each of the `nfilters` has the following parameters:
* centerv,centerh : y:vertical and x:horizontal position ('0,0' is top left)
* direction : direction of motion [degrees]
* spatial_freq : spatial frequency [cpi]
* spatial_env : spatial envelope (gaussian s.d.)
* temporal_freq : temporal frequency [Hz]
* temporal_env : temporal envelope (gaussian s.d.)
* filter_temporal_width : temporal window of filter [frames]
* aspect_ratio : width/height
* stimulus_fps : stimulus playback speed in frames per second
* spatial_phase_offset : filter phase offset in [degrees]
Notes
-----
Same method as Nishimoto, et al., 2011.
"""
assert isinstance(aspect_ratio, (int, float, np.ndarray))
def compute_envelope(freq, ratio):
return np.inf if freq == 0 else (1.0/freq)*ratio
spatial_frequencies = np.asarray(spatial_frequencies)
spatial_directions = np.asarray(spatial_directions)
temporal_frequencies = np.asarray(temporal_frequencies)
include_edges = int(include_edges)
# We have to deal with zero frequency spatial filters differently
include_local_dc = True if 0 in spatial_frequencies else False
spatial_frequencies = np.asarray([t for t in spatial_frequencies if t != 0])
# add temporal envelope max
params = list(itertools.product(spatial_frequencies, spatial_directions))
gabor_parameters = []
for spatial_freq, spatial_direction in params:
spatial_env = min(compute_envelope(spatial_freq, sf_gauss_ratio), max_spatial_env)
# compute the number of gaussians that will fit in the FOV
vertical_space = np.floor(((1.0 - spatial_env*gabor_spacing)/(gabor_spacing*spatial_env))/2.0)
horizontal_space = np.floor(((aspect_ratio - spatial_env*gabor_spacing)/(gabor_spacing*spatial_env))/2.0)
# include the edges of screen?
vertical_space = max(vertical_space, 0) + include_edges
horizontal_space = max(horizontal_space, 0) + include_edges
# get the spatial gabor locations
ycenters = spatial_env*gabor_spacing*np.arange(-vertical_space, vertical_space+1) + 0.5
xcenters = spatial_env*gabor_spacing*np.arange(-horizontal_space, horizontal_space+1) + aspect_ratio/2.
for ii, (cx, cy) in enumerate(itertools.product(xcenters,ycenters)):
for temp_freq in temporal_frequencies:
temp_env = min(compute_envelope(temp_freq, tf_gauss_ratio), max_temp_env)
if temp_freq == 0 and spatial_direction >= 180:
# 0Hz temporal filter doesn't have motion, so
# 0 and 180 degrees orientations are the same filters
continue
gabor_parameters.append([cx,
cy,
spatial_direction,
spatial_freq,
spatial_env,
temp_freq,
temp_env,
filter_temporal_width,
aspect_ratio,
stimulus_fps,
spatial_phase_offset,
])
if spatial_direction == 0 and include_local_dc:
# add local 0 spatial frequency non-directional temporal filter
gabor_parameters.append([cx,
cy,
spatial_direction,
0., # zero spatial freq
spatial_env,
temp_freq,
temp_env,
filter_temporal_width,
aspect_ratio,
stimulus_fps,
spatial_phase_offset,
])
parameter_names = ('centerh',
'centerv',
'direction',
'spatial_freq',
'spatial_env',
'temporal_freq',
'temporal_env',
'filter_temporal_width',
'aspect_ratio',
'stimulus_fps',
'spatial_phase_offset',
)
gabor_parameters = np.asarray(gabor_parameters)
return parameter_names, gabor_parameters
| StarcoderdataPython |
4826305 | <reponame>davehowell/sqlfluff
# No docstring here as it would appear in the rules docs.
# Rule definitions for the standard ruleset, dynamically imported from the directory.
# noqa
import os
from importlib import import_module
from glob import glob
# All rule files are expected in the format of L*.py
rules_path = os.path.join(os.path.abspath(os.path.dirname(__file__)), "L*.py")
# Create a rules dictionary for importing in sqlfluff/src/sqlfluff/core/rules/__init__.py
rules = []
# Sphinx effectively runs an import * from this module in rules.rst, so initialise
# __all__ with an empty list before we populate it with the rule names.
__all__ = []
for module in sorted(glob(rules_path)):
# Manipulate the module path to extract the filename without the .py
rule_id = os.path.splitext(os.path.basename(module))[0]
# All rule classes are expected in the format of Rule_L*
rule_class_name = f"Rule_{rule_id}"
try:
rule_class = getattr(
import_module(f"sqlfluff.core.rules.std.{rule_id}"), rule_class_name
)
except AttributeError:
raise (AttributeError("Rule classes must be named in the format of L*."))
    # Add the rule to the rules list for sqlfluff/src/sqlfluff/core/rules/__init__.py
rules.append(rule_class)
# Add the rule_classes to the module namespace with globals() so that they can
# be found by Sphinx automodule documentation in rules.rst
# The result is the same as declaring the classes in this file.
globals()[rule_class_name] = rule_class
# Add the rule class names to __all__ for Sphinx automodule discovery
__all__.append(rule_class_name)
| StarcoderdataPython |
3254035 | import math
import random
import time
from roman import Robot, Tool, Joints
from roman.sim.simenv import SimEnv
class Writer():
def __init__(self):
self.frames = []
self.last_time = 0
def __call__(self, arm_state, hand_state, arm_cmd, hand_cmd):
if arm_state.time() - self.last_time < 0.033:
return
self.frames.append((arm_state.clone(), hand_state.clone(), arm_cmd.clone(), hand_cmd.clone()))
self.last_time = arm_state.time()
def replay_speed(use_sim, iterations=1):
print(f"Running {__file__}::{replay_speed.__name__}()")
start_joints = Joints(0, -math.pi / 2, math.pi / 2, -math.pi / 2, -math.pi / 2, 0)
if not use_sim:
robot = Robot(use_sim=use_sim).connect()
robot.move(start_joints, max_speed=1, max_acc=1)
robot.disconnect()
for iter in range(iterations):
writer = Writer()
robot = Robot(use_sim=use_sim, writer=writer).connect()
home = robot.tool_pose
target = Tool.from_xyzrpy(home.to_xyzrpy() + [-0.2 * random.random(), -0.2 * random.random(), 0.2 * random.random(), 0.5 * random.random(), 0.5 * random.random(), 0.5 * random.random()])
start = robot.last_state()[0].time()
robot.move(target)
robot.stop()
robot.stop()
assert(not robot.is_moving())
print(len(writer.frames))
print(robot.last_state()[0].time() - start)
robot.disconnect()
robot = Robot(use_sim=use_sim).connect()
robot.move(home, max_speed=1, max_acc=1)
robot.stop()
start = robot.last_state()[0].time()
initial = writer.frames[0][0].time()
for i in range(len(writer.frames) - 1):
timeout = writer.frames[i + 1][0].time() - writer.frames[i][0].time()
target_speeds = writer.frames[i + 1][0].joint_speeds()
max_acc = writer.frames[i][2].max_acceleration()
while robot.last_state()[0].time() - start < writer.frames[i + 1][0].time() - initial:
robot.move(target_speeds, max_acc=max_acc, timeout=0)
if use_sim:
assert(robot.arm.state.joint_speeds().allclose(target_speeds))
print(robot.last_state()[0].time() - start)
assert(robot.tool_pose.allclose(target, position_tolerance=0.005, rotation_tolerance=0.05))
assert(not robot.is_moving())
robot.move(home, max_speed=1, max_acc=1)
robot.disconnect()
print("Passed.")
def run(use_sim):
replay_speed(use_sim, 1)
if __name__ == '__main__':
run(use_sim=True)
| StarcoderdataPython |
3327947 | #import python imports
import csv
import os
#read the csv and convert into a list
with open('Resources/budget_data.csv') as csvfile:
data_reader = csv.reader(csvfile)
#read the header
header = next(data_reader)
data = list(data_reader)
#calculate the total number of months
months = len(data)
#calculate the net total amount
net_total = 0
for x in data:
net_total += int(x[1])
#calculate the average of the changes in "Profit/Losses" over the entire period
total_change = 0
revenue_list = []
for x in range(len(data)-1):
month1 = int(data[x][1])
month2 = int(data[x+1][1])
change = month2 - month1
revenue_list.append(change)
total_change += change
average = total_change / (months - 1)
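# e.g. profits [100, 120, 90] give changes [20, -30], so total_change is -10 and average is -5.0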
#calculate the increase of profits and decrease of losses
max_profits = max(revenue_list)
min_profits = min(revenue_list)
Analysis = f"""Financial Analysis
----------------------------
Total Months: {months}
Total: {net_total}
Average Change: {average}
Greatest Increase in Profits: {max_profits}
Greatest Decrease in Profits: {min_profits}"""
print(Analysis) | StarcoderdataPython |
1701656 | import numpy as np
from numpy.core.fromnumeric import size
from scipy.ndimage import affine_transform
from .._transform import Transformer
class Resize(Transformer):
def __init__(self) -> None:
super().__init__()
def transform_matric(self, scale):
assert len(scale) == 2, f'len(sclae) = {len(scale)} != 2'
resize_axis_matrix = np.array(
[[1 / scale[0], 0., 0.],
[0., 1 / scale[1], 0.],
[0., 0., 1.]])
return resize_axis_matrix
def __call__(self, inp, mask, scale=None, size=None):
assert scale is not None or size is not None, \
'Scale is None and size is None.'
assert scale is None or size is None, \
'Ambiguous, scale is not None and size is not None.'
width = mask.shape[0]
height = mask.shape[1]
if scale is not None and not isinstance(scale, (tuple, list)):
scale = (scale, scale)
if size is not None and not isinstance(size, (tuple, list)):
size = (size, size)
if scale is None:
scale = (size[0] / width,
size[1] / height)
if size is None:
size = (int(width * scale[0]),
int(height * scale[1]))
affine_matrix = self.transform_matric(scale)
if inp.ndim == 2:
inp = affine_transform(inp, affine_matrix, output_shape=size)
else:
inp_ = []
for i in range(inp.shape[0]):
inp_.append(affine_transform(inp[i], affine_matrix, output_shape=size))
inp = np.stack(inp_, axis=0)
mask = affine_transform(mask, affine_matrix, order=0, output_shape=size)
return inp, mask.round()
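# Illustrative usage: Resize()(inp, mask, size=(128, 128)) resamples a 2D or
# channel-first 3D input and its mask to 128x128, while scale=0.5 halves each
# spatial dimension; exactly one of `scale` / `size` must be supplied.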
class RandomResize(Transformer):
def __init__(self, r_min, r_max) -> None:
super().__init__()
assert r_max > r_min, \
f'r_max <= r_min, r_max={r_max} and r_min={r_min}'
self.r_max = r_max
self.r_min = r_min
self.resizer = Resize()
def __call__(self, inp, mask):
scale = np.random.rand() * (self.r_max - self.r_min) + self.r_min
return self.resizer(inp, mask, scale=scale)
class ResizeTo(Transformer):
def __init__(self, size) -> None:
super().__init__()
assert isinstance(size, (tuple, list)) and len(size) == 2
self.size = size
self.resizer = Resize()
def __call__(self, inp, mask):
return self.resizer(inp, mask, size=self.size)
| StarcoderdataPython |
4810457 | # for dot access
# arg = {'name': 'jojonki', age: 100}
# conf = Config(**arg)
# print(conf.name) ==> 'jojonki'
class Config(object):
def __init__(self, **entries):
self.__dict__.update(entries)
| StarcoderdataPython |
1797531 | <filename>images_api/app.py<gh_stars>0
from images_api import create_app
if __name__ == '__main__':
api = create_app()
api.run(host="0.0.0.0", debug=api.config['DEBUG'], port=api.config['FLASK_PORT'])
| StarcoderdataPython |
1704919 | '''
Script to generate embeddings from a ResNet trained using PCL
Command to run:
python eval_kmeans.py --pretrained experiment_pcl_resume/checkpoint.pth.tar /home/mprabhud/dataset/shapenet_renders/npys/
'''
from __future__ import print_function
import os
import sys
import time
import torch
import torch.nn as nn
import torch.optim as optim
import torch.backends.cudnn as cudnn
import torch.nn.functional as F
import argparse
import random
import numpy as np
from tqdm import tqdm
import faiss
from torchvision import transforms, datasets
import torchvision.models as models
import pcl.loader
import ipdb
st = ipdb.set_trace
def parse_option():
model_names = sorted(name for name in models.__dict__
if name.islower() and not name.startswith("__")
and callable(models.__dict__[name]))
parser = argparse.ArgumentParser('argument for training')
parser.add_argument('data', metavar='DIR',
help='path to dataset')
parser.add_argument('--batch-size', type=int, default=128, help='batch size')
parser.add_argument('--num-workers', type=int, default=8, help='num of workers to use')
parser.add_argument('--cost', type=str, default='0.5')
parser.add_argument('--seed', default=0, type=int)
# model definition
parser.add_argument('-a', '--arch', metavar='ARCH', default='resnet50',
choices=model_names,
help='model architecture: ' +
' | '.join(model_names) +
' (default: resnet50)')
parser.add_argument('--pretrained', default='', type=str,
help='path to pretrained checkpoint')
# dataset
parser.add_argument('--low-shot', default=False, action='store_true', help='whether to perform low-shot training.')
parser.add_argument('--low-dim', default=16, type=int,
help='feature dimension (default: 128)')
parser.add_argument('--pcl-r', default=1024, type=int,
help='queue size; number of negative pairs; needs to be smaller than num_cluster (default: 16384)')
parser.add_argument('--moco-m', default=0.999, type=float,
help='moco momentum of updating key encoder (default: 0.999)')
parser.add_argument('--temperature', default=0.2, type=float,
help='softmax temperature')
parser.add_argument('--mlp', action='store_true',
help='use mlp head')
parser.add_argument('--aug-plus', action='store_true',
help='use moco-v2/SimCLR data augmentation')
parser.add_argument('--cos', action='store_true',
help='use cosine lr schedule')
parser.add_argument('--num-cluster', default='2500,5000,10000', type=str,
help='number of clusters')
opt = parser.parse_args()
opt.num_class = 20
# if low shot experiment, do 5 random runs
if opt.low_shot:
opt.n_run = 5
else:
opt.n_run = 1
return opt
def main():
args = parse_option()
args.num_cluster = args.num_cluster.split(',')
random.seed(args.seed)
np.random.seed(args.seed)
########################################################################
    # STEP 1: SETUP DATALOADER (MAKE SURE TO CONVERT IT TO PIL IMAGE !!!!!)#
########################################################################
traindir = os.path.join(args.data)
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
train_dataset = pcl.loader.ShapeNet(
traindir,
'split_allpt.txt',
transform=transforms.Compose([
transforms.ToPILImage(),
transforms.ToTensor(),
normalize
]))
train_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=args.batch_size*2, shuffle=False,
sampler=None, num_workers=args.num_workers, pin_memory=True)
############################
# STEP 2: INITIALIZE MODEL #
############################
# create model
print("=> creating model '{}'".format(args.arch))
kmeans_model = models.__dict__[args.arch](num_classes=16)
kmeans_model.fc = nn.Sequential(nn.Linear(2048, 2048), nn.ReLU(), kmeans_model.fc)
# load from pre-trained
if args.pretrained:
if os.path.isfile(args.pretrained):
print("=> loading checkpoint '{}'".format(args.pretrained))
checkpoint = torch.load(args.pretrained, map_location="cpu")
state_dict = checkpoint['state_dict']
# rename pre-trained keys
for k in list(state_dict.keys()):
if k.startswith('module.encoder_k'):
# remove prefix
state_dict[k[len("module.encoder_k."):]] = state_dict[k]
# delete renamed or unused k
del state_dict[k]
kmeans_model.load_state_dict(state_dict, strict=False)
print("=> loaded pre-trained model '{}'".format(args.pretrained))
else:
print("=> no checkpoint found at '{}'".format(args.pretrained))
kmeans_model.cuda()
###############################
# STEP 3: GET Kmeans Clusters #
##############################
cluster_result = None
features = compute_embeddings(train_loader, kmeans_model, args) #generate embeddings based on keys encoder (different from eval_embeddings.py)
# placeholder for clustering result
cluster_result = {'im2cluster':[],'centroids':[],'density':[]}
for num_cluster in args.num_cluster:
cluster_result['im2cluster'].append(torch.zeros(len(train_dataset),dtype=torch.long).cuda())
cluster_result['centroids'].append(torch.zeros(int(num_cluster),16).cuda())
cluster_result['density'].append(torch.zeros(int(num_cluster)).cuda())
features[torch.norm(features,dim=1)>1.5] /= 2 #account for the few samples that are computed twice
features = features.numpy()
cluster_result = run_kmeans(features,args) #run kmeans clustering
def compute_embeddings(eval_loader, model, args):
print('Computing embeddings...')
model.eval()
features = torch.zeros(len(eval_loader.dataset),16).cuda()
for i, (images, index) in enumerate(tqdm(eval_loader)):
with torch.no_grad():
images = images.cuda(non_blocking=True)
feat = model(images)
features[index] = feat
return features.cpu()
def run_kmeans(x, args):
"""
Args:
x: data to be clustered
"""
results = {'im2cluster':[],'centroids':[],'density':[]}
for seed, num_cluster in enumerate(args.num_cluster):
print('performing kmeans clustering on ...',num_cluster)
# intialize faiss clustering parameters
d = x.shape[1]
k = int(num_cluster)
clus = faiss.Clustering(d, k)
clus.verbose = True
clus.niter = 20
clus.nredo = 5
clus.seed = seed
clus.max_points_per_centroid = 1000
clus.min_points_per_centroid = 10
res = faiss.StandardGpuResources()
cfg = faiss.GpuIndexFlatConfig()
cfg.useFloat16 = False
cfg.device = 0
index = faiss.GpuIndexFlatL2(res, d, cfg)
clus.train(x, index)
D, I = index.search(x, 1) # for each sample, find cluster distance and assignments
im2cluster = [int(n[0]) for n in I]
# get cluster centroids
centroids = faiss.vector_to_array(clus.centroids).reshape(k,d)
# sample-to-centroid distances for each cluster
Dcluster = [[] for c in range(k)]
for im,i in enumerate(im2cluster):
Dcluster[i].append(D[im][0])
# concentration estimation (phi)
density = np.zeros(k)
for i,dist in enumerate(Dcluster):
if len(dist)>1:
d = (np.asarray(dist)**0.5).mean()/np.log(len(dist)+10)
density[i] = d
#if cluster only has one point, use the max to estimate its concentration
dmax = density.max()
for i,dist in enumerate(Dcluster):
if len(dist)<=1:
density[i] = dmax
density = density.clip(np.percentile(density,10),np.percentile(density,90)) #clamp extreme values for stability
density = args.temperature*density/density.mean() #scale the mean to temperature
# convert to cuda Tensors for broadcast
centroids = torch.Tensor(centroids).cuda()
centroids = nn.functional.normalize(centroids, p=2, dim=1)
im2cluster = torch.LongTensor(im2cluster).cuda()
density = torch.Tensor(density).cuda()
results['centroids'].append(centroids)
results['density'].append(density)
results['im2cluster'].append(im2cluster)
return results
if __name__ == '__main__':
    main()
| StarcoderdataPython |
1775319 | from typing import Callable, Generic, Dict, List, TypeVar, Type
from .fnode import FNode
Request = TypeVar('Request')
Response = TypeVar('Response')
Value = TypeVar('Value')
class Flow(Generic[Value]):
def __init__(self):
self.start_node = None
self.nodes: Dict[str, FNode] = {}
def configure(self, lines: List[str]) -> 'Flow[Value]':
for line in lines:
line = line.replace('\n', '').replace(' ', '').strip()
if not line or line.startswith('//'):
continue
output, input_name = line.split("--")
output_name, output_channel = output.split('.')
self.nodes[output_name].on(
output_channel, self.nodes[input_name])
return self
def configure_by_file(self, filename: str) -> 'Flow[Value]':
with open(filename, 'r') as f:
return self.configure(f.readlines())
def Node(self, name: str, s: Type[Request], t: Type[Response],
input: Callable[[Request, Callable[[str, Response], None], Callable[
[str, Exception], None]], None]) -> 'FNode[Request, Response]':
node = FNode[Request, Response](input)
self.nodes[name] = node
self.start_node = self.start_node or node
return node
def node(self,
name: str, s: Type[Request], t: Type[Response],
input: Callable[[Request, Callable[[str, Response], None], Callable[
[str, Exception], None]], None]) -> 'Flow[Value]':
self.Node(name, s, t, input)
return self
def run(self, value: Value = None):
self.start_node.run(value)
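# Illustration (added; not part of the original module). configure() wires nodes
# from lines of the form "<output node>.<channel>--<input node>"; blank lines and
# lines starting with '//' are skipped. How FNode dispatches requests to the input
# callables lives in .fnode (not shown here), so the lambdas below are placeholders
# only meant to show the registration and wiring API.
def _example_flow() -> 'Flow[int]':
    flow = Flow[int]()
    flow.node('double', int, int, lambda req, ok, err: ok('out', req * 2))
    flow.node('show', int, type(None), lambda req, ok, err: print('result:', req))
    return flow.configure(['// wire the doubler into the printer',
                           'double.out--show'])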
| StarcoderdataPython |
44249 | # repo: Xiul109/eeglib
import unittest
import numpy as np
from itertools import product
import eeglib.auxFunctions as aux
class TestAuxFuncs(unittest.TestCase):
dictToFlat = {"asd":1, "lol":[2,3], "var":{"xd":4, "XD":5}}
listToFlat = [0, 1, 2, [3, 4], [5, [6, 7]]]
simpleDict = {"a":0, "b":1}
simpleList = [0, 1]
def test_flatData_dict(self):
trueFlatten = {'slang_asd': 1, 'slang_lol_0': 2, 'slang_lol_1': 3,
'slang_var_xd': 4, 'slang_var_XD': 5}
flatten = aux.flatData(self.dictToFlat, "slang")
self.assertEqual(flatten, trueFlatten)
def test_flatData_list(self):
trueFlatten = {'list_0': 0, 'list_1': 1, 'list_2': 2, 'list_3_0': 3,
'list_3_1': 4, 'list_4_0': 5, 'list_4_1_0': 6, 'list_4_1_1': 7}
flatten = aux.flatData(self.listToFlat, "list")
self.assertEqual(flatten, trueFlatten)
def test_flatData_names(self):
#Flat List with name
trueFlatten = {'l_0': 0, 'l_1': 1}
flatten = aux.flatData(self.simpleList, "l")
self.assertEqual(flatten, trueFlatten)
#Flat List without name
trueFlatten = {'0': 0, '1': 1}
flatten = aux.flatData(self.simpleList, "")
self.assertEqual(flatten, trueFlatten)
#Flat Dict with name
trueFlatten = {'l_a': 0, 'l_b': 1}
flatten = aux.flatData(self.simpleDict, "l")
self.assertEqual(flatten, trueFlatten)
#Flat Dict without name
trueFlatten = {'a': 0, 'b': 1}
flatten = aux.flatData(self.simpleDict, "")
self.assertEqual(flatten, trueFlatten)
def test_flatData_separators(self):
#Flat List with tabs separator
trueFlatten = {'l\t0': 0, 'l\t1': 1}
flatten = aux.flatData(self.simpleList, "l", separator="\t")
self.assertEqual(flatten, trueFlatten)
#Flat List with empty separator
trueFlatten = {'l0': 0, 'l1': 1}
flatten = aux.flatData(self.simpleList, "l", separator="")
self.assertEqual(flatten, trueFlatten)
#Flat Dict with tabs separator
trueFlatten = {'l\ta': 0, 'l\tb': 1}
flatten = aux.flatData(self.simpleDict, "l", separator="\t")
self.assertEqual(flatten, trueFlatten)
#Flat Dict with empty separator
trueFlatten = {'la': 0, 'lb': 1}
flatten = aux.flatData(self.simpleDict, "l", separator="")
self.assertEqual(flatten, trueFlatten)
def test_listType_noList(self):
noList = "asd"
with self.assertRaises(ValueError):
aux.listType(noList)
def test_listType_noLength(self):
testList = []
v = aux.listType(testList)
self.assertEqual(v, None)
def test_listType_diffTypes(self):
v = aux.listType([1, "2", 3.0])
self.assertEqual(v, None)
v = aux.listType([*range(10),"10", *range(11,21)])
self.assertEqual(v, None)
def test_listType_sameTypes(self):
v = aux.listType([*range(10)])
self.assertEqual(v, int)
v = aux.listType(["a", 'b', "1", '2', """"long text"""])
self.assertEqual(v, str)
if __name__ == "__main__":
unittest.main()
| StarcoderdataPython |
122794 | import numpy as np
from pyriemann.estimation import Covariances
from pyriemann.spatialfilters import CSP
from sklearn.feature_selection import SelectKBest, mutual_info_classif
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import make_pipeline
from sklearn.svm import SVC
from moabb.pipelines.utils import FilterBank
parameters = {"C": np.logspace(-2, 2, 10)}
clf = GridSearchCV(SVC(kernel="linear"), parameters)
fb = FilterBank(make_pipeline(Covariances(estimator="oas"), CSP(nfilter=4)))
pipe = make_pipeline(fb, SelectKBest(score_func=mutual_info_classif, k=10), clf)
# this is what will be loaded
PIPELINE = {
"name": "FBCSP + optSVM",
"paradigms": ["FilterBankMotorImagery"],
"pipeline": pipe,
}
| StarcoderdataPython |
3233675 | from typing import BinaryIO
from nbt.classes.base import NBTBase, NBTPrimitiveFloat, NBTPrimitiveInt
class NBTTagEnd(NBTBase):
def write(self, data_stream: BinaryIO) -> None:
pass
@classmethod
def read(cls, data_stream: BinaryIO, depth: int) -> 'NBTBase':
return cls()
@classmethod
def id(cls) -> int:
return 0
def copy(self) -> 'NBTBase':
return NBTTagEnd()
def __str__(self) -> str:
return "END"
class NBTTagByte(NBTPrimitiveInt):
@classmethod
def format(cls) -> str:
return 'b'
@classmethod
def id(cls) -> int:
return 1
def __str__(self) -> str:
return str(int(self)) + "b"
class NBTTagShort(NBTPrimitiveInt):
@classmethod
def format(cls) -> str:
return 'h'
@classmethod
def id(cls) -> int:
return 2
def __str__(self) -> str:
return str(int(self)) + "s"
class NBTTagInt(NBTPrimitiveInt):
@classmethod
def format(cls) -> str:
return 'i'
@classmethod
def id(cls) -> int:
return 3
def __str__(self) -> str:
return str(int(self))
class NBTTagLong(NBTPrimitiveInt):
@classmethod
def format(cls) -> str:
return 'q'
@classmethod
def id(cls) -> int:
return 4
def __str__(self) -> str:
return str(int(self)) + "L"
class NBTTagFloat(NBTPrimitiveFloat):
@classmethod
def format(cls) -> str:
return 'f'
@classmethod
def id(cls) -> int:
return 5
def __str__(self) -> str:
return str(float(self)) + "f"
class NBTTagDouble(NBTPrimitiveFloat):
@classmethod
def format(cls) -> str:
return 'd'
@classmethod
def id(cls) -> int:
return 6
def __str__(self) -> str:
return str(float(self)) + "d"
| StarcoderdataPython |
3246574 | ''' Copyright [2018] [<NAME>]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://github.com/Robosid/Drone-Intelligence/blob/master/License.pdf
https://github.com/Robosid/Drone-Intelligence/blob/master/License.rtf
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
# Created by <NAME> (aka Robosid). for AutoMav of Project Heartbeat.
# Script for take off and control with directional velocity maneuver, but with send_global_velocity.
# Last modified by : Robosid
# Last modified on : 03 / 22 / 2018
import time
from dronekit import connect, VehicleMode, LocationGlobalRelative, Command, LocationGlobal
from pymavlink import mavutil
#-- Connect to the vehicle
#print('Connecting...')
#vehicle = connect('udp:127.0.0.1:14551')
print(">>>> Connecting with the UAV <<<")
import argparse
parser = argparse.ArgumentParser(description='commands')
parser.add_argument('--connect', default='/dev/ttyS0')
args = parser.parse_args()
connection_string = args.connect
print("Connection to the vehicle on %s"%connection_string)
vehicle = connect(args.connect, baud=921600, wait_ready=True) #- wait_ready flag hold the program untill all the parameters are been read (=, not .)
#-- Setup the commanded flying speed
gnd_speed = 2 # [m/s]
#-- Define arm and takeoff
def arm_and_takeoff(altitude):
while not vehicle.is_armable:
print("waiting to be armable")
time.sleep(1)
print("Arming motors")
vehicle.mode = VehicleMode("GUIDED")
vehicle.armed = True
while not vehicle.armed: time.sleep(1)
print("Taking Off")
vehicle.simple_takeoff(altitude)
while True:
v_alt = vehicle.location.global_relative_frame.alt
print(">> Altitude = %.1f m"%v_alt)
if v_alt >= altitude - 1.0:
print("Target altitude reached")
break
time.sleep(1)
def send_global_velocity(velocity_x, velocity_y, velocity_z, duration):
"""
Move vehicle in direction based on specified velocity vectors.
"""
msg = vehicle.message_factory.set_position_target_global_int_encode(
0, # time_boot_ms (not used)
0, 0, # target system, target component
mavutil.mavlink.MAV_FRAME_GLOBAL_RELATIVE_ALT_INT, # frame
0b0000111111000111, # type_mask (only speeds enabled)
0, # lat_int - X Position in WGS84 frame in 1e7 * meters
0, # lon_int - Y Position in WGS84 frame in 1e7 * meters
0, # alt - Altitude in meters in AMSL altitude(not WGS84 if absolute or relative)
# altitude above terrain if GLOBAL_TERRAIN_ALT_INT
velocity_x, # X velocity in NED frame in m/s
velocity_y, # Y velocity in NED frame in m/s
velocity_z, # Z velocity in NED frame in m/s
0, 0, 0, # afx, afy, afz acceleration (not supported yet, ignored in GCS_Mavlink)
0, 0) # yaw, yaw_rate (not supported yet, ignored in GCS_Mavlink)
# send command to vehicle on 1 Hz cycle
for x in range(0,duration):
vehicle.send_mavlink(msg)
time.sleep(1)
def send_ned_velocity(velocity_x, velocity_y, velocity_z, duration):
"""
Move vehicle in direction based on specified velocity vectors.
"""
msg = vehicle.message_factory.set_position_target_local_ned_encode(
0, # time_boot_ms (not used)
0, 0, # target system, target component
mavutil.mavlink.MAV_FRAME_LOCAL_NED, # frame
0b0000111111000111, # type_mask (only speeds enabled)
0, 0, 0, # x, y, z positions (not used)
velocity_x, velocity_y, velocity_z, # x, y, z velocity in m/s
0, 0, 0, # x, y, z acceleration (not supported yet, ignored in GCS_Mavlink)
0, 0) # yaw, yaw_rate (not supported yet, ignored in GCS_Mavlink)
# send command to vehicle on 1 Hz cycle
for x in range(0,duration):
vehicle.send_mavlink(msg)
time.sleep(1)
# Set up velocity mappings
# velocity_x > 0 => fly North
# velocity_x < 0 => fly South
# velocity_y > 0 => fly East
# velocity_y < 0 => fly West
# velocity_z < 0 => ascend
# velocity_z > 0 => descend
SOUTH=-2
UP=-0.5 #NOTE: up is negative!
arm_and_takeoff(5)
time.sleep(20)
#Fly south and up.
DURATION = 5  # seconds to hold each velocity command (assumed value; DURATION was not defined in the original script)
send_ned_velocity(SOUTH,0,UP,DURATION)
time.sleep(5)
send_global_velocity(SOUTH,0,UP,DURATION)
| StarcoderdataPython |
3369816 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import unittest
import epitran
class TestArabic(unittest.TestCase):
def setUp(self):
self.epi = epitran.Epitran('ara-Arab')
def test_Iraq(self):
tr = self.epi.transliterate('العراق')
self.assertEqual(tr, 'alʕraːq')
def test_Quran(self):
tr = self.epi.transliterate('القرآن')
self.assertEqual(tr, 'alqrʔaːn')
def test_(self):
tr = self.epi.transliterate('محمد')
self.assertEqual(tr, 'mħmd')
| StarcoderdataPython |
3227568 | # repo: delitamakanda/cautious-journey
"""backend URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.views.generic import TemplateView
from django.conf.urls.static import static
from django.conf import settings
from django.views.decorators.cache import cache_page
# from django.views.decorators.csrf import csrf_exempt
from rest_framework.documentation import include_docs_urls
from accounts.api.views import (
UserFanficDetailView,
UserDetailView,
AccountProfileDetailView,
SocialListApiView,
SocialDestroyApiView,
GroupListView,
SignupView,
FavoritedFanficView,
UnfavoritedFanficView,
FollowUserView,
FollowStoriesView,
DeleteAccountView,
FollowAuthorDeleteView,
FollowStoriesDeleteView,
)
from helpcenter.api.views import (
FoireAuxQuestionsApiView,
LexiqueApiView,
)
from categories.api.views import (
CategoryDetailView,
CategoryListView,
SubCategoryDetailView,
SubCategoryListView,
)
from posts.api.views import (
PostCreateAPIView,
PostDetailAPIView,
PostListAPIView,
PostUpdateAPIView,
TagListAPIView,
)
from fanfics.api.views import (
GenresListView,
ClassementListView,
StatusListView,
FanficCreateApiView,
FanficDetailView,
FanficUpdateDetailView,
)
from comments.api.views import (
CommentCreateApiView,
CommentListApiView,
)
from chapters.api.views import (
ChapterCreateApiView,
ChapterDetailView
)
from api.views import (
ApiRootView,
)
from api.api import (
EmailFeedbackView,
ContactMailView,
FlatPagesView,
FlatPagesByTypeView,
FlatPagesHTMLByTypeView,
ContentTypeView,
NotificationListView,
BrowseFanfictionListView,
)
from api.api_fanfic import (
ShareFanficAPIView,
)
from api.api_auth import (
CheckoutUserView,
UserCreateView,
LoginView,
LogoutView,
SocialSignUp,
ChangePasswordView,
RemovePhotoFromAccount,
)
from api.api_session import (
get_csrf,
)
urlpatterns = [
path('admin/', admin.site.urls),
path('accounts/', include(('accounts.urls', 'accounts'), namespace='accounts')),
path('help/', include(('helpcenter.urls', 'helpcenter'), namespace='helpcenter')),
path('posts/', include(('posts.urls', 'posts'), namespace='posts')),
path('', cache_page(60 * 5)(TemplateView.as_view(
template_name='frontend/index.html')), name='index'),
# path('', TemplateView.as_view(
# template_name='frontend/index.html'), name='index'),
]
urlpatterns += [
path('api/', ApiRootView.as_view(), name=ApiRootView.name),
path('api/docs/', include_docs_urls(title='Fanfiction API', public=False)),
path('api/csrf/', get_csrf, name='csrf'),
path('api/user/', CheckoutUserView.as_view(), name='user'),
path('api/signup/', UserCreateView.as_view(), name='signup'),
path('api/login/', LoginView.as_view(), name='login'),
path('api/logout/', LogoutView.as_view(), name='logout'),
path('api/social_sign_up/', SocialSignUp.as_view(), name="social_sign_up"),
path('api/change-password/', ChangePasswordView.as_view(),
name='change-password'),
path('api/remove-photo/<int:pk>/',
RemovePhotoFromAccount.as_view(), name='remove-photo'),
path('api/share/', ShareFanficAPIView.as_view(), name='share'),
path('api/feedback/', EmailFeedbackView.as_view(), name='feedback'),
path('api/favorite/', FavoritedFanficView.as_view(), name='favorite'),
path('api/unfavorite/', UnfavoritedFanficView.as_view(), name='unfavorite'),
path('api/follow-stories/', FollowStoriesView.as_view(), name='follow-stories'),
path('api/follow-user/', FollowUserView.as_view(), name='follow-user'),
path('api/story-followed/<int:to_fanfic>/', FollowStoriesDeleteView.as_view()),
path('api/author-followed/<int:user_to>/', FollowAuthorDeleteView.as_view()),
path('api/disable-account/', DeleteAccountView.as_view(), name='disable-account'),
path('api/contact-mail/', ContactMailView.as_view(), name='contact-mail'),
path('api/pages/<str:type>/',
FlatPagesByTypeView.as_view(), name='pages-by-type'),
path('api/page/<str:type>/html/',
FlatPagesHTMLByTypeView.as_view(), name='pages-html-by-type'),
path('api/pages/', FlatPagesView.as_view(), name='all-pages'),
path('api/notifications/', NotificationListView.as_view(), name='notifications'),
path('api/contenttype/<int:pk>/',
ContentTypeView.as_view(), name='contenttype-detail'),
path('api/browse-fanfics/',
BrowseFanfictionListView.as_view(), name='browse-fanfics'),
path('api/users/<str:username>/account/',
UserFanficDetailView.as_view(), name='user-list'),
path('api/users/<int:pk>/', UserDetailView.as_view(), name='user-detail'),
path('api/users/<str:user__username>/profile/',
AccountProfileDetailView.as_view(), name='user-edit-account-profile'),
path('api/users/<int:account>/socialaccount/',
SocialListApiView.as_view(), name='socialaccount-view'),
path('api/users/social-account/', SocialListApiView.as_view(),
name='socialaccount-createview'),
path('api/users/social-account/<int:pk>/delete/',
SocialDestroyApiView.as_view(), name='socialaccount-destroy'),
path('api/groups/', GroupListView.as_view()),
path('api/sign_up/', SignupView.as_view(), name="sign_up"),
path('api/faq/', FoireAuxQuestionsApiView.as_view(), name='faq'),
path('api/lexique/', LexiqueApiView.as_view(), name='lexique'),
path('api/category/', CategoryListView.as_view(), name='category-list'),
path('api/category/<int:pk>/',
CategoryDetailView.as_view(), name='category-detail'),
path('api/subcategory/', SubCategoryListView.as_view(), name='subcategory-list'),
path('api/subcategory/<int:pk>/',
SubCategoryDetailView.as_view(), name='subcategory-detail'),
path('api/tags/', TagListAPIView.as_view(), name='tag-list'),
path('api/posts/', PostListAPIView.as_view(), name='post-list'),
path('api/posts/create/', PostCreateAPIView.as_view(), name='post-create'),
path('api/posts/<int:pk>/update/',
PostUpdateAPIView.as_view(), name='post-update'),
path('api/posts/<int:pk>/remove/',
PostUpdateAPIView.as_view(), name='post-delete'),
path('api/posts/<str:slug>/', PostDetailAPIView.as_view(), name='post-detail'),
path('api/genres/', GenresListView.as_view(), name='genre-list'),
path('api/classement/', ClassementListView.as_view(), name='classement-list'),
path('api/status/', StatusListView.as_view(), name='status-list'),
path('api/fanfics/', FanficCreateApiView.as_view(), name='fanfic-list'),
path('api/fanfics/<str:author>/',
FanficCreateApiView.as_view(), name='fanfic-list-by-author'),
path('api/fanfics/<str:slug>/detail/',
FanficDetailView.as_view(), name='fanfic-detail'),
path('api/fanfics/<int:pk>/fanfic-detail/',
FanficUpdateDetailView.as_view(), name='fanfic-update-detail'),
path('api/comments/', CommentCreateApiView.as_view(), name='comment-create'),
path('api/comments-list/', CommentListApiView.as_view(), name='comment-list'),
path('api/chapters/<int:fanfic>/list/',
ChapterCreateApiView.as_view(), name='chapter-list-by-fanfic'),
path('api/chapters/create/', ChapterCreateApiView.as_view(), name='chapter-list'),
path('api/chapters/<int:pk>/',
ChapterDetailView.as_view(), name='chapter-detail'),
]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL,
document_root=settings.MEDIA_ROOT)
urlpatterns += static(settings.STATIC_URL,
document_root=settings.STATIC_ROOT)
urlpatterns += [
path('markdownx/', include('markdownx.urls')),
path('api/oauth2-social/', include('rest_framework_social_oauth2.urls')),
path('api/oauth2/', include('oauth2_provider.urls', namespace='oauth2_provider')),
]
| StarcoderdataPython |
1646738 | # repo: FabianGroeger96/hslu-roblab-hs18
#!/usr/bin/env python
# Class autogenerated from /home/sam/Downloads/aldebaran_sw/nao/naoqi-sdk-2.1.4.13-linux64/include/alproxies/alchestbuttonproxy.h
# by <NAME>'s <Sammy.Pfeiffer at student.<EMAIL>.<EMAIL>.au> generator
# You need an ALBroker running
from naoqi import ALProxy
class ALChestButton(object):
def __init__(self, session):
self.proxy = None
self.session = session
def force_connect(self):
self.proxy = self.session.service("ALChestButton")
def ping(self):
"""Just a ping. Always returns true
:returns bool: returns true
"""
if not self.proxy:
self.proxy = self.session.service("ALChestButton")
return self.proxy.ping()
def version(self):
"""Returns the version of the module.
:returns str: A string containing the version of the module.
"""
if not self.proxy:
self.proxy = self.session.service("ALChestButton")
return self.proxy.version()
| StarcoderdataPython |
159567 | # -*- coding: utf-8 -*-
# Copyright (c) 2019 <NAME>
# wwdtm_scoreimage is released under the terms of the Apache License 2.0
"""Generate PNG image file based on WWDTM show score totals"""
import json
import math
import os
from typing import List
import mysql.connector
from mysql.connector.errors import DatabaseError, ProgrammingError
import numpy
from PIL import Image
BASE_IMAGE_WIDTH = 30
IMAGE_SCALE = 40
def retrieve_show_total_scores(database_connection: mysql.connector) -> List[int]:
"""Retrieve total scores for each show"""
cursor = database_connection.cursor()
query = ("select sum(pm.panelistscore) as total "
"from ww_showpnlmap pm "
"join ww_shows s on s.showid = pm.showid "
"where s.bestof = 0 and s.repeatshowid is null "
"group by s.showdate "
"having sum(pm.panelistscore) > 0")
cursor.execute(query)
result = cursor.fetchall()
if not result:
return None
scores = []
for row in result:
scores.append(int(row[0]))
return scores
def remap(in_value: int,
in_minimum: int,
in_maximum: int,
out_minimum: int,
out_maximum: int) -> int:
"""Remap a value from one value range to another value range
while maintaining ratio"""
new_value = (in_value - in_minimum) * (out_maximum - out_minimum) \
/ (in_maximum - in_minimum) + out_minimum
return math.floor(new_value)
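# Worked example (added for clarity): mapping a show total of 150 points from an
# observed range of 30..240 onto the 0..255 pixel range used below:
#   remap(150, 30, 240, 0, 255) == floor((150 - 30) * (255 - 0) / (240 - 30)) == 145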
def pad(list_object: List, content, width: int) -> List:
list_object.extend([content] * (width - len(list_object)))
return list_object
def split(values):
for i in range(0, len(values), BASE_IMAGE_WIDTH):
yield values[i:i+BASE_IMAGE_WIDTH]
def convert_list_to_pixels(values: List[int]) -> List[List]:
pixels = []
for row in values:
row_tuples = []
for value in row:
row_tuples.append((value, math.floor(value / 3), 0))
if len(row_tuples) < BASE_IMAGE_WIDTH:
pad(row_tuples, (0, 0, 0), BASE_IMAGE_WIDTH)
pixels.append(row_tuples)
return pixels
def generate_image(values, dimension_side: int):
"""Generate a PNG image based on a list of integers"""
image_size = dimension_side * IMAGE_SCALE
array = numpy.array(values, dtype=numpy.uint8)
image = Image.fromarray(array)
resized_image = image.resize((image_size, image_size), Image.NEAREST)
resized_image.save('output.png')
resized_image.show()
def load_config(app_environment):
"""Load configuration file from config.json"""
with open('config.json', 'r') as config_file:
config_dict = json.load(config_file)
if app_environment.startswith("develop"):
if "development" in config_dict:
config = config_dict["development"]
else:
raise Exception("Missing 'development' section in config file")
elif app_environment.startswith("prod"):
if "production" in config_dict:
config = config_dict['production']
else:
raise Exception("Missing 'production' section in config file")
else:
if "local" in config_dict:
config = config_dict["local"]
else:
raise Exception("Missing 'local' section in config file")
return config
def main():
"""Pull in scoring data and generate image based on the data"""
app_environment = os.getenv("APP_ENV", "local").strip().lower()
config = load_config(app_environment)
database_connection = mysql.connector.connect(**config["database"])
original_totals = retrieve_show_total_scores(database_connection)
if not original_totals:
print("No scores to process")
original_min_total = min(original_totals)
original_max_total = max(original_totals)
new_min_value = 0
new_max_value = 255
remapped_totals = []
for total in original_totals:
remapped_totals.append(remap(total,
original_min_total,
original_max_total,
new_min_value,
new_max_value))
list_values = list(split(remapped_totals))
pixels = list(convert_list_to_pixels(list_values))
side = math.ceil(math.sqrt(len(original_totals)))
generate_image(pixels, side)
# Only run if executed as a script and not imported
if __name__ == '__main__':
main()
| StarcoderdataPython |
3395624 | # repo: opendatacube/datacube-zarr, file: integration_tests/conftest.py
# coding=utf-8
"""
Common methods for index integration tests.
"""
import itertools
import multiprocessing
import os
import re
from pathlib import Path
from time import sleep
import pytest
import boto3
import datacube.scripts.cli_app
import datacube.utils
import fsspec
from click.testing import CliRunner
from datacube.config import LocalConfig
from datacube.drivers.postgres import PostgresDb, _core
from datacube.index import index_connect
from datacube.index._metadata_types import default_metadata_type_docs
from hypothesis import HealthCheck, settings
from moto.server import main as moto_server_main
from s3path import S3Path, register_configuration_parameter
from integration_tests.utils import copytree
INTEGRATION_TESTS_DIR = Path(__file__).parent
#: Number of time slices to create in sample data
NUM_TIME_SLICES = 3
PROJECT_ROOT = Path(__file__).parents[1]
TEST_DATA = PROJECT_ROOT / 'tests' / 'data' / 'lbg'
LBG_NBAR = 'LS5_TM_NBAR_P54_GANBAR01-002_090_084_19920323'
LBG_PQ = 'LS5_TM_PQ_P55_GAPQ01-002_090_084_19920323'
TEST_DATA_LS8 = PROJECT_ROOT / 'tests' / 'data' / 'espa' / 'ls8_sr'
CONFIG_SAMPLES = PROJECT_ROOT / 'docs' / 'config_samples'
CONFIG_FILE_PATHS = [
str(INTEGRATION_TESTS_DIR / 'agdcintegration.conf'),
os.path.expanduser('~/.datacube_integration.conf'),
]
# Configure Hypothesis to allow slower tests, because we're testing datasets
# and disk IO rather than scalar values in memory. Ask @Zac-HD for details.
settings.register_profile(
'opendatacube',
deadline=5000,
max_examples=10,
suppress_health_check=[HealthCheck.too_slow],
)
settings.load_profile('opendatacube')
@pytest.fixture(scope='session')
def monkeypatch_session():
"""A patch for a session-scoped `monkeypatch`
https://github.com/pytest-dev/pytest/issues/1872
note: private import _pytest).
"""
from _pytest.monkeypatch import MonkeyPatch
m = MonkeyPatch()
yield m
m.undo()
@pytest.fixture(scope='session')
def moto_aws_credentials(monkeypatch_session):
'''Mocked AWS Credentials for moto.'''
MOCK_AWS_CREDENTIALS = {
'AWS_ACCESS_KEY_ID': 'mock-key-id',
'AWS_SECRET_ACCESS_KEY': 'mock-secret',
'AWS_DEFAULT_REGION': "mock-region",
}
for k, v in MOCK_AWS_CREDENTIALS.items():
monkeypatch_session.setenv(k, v)
return MOCK_AWS_CREDENTIALS
@pytest.fixture(scope="session")
def moto_s3_server(monkeypatch_session):
"""Mock AWS S3 Server."""
address = "http://127.0.0.1:5000"
# Run a moto server
proc = multiprocessing.Process(
target=moto_server_main,
name="moto_s3_server",
args=(["s3"],),
daemon=True,
)
proc.start()
sleep(0.3)
yield address
proc.terminate()
proc.join()
@pytest.fixture(scope='session')
def gdal_mock_s3_endpoint(moto_s3_server, monkeypatch_session):
"""Set environment variables for GDAL/rasterio to access moto server."""
monkeypatch_session.setenv('AWS_S3_ENDPOINT', moto_s3_server.split("://")[1])
monkeypatch_session.setenv('AWS_VIRTUAL_HOSTING', 'FALSE')
monkeypatch_session.setenv('AWS_HTTPS', 'NO')
@pytest.fixture(scope='session')
def fsspec_mock_s3_endpoint(moto_s3_server, moto_aws_credentials):
"""Set the boto s3 endpoint via fspec config.
Boto libraries don't offer any way to do this."""
fsspec_conf = {
"s3": {
"client_kwargs": {
"endpoint_url": moto_s3_server,
"region_name": moto_aws_credentials['AWS_DEFAULT_REGION'],
}
}
}
fsspec.config.conf.update(fsspec_conf)
@pytest.fixture(scope="session")
def moto_s3_resource(moto_s3_server, moto_aws_credentials):
"""A boto3 s3 resource pointing to the moto server."""
s3resource = boto3.resource(
's3',
endpoint_url=moto_s3_server,
aws_access_key_id=moto_aws_credentials['AWS_ACCESS_KEY_ID'],
aws_secret_access_key=moto_aws_credentials['AWS_SECRET_ACCESS_KEY'],
# config=Config(signature_version='s3v4'),
region_name=moto_aws_credentials['AWS_DEFAULT_REGION'],
)
return s3resource
@pytest.fixture(scope="session")
def s3path_mock_s3_endpoint(moto_s3_resource):
"""Set boto resource for s3path libaray to access moto server."""
all_buckets = S3Path('/')
register_configuration_parameter(all_buckets, resource=moto_s3_resource)
@pytest.fixture(scope="session")
def s3(
gdal_mock_s3_endpoint,
s3path_mock_s3_endpoint,
fsspec_mock_s3_endpoint,
):
"""Collect together all requires per-session mock s3 fixtures and return a bucket."""
s3_bucket = S3Path("/mock-s3-bucket-integration")
s3_bucket.mkdir()
return s3_bucket
@pytest.fixture(scope="session")
def tmp_s3path_factory(s3):
"""S3Path version of pytest tmp_path_factory."""
def _as_int(s):
try:
return int(s)
except ValueError:
return -1
class TmpS3PathFactory:
def __init__(self, basetmp):
self.basetmp = basetmp
def mktemp(self, name):
suffixes = [
str(p.relative_to(self.basetmp))[len(name) :]
for p in self.basetmp.glob(f"{name}*")
]
max_existing = max([_as_int(s) for s in suffixes], default=-1)
p = self.basetmp / f"{name}{max_existing + 1}"
return p
return TmpS3PathFactory(basetmp=s3 / "pytest")
@pytest.fixture()
def tmp_s3path(request, tmp_s3path_factory):
"""S3Path vesrion of tmp_path fixture."""
MAXVAL = 30
name = re.sub(r"[\W]", "_", request.node.name)[:MAXVAL]
return tmp_s3path_factory.mktemp(name)
@pytest.fixture(params=('file', 's3'))
def tmp_storage_path(request, tmp_path, tmp_s3path):
return tmp_s3path if request.param == "s3" else tmp_path
@pytest.fixture()
def tmp_input_storage_path(tmp_storage_path):
return tmp_storage_path / "input"
@pytest.fixture(scope="session")
def global_integration_cli_args():
"""
The first arguments to pass to a cli command for integration test configuration.
"""
# List of a config files in order.
return list(itertools.chain(*(('--config', f) for f in CONFIG_FILE_PATHS)))
@pytest.fixture
def datacube_env_name(request):
if hasattr(request, 'param'):
return request.param
else:
return 'datacube'
@pytest.fixture
def local_config(datacube_env_name):
"""Provides a :class:`LocalConfig` configured with suitable config file paths.
.. seealso::
The :func:`integration_config_paths` fixture sets up the config files.
"""
return LocalConfig.find(CONFIG_FILE_PATHS, env=datacube_env_name)
@pytest.fixture
def uninitialised_postgres_db(local_config):
"""
Return a connection to an empty PostgreSQL database
"""
timezone = "UTC"
db = PostgresDb.from_config(
local_config, application_name='test-run', validate_connection=False
)
# Drop tables so our tests have a clean db.
_core.drop_db(db._engine)
db._engine.execute(
'alter database %s set timezone = %r' % (local_config['db_database'], timezone)
)
# We need to run this as well
# I think because SQLAlchemy grabs them into it's MetaData,
# and attempts to recreate them. TODO FIX
remove_dynamic_indexes()
yield db
# with db.begin() as c: # Drop SCHEMA
_core.drop_db(db._engine)
db.close()
@pytest.fixture
def index(local_config, uninitialised_postgres_db: PostgresDb):
index = index_connect(local_config, validate_connection=False)
index.init_db()
return index
@pytest.fixture
def index_empty(local_config, uninitialised_postgres_db: PostgresDb):
index = index_connect(local_config, validate_connection=False)
index.init_db(with_default_types=False)
return index
def remove_dynamic_indexes():
"""
Clear any dynamically created postgresql indexes from the schema.
"""
# Our normal indexes start with "ix_", dynamic indexes with "dix_"
for table in _core.METADATA.tables.values():
table.indexes.intersection_update(
[i for i in table.indexes if not i.name.startswith('dix_')]
)
@pytest.fixture
def default_metadata_type_doc():
return [doc for doc in default_metadata_type_docs() if doc['name'] == 'eo'][0]
@pytest.fixture
def default_metadata_types(index):
"""Inserts the default metadata types into the Index"""
for d in default_metadata_type_docs():
index.metadata_types.add(index.metadata_types.from_doc(d))
return index.metadata_types.get_all()
@pytest.fixture
def default_metadata_type(index, default_metadata_types):
return index.metadata_types.get_by_name('eo')
@pytest.fixture()
def ls5_dataset_path(tmp_input_storage_path):
"""LS5 test dataset on filesystem and s3."""
dataset_path = tmp_input_storage_path / "geotifs" / "lbg"
copytree(TEST_DATA, dataset_path)
return dataset_path
@pytest.fixture()
def ls8_dataset_path(tmp_path):
"""LS8 test dataset on filesystem."""
dataset_path = tmp_path / "geotifs" / "espa" / "ls8_sr"
copytree(TEST_DATA_LS8, dataset_path)
return dataset_path
@pytest.fixture
def clirunner(global_integration_cli_args, datacube_env_name):
def _run_cli(
opts,
catch_exceptions=False,
expect_success=True,
cli_method=datacube.scripts.cli_app.cli,
verbose_flag='-v',
):
exe_opts = list(itertools.chain(*(('--config', f) for f in CONFIG_FILE_PATHS)))
exe_opts += ['--env', datacube_env_name]
if verbose_flag:
exe_opts.append(verbose_flag)
exe_opts.extend(opts)
runner = CliRunner()
result = runner.invoke(cli_method, exe_opts, catch_exceptions=catch_exceptions)
if expect_success:
assert 0 == result.exit_code, "Error for %r. output: %r" % (
opts,
result.output,
)
return result
return _run_cli
| StarcoderdataPython |
134146 | # django_ltree_field/migrations/0001_initial.py
# Generated by Django 3.1.7 on 2021-03-29 01:59
from django.db import migrations
import django.contrib.postgres.operations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
django.contrib.postgres.operations.CreateExtension(
name='ltree',
)
]
| StarcoderdataPython |
3253174 | # semDiff/coverage.py
class Coverage:
"""
A class that compute the overlap between two structures
"""
jsonld_ignored_keys = ["@id", "@context", "@type"]
@staticmethod
def json_schema_context_coverage(schema, context):
""" Static method to compute the coverage of a context file for a particular json-schema
:param schema: the content of a JSON schema
:param context: the content of a JSON-LD context associated with the schema
:return: an array contain the overlap and the diff
"""
overlap = []
diff = []
for field in schema["properties"]:
if field not in Coverage.jsonld_ignored_keys:
if field in context["@context"]:
overlap.append(field)
else:
diff.append(field)
return overlap, diff
@staticmethod
def json_schema_context_mapping(schema, *contexts):
""" Static method to retrieve the mapping between a schema and multiple context files
:param schema: the content of a JSON schema
:param contexts: one or more JSON-LD context contexts associated with the schema
:return: an mapping dictionary
"""
mappings = {}
for field in schema["properties"]:
terms = []
if field not in Coverage.jsonld_ignored_keys:
for context in contexts:
if field in context["@context"]:
if "@id" in context["@context"][field]:
terms.append(context["@context"][field]["@id"])
else:
terms.append(context["@context"][field])
else:
terms.append("")
if field in mappings:
previous_terms = mappings[field]
terms.append(previous_terms)
mappings.update({field: terms})
return mappings
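# Usage sketch (added; not part of the original class). The schema and context
# below are made-up minimal examples, only meant to show what the two static
# methods return.
if __name__ == '__main__':
    schema = {"properties": {"@id": {}, "name": {}, "startDate": {}}}
    context = {"@context": {"name": {"@id": "http://schema.org/name"}}}
    print(Coverage.json_schema_context_coverage(schema, context))
    # -> (['name'], ['startDate'])   ("@id" is skipped via jsonld_ignored_keys)
    print(Coverage.json_schema_context_mapping(schema, context))
    # -> {'name': ['http://schema.org/name'], 'startDate': ['']}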
| StarcoderdataPython |
1705049 | // email form/react.py
// Customize this 'myform.js' script and add it to your JS bundle.
// Then import it with 'import MyForm from "./myform.js"'.
// Finally, add a <MyForm/> element whereever you wish to display the form.
import React from "react";
export default class MyForm extends React.Component {
constructor(props) {
super(props);
this.submitForm = this.submitForm.bind(this);
this.state = {
status: ""
};
}
render() {
const { status } = this.state;
return (
<form
onSubmit={this.submitForm}
action="https://formspree.io/myynqdzd"
method="POST"
>
<!-- add your custom form HTML here -->
<label>Email:</label>
<input type="email" name="email" />
<label>Message:</label>
<input type="text" name="message" />
{status === "SUCCESS" ? <p>Thanks!</p> : <button>Submit</button>}
{status === "ERROR" && <p>Ooops! There was an error.</p>}
</form>
);
}
submitForm(ev) {
ev.preventDefault();
const form = ev.target;
const data = new FormData(form);
const xhr = new XMLHttpRequest();
xhr.open(form.method, form.action);
xhr.setRequestHeader("Accept", "application/json");
xhr.onreadystatechange = () => {
if (xhr.readyState !== XMLHttpRequest.DONE) return;
if (xhr.status === 200) {
form.reset();
this.setState({ status: "SUCCESS" });
} else {
this.setState({ status: "ERROR" });
}
};
xhr.send(data);
}
}
| StarcoderdataPython |
3399876 | # homeassistant/components/blovee/dtos.py
from attr import dataclass
@dataclass
class BloveeDevice:
name: str
model: str
mac: str
err: str
is_on: bool
brightness: int
| StarcoderdataPython |
51556 | # repo: stefantaubert/life
import os
from geo.data_paths import get_suffix_pro
from geo.data_paths import get_suffix_prote
from geo.data_paths import get_suffix_o
from geo.data_dir_config import root
analysis_dir = root + "analysis/"
heatmaps_dir = root + "analysis/Heatmaps/"
most_common_values_best_features = analysis_dir + "most_common_values_best_features" + get_suffix_pro() + ".txt"
most_common_values_unique_count = analysis_dir + "most_common_values_unique_count" + get_suffix_pro() + ".pdf"
most_common_values_diagram = analysis_dir + "most_common_values" + get_suffix_pro() + ".pdf"
species_occurences_per_value = analysis_dir + "species_occurences_per_value" + get_suffix_pro() + ".pdf"
species_value_occurences = analysis_dir + "species_value_occurences" + get_suffix_pro() + ".pdf"
values_occurences_train = analysis_dir + "values_occurences_train" + get_suffix_pro() + ".pdf"
values_occurences_test = analysis_dir + "values_occurences_test" + get_suffix_pro() + ".pdf"
group_network = analysis_dir + "group_network" + get_suffix_prote() + ".pdf"
group_length_probabilities = analysis_dir + "group_length_probabilities" + get_suffix_prote() + ".pdf"
species_channel_map_dir = analysis_dir + "channel_maps" + get_suffix_pro() + "/"
value_occurences_species_dir = analysis_dir + "value_occurences_species" + get_suffix_pro() + "/"
if not os.path.exists(analysis_dir):
os.makedirs(analysis_dir)
if not os.path.exists(species_channel_map_dir):
    os.makedirs(species_channel_map_dir)
| StarcoderdataPython |
1651646 | import random
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../../..')))
from app.app.entities.shuttle import Shuttle
from app.app.routing.graph import Graph
from app.app.entities.models.battery_model import BatteryModel
from app.app.entities.models.velocity_model import VelocityModel
from app.app.routing.edge import Edge
class RandomShuttle(Shuttle):
def first_move(self):
self.edge = self.__seed()
self.position = self.edge.origin.geometry
def pick_next(self):
self.edge = self.__next_edge(self.edge)
self.position = self.edge.origin.geometry
def __seed(self):
node_ids = self.__keys(self.graph)
start_id = self.__random(node_ids)
start = self.graph[start_id]
end = self.__seed_end_node(start, None)
return Edge(start, end)
def __next_edge(self, previous):
start = previous.destination
end = self.__seed_end_node(start, previous.origin.node_id)
return Edge(start, end)
def __seed_end_node(self, node, exclude=None):
neighbors = self.__keys(node.neighbors)
if exclude is not None and exclude in neighbors and len(neighbors) > 1:
neighbors.remove(exclude)
return self.graph[self.__random(neighbors)]
@staticmethod
def __keys(some_dict):
return list(some_dict.keys())
@staticmethod
def __random(some_list):
return random.choice(some_list)
if __name__ == '__main__':
shuttle = RandomShuttle(Graph.load_default(), 0, VelocityModel(), BatteryModel())
print(shuttle.to_geojson())
for t in range(0, 100):
shuttle.move(1)
print(shuttle.to_geojson())
| StarcoderdataPython |
4808976 | <filename>sneruz/errors.py
class Error(Exception):
"""Base class for exceptions in this module"""
class TruthAssignmentError(Error):
"""Raised when a deduction is attempted with no truth assignment"""
class TableFormatError(Error):
"""Raised when a inconsistency in table size/shape is present""" | StarcoderdataPython |
3209175 | import os
import os.path
from subprocess import Popen, PIPE
import h5py
import numpy as np
import pytopkapi
def show_banner(ini_file, nb_cell, nb_time_step):
"""Show an ASCII banner at run time describing the model.
Parameters
----------
ini_file : str
The name of the PyTOPKAPI initialization file passed to the
run function.
nb_cell : int
The number of model cells to be processed.
nb_time_step : int
The number of model time-steps to be processed.
"""
print('===============================================================\n',
'\n PyTOPKAPI\n',
'A Python implementation of the TOPKAPI Hydrological model\n\n',
'Version {}\n'.format(pytopkapi.__version__),
'Number of model cells: {:d}\n'.format(nb_cell),
'Number of model time-steps: {:d}\n'.format(nb_time_step),
'Running simulation from file: {}\n'.format(ini_file),
'\r===============================================================\n')
def _create_dataset(h5file, grp_name, dset_name, shape, units):
"""Create HDF5 dataset if it doesn't exist.
"""
if '{}/{}'.format(grp_name, dset_name) not in h5file:
dset = h5file.create_dataset('{}/{}'.format(grp_name, dset_name),
shape, maxshape=(None, None), compression='gzip')
dset.attrs['units'] = units
def open_simulation_file(file_out, fmode, Vs0, Vo0, Vc0, no_data,
nb_cell, nb_time_step, append_output, first_run):
"""Open simulation file and return handles to it's content.
"""
h5file = h5py.File(file_out, fmode)
dset_shape = (nb_time_step+1, nb_cell)
h5file.attrs['title'] = 'PyTOPKAPI simulation'
h5file.attrs['pytopkapi_version'] = pytopkapi.__version__
h5file.attrs['pytopkapi_git_revision'] = pytopkapi.__git_revision__
# create file structure as necessary
grp_name = 'Soil'
dset_name = 'Qs_out'
_create_dataset(h5file, grp_name, dset_name, dset_shape, 'm3/s')
dset_Qs_out = h5file['{}/{}'.format(grp_name, dset_name)]
dset_name = 'V_s'
_create_dataset(h5file, grp_name, dset_name, dset_shape, 'm3')
dset_Vs = h5file['{}/{}'.format(grp_name, dset_name)]
grp_name = 'Overland'
dset_name = 'Qo_out'
_create_dataset(h5file, grp_name, dset_name, dset_shape, 'm3/s')
dset_Qo_out = h5file['{}/{}'.format(grp_name, dset_name)]
dset_name = 'V_o'
_create_dataset(h5file, grp_name, dset_name, dset_shape, 'm3')
dset_Vo = h5file['{}/{}'.format(grp_name, dset_name)]
grp_name = 'Channel'
dset_name = 'Qc_out'
_create_dataset(h5file, grp_name, dset_name, dset_shape, 'm3/s')
dset_Qc_out = h5file['{}/{}'.format(grp_name, dset_name)]
dset_name = 'V_c'
_create_dataset(h5file, grp_name, dset_name, dset_shape, 'm3')
dset_Vc = h5file['{}/{}'.format(grp_name, dset_name)]
dset_name = 'Ec_out'
_create_dataset(h5file, grp_name, dset_name, dset_shape, 'm3')
dset_Ec_out = h5file['{}/{}'.format(grp_name, dset_name)]
grp_name = ''
dset_name = 'ET_out'
_create_dataset(h5file, grp_name, dset_name, dset_shape, 'mm')
dset_ET_out = h5file['{}/{}'.format(grp_name, dset_name)]
dset_name = 'Q_down'
_create_dataset(h5file, grp_name, dset_name, dset_shape, 'm3/s')
dset_Q_down = h5file['{}/{}'.format(grp_name, dset_name)]
if append_output is False or first_run is True:
#Write the initial values into the output file
dset_Vs[0] = Vs0
dset_Vo[0] = Vo0
dset_Vc[0] = Vc0
dset_Qs_out[0] = np.ones(nb_cell)*no_data
dset_Qo_out[0] = np.ones(nb_cell)*no_data
dset_Qc_out[0] = np.zeros(nb_cell)
dset_Q_down[0] = np.ones(nb_cell)*no_data
dset_ET_out[0] = np.zeros(nb_cell)
dset_Ec_out[0] = np.zeros(nb_cell)
return h5file, dset_Vs, dset_Vo, dset_Vc, dset_Qs_out, \
dset_Qo_out, dset_Qc_out, dset_Q_down, dset_ET_out, dset_Ec_out
# System utility functions
def exec_command(cmd_args):
"""Execute a shell command in a subprocess
Convenience wrapper around subprocess to execute a shell command
and pass back stdout, stderr, and the return code. This function
waits for the subprocess to complete, before returning.
Usage example:
>>> stdout, stderr, retcode = exec_command(['ls', '-lhot'])
Parameters
----------
cmd_args : list of strings
The args to pass to subprocess. The first arg is the program
name.
Returns
-------
stdout : string
The contents of stdout produced by the shell command
stderr : string
The contents of stderr produced by the shell command
retcode : int
The return code produced by the shell command
"""
proc = Popen(cmd_args, stdout=PIPE, stderr=PIPE)
stdout, stderr = proc.communicate()
proc.wait()
return stdout, stderr, proc.returncode
########################
## For graphics ##
########################
def CRange(ar_x):
'''
Returns the range of an array
'''
lim=np.array([min(ar_x),max(ar_x)])
return lim
def f_axe(p,xc):
'''
Returns the value in the array xc
associated to a relative value inside [0,1]
'''
xc=np.sort(xc)
pos=xc[0]+p*(xc[-1]-xc[0])
return pos
def string(integer,len_str_out):
"""
From a given integer, return an string of length len_str_out completed by zero
Example:
ut.string(1,3)-->'001'
"""
str_zero='0'
str_int=str(integer)
len_int=len(str_int)
if len_str_out-len_int<0:
print('****ERROR: length of string too short')
str_out=''
else:
str_out=(len_str_out-len_int)*str_zero+str_int
return str_out
def from_float_array_to_string_array(ar_float,unique=False):
if unique:
a=str(np.unique(ar_float)).split()[1:]
else:
a=str(ar_float).split()[1:]
a[-1]=a[-1].replace(']','')
ar_string=a
return ar_string
##############################
## For file management ##
##############################
def check_file_exist(filename):
path_name, file_name = os.path.split(filename)
if not os.path.exists(path_name) and path_name != '':
os.makedirs(path_name)
def check_folder_exist(folder_name):
if not os.path.exists(folder_name):
os.mkdir(folder_name)
def read_one_array_hdf(file_h5, group, name):
"""Read a single array from a PyTOPKAPI simulation file.
"""
h5file_in = h5py.File(file_h5)
dset_string = '/%s/%s' % (group, name)
array = h5file_in[dset_string][...]
h5file_in.close()
return array
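# Example (added for illustration), using the group/dataset names written by
# open_simulation_file() above:
#   ar_Qc = read_one_array_hdf('simulation_results.h5', 'Channel', 'Qc_out')
# returns the channel outflow as a (time steps, cells) numpy array; the file name
# here is only a placeholder.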
##############################
## Statistics ##
##############################
def mov_avg(ar_float,period):
'''
period is a multiple of 2
'''
nb_ind=len(ar_float)-period
ar_out=np.zeros(nb_ind)
for i in range(nb_ind):
        n=period//2  # integer division so the slice indices stay ints under Python 3
ind_mid=i+n
ar_out[i]=np.average(ar_float[ind_mid-n:ind_mid+n])
return ar_out
##~~~ Comparison of 2 vectors ~~~~##
# Functions defining useful criteria comparing two vectors
## REFERENCE is Y
def R(ar_x,ar_y):
R=np.corrcoef(ar_x,ar_y)
return R[0,1]
def R2(ar_x,ar_y):
R=np.corrcoef(ar_x,ar_y)
return R[0,1]**2
def Nash(ar_x,ar_y):
eff=1-sum((ar_y-ar_x)**2)/sum((ar_y-np.mean(ar_y))**2)
return eff
def RMSE(ar_x,ar_y):
rmserr=(np.mean((ar_y-ar_x)**2))**0.5
return rmserr
def RMSE_norm(ar_x,ar_y):
rmserr=(np.mean((ar_y-ar_x)**2))**0.5
rmsenorm=rmserr/np.mean(ar_y)
return rmsenorm
def Bias_cumul(ar_x,ar_y):
b=sum(ar_x)/sum(ar_y)
return b
def Diff_cumul(ar_x,ar_y):
diff=sum(ar_x)-sum(ar_y)
return diff
def Abs_cumul(ar_x,ar_y):
abs_diff=abs(sum(ar_x)-sum(ar_y))
return abs_diff
def Err_cumul(ar_x,ar_y):
err_rel=abs(sum(ar_x)-sum(ar_y))/sum(ar_y)
return err_rel
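# Small worked example (added for illustration), with y as the observed reference:
#   x = np.array([1.0, 2.0, 3.0]); y = np.array([1.0, 2.5, 2.5])
#   Nash(x, y)       -> 1 - 0.5/1.5  = 0.667
#   RMSE(x, y)       -> sqrt(0.5/3) ~= 0.408
#   Bias_cumul(x, y) -> 6.0/6.0     = 1.0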
##############################
## HDF5 files ##
##############################
####HOW to remove a group
#h5file.removeNode('/', groupname)
##############################
## Works on vectors ##
##############################
def find_dist_max(ar_coorx,ar_coory):
"""
Compute the maximum distance between several points defined by their coordinates ar_coorx and ar_coory
"""
nb_cell=len(ar_coorx)
max_dist=0.
for i in range(nb_cell):
for j in range(nb_cell):
max_dist=max(max_dist,distance(ar_coorx[i],ar_coory[i],ar_coorx[j],ar_coory[j]))
return max_dist
def distance(x1,y1,x2,y2):
"""
Compute the distance between two points
"""
dist=((x1-x2)**2+(y1-y2)**2)**0.5
return dist
def find_cell_coordinates(ar_cell_label, Xoutlet, Youtlet,
ar_coorx, ar_coory, ar_lambda, channel=True):
"""Find the label of the cell closest to (Xoutlet, Youtlet).
Find the label of the model cell containing the specified location. The
co-ordinates of the location must be given in the same co-ordinate system
as that specifying the model catchment.
Parameters
----------
ar_cell_label : (N,) int array
Numbers labelling each cell.
Xoutlet : float
The x co-ordinate of a point. This is the Longitude expressed in
metres using the same projection as `ar_coorx`.
Youtlet : float
        The y co-ordinate of a point. This is the Latitude expressed in
        metres using the same projection as `ar_coory`.
ar_coorx : (N,) float array
The x co-ordinate of the centre of each cell (m). This is the Longitude
expressed in metres using a Transverse Mercator projection, but any
appropriate projection can be used.
ar_coory : (N,) float array
The y co-ordinate of the centre of each cell (m). This is the Latitude
expressed in metres using a Transverse Mercator projection, but any
appropriate projection can be used.
ar_lambda : (N,) int array
Switch indicating whether the current cell contains a channel. A value
of `1` indicates a channel cell, `0` indicates no channel.
channel : boolean (default=True)
Allows cells with or without channels to be chosen.
Returns
-------
cell_outlet : int
The label for the cell closest to the defined location.
"""
tab_x=np.unique(ar_coorx);X=abs(tab_x[0]-tab_x[1])
dist_max=3*X
dist_min=dist_max
nb_cell=len(ar_cell_label)
cell_outlet=-999.9
for i in range(nb_cell):
dist=distance(Xoutlet,Youtlet,ar_coorx[i],ar_coory[i])
if channel:
if dist < dist_min and ar_lambda[i]==1.:
dist_min=dist
cell_outlet=ar_cell_label[i]
else:
if dist<dist_min:
dist_min=dist
cell_outlet=ar_cell_label[i]
if cell_outlet<0:
print("Wrong coordinates")
stop
return cell_outlet
| StarcoderdataPython |
3316400 | # repo: atr00025/ML_Image_classification_with_Tensorflow, file: cat-vs-dog.py
import os
import zipfile
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from tensorflow.keras import layers
from tensorflow.keras import Model
from tensorflow.keras.optimizers import SGD
from tensorflow.keras.optimizers import RMSprop
from tensorflow.keras.applications.inception_v3 import InceptionV3
from tensorflow.keras.preprocessing.image import ImageDataGenerator
# Edit the storage directories below to match this file's location.
# Shell command (run outside Python, or prefix with '!' in a notebook cell):
#   wget --no-check-certificate \
#     https://storage.googleapis.com/mledu-datasets/inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5 \
#     -O /Image_classification_part3/inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5
local_weights_file = '/Image_classification_part3/inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5'
pre_trained_model = InceptionV3(
input_shape=(150, 150, 3), include_top=False, weights=None)
pre_trained_model.load_weights(local_weights_file)
# Edit the storage directories below to match this file's location.
# Shell command (run outside Python, or prefix with '!' in a notebook cell):
#   wget --no-check-certificate \
#     https://storage.googleapis.com/mledu-datasets/cats_and_dogs_filtered.zip -O \
#     /Image_classification_part3/cats_and_dogs_filtered.zip
local_zip = '/Image_classification_part3/cats_and_dogs_filtered.zip'
zip_ref = zipfile.ZipFile(local_zip, 'r')
zip_ref.extractall('/Image_classification_part3')
zip_ref.close()
for layer in pre_trained_model.layers:
layer.trainable = False
last_layer = pre_trained_model.get_layer('mixed7')
print('last layer output shape: ', last_layer.output_shape)
last_output = last_layer.output
x = layers.Flatten()(last_output)
x = layers.Dense(1024, activation='relu')(x)
x = layers.Dropout(0.2)(x)
x = layers.Dense(1, activation='sigmoid')(x)
model = Model(pre_trained_model.input, x)
model.compile(loss='binary_crossentropy',
optimizer=RMSprop(lr=0.0001),
metrics=['acc'])
base_dir = 'C:/ML things/Image_classification_part2/cats_and_dogs_filtered'
train_dir = os.path.join(base_dir, 'train')
validation_dir = os.path.join(base_dir, 'validation')
#Dir with training cat data
train_cats_dir = os.path.join(train_dir, 'cats')
#Dir with training dog
train_dogs_dir = os.path.join(train_dir, 'dogs')
#Dir with validaton cat pictures
validation_cats_dir = os.path.join(validation_dir, 'cats')
#Dir with validation dog pictures
validation_dogs_dir = os.path.join(validation_dir, 'dogs')
train_cat_fnames = os.listdir(train_cats_dir)
train_dog_fnames = os.listdir(train_dogs_dir)
train_datagen = ImageDataGenerator(
rescale=1./255,
rotation_range=40,
width_shift_range=0.2,
height_shift_range=0.2,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True)
val_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow_from_directory(
train_dir,
target_size=(150, 150),
batch_size=20,
class_mode='binary')
validation_generator = val_datagen.flow_from_directory(
validation_dir,
target_size=(150, 150),
batch_size=20,
class_mode='binary')
history = model.fit_generator(
train_generator,
steps_per_epoch=100,
epochs=2,
validation_data=validation_generator,
validation_steps=50,
verbose=2)
unfreeze = False
for layer in pre_trained_model.layers:
if unfreeze:
layer.trainable = True
if layer.name == 'mixed6':
unfreeze = True
model.compile(loss='binary_crossentropy',
optimizer=SGD(lr=0.0001,
momentum=0.9),
metrics=['acc'])
history = model.fit_generator(
train_generator,
steps_per_epoch=100,
epochs=50,
validation_data=validation_generator,
validation_steps=50,
verbose=2)
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(len(acc))
plt.plot(epochs, acc)
plt.plot(epochs, val_acc)
plt.title('Training and validation accuracy.')
plt.figure()
plt.plot(epochs, loss)
plt.plot(epochs, val_loss)
plt.title('Training and validation loss.')
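# Not part of the original script: when run outside a notebook, matplotlib needs an explicit
# call to actually display the two figures built above.
plt.show()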
| StarcoderdataPython |
3621 | <reponame>ehiller/mobilecsp-v18<filename>modules/courses/courses.py
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Courses module."""
__author__ = '<NAME> (<EMAIL>)'
from common import resource
from controllers import assessments
from controllers import lessons
from controllers import utils
from models import content
from models import resources_display
from models import custom_modules
from models import roles
from tools import verify
All_LOCALES_PERMISSION = 'can_pick_all_locales'
All_LOCALES_DESCRIPTION = 'Can pick all locales, including unavailable ones.'
SEE_DRAFTS_PERMISSION = 'can_see_draft_content'
SEE_DRAFTS_DESCRIPTION = 'Can see lessons and assessments with draft status.'
custom_module = None
def can_pick_all_locales(app_context):
return roles.Roles.is_user_allowed(
app_context, custom_module, All_LOCALES_PERMISSION)
def can_see_drafts(app_context):
return roles.Roles.is_user_allowed(
app_context, custom_module, SEE_DRAFTS_PERMISSION)
def register_module():
"""Registers this module in the registry."""
def on_module_enabled():
roles.Roles.register_permissions(custom_module, permissions_callback)
resource.Registry.register(resources_display.ResourceCourseSettings)
resource.Registry.register(resources_display.ResourceUnit)
resource.Registry.register(resources_display.ResourceAssessment)
resource.Registry.register(resources_display.ResourceLink)
resource.Registry.register(resources_display.ResourceLesson)
resource.Registry.register(utils.ResourceHtmlHook)
def permissions_callback(unused_app_context):
return [
roles.Permission(All_LOCALES_PERMISSION, All_LOCALES_DESCRIPTION),
roles.Permission(SEE_DRAFTS_PERMISSION, SEE_DRAFTS_DESCRIPTION)
]
# provide parser to verify
verify.parse_content = content.parse_string_in_scope
# setup routes
courses_routes = [
('/', lessons.CourseHandler),
('/activity', lessons.UnitHandler),
('/answer', assessments.AnswerHandler),
('/assessment', lessons.AssessmentHandler),
('/course', lessons.CourseHandler),
('/forum', utils.ForumHandler),
('/preview', utils.PreviewHandler),
('/register', utils.RegisterHandler),
('/resources', utils.ResourcesHandler),
('/rest/locale', utils.StudentLocaleRESTHandler),
('/review', lessons.ReviewHandler),
('/reviewdashboard', lessons.ReviewDashboardHandler),
('/student/editstudent', utils.StudentEditStudentHandler),
('/student/settracks', utils.StudentSetTracksHandler),
('/student/home', utils.StudentProfileHandler),
('/student/unenroll', utils.StudentUnenrollHandler),
('/unit', lessons.UnitHandler)]
global custom_module # pylint: disable=global-statement
custom_module = custom_modules.Module(
'Course',
'A set of pages for delivering an online course.',
[], courses_routes,
notify_module_enabled=on_module_enabled)
return custom_module
| StarcoderdataPython |
3383930 | from django.test import RequestFactory
from manager.api.views.docs import schema_view
def run(to):
response = schema_view(RequestFactory().get("/api/schema"))
response.render()
with open(to, "wb") as file:
file.write(response.content)
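# A minimal usage sketch (hypothetical output path; assumes the Django settings for the
# "manager" project are configured before this module is imported):
#
#     run("openapi-schema.json")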
| StarcoderdataPython |
163026 | <reponame>iKsSs/sslsplit<filename>extra/logreader.py
#!/usr/bin/env python
# vim: set ft=python list et ts=8 sts=4 sw=4:
# SSLsplit contributed code: Log parser for sslsplit -L
# This script reads the log from standard input and parses it.
# Standard input can point to a file or a named pipe.
# Copyright (C) 2015, <NAME> <<EMAIL>>.
# Copyright (C) 2015, <NAME> <<EMAIL>>.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS''
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import sys
import os
import select
import re
def read_line(f):
"""Read a single line from a file stream; return empty string on EOF"""
buf = ''
while not buf.endswith("\n"):
r, w, e = select.select([f], [], [])
if r:
nextbyte = f.read(1)
if not nextbyte:
return ''
buf += nextbyte
else:
break
return buf
def read_count(f, n):
"""Read n bytes from a file stream; return empty string on EOF"""
buf = ''
while len(buf) < n:
nextchunk = f.read(n - len(buf))
if not nextchunk:
return ''
buf += nextchunk
return buf
class LogSyntaxError(Exception):
"""SSLsplit log file contains unexpected syntax"""
pass
def parse_header(line):
"""Parse the header line into a dict with useful fields"""
# 2015-09-27 14:55:41 UTC [192.0.2.1]:56721 -> [192.0.2.2]:443 (37):
m = re.match(r'(\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2} \S+) \[(.+?)\]:(\d+) -> \[(.+?)\]:(\d+) \((\d+|EOF)\):?', line)
if not m:
raise LogSyntaxError(line)
res = {}
res['timestamp'] = m.group(1)
res['src_addr'] = m.group(2)
res['src_port'] = int(m.group(3))
res['dst_addr'] = m.group(4)
res['dst_port'] = int(m.group(5))
if m.group(6) == 'EOF':
res['eof'] = True
else:
res['eof'] = False
res['size'] = int(m.group(6))
return res
def parse_log(f):
"""Read log entries from file stream in blocking mode until EOF"""
while True:
line = read_line(f)
if not line:
break
res = parse_header(line)
if (not res['eof']):
res['data'] = read_count(f, res['size'])
yield res
if __name__ == '__main__':
for result in parse_log(sys.stdin):
        print(result)
| StarcoderdataPython |
1714049 | <reponame>mvirkkunen/dwprog<filename>generatedevices.py<gh_stars>10-100
#!/usr/bin/env python3
# converts Atmel device files into devices.py
from xml.etree import ElementTree
from fnmatch import fnmatch
from zipfile import ZipFile
import sys
spmcsr_bits = {
0x01: ["SPMEN", "SELFPRGEN"],
0x02: ["PGERS"],
0x04: ["PGWRT"],
}
def process_doc(doc):
device = doc.find("devices/device")
name = device.attrib["name"]
jtagid = device.find("property-groups/property-group[@name='SIGNATURES']/property[@name='JTAGID']")
flash = device.find("address-spaces/address-space[@name='prog']/memory-segment[@name='FLASH']")
cpu = doc.find("modules/module[@name='CPU']")
dwdr = doc.find(".//register[@name='DWDR']")
spmcsr = doc.find(".//register[@name='SPMCSR']")
if spmcsr is None:
spmcsr = doc.find(".//register[@name='SPMCR']")
errors = []
if jtagid is None:
errors.append("no JTAGID")
if flash is None:
errors.append("no flash segment")
if spmcsr is None:
errors.append("no SPMCSR register")
else:
for bmask, bname in spmcsr_bits.items():
field = next(
(f for f in spmcsr.findall("bitfield") if int(f.attrib["mask"], 16) == bmask),
None)
if field is None:
errors.append("missing " + str(bname) + " bit")
elif field.attrib["name"] not in bname:
errors.append(str(bname) + " field has a weird name")
if len(errors):
print("Unsupported device: {} ({})".format(name, ", ".join(errors)), file=sys.stderr)
return
devid = name.lower()
signature = int(jtagid.attrib["value"], 16)
flash_size = int(flash.attrib["size"], 16)
flash_pagesize = int(flash.attrib["pagesize"], 16)
reg_dwdr = (int(dwdr.attrib["offset"], 16) - 0x20) if dwdr is not None else None
reg_spmcsr = int(spmcsr.attrib["offset"], 16) - 0x20
print(" Device(devid=\"{}\", name=\"{}\", signature=0x{:x}, flash_size=0x{:x}, flash_pagesize=0x{:x}, reg_dwdr={}, reg_spmcsr=0x{:x}),"
.format(
devid,
name,
signature,
flash_size,
flash_pagesize,
"0x{:x}".format(reg_dwdr) if reg_dwdr else "None",
reg_spmcsr))
if len(sys.argv) < 2:
print("USAGE: generatedevices.py *.atpack", file=sys.stderr)
sys.exit(1)
print("""# Generated with generatedevices.py
class Device:
def __init__(self, **kwargs):
for key in kwargs:
setattr(self, key, kwargs[key])
devices = [""")
for packfile in sys.argv[1:]:
with ZipFile(packfile) as zfile:
for name in (n for n in zfile.namelist() if fnmatch(n, "*.atdf")):
with zfile.open(name) as dfile:
doc = ElementTree.fromstring(dfile.read())
process_doc(doc)
print("]") | StarcoderdataPython |
1756291 | import shutil
import time
SECONDS_IN_HOUR = 3600
SECONDS_IN_MINUTE = 60
SHORTEST_AMOUNT_OF_TIME = 0
class InvalidTimeParameterError(Exception):
pass
def _get_delay_time(session):
"""
Helper function to extract the delay time from the session.
:param session: Pytest session object.
:return: Returns the delay time for each test loop.
"""
return session.config.option.delay
def _get_total_time(session):
"""
Takes all the user available time options, adds them and returns it in seconds.
:param session: Pytest session object.
:return: Returns total amount of time in seconds.
"""
hours_in_seconds = session.config.option.hours * SECONDS_IN_HOUR
minutes_in_seconds = session.config.option.minutes * SECONDS_IN_MINUTE
seconds = session.config.option.seconds
total_time = hours_in_seconds + minutes_in_seconds + seconds
if total_time < SHORTEST_AMOUNT_OF_TIME:
raise InvalidTimeParameterError(f"Total time cannot be less than: {SHORTEST_AMOUNT_OF_TIME}!")
return total_time
def _print_loop_count(count):
"""
Helper function to simply print out what loop number we're on.
:param count: The number to print.
:return: None.
"""
column_length = shutil.get_terminal_size().columns
print("\n")
print(f" Loop # {count} ".center(column_length, "="))
def _timed_out(session, start_time):
"""
Helper function to check if the user specified amount of time has lapsed.
:param session: Pytest session object.
:return: Returns True if the timeout has expired, False otherwise.
"""
return time.time() - start_time > _get_total_time(session)
def pytest_addoption(parser):
"""
Add our command line options.
"""
stress = parser.getgroup("stress")
stress.addoption(
"--delay",
action="store",
default=0,
help="The amount of time to wait between each test loop.",
type=int,
)
stress.addoption(
"--hours",
action="store",
default=0,
help="The number of hours to loop the tests for.",
type=int,
)
stress.addoption(
"--minutes",
action="store",
default=0,
help="The number of minutes to loop the tests for.",
type=int,
)
stress.addoption(
"--seconds",
action="store",
default=0,
help="The number of seconds to loop the tests for.",
type=int,
)
def pytest_runtestloop(session):
"""
Reimplement the test loop but loop for the user defined amount of time.
Note: Check against pytest repo for any updates so we don't fall behind.
"""
if session.testsfailed and not session.config.option.continue_on_collection_errors:
raise session.Interrupted("%d errors during collection" % session.testsfailed)
if session.config.option.collectonly:
return True
start_time = time.time()
count = 1
while True:
if _get_total_time(session):
_print_loop_count(count)
for index, item in enumerate(session.items):
next_item = session.items[index + 1] if index + 1 < len(session.items) else None
item.config.hook.pytest_runtest_protocol(item=item, nextitem=next_item)
if session.shouldfail:
raise session.Failed(session.shouldfail)
if session.shouldstop:
raise session.Interrupted(session.shouldstop)
count += 1
if _timed_out(session, start_time):
break
time.sleep(_get_delay_time(session))
return True
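# A usage sketch (hypothetical invocation; the option names come from pytest_addoption above):
#
#     pytest --hours=1 --minutes=30 --delay=5
#
# loops the collected tests for 1.5 hours, sleeping 5 seconds between loops.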
| StarcoderdataPython |
3335974 | <reponame>paultag/billy<filename>billy/reports/legislators.py
from collections import defaultdict
from billy.core import db
from billy.core import settings
from billy.reports.utils import update_common
# semi-optional keys to check for on active legislators
checked_keys = ('photo_url', 'url', 'email', 'transparencydata_id', 'offices')
def scan_legislators(abbr):
duplicate_sources = defaultdict(int)
report = {'upper_active_count': 0,
'lower_active_count': 0,
'inactive_count': 0,
'_updated_today_count': 0,
'_updated_this_month_count': 0,
'_updated_this_year_count': 0,
}
for key in checked_keys:
report[key] = 0
# initialize seat counts
district_seats = {'upper': defaultdict(int), 'lower': defaultdict(int)}
for district in db.districts.find({'abbr': abbr}):
district_seats[district['chamber']][district['name']] = \
district['num_seats']
for leg in db.legislators.find({settings.LEVEL_FIELD: abbr}):
# do common details
update_common(leg, report)
# most checks only apply to active set
if leg.get('active'):
chamber = leg.get('chamber')
if chamber == 'upper':
report['upper_active_count'] += 1
elif chamber == 'lower':
report['lower_active_count'] += 1
else:
# TODO: track these? (executives)
continue
# decrement empty seats (if it goes negative, we have too many)
district_seats[chamber][leg['district']] -= 1
for key in checked_keys:
if leg.get(key):
report[key] += 1
else:
report['inactive_count'] += 1
for source in leg['sources']:
duplicate_sources[source['url']] += 1
report['duplicate_sources'] = []
for url, n in duplicate_sources.iteritems():
if n > 1:
report['duplicate_sources'].append(url)
# copy over seat issues into report
report['overfilled_seats'] = []
report['vacant_seats'] = []
for chamber, chamber_seats in district_seats.iteritems():
for seat, count in chamber_seats.iteritems():
if count < 0:
report['overfilled_seats'].append((chamber, seat, -count))
elif count > 0:
report['vacant_seats'].append((chamber, seat, count))
return report
def calculate_percentages(report):
active_count = float(report['lower_active_count'] +
report['upper_active_count'])
total_count = float(active_count + report['inactive_count']) / 100
if active_count:
for key in checked_keys:
report[key] /= (active_count / 100)
if total_count:
report['updated_this_year'] = (report.pop('_updated_this_year_count') /
total_count)
report['updated_this_month'] = (report.pop('_updated_this_month_count')
/ total_count)
report['updated_today'] = (report.pop('_updated_today_count') /
total_count)
def legislator_report(abbr):
report = scan_legislators(abbr)
calculate_percentages(report)
return report
| StarcoderdataPython |
3395205 | <filename>hojehatransportes/social_auth_extra/sapo.py
"""
SAPO OAuth support.
This adds support for SAPO OAuth service. An application must
be registered first on twitter and the settings SAPO_CONSUMER_KEY
and SAPO_CONSUMER_SECRET must be defined with they corresponding
values.
User screen name is used to generate username.
By default account id is stored in extra_data field, check OAuthBackend
class for details on how to extend it.
"""
import json
from social.backends.oauth import BaseOAuth1
# Sapo configuration
SAPO_SERVER = 'id.sapo.pt'
SAPO_REQUEST_TOKEN_URL = 'https://%s/oauth/request_token' % SAPO_SERVER
SAPO_ACCESS_TOKEN_URL = 'https://%s/oauth/access_token' % SAPO_SERVER
# Note: oauth/authorize forces the user to authorize every time.
# oauth/authenticate uses their previous selection, barring revocation.
SAPO_AUTHORIZATION_URL = 'http://%s/oauth/authenticate' % SAPO_SERVER
SAPO_CHECK_AUTH = 'https://services.sapo.pt/SSO/GetPublicProfile'
class SapoOAuth(BaseOAuth1):
"""Sapo OAuth authentication backend"""
name = 'sapo'
EXTRA_DATA = [('id', 'id')]
AUTHORIZATION_URL = SAPO_AUTHORIZATION_URL
REQUEST_TOKEN_URL = SAPO_REQUEST_TOKEN_URL
ACCESS_TOKEN_URL = SAPO_ACCESS_TOKEN_URL
SERVER_URL = SAPO_SERVER
SETTINGS_KEY_NAME = 'SAPO_CONSUMER_KEY'
SETTINGS_SECRET_NAME = 'SAPO_CONSUMER_SECRET'
def get_user_details(self, response):
"""Return user details from Sapo account"""
return {'username': response['screen_name'],
'email': '', # not supplied
'fullname': response['name'],
'first_name': response['name'],
'last_name': ''}
def user_data(self, access_token):
"""Return user data provided"""
request = self.oauth_request(access_token, SAPO_CHECK_AUTH)
        # Do not shadow the json module with the response body.
        response = self.fetch_response(request)
        try:
            return json.loads(response)
        except ValueError:
            return None
# Backend definition
BACKENDS = {
'sapo': SapoOAuth,
}
| StarcoderdataPython |
3309526 | <reponame>AlgTUDelft/ExpensiveOptimBenchmark
from .base import BaseProblem
import os.path
import numpy as np
import tsplib95
import networkx
class TSP(BaseProblem):
def __init__(self, name, W, n_iter, noise_seed=None, noise_factor=1):
self.name = name
self.d = W.shape[0] - 2
self.W = W
self.n_iter = n_iter
self.noise_seed = noise_seed
self.noise_rng = np.random.RandomState(seed=noise_seed)
self.noise_factor = noise_factor
def evaluate(self, x):
robust_total_route_length = 0.0
for iteration in range(self.n_iter):
current = 0
unvisited = list(range(1, self.d+2))
total_route_length = 0.0
for di, i in enumerate(x):
next_up = unvisited.pop(int(round(i)))
total_route_length += self.W[current, next_up]
total_route_length += self.noise_rng.random() * self.noise_factor
current = next_up
last = unvisited.pop()
total_route_length += self.W[current, last]
total_route_length += self.noise_rng.random() * self.noise_factor
total_route_length += self.W[last, 0]
total_route_length += self.noise_rng.random() * self.noise_factor
robust_total_route_length = max(total_route_length, robust_total_route_length)
return robust_total_route_length
def lbs(self):
return np.zeros(self.d, dtype=int)
def ubs(self):
return np.array([self.d-x for x in range(0, self.d)])
def vartype(self):
return np.array(['int'] * self.d)
def dims(self):
return self.d
def __str__(self):
return f"TSP(name={self.name},iterations={self.n_iter},noise_seed={self.noise_seed})"
def load_explicit_tsp(path, iters=100, noise_seed=0):
with open(path) as f:
        # Assumes whitespace-separated rows; the original map() call was missing its iterable.
        W = np.array([list(map(lambda x: float(x.strip()), line.split()))
                      for line in f.readlines()])
return TSP(os.path.basename(path), W, iters, noise_seed=noise_seed)
def load_tsplib(path, iters=100, noise_seed=0):
instance = tsplib95.load(path)
W = networkx.to_numpy_matrix(instance.get_graph())
return TSP(instance.name, W, iters, noise_seed=noise_seed)
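# A minimal usage sketch (hypothetical 3-city weight matrix, not part of the original module):
#
#     W = np.array([[0.0, 2.0, 9.0],
#                   [2.0, 0.0, 6.0],
#                   [9.0, 6.0, 0.0]])
#     problem = TSP("toy", W, n_iter=10, noise_seed=0)
#     x = problem.lbs()              # one valid point in the search space
#     print(problem.evaluate(x))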
| StarcoderdataPython |
60572 | import pathlib
from setuptools import setup, find_packages
from pokemontcgsdkasync.config import __version__, __pypi_package_name__, __github_username__, __github_repo_name__
# The directory containing this file
HERE = pathlib.Path(__file__).parent
# The text of the README file
README = (HERE / "README.md").read_text(encoding='utf-8')
tests_require = [
'mock',
'nose',
'coverage',
'yanc',
'preggy',
'tox',
'ipdb',
'coveralls',
'sphinx',
]
url = 'https://github.com/' + __github_username__ + '/' + __github_repo_name__
download_url = "{}/tarball/{}".format(url, __version__)
setup(
name=__pypi_package_name__,
version=__version__,
description='Pokemon TCG SDK for pokemontcg.io using asyncio',
long_description=README,
long_description_content_type="text/markdown",
url=url,
author='<NAME>',
author_email='<EMAIL>',
license='MIT',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Programming Language :: Python :: 3',
'Operating System :: OS Independent',
],
keywords='pokemon tcg sdk trading card game api rest async',
download_url=download_url,
packages=find_packages(),
include_package_data=False,
python_requires=">=3.6",
install_requires=[
# add your dependencies here
# remember to use 'package-name>=x.y.z,<x.y+1.0' notation (this way you get bugfixes)
'dacite>=1.6.0,<2.0.0',
'aiohttp>=3.8.1,<4.0.0',
],
extras_require={
'tests': tests_require,
},
# entry_points={
# 'console_scripts': [
# # add cli scripts here in this form:
# # 'pokemontcgsdk=pokemontcgsdk.cli:main',
# ],
# },
)
| StarcoderdataPython |
67934 | <gh_stars>0
# Generated by Django 3.1.7 on 2021-03-24 16:17
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('employee_information_site', '0008_auto_20210324_2116'),
]
operations = [
migrations.AlterModelOptions(
name='employee',
options={'ordering': ['-department', 'position', 'full_name']},
),
]
| StarcoderdataPython |
189541 | # HEAD
# Classes - Metaclasses for class modification
# DESCRIPTION
# Describes how to use metaclasses dynamically to modify classes during instantiation
# Describes how to add attributes to a metaclass
# or re-implement type's __call__ method
# Public
# RESOURCES
#
# /opt/pycharm/pycharm-community-2019.2.1/bin# sh pycharm.sh
# Concept taken from following
# https://github.com/django/django/blob/master/django/db/models/base.py
# Creating a Base meta class using type
class ModelBase(type):
    # Creating a method to be applied to all classes that use the metaclass ModelBase
def hello(cls):
print("Test", type(cls))
# __new__ , __init__ , __call__ , __dict__
# Overriding implementation of __init__ of type and returning a class
def __init__(cls, name, bases, dct):
# Printing arguments
print('bases', bases)
print('name', name)
print('dict', dct)
print('cls.__dict__', cls.__dict__)
# Returning the class as is - No changes
# init passes an argument of cls or self
return super(ModelBase, cls).__init__(cls)
def __call__(self, *args, **kwargs):
# Adding hello and sayHello method
setattr(self, "hello", self.hello)
setattr(self, "sayHello", self.hello)
# Returning the modified class with two new methods
# Call does not pass a argument
return super(ModelBase, self).__call__()
class MyTest(metaclass=ModelBase):
attributesname = 10
# Creating a method testhello
def testhello(self):
self.sayHello()
print("Printing class details inside of MyTest", type(self))
# Instantiating MyTest class extended using the metaclass ModelBase
obj = MyTest()
# Checking availability of attributes/methods
obj.sayHello()
obj.hello()
obj.testhello()
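# A small check (not in the original) confirming what the metaclass adds at instantiation time:
# ModelBase.__call__ attaches both attributes before MyTest() returns its instance.
assert hasattr(obj, "hello") and hasattr(obj, "sayHello")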
| StarcoderdataPython |
9460 | <filename>arc113/b.py
# included from snippets/main.py
def debug(*x, msg=""):
import sys
print(msg, *x, file=sys.stderr)
def solve(SOLVE_PARAMS):
pass
def main():
A, B, C = map(int, input().split())
doubling = [B % 20]
for i in range(32):
doubling.append(
(doubling[-1] ** 2) % 20
)
BC = 1
for i in range(32):
if C % 2:
BC *= doubling[i]
BC %= 20
C //= 2
if BC == 0:
BC = 20
ret = (A % 10) ** BC
ret %= 10
print(ret)
# tests
T1 = """
4 3 2
"""
TEST_T1 = """
>>> as_input(T1)
>>> main()
4
"""
T2 = """
1 2 3
"""
TEST_T2 = """
>>> as_input(T2)
>>> main()
1
"""
T3 = """
3141592 6535897 9323846
"""
TEST_T3 = """
>>> as_input(T3)
>>> main()
2
"""
T4 = """
2 10 1
"""
TEST_T4 = """
>>> as_input(T4)
>>> main()
4
"""
T5 = """
2 20 1
"""
TEST_T5 = """
>>> as_input(T5)
>>> main()
6
"""
def _test():
import doctest
doctest.testmod()
g = globals()
for k in sorted(g):
if k.startswith("TEST_"):
print(k)
doctest.run_docstring_examples(g[k], g, name=k)
def as_input(s):
"use in test, use given string as input file"
import io
f = io.StringIO(s.strip())
g = globals()
g["input"] = lambda: bytes(f.readline(), "ascii")
g["read"] = lambda: bytes(f.read(), "ascii")
if __name__ == "__main__":
import sys
input = sys.stdin.buffer.readline
read = sys.stdin.buffer.read
sys.setrecursionlimit(10 ** 6)
if sys.argv[-1] == "-t":
print("testing")
_test()
sys.exit()
main()
sys.exit()
# end of snippets/main.py
| StarcoderdataPython |
3247153 | <gh_stars>100-1000
#! /usr/bin/env python
# -*- coding: UTF-8 -*-
"""
$ ~/anaconda3/bin/python eval_pred.py --evaluate --checkpoint_dir="./runs/1523240176/checkpoints/"
$ ~/anaconda3/bin/python eval_pred.py --predict --checkpoint_dir="./runs/1523240176/checkpoints/"
"""
import numpy as np
import pandas as pd
import os
import time
import csv
import yaml
import datetime
import tensorflow as tf
from tensorflow.contrib import learn
from sklearn import metrics
import jieba
import jieba.posseg as pseg
import data_helpers
def zh_tokenizer(iterator):
for value in iterator:
yield list(jieba.cut(value, cut_all=False))
def softmax(x):
"""Compute softmax values for each sets of scores in x."""
if x.ndim == 1:
x = x.reshape((1, -1))
max_x = np.max(x, axis=1).reshape((-1, 1))
exp_x = np.exp(x - max_x)
return exp_x / np.sum(exp_x, axis=1).reshape((-1, 1))
with open("config.yml", 'r') as ymlfile:
cfg = yaml.load(ymlfile)
# Parameters
# ==================================================
# Data Parameters
tf.flags.DEFINE_string("model_type", "clf", "The type of model, classification or regression (default: clf)") # clf/reg
# Evaluating Parameters
tf.flags.DEFINE_integer("batch_size", 64, "Batch Size (default: 64)")
tf.flags.DEFINE_string("checkpoint_dir", "", "Checkpoint directory from training run")
tf.flags.DEFINE_boolean("evaluate", False, "Evaluate on all training data")
tf.flags.DEFINE_boolean("predict", False, "Predict on test dataset")
# Misc Parameters
tf.flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement")
tf.flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices")
FLAGS = tf.flags.FLAGS
FLAGS._parse_flags()
print("\nParameters:")
for attr, value in sorted(FLAGS.__flags.items()):
print("{}={}".format(attr.upper(), value))
print("")
# CHANGE THIS: Load data. Load your own evaluating set or testing set here
datasets = None
dataset_name = cfg["datasets"]["default"]
if FLAGS.evaluate:
if dataset_name == "mrpolarity":
datasets = data_helpers.get_datasets_mrpolarity(cfg["datasets"][dataset_name]["positive_data_file"]["path"],
cfg["datasets"][dataset_name]["negative_data_file"]["path"])
elif dataset_name == "20newsgroup":
datasets = data_helpers.get_datasets_20newsgroup(subset="test",
categories=cfg["datasets"][dataset_name]["categories"],
shuffle=cfg["datasets"][dataset_name]["shuffle"],
random_state=cfg["datasets"][dataset_name]["random_state"])
elif dataset_name == "financenews":
datasets = data_helpers.get_datasets_financenews(cfg["datasets"][dataset_name]["path"])
elif dataset_name == "scoringdocuments":
datasets = data_helpers.get_datasets_scoringdocuments(cfg["datasets"][dataset_name]["path"])
if FLAGS.model_type == 'clf':
x_raw, y_test = data_helpers.load_data_labels(datasets)
y_test = np.argmax(y_test, axis=1)
elif FLAGS.model_type == 'reg':
x_raw, y_test = data_helpers.load_data_label(datasets)
elif FLAGS.predict:
if dataset_name == "mrpolarity":
datasets = {"target_names": ['positive_examples', 'negative_examples']}
x_raw = ["a masterpiece four years in the making", "everything is off."]
y_test = None
elif dataset_name == "20newsgroup":
datasets = {"target_names": ['alt.atheism', 'comp.graphics', 'sci.med', 'soc.religion.christian']}
x_raw = ["The number of reported cases of gonorrhea in Colorado increased",
"I am in the market for a 24-bit graphics card for a PC"]
y_test = None
elif dataset_name == "financenews":
datasets = {"target_names": ['strong_neg_examples', 'weak_neg_examples', 'neutral_examples', 'weak_pos_examples', 'strong_pos_examples']}
datasets = data_helpers.get_datasets_financenews_test(cfg["datasets"][dataset_name]["test_path"])
x_raw = data_helpers.load_data(datasets)
y_test = None
elif dataset_name == "scoringdocuments":
datasets = {"target_names": ['document_score']}
datasets = data_helpers.get_datasets_scoringdocuments_test(cfg["datasets"][dataset_name]["test_path"])
x_raw = data_helpers.load_data(datasets)
y_test = None
# Map data into vocabulary
vocab_path = os.path.join(FLAGS.checkpoint_dir, "..", "vocab")
vocab_processor = learn.preprocessing.VocabularyProcessor.restore(vocab_path)
x_test = np.array(list(vocab_processor.transform(x_raw)))
print("\nPredicting...\n")
# Evaluation
# ==================================================
checkpoint_file = tf.train.latest_checkpoint(FLAGS.checkpoint_dir)
graph = tf.Graph()
with graph.as_default():
session_conf = tf.ConfigProto(
allow_soft_placement=FLAGS.allow_soft_placement,
log_device_placement=FLAGS.log_device_placement)
sess = tf.Session(config=session_conf)
with sess.as_default():
# Load the saved meta graph and restore variables
saver = tf.train.import_meta_graph("{}.meta".format(checkpoint_file))
saver.restore(sess, checkpoint_file)
# Get the placeholders from the graph by name
input_x = graph.get_operation_by_name("input_x").outputs[0]
# input_y = graph.get_operation_by_name("input_y").outputs[0]
dropout_keep_prob = graph.get_operation_by_name("dropout_keep_prob").outputs[0]
# Tensors we want to evaluate
scores = graph.get_operation_by_name("output/scores").outputs[0]
# Tensors we want to evaluate
predictions = graph.get_operation_by_name("output/predictions").outputs[0]
# Generate batches for one epoch
batches = data_helpers.batch_iter(list(x_test), FLAGS.batch_size, 1, shuffle=False)
# Collect the predictions here
all_predictions = []
all_probabilities = None
for index, x_test_batch in enumerate(batches):
batch_predictions_scores = sess.run([predictions, scores], {input_x: x_test_batch, dropout_keep_prob: 1.0})
all_predictions = np.concatenate([all_predictions, batch_predictions_scores[0]])
if FLAGS.model_type == 'clf':
probabilities = softmax(batch_predictions_scores[1])
elif FLAGS.model_type == 'reg':
probabilities = batch_predictions_scores[1]
if all_probabilities is not None:
all_probabilities = np.concatenate([all_probabilities, probabilities])
else:
all_probabilities = probabilities
time_str = datetime.datetime.now().isoformat()
print("{}: step {}".format(time_str, (index+1)*FLAGS.batch_size))
# Print accuracy if y_test is defined
if y_test is not None and FLAGS.model_type == 'clf':
y_test = y_test[:len(y_test)-len(y_test)%FLAGS.batch_size]
correct_predictions = float(sum(all_predictions == y_test))
print("Total number of test examples: {}".format(len(y_test)))
print("Accuracy: {:g}".format(correct_predictions/float(len(y_test))))
print(metrics.classification_report(y_test, all_predictions, target_names=datasets['target_names']))
print(metrics.confusion_matrix(y_test, all_predictions))
# Save the evaluation result or testing result to a csv
x_raw = x_raw[:len(x_raw)-len(x_raw)%FLAGS.batch_size]
if FLAGS.model_type == 'clf':
predictions_human_readable = np.column_stack((np.array(x_raw),
[int(prediction)+1 for prediction in all_predictions],
["{}".format(probability) for probability in all_probabilities]))
predict_results = pd.DataFrame(predictions_human_readable, columns=['Content','Label','Probabilities'])
elif FLAGS.model_type == 'reg':
predictions_human_readable = np.column_stack((np.array(x_raw),
["{}".format(prediction) for prediction in all_predictions],
[probability[0] for probability in all_probabilities]))
predict_results = pd.DataFrame(predictions_human_readable, columns=['Content','Value','Score'])
if FLAGS.evaluate:
out_path = os.path.join(FLAGS.checkpoint_dir, "..", "evaluation.csv")
elif FLAGS.predict:
out_path = os.path.join(FLAGS.checkpoint_dir, "..", "prediction.csv")
print("Saving evaluation to {0}".format(out_path))
predict_results.to_csv(out_path, index=False)
| StarcoderdataPython |
3389445 | <reponame>Loodos/zemberek-python
class TurkicLetter:
"""
This class represents a Letter which contains Turkic language specific attributes, such as vowel type,
English equivalent characters.
"""
UNDEFINED: 'TurkicLetter'
def __init__(self, char_value: str = False, vowel: bool = False, frontal: bool = False, rounded: bool = False,
voiceless: bool = False, continuant: bool = False):
self.char_value = char_value
self.vowel = vowel
self.frontal = frontal
self.rounded = rounded
self.voiceless = voiceless
self.continuant = continuant
def is_vowel(self):
return self.vowel
def is_consonant(self):
return not self.vowel
def is_frontal(self):
return self.frontal
def is_rounded(self):
return self.rounded
def is_voiceless(self):
return self.voiceless
def is_stop_consonant(self):
return self.voiceless and not self.continuant
def copy_for(self, c: str) -> 'TurkicLetter':
return TurkicLetter(c, self.vowel, self.frontal, self.rounded, self.voiceless, self.continuant)
def __eq__(self, other):
if self is other:
return True
elif other is not None and isinstance(other, TurkicLetter):
return self.char_value == other.char_value
else:
return False
def __hash__(self):
return ord(self.char_value)
TurkicLetter.UNDEFINED = TurkicLetter('\u0000')
| StarcoderdataPython |
1693156 | from movies import movie_dataset, movie_ratings
# Euclidean distance
def distance(movie1, movie2):
squared_difference = 0
for i in range(len(movie1)):
squared_difference += (movie1[i] - movie2[i]) ** 2
final_distance = squared_difference ** 0.5
return final_distance
# Get "K" closest titles (from "unknown") in "dataset", then do a regression using "movie_ratings"
def predict(unknown, dataset, movie_ratings, k):
# find closest titles based on distance
distances = []
for title in dataset:
movie = dataset[title]
distance_to_point = distance(movie, unknown)
distances.append([distance_to_point, title])
distances.sort()
# Get "k" neighbors
neighbors = distances[0:k]
# Estimate movie rating based doing a weighted regression
numerator = 0
denominator = 0
for neighbor in neighbors:
numerator += movie_ratings[neighbor[1]] / neighbor[0]
denominator += 1 / neighbor[0]
return numerator / denominator
print(predict([0.016, 0.300, 1.022], movie_dataset, movie_ratings, 5))
| StarcoderdataPython |
478 | <gh_stars>0
import argparse
import imageio
import progressbar
from _routines import ffi, lib
from pylab import *
from random import Random
RESOLUTIONS = {
"2160p": (3840, 2160),
"1440p": (2560, 1440),
"1080p": (1920, 1080),
"720p": (1280, 720),
"480p": (854, 480),
"360p": (640, 360),
"240p": (426, 240),
"160p": (284, 160),
"80p": (142, 80),
"40p": (71, 40),
}
def make_video_frame(rgb, indexing='ij', dither=1.0/256.0):
if dither:
rgb = [channel + random(channel.shape)*dither for channel in rgb]
if indexing == 'ij':
rgb = [channel.T for channel in rgb]
frame = stack(rgb, axis=-1)
frame = clip(frame, 0.0, 1.0)
return (frame * 255).astype('uint8')
def do_render(args, writer):
max_iter = 32
im_buf = ffi.new("double[]", args.width * args.height)
cut_buf = ffi.new("double[]", max_iter)
fixed_seed = Random(1)
for i in range(max_iter):
cut_buf[i] = i*fixed_seed.random()
for n in progressbar.progressbar(range(args.num_frames)):
tg = n / (args.num_frames - 1)
t = tg
lib.mandelbrot(im_buf, args.width, args.height, 0.7, 0.8, 3.5, t-20, cut_buf, max_iter)
im = array(list(im_buf)).reshape(args.height, args.width)
# for i in range(max_iter):
# cut_buf[i] *= 0.05**args.dt
bg = (im < 0)
im /= im.max()
fg = 1 - bg
red = im
green = 1 - im
blue = 4*im*(1-im)
blue = blue + 0.2*green
red = 0.1 + 0.8*red + green**3
green = 0.2 + 0.21*green
frame = make_video_frame([red*fg + 0.15*bg, green*fg + 0.08*bg, blue*fg + 0.1*bg], indexing=None)
writer.append_data(frame)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Render audio samples')
parser.add_argument('outfile', type=str, help='Output file name')
parser.add_argument('--params', type=str, help='Parameter YAML file name')
parser.add_argument('--resolution', choices=RESOLUTIONS.keys(), help='Video and simulation grid resolution')
parser.add_argument('--width', type=int, help='Video and simulation grid width', metavar='W')
parser.add_argument('--height', type=int, help='Video and simulation grid height', metavar='H')
parser.add_argument('--framerate', type=int, help='Video frame rate')
parser.add_argument('--video-quality', type=int, help='Video quality factor')
parser.add_argument('--video-duration', type=float, help='Duration of video to render in seconds')
args = parser.parse_args()
if not args.framerate:
args.framerate = 24
if not args.video_quality:
args.video_quality = 10
writer = imageio.get_writer(args.outfile, fps=args.framerate, quality=args.video_quality, macro_block_size=1)
# Compute derived parameters
if args.resolution:
width, height = RESOLUTIONS[args.resolution]
if not args.width:
args.width = width
if not args.height:
args.height = height
if (not args.width) or (not args.height):
raise ValueError("Invalid or missing resolution")
if not args.video_duration:
raise ValueError("Missing video duration")
args.aspect = args.width / args.height
args.num_frames = int(args.video_duration * args.framerate)
args.dt = 1.0 / args.num_frames
do_render(args, writer)
writer.close()
| StarcoderdataPython |
1606270 | <reponame>cookejames/uibbq
"""Micropython iBBQ Interface"""
__version__ = "1.0"
from struct import unpack_from
import uasyncio as asyncio
import bluetooth
import aioble
class iBBQ:
_DEVICE_NAME = "iBBQ"
_PRIMARY_SERVICE = bluetooth.UUID(0xFFF0)
_ACCOUNT_AND_VERIFY_CHARACTERISTIC = bluetooth.UUID(0xFFF2)
_SETTINGS_RESULT_CHARACTERISTIC = bluetooth.UUID(0xFFF1)
_SETTINGS_WRITE_CHARACTERISTIC = bluetooth.UUID(0xFFF5)
_REAL_TIME_DATA_CHARACTERISTIC = bluetooth.UUID(0xFFF4)
_HISTORIC_DATA_CHARACTERISTIC = bluetooth.UUID(0xFFF3)
_CREDENTIALS_MSG = b"\x21\x07\x06\x05\x04\x03\x02\x01\xb8\x22\x00\x00\x00\x00\x00"
_REALTIME_DATA_ENABLE_MSG = b"\x0B\x01\x00\x00\x00\x00"
_UNITS_FAHRENHEIT_MSG = b"\x02\x01\x00\x00\x00\x00"
_UNITS_CELSIUS_MSG = b"\x02\x00\x00\x00\x00\x00"
_REQUEST_BATTERY_LEVEL_MSG = b"\x08\x24\x00\x00\x00\x00"
def __init__(self, data_handler):
self._device = None
self._connection = None
self._real_time_data = None
self._settings_data = None
self._data_handler = data_handler
def reset(self):
self._device = None
self._connection = None
self._real_time_data = None
self._data_handler = None
self._settings_data = None
async def set_display_to_celcius(self):
await self._write(
iBBQ._PRIMARY_SERVICE,
iBBQ._SETTINGS_WRITE_CHARACTERISTIC,
iBBQ._UNITS_CELSIUS_MSG,
)
async def set_display_to_farenheit(self):
await self._write(
iBBQ._PRIMARY_SERVICE,
iBBQ._SETTINGS_WRITE_CHARACTERISTIC,
iBBQ._UNITS_FAHRENHEIT_MSG,
)
async def find_ibbq(self):
# Scan for 5 seconds, in active mode, with very low interval/window (to
# maximise detection rate).
async with aioble.scan(
5000, interval_us=30000, window_us=30000, active=True
) as scanner:
async for result in scanner:
# See if it matches our name
if result.name() == iBBQ._DEVICE_NAME:
self._device = result.device
return True
return False
async def _write(self, service, characteristic, message):
if not self._connection.is_connected():
raise "Cannot write, device disconnected"
try:
_service = await self._connection.service(service)
_characteristic = await _service.characteristic(characteristic)
await _characteristic.write(message)
return _characteristic
except asyncio.TimeoutError:
raise "Timeout during write"
async def _subscribe(self, service, characteristic):
if not self._connection.is_connected():
raise "Cannot write, device disconnected"
try:
_service = await self._connection.service(service)
_characteristic = await _service.characteristic(characteristic)
await _characteristic.subscribe()
return _characteristic
except asyncio.TimeoutError:
raise "Timeout during subscribe"
async def connect(self):
await self.find_ibbq()
if not self._device:
print("iBBQ not found")
return
print("Connecting to", self._device)
self._connection = await self._device.connect()
print("Connected to", self._device)
try:
# login
await self._write(
iBBQ._PRIMARY_SERVICE,
iBBQ._ACCOUNT_AND_VERIFY_CHARACTERISTIC,
iBBQ._CREDENTIALS_MSG,
)
# subscribe to settings
self._settings_data = await self._subscribe(
iBBQ._PRIMARY_SERVICE, iBBQ._SETTINGS_RESULT_CHARACTERISTIC
)
# subscribe to real time data
self._real_time_data = await self._subscribe(
iBBQ._PRIMARY_SERVICE, iBBQ._REAL_TIME_DATA_CHARACTERISTIC
)
# enable real time data
await self._write(
iBBQ._PRIMARY_SERVICE,
iBBQ._SETTINGS_WRITE_CHARACTERISTIC,
iBBQ._REALTIME_DATA_ENABLE_MSG,
)
# enable real time data
await self._write(
iBBQ._PRIMARY_SERVICE,
iBBQ._SETTINGS_WRITE_CHARACTERISTIC,
iBBQ._REALTIME_DATA_ENABLE_MSG,
)
asyncio.create_task(self._read_data())
except Exception as e:
self.disconnect()
raise e
async def battery_level(self):
"""Get current battery level in volts as ``(current_voltage, max_voltage)``.
Results are approximate and may differ from the
actual battery voltage by 0.1v or so.
"""
try:
await self._write(
iBBQ._PRIMARY_SERVICE,
iBBQ._SETTINGS_WRITE_CHARACTERISTIC,
iBBQ._REQUEST_BATTERY_LEVEL_MSG,
)
data = await self._settings_data.notified(1000)
if len(data) > 5:
header, current_voltage, max_voltage = unpack_from("<BHH", data)
if header == 0x24:
# From https://github.com/adafruit/Adafruit_CircuitPython_BLE_iBBQ
# Calibration was determined empirically, by comparing
# the returned values with actual measurements of battery voltage,
# on one sample each of two different products.
return (
current_voltage / 2000 - 0.3,
(6550 if max_voltage == 0 else max_voltage) / 2000,
)
return None
except Exception as e:
print("Error retrieving battery level")
print(e)
return None
async def _read_data(self):
while self._connection.is_connected():
try:
data = await self._real_time_data.notified(1000)
if data:
probe_data = []
for r in range(len(data) - 1):
if r % 2 == 0:
temperature = unpack_from("<H", data[r : r + 2])[0] / 10
probe_data.append(
None if temperature == 6552.6 else int(temperature)
)
self._data_handler(probe_data)
except Exception as e:
pass
await asyncio.sleep(0.1)
async def disconnect(self):
await self._connection.disconnect()
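# A minimal usage sketch (hypothetical; assumes a MicroPython board with uasyncio and aioble):
#
#     async def main():
#         ibbq = iBBQ(lambda probes: print("probe temperatures:", probes))
#         await ibbq.connect()
#         print("battery:", await ibbq.battery_level())
#         await asyncio.sleep(60)
#         await ibbq.disconnect()
#
#     asyncio.run(main())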
| StarcoderdataPython |
9274 | # -*- coding: utf-8 -*-
# Copyright (c) Vispy Development Team. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
import numpy as np
from os import path as op
from ..util import load_data_file
# This is the package data dir, not the dir for config, etc.
DATA_DIR = op.join(op.dirname(__file__), '_data')
def load_iris():
"""Load the iris dataset
Returns
-------
iris : NpzFile
data['data'] : a (150, 4) NumPy array with the iris' features
data['group'] : a (150,) NumPy array with the iris' group
"""
return np.load(load_data_file('iris/iris.npz',
force_download='2014-09-04'))
def load_crate():
"""Load an image of a crate
Returns
-------
crate : array
256x256x3 crate image.
"""
return np.load(load_data_file('orig/crate.npz'))['crate']
def pack_unit(value):
"""Packs float values between [0,1] into 4 unsigned int8
Returns
-------
pack: array
packed interpolation kernel
"""
pack = np.zeros(value.shape + (4,), dtype=np.ubyte)
for i in range(4):
value, pack[..., i] = np.modf(value * 256.)
return pack
def pack_ieee(value):
"""Packs float ieee binary representation into 4 unsigned int8
Returns
-------
pack: array
packed interpolation kernel
"""
return np.fromstring(value.tobytes(),
np.ubyte).reshape((value.shape + (4,)))
def load_spatial_filters(packed=True):
"""Load spatial-filters kernel
Parameters
----------
packed : bool
Whether or not the data should be in "packed" representation
for use in GLSL code.
Returns
-------
kernel : array
16x1024x4 (packed float in rgba) or
16x1024 (unpacked float)
16 interpolation kernel with length 1024 each.
names : tuple of strings
Respective interpolation names, plus "Nearest" which does
not require a filter but can still be used
"""
names = ("Bilinear", "Hanning", "Hamming", "Hermite",
"Kaiser", "Quadric", "Bicubic", "CatRom",
"Mitchell", "Spline16", "Spline36", "Gaussian",
"Bessel", "Sinc", "Lanczos", "Blackman", "Nearest")
kernel = np.load(op.join(DATA_DIR, 'spatial-filters.npy'))
if packed:
# convert the kernel to a packed representation
kernel = pack_unit(kernel)
return kernel, names
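# A usage sketch (assumes the packaged data files are available on disk):
#
#     kernel, names = load_spatial_filters(packed=True)
#     print(kernel.shape, names[0])   # expected: (16, 1024, 4) and "Bilinear"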
| StarcoderdataPython |
19458 | """
From http://jimorlin.wordpress.com/2009/02/17/colored-letters-labeled-dice-a-logic-puzzle/
There are 12 words as follows: buoy, cave, celt, flub, fork, hemp, judy, junk, limn, quip, swag, visa.
There are 24 different letters that appear in the 12 words.
The question is: can one assign the 24 letters to 4 different cubes so that the four letters of each word appear on different cubes.
There is one letter from each word on each cube.
The puzzle was created by <NAME>.
Execution:
python3 LabeledDice.py
"""
from pycsp3 import *
words = ["buoy", "cave", "celt", "flub", "fork", "hemp", "judy", "junk", "limn", "quip", "swag", "visa"]
# x[i] is the cube where the ith letter of the alphabet is put
x = VarArray(size=26, dom=lambda i: range(1, 5) if i in alphabet_positions("".join(words)) else None)
satisfy(
# the four letters of each word appear on different cubes
[AllDifferent(x[i] for i in alphabet_positions(w)) for w in words],
# each cube is assigned 6 letters
Cardinality(x, occurrences={i: 6 for i in range(1, 5)})
)
| StarcoderdataPython |
1775115 | #input data:
#```
#NZ_AFRW01000000.gbk:WGS AFRW01000001-AFRW01000138
#NZ_AFUK01000000.gbk:WGS AFUK01000001
#NZ_AFUR01000000.gbk:WGS AFUR01000001-AFUR01000007
#```
import os
import sys
from Bio import Entrez
for lines in open(sys.argv[1], 'rU'):
line = lines.strip().split()
ncbi_org_acc = line[0].split(".")[0]
if not os.path.exists(ncbi_org_acc):
os.mkdir(ncbi_org_acc)
if "-" in line[1]:
contig_acc_range = line[1].split("-")
start = int(contig_acc_range[0][-6:])
end = int(contig_acc_range[-1][-6:])
l = range(start,end+1)
prefix = contig_acc_range[0][:-6]
else:
contig = line[1].strip()
l = [int(contig[-6:])]
prefix = contig[:-6]
f = open(ncbi_org_acc + '/' + ncbi_org_acc + '.contigs.txt', 'w')
for seqs in l:
contigs = prefix + "%06d" % seqs
f.write('%s\n' % contigs)
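    # Not in the original: close the per-organism output file before moving on.
    f.close()
# A usage sketch (hypothetical script/input names; the expected line format is shown in the
# commented block at the top of this script):
#
#     python this_script.py wgs_accessions.txt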
| StarcoderdataPython |
1730325 | # Copyright (c) 2020-2021 CRS4
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import logging
import click
from flask import current_app
from flask.blueprints import Blueprint
from flask.cli import with_appcontext
from flask_migrate import current, stamp, upgrade
from lifemonitor.auth.models import User
# set module level logger
logger = logging.getLogger()
# define the blueprint for DB commands
blueprint = Blueprint('init', __name__)
# set initial revision number
initial_revision = '8b2e530dc029'
@blueprint.cli.command('db')
@click.option("-r", "--revision", default="head")
@with_appcontext
def init_db(revision):
"""
Initialize LifeMonitor App
"""
from lifemonitor.db import create_db, db, db_initialized, db_revision
is_initialized = db_initialized()
logger.info("LifeMonitor app initialized: %r", is_initialized)
if is_initialized:
current_revision = db_revision()
if not current_revision:
# if DB is initialized with no revision
# set the initial revision and then apply migrations
stamp(revision=initial_revision)
logger.info(f"Set initial revision: {initial_revision}")
# Apply migrations
logger.info(f"Applying migrations up to revision '{revision}'...")
upgrade(revision=revision)
logger.info("Migrations applied!")
logger.info("Current revision: %r", db_revision())
else:
logger.debug("Initializing DB...")
create_db(settings=current_app.config)
db.create_all()
stamp()
current()
logger.info("DB initialized")
# create a default admin user if not exists
admin = User.find_by_username('admin')
if not admin:
admin = User('admin')
admin.password = current_app.config["LIFEMONITOR_ADMIN_PASSWORD"]
db.session.add(admin)
db.session.commit()
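# A usage sketch (hypothetical; assumes the "init" blueprint above is registered on the Flask app):
#
#     flask init db --revision head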
| StarcoderdataPython |
101874 | <filename>mitosis/async_node/async_node.py<gh_stars>1-10
import logging
from contextlib import AsyncExitStack
from typing import Any, Callable, Optional
from anyio import (
TASK_STATUS_IGNORED,
BrokenResourceError,
CancelScope,
ClosedResourceError,
Event,
WouldBlock,
current_effective_deadline,
current_time,
sleep_until,
)
from anyio.abc import AsyncResource, TaskStatus
from anyio.streams.memory import MemoryObjectReceiveStream, MemoryObjectSendStream
from ..model import EdgeModel, NodeModel, PortModel, SpecificPort
from ..util import edge_matches_input_port, edge_matches_output_port
from .port_group import InGroup, OutGroup
_LOGGER = logging.getLogger(__name__)
class AsyncNode(AsyncResource):
def __init__(
self,
name: str,
model: NodeModel,
attachments_receivers: Optional[dict[str, MemoryObjectReceiveStream]] = None,
):
self.name = name
self.model = model
self._stack = AsyncExitStack()
self.running: bool = True
# Executable function
self._executable: Callable = model.get_executable()
# Timings
self._last_run = current_time()
self._time_between_runs = 1.0 / model.config.frequency # [s]
# Default Input/Output-groups TODO: Break these out into their own component
self.ins = InGroup(self.name, model.inputs)
self.outs = OutGroup(self.name, model.outputs)
# ReceiveStream for new attached connections
self._attachments_receiver: Optional[MemoryObjectReceiveStream] = None
if attachments_receivers is not None:
self._attachments_receiver = attachments_receivers[self.name]
# If all senders are empty
if self.should_stop():
self.stop()
def offer_buffers(
self,
senders: dict[SpecificPort, MemoryObjectSendStream],
receivers: dict[SpecificPort, MemoryObjectReceiveStream],
) -> None:
"""Pass buffers in and create new input/output-groups."""
self.ins = InGroup(self.name, self.model.inputs, receivers)
self.outs = OutGroup(self.name, self.model.outputs, senders)
def start(self):
"""Start the Node execution."""
self.running = True
def should_stop(self):
"""Check if a node should stop execution."""
if (
self.model.config.shut_down_when_ignored is True
and self.outs.can_send() is False
):
return True
return False
def stop(self):
"""Stop the Node execution."""
self.running = False
async def attach_new_senders(self, new_attachments):
self.outs.offer_senders(new_attachments)
async def __aenter__(self):
"""Put all senders and receivers on the stack."""
await self._stack.enter_async_context(self.ins)
await self._stack.enter_async_context(self.outs)
return self
async def aclose(self):
"""Unwind the local stack."""
await self._stack.aclose()
async def __call__(self, *, task_status: TaskStatus = TASK_STATUS_IGNORED):
"""Run the infinite loop of a node."""
with CancelScope() as scope:
task_status.started(scope)
while True:
# Starting and stopping is idempotent
if self.should_stop():
self.stop()
else:
self.start()
if self._attachments_receiver is not None:
if not self.running:
# If node is not running, it will wait here until new attachments are available
new_attachments = await self._attachments_receiver.receive()
# When received, attach new senders to output ports.
await self.attach_new_senders(new_attachments)
# Check if there are anywhere to send data now
continue
# If node is running, just check (nowait) if there are any new attachments
try:
new_attachments = self._attachments_receiver.receive_nowait()
except WouldBlock:
# This is the usual case for a running node
pass
else:
await self.attach_new_senders(new_attachments)
continue
myfunc_inputs = await self.ins.pull()
# Run executable code
myfunc_outputs = self._executable(*myfunc_inputs)
# Push results to child nodes.
await self.outs.push(myfunc_outputs)
# Wait until it is time to run again
# TODO: Different strategies for waiting.
await sleep_until(self._last_run + self._time_between_runs)
self._last_run = current_time()
| StarcoderdataPython |
1787244 | <reponame>nuagenetworks/nuage-tempest-plugin<gh_stars>1-10
# Copyright 2012 OpenStack Foundation
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import re
import time
from tempest.api.network import base
from tempest.common import utils
from tempest import exceptions
from tempest.lib.common.utils import data_utils
from tempest import test
from nuage_tempest_plugin.lib.topology import Topology
from nuage_tempest_plugin.tests.scenario \
import base_nuage_network_scenario_test
CONF = Topology.get_conf()
LOG = Topology.get_logger(__name__)
Floating_IP_tuple = collections.namedtuple('Floating_IP_tuple',
['floating_ip', 'server'])
EXTRA_DHCP_OPT_MTU_VALUE = '1498'
EXTRA_DHCP_OPT_DOMAIN_NAME = 'nuagenetworks.com'
EXTRA_DHCP_OPT_DOMAIN_SEARCH = 'sales.domain.com;eng.domain.org'
FIP_RATE_LIMIT = '5'
FIP_UPDATE_DELAY = 4
class TestNetworkBasicOps(
base_nuage_network_scenario_test.NuageNetworkScenarioTest,
base.BaseNetworkTest):
"""TestNetworkBasicOps
This smoke test suite assumes that Nova has been configured to
boot VM's with Neutron-managed networking, and attempts to
verify network connectivity as follows:
There are presumed to be two types of networks: tenant and
public. A tenant network may or may not be reachable from the
Tempest host. A public network is assumed to be reachable from
the Tempest host, and it should be possible to associate a public
('floating') IP address with a tenant ('fixed') IP address to
facilitate external connectivity to a potentially unroutable
tenant IP address.
This test suite can be configured to test network connectivity to
a VM via a tenant network, a public network, or both. If both
networking types are to be evaluated, tests that need to be
executed remotely on the VM (via ssh) will only be run against
one of the networks (to minimize test execution time).
Determine which types of networks to test as follows:
* Configure tenant network checks (via the
'tenant_networks_reachable' key) if the Tempest host should
have direct connectivity to tenant networks. This is likely to
be the case if Tempest is running on the same host as a
single-node devstack installation with IP namespaces disabled.
* Configure checks for a public network if a public network has
been configured prior to the test suite being run and if the
Tempest host should have connectivity to that public network.
Checking connectivity for a public network requires that a
value be provided for 'public_network_id'. A value can
optionally be provided for 'public_router_id' if tenants will
use a shared router to access a public network (as is likely to
be the case when IP namespaces are not enabled). If a value is
not provided for 'public_router_id', a router will be created
for each tenant and use the network identified by
'public_network_id' as its gateway.
"""
@classmethod
def skip_checks(cls):
super(TestNetworkBasicOps, cls).skip_checks()
if not (CONF.network.project_networks_reachable or
CONF.network.public_network_id):
msg = ('Either tenant_networks_reachable must be "true", or '
'public_network_id must be defined.')
raise cls.skipException(msg)
for ext in ['router', 'security-group']:
if not utils.is_extension_enabled(ext, 'network'):
msg = "%s extension not enabled." % ext
raise cls.skipException(msg)
@classmethod
def setup_credentials(cls):
# Create no network resources for these tests.
cls.set_network_resources()
super(TestNetworkBasicOps, cls).setup_credentials()
def setUp(self):
super(TestNetworkBasicOps, self).setUp()
self.keypairs = {}
self.servers = []
def _setup_network_and_servers(self, **kwargs):
boot_with_port = kwargs.pop('boot_with_port', False)
self.security_group = self._create_security_group()
self.network, self.subnet, self.router = self.create_networks(**kwargs)
self.check_networks()
self.port_id = None
if boot_with_port:
# create a port on the network and boot with that
# Don't forget to add the security group to allow ssh
extra_dhcp_opts = [
{'opt_value': EXTRA_DHCP_OPT_MTU_VALUE,
'opt_name': 'mtu'},
{'opt_value': EXTRA_DHCP_OPT_DOMAIN_NAME,
'opt_name': 'domain-name'},
{'opt_value': EXTRA_DHCP_OPT_DOMAIN_SEARCH,
'opt_name': 'domain-search'}
]
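            # For reference (added note, not from the original source): these
            # opt_name values map to standard DHCP option codes -- interface
            # MTU is option 26, domain-name is option 15 and domain-search
            # is option 119.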
port_kwargs = {
'extra_dhcp_opts': extra_dhcp_opts,
'security_groups': [self.security_group['id']]
}
self.port_id = self._create_port(
self.network['id'], **port_kwargs)['id']
name = data_utils.rand_name('server-smoke')
server = self._create_server(name, self.network, self.port_id)
self._check_nuage_tenant_network_connectivity()
# Create floating IP with FIP rate limiting
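        # Added note: nuage_fip_rate is a Nuage-specific extension attribute
        # that rate-limits traffic through the floating IP; the exact unit of
        # FIP_RATE_LIMIT depends on the plugin configuration, so treat this
        # comment as an assumption rather than a reference.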
result = self.os_primary.floating_ips_client.create_floatingip(
floating_network_id=CONF.network.public_network_id,
port_id=self.port_id,
nuage_fip_rate=FIP_RATE_LIMIT)
self.floating_ips.append(result['floatingip'])
# QA REPO -- does NOT work on dev repo - no net_resources attr
# convert to format used throughout this file
# floating_ip = self.net_resources.DeletableFloatingIp(
# client=self.os_primary.floating_ips_client,
# **result['floatingip'])
# QA repo
# DEV REPO start
floating_ip = result['floatingip']
# DEV REPO end
self.floating_ip_tuple = Floating_IP_tuple(floating_ip, server)
def check_networks(self):
"""check_networks
Checks that we see the newly created network/subnet/router via
checking the result of list_[networks,routers,subnets]
"""
seen_nets = self.os_admin.networks_client.\
list_networks()['networks']
seen_names = [n['name'] for n in seen_nets]
seen_ids = [n['id'] for n in seen_nets]
self.assertIn(self.network['name'], seen_names)
self.assertIn(self.network['id'], seen_ids)
if self.subnet:
seen_subnets = self.os_admin.subnets_client.\
list_subnets()['subnets']
seen_net_ids = [n['network_id'] for n in seen_subnets]
seen_subnet_ids = [n['id'] for n in seen_subnets]
self.assertIn(self.network['id'], seen_net_ids)
self.assertIn(self.subnet['id'], seen_subnet_ids)
if self.router:
seen_routers = self.os_admin.routers_client.\
list_routers()['routers']
seen_router_ids = [n['id'] for n in seen_routers]
seen_router_names = [n['name'] for n in seen_routers]
self.assertIn(self.router['name'],
seen_router_names)
self.assertIn(self.router['id'],
seen_router_ids)
def _create_server(self, name, network, port_id=None):
keypair = self.create_keypair()
self.keypairs[keypair['name']] = keypair
security_groups = [{'name': self.security_group['name']}]
network = {'uuid': network['id']}
if port_id is not None:
network['port'] = port_id
server = self.create_server(
name=name,
networks=[network],
key_name=keypair['name'],
security_groups=security_groups,
wait_until='ACTIVE')
return server
def _get_server_key(self, server):
return self.keypairs[server['key_name']]['private_key']
def _check_nuage_tenant_network_connectivity(self):
ssh_login = CONF.validation.image_ssh_user
for server in self.servers:
# call the common method in the parent class
super(TestNetworkBasicOps, self).\
_check_tenant_network_connectivity(
server, ssh_login, self._get_server_key(server),
servers_for_debug=self.servers)
def _check_public_connectivity(
self, should_connect=True, msg=None,
should_check_floating_ip_status=True):
"""_check_public_connectivity
Verifies connectivity to a VM via public network and floating IP,
        and verifies that the floating IP resource status is correct.
        :param should_connect: bool. Determines whether the connectivity
            check should succeed (positive) or fail (negative).
:param msg: Failure message to add to Error message. Should describe
the place in the test scenario where the method was called,
to indicate the context of the failure
:param should_check_floating_ip_status: bool. should status of
floating_ip be checked or not
"""
ssh_login = CONF.validation.image_ssh_user
floating_ip, server = self.floating_ip_tuple
ip_address = floating_ip['floating_ip_address']
private_key = None
floatingip_status = 'DOWN'
if should_connect:
private_key = self._get_server_key(server)
floatingip_status = 'ACTIVE'
# Check FloatingIP Status before initiating a connection
if should_check_floating_ip_status:
self.check_floating_ip_status(floating_ip, floatingip_status)
# call the common method in the parent class
self.check_public_network_connectivity(
ip_address, ssh_login, private_key, should_connect, msg,
self.servers)
def _disassociate_floating_ips(self):
floating_ip, server = self.floating_ip_tuple
self._disassociate_floating_ip(floating_ip)
self.floating_ip_tuple = Floating_IP_tuple(
floating_ip, None)
def _reassociate_floating_ips(self):
floating_ip, server = self.floating_ip_tuple
name = data_utils.rand_name('new_server-smoke')
# create a new server for the floating ip
server = self._create_server(name, self.network)
self._associate_floating_ip(floating_ip, server)
self.floating_ip_tuple = Floating_IP_tuple(
floating_ip, server)
def _create_new_network(self, create_gateway=False):
self.new_net = self._create_network(tenant_id=self.tenant_id)
if create_gateway:
self.new_subnet = self._create_subnet(
network=self.new_net)
else:
self.new_subnet = self._create_subnet(
network=self.new_net,
gateway_ip=None)
def _hotplug_server(self):
old_floating_ip, server = self.floating_ip_tuple
ip_address = old_floating_ip.floating_ip_address
private_key = self._get_server_key(server)
ssh_client = self.get_remote_client(ip_address,
private_key=private_key)
old_nic_list = self._get_server_nics(ssh_client)
# get a port from a list of one item
port_list = self._list_ports(device_id=server['id'])
self.assertEqual(1, len(port_list))
old_port = port_list[0]
interface = self.interface_client.create_interface(
server=server['id'],
network_id=self.new_net.id)
self.addCleanup(self.network_client.wait_for_resource_deletion,
'port',
interface['port_id'])
self.addCleanup(self.delete_wrapper,
self.interface_client.delete_interface,
server['id'], interface['port_id'])
def check_ports():
self.new_port_list = [port for port in
self._list_ports(device_id=server['id'])
if port['id'] != old_port['id']]
return len(self.new_port_list) == 1
if not test.call_until_true(
check_ports,
CONF.network.build_timeout,
CONF.network.build_interval):
raise exceptions.TimeoutException(
"No new port attached to the server in time (%s sec)! "
"Old port: %s. Number of new ports: %d" % (
CONF.network.build_timeout, old_port,
len(self.new_port_list)))
new_port = self.net_resources.DeletablePort(
client=self.network_client, **self.new_port_list[0])
def check_new_nic():
new_nic_list = self._get_server_nics(ssh_client)
self.diff_list = [n for n in new_nic_list if n not in old_nic_list]
return len(self.diff_list) == 1
if not test.call_until_true(
check_new_nic,
CONF.network.build_timeout,
CONF.network.build_interval):
raise exceptions.TimeoutException("Interface not visible on the "
"guest after %s sec"
% CONF.network.build_timeout)
num, new_nic = self.diff_list[0]
ssh_client.assign_static_ip(nic=new_nic,
addr=new_port.fixed_ips[0]['ip_address'])
ssh_client.turn_nic_on(nic=new_nic)
@staticmethod
def _get_server_nics(ssh_client):
reg = re.compile(r'(?P<num>\d+): (?P<nic_name>\w+):')
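        # Matches the interface header lines of `ip a`, e.g.
        # '2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> ...' -> ('2', 'eth0')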
ipatxt = ssh_client.get_ip_list()
return reg.findall(ipatxt)
def _check_network_internal_connectivity(self, network,
should_connect=True):
"""_check_network_internal_connectivity
via ssh check VM internal connectivity:
        - ping internal gateway and DHCP port, implying in-tenant connectivity;
          ping both, because L3 and DHCP agents might be on different nodes
"""
floating_ip, server = self.floating_ip_tuple
# get internal ports' ips:
# get all network ports in the new network
internal_ips = (p['fixed_ips'][0]['ip_address'] for p in
self._list_ports(tenant_id=server['tenant_id'],
network_id=network.id)
if p['device_owner'].startswith('network'))
self._check_server_connectivity(floating_ip,
internal_ips,
should_connect)
def _check_network_external_connectivity(self):
"""_check_network_external_connectivity
ping public network default gateway to imply external connectivity
"""
if not CONF.network.public_network_id:
msg = 'public network not defined.'
LOG.info(msg)
return
# We ping the external IP from the instance using its floating IP
# which is always IPv4, so we must only test connectivity to
# external IPv4 IPs if the external network is dualstack.
v4_subnets = [s for s in self._list_subnets(
network_id=CONF.network.public_network_id)
if s['ip_version'] == 4]
self.assertEqual(1, len(v4_subnets),
"Found %d IPv4 subnets" % len(v4_subnets))
external_ips = [v4_subnets[0]['gateway_ip']]
self._check_server_connectivity(self.floating_ip_tuple.floating_ip,
external_ips)
def _check_server_connectivity(self, floating_ip, address_list,
should_connect=True):
        ip_address = floating_ip['floating_ip_address']
private_key = self._get_server_key(self.floating_ip_tuple.server)
ssh_source = self._ssh_to_server(ip_address, private_key)
for remote_ip in address_list:
if should_connect:
msg = "Timed out waiting for %s to become reachable" \
% remote_ip
else:
msg = "ip address %s is reachable" % remote_ip
try:
self.assertTrue(self._check_remote_connectivity
(ssh_source, remote_ip, should_connect),
msg)
except Exception:
LOG.exception("Unable to access {dest} via ssh to "
"floating-ip {src}".format(dest=remote_ip,
src=floating_ip))
raise
@staticmethod
def _get_server_mtu(ssh_client, interface='eth0'):
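        # Take the interface header line of `ip a` (filtering out the 'inet'
        # lines) and cut field 5, which holds the MTU value, e.g.
        # '2: eth0: <BROADCAST,...> mtu 1498 qdisc ...' -> '1498'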
command = 'ip a | grep -v inet | grep ' + interface + \
' | cut -d" " -f 5'
mtu = ssh_client.exec_command(command)
return int(mtu)
@staticmethod
def _get_server_domain_name(ssh_client):
command = 'grep search /etc/resolv.conf | cut -d" " -f2'
domain_name = str(ssh_client.exec_command(command)).rstrip('\n')
return domain_name
def _check_extra_dhcp_opts_on_server(self, server, floating_ip_address):
private_key = self._get_server_key(server)
ssh_client = self.get_remote_client(floating_ip_address,
private_key=private_key)
        # Fetch MTU from eth0
# command = 'ip a | grep -v inet | grep eth0 | cut -d" " -f 5'
mtu = self._get_server_mtu(ssh_client, 'eth0')
domain_name = self._get_server_domain_name(ssh_client)
# Compare with values used when creating the port
self.assertEqual(int(mtu), int(EXTRA_DHCP_OPT_MTU_VALUE),
                         'Extra DHCP option <mtu> not set correctly on the VM')
self.assertEqual(domain_name, EXTRA_DHCP_OPT_DOMAIN_NAME,
                         'Extra DHCP option <domain-name> not set correctly '
'on the VM')
LOG.info("EXTRA DHCP OPTIONS validated OK")
# TODO(KRIS) FURTHER INVESTIGATE BUT SOMETHING UPSTREAM BROKE THIS TEST
# TODO(KRIS) CONNECTIVITY IS ANYHOW THESE DAYS TESTED MUCH MORE ALREADY ...
# TODO(KRIS) ADDED nuage.connectivity.
# TODO(KRIS) test_icmp_connectivity_os_managed_l3_domain_using_fip
# TODO(KRIS) for testbed also
def FIXME_KRIS_test_nuage_fip_network_basic_ops(self):
"""test_nuage_fip_network_basic_ops
Spin a VM with a security group on an internal network, with
a floating IP in the public network.
        Relies on the fact that there is connectivity from the test runner
to this network.
We use the FIP 2 underlay feature (underlay=true) on the public network
"""
        # Use a port on which we add:
        # extra dhcp options (done)
kwargs = {'boot_with_port': True}
self._setup_network_and_servers(**kwargs)
time.sleep(5) # giving time for servers to come up - TODO(check this)
self._check_public_connectivity(
should_connect=True, should_check_floating_ip_status=False)
        # Verify whether our extra dhcp options made it to the VM
floating_ip, this_server = self.floating_ip_tuple
self._check_extra_dhcp_opts_on_server(
this_server, floating_ip['floating_ip_address'])
        # Check disassociate / associate of the FIP on the same port
# a number of times
loop_range = 4
LOG.info("Starting FIP-2-underlay dis/associate loop on " +
str(floating_ip['floating_ip_address']))
for count in range(1, loop_range, 1):
self._disassociate_floating_ips()
time.sleep(FIP_UPDATE_DELAY)
LOG.info("Loop " + str(count) + "/" + str(loop_range) +
" Connectivity is GONE")
self._check_public_connectivity(
should_connect=False, should_check_floating_ip_status=False)
# disassociate de-populates the server in the tuple,
# populate it again:
self.floating_ip_tuple = Floating_IP_tuple(
floating_ip, this_server)
self._associate_floating_ip(floating_ip, this_server)
time.sleep(FIP_UPDATE_DELAY)
LOG.info("Loop " + str(count) + "/" + str(loop_range) +
" Connectivity is BACK")
self._check_public_connectivity(
should_connect=True, should_check_floating_ip_status=False)
| StarcoderdataPython |
3240954 | <reponame>carlos-moreno/curso-de-selenium
# print <- function that writes to the screen when called
# print('Oi, meu nome é Carlos')  # "Hi, my name is Carlos"
idade = input('Qual o seu idade? ')  # prompt asks "What is your age?"
print(idade)
| StarcoderdataPython |