Dataset schema (one row per source file; ⌀ marks columns that contain nulls):
hexsha: string (length 40) | size: int64 (1 to 1.03M) | ext: string (10 classes) | lang: string (1 class)
max_stars_repo_path: string (3 to 239) | max_stars_repo_name: string (5 to 130) | max_stars_repo_head_hexsha: string (40 to 78) | max_stars_repo_licenses: sequence (1 to 10) | max_stars_count: int64 (1 to 191k, ⌀) | max_stars_repo_stars_event_min_datetime: string (24, ⌀) | max_stars_repo_stars_event_max_datetime: string (24, ⌀)
max_issues_repo_path: string (3 to 239) | max_issues_repo_name: string (5 to 130) | max_issues_repo_head_hexsha: string (40 to 78) | max_issues_repo_licenses: sequence (1 to 10) | max_issues_count: int64 (1 to 67k, ⌀) | max_issues_repo_issues_event_min_datetime: string (24, ⌀) | max_issues_repo_issues_event_max_datetime: string (24, ⌀)
max_forks_repo_path: string (3 to 239) | max_forks_repo_name: string (5 to 130) | max_forks_repo_head_hexsha: string (40 to 78) | max_forks_repo_licenses: sequence (1 to 10) | max_forks_count: int64 (1 to 105k, ⌀) | max_forks_repo_forks_event_min_datetime: string (24, ⌀) | max_forks_repo_forks_event_max_datetime: string (24, ⌀)
content: string (1 to 1.03M) | avg_line_length: float64 (1 to 958k) | max_line_length: int64 (1 to 1.03M) | alphanum_fraction: float64 (0 to 1)
hexsha: 7940711705a8a4416004da51be1863926ad797e8 | size: 17,124 | ext: py | lang: Python | path: tests/unit/project_lifecycle/project/brenttests/test_validate.py | repo: manojn97/lmctl @ 844925cb414722351efac90cb97f10c1185eef7a | licenses: ["Apache-2.0"] | stars: 3 (2021-07-19 to 2022-03-07) | issues: 43 (2019-08-27 to 2020-08-27) | forks: 7 (2020-09-22 to 2022-03-29)
import os
from tests.common.project_testing import (ProjectSimTestCase,
PROJECT_CONTAINS_DIR, BRENT_DEFINITIONS_DIR, BRENT_DESCRIPTOR_DIR, BRENT_DESCRIPTOR_YML_FILE,
BRENT_INFRASTRUCTURE_DIR, BRENT_INFRASTRUCTURE_MANIFEST_FILE, BRENT_LIFECYCLE_DIR,
BRENT_LIFECYCLE_MANIFEST_FILE, BRENT_OPENSTACK_DIR, BRENT_OPENSTACK_HEAT_YAML_FILE,
BRENT_OPENSTACK_DISCOVER_YAML_FILE, BRENT_KUBERNETES_DIR)
from lmctl.project.source.core import Project, Options, ValidateOptions, BuildOptions
from lmctl.project.validation import ValidationResult
import tests.common.simulations.project_lab as project_lab
EXPECTED_AUTOCORRECT_MANIFESTS_DESCRIPTOR = '''\
description: descriptor
infrastructure:
Openstack: {}
Kubernetes: {}
AWS: {}
lifecycle:
Create:
drivers:
openstack:
selector:
infrastructure-type:
- Openstack
kubernetes:
selector:
infrastructure-type:
- Kubernetes
AWS:
selector:
infrastructure-type:
- AWS
Install: {}
Delete:
drivers:
openstack:
selector:
infrastructure-type:
- Openstack
kubernetes:
selector:
infrastructure-type:
- Kubernetes
AWS:
selector:
infrastructure-type:
- AWS
default-driver:
ansible:
selector:
infrastructure-type:
- Openstack
- Kubernetes
shell:
selector:
infrastructure-type:
- AWS
'''
EXPECTED_AUTOCORRECT_INFRASTRUCTURE_DESCRIPTOR = '''\
description: descriptor for with_infrastructure_templates
infrastructure:
Openstack: {}
Kubernetes: {}
lifecycle:
Create:
drivers:
openstack:
selector:
infrastructure-type:
- Openstack
kubernetes:
selector:
infrastructure-type:
- Kubernetes
Install: {}
Delete:
drivers:
openstack:
selector:
infrastructure-type:
- Openstack
kubernetes:
selector:
infrastructure-type:
- Kubernetes
default-driver:
ansible:
selector:
infrastructure-type:
- '*'
queries:
drivers:
openstack:
selector:
infrastructure-type:
- Openstack
'''
EXPECTED_AUTOCORRECT_WITH_MISSING_DRIVER_SELECTORS_DESCRIPTOR = '''\
description: descriptor for with_missing_driver_selector
lifecycle:
Install:
drivers:
ansible:
selector:
infrastructure-type:
- '*'
default-driver:
ansible:
selector:
infrastructure-type:
- '*'
operations:
test-op:
drivers:
ansible:
selector:
infrastructure-type:
- '*'
'''
class TestValidateBrentProjects(ProjectSimTestCase):
def test_validate_resource(self):
project_sim = self.simlab.simulate_brent_basic()
project = Project(project_sim.path)
validate_options = ValidateOptions()
result = project.validate(validate_options)
self.assertIsInstance(result, ValidationResult)
self.assertFalse(result.has_errors())
self.assertFalse(result.has_warnings())
def test_validate_resource_descriptor_name(self):
project_sim = self.simlab.simulate_invalid_brent_mismatch_descriptor_name()
project = Project(project_sim.path)
validate_options = ValidateOptions()
result = project.validate(validate_options)
descriptor_path = os.path.join(project_sim.path, BRENT_DEFINITIONS_DIR, BRENT_DESCRIPTOR_DIR, BRENT_DESCRIPTOR_YML_FILE)
expected_errors = []
expected_errors.append(
'Descriptor [{0}]: name \'assembly::notvalid::5.4\' includes type \'assembly\' but this should be \'resource\' based on project configuration'.format(descriptor_path))
expected_errors.append(
'Descriptor [{0}]: name \'assembly::notvalid::5.4\' includes \'notvalid\' but this should be \'invalid_mismatch_lm_descriptor_name\' based on project configuration'.format(descriptor_path))
expected_errors.append('Descriptor [{0}]: name \'assembly::notvalid::5.4\' includes version \'5.4\' but this should be \'1.0\' based on project configuration'.format(descriptor_path))
self.assert_validation_errors(result, *expected_errors)
def test_validate_resource_without_definitions(self):
project_sim = self.simlab.simulate_invalid_brent_no_definitions()
project = Project(project_sim.path)
validate_options = ValidateOptions()
result = project.validate(validate_options)
descriptor_path = os.path.join(project_sim.path, BRENT_DEFINITIONS_DIR, BRENT_DESCRIPTOR_DIR, BRENT_DESCRIPTOR_YML_FILE)
self.assert_validation_errors(result, 'No descriptor found at: {0}'.format(descriptor_path))
def test_validate_resource_without_lm_definitions(self):
project_sim = self.simlab.simulate_invalid_brent_no_lm_definitions()
project = Project(project_sim.path)
validate_options = ValidateOptions()
result = project.validate(validate_options)
descriptor_path = os.path.join(project_sim.path, BRENT_DEFINITIONS_DIR, BRENT_DESCRIPTOR_DIR, BRENT_DESCRIPTOR_YML_FILE)
self.assert_validation_errors(result, 'No descriptor found at: {0}'.format(descriptor_path))
def test_validate_resource_without_lm_descriptor(self):
project_sim = self.simlab.simulate_invalid_brent_no_lm_descriptor()
project = Project(project_sim.path)
validate_options = ValidateOptions()
result = project.validate(validate_options)
descriptor_path = os.path.join(project_sim.path, BRENT_DEFINITIONS_DIR, BRENT_DESCRIPTOR_DIR, BRENT_DESCRIPTOR_YML_FILE)
self.assert_validation_errors(result, 'No descriptor found at: {0}'.format(descriptor_path))
def test_validate_resource_without_lifecycle(self):
project_sim = self.simlab.simulate_invalid_brent_no_lifecycle()
project = Project(project_sim.path)
validate_options = ValidateOptions()
result = project.validate(validate_options)
lifecycle_path = os.path.join(project_sim.path, BRENT_LIFECYCLE_DIR)
self.assert_validation_errors(result, 'No Lifecycle directory found at: {0}'.format(lifecycle_path))
def test_validate_errors_on_manifests(self):
project_sim = self.simlab.simulate_brent_with_prealpha_style()
project = Project(project_sim.path)
validate_options = ValidateOptions()
result = project.validate(validate_options)
inf_manifest_path = os.path.join(project_sim.path, BRENT_DEFINITIONS_DIR, BRENT_INFRASTRUCTURE_DIR, BRENT_INFRASTRUCTURE_MANIFEST_FILE)
lifeycle_manifest_path = os.path.join(project_sim.path, BRENT_LIFECYCLE_DIR, BRENT_LIFECYCLE_MANIFEST_FILE)
self.assert_validation_errors(result, 'Found lifecycle manifest [{0}]: this file is no longer supported by the Brent Resource Manager. Add this information to the Resource descriptor instead or enable the autocorrect option'.format(lifeycle_manifest_path), 'Found infrastructure manifest [{0}]: this file is no longer supported by the Brent Resource Manager. Add this information to the Resource descriptor instead or enable the autocorrect option'.format(inf_manifest_path))
def test_validate_allow_autocorrect_fixes_manifests(self):
project_sim = self.simlab.simulate_brent_with_prealpha_style()
project = Project(project_sim.path)
validate_options = ValidateOptions()
validate_options.allow_autocorrect = True
result = project.validate(validate_options)
self.assertFalse(result.has_errors())
self.assertFalse(result.has_warnings())
project = Project(project_sim.path)
tester = self.assert_project(project)
inf_manifest_path = os.path.join(project_sim.path, BRENT_DEFINITIONS_DIR, BRENT_INFRASTRUCTURE_DIR, BRENT_INFRASTRUCTURE_MANIFEST_FILE)
tester.assert_has_no_file(inf_manifest_path)
lifeycle_manifest_path = os.path.join(project_sim.path, BRENT_LIFECYCLE_DIR, BRENT_LIFECYCLE_MANIFEST_FILE)
tester.assert_has_no_file(lifeycle_manifest_path)
lm_dir = os.path.join(BRENT_DEFINITIONS_DIR, BRENT_DESCRIPTOR_DIR)
descriptor_path = os.path.join(lm_dir, BRENT_DESCRIPTOR_YML_FILE)
tester.assert_has_file(descriptor_path, EXPECTED_AUTOCORRECT_MANIFESTS_DESCRIPTOR)
def test_validate_errors_on_infrastructure_templates(self):
project_sim = self.simlab.simulate_brent_with_infrastructure_templates()
project = Project(project_sim.path)
validate_options = ValidateOptions()
result = project.validate(validate_options)
descriptor_path = os.path.join(project_sim.path, BRENT_DEFINITIONS_DIR, BRENT_DESCRIPTOR_DIR, BRENT_DESCRIPTOR_YML_FILE)
self.assert_validation_errors(result, 'Found infrastructure entries referencing templates [{0}]: this format is no longer supported by the Brent Resource Manager. Add this information to the Create/Delete lifecycle and/or queries instead or enable the autocorrect option'.format(descriptor_path))
def test_validate_allow_autocorrect_moves_infrastructure_templates_to_lifecycle(self):
project_sim = self.simlab.simulate_brent_with_infrastructure_templates()
project = Project(project_sim.path)
find_yaml_path = os.path.join(project_sim.path, BRENT_DEFINITIONS_DIR, BRENT_INFRASTRUCTURE_DIR, 'find.yaml')
with open(find_yaml_path, 'r') as f:
find_yaml_content = f.read()
heat_yaml_path = os.path.join(project_sim.path, BRENT_DEFINITIONS_DIR, BRENT_INFRASTRUCTURE_DIR, 'openstack.yaml')
with open(heat_yaml_path, 'r') as f:
heat_yaml_content = f.read()
kube_yaml_path = os.path.join(project_sim.path, BRENT_DEFINITIONS_DIR, BRENT_INFRASTRUCTURE_DIR, 'kube.yaml')
with open(kube_yaml_path, 'r') as f:
kube_yaml_content = f.read()
validate_options = ValidateOptions()
validate_options.allow_autocorrect = True
result = project.validate(validate_options)
self.assertFalse(result.has_errors())
self.assertFalse(result.has_warnings())
project = Project(project_sim.path)
tester = self.assert_project(project)
inf_manifest_path = os.path.join(project_sim.path, BRENT_DEFINITIONS_DIR, BRENT_INFRASTRUCTURE_DIR, BRENT_INFRASTRUCTURE_MANIFEST_FILE)
tester.assert_has_no_file(find_yaml_path)
tester.assert_has_no_file(heat_yaml_path)
tester.assert_has_no_file(kube_yaml_path)
tester.assert_has_file(os.path.join(project_sim.path, BRENT_LIFECYCLE_DIR, BRENT_OPENSTACK_DIR, BRENT_OPENSTACK_HEAT_YAML_FILE), heat_yaml_content)
tester.assert_has_file(os.path.join(project_sim.path, BRENT_LIFECYCLE_DIR, BRENT_OPENSTACK_DIR, BRENT_OPENSTACK_DISCOVER_YAML_FILE), find_yaml_content)
tester.assert_has_file(os.path.join(project_sim.path, BRENT_LIFECYCLE_DIR, BRENT_KUBERNETES_DIR, 'kube.yaml'), kube_yaml_content)
lm_dir = os.path.join(BRENT_DEFINITIONS_DIR, BRENT_DESCRIPTOR_DIR)
descriptor_path = os.path.join(lm_dir, BRENT_DESCRIPTOR_YML_FILE)
tester.assert_has_file(descriptor_path, EXPECTED_AUTOCORRECT_INFRASTRUCTURE_DESCRIPTOR)
def test_validate_errors_on_driver_entries_missing_selector_in_descriptor(self):
project_sim = self.simlab.simulate_brent_with_missing_driver_selector()
project = Project(project_sim.path)
validate_options = ValidateOptions()
result = project.validate(validate_options)
descriptor_path = os.path.join(project_sim.path, BRENT_DEFINITIONS_DIR, BRENT_DESCRIPTOR_DIR, BRENT_DESCRIPTOR_YML_FILE)
self.assert_validation_errors(result, 'Found lifecycle/operation/default-driver entries missing \'selector\' key before \'infrastructure-type\' [{0}]: this format is no longer supported by the Brent Resource Manager. Move infrastructure-type information under the selector key or enable the autocorrect option'.format(descriptor_path))
def test_validate_allow_autocorrect_adds_selector_to_driver_entries_in_descriptor(self):
project_sim = self.simlab.simulate_brent_with_missing_driver_selector()
project = Project(project_sim.path)
validate_options = ValidateOptions()
validate_options.allow_autocorrect = True
result = project.validate(validate_options)
self.assertFalse(result.has_errors())
self.assertFalse(result.has_warnings())
project = Project(project_sim.path)
tester = self.assert_project(project)
lm_dir = os.path.join(BRENT_DEFINITIONS_DIR, BRENT_DESCRIPTOR_DIR)
descriptor_path = os.path.join(lm_dir, BRENT_DESCRIPTOR_YML_FILE)
tester.assert_has_file(descriptor_path, EXPECTED_AUTOCORRECT_WITH_MISSING_DRIVER_SELECTORS_DESCRIPTOR)
class TestValidateBrentSubprojects(ProjectSimTestCase):
def test_validate_resource(self):
project_sim = self.simlab.simulate_assembly_contains_brent_basic()
project = Project(project_sim.path)
validate_options = ValidateOptions()
result = project.validate(validate_options)
self.assertIsInstance(result, ValidationResult)
self.assertFalse(result.has_errors())
self.assertFalse(result.has_warnings())
def test_validate_resource_descriptor_name(self):
project_sim = self.simlab.simulate_assembly_contains_invalid_brent_mismatch_descriptor_name()
project = Project(project_sim.path)
validate_options = ValidateOptions()
result = project.validate(validate_options)
descriptor_path = os.path.join(project_sim.path, PROJECT_CONTAINS_DIR, project_lab.SUBPROJECT_NAME_INVALID_BRENT_MISMATCH_DESCRIPTOR_NAME, BRENT_DEFINITIONS_DIR, BRENT_DESCRIPTOR_DIR, BRENT_DESCRIPTOR_YML_FILE)
expected_errors = []
expected_errors.append(
'Descriptor [{0}]: name \'assembly::notvalid::5.4\' includes type \'assembly\' but this should be \'resource\' based on project configuration'.format(descriptor_path))
expected_errors.append(
'Descriptor [{0}]: name \'assembly::notvalid::5.4\' includes \'notvalid\' but this should be \'sub_invalid_mismatch_descriptor_name-contains_invalid_mismatch_descriptor_name\' based on project configuration'.format(descriptor_path))
expected_errors.append('Descriptor [{0}]: name \'assembly::notvalid::5.4\' includes version \'5.4\' but this should be \'1.0\' based on project configuration'.format(descriptor_path))
self.assert_validation_errors(result, *expected_errors)
def test_validate_resource_without_definitions(self):
project_sim = self.simlab.simulate_assembly_contains_invalid_brent_no_definitions()
project = Project(project_sim.path)
validate_options = ValidateOptions()
result = project.validate(validate_options)
descriptor_path = os.path.join(project_sim.path, PROJECT_CONTAINS_DIR, project_lab.SUBPROJECT_NAME_INVALID_BRENT_NO_DEFINITIONS, BRENT_DEFINITIONS_DIR, BRENT_DESCRIPTOR_DIR, BRENT_DESCRIPTOR_YML_FILE)
self.assert_validation_errors(result, 'No descriptor found at: {0}'.format(descriptor_path))
def test_validate_resource_without_lm_definitions(self):
project_sim = self.simlab.simulate_assembly_contains_invalid_brent_no_lm_definitions()
project = Project(project_sim.path)
validate_options = ValidateOptions()
result = project.validate(validate_options)
descriptor_path = os.path.join(project_sim.path, PROJECT_CONTAINS_DIR, project_lab.SUBPROJECT_NAME_INVALID_BRENT_NO_LM_DEFINITIONS, BRENT_DEFINITIONS_DIR, BRENT_DESCRIPTOR_DIR, BRENT_DESCRIPTOR_YML_FILE)
self.assert_validation_errors(result, 'No descriptor found at: {0}'.format(descriptor_path))
def test_validate_resource_without_lm_descriptor(self):
project_sim = self.simlab.simulate_assembly_contains_invalid_brent_no_lm_descriptor()
project = Project(project_sim.path)
validate_options = ValidateOptions()
result = project.validate(validate_options)
descriptor_path = os.path.join(project_sim.path, PROJECT_CONTAINS_DIR, project_lab.SUBPROJECT_NAME_INVALID_BRENT_NO_DESCRIPTOR, BRENT_DEFINITIONS_DIR, BRENT_DESCRIPTOR_DIR, BRENT_DESCRIPTOR_YML_FILE)
self.assert_validation_errors(result, 'No descriptor found at: {0}'.format(descriptor_path))
def test_validate_resource_without_lifecycle(self):
project_sim = self.simlab.simulate_assembly_contains_invalid_brent_no_lifecycle()
project = Project(project_sim.path)
validate_options = ValidateOptions()
result = project.validate(validate_options)
inf_path = os.path.join(project_sim.path, PROJECT_CONTAINS_DIR, project_lab.SUBPROJECT_NAME_INVALID_BRENT_NO_LIFECYCLE, BRENT_LIFECYCLE_DIR)
self.assert_validation_errors(result, 'No Lifecycle directory found at: {0}'.format(inf_path))
avg_line_length: 53.180124 | max_line_length: 483 | alphanum_fraction: 0.740364

hexsha: 79407118d0d92676da1b7e0d8fe3a8cea8dfee75 | size: 3,906 | ext: py | lang: Python | path: sam_annotations/extended/bad_samples.py | repo: oaxiom/human @ f0656c20e76ca253eda0b8e622fc37b982b8fb5d | licenses: ["MIT"] | stars: 1 (2021-06-09) | issues: null | forks: null
bad_samples = set([
# <0.5e6 reads
'Caudate_nucleus_rp1', 'Caudate_nucleus_rp2', 'Caudate_nucleus_rp3', 'Caudate_nucleus_rp4', 'Caudate_nucleus_rp5',
'Frontal_pole_rp2', 'Frontal_pole_rp6',
'hESC_H1_rp15',
'Hippocampus_rp1', 'Hippocampus_rp2', 'Hippocampus_rp3', 'Hippocampus_rp3', 'Hippocampus_rp4', 'Hippocampus_rp5' , 'Hippocampus_rp6',
'Macrophages_rp1', 'Macrophages_rp2', 'Sperm_rp3',
'astrocyte_fetal_rp1', 'astrocyte_fetal_rp2', 'astrocyte_fetal_rp3', 'astrocyte_fetal_4', 'astrocyte_fetal_5',
'astrocyte_fetal_rp6', 'Polarbody_rp2',
'Oocyte_rp4', 'Oocyte_rp5',
# others:
'Airway_basal_cells_rp25', 'Airway_basal_cells_rp26', 'Airway_basal_cells_rp27', # are super outliers. I take the majority vote and delete these
'Epidermal_keratinocytes_rp6', # sample 6 has a problem, I take the majority vote
'Large_airway_epithelial_cells_rp2', # looks mangled for some reason
# Weird outliers:
'Ileum_rp3', 'Ileum_rp9', 'Ileum_rp13',
'Retina_rp3',
'Treg_rp1',
# Bad embryo:
'Embryo_8C_rp10',
# Bad curves:
'astrocyte_fetal_rp5',
'CD4p_ILC1_rp4',
'Cardiac_resident_MSC_W8B2p_rp2',
'Cortex_rp1',
'Frontal_pole_rp1', 'Frontal_pole_rp2', 'Frontal_pole_rp3', 'Frontal_pole_rp4', 'Frontal_pole_rp5',
'Skeletal_muscle_cells_rp1', 'Skeletal_muscle_cells_rp2',
'Macrophages_rp1', 'Macrophages_rp2',
# SS Petropoulos Human embryo
"embryo_E6_12_1296", "embryo_E6_12_1297", "embryo_E6_13_1385",
"embryo_E6_14_1405", "embryo_E6_14_1415", "embryo_E6_17_1571", "embryo_E6_17_1621",
"embryo_E6_18_1627", "embryo_E6_18_1634", "embryo_E6_18_1642", "embryo_E6_2_104",
"embryo_E6_6_721", "embryo_E7_11_846", "embryo_E7_14_906", "embryo_E7_15_1094",
"embryo_E7_16_1109", "embryo_E7_17_1331", "embryo_E7_19_1567", "embryo_E6_2_107",
"embryo_E7_12_866",
# Outliers:
'embryo_E4_3_481', 'embryo_E3_2_466', 'embryo_E3_2_467', 'embryo_E3_3_456', 'embryo_E3_3_458',
'embryo_E3_3_459', 'embryo_E4_10_1221', 'embryo_E4_10_1223', 'embryo_E4_10_1230',
'embryo_E4_10_1232', 'embryo_E4_11_1258', 'embryo_E4_1_1',
'embryo_E4_24_5_0_4_7', 'embryo_E4_2_10', 'embryo_E4_2_13', 'embryo_E4_31_5_1_11',
'embryo_E4_31_5_1_2', 'embryo_E4_31_5_1_6', 'embryo_E4_31_5_1_8', 'embryo_E4_31_5_1_9',
'embryo_E4_3_481', 'embryo_E4_3_486', 'embryo_E4_3_490', 'embryo_E4_3_491',
'embryo_E4_7_642', 'embryo_E4_7_643', 'embryo_E4_7_644', 'embryo_E4_8_1171',
'embryo_E5_10_934' ,'embryo_E5_10_935', 'embryo_E5_10_936', 'embryo_E5_10_940',
'embryo_E5_10_941', 'embryo_E5_10_943' ,'embryo_E5_10_944', 'embryo_E5_10_945',
'embryo_E5_10_947', 'embryo_E5_10_949', 'embryo_E5_10_950', 'embryo_E5_11_951',
'embryo_E5_11_953' ,'embryo_E5_11_955', 'embryo_E5_11_957', 'embryo_E5_11_959',
'embryo_E5_11_960', 'embryo_E5_11_961' ,'embryo_E5_11_962', 'embryo_E5_11_963',
'embryo_E5_11_964', 'embryo_E5_11_965', 'embryo_E5_11_966', 'embryo_E5_12_1028',
'embryo_E5_12_1029', 'embryo_E5_14_1785', 'embryo_E5_14_1791',
'embryo_E5_14_1793', 'embryo_E5_14_1794', 'embryo_E5_14_1797',
'embryo_E5_14_1798', 'embryo_E5_14_1802', 'embryo_E5_14_1808', 'embryo_E5_14_1811',
'embryo_E5_15_1820', 'embryo_E5_15_1821', 'embryo_E5_15_1822', 'embryo_E5_15_1824',
'embryo_E5_15_1825', 'embryo_E5_15_1826', 'embryo_E5_15_1827',
'embryo_E5_15_1829', 'embryo_E5_15_1830', 'embryo_E5_15_1831', 'embryo_E5_15_1836',
'embryo_E5_15_1837', 'embryo_E5_16_1885', 'embryo_E5_16_1900', 'embryo_E5_37_3247',
'embryo_E5_39_3266', 'embryo_E5_3_52',
'embryo_E6_10_1051', 'embryo_E6_11_1065', 'embryo_E6_11_1068', 'embryo_E6_11_1068',
'embryo_E6_11_1070', 'embryo_E6_11_1074', 'embryo_E6_13_1371', 'embryo_E6_16_1500',
'embryo_E6_1_72', 'embryo_E6_2_105',
'embryo_E7_16_1169', 'embryo_E7_17_1307', 'embryo_E7_17_1308',
'embryo_E7_2_147', 'embryo_E7_2_151',
])
avg_line_length: 60.092308 | max_line_length: 148 | alphanum_fraction: 0.747056

hexsha: 79407190f4b1b8dd7623034093a87613e0a3a06a | size: 743 | ext: py | lang: Python | path: setup.py | repo: fakegit/pythonping (stars), MPCodeWriter21/pythonping (issues/forks) @ 5b6d67228d982caf3503a3f26b6721aa7b3a612e | licenses: ["MIT"] | stars: 129 (2018-11-26 to 2022-03-15) | issues: 50 (2018-03-31 to 2022-03-14) | forks: 58 (2019-02-09 to 2022-03-22)
from setuptools import setup
with open('README.md', 'r') as file:
long_description = file.read()
setup(name='pythonping',
version='1.1.1',
description='A simple way to ping in Python',
url='https://github.com/alessandromaggio/pythonping',
author='Alessandro Maggio',
author_email='[email protected]',
license='MIT',
packages=['pythonping'],
keywords=['ping', 'icmp', 'network'],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: System Administrators',
'Natural Language :: English'
],
long_description=long_description,
long_description_content_type='text/markdown',
zip_safe=False)
avg_line_length: 32.304348 | max_line_length: 59 | alphanum_fraction: 0.637954

hexsha: 7940728b04061eeee3f5128428a4bbda8446bb81 | size: 3,748 | ext: py | lang: Python | path: aiida/orm/implementation/django/users.py | repo: louisponet/aiida-core @ 3214236df66a3792ee57fe38a06c0c3bb65861ab | licenses: ["MIT", "BSD-3-Clause"] | stars: 1 (2016-09-12) | issues: 17 (2020-03-11 to 2020-05-01) | forks: null
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
"""Django user module"""
import functools
from aiida.backends.djsite.db import models
from aiida.backends.djsite.db.models import DbUser
from aiida.orm.implementation.users import BackendUser, BackendUserCollection
from . import entities
from . import utils
__all__ = ('DjangoUser', 'DjangoUserCollection')
class DjangoUser(entities.DjangoModelEntity[models.DbUser], BackendUser):
"""The Django user class"""
MODEL_CLASS = models.DbUser
def __init__(self, backend, email, first_name, last_name, institution):
# pylint: disable=too-many-arguments
super().__init__(backend)
self._dbmodel = utils.ModelWrapper(
DbUser(email=email, first_name=first_name, last_name=last_name, institution=institution)
)
@property
def email(self):
return self._dbmodel.email
@email.setter
def email(self, email):
self._dbmodel.email = email
@property
def first_name(self):
return self._dbmodel.first_name
@first_name.setter
def first_name(self, first_name):
self._dbmodel.first_name = first_name
@property
def last_name(self):
return self._dbmodel.last_name
@last_name.setter
def last_name(self, last_name):
self._dbmodel.last_name = last_name
@property
def institution(self):
return self._dbmodel.institution
@institution.setter
def institution(self, institution):
self._dbmodel.institution = institution
class DjangoUserCollection(BackendUserCollection):
"""The Django collection of users"""
ENTITY_CLASS = DjangoUser
def create(self, email, first_name='', last_name='', institution=''): # pylint: disable=arguments-differ
"""
Create a user with the provided email address
:return: A new user object
:rtype: :class:`aiida.orm.implementation.django.users.DjangoUser`
"""
# pylint: disable=abstract-class-instantiated
return DjangoUser(self.backend, email, first_name, last_name, institution)
def find(self, email=None, id=None): # pylint: disable=redefined-builtin, invalid-name
"""
Find users in this collection
:param email: optional email address filter
:param id: optional id filter
:return: a list of the found users
:rtype: list
"""
# Constructing the default query
import operator
from django.db.models import Q # pylint: disable=import-error, no-name-in-module
query_list = []
# If an id is specified then we add it to the query
if id is not None:
query_list.append(Q(pk=id))
# If an email is specified then we add it to the query
if email is not None:
query_list.append(Q(email=email))
if not query_list:
dbusers = DbUser.objects.all()
else:
dbusers = DbUser.objects.filter(functools.reduce(operator.and_, query_list))
found_users = []
for dbuser in dbusers:
found_users.append(self.from_dbmodel(dbuser))
return found_users
avg_line_length: 33.168142 | max_line_length: 109 | alphanum_fraction: 0.614194

hexsha: 79407365ae4b2eb51f5957deb8dbfd16903b9e02 | size: 7,825 | ext: py | lang: Python | path: hermes/models.py | repo: DemocracyClub/django-hermes @ 233bc41dd1c9c0da05639213a1fb5c5a638e6014 | licenses: ["MIT"] | stars: 2 (2017-02-22 to 2017-04-27) | issues: 7 (2017-02-04 to 2022-03-12) | forks: null
import os
import operator
from functools import reduce
from django.contrib.postgres.fields import ArrayField
try:
from itertools import ifilter as filter
except:
pass # Must be python 3
from django.conf import settings as django_settings
from django.db import models
from django.urls import reverse
from django.utils.text import Truncator, slugify
from django.utils.translation import gettext as _
from django.db.models.signals import pre_delete
from django.dispatch.dispatcher import receiver
from . import settings
class TimestampedModel(models.Model):
created_on = models.DateTimeField(_('created on'), auto_now_add=True)
modified_on = models.DateTimeField(_('modified on'), auto_now=True)
class Meta:
abstract = True
class CategoryManager(models.Manager):
def children_of(self, category, categories=None):
if categories is None:
categories = list(self.all())
children = list(filter(lambda c: c.parent == category, categories))
for child in children:
children.extend(self.children_of(child, categories))
return children
class Category(models.Model):
title = models.CharField(_('title'), max_length=100)
parent = models.ForeignKey('self', blank=True, null=True, on_delete=models.CASCADE)
slug = models.CharField(blank=True, default='', max_length=500, db_index=True)
objects = CategoryManager()
class Meta:
verbose_name = u'category'
verbose_name_plural = u'categories'
def save(self, *args, **kwargs):
self.slug = self._generate_slug()
super(Category, self).save(*args, **kwargs)
def __unicode__(self):
return u" > ".join([category.title for category in self.hierarchy()])
def __str__(self):
return self.__unicode__()
def get_absolute_url(self):
return reverse('hermes_category_post_list', kwargs={'slug': self.slug})
def _generate_slug(self):
return "/".join([slugify(category.title) for category in self.hierarchy()]).lower()
@property
def is_root(self):
""" Returns True if this category has no parent. """
return self.parent is None
def parents(self):
""" Returns a list of all the current category's parents."""
parents = []
if self.parent is None:
return []
category = self
while category.parent is not None:
parents.append(category.parent)
category = category.parent
return parents[::-1]
def hierarchy(self):
return self.parents() + [self]
def root_parent(self, category=None):
""" Returns the topmost parent of the current category. """
return next(filter(lambda c: c.is_root, self.hierarchy()))
class PostQuerySet(models.query.QuerySet):
def by(self, author):
return self.filter(author__username=author)
def in_category(self, category_slug):
category = Category.objects.get(slug=category_slug)
children = Category.objects.children_of(category)
return self.filter(category__in=[category] + children)
def created_on(self, year=None, month=None, day=None):
clauses = []
if year:
clauses.append(models.Q(created_on__year=year))
if month:
clauses.append(models.Q(created_on__month=month))
if day:
clauses.append(models.Q(created_on__day=day))
return self.filter(reduce(operator.__and__, clauses))
def recent(self, limit=None):
queryset = self.published()
if limit:
queryset = queryset[:limit]
return queryset
def random(self, limit=None):
queryset = self.recent().order_by('?')
if limit:
queryset = queryset[:limit]
return queryset
def published(self):
return self.filter(is_published=True)
def for_tag(self, tag):
return self.filter(tags__contains=[tag])
class PostManager(models.Manager):
def get_queryset(self):
return PostQuerySet(self.model, using=self._db)
def in_category(self, category_slug):
return self.get_queryset().in_category(category_slug)
def created_on(self, year=None, month=None, day=None):
return self.get_queryset().created_on(year=year, month=month, day=day)
def recent(self, limit=None):
return self.get_queryset().recent(limit=limit)
def random(self, limit=None):
return self.get_queryset().random(limit=limit)
def published(self):
return self.get_queryset().published()
def by(self, author):
return self.get_queryset().by(author)
def for_tag(self, tag):
return self.get_queryset().for_tag(tag)
def post_hero_upload_to(instance, filename):
filename = os.path.split(filename)[-1]
filename, extension = os.path.splitext(filename)
extension = extension[1:]
return "hermes/heroes/{slug}_{filename}_hero.{extension}".format(
slug=instance.slug[:40],
filename=filename[:30],
extension=extension
)
class Post(TimestampedModel):
is_published = models.BooleanField(default=False)
hero = models.ImageField(_('hero'), upload_to=post_hero_upload_to, blank=True, null=True)
subject = models.CharField(_('subject'), max_length=100)
slug = models.SlugField(_('slug'), max_length=100)
summary = models.TextField(_('summary'), blank=True, null=True)
body = models.TextField(_('body'))
category = models.ForeignKey(Category, on_delete=models.CASCADE)
author = models.ForeignKey(django_settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
tags = ArrayField(models.CharField(max_length=30), blank=True, default=list)
objects = PostManager()
class Meta:
ordering = ('-created_on', )
get_latest_by = 'modified_on'
def __unicode__(self):
return self.subject
def __str__(self):
return self.__unicode__()
def get_absolute_url(self):
return reverse('hermes_post_detail', kwargs={
'year': self.created_on.year,
'month': self.created_on.strftime('%m'),
'day': self.created_on.strftime('%d'),
'slug': self.slug,
})
@property
def short(self):
if self.summary:
return self.rendered_summary
else:
return Truncator(
self.rendered
).words(
settings.HERMES_SHORT_TRUNCATE_WORDS,
truncate='…',
html=True,
)
@property
def rendered_summary(self):
return self._rendered_attribute('summary')
@property
def rendered(self):
return self._rendered_attribute('body')
def _rendered_attribute(self, attr_name):
attr_value = getattr(self, attr_name)
if settings.MARKUP_RENDERER:
return settings.MARKUP_RENDERER(attr_value)
else:
return attr_value
@property
def reading_time(self):
time = (self.summary.count(' ') + self.body.count(' ')) / 300
if time == 0:
time = 1
return time
def postfile_upload_to(instance, filename):
return "uploads/hermes/{article}/{filename}".format(
article=instance.pk,
filename=filename
)
class PostFile(models.Model):
post = models.ForeignKey(Post, related_name='files', on_delete=models.CASCADE)
f = models.FileField(upload_to=postfile_upload_to)
class Meta:
verbose_name = "PostFile"
verbose_name_plural = "PostFiles"
def __unicode__(self):
return u'File for {}'.format(self.post)
def __str__(self):
return self.__unicode__()
@receiver(pre_delete, sender=PostFile)
def postfile_delete(sender, instance, **kwargs):
instance.f.delete(False)
avg_line_length: 28.558394 | max_line_length: 93 | alphanum_fraction: 0.65278

hexsha: 79407386557c663fbc4892ac56efa90f58064a52 | size: 35 | ext: py | lang: Python | path: Database/Setup Database/File.py | repo: a84885640/Python @ 5984f8384abfdbfc83470d1d93b7430297fa654b | licenses: ["Unlicense"] | stars: 19 (2018-09-06 to 2022-03-23) | issues: null | forks: 43 (2018-08-02 to 2022-01-03)
#!/usr/bin/python3
import pymysql
avg_line_length: 8.75 | max_line_length: 18 | alphanum_fraction: 0.742857

hexsha: 794074112b796b2ec05a0c9ad978de352229ddbb | size: 2,075 | ext: py | lang: Python | path: ledgerblue/mcuBootloader.py | repo: DarthJahus/blue-loader-python @ a40c61fc74e1e7236c96516308f26e9776e092e3 | licenses: ["Apache-2.0"] | stars: 110 (2016-09-15 to 2022-03-30) | issues: 53 (2016-04-17 to 2022-03-25) | forks: 86 (2016-04-14 to 2022-03-24)
"""
*******************************************************************************
* Ledger Blue
* (c) 2016 Ledger
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
********************************************************************************
"""
import argparse
def get_argparser():
parser = argparse.ArgumentParser(description="Request the MCU to execute its bootloader.")
parser.add_argument("--targetId", help="The device's target ID (default is Ledger Blue)", type=auto_int)
parser.add_argument("--rootPrivateKey", help="""The Signer private key used to establish a Secure Channel (otherwise
a random one will be generated)""")
parser.add_argument("--apdu", help="Display APDU log", action='store_true')
return parser
def auto_int(x):
return int(x, 0)
if __name__ == '__main__':
from .ecWrapper import PrivateKey
from .comm import getDongle
from .deployed import getDeployedSecretV1, getDeployedSecretV2
from .hexLoader import HexLoader
import binascii
import sys
args = get_argparser().parse_args()
if args.targetId == None:
args.targetId = 0x31000002
if args.rootPrivateKey == None:
privateKey = PrivateKey()
publicKey = binascii.hexlify(privateKey.pubkey.serialize(compressed=False))
print("Generated random root public key : %s" % publicKey)
args.rootPrivateKey = privateKey.serialize()
dongle = getDongle(args.apdu)
secret = getDeployedSecretV2(dongle, bytearray.fromhex(args.rootPrivateKey), args.targetId)
loader = HexLoader(dongle, 0xe0, True, secret)
loader.exchange(0xE0, 0, 0, 0, loader.encryptAES(b'\xB0'));
avg_line_length: 37.053571 | max_line_length: 117 | alphanum_fraction: 0.69253

hexsha: 7940752130368d4b41073f8590f6edc4cae1c94f | size: 503 | ext: py | lang: Python | path: WebGUI/DjangoAPI/myapi/urls.py | repo: Twinparadox/DeepLearning @ 2746c22e987bb509eaa8257744f0d5248a1f1264 | licenses: ["MIT"] | stars: null | issues: null | forks: null
from django.urls import include, path
from rest_framework import routers
from . import views
router = routers.DefaultRouter()
router.register(r'samples', views.SampleViewSet)
# Wire up our API using automatic URL routing.
# Additionally, we include login URLs for the browsable API.
urlpatterns = [
path('', include(router.urls)),
path('api-auth/', include('rest_framework.urls', namespace='rest_framework')),
path('status/', views.approvereject),
path('api/', include(router.urls)),
]
avg_line_length: 33.533333 | max_line_length: 82 | alphanum_fraction: 0.735586

hexsha: 7940755c2d2f568b40c5466ab1dd341cb3f6d151 | size: 888 | ext: py | lang: Python | path: src/environments/_test_env/test_env.py | repo: mad-rl/mad-rl-framework @ 61de0458e22a95ccec8f52a56ee7ab331641436f | licenses: ["MIT"] | stars: 6 (2019-06-13 to 2020-07-12) | issues: 17 (2019-06-15 to 2022-03-11) | forks: null
'''
This is a Mock Environment only for testing purposes.
This Mock Environment implements the mandatory methods that an RL environment should have.
Usually you will use an Environment like OpenAI Gym which has almost the same main methods.
'''
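# Illustrative only (not part of the original file): the Gym-style interaction
# loop this mock is meant to stand in for, using the methods defined below.
#
#   env = Test_Env()
#   env.reset()
#   done = False
#   while not done:
#       observation, reward, done = env.next_step(action=1)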
class Test_Env():
def __init__(self):
self.action_space = [1]
self.next_observation = []
self.reward = 0
self.game_finished = False
def next_step(self, action):
self.next_observation.append(1)
if len(self.next_observation) > 100:
self.game_finished = True
self.reward = self.reward + 0.01
return self.next_observation, self.reward, self.game_finished
def reset(self):
self.next_observation = []
self.reward = 0
self.game_finished = False
def get_observation(self):
return self.next_observation
avg_line_length: 30.62069 | max_line_length: 98 | alphanum_fraction: 0.657658

hexsha: 794078d14ef5cbdf119d9abc9859774f028ee8d1 | size: 341 | ext: py | lang: Python | path: grr/server/grr_response_server/gui/local/local.py | repo: nkrios/grr @ 399e078ed522bf0555a2666fb086aa7809d54971 | licenses: ["Apache-2.0"] | stars: null | issues: null | forks: 1 (2020-07-09)
#!/usr/bin/env python
"""Additional (user-specific) UI logic."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from grr.core.grr_response_server.lib import registry
class LocalGuiInitHook(registry.InitHook):
"""User-specific init logic."""
def RunOnce(self):
pass
avg_line_length: 22.733333 | max_line_length: 53 | alphanum_fraction: 0.780059

hexsha: 794078e29f74ffea207d61a0ca65bf8121e8db19 | size: 2,693 | ext: py | lang: Python | path: custom_components/gtasks/sensor.py | repo: kubeeapp/kubee-gtasks @ 77d8c69c5b6c72f5d007472417a204d0a2b39a0b | licenses: ["Apache-2.0"] | stars: null | issues: null | forks: null
"""Sensor platform for Gtasks."""
from homeassistant.helpers.entity import Entity
from datetime import timedelta, date
from .const import (
ATTRIBUTION,
DEFAULT_NAME,
DOMAIN_DATA,
ICON,
DOMAIN,
SENSOR_UNIT_OF_MEASUREMENT,
)
from datetime import datetime, timedelta
async def async_setup_platform(
hass, config, async_add_entities, discovery_info=None
): # pylint: disable=unused-argument
"""Setup sensor platform."""
async_add_entities([GtasksSensor(hass, discovery_info)], True)
async def async_setup_entry(hass, config_entry, async_add_devices):
"""Setup sensor platform."""
async_add_devices([GtasksSensor(hass, {})], True)
class GtasksSensor(Entity):
"""blueprint Sensor class."""
def __init__(self, hass, config):
self.hass = hass
self.attr = {}
self._state = None
self._list = hass.data[DOMAIN_DATA]["default_list"]
self._name = '{}_{}'.format(config.get("name", DEFAULT_NAME),self._list)
async def async_update(self):
"""Update the sensor."""
# Send update "signal" to the component
await self.hass.data[DOMAIN_DATA]["client"].get_tasks()
# Get new data (if any)
task_list = self.hass.data[DOMAIN_DATA].get("tasks_list", None)
data = []
# Check the data and update the value.
if task_list is None:
self._state = self._state
else:
self._state = len(task_list)
for t in task_list:
jtask = {}
jtask["task_title"] = '{}'.format(t.title)
jtask["due_date"] = '{}'.format(t.due_date)
data.append(jtask)
# Set/update attributes
self.attr["attribution"] = ATTRIBUTION
self.attr["tasks"] = data
@property
def unique_id(self):
"""Return a unique ID to use for this sensor."""
return (
"a80f3d5b-df3d-4e38-bbb7-1025276830cd"
)
@property
def device_info(self):
return {
"identifiers": {(DOMAIN, self.unique_id)},
"name": self.name,
"manufacturer": "Gtasks",
}
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def icon(self):
"""Return the icon of the sensor."""
return ICON
@property
def unit_of_measurement(self):
return SENSOR_UNIT_OF_MEASUREMENT
@property
def device_state_attributes(self):
"""Return the state attributes."""
return self.attr
avg_line_length: 27.20202 | max_line_length: 80 | alphanum_fraction: 0.602674

hexsha: 794078f6921994f0252f7b2a7280cb1d9e65d0be | size: 3,197 | ext: py | lang: Python | path: Solution_File_D-Getting_SASPy_Environment_Info.py | repo: ilankham/wuss-2019-half-day-class @ 7b872e5ee294e62c62a20c5cd7b4e441df63f70a | licenses: ["MIT"] | stars: 2 (2019-09-04 to 2021-03-02) | issues: null | forks: 3 (2019-09-04 to 2019-09-15)
# Everything is better with friends: Executing SAS® code in Python scripts with
# SASPy, and turbocharging your SAS programming with open-source tooling
#
# Half-day class, Western Users of SAS Software (WUSS) 2019
###############################################################################
# Exercise 10: Getting SASPy Environment Info #
###############################################################################
# Lines 12-14 load modules needed for exercises and should be left as-is
from class_setup import print_with_title
from saspy import SASsession
sas = SASsession()
###############################################################################
# #
# Exercise 10. [Python w/ saspy] Get info about a SAS session #
# #
# Instructions: Uncomment the code immediately below, and then execute #
# #
###############################################################################
# Original Version
sas_submit_return_value = sas.submit('PROC PRODUCT_STATUS; RUN;')
sas_submit_log = sas_submit_return_value['LOG']
print_with_title(sas_submit_log, 'SAS log from PROC PRODUCT_STATUS:')
# Verify the output from PROC PRODUCT_STATUS is empty
print('The output from PROC PRODUCT_STATUS appears below.')
print(sas.submit('PROC PRODUCT_STATUS; RUN;')['LST'])
print('The output from PROC PRODUCT_STATUS appears above.')
# Compare the output of PROC PRODUCT_STATUS to PROC SETINIT
print(sas.submit('PROC SETINIT; RUN;')['LOG'])
# Refactor the sas.submit call to match Exercise 9
print(
sas.submit(
'''
PROC PRODUCT_STATUS;
RUN;
''',
results='TEXT'
)['LOG']
)
# Notes:
#
# 1. The SAS PRODUCT_STATUS procedure is called, and the following is printed:
# * the log returned by PROC PRODUCT_STATUS
#
# 2. As before, the sas object represents a connection to a SAS session, and
# its submit method is used to submit the PROC PRODUCT_STATUS step to the
# SAS kernel. A dictionary is returned with the following key-value pairs:
# * sas_submit_return_value['LST'] is a string comprising the results from
# executing PROC PRODUCT_STATUS, which is empty because no output is
# produced by this procedure
# * sas_submit_return_value['LOG'] is a string comprising the plain-text log
# resulting from executing PROC PRODUCT_STATUS
#
# 3. Like the Python command help('modules') gives us information about the
# Python modules available to our Python session, the PRODUCT_STATUS
# procedure gives us information about the products available in the SAS
# environment we're connected to.
#
# 4. For additional practice, try any or all of the following:
# * Verify the output from PROC PRODUCT_STATUS is empty.
# * Compare the output of PROC PRODUCT_STATUS to PROC SETINIT.
# * Refactor the sas.submit call to match Exercise 9, including
# triple-quotes (''') around the argument and embedded line breaks.
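# For example (illustrative sketch only), both parts of the submit result can
# be captured from a single call, as the dictionary return value described in
# Note 2 suggests:
#   result = sas.submit('PROC PRODUCT_STATUS; RUN;')
#   log_text, listing_text = result['LOG'], result['LST']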
avg_line_length: 42.065789 | max_line_length: 79 | alphanum_fraction: 0.59681

hexsha: 79407a05cb0099811dcbe1a2fab4b5592e9a3d32 | size: 6,174 | ext: py | lang: Python | path: protein_inference/benchmarking/benchmark_percolator_inference.py | repo: MassDynamics/protein-inference @ 05cc9738a3fcd074d8e6789bb24979a9837082cf | licenses: ["MIT"] | stars: 4 (2020-11-25) | issues: null | forks: 1 (2020-11-25)
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
class ProcessPercolatorInference():
def load_protein_table(self, path):
protein_table = pd.read_csv(path, sep="\t")
protein_table["peptides"] = protein_table.peptideIds.str.split(r"\s")
return protein_table[["ProteinId", "ProteinGroupId", "q-value","posterior_error_prob", "peptides"]]
def label_problem_networks(self, pns, percolator_protein_table):
'''Since the original workflow is built around percolator output,
we can assume the pn's are accurate and find proteins in the generated
network and assign the scores and groups according to this table'''
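        # The original method ends at its docstring. A minimal body would
        # presumably delegate to label_problem_network for each network
        # (illustrative sketch only, not part of the original module):
        #   for network in pns:
        #       self.label_problem_network(network, percolator_protein_table)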
def label_problem_network(self, network, percolator_protein_table):
'''Since the original workflow is built around percolator output,
we can assume the pn's are accurate and find proteins in the generated
network and assign the scores and groups according to this table'''
for _, row in percolator_protein_table.iterrows():
# get values
protein = row["ProteinId"]
group = row["ProteinGroupId"]
q_value = row["q-value"]
PEP = row["posterior_error_prob"]
peptides = row["peptides"]
# assign
if protein in network.nodes():
network.nodes[protein]["major"] = group
network.nodes[protein]["q_value"] = q_value
network.nodes[protein]["PEP"] = PEP
for peptide in peptides:
if peptide in network.nodes():
network.nodes[peptide]["allocated"] = group
# There are probably several good reasons peptides are lost
# in our network table during preprocessing.
# except:
# print("peptide", peptide, " is not in the table")
return network
class EvaluatePercolatorInference():
    '''This class is only about comparing q-values and FDR scores.
    Code has been written with ground-truth data in mind.'''
def plot_percolator_reprisal_predictions(self, reprisal_PI_df, percolator_PI_df,
real=False, fake=False):
comparison_df = pd.DataFrame.merge(reprisal_PI_df, percolator_PI_df,
how="outer",
left_on="protein_id",
right_on="ProteinId",
suffixes=["_rep", "_per"])
fig = go.Figure()
fig.add_trace(go.Scatter(x=[0, 1], y=[0, 1], mode="lines",
line=go.scatter.Line(
color="gray", dash="dashdot"),
showlegend=False))
fig.add_trace(go.Scatter(x=[0, 1], y=[0.01, 0.01], mode="lines",
line=go.scatter.Line(
color="gray", dash="dashdot"),
showlegend=False))
fig.add_trace(go.Scatter(x=[0.01, 0.01], y=[0, 1], mode="lines",
line=go.scatter.Line(
color="gray", dash="dashdot"),
showlegend=False))
if real and fake:
tmp = comparison_df[comparison_df.protein_id.apply(
lambda x: x in fake)]
fig.add_trace(go.Scatter(x=tmp["q-value_rep"], y=tmp["q-value_per"], mode='markers',
line=dict(color="DarkRed"),
name="Entrapment (Doesn't exist)", hovertext=tmp.ProteinId))
tmp = comparison_df[comparison_df.protein_id.apply(
lambda x: x in real)]
fig.add_trace(go.Scatter(x=tmp["q-value_rep"], y=tmp["q-value_per"], mode='markers',
line=dict(color="LightSeaGreen"),
name="True Target (exists)", hovertext=tmp.ProteinId))
else:
tmp = comparison_df
fig.add_trace(go.Scatter(x=tmp["q-value_rep"], y=tmp["q-value_per"], mode='markers',
line=dict(color="DarkRed"), name="All (no ground truth give)",
hovertext=tmp.ProteinId))
fig.update_layout(title="REPRISAL - PERCOLATOR Benchmark",
xaxis_title="REPRISAL q-value",
yaxis_title="Percolator q-value")
fig.update_xaxes(range=[0, 0.2])
fig.update_yaxes(range=[0, 0.2])
fig.show()
return
def compare_perc_rep_fdr(self, perc_protein_table, rep_protein_table):
comparison_df = pd.DataFrame.merge(rep_protein_table, perc_protein_table,
how="outer",
left_on="protein_id",
right_on="ProteinId",
suffixes=["_rep", "_per"])
tmp = comparison_df.rename(columns={"FDR": "REPRISAL FDR",
"q-value": "PERCOLATOR q-value"})
fig = px.scatter(tmp, x="REPRISAL FDR", y="PERCOLATOR q-value",
hover_data=["ProteinId", "REPRISAL FDR", "PERCOLATOR q-value"])
fig.show()
def check_perc_rep_agreement(self, perc_protein_table, rep_protein_table):
comparison_df = pd.DataFrame.merge(rep_protein_table, perc_protein_table,
how="outer",
left_on="protein_id",
right_on="ProteinId",
suffixes=["_rep", "_per"])
rep_not_perc = comparison_df[(comparison_df.FDR < 0.01) & (
comparison_df["q-value"] > 0.01)]
perc_not_rep = comparison_df[(comparison_df.FDR > 0.01) & (
comparison_df["q-value"] < 0.01)]
return rep_not_perc, perc_not_rep
avg_line_length: 48.234375 | max_line_length: 107 | alphanum_fraction: 0.521218

hexsha: 79407a58e267171f10bff394a3153f55e56d3621 | size: 4,143 | ext: py | lang: Python | path: venv/Lib/site-packages/torch/optim/adagrad.py | repo: countBMB/BenjiRepo @ 79d882263baaf2a11654ca67d2e5593074d36dfa | licenses: ["Apache-2.0"] | stars: 1 (2020-02-24) | issues: 4 (2021-06-02 to 2022-01-13) | forks: null
import torch
from .optimizer import Optimizer
class Adagrad(Optimizer):
"""Implements Adagrad algorithm.
It has been proposed in `Adaptive Subgradient Methods for Online Learning
and Stochastic Optimization`_.
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float, optional): learning rate (default: 1e-2)
lr_decay (float, optional): learning rate decay (default: 0)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
eps (float, optional): term added to the denominator to improve
numerical stability (default: 1e-10)
.. _Adaptive Subgradient Methods for Online Learning and Stochastic
Optimization: http://jmlr.org/papers/v12/duchi11a.html
"""
def __init__(self, params, lr=1e-2, lr_decay=0, weight_decay=0, initial_accumulator_value=0, eps=1e-10):
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= lr_decay:
raise ValueError("Invalid lr_decay value: {}".format(lr_decay))
if not 0.0 <= weight_decay:
raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
if not 0.0 <= initial_accumulator_value:
raise ValueError("Invalid initial_accumulator_value value: {}".format(initial_accumulator_value))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {}".format(eps))
defaults = dict(lr=lr, lr_decay=lr_decay, eps=eps, weight_decay=weight_decay,
initial_accumulator_value=initial_accumulator_value)
super(Adagrad, self).__init__(params, defaults)
for group in self.param_groups:
for p in group['params']:
state = self.state[p]
state['step'] = 0
state['sum'] = torch.full_like(p.data, initial_accumulator_value)
def share_memory(self):
for group in self.param_groups:
for p in group['params']:
state = self.state[p]
state['sum'].share_memory_()
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
state = self.state[p]
state['step'] += 1
if group['weight_decay'] != 0:
if p.grad.data.is_sparse:
raise RuntimeError("weight_decay option is not compatible with sparse gradients")
grad = grad.add(group['weight_decay'], p.data)
clr = group['lr'] / (1 + (state['step'] - 1) * group['lr_decay'])
if grad.is_sparse:
grad = grad.coalesce() # the update is non-linear so indices must be unique
grad_indices = grad._indices()
grad_values = grad._values()
size = grad.size()
def make_sparse(values):
constructor = grad.new
if grad_indices.dim() == 0 or values.dim() == 0:
return constructor().resize_as_(grad)
return constructor(grad_indices, values, size)
state['sum'].add_(make_sparse(grad_values.pow(2)))
std = state['sum'].sparse_mask(grad)
std_values = std._values().sqrt_().add_(group['eps'])
p.data.add_(-clr, make_sparse(grad_values / std_values))
else:
state['sum'].addcmul_(1, grad, grad)
std = state['sum'].sqrt().add_(group['eps'])
p.data.addcdiv_(-clr, grad, std)
return loss
avg_line_length: 41.019802 | max_line_length: 109 | alphanum_fraction: 0.562636

hexsha: 79407a79002883565f6e7eb9f81dba139fd8bebf | size: 2,462 | ext: py | lang: Python | path: dist/ba_data/python/ba/_asyncio.py | repo: driftywinds/bombsquad-modded-server @ 504c8b47f978508c6d06dd684536883d8c36972e | licenses: ["MIT"] | stars: 1 (2022-02-25) | issues: null | forks: 1 (2022-02-25)
# Released under the MIT License. See LICENSE for details.
#
"""Asyncio related functionality.
Exploring the idea of allowing Python coroutines to run gracefully
besides our internal event loop. They could prove useful for networking
operations or possibly game logic.
"""
from __future__ import annotations
from typing import TYPE_CHECKING
import asyncio
if TYPE_CHECKING:
from typing import Optional
import ba
# Our timer and event loop for the ballistica game thread.
_asyncio_timer: Optional[ba.Timer] = None
_asyncio_event_loop: Optional[asyncio.AbstractEventLoop] = None
def setup_asyncio() -> None:
"""Setup asyncio functionality for our game thread."""
# pylint: disable=global-statement
import _ba
from ba._generated.enums import TimeType
assert _ba.in_game_thread()
# Create our event-loop. We don't expect there to be one
# running on this thread before we do.
try:
asyncio.get_running_loop()
print('Found running asyncio loop; unexpected.')
except RuntimeError:
pass
global _asyncio_event_loop # pylint: disable=invalid-name
_asyncio_event_loop = asyncio.new_event_loop()
# Ideally we should integrate asyncio into our C++ Thread class's
# low level event loop so that asyncio timers/sockets/etc. could
# be true first-class citizens. For now, though, we can explicitly
# pump an asyncio loop periodically which gets us a decent
# approximation of that, which should be good enough for
# all but extremely time sensitive uses.
# See https://stackoverflow.com/questions/29782377/
# is-it-possible-to-run-only-a-single-step-of-the-asyncio-event-loop
def run_cycle() -> None:
assert _asyncio_event_loop is not None
_asyncio_event_loop.call_soon(_asyncio_event_loop.stop)
_asyncio_event_loop.run_forever()
global _asyncio_timer # pylint: disable=invalid-name
_asyncio_timer = _ba.Timer(1.0 / 30.0,
run_cycle,
timetype=TimeType.REAL,
repeat=True)
if bool(False):
async def aio_test() -> None:
print('TEST AIO TASK STARTING')
assert _asyncio_event_loop is not None
assert asyncio.get_running_loop() is _asyncio_event_loop
await asyncio.sleep(2.0)
print('TEST AIO TASK ENDING')
_asyncio_event_loop.create_task(aio_test())
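# --- Illustrative sketch (added commentary, not part of the original module) ---
# Once setup_asyncio() has run on the game thread, other game-thread code could hand
# coroutines to the periodically pumped loop as below. `fetch_scores` is a
# hypothetical coroutine used only for illustration.
def _example_submit_coroutine() -> None:
    async def fetch_scores() -> None:
        await asyncio.sleep(0.5)  # stand-in for real async work such as a network call
    assert _asyncio_event_loop is not None, 'call setup_asyncio() first'
    _asyncio_event_loop.create_task(fetch_scores())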
| 33.726027 | 72 | 0.692526 |
79407a913c46a13f3ba0bde4da9b49aaa30d60e4 | 56,592 | py | Python | pgmpy/tests/test_factors/test_discrete/test_Factor.py | daanknoope/pgmpy | b1ad5ec01837dc1fc369dd542971492fae642ab4 | ["MIT"] | null | null | null | pgmpy/tests/test_factors/test_discrete/test_Factor.py | daanknoope/pgmpy | b1ad5ec01837dc1fc369dd542971492fae642ab4 | ["MIT"] | 20 | 2019-02-22T09:24:57.000Z | 2019-02-25T14:53:54.000Z | pgmpy/tests/test_factors/test_discrete/test_Factor.py | daanknoope/pgmpy | b1ad5ec01837dc1fc369dd542971492fae642ab4 | ["MIT"] | null | null | null | import unittest
import warnings
from collections import OrderedDict
import numpy as np
import numpy.testing as np_test
from pgmpy.extern.six.moves import range
from pgmpy.factors.discrete import DiscreteFactor
from pgmpy.factors.discrete import JointProbabilityDistribution as JPD
from pgmpy.factors import factor_divide
from pgmpy.factors import factor_product
from pgmpy.factors.discrete.CPD import TabularCPD
from pgmpy.independencies import Independencies
from pgmpy.models import BayesianModel
from pgmpy.models import MarkovModel
class TestFactorInit(unittest.TestCase):
def test_class_init(self):
phi = DiscreteFactor(['x1', 'x2', 'x3'], [2, 2, 2], np.ones(8))
self.assertEqual(phi.variables, ['x1', 'x2', 'x3'])
np_test.assert_array_equal(phi.cardinality, np.array([2, 2, 2]))
np_test.assert_array_equal(phi.values, np.ones(8).reshape(2, 2, 2))
def test_class_init1(self):
phi = DiscreteFactor([1, 2, 3], [2, 3, 2], np.arange(12))
self.assertEqual(phi.variables, [1, 2, 3])
np_test.assert_array_equal(phi.cardinality, np.array([2, 3, 2]))
np_test.assert_array_equal(phi.values, np.arange(12).reshape(2, 3, 2))
def test_class_init_sizeerror(self):
self.assertRaises(ValueError, DiscreteFactor, ['x1', 'x2', 'x3'], [2, 2, 2], np.ones(9))
def test_class_init_typeerror(self):
self.assertRaises(TypeError, DiscreteFactor, 'x1', [3], [1, 2, 3])
self.assertRaises(ValueError, DiscreteFactor, ['x1', 'x1', 'x3'], [2, 3, 2], range(12))
def test_init_size_var_card_not_equal(self):
self.assertRaises(ValueError, DiscreteFactor, ['x1', 'x2'], [2], np.ones(2))
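# --- Illustrative sketch (added for clarity, not part of the original test suite) ---
# The init tests above rely on pgmpy's convention that the flat `values` vector is
# reshaped row-major into one axis per variable, in the order the variables are given.
def _factor_layout_demo():
    phi = DiscreteFactor(['x1', 'x2', 'x3'], [2, 3, 2], np.arange(12))
    # phi.values[i, j, k] holds the entry for x1=i, x2=j, x3=k
    return phi.values.shape  # (2, 3, 2)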
class TestFactorMethods(unittest.TestCase):
def setUp(self):
self.phi = DiscreteFactor(['x1', 'x2', 'x3'], [2, 2, 2], np.random.uniform(5, 10, size=8))
self.phi1 = DiscreteFactor(['x1', 'x2', 'x3'], [2, 3, 2], range(12))
self.phi2 = DiscreteFactor([('x1', 0), ('x2', 0), ('x3', 0)], [2, 3, 2], range(12))
# This larger factor (phi3) caused a bug in reduce
card3 = [3, 3, 3, 2, 2, 2, 2, 2, 2]
self.phi3 = DiscreteFactor(['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I'],
card3, np.arange(np.prod(card3), dtype=np.float))
self.tup1 = ('x1', 'x2')
self.tup2 = ('x2', 'x3')
self.tup3 = ('x3', (1, 'x4'))
self.phi4 = DiscreteFactor([self.tup1, self.tup2, self.tup3], [2, 3, 4], np.random.uniform(3, 10, size=24))
self.phi5 = DiscreteFactor([self.tup1, self.tup2, self.tup3], [2, 3, 4], range(24))
self.card6 = [4, 2, 1, 3, 5, 6]
self.phi6 = DiscreteFactor([self.tup1, self.tup2, self.tup3, self.tup1 + self.tup2,
self.tup2 + self.tup3, self.tup3 + self.tup1], self.card6,
np.arange(np.prod(self.card6), dtype=np.float))
self.var1 = 'x1'
self.var2 = ('x2', 1)
self.var3 = frozenset(['x1', 'x2'])
self.phi7 = DiscreteFactor([self.var1, self.var2], [3, 2], [3, 2, 4, 5, 9, 8])
self.phi8 = DiscreteFactor([self.var2, self.var3], [2, 2], [2, 1, 5, 6])
self.phi9 = DiscreteFactor([self.var1, self.var3], [3, 2], [3, 2, 4, 5, 9, 8])
self.phi10 = DiscreteFactor([self.var3], [2], [3, 6])
def test_scope(self):
self.assertListEqual(self.phi.scope(), ['x1', 'x2', 'x3'])
self.assertListEqual(self.phi1.scope(), ['x1', 'x2', 'x3'])
self.assertListEqual(self.phi4.scope(), [self.tup1, self.tup2, self.tup3])
def test_assignment(self):
self.assertListEqual(self.phi.assignment([0]), [[('x1', 0), ('x2', 0), ('x3', 0)]])
self.assertListEqual(self.phi.assignment([4, 5, 6]), [[('x1', 1), ('x2', 0), ('x3', 0)],
[('x1', 1), ('x2', 0), ('x3', 1)],
[('x1', 1), ('x2', 1), ('x3', 0)]])
self.assertListEqual(self.phi1.assignment(np.array([4, 5, 6])), [[('x1', 0), ('x2', 2), ('x3', 0)],
[('x1', 0), ('x2', 2), ('x3', 1)],
[('x1', 1), ('x2', 0), ('x3', 0)]])
self.assertListEqual(self.phi4.assignment(np.array([11, 12, 23])),
[[(self.tup1, 0), (self.tup2, 2), (self.tup3, 3)],
[(self.tup1, 1), (self.tup2, 0), (self.tup3, 0)],
[(self.tup1, 1), (self.tup2, 2), (self.tup3, 3)]])
def test_assignment_indexerror(self):
self.assertRaises(IndexError, self.phi.assignment, [10])
self.assertRaises(IndexError, self.phi.assignment, [1, 3, 10, 5])
self.assertRaises(IndexError, self.phi.assignment, np.array([1, 3, 10, 5]))
self.assertRaises(IndexError, self.phi4.assignment, [2, 24])
self.assertRaises(IndexError, self.phi4.assignment, np.array([24, 2, 4, 30]))
def test_get_cardinality(self):
self.assertEqual(self.phi.get_cardinality(['x1']), {'x1': 2})
self.assertEqual(self.phi.get_cardinality(['x2']), {'x2': 2})
self.assertEqual(self.phi.get_cardinality(['x3']), {'x3': 2})
self.assertEqual(self.phi.get_cardinality(['x1', 'x2']), {'x1': 2, 'x2': 2})
self.assertEqual(self.phi.get_cardinality(['x1', 'x3']), {'x1': 2, 'x3': 2})
self.assertEqual(self.phi.get_cardinality(['x1', 'x2', 'x3']), {'x1': 2, 'x2': 2, 'x3': 2})
self.assertEqual(self.phi4.get_cardinality([self.tup1, self.tup3]),
{self.tup1: 2, self.tup3: 4})
def test_get_cardinality_scopeerror(self):
self.assertRaises(ValueError, self.phi.get_cardinality, ['x4'])
self.assertRaises(ValueError, self.phi4.get_cardinality, [('x1', 'x4')])
self.assertRaises(ValueError, self.phi4.get_cardinality, [('x3', (2, 'x4'))])
def test_get_cardinality_typeerror(self):
self.assertRaises(TypeError, self.phi.get_cardinality, 'x1')
def test_marginalize(self):
self.phi1.marginalize(['x1'])
np_test.assert_array_equal(self.phi1.values, np.array([[6, 8],
[10, 12],
[14, 16]]))
self.phi1.marginalize(['x2'])
np_test.assert_array_equal(self.phi1.values, np.array([30, 36]))
self.phi1.marginalize(['x3'])
np_test.assert_array_equal(self.phi1.values, np.array(66))
self.phi5.marginalize([self.tup1])
np_test.assert_array_equal(self.phi5.values, np.array([[12, 14, 16, 18],
[20, 22, 24, 26],
[28, 30, 32, 34]]))
self.phi5.marginalize([self.tup2])
np_test.assert_array_equal(self.phi5.values, np.array([60, 66, 72, 78]))
self.phi5.marginalize([self.tup3])
np_test.assert_array_equal(self.phi5.values, np.array([276]))
def test_marginalize_scopeerror(self):
self.assertRaises(ValueError, self.phi.marginalize, ['x4'])
self.phi.marginalize(['x1'])
self.assertRaises(ValueError, self.phi.marginalize, ['x1'])
self.assertRaises(ValueError, self.phi4.marginalize, [('x1', 'x3')])
self.phi4.marginalize([self.tup2])
self.assertRaises(ValueError, self.phi4.marginalize, [self.tup2])
def test_marginalize_typeerror(self):
self.assertRaises(TypeError, self.phi.marginalize, 'x1')
def test_marginalize_shape(self):
values = ['A', 'D', 'F', 'H']
phi3_mar = self.phi3.marginalize(values, inplace=False)
# Previously a sorting error caused these to be different
np_test.assert_array_equal(phi3_mar.values.shape, phi3_mar.cardinality)
phi6_mar = self.phi6.marginalize([self.tup1, self.tup2], inplace=False)
np_test.assert_array_equal(phi6_mar.values.shape, phi6_mar.cardinality)
self.phi6.marginalize([self.tup1, self.tup3 + self.tup1], inplace=True)
np_test.assert_array_equal(self.phi6.values.shape, self.phi6.cardinality)
def test_normalize(self):
self.phi1.normalize()
np_test.assert_almost_equal(self.phi1.values,
np.array([[[0, 0.01515152],
[0.03030303, 0.04545455],
[0.06060606, 0.07575758]],
[[0.09090909, 0.10606061],
[0.12121212, 0.13636364],
[0.15151515, 0.16666667]]]))
self.phi5.normalize()
np_test.assert_almost_equal(self.phi5.values,
[[[0., 0.00362319, 0.00724638, 0.01086957],
[0.01449275, 0.01811594, 0.02173913, 0.02536232],
[0.02898551, 0.0326087, 0.03623188, 0.03985507]],
[[0.04347826, 0.04710145, 0.05072464, 0.05434783],
[0.05797101, 0.0615942, 0.06521739, 0.06884058],
[0.07246377, 0.07608696, 0.07971014, 0.08333333]]])
def test_reduce(self):
self.phi1.reduce([('x1', 0), ('x2', 0)])
np_test.assert_array_equal(self.phi1.values, np.array([0, 1]))
self.phi5.reduce([(self.tup1, 0), (self.tup3, 1)])
np_test.assert_array_equal(self.phi5.values, np.array([1, 5, 9]))
def test_reduce1(self):
self.phi1.reduce([('x2', 0), ('x1', 0)])
np_test.assert_array_equal(self.phi1.values, np.array([0, 1]))
self.phi5.reduce([(self.tup3, 1), (self.tup1, 0)])
np_test.assert_array_equal(self.phi5.values, np.array([1, 5, 9]))
def test_reduce_shape(self):
values = [('A', 0), ('D', 0), ('F', 0), ('H', 1)]
phi3_reduced = self.phi3.reduce(values, inplace=False)
# Previously a sorting error caused these to be different
np_test.assert_array_equal(phi3_reduced.values.shape, phi3_reduced.cardinality)
values = [(self.tup1, 2), (self.tup3, 0)]
phi6_reduced = self.phi6.reduce(values, inplace=False)
np_test.assert_array_equal(phi6_reduced.values.shape, phi6_reduced.cardinality)
self.phi6.reduce(values, inplace=True)
np_test.assert_array_equal(self.phi6.values.shape, self.phi6.cardinality)
def test_complete_reduce(self):
self.phi1.reduce([('x1', 0), ('x2', 0), ('x3', 1)])
np_test.assert_array_equal(self.phi1.values, np.array([1]))
np_test.assert_array_equal(self.phi1.cardinality, np.array([]))
np_test.assert_array_equal(self.phi1.variables, OrderedDict())
self.phi5.reduce([(('x1', 'x2'), 1), (('x2', 'x3'), 0), (('x3', (1, 'x4')), 3)])
np_test.assert_array_equal(self.phi5.values, np.array([15]))
np_test.assert_array_equal(self.phi5.cardinality, np.array([]))
np_test.assert_array_equal(self.phi5.variables, OrderedDict())
def test_reduce_typeerror(self):
self.assertRaises(TypeError, self.phi1.reduce, 'x10')
self.assertRaises(TypeError, self.phi1.reduce, ['x10'])
self.assertRaises(TypeError, self.phi1.reduce, [('x1', 'x2')])
self.assertRaises(TypeError, self.phi1.reduce, [(0, 'x1')])
self.assertRaises(TypeError, self.phi1.reduce, [(0.1, 'x1')])
self.assertRaises(TypeError, self.phi1.reduce, [(0.1, 0.1)])
self.assertRaises(TypeError, self.phi1.reduce, [('x1', 0.1)])
self.assertRaises(TypeError, self.phi5.reduce, [(('x1', 'x2'), 0), (('x2', 'x3'), 0.2)])
def test_reduce_scopeerror(self):
self.assertRaises(ValueError, self.phi1.reduce, [('x4', 1)])
self.assertRaises(ValueError, self.phi5.reduce, [((('x1', 0.1), 0))])
def test_reduce_sizeerror(self):
self.assertRaises(IndexError, self.phi1.reduce, [('x3', 5)])
self.assertRaises(IndexError, self.phi5.reduce, [(('x2', 'x3'), 3)])
def test_identity_factor(self):
identity_factor = self.phi.identity_factor()
self.assertEqual(list(identity_factor.variables), ['x1', 'x2', 'x3'])
np_test.assert_array_equal(identity_factor.cardinality, [2, 2, 2])
np_test.assert_array_equal(identity_factor.values, np.ones(8).reshape(2, 2, 2))
identity_factor1 = self.phi5.identity_factor()
self.assertEqual(list(identity_factor1.variables), [self.tup1, self.tup2, self.tup3])
np_test.assert_array_equal(identity_factor1.cardinality, [2, 3, 4])
np_test.assert_array_equal(identity_factor1.values, np.ones(24).reshape(2, 3, 4))
def test_factor_product(self):
phi = DiscreteFactor(['x1', 'x2'], [2, 2], range(4))
phi1 = DiscreteFactor(['x3', 'x4'], [2, 2], range(4))
prod = factor_product(phi, phi1)
expected_factor = DiscreteFactor(['x1', 'x2', 'x3', 'x4'], [2, 2, 2, 2],
[0, 0, 0, 0, 0, 1, 2, 3, 0, 2, 4, 6, 0, 3, 6, 9])
self.assertEqual(prod, expected_factor)
self.assertEqual(sorted(prod.variables), ['x1', 'x2', 'x3', 'x4'])
phi = DiscreteFactor(['x1', 'x2'], [3, 2], range(6))
phi1 = DiscreteFactor(['x2', 'x3'], [2, 2], range(4))
prod = factor_product(phi, phi1)
expected_factor = DiscreteFactor(['x1', 'x2', 'x3'], [3, 2, 2],
[0, 0, 2, 3, 0, 2, 6, 9, 0, 4, 10, 15])
self.assertEqual(prod, expected_factor)
self.assertEqual(prod.variables, expected_factor.variables)
prod = factor_product(self.phi7, self.phi8)
expected_factor = DiscreteFactor([self.var1, self.var2, self.var3], [3, 2, 2],
[6, 3, 10, 12, 8, 4, 25, 30, 18, 9, 40, 48])
self.assertEqual(prod, expected_factor)
self.assertEqual(prod.variables, expected_factor.variables)
def test_product(self):
phi = DiscreteFactor(['x1', 'x2'], [2, 2], range(4))
phi1 = DiscreteFactor(['x3', 'x4'], [2, 2], range(4))
prod = phi.product(phi1, inplace=False)
expected_factor = DiscreteFactor(['x1', 'x2', 'x3', 'x4'], [2, 2, 2, 2],
[0, 0, 0, 0, 0, 1, 2, 3, 0, 2, 4, 6, 0, 3, 6, 9])
self.assertEqual(prod, expected_factor)
self.assertEqual(sorted(prod.variables), ['x1', 'x2', 'x3', 'x4'])
phi = DiscreteFactor(['x1', 'x2'], [3, 2], range(6))
phi1 = DiscreteFactor(['x2', 'x3'], [2, 2], range(4))
prod = phi.product(phi1, inplace=False)
expected_factor = DiscreteFactor(['x1', 'x2', 'x3'], [3, 2, 2],
[0, 0, 2, 3, 0, 2, 6, 9, 0, 4, 10, 15])
self.assertEqual(prod, expected_factor)
self.assertEqual(sorted(prod.variables), ['x1', 'x2', 'x3'])
phi7_copy = self.phi7
phi7_copy.product(self.phi8, inplace=True)
expected_factor = DiscreteFactor([self.var1, self.var2, self.var3], [3, 2, 2],
[6, 3, 10, 12, 8, 4, 25, 30, 18, 9, 40, 48])
self.assertEqual(expected_factor, phi7_copy)
self.assertEqual(phi7_copy.variables, [self.var1, self.var2, self.var3])
def test_factor_product_non_factor_arg(self):
self.assertRaises(TypeError, factor_product, 1, 2)
def test_factor_mul(self):
phi = DiscreteFactor(['x1', 'x2'], [2, 2], range(4))
phi1 = DiscreteFactor(['x3', 'x4'], [2, 2], range(4))
prod = phi * phi1
sorted_vars = ['x1', 'x2', 'x3', 'x4']
for axis in range(prod.values.ndim):
exchange_index = prod.variables.index(sorted_vars[axis])
prod.variables[axis], prod.variables[exchange_index] = prod.variables[exchange_index], prod.variables[axis]
prod.values = prod.values.swapaxes(axis, exchange_index)
np_test.assert_almost_equal(prod.values.ravel(),
np.array([0, 0, 0, 0, 0, 1, 2, 3,
0, 2, 4, 6, 0, 3, 6, 9]))
self.assertEqual(prod.variables, ['x1', 'x2', 'x3', 'x4'])
def test_factor_divide(self):
phi1 = DiscreteFactor(['x1', 'x2'], [2, 2], [1, 2, 2, 4])
phi2 = DiscreteFactor(['x1'], [2], [1, 2])
expected_factor = phi1.divide(phi2, inplace=False)
phi3 = DiscreteFactor(['x1', 'x2'], [2, 2], [1, 2, 1, 2])
self.assertEqual(phi3, expected_factor)
self.phi9.divide(self.phi10, inplace=True)
np_test.assert_array_almost_equal(self.phi9.values, np.array([1.000000, 0.333333, 1.333333,
0.833333, 3.000000, 1.333333]).reshape(3, 2))
self.assertEqual(self.phi9.variables, [self.var1, self.var3])
def test_factor_divide_truediv(self):
phi1 = DiscreteFactor(['x1', 'x2'], [2, 2], [1, 2, 2, 4])
phi2 = DiscreteFactor(['x1'], [2], [1, 2])
div = phi1 / phi2
phi3 = DiscreteFactor(['x1', 'x2'], [2, 2], [1, 2, 1, 2])
self.assertEqual(phi3, div)
self.phi9 = self.phi9 / self.phi10
np_test.assert_array_almost_equal(self.phi9.values, np.array([1.000000, 0.333333, 1.333333,
0.833333, 3.000000, 1.333333]).reshape(3, 2))
self.assertEqual(self.phi9.variables, [self.var1, self.var3])
def test_factor_divide_invalid(self):
phi1 = DiscreteFactor(['x1', 'x2'], [2, 2], [1, 2, 3, 4])
phi2 = DiscreteFactor(['x1'], [2], [0, 2])
div = phi1.divide(phi2, inplace=False)
np_test.assert_array_equal(div.values.ravel(), np.array([np.inf, np.inf, 1.5, 2]))
def test_factor_divide_no_common_scope(self):
phi1 = DiscreteFactor(['x1', 'x2'], [2, 2], [1, 2, 3, 4])
phi2 = DiscreteFactor(['x3'], [2], [0, 2])
self.assertRaises(ValueError, factor_divide, phi1, phi2)
phi2 = DiscreteFactor([self.var3], [2], [2, 1])
self.assertRaises(ValueError, factor_divide, self.phi7, phi2)
def test_factor_divide_non_factor_arg(self):
self.assertRaises(TypeError, factor_divide, 1, 1)
def test_eq(self):
self.assertFalse(self.phi == self.phi1)
self.assertTrue(self.phi == self.phi)
self.assertTrue(self.phi1 == self.phi1)
self.assertTrue(self.phi5 == self.phi5)
self.assertFalse(self.phi5 == self.phi6)
self.assertTrue(self.phi6 == self.phi6)
def test_eq1(self):
phi1 = DiscreteFactor(['x1', 'x2', 'x3'], [2, 4, 3], range(24))
phi2 = DiscreteFactor(['x2', 'x1', 'x3'], [4, 2, 3],
[0, 1, 2, 12, 13, 14, 3, 4, 5, 15, 16, 17, 6, 7,
8, 18, 19, 20, 9, 10, 11, 21, 22, 23])
self.assertTrue(phi1 == phi2)
self.assertEqual(phi2.variables, ['x2', 'x1', 'x3'])
phi3 = DiscreteFactor([self.tup1, self.tup2, self.tup3], [2, 4, 3], range(24))
phi4 = DiscreteFactor([self.tup2, self.tup1, self.tup3], [4, 2, 3],
[0, 1, 2, 12, 13, 14, 3, 4, 5, 15, 16, 17,
6, 7, 8, 18, 19, 20, 9, 10, 11, 21, 22, 23])
self.assertTrue(phi3 == phi4)
def test_hash(self):
phi1 = DiscreteFactor(['x1', 'x2'], [2, 2], [1, 2, 3, 4])
phi2 = DiscreteFactor(['x2', 'x1'], [2, 2], [1, 3, 2, 4])
self.assertEqual(hash(phi1), hash(phi2))
phi1 = DiscreteFactor(['x1', 'x2', 'x3'], [2, 2, 2], range(8))
phi2 = DiscreteFactor(['x3', 'x1', 'x2'], [2, 2, 2], [0, 2, 4, 6, 1, 3, 5, 7])
self.assertEqual(hash(phi1), hash(phi2))
var1 = TestHash(1, 2)
phi3 = DiscreteFactor([var1, self.var2, self.var3], [2, 4, 3], range(24))
phi4 = DiscreteFactor([self.var2, var1, self.var3], [4, 2, 3],
[0, 1, 2, 12, 13, 14, 3, 4, 5, 15, 16, 17,
6, 7, 8, 18, 19, 20, 9, 10, 11, 21, 22, 23])
self.assertEqual(hash(phi3), hash(phi4))
var1 = TestHash(2, 3)
var2 = TestHash('x2', 1)
phi3 = DiscreteFactor([var1, var2, self.var3], [2, 2, 2], range(8))
phi4 = DiscreteFactor([self.var3, var1, var2], [2, 2, 2], [0, 2, 4, 6, 1, 3, 5, 7])
self.assertEqual(hash(phi3), hash(phi4))
def test_maximize_single(self):
self.phi1.maximize(['x1'])
self.assertEqual(self.phi1, DiscreteFactor(['x2', 'x3'], [3, 2], [6, 7, 8, 9, 10, 11]))
self.phi1.maximize(['x2'])
self.assertEqual(self.phi1, DiscreteFactor(['x3'], [2], [10, 11]))
self.phi2 = DiscreteFactor(['x1', 'x2', 'x3'], [3, 2, 2], [0.25, 0.35, 0.08, 0.16, 0.05, 0.07,
0.00, 0.00, 0.15, 0.21, 0.08, 0.18])
self.phi2.maximize(['x2'])
self.assertEqual(self.phi2, DiscreteFactor(['x1', 'x3'], [3, 2], [0.25, 0.35, 0.05,
0.07, 0.15, 0.21]))
self.phi5.maximize([('x1', 'x2')])
self.assertEqual(self.phi5, DiscreteFactor([('x2', 'x3'), ('x3', (1, 'x4'))], [3, 4],
[12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23]))
self.phi5.maximize([('x2', 'x3')])
self.assertEqual(self.phi5, DiscreteFactor([('x3', (1, 'x4'))], [4], [20, 21, 22, 23]))
def test_maximize_list(self):
self.phi1.maximize(['x1', 'x2'])
self.assertEqual(self.phi1, DiscreteFactor(['x3'], [2], [10, 11]))
self.phi5.maximize([('x1', 'x2'), ('x2', 'x3')])
self.assertEqual(self.phi5, DiscreteFactor([('x3', (1, 'x4'))], [4], [20, 21, 22, 23]))
def test_maximize_shape(self):
values = ['A', 'D', 'F', 'H']
phi3_max = self.phi3.maximize(values, inplace=False)
# Previously a sorting error caused these to be different
np_test.assert_array_equal(phi3_max.values.shape, phi3_max.cardinality)
phi = DiscreteFactor([self.var1, self.var2, self.var3], [3, 2, 2], [3, 2, 4, 5, 9, 8, 3, 2, 4, 5, 9, 8])
phi_max = phi.marginalize([self.var1, self.var2], inplace=False)
np_test.assert_array_equal(phi_max.values.shape, phi_max.cardinality)
def test_maximize_scopeerror(self):
self.assertRaises(ValueError, self.phi.maximize, ['x10'])
def test_maximize_typeerror(self):
self.assertRaises(TypeError, self.phi.maximize, 'x1')
def tearDown(self):
del self.phi
del self.phi1
del self.phi2
del self.phi3
del self.phi4
del self.phi5
del self.phi6
del self.phi7
del self.phi8
del self.phi9
del self.phi10
class TestHash:
# Used to check the hash function of DiscreteFactor class.
def __init__(self, x, y):
self.x = x
self.y = y
def __hash__(self):
return hash(str(self.x) + str(self.y))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.x == other.x and self.y == other.y
class TestTabularCPDInit(unittest.TestCase):
def test_cpd_init(self):
cpd = TabularCPD('grade', 3, [[0.1, 0.1, 0.1]])
self.assertEqual(cpd.variable, 'grade')
self.assertEqual(cpd.variable_card, 3)
self.assertEqual(list(cpd.variables), ['grade'])
np_test.assert_array_equal(cpd.cardinality, np.array([3]))
np_test.assert_array_almost_equal(cpd.values, np.array([0.1, 0.1, 0.1]))
values = [[0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
[0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
[0.8, 0.8, 0.8, 0.8, 0.8, 0.8]]
evidence = ['intel', 'diff']
evidence_card = [3, 2]
valid_value_inputs = [values, np.asarray(values)]
valid_evidence_inputs = [evidence, set(evidence), np.asarray(evidence)]
valid_evidence_card_inputs = [evidence_card, np.asarray(evidence_card)]
for value in valid_value_inputs:
for evidence in valid_evidence_inputs:
for evidence_card in valid_evidence_card_inputs:
cpd = TabularCPD('grade', 3, values, evidence=['intel', 'diff'], evidence_card=[3, 2])
self.assertEqual(cpd.variable, 'grade')
self.assertEqual(cpd.variable_card, 3)
np_test.assert_array_equal(cpd.cardinality, np.array([3, 3, 2]))
self.assertListEqual(list(cpd.variables), ['grade', 'intel', 'diff'])
np_test.assert_array_equal(cpd.values, np.array([0.1, 0.1, 0.1, 0.1, 0.1, 0.1,
0.1, 0.1, 0.1, 0.1, 0.1, 0.1,
0.8, 0.8, 0.8, 0.8, 0.8, 0.8]).reshape(3, 3, 2))
cpd = TabularCPD('grade', 3, [[0.1, 0.1],
[0.1, 0.1],
[0.8, 0.8]],
evidence=['evi1'], evidence_card=[2.0])
self.assertEqual(cpd.variable, 'grade')
self.assertEqual(cpd.variable_card, 3)
np_test.assert_array_equal(cpd.cardinality, np.array([3, 2]))
self.assertListEqual(list(cpd.variables), ['grade', 'evi1'])
np_test.assert_array_equal(cpd.values, np.array([0.1, 0.1,
0.1, 0.1,
0.8, 0.8]).reshape(3, 2))
def test_cpd_init_event_card_not_int(self):
self.assertRaises(TypeError, TabularCPD, 'event', '2', [[0.1, 0.9]])
def test_cpd_init_cardinality_not_specified(self):
self.assertRaises(ValueError, TabularCPD, 'event', 3, [[0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
[0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
[0.8, 0.8, 0.8, 0.8, 0.8, 0.8]],
['evi1', 'evi2'], [5])
self.assertRaises(ValueError, TabularCPD, 'event', 3, [[0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
[0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
[0.8, 0.8, 0.8, 0.8, 0.8, 0.8]],
['evi1', 'evi2'], [5.0])
self.assertRaises(ValueError, TabularCPD, 'event', 3, [[0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
[0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
[0.8, 0.8, 0.8, 0.8, 0.8, 0.8]],
['evi1'], [5, 6])
self.assertRaises(TypeError, TabularCPD, 'event', 3, [[0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
[0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
[0.8, 0.8, 0.8, 0.8, 0.8, 0.8]],
'evi1', [5, 6])
def test_cpd_init_value_not_2d(self):
self.assertRaises(TypeError, TabularCPD, 'event', 3, [[[0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
[0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
[0.8, 0.8, 0.8, 0.8, 0.8, 0.8]]],
['evi1', 'evi2'], [5, 6])
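# --- Illustrative sketch (added for clarity, not part of the original test suite) ---
# In the CPD tests above, each row of the 2-D value table is one state of the child
# variable and each column is one configuration of the evidence variables (here the
# 3 * 2 = 6 combinations of intel and diff), so every column sums to 1.
def _cpd_layout_demo():
    cpd = TabularCPD('grade', 3, [[0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
                                  [0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
                                  [0.8, 0.8, 0.8, 0.8, 0.8, 0.8]],
                     evidence=['intel', 'diff'], evidence_card=[3, 2])
    return cpd.get_values().sum(axis=0)  # six columns, each summing to 1.0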
class TestTabularCPDMethods(unittest.TestCase):
def setUp(self):
sn = {'intel': ['low', 'medium', 'high'],
'diff': ['low', 'high'],
'grade' : ['grade(0)', 'grade(1)', 'grade(2)', 'grade(3)', 'grade(4)', 'grade(5)']}
self.cpd = TabularCPD('grade', 3, [[0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
[0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
[0.8, 0.8, 0.8, 0.8, 0.8, 0.8]],
evidence=['intel', 'diff'], evidence_card=[3, 2], state_names = sn)
self.cpd2 = TabularCPD('J', 2, [[0.9, 0.3, 0.9, 0.3, 0.8, 0.8, 0.4, 0.4],
[0.1, 0.7, 0.1, 0.7, 0.2, 0.2, 0.6, 0.6]],
evidence=['A', 'B', 'C'], evidence_card=[2, 2, 2])
def test_marginalize_1(self):
self.cpd.marginalize(['diff'])
self.assertEqual(self.cpd.variable, 'grade')
self.assertEqual(self.cpd.variable_card, 3)
self.assertListEqual(list(self.cpd.variables), ['grade', 'intel'])
np_test.assert_array_equal(self.cpd.cardinality, np.array([3, 3]))
np_test.assert_array_equal(self.cpd.values.ravel(), np.array([0.1, 0.1, 0.1,
0.1, 0.1, 0.1,
0.8, 0.8, 0.8]))
def test_marginalize_2(self):
self.assertRaises(ValueError, self.cpd.marginalize, ['grade'])
def test_marginalize_3(self):
copy_cpd = self.cpd.copy()
copy_cpd.marginalize(['intel', 'diff'])
self.cpd.marginalize(['intel'])
self.cpd.marginalize(['diff'])
np_test.assert_array_almost_equal(self.cpd.values, copy_cpd.values)
def test_normalize(self):
cpd_un_normalized = TabularCPD('grade', 2, [[0.7, 0.2, 0.6, 0.2], [0.4, 0.4, 0.4, 0.8]],
['intel', 'diff'], [2, 2])
cpd_un_normalized.normalize()
np_test.assert_array_almost_equal(cpd_un_normalized.values, np.array([[[0.63636364, 0.33333333],
[0.6, 0.2]],
[[0.36363636, 0.66666667],
[0.4, 0.8]]]))
def test_normalize_not_in_place(self):
cpd_un_normalized = TabularCPD('grade', 2, [[0.7, 0.2, 0.6, 0.2], [0.4, 0.4, 0.4, 0.8]],
['intel', 'diff'], [2, 2])
np_test.assert_array_almost_equal(cpd_un_normalized.normalize(inplace=False).values,
np.array([[[0.63636364, 0.33333333],
[0.6, 0.2]],
[[0.36363636, 0.66666667],
[0.4, 0.8]]]))
def test_normalize_original_safe(self):
cpd_un_normalized = TabularCPD('grade', 2, [[0.7, 0.2, 0.6, 0.2], [0.4, 0.4, 0.4, 0.8]],
['intel', 'diff'], [2, 2])
cpd_un_normalized.normalize(inplace=False)
np_test.assert_array_almost_equal(cpd_un_normalized.values, np.array([[[0.7, 0.2], [0.6, 0.2]],
[[0.4, 0.4], [0.4, 0.8]]]))
def test__repr__(self):
grade_cpd = TabularCPD('grade', 3, [[0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
[0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
[0.8, 0.8, 0.8, 0.8, 0.8, 0.8]],
evidence=['intel', 'diff'], evidence_card=[3, 2])
intel_cpd = TabularCPD('intel', 3, [[0.5], [0.3], [0.2]])
diff_cpd = TabularCPD('grade', 3, [[0.1, 0.1], [0.1, 0.1], [0.8, 0.8]], evidence=['diff'], evidence_card=[2])
self.assertEqual(repr(grade_cpd), '<TabularCPD representing P(grade:3 | intel:3, diff:2) at {address}>'
.format(address=hex(id(grade_cpd))))
self.assertEqual(repr(intel_cpd), '<TabularCPD representing P(intel:3) at {address}>'
.format(address=hex(id(intel_cpd))))
self.assertEqual(repr(diff_cpd), '<TabularCPD representing P(grade:3 | diff:2) at {address}>'
.format(address=hex(id(diff_cpd))))
def test_copy(self):
copy_cpd = self.cpd.copy()
np_test.assert_array_equal(self.cpd.get_values(), copy_cpd.get_values())
def test_copy_original_safe(self):
copy_cpd = self.cpd.copy()
copy_cpd.reorder_parents(['diff', 'intel'])
np_test.assert_array_equal(self.cpd.get_values(),
np.array([[0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
[0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
[0.8, 0.8, 0.8, 0.8, 0.8, 0.8]]))
def test_copy_state_names(self):
copy_cpd = self.cpd.copy()
self.assertEqual(self.cpd.state_names, copy_cpd.state_names)
def test_reduce_1(self):
self.cpd.reduce([('diff', 0)])
np_test.assert_array_equal(self.cpd.get_values(), np.array([[0.1, 0.1, 0.1],
[0.1, 0.1, 0.1],
[0.8, 0.8, 0.8]]))
def test_reduce_2(self):
self.cpd.reduce([('intel', 0)])
np_test.assert_array_equal(self.cpd.get_values(), np.array([[0.1, 0.1],
[0.1, 0.1],
[0.8, 0.8]]))
def test_reduce_3(self):
self.cpd.reduce([('intel', 0), ('diff', 0)])
np_test.assert_array_equal(self.cpd.get_values(), np.array([[0.1],
[0.1],
[0.8]]))
def test_reduce_4(self):
self.assertRaises(ValueError, self.cpd.reduce, [('grade', 0)])
def test_reduce_5(self):
copy_cpd = self.cpd.copy()
copy_cpd.reduce([('intel', 2), ('diff', 1)])
self.cpd.reduce([('intel', 2)])
self.cpd.reduce([('diff', 1)])
np_test.assert_array_almost_equal(self.cpd.values, copy_cpd.values)
def test_get_values(self):
np_test.assert_array_equal(self.cpd.get_values(),
np.array([[0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
[0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
[0.8, 0.8, 0.8, 0.8, 0.8, 0.8]]))
def test_reorder_parents_inplace(self):
new_vals = self.cpd2.reorder_parents(['B', 'A', 'C'])
np_test.assert_array_equal(new_vals, np.array([[0.9, 0.3, 0.8, 0.8, 0.9, 0.3, 0.4, 0.4],
[0.1, 0.7, 0.2, 0.2, 0.1, 0.7, 0.6, 0.6]]))
np_test.assert_array_equal(self.cpd2.get_values(),
np.array([[0.9, 0.3, 0.8, 0.8, 0.9, 0.3, 0.4, 0.4],
[0.1, 0.7, 0.2, 0.2, 0.1, 0.7, 0.6, 0.6]]))
def test_reorder_parents(self):
new_vals = self.cpd2.reorder_parents(['B', 'A', 'C'])
np_test.assert_array_equal(new_vals, np.array([[0.9, 0.3, 0.8, 0.8, 0.9, 0.3, 0.4, 0.4],
[0.1, 0.7, 0.2, 0.2, 0.1, 0.7, 0.6, 0.6]]))
def test_reorder_parents_no_effect(self):
self.cpd2.reorder_parents(['C', 'A', 'B'], inplace=False)
np_test.assert_array_equal(self.cpd2.get_values(),
np.array([[0.9, 0.3, 0.9, 0.3, 0.8, 0.8, 0.4, 0.4],
[0.1, 0.7, 0.1, 0.7, 0.2, 0.2, 0.6, 0.6]]))
def test_reorder_parents_warning(self):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
self.cpd2.reorder_parents(['A', 'B', 'C'], inplace=False)
assert("Same ordering provided as current" in str(w[-1].message))
np_test.assert_array_equal(self.cpd2.get_values(),
np.array([[0.9, 0.3, 0.9, 0.3, 0.8, 0.8, 0.4, 0.4],
[0.1, 0.7, 0.1, 0.7, 0.2, 0.2, 0.6, 0.6]]))
def tearDown(self):
del self.cpd
class TestJointProbabilityDistributionInit(unittest.TestCase):
def test_jpd_init(self):
jpd = JPD(['x1', 'x2', 'x3'], [2, 3, 2], np.ones(12) / 12)
np_test.assert_array_equal(jpd.cardinality, np.array([2, 3, 2]))
np_test.assert_array_equal(jpd.values, np.ones(12).reshape(2, 3, 2) / 12)
self.assertEqual(jpd.get_cardinality(['x1', 'x2', 'x3']), {'x1': 2, 'x2': 3, 'x3': 2})
def test_jpd_init_exception(self):
self.assertRaises(ValueError, JPD, ['x1', 'x2', 'x3'], [2, 2, 2], np.ones(8))
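# --- Illustrative note (added for clarity, not part of the original test suite) ---
# The exception above is raised because a joint distribution must sum to 1; the same
# shape is accepted once the values are normalized.
def _jpd_normalized_demo():
    return JPD(['x1', 'x2', 'x3'], [2, 2, 2], np.ones(8) / 8)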
class TestJointProbabilityDistributionMethods(unittest.TestCase):
def setUp(self):
self.jpd = JPD(['x1', 'x2', 'x3'], [2, 3, 2], values=np.ones(12) / 12)
self.jpd1 = JPD(['x1', 'x2', 'x3'], [2, 3, 2], values=np.ones(12) / 12)
self.jpd2 = JPD(['x1', 'x2', 'x3'], [2, 2, 3],
[0.126, 0.168, 0.126, 0.009, 0.045, 0.126, 0.252, 0.0224, 0.0056, 0.06, 0.036, 0.024])
self.jpd3 = JPD(['x1', 'x2', 'x3'], [2, 2, 2],
[5.0e-04, 5.225e-04, 0.00, 8.9775e-03, 9.9e-03, 5.39055e-02, 0.00, 9.261945e-01])
def test_jpd_marginal_distribution_list(self):
self.jpd.marginal_distribution(['x1', 'x2'])
np_test.assert_array_almost_equal(self.jpd.values,
np.array([[0.16666667, 0.16666667, 0.16666667],
[0.16666667, 0.16666667, 0.16666667]]))
np_test.assert_array_equal(self.jpd.cardinality, np.array([2, 3]))
dic = {'x1': 2, 'x2': 3}
self.assertEqual(self.jpd.get_cardinality(['x1', 'x2']), dic)
self.assertEqual(self.jpd.scope(), ['x1', 'x2'])
np_test.assert_almost_equal(np.sum(self.jpd.values), 1)
new_jpd = self.jpd1.marginal_distribution(['x1', 'x2'], inplace=False)
self.assertTrue(self.jpd1 != self.jpd)
self.assertTrue(new_jpd == self.jpd)
def test_marginal_distribution_str(self):
self.jpd.marginal_distribution('x1')
np_test.assert_array_almost_equal(self.jpd.values, np.array([0.5, 0.5]))
np_test.assert_array_equal(self.jpd.cardinality, np.array([2]))
self.assertEqual(self.jpd.scope(), ['x1'])
np_test.assert_almost_equal(np.sum(self.jpd.values), 1)
new_jpd = self.jpd1.marginal_distribution('x1', inplace=False)
self.assertTrue(self.jpd1 != self.jpd)
self.assertTrue(self.jpd == new_jpd)
def test_conditional_distribution_list(self):
self.jpd = self.jpd1.copy()
self.jpd.conditional_distribution([('x1', 1), ('x2', 0)])
np_test.assert_array_almost_equal(self.jpd.values, np.array([0.5, 0.5]))
np_test.assert_array_equal(self.jpd.cardinality, np.array([2]))
self.assertEqual(self.jpd.scope(), ['x3'])
np_test.assert_almost_equal(np.sum(self.jpd.values), 1)
new_jpd = self.jpd1.conditional_distribution([('x1', 1), ('x2', 0)], inplace=False)
self.assertTrue(self.jpd1 != self.jpd)
self.assertTrue(self.jpd == new_jpd)
def test_check_independence(self):
self.assertTrue(self.jpd2.check_independence(['x1'], ['x2']))
self.assertRaises(TypeError, self.jpd2.check_independence, 'x1', ['x2'])
self.assertRaises(TypeError, self.jpd2.check_independence, ['x1'], 'x2')
self.assertRaises(TypeError, self.jpd2.check_independence, ['x1'], ['x2'], 'x3')
self.assertFalse(self.jpd2.check_independence(['x1'], ['x2'], ('x3',), condition_random_variable=True))
self.assertFalse(self.jpd2.check_independence(['x1'], ['x2'], [('x3', 0)]))
self.assertTrue(self.jpd1.check_independence(['x1'], ['x2'], ('x3',), condition_random_variable=True))
self.assertTrue(self.jpd1.check_independence(['x1'], ['x2'], [('x3', 1)]))
self.assertTrue(self.jpd3.check_independence(['x1'], ['x2'], ('x3',), condition_random_variable=True))
def test_get_independencies(self):
independencies = Independencies(['x1', 'x2'], ['x2', 'x3'], ['x3', 'x1'])
independencies1 = Independencies(['x1', 'x2'])
self.assertEqual(self.jpd1.get_independencies(), independencies)
self.assertEqual(self.jpd2.get_independencies(), independencies1)
self.assertEqual(self.jpd1.get_independencies([('x3', 0)]), independencies1)
self.assertEqual(self.jpd2.get_independencies([('x3', 0)]), Independencies())
def test_minimal_imap(self):
bm = self.jpd1.minimal_imap(order=['x1', 'x2', 'x3'])
self.assertEqual(sorted(bm.edges()), sorted([('x1', 'x3'), ('x2', 'x3')]))
bm = self.jpd1.minimal_imap(order=['x2', 'x3', 'x1'])
self.assertEqual(sorted(bm.edges()), sorted([('x2', 'x1'), ('x3', 'x1')]))
bm = self.jpd2.minimal_imap(order=['x1', 'x2', 'x3'])
self.assertEqual(list(bm.edges()), [])
bm = self.jpd2.minimal_imap(order=['x1', 'x2'])
self.assertEqual(list(bm.edges()), [])
def test_repr(self):
self.assertEqual(repr(self.jpd1), '<Joint Distribution representing P(x1:2, x2:3, x3:2) at {address}>'.format(
address=hex(id(self.jpd1))))
def test_is_imap(self):
G1 = BayesianModel([('diff', 'grade'), ('intel', 'grade')])
diff_cpd = TabularCPD('diff', 2, [[0.2], [0.8]])
intel_cpd = TabularCPD('intel', 3, [[0.5], [0.3], [0.2]])
grade_cpd = TabularCPD('grade', 3,
[[0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
[0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
[0.8, 0.8, 0.8, 0.8, 0.8, 0.8]],
evidence=['diff', 'intel'],
evidence_card=[2, 3])
G1.add_cpds(diff_cpd, intel_cpd, grade_cpd)
val = [0.01, 0.01, 0.08, 0.006, 0.006, 0.048, 0.004, 0.004, 0.032,
0.04, 0.04, 0.32, 0.024, 0.024, 0.192, 0.016, 0.016, 0.128]
jpd = JPD(['diff', 'intel', 'grade'], [2, 3, 3], val)
self.assertTrue(jpd.is_imap(G1))
self.assertRaises(TypeError, jpd.is_imap, MarkovModel())
def tearDown(self):
del self.jpd
del self.jpd1
del self.jpd2
del self.jpd3
#
# class TestTreeCPDInit(unittest.TestCase):
# def test_init_single_variable_nodes(self):
# tree = TreeCPD([('B', DiscreteFactor(['A'], [2], [0.8, 0.2]), 0),
# ('B', 'C', 1),
# ('C', DiscreteFactor(['A'], [2], [0.1, 0.9]), 0),
# ('C', 'D', 1),
# ('D', DiscreteFactor(['A'], [2], [0.9, 0.1]), 0),
# ('D', DiscreteFactor(['A'], [2], [0.4, 0.6]), 1)])
#
# self.assertTrue('B' in tree.nodes())
# self.assertTrue('C' in tree.nodes())
# self.assertTrue('D' in tree.nodes())
# self.assertTrue(DiscreteFactor(['A'], [2], [0.8, 0.2]) in tree.nodes())
# self.assertTrue(DiscreteFactor(['A'], [2], [0.1, 0.9]) in tree.nodes())
# self.assertTrue(DiscreteFactor(['A'], [2], [0.9, 0.1]) in tree.nodes())
# self.assertTrue(DiscreteFactor(['A'], [2], [0.4, 0.6]) in tree.nodes())
#
# self.assertTrue(('B', DiscreteFactor(['A'], [2], [0.8, 0.2]) in tree.edges()))
# self.assertTrue(('B', DiscreteFactor(['A'], [2], [0.1, 0.9]) in tree.edges()))
# self.assertTrue(('B', DiscreteFactor(['A'], [2], [0.9, 0.1]) in tree.edges()))
# self.assertTrue(('B', DiscreteFactor(['A'], [2], [0.4, 0.6]) in tree.edges()))
# self.assertTrue(('C', 'D') in tree.edges())
# self.assertTrue(('B', 'C') in tree.edges())
#
# self.assertEqual(tree['B'][DiscreteFactor(['A'], [2], [0.8, 0.2])]['label'], 0)
# self.assertEqual(tree['B']['C']['label'], 1)
# self.assertEqual(tree['C'][DiscreteFactor(['A'], [2], [0.1, 0.9])]['label'], 0)
# self.assertEqual(tree['C']['D']['label'], 1)
# self.assertEqual(tree['D'][DiscreteFactor(['A'], [2], [0.9, 0.1])]['label'], 0)
# self.assertEqual(tree['D'][DiscreteFactor(['A'], [2], [0.4, 0.6])]['label'], 1)
#
# self.assertRaises(ValueError, tree.add_edges_from, [('F', 'G')])
#
# def test_init_self_loop(self):
# self.assertRaises(ValueError, TreeCPD, [('B', 'B', 0)])
#
# def test_init_cycle(self):
# self.assertRaises(ValueError, TreeCPD, [('A', 'B', 0), ('B', 'C', 1), ('C', 'A', 0)])
#
# def test_init_multi_variable_nodes(self):
# tree = TreeCPD([(('B', 'C'), DiscreteFactor(['A'], [2], [0.8, 0.2]), (0, 0)),
# (('B', 'C'), 'D', (0, 1)),
# (('B', 'C'), DiscreteFactor(['A'], [2], [0.1, 0.9]), (1, 0)),
# (('B', 'C'), 'E', (1, 1)),
# ('D', DiscreteFactor(['A'], [2], [0.9, 0.1]), 0),
# ('D', DiscreteFactor(['A'], [2], [0.4, 0.6]), 1),
# ('E', DiscreteFactor(['A'], [2], [0.3, 0.7]), 0),
# ('E', DiscreteFactor(['A'], [2], [0.8, 0.2]), 1)
# ])
#
# self.assertTrue(('B', 'C') in tree.nodes())
# self.assertTrue('D' in tree.nodes())
# self.assertTrue('E' in tree.nodes())
# self.assertTrue(DiscreteFactor(['A'], [2], [0.8, 0.2]) in tree.nodes())
# self.assertTrue(DiscreteFactor(['A'], [2], [0.9, 0.1]) in tree.nodes())
#
# self.assertTrue((('B', 'C'), DiscreteFactor(['A'], [2], [0.8, 0.2]) in tree.edges()))
# self.assertTrue((('B', 'C'), 'E') in tree.edges())
# self.assertTrue(('D', DiscreteFactor(['A'], [2], [0.4, 0.6])) in tree.edges())
# self.assertTrue(('E', DiscreteFactor(['A'], [2], [0.8, 0.2])) in tree.edges())
#
# self.assertEqual(tree[('B', 'C')][DiscreteFactor(['A'], [2], [0.8, 0.2])]['label'], (0, 0))
# self.assertEqual(tree[('B', 'C')]['D']['label'], (0, 1))
# self.assertEqual(tree['D'][DiscreteFactor(['A'], [2], [0.9, 0.1])]['label'], 0)
# self.assertEqual(tree['E'][DiscreteFactor(['A'], [2], [0.3, 0.7])]['label'], 0)
#
#
# class TestTreeCPD(unittest.TestCase):
# def setUp(self):
# self.tree1 = TreeCPD([('B', DiscreteFactor(['A'], [2], [0.8, 0.2]), '0'),
# ('B', 'C', '1'),
# ('C', DiscreteFactor(['A'], [2], [0.1, 0.9]), '0'),
# ('C', 'D', '1'),
# ('D', DiscreteFactor(['A'], [2], [0.9, 0.1]), '0'),
# ('D', DiscreteFactor(['A'], [2], [0.4, 0.6]), '1')])
#
# self.tree2 = TreeCPD([('C','A','0'),('C','B','1'),
# ('A', DiscreteFactor(['J'], [2], [0.9, 0.1]), '0'),
# ('A', DiscreteFactor(['J'], [2], [0.3, 0.7]), '1'),
# ('B', DiscreteFactor(['J'], [2], [0.8, 0.2]), '0'),
# ('B', DiscreteFactor(['J'], [2], [0.4, 0.6]), '1')])
#
# def test_add_edge(self):
# self.tree1.add_edge('yolo', 'yo', 0)
# self.assertTrue('yolo' in self.tree1.nodes() and 'yo' in self.tree1.nodes())
# self.assertTrue(('yolo', 'yo') in self.tree1.edges())
# self.assertEqual(self.tree1['yolo']['yo']['label'], 0)
#
# def test_add_edges_from(self):
# self.tree1.add_edges_from([('yolo', 'yo', 0), ('hello', 'world', 1)])
# self.assertTrue('yolo' in self.tree1.nodes() and 'yo' in self.tree1.nodes() and
# 'hello' in self.tree1.nodes() and 'world' in self.tree1.nodes())
# self.assertTrue(('yolo', 'yo') in self.tree1.edges())
# self.assertTrue(('hello', 'world') in self.tree1.edges())
# self.assertEqual(self.tree1['yolo']['yo']['label'], 0)
# self.assertEqual(self.tree1['hello']['world']['label'], 1)
#
# def test_to_tabular_cpd(self):
# tabular_cpd = self.tree1.to_tabular_cpd()
# self.assertEqual(tabular_cpd.evidence, ['D', 'C', 'B'])
# self.assertEqual(tabular_cpd.evidence_card, [2, 2, 2])
# self.assertEqual(list(tabular_cpd.variables), ['A', 'B', 'C', 'D'])
# np_test.assert_array_equal(tabular_cpd.values,
# np.array([0.8, 0.8, 0.8, 0.8, 0.1, 0.1, 0.9, 0.4,
# 0.2, 0.2, 0.2, 0.2, 0.9, 0.9, 0.1, 0.6]))
#
# tabular_cpd = self.tree2.to_tabular_cpd()
# self.assertEqual(tabular_cpd.evidence, ['A', 'B', 'C'])
# self.assertEqual(tabular_cpd.evidence_card, [2, 2, 2])
# self.assertEqual(list(tabular_cpd.variables), ['J', 'C', 'B', 'A'])
# np_test.assert_array_equal(tabular_cpd.values,
# np.array([ 0.9, 0.3, 0.9, 0.3, 0.8, 0.8, 0.4, 0.4,
# 0.1, 0.7, 0.1, 0.7, 0.2, 0.2, 0.6, 0.6]))
#
# @unittest.skip('Not implemented yet')
# def test_to_tabular_cpd_parent_order(self):
# tabular_cpd = self.tree1.to_tabular_cpd('A', parents_order=['D', 'C', 'B'])
# self.assertEqual(tabular_cpd.evidence, ['D', 'C', 'B'])
# self.assertEqual(tabular_cpd.evidence_card, [2, 2, 2])
# self.assertEqual(list(tabular_cpd.variables), ['A', 'D', 'C', 'B'])
# np_test.assert_array_equal(tabular_cpd.values,
# np.array([0.8, 0.1, 0.8, 0.9, 0.8, 0.1, 0.8, 0.4,
# 0.2, 0.9, 0.2, 0.1, 0.2, 0.9, 0.2, 0.6]))
#
# tabular_cpd = self.tree2.to_tabular_cpd('A', parents_order=['E', 'D', 'C', 'B'])
#
# @unittest.skip('Not implemented yet')
# def test_to_rule_cpd(self):
# rule_cpd = self.tree1.to_rule_cpd()
# self.assertEqual(rule_cpd.cardinality(), {'A': 2, 'B': 2, 'C': 2, 'D': 2})
# self.assertEqual(rule_cpd.scope(), {'A', 'B', 'C', 'D'})
# self.assertEqual(rule_cpd.variable, 'A')
# self.assertEqual(rule_cpd.rules, {('A_0', 'B_0'): 0.8,
# ('A_1', 'B_0'): 0.2,
# ('A_0', 'B_1', 'C_0'): 0.1,
# ('A_0', 'B_1', 'C_1', 'D_0'): 0.9,
# ('A_1', 'B_1', 'C_1', 'D_0'): 0.1,
# ('A_0', 'B_1', 'C_1', 'D_1'): 0.4,
# ('A_1', 'B_!', 'C_1', 'D_1'): 0.6})
#
# rule_cpd = self.tree2.to_rule_cpd()
# self.assertEqual(rule_cpd.cardinality(), {'A': 2, 'B': 2, 'C': 2, 'D': 2, 'E': 2})
# self.assertEqual(rule_cpd.scope(), {'A', 'B', 'C', 'D', 'E'})
# self.assertEqual(rule_cpd.variable, 'A')
# self.assertEqual(rule_cpd.rules, {('A_0', 'B_0', 'C_0'): 0.8,
# ('A_1', 'B_0', 'C_0'): 0.2,
# ('A_0', 'B_0', 'C_1', 'D_0'): 0.9,
# ('A_1', 'B_0', 'C_1', 'D_0'): 0.1,
# ('A_0', 'B_0', 'C_1', 'D_1'): 0.4,
# ('A_1', 'B_0', 'C_1', 'D_1'): 0.6,
# ('A_0', 'B_1', 'C_0'): 0.1,
# ('A_1', 'B_1', 'C_0'): 0.9,
# ('A_0', 'B_1', 'C_1', 'E_0'): 0.3,
# ('A_1', 'B_1', 'C_1', 'E_0'): 0.7,
# ('A_0', 'B_1', 'C_1', 'E_1'): 0.8,
# ('A_1', 'B_1', 'C_1', 'E_1'): 0.2})
#
#
# class TestRuleCPDInit(unittest.TestCase):
# def test_init_without_errors_rules_none(self):
# rule_cpd = RuleCPD('A')
# self.assertEqual(rule_cpd.variable, 'A')
#
# def test_init_without_errors_rules_not_none(self):
# rule_cpd = RuleCPD('A', {('A_0', 'B_0'): 0.8,
# ('A_1', 'B_0'): 0.2,
# ('A_0', 'B_1', 'C_0'): 0.4,
# ('A_1', 'B_1', 'C_0'): 0.6,
# ('A_0', 'B_1', 'C_1'): 0.9,
# ('A_1', 'B_1', 'C_1'): 0.1})
# self.assertEqual(rule_cpd.variable, 'A')
# self.assertEqual(rule_cpd.rules, {('A_0', 'B_0'): 0.8,
# ('A_1', 'B_0'): 0.2,
# ('A_0', 'B_1', 'C_0'): 0.4,
# ('A_1', 'B_1', 'C_0'): 0.6,
# ('A_0', 'B_1', 'C_1'): 0.9,
# ('A_1', 'B_1', 'C_1'): 0.1})
#
# def test_init_with_errors(self):
# self.assertRaises(ValueError, RuleCPD, 'A', {('A_0',): 0.5,
# ('A_0', 'B_0'): 0.8,
# ('A_1', 'B_0'): 0.2,
# ('A_0', 'B_1', 'C_0'): 0.4,
# ('A_1', 'B_1', 'C_0'): 0.6,
# ('A_0', 'B_1', 'C_1'): 0.9,
# ('A_1', 'B_1', 'C_1'): 0.1})
#
#
# class TestRuleCPDMethods(unittest.TestCase):
# def setUp(self):
# self.rule_cpd_with_rules = RuleCPD('A', {('A_0', 'B_0'): 0.8,
# ('A_1', 'B_0'): 0.2,
# ('A_0', 'B_1', 'C_0'): 0.4,
# ('A_1', 'B_1', 'C_0'): 0.6})
# self.rule_cpd_without_rules = RuleCPD('A')
#
# def test_add_rules_single(self):
# self.rule_cpd_with_rules.add_rules({('A_0', 'B_1', 'C_1'): 0.9})
# self.assertEqual(self.rule_cpd_with_rules.rules, {('A_0', 'B_0'): 0.8,
# ('A_1', 'B_0'): 0.2,
# ('A_0', 'B_1', 'C_0'): 0.4,
# ('A_1', 'B_1', 'C_0'): 0.6,
# ('A_0', 'B_1', 'C_1'): 0.9})
# self.assertEqual(self.rule_cpd_with_rules.variable, 'A')
# self.rule_cpd_without_rules.add_rules({('A_0', 'B_1', 'C_1'): 0.9})
# self.assertEqual(self.rule_cpd_without_rules.rules, {('A_0', 'B_1', 'C_1'): 0.9})
# self.assertEqual(self.rule_cpd_without_rules.variable, 'A')
#
# def test_add_rules_multiple(self):
# self.rule_cpd_with_rules.add_rules({('A_0', 'B_1', 'C_1'): 0.9,
# ('A_1', 'B_1', 'C_1'): 0.1})
# self.assertEqual(self.rule_cpd_with_rules.rules, {('A_0', 'B_0'): 0.8,
# ('A_1', 'B_0'): 0.2,
# ('A_0', 'B_1', 'C_0'): 0.4,
# ('A_1', 'B_1', 'C_0'): 0.6,
# ('A_0', 'B_1', 'C_1'): 0.9,
# ('A_1', 'B_1', 'C_1'): 0.1})
# self.assertEqual(self.rule_cpd_with_rules.variable, 'A')
# self.rule_cpd_without_rules.add_rules({('A_0', 'B_1', 'C_1'): 0.9,
# ('A_1', 'B_1', 'C_1'): 0.1})
# self.assertEqual(self.rule_cpd_without_rules.rules, {('A_0', 'B_1', 'C_1'): 0.9,
# ('A_1', 'B_1', 'C_1'): 0.1})
# self.assertEqual(self.rule_cpd_without_rules.variable, 'A')
#
# def test_add_rules_error(self):
# self.assertRaises(ValueError, self.rule_cpd_with_rules.add_rules, {('A_0',): 0.8})
#
# def test_scope(self):
# self.assertEqual(self.rule_cpd_with_rules.scope(), {'A', 'B', 'C'})
# self.assertEqual(self.rule_cpd_without_rules.scope(), set())
#
# def test_cardinality(self):
# self.assertEqual(self.rule_cpd_with_rules.cardinality(), {'A': 2, 'B': 2, 'C': 1})
# self.assertEqual(self.rule_cpd_without_rules.cardinality(), {})
#
# def tearDown(self):
# del self.rule_cpd_without_rules
#
| 53.74359 | 119 | 0.493232 |
79407aec16063aa8bec53b5b2c395fc6cefff38e | 4,852 | py | Python | qa/rpc-tests/httpbasics.py | proteanx/DeVault-Core | b866989b33f64fa462e0516933fecc681fa57f92 | ["MIT"] | null | null | null | qa/rpc-tests/httpbasics.py | proteanx/DeVault-Core | b866989b33f64fa462e0516933fecc681fa57f92 | ["MIT"] | null | null | null | qa/rpc-tests/httpbasics.py | proteanx/DeVault-Core | b866989b33f64fa462e0516933fecc681fa57f92 | ["MIT"] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test rpc http basics
#
from test_framework.test_framework import DeVaultTestFramework
from test_framework.util import *
import http.client
import urllib.parse
class HTTPBasicsTest (DeVaultTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 3
self.setup_clean_chain = True
def setup_network(self):
self.nodes = self.setup_nodes()
def run_test(self):
#################################################
# lowlevel check for http persistent connection #
#################################################
url = urllib.parse.urlparse(self.nodes[0].url)
authpair = url.username + ':' + url.password
headers = {"Authorization": "Basic " + str_to_b64str(authpair)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
out1 = conn.getresponse().read()
assert(b'"error":null' in out1)
assert(conn.sock!=None) #according to http/1.1 connection must still be open!
#send 2nd request without closing connection
conn.request('POST', '/', '{"method": "getchaintips"}', headers)
out1 = conn.getresponse().read()
assert(b'"error":null' in out1) #must also response with a correct json-rpc message
assert(conn.sock!=None) #according to http/1.1 connection must still be open!
conn.close()
#same should be if we add keep-alive because this should be the std. behaviour
headers = {"Authorization": "Basic " + str_to_b64str(authpair), "Connection": "keep-alive"}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
out1 = conn.getresponse().read()
assert(b'"error":null' in out1)
assert(conn.sock!=None) #according to http/1.1 connection must still be open!
#send 2nd request without closing connection
conn.request('POST', '/', '{"method": "getchaintips"}', headers)
out1 = conn.getresponse().read()
assert(b'"error":null' in out1) #must also response with a correct json-rpc message
assert(conn.sock!=None) #according to http/1.1 connection must still be open!
conn.close()
#now do the same with "Connection: close"
headers = {"Authorization": "Basic " + str_to_b64str(authpair), "Connection":"close"}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
out1 = conn.getresponse().read()
assert(b'"error":null' in out1)
assert(conn.sock==None) #now the connection must be closed after the response
#node1 (2nd node) is running with disabled keep-alive option
urlNode1 = urllib.parse.urlparse(self.nodes[1].url)
authpair = urlNode1.username + ':' + urlNode1.password
headers = {"Authorization": "Basic " + str_to_b64str(authpair)}
conn = http.client.HTTPConnection(urlNode1.hostname, urlNode1.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
out1 = conn.getresponse().read()
assert(b'"error":null' in out1)
#node2 (third node) is running with standard keep-alive parameters which means keep-alive is on
urlNode2 = urllib.parse.urlparse(self.nodes[2].url)
authpair = urlNode2.username + ':' + urlNode2.password
headers = {"Authorization": "Basic " + str_to_b64str(authpair)}
conn = http.client.HTTPConnection(urlNode2.hostname, urlNode2.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
out1 = conn.getresponse().read()
assert(b'"error":null' in out1)
        assert(conn.sock!=None) #connection must still be open because devaultd uses keep-alive by default
# Check excessive request size
conn = http.client.HTTPConnection(urlNode2.hostname, urlNode2.port)
conn.connect()
conn.request('GET', '/' + ('x'*1000), '', headers)
out1 = conn.getresponse()
assert_equal(out1.status, http.client.NOT_FOUND)
conn = http.client.HTTPConnection(urlNode2.hostname, urlNode2.port)
conn.connect()
conn.request('GET', '/' + ('x'*10000), '', headers)
out1 = conn.getresponse()
assert_equal(out1.status, http.client.BAD_REQUEST)
if __name__ == '__main__':
HTTPBasicsTest ().main ()
| 42.561404 | 108 | 0.63211 |
79407b2d222d26d99f7feaf5c7379ab57be629c6 | 2,404 | py | Python | cirtorch/enhance/color/hsv.py | Tarekbouamer/Image-Retrieval-for-Image-Based-Localization | fcad9af4f558bebb3cbec1d08e49603a452f439d | ["BSD-3-Clause"] | 3 | 2021-01-15T13:58:22.000Z | 2021-01-22T00:03:34.000Z | cirtorch/enhance/color/hsv.py | Tarekbouamer/Image-Retrieval-for-Image-Based-Localization | fcad9af4f558bebb3cbec1d08e49603a452f439d | ["BSD-3-Clause"] | null | null | null | cirtorch/enhance/color/hsv.py | Tarekbouamer/Image-Retrieval-for-Image-Based-Localization | fcad9af4f558bebb3cbec1d08e49603a452f439d | ["BSD-3-Clause"] | null | null | null | import math
import torch
import torch.nn as nn
def rgb_to_hsv(image, eps=1e-6):
"""
Convert an image from RGB to HSV.
The image data is assumed to be in the range of (0, 1).
"""
if not isinstance(image, torch.Tensor):
raise TypeError("Input type is not a torch.Tensor. Got {}".format(
type(image)))
if len(image.shape) < 3 or image.shape[-3] != 3:
raise ValueError("Input size must have a shape of (*, 3, H, W). Got {}"
.format(image.shape))
maxc, _ = image.max(-3)
maxc_mask = image == maxc.unsqueeze(-3)
_, max_indices = ((maxc_mask.cumsum(-3) == 1) & maxc_mask).max(-3)
minc = image.min(-3)[0]
v = maxc # brightness
deltac = maxc - minc
s = deltac / (v + eps)
# avoid division by zero
deltac = torch.where(deltac == 0, torch.ones_like(deltac, device=deltac.device, dtype=deltac.dtype), deltac)
maxc_tmp = maxc.unsqueeze(-3) - image
rc = maxc_tmp[..., 0, :, :]
gc = maxc_tmp[..., 1, :, :]
bc = maxc_tmp[..., 2, :, :]
h = torch.stack([
bc - gc,
2.0 * deltac + rc - bc,
4.0 * deltac + gc - rc,
], dim=-3)
h = torch.gather(h, dim=-3, index=max_indices[..., None, :, :])
h = h.squeeze(-3)
h = h / deltac
h = (h / 6.0) % 1.0
h = 2 * math.pi * h
return torch.stack([h, s, v], dim=-3)
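# --- Illustrative sketch (added commentary, not part of the original module) ---
# A minimal round-trip check for the two conversions in this file: hue is returned in
# radians (0..2*pi), and hsv_to_rgb (defined below) approximately inverts rgb_to_hsv.
def _hsv_roundtrip_demo():
    rgb = torch.rand(1, 3, 4, 4)   # one 4x4 RGB image with values in (0, 1)
    hsv = rgb_to_hsv(rgb)          # channels: hue (radians), saturation, value
    rgb_back = hsv_to_rgb(hsv)
    return torch.allclose(rgb, rgb_back, atol=1e-4)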
def hsv_to_rgb(image):
"""
Convert an image from HSV to RGB.
The image data is assumed to be in the range of (0, 1).
"""
if not isinstance(image, torch.Tensor):
raise TypeError("Input type is not a torch.Tensor. Got {}".format(
type(image)))
if len(image.shape) < 3 or image.shape[-3] != 3:
raise ValueError("Input size must have a shape of (*, 3, H, W). Got {}"
.format(image.shape))
h = image[..., 0, :, :] / (2 * math.pi)
s = image[..., 1, :, :]
v = image[..., 2, :, :]
hi = torch.floor(h * 6) % 6
f = ((h * 6) % 6) - hi
one = torch.tensor(1.).to(image.device)
p = v * (one - s)
q = v * (one - f * s)
t = v * (one - (one - f) * s)
hi = hi.long()
indices = torch.stack([hi, hi + 6, hi + 12], dim=-3)
out = torch.stack((
v, q, p, p, t, v,
t, v, v, q, p, p,
p, p, t, v, v, q,
), dim=-3)
out = torch.gather(out, -3, indices)
    return out
| 25.574468 | 112 | 0.505408 |
79407b391210e62b59481b6a88d7fc79a440a95f | 7,143 | py | Python | core/active/build.py | BIT-DA/RIPU | 125edf112c9ded1e7497aedb2a092331824df100 | ["MIT"] | 9 | 2022-03-13T06:53:04.000Z | 2022-03-31T11:28:09.000Z | core/active/build.py | BinhuiXie/RIPU | 125edf112c9ded1e7497aedb2a092331824df100 | ["MIT"] | null | null | null | core/active/build.py | BinhuiXie/RIPU | 125edf112c9ded1e7497aedb2a092331824df100 | ["MIT"] | 5 | 2022-03-18T07:18:13.000Z | 2022-03-31T11:28:11.000Z | import math
import torch
import numpy as np
import torch.nn.functional as F
from PIL import Image
from tqdm import tqdm
from .floating_region import FloatingRegionScore
from .spatial_purity import SpatialPurity
def PixelSelection(cfg, feature_extractor, classifier, tgt_epoch_loader):
feature_extractor.eval()
classifier.eval()
active_pixels = math.ceil(cfg.ACTIVE.PIXELS / len(cfg.ACTIVE.SELECT_ITER) / (1280 * 640) * (2048 * 1024))
calculate_purity = SpatialPurity(in_channels=cfg.MODEL.NUM_CLASSES, size=2 * cfg.ACTIVE.RADIUS_K + 1).cuda()
mask_radius = cfg.ACTIVE.RADIUS_K
with torch.no_grad():
for tgt_data in tqdm(tgt_epoch_loader):
tgt_input, path2mask = tgt_data['img'], tgt_data['path_to_mask']
origin_mask, origin_label = tgt_data['origin_mask'], tgt_data['origin_label']
origin_size = tgt_data['size']
active_indicator = tgt_data['active']
selected_indicator = tgt_data['selected']
path2indicator = tgt_data['path_to_indicator']
tgt_input = tgt_input.cuda(non_blocking=True)
tgt_size = tgt_input.shape[-2:]
tgt_feat = feature_extractor(tgt_input)
tgt_out = classifier(tgt_feat, size=tgt_size)
for i in range(len(origin_mask)):
active_mask = origin_mask[i].cuda(non_blocking=True)
ground_truth = origin_label[i].cuda(non_blocking=True)
size = (origin_size[i][0], origin_size[i][1])
active = active_indicator[i]
selected = selected_indicator[i]
output = tgt_out[i:i + 1, :, :, :]
output = F.interpolate(output, size=size, mode='bilinear', align_corners=True)
output = output.squeeze(dim=0)
p = torch.softmax(output, dim=0)
entropy = torch.sum(-p * torch.log(p + 1e-6), dim=0)
pseudo_label = torch.argmax(p, dim=0)
one_hot = F.one_hot(pseudo_label, num_classes=cfg.MODEL.NUM_CLASSES).float()
one_hot = one_hot.permute((2, 0, 1)).unsqueeze(dim=0)
purity = calculate_purity(one_hot).squeeze(dim=0).squeeze(dim=0)
score = entropy * purity
score[active] = -float('inf')
for pixel in range(active_pixels):
values, indices_h = torch.max(score, dim=0)
_, indices_w = torch.max(values, dim=0)
w = indices_w.item()
h = indices_h[w].item()
start_w = w - mask_radius if w - mask_radius >= 0 else 0
start_h = h - mask_radius if h - mask_radius >= 0 else 0
end_w = w + mask_radius + 1
end_h = h + mask_radius + 1
# mask out
score[start_h:end_h, start_w:end_w] = -float('inf')
active[start_h:end_h, start_w:end_w] = True
selected[h, w] = True
# active sampling
active_mask[h, w] = ground_truth[h, w]
active_mask = Image.fromarray(np.array(active_mask.cpu().numpy(), dtype=np.uint8))
active_mask.save(path2mask[i])
indicator = {
'active': active,
'selected': selected
}
torch.save(indicator, path2indicator[i])
feature_extractor.train()
classifier.train()
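# --- Illustrative sketch (added commentary, not part of the original file) ---
# PixelSelection above scores each pixel by predictive entropy times spatial purity.
# A minimal CPU-only sketch of that scoring with made-up 19-class logits (assuming
# SpatialPurity itself does not require CUDA); the dataloader and annotation
# bookkeeping are omitted.
def _pixel_score_demo(num_classes=19, radius_k=1):
    calculate_purity = SpatialPurity(in_channels=num_classes, size=2 * radius_k + 1)
    logits = torch.randn(1, num_classes, 32, 32)           # fake network output
    p = torch.softmax(logits, dim=1)
    entropy = torch.sum(-p * torch.log(p + 1e-6), dim=1)   # (1, 32, 32)
    one_hot = F.one_hot(torch.argmax(p, dim=1), num_classes=num_classes).float()
    one_hot = one_hot.permute((0, 3, 1, 2))                # (1, C, H, W)
    purity = calculate_purity(one_hot).squeeze(dim=1)      # (1, 32, 32)
    return entropy * purity                                # higher = more informative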
def RegionSelection(cfg, feature_extractor, classifier, tgt_epoch_loader):
feature_extractor.eval()
classifier.eval()
floating_region_score = FloatingRegionScore(in_channels=cfg.MODEL.NUM_CLASSES, size=2 * cfg.ACTIVE.RADIUS_K + 1).cuda()
per_region_pixels = (2 * cfg.ACTIVE.RADIUS_K + 1) ** 2
active_radius = cfg.ACTIVE.RADIUS_K
mask_radius = cfg.ACTIVE.RADIUS_K * 2
active_ratio = cfg.ACTIVE.RATIO / len(cfg.ACTIVE.SELECT_ITER)
with torch.no_grad():
for tgt_data in tqdm(tgt_epoch_loader):
tgt_input, path2mask = tgt_data['img'], tgt_data['path_to_mask']
origin_mask, origin_label = \
tgt_data['origin_mask'], tgt_data['origin_label']
origin_size = tgt_data['size']
active_indicator = tgt_data['active']
selected_indicator = tgt_data['selected']
path2indicator = tgt_data['path_to_indicator']
tgt_input = tgt_input.cuda(non_blocking=True)
tgt_size = tgt_input.shape[-2:]
tgt_feat = feature_extractor(tgt_input)
tgt_out = classifier(tgt_feat, size=tgt_size)
for i in range(len(origin_mask)):
active_mask = origin_mask[i].cuda(non_blocking=True)
ground_truth = origin_label[i].cuda(non_blocking=True)
size = (origin_size[i][0], origin_size[i][1])
num_pixel_cur = size[0] * size[1]
active = active_indicator[i]
selected = selected_indicator[i]
output = tgt_out[i:i + 1, :, :, :]
output = F.interpolate(output, size=size, mode='bilinear', align_corners=True)
score, purity, entropy = floating_region_score(output)
score[active] = -float('inf')
active_regions = math.ceil(num_pixel_cur * active_ratio / per_region_pixels)
for pixel in range(active_regions):
values, indices_h = torch.max(score, dim=0)
_, indices_w = torch.max(values, dim=0)
w = indices_w.item()
h = indices_h[w].item()
active_start_w = w - active_radius if w - active_radius >= 0 else 0
active_start_h = h - active_radius if h - active_radius >= 0 else 0
active_end_w = w + active_radius + 1
active_end_h = h + active_radius + 1
mask_start_w = w - mask_radius if w - mask_radius >= 0 else 0
mask_start_h = h - mask_radius if h - mask_radius >= 0 else 0
mask_end_w = w + mask_radius + 1
mask_end_h = h + mask_radius + 1
# mask out
score[mask_start_h:mask_end_h, mask_start_w:mask_end_w] = -float('inf')
active[mask_start_h:mask_end_h, mask_start_w:mask_end_w] = True
selected[active_start_h:active_end_h, active_start_w:active_end_w] = True
# active sampling
active_mask[active_start_h:active_end_h, active_start_w:active_end_w] = \
ground_truth[active_start_h:active_end_h, active_start_w:active_end_w]
active_mask = Image.fromarray(np.array(active_mask.cpu().numpy(), dtype=np.uint8))
active_mask.save(path2mask[i])
indicator = {
'active': active,
'selected': selected
}
torch.save(indicator, path2indicator[i])
feature_extractor.train()
classifier.train()
| 43.03012 | 123 | 0.575248 |
79407b3a83b70aeea4b5a43698517db43179640d | 4,426 | py | Python | tests/integration/TestHazard.py | rakhimov/rtk | adc35e218ccfdcf3a6e3082f6a1a1d308ed4ff63 | [
"BSD-3-Clause"
] | null | null | null | tests/integration/TestHazard.py | rakhimov/rtk | adc35e218ccfdcf3a6e3082f6a1a1d308ed4ff63 | [
"BSD-3-Clause"
] | null | null | null | tests/integration/TestHazard.py | rakhimov/rtk | adc35e218ccfdcf3a6e3082f6a1a1d308ed4ff63 | [
"BSD-3-Clause"
] | 2 | 2020-04-03T04:14:42.000Z | 2021-02-22T05:30:35.000Z | #!/usr/bin/env python -O
"""
This is the test class for testing Hazard module algorithms and models.
"""
# -*- coding: utf-8 -*-
#
# tests.integration.TestHazard.py is part of The RTK Project
#
# All rights reserved.
# Copyright 2007 - 2017 Andrew Rowland andrew.rowland <AT> reliaqual <DOT> com
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER
# OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
from os.path import dirname
sys.path.insert(0, dirname(dirname(dirname(__file__))) + "/rtk", )
import unittest
from nose.plugins.attrib import attr
from dao.DAO import DAO as _dao
from analyses.hazard.Hazard import Hazard
__author__ = 'Andrew Rowland'
__email__ = '[email protected]'
__organization__ = 'ReliaQual Associates, LLC'
__copyright__ = 'Copyright 2014 - 2015 Andrew "weibullguy" Rowland'
class TestHazardController(unittest.TestCase):
"""
Class for testing the Hazard data controller class.
"""
def setUp(self):
"""
Sets up the test fixture for the Hazard class.
"""
_database = '/tmp/tempdb.rtk'
self._dao = _dao(_database)
self._dao.execute("PRAGMA foreign_keys = ON", commit=False)
self.DUT = Hazard()
self.DUT.dao = self._dao
@attr(all=True, integration=True)
def test_request_hazard(self):
"""
(TestHazard) request_hazard should return 0 on success
"""
self.assertEqual(self.DUT.request_hazard()[1], 0)
@attr(all=True, integration=True)
def test_add_hazard(self):
"""
(TestHazard) add_hazard should return 0 on success
"""
self.assertEqual(self.DUT.request_hazard()[1], 0)
self.assertEqual(self.DUT.add_hazard(0)[1], 0)
@attr(all=True, integration=True)
def test_delete_hazard(self):
"""
(TestHazard) delete_hazard should return 0 on success
"""
self.assertEqual(self.DUT.request_hazard()[1], 0)
(_results,
_error_code) = self.DUT.delete_hazard(0, 1)
self.assertTrue(_results)
self.assertEqual(_error_code, 0)
@attr(all=True, integration=True)
def test_calculate_hazard(self):
"""
(TestHazard) calculate_hazard should return False on success
"""
self.assertEqual(self.DUT.request_hazard()[1], 0)
self.assertFalse(self.DUT.calculate_hazard(0, 2))
@attr(all=True, integration=True)
def test_save_hazard(self):
"""
(TestHazard) save_hazard returns (True, 0) on success
"""
self.assertEqual(self.DUT.request_hazard()[1], 0)
self.assertEqual(self.DUT.save_hazard(0, 2), (True, 0))
@attr(all=True, integration=True)
def test_save_all_hazards(self):
"""
(TestHazard) save_all_hazards returns False on success
"""
self.assertEqual(self.DUT.request_hazard()[1], 0)
self.assertEqual(self.DUT.save_all_hazards(),
[(0, 3, 0), (0, 0, 0), (0, 2, 0)])
| 34.046154 | 79 | 0.682332 |
79407b552678a43c502326803b52b701468cb436 | 1,549 | py | Python | src/genie/libs/parser/viptela/tests/ShowSystemStatus/cli/equal/golden_output3_expected.py | ykoehler/genieparser | b62cf622c3d8eab77c7b69e932c214ed04a2565a | [
"Apache-2.0"
] | null | null | null | src/genie/libs/parser/viptela/tests/ShowSystemStatus/cli/equal/golden_output3_expected.py | ykoehler/genieparser | b62cf622c3d8eab77c7b69e932c214ed04a2565a | [
"Apache-2.0"
] | null | null | null | src/genie/libs/parser/viptela/tests/ShowSystemStatus/cli/equal/golden_output3_expected.py | ykoehler/genieparser | b62cf622c3d8eab77c7b69e932c214ed04a2565a | [
"Apache-2.0"
] | null | null | null | expected_output = {
'boot_loader_version': 'Not applicable',
'build': 'Not applicable',
'chassis_serial_number': 'TTM25160B2Q',
'cloud_hosted_instance': 'false',
'commit_pending': 'false',
'configuration_template': 'None',
'controller_compatibility': '20.7',
'cpu_allocation': {
'control': 8,
'data': 0,
'total': 8,
},
'cpu_reported_reboot': 'Initiated by other',
'cpu_states': {
'idle': 97.43,
'system': 1.31,
'user': 1.24,
},
'current_time': 'Tue Dec 21 15:12:52 UTC 2021',
'device_role': 'cEdge-SDWAN',
'disk_usage': {
'avail_mega': 25257,
'filesystem': '/dev/bootflash1',
'mounted_on': '/bootflash',
'size_mega': 28748,
'use_pc': 7,
'used_mega': 2031,
},
'hypervisor_type': 'None',
'last_reboot': 'Image Install .',
'load_average': {
'minute_1': 0.24,
'minute_15': 0.27,
'minute_5': 0.27,
},
'memory_usage': {
'buffers_kilo': 401084,
'cache_kilo': 3363308,
'free_kilo': 12440184,
'total_kilo': 16214564,
'used_kilo': 3774380,
},
'model_name': 'ASR1001-HX',
'personality': 'vEdge',
'processes': 446,
'services': 'None',
'system_fips_state': 'Disabled',
'system_logging_disk': 'enabled',
'system_logging_host': 'disabled',
'system_state': 'GREEN. All daemons up',
'system_uptime': '20 days 20 hrs 43 min 00 sec',
'version': '17.07',
'vmanaged': 'false',
}
| 28.163636 | 52 | 0.556488 |
79407bb78bc358c44ce2fdd5b38afde044638ea1 | 5,349 | py | Python | server/swagger_server/internal/db.py | kakwa/certascale | 0df8da0f518506500117152fd0e28ee3286949af | [
"MIT"
] | null | null | null | server/swagger_server/internal/db.py | kakwa/certascale | 0df8da0f518506500117152fd0e28ee3286949af | [
"MIT"
] | null | null | null | server/swagger_server/internal/db.py | kakwa/certascale | 0df8da0f518506500117152fd0e28ee3286949af | [
"MIT"
] | 2 | 2020-11-04T03:07:00.000Z | 2020-11-05T08:14:33.000Z | import sqlalchemy
import time
from sqlalchemy import create_engine, ForeignKey
from sqlalchemy import Column, Integer, String
from sqlalchemy.types import DateTime, Boolean, LargeBinary
from sqlalchemy.orm import sessionmaker
from sqlalchemy.orm import relationship
from sqlalchemy.ext.declarative import declarative_base
from version import DB_VERSION, VERSION
DB_VERSION_LABEL = 'DB_VERSION'
VERSION_LABEL = 'VERSION'
Base = declarative_base()
class DbSchemaTooNew(Exception):
pass
class DbNoVersionSet(Exception):
pass
def migrate():
"""Place holder for futur DB migration scripts"""
pass
class DbAccount(Base):
__tablename__ = 'account'
# definition of the fields
id = Column(Integer, primary_key=True, nullable=False)
name = Column(String(30), unique=True, nullable=False)
permission = Column(String(30), nullable=False)
creation_date = Column(DateTime(), nullable=False)
last_modification_date = Column(DateTime(), nullable=False)
# relationships
api_keys = relationship("DbApiKey")
domains = relationship("DbDomain")
notifications = relationship("DbNotification", cascade="all, delete, delete-orphan", backref="account")
tags = relationship("DbTagAccount")
class DbApiKey(Base):
__tablename__ = 'api_key'
# fields
id = Column(Integer, primary_key=True, nullable=False)
secret_hash = Column(String(1024), nullable=False)
creation_date = Column(DateTime(), nullable=False)
last_modification_date = Column(DateTime())
secret_prefix = Column(String(8), nullable=False)
account = relationship("DbAccount")
# foreign keys
account_id = Column(Integer, ForeignKey('account.id'), nullable=False)
class DbDomain(Base):
__tablename__ = 'domain'
id = Column(Integer, primary_key=True)
name = Column(String(256), unique=True)
creation_date = Column(DateTime())
last_modification_date = Column(DateTime())
account_id = Column(Integer, ForeignKey('account.id'))
notifications = relationship("DbNotification")
tags = relationship("DbTagDomain")
class DbCertificate(Base):
__tablename__ = 'certificate'
id = Column(Integer, primary_key=True)
valid_start = Column(DateTime())
valid_end = Column(DateTime())
latest_valid = Column(Boolean())
name = Column(String(256))
ca_name = Column(String(256))
public_key = Column(LargeBinary())
private_key = Column(LargeBinary())
account_id = Column(Integer, ForeignKey('account.id'))
domain_id = Column(Integer, ForeignKey('domain.id'))
class DbNotification(Base):
__tablename__ = 'notification'
id = Column(Integer, primary_key=True)
message = Column(String(256))
payload = Column(String(256))
status = Column(String(256))
status_message = Column(String(256))
domain_id = Column(Integer, ForeignKey('domain.id'))
account_id = Column(Integer, ForeignKey('account.id'))
class DbTagAccount(Base):
__tablename__ = 'tagaccount'
id = Column(Integer, primary_key=True)
key = Column(String(30))
value = Column(String(30))
account_id = Column(Integer, ForeignKey('account.id'), nullable=False)
class DbTagDomain(Base):
__tablename__ = 'tagdomain'
id = Column(Integer, primary_key=True)
key = Column(String(30))
value = Column(String(256))
account_id = Column(Integer, ForeignKey('domain.id'))
class DbVersion(Base):
__tablename__ = 'version'
id = Column(Integer, primary_key=True)
version = Column(String(10))
vtype = Column(String(10), nullable=False, unique=True)
def get_dbsession(config):
engine = create_engine(
config['uri'],
echo=(config['echo_sql'] in ['true', 'True', 'TRUE']),
# pool_size = int(config['pool_size']),
# pool_timeout = int(config['pool_timeout']),
# pool_recycle = int(config['pool_recycle'])
)
Session = sessionmaker(bind=engine)
session = Session()
# we try to get the version, if it doesn't succeed, we create the DB
try:
version = session.query(DbVersion).filter_by(vtype=DB_VERSION_LABEL).first()
except (sqlalchemy.exc.OperationalError, sqlalchemy.exc.ProgrammingError):
Base.metadata.create_all(engine)
# committing between schema creation and
# setting the version is necessary on postgres
session.commit()
# we set the version
counter = 0
while counter < 10:
try:
session.add_all([
DbVersion(vtype = DB_VERSION_LABEL, version = DB_VERSION),
DbVersion(vtype = VERSION_LABEL, version = VERSION),
])
session.commit()
version = session.query(DbVersion).filter_by(vtype=DB_VERSION_LABEL).first()
break
except:
counter += 1
time.sleep(1)
# the version of the DB is newer than the version of certascale
# this should not happen so we raise an exception
if version is None:
raise DbNoVersionSet
if int(version.version) > int(DB_VERSION):
raise DbSchemaTooNew
# the version of the DB is older than the certascale definition
# so we launch the schema update script
elif int(version.version) < int(DB_VERSION):
migrate()
return Session
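# Example usage (sketch only; the URI and flag values below are made up, but the
# keys match what get_dbsession reads above; `import datetime` assumed in the caller):
#   Session = get_dbsession({'uri': 'sqlite:///certascale.db', 'echo_sql': 'false'})
#   session = Session()
#   session.add(DbAccount(name='demo', permission='admin',
#                         creation_date=datetime.datetime.utcnow(),
#                         last_modification_date=datetime.datetime.utcnow()))
#   session.commit()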
| 32.222892 | 107 | 0.677884 |
79407c97c2d86e6478fb30d55ec3b496023fe448 | 760 | py | Python | Learning/Example1.py | balarsen/pymc_learning | e4a077d492af6604a433433e64b835ce4ed0333a | [
"BSD-3-Clause"
] | null | null | null | Learning/Example1.py | balarsen/pymc_learning | e4a077d492af6604a433433e64b835ce4ed0333a | [
"BSD-3-Clause"
] | null | null | null | Learning/Example1.py | balarsen/pymc_learning | e4a077d492af6604a433433e64b835ce4ed0333a | [
"BSD-3-Clause"
] | 1 | 2017-05-23T16:38:55.000Z | 2017-05-23T16:38:55.000Z | # http://www.ncbi.nlm.nih.gov/pmc/articles/PMC3097064/
from pylab import *
import spacepy.plot as spp # for the styles
import numpy as np
import pymc as pm
import pymc
import numpy as np
n = 5 * np.ones(4, dtype=int)
x = np.array([-.86, -.3, -.05, .73])
alpha = pymc.Normal('alpha', mu=0, tau=.01)
beta = pymc.Normal('beta', mu=0, tau=.01)
@pymc.deterministic
def theta(a=alpha, b=beta):
"""theta = logit^{−1}(a+b)"""
return pymc.invlogit(a + b * x)
d = pymc.Binomial('d', n=n, p=theta, value=np.array([0., 1., 3., 5.]),
observed=True)
import pymc
# S = pymc.MCMC(mymodel, db = 'pickle')
S = pymc.MCMC([alpha, beta, theta], db='txt')
S.sample(iter=10000, burn=5000, thin=2)
pymc.Matplot.plot(S)
pymc.Matplot.summary_plot(S)
| 23.030303 | 70 | 0.632895 |
79407d00b03c431abf51d0414df956e00781d8c5 | 6,760 | py | Python | tests/integration/garden/test_forms.py | e-dang/Autogarden | b15217e5d4755fc028b8dc4255cbdcb77ead80f4 | [
"MIT"
] | null | null | null | tests/integration/garden/test_forms.py | e-dang/Autogarden | b15217e5d4755fc028b8dc4255cbdcb77ead80f4 | [
"MIT"
] | null | null | null | tests/integration/garden/test_forms.py | e-dang/Autogarden | b15217e5d4755fc028b8dc4255cbdcb77ead80f4 | [
"MIT"
] | null | null | null | from datetime import timedelta
import pytest
from django.core.exceptions import ValidationError
from garden.forms import (GardenForm, INVALID_DURATION_ERR_MSG, MAX_VALUE_ERR_MSG,
MIN_VALUE_ERR_MSG, REQUIRED_FIELD_ERR_MSG,
NewGardenForm, WateringStationForm)
from garden.models import Garden
@pytest.mark.integration
class TestGardenForm:
@pytest.mark.django_db
def test_clean_name_raises_validation_error_when_instance_already_has_a_garden_with_that_name_that_is_not_the_same_garden(self, user1, garden_factory):
garden_name = user1.gardens.first().name
garden = garden_factory(owner=user1)
data = {'name': garden_name}
form = GardenForm(instance=garden, data=data)
form.cleaned_data = data
with pytest.raises(ValidationError) as err:
form.clean_name()
assert form.NON_UNIQUE_NAME_ERR_MSG == str(err)
@pytest.mark.django_db
def test_clean_doesnt_raise_validation_error_when_no_other_instance_has_the_same_name(self, user1):
garden = user1.gardens.first()
data = {'name': garden.name}
form = GardenForm(instance=garden, data=data)
form.cleaned_data = data
form.clean_name() # should not raise
@pytest.mark.integration
class TestNewGardenForm:
@pytest.mark.parametrize('new_garden_form_fields, missing_field', [
(None, 'name'),
(None, 'num_watering_stations'),
(None, 'update_frequency')
],
indirect=['new_garden_form_fields'],
ids=['name', 'num_watering_stations', 'update_frequency'])
def test_fields_are_required(self, new_garden_form_fields, missing_field):
new_garden_form_fields.pop(missing_field)
form = NewGardenForm(data=new_garden_form_fields)
assert not form.is_valid()
assert form.errors[missing_field] == [REQUIRED_FIELD_ERR_MSG]
@pytest.mark.django_db
def test_save_creates_a_new_garden_with_specified_num_of_watering_stations(self, new_garden_form_fields, user):
prev_num_gardens = Garden.objects.all().count()
form = NewGardenForm(owner=user, data=new_garden_form_fields)
assert form.is_valid()
garden = form.save()
assert prev_num_gardens + 1 == Garden.objects.all().count()
assert garden.watering_stations.count() == new_garden_form_fields['num_watering_stations']
@pytest.mark.django_db
def test_save_sets_new_garden_owner_as_passed_in_user(self, new_garden_form_fields, user):
form = NewGardenForm(owner=user, data=new_garden_form_fields)
assert form.is_valid()
garden = form.save()
assert garden in user.gardens.all()
@pytest.mark.django_db
def test_is_valid_returns_false_when_num_watering_stations_is_invalid(self, new_garden_form_fields, user):
new_garden_form_fields['num_watering_stations'] = -1 # invalidate data
form = NewGardenForm(data=new_garden_form_fields)
ret_val = form.is_valid()
assert ret_val == False
assert MIN_VALUE_ERR_MSG in form.errors['num_watering_stations']
@pytest.mark.django_db
def test_is_valid_returns_false_when_update_frequency_is_invalid(self, new_garden_form_fields, user):
new_garden_form_fields['update_frequency'] = -1 # invalidate data
form = NewGardenForm(data=new_garden_form_fields)
ret_val = form.is_valid()
assert ret_val == False
assert INVALID_DURATION_ERR_MSG in form.errors['update_frequency']
@pytest.mark.django_db
def test_clean_name_raises_validation_error_when_owner_has_a_garden_with_that_name_already(self, user1, new_garden_form_fields):
new_garden_form_fields['name'] = user1.gardens.first().name
form = NewGardenForm(owner=user1, data=new_garden_form_fields)
form.cleaned_data = new_garden_form_fields
with pytest.raises(ValidationError) as err:
form.clean_name()
assert str(err) == form.NON_UNIQUE_NAME_ERR_MSG
@pytest.mark.django_db
def test_clean_name_does_not_raise_validation_error_when_name_is_unique(self, user1, new_garden_form_fields):
new_garden_form_fields['garden_name'] = user1.gardens.first().name + 'adijadjkaodj'
form = NewGardenForm(owner=user1, data=new_garden_form_fields)
form.cleaned_data = new_garden_form_fields
form.clean_name() # should not raise
@pytest.mark.integration
class TestWateringStationForm:
@pytest.mark.parametrize('watering_station_form_fields, missing_field', [
(None, 'moisture_threshold'),
(None, 'watering_duration')
],
indirect=['watering_station_form_fields'],
ids=['moisture_threshold', 'watering_duration'])
def test_fields_are_required(self, watering_station_form_fields, missing_field):
watering_station_form_fields.pop(missing_field)
form = WateringStationForm(data=watering_station_form_fields)
assert not form.is_valid()
assert form.errors[missing_field] == [REQUIRED_FIELD_ERR_MSG]
@pytest.mark.parametrize('watering_station_form_fields, missing_field', [
(None, 'plant_type'),
(None, 'status')
],
indirect=['watering_station_form_fields'],
ids=['plant_type', 'status'])
def test_field_is_not_required(self, watering_station_form_fields, missing_field):
watering_station_form_fields.pop(missing_field)
form = WateringStationForm(data=watering_station_form_fields)
assert form.is_valid()
@pytest.mark.django_db
@pytest.mark.parametrize('moisture_threshold, err_msg', [
(-1, MIN_VALUE_ERR_MSG),
(101, MAX_VALUE_ERR_MSG)
],
ids=['-1', '101'])
def test_is_valid_returns_false_when_moisture_threshold_is_invalid(self, watering_station_form_fields, moisture_threshold, err_msg):
watering_station_form_fields['moisture_threshold'] = moisture_threshold # invalidate data
form = WateringStationForm(data=watering_station_form_fields)
ret_val = form.is_valid()
assert ret_val == False
assert err_msg in form.errors['moisture_threshold']
@pytest.mark.django_db
@pytest.mark.parametrize('watering_duration', [timedelta(seconds=0), timedelta(seconds=-1)], ids=['0', '-1'])
def test_is_valid_returns_false_when_watering_duration_is_invalid(self, watering_station_form_fields, watering_duration):
watering_station_form_fields['watering_duration'] = watering_duration # invalidate data
form = WateringStationForm(data=watering_station_form_fields)
ret_val = form.is_valid()
assert ret_val == False
assert INVALID_DURATION_ERR_MSG in form.errors['watering_duration']
| 41.219512 | 155 | 0.726479 |
79407d15fd7f0cc7e2f39117854615b6331495fd | 12,000 | py | Python | tests/python/unittest/test_contrib_svrg_module.py | Vikas-kum/incubator-mxnet | ba02bf2fe2da423caa59ddb3fd5e433b90b730bf | [
"Apache-2.0"
] | 228 | 2018-12-06T09:34:01.000Z | 2022-03-08T17:02:02.000Z | tests/python/unittest/test_contrib_svrg_module.py | Vikas-kum/incubator-mxnet | ba02bf2fe2da423caa59ddb3fd5e433b90b730bf | [
"Apache-2.0"
] | 187 | 2018-03-16T23:44:43.000Z | 2021-12-14T21:19:54.000Z | tests/python/unittest/test_contrib_svrg_module.py | Vikas-kum/incubator-mxnet | ba02bf2fe2da423caa59ddb3fd5e433b90b730bf | [
"Apache-2.0"
] | 51 | 2019-07-12T05:10:25.000Z | 2021-07-28T16:19:06.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import mxnet as mx
import numpy as np
from common import with_seed, assertRaises
from mxnet.contrib.svrg_optimization.svrg_module import SVRGModule
from mxnet.test_utils import *
import unittest
def setup():
train_data = np.random.randint(1, 5, [1000, 2])
weights = np.array([1.0, 2.0])
train_label = train_data.dot(weights)
di = mx.io.NDArrayIter(train_data, train_label, batch_size=32, shuffle=True, label_name='lin_reg_label')
X = mx.sym.Variable('data')
Y = mx.symbol.Variable('lin_reg_label')
fully_connected_layer = mx.sym.FullyConnected(data=X, name='fc1', num_hidden=1)
lro = mx.sym.LinearRegressionOutput(data=fully_connected_layer, label=Y, name="lro")
mod = SVRGModule(
symbol=lro,
data_names=['data'],
label_names=['lin_reg_label'], update_freq=2)
mod.bind(data_shapes=di.provide_data, label_shapes=di.provide_label)
mod.init_params(initializer=mx.init.Uniform(0.01), allow_missing=False, force_init=False, allow_extra=False)
return di, mod
def test_bind_module():
_, mod = setup()
assert mod.binded == True
assert mod._mod_aux.binded == True
def test_module_init():
_, mod = setup()
assert mod._mod_aux is not None
def test_module_initializer():
def regression_model(m):
x = mx.symbol.var("data", stype='csr')
v = mx.symbol.var("v", shape=(m, 1), init=mx.init.Uniform(scale=.1),
stype='row_sparse')
model = mx.symbol.dot(lhs=x, rhs=v)
y = mx.symbol.Variable("label")
model = mx.symbol.LinearRegressionOutput(data=model, label=y, name="out")
return model
#shape of the data
n, m = 128, 100
model = regression_model(m)
data = mx.nd.zeros(shape=(n, m), stype='csr')
label = mx.nd.zeros((n, 1))
iterator = mx.io.NDArrayIter(data=data, label={'label': label},
batch_size=n, last_batch_handle='discard')
# create module
mod = SVRGModule(symbol=model, data_names=['data'], label_names=['label'], update_freq=2)
mod.bind(data_shapes=iterator.provide_data, label_shapes=iterator.provide_label)
mod.init_params()
v = mod._arg_params['v']
assert v.stype == 'row_sparse'
assert np.sum(v.asnumpy()) != 0
def test_module_bind():
x = mx.sym.Variable("data")
net = mx.sym.FullyConnected(x, num_hidden=1)
mod = SVRGModule(symbol=net, data_names=['data'], label_names=None, update_freq=2)
assertRaises(TypeError, mod.bind, data_shapes=['data', mx.nd.zeros(shape=(2, 1))])
mod.bind(data_shapes=[('data', (2, 1))])
assert mod.binded == True
assert mod._mod_aux.binded == True
@unittest.skip("Flaky test https://gitsvrhub.com/apache/incubator-mxnet/issues/12510")
@with_seed()
def test_module_save_load():
import tempfile
import os
x = mx.sym.Variable("data")
y = mx.sym.Variable("softmax_label")
net = mx.sym.FullyConnected(x, y, num_hidden=1)
mod = SVRGModule(symbol=net, data_names=['data'], label_names=['softmax_label'], update_freq=2)
mod.bind(data_shapes=[('data', (1, 1))])
mod.init_params()
mod.init_optimizer(optimizer='sgd', optimizer_params={'learning_rate': 0.1})
mod.update()
# Create tempfile
tmp = tempfile.mkdtemp()
tmp_file = os.path.join(tmp, 'svrg_test_output')
mod.save_checkpoint(tmp_file, 0, save_optimizer_states=True)
mod2 = SVRGModule.load(tmp_file, 0, load_optimizer_states=True, data_names=('data', ))
mod2.bind(data_shapes=[('data', (1, 1))])
mod2.init_optimizer(optimizer_params={'learning_rate': 0.1})
assert mod._symbol.tojson() == mod2._symbol.tojson()
# Multi-device
mod3 = SVRGModule(symbol=net, data_names=['data'], label_names=['softmax_label'], update_freq=3,
context=[mx.cpu(0), mx.cpu(1)])
mod3.bind(data_shapes=[('data', (10, 10))])
mod3.init_params()
mod3.init_optimizer(optimizer_params={'learning_rate': 1.0})
mod3.update()
mod3.save_checkpoint(tmp_file, 0, save_optimizer_states=True)
mod4 = SVRGModule.load(tmp_file, 0, load_optimizer_states=True, data_names=('data', ))
mod4.bind(data_shapes=[('data', (10, 10))])
mod4.init_optimizer(optimizer_params={'learning_rate': 1.0})
assert mod3._symbol.tojson() == mod4._symbol.tojson()
@unittest.skip("Flaky test https://github.com/apache/incubator-mxnet/issues/12510")
@with_seed()
def test_svrgmodule_reshape():
data = mx.sym.Variable("data")
sym = mx.sym.FullyConnected(data=data, num_hidden=4, name='fc')
dshape=(3, 4)
mod = SVRGModule(sym, data_names=["data"], label_names=None, context=[mx.cpu(0), mx.cpu(1)], update_freq=2)
mod.bind(data_shapes=[('data', dshape)])
mod.init_params()
mod._mod_aux.init_params()
mod.init_optimizer(optimizer_params={"learning_rate": 1.0})
data_batch = mx.io.DataBatch(data=[mx.nd.ones(dshape)], label=None)
mod.forward(data_batch)
mod.backward([mx.nd.ones(dshape)])
mod.update()
assert mod.get_outputs()[0].shape == dshape
dshape = (2, 4)
mod.reshape(data_shapes=[('data', dshape)])
mod.forward(mx.io.DataBatch(data=[mx.nd.ones(dshape)],
label=None))
mod.backward([mx.nd.ones(dshape)])
mod.update()
assert mod.get_outputs()[0].shape == dshape
@unittest.skip("Flaky test https://github.com/apache/incubator-mxnet/issues/12510")
@with_seed()
def test_update_full_grad():
def create_network():
train_data = np.random.randint(1, 5, [10, 2])
weights = np.array([1.0, 2.0])
train_label = train_data.dot(weights)
di = mx.io.NDArrayIter(train_data, train_label, batch_size=5, shuffle=True, label_name='lin_reg_label')
X = mx.sym.Variable('data')
Y = mx.symbol.Variable('lin_reg_label')
fully_connected_layer = mx.sym.FullyConnected(data=X, name='fc1', num_hidden=1)
lro = mx.sym.LinearRegressionOutput(data=fully_connected_layer, label=Y, name="lro")
mod = SVRGModule(
symbol=lro,
data_names=['data'],
label_names=['lin_reg_label'], update_freq=2)
mod.bind(data_shapes=di.provide_data, label_shapes=di.provide_label)
mod.init_params(initializer=mx.init.One(), allow_missing=False, force_init=False, allow_extra=False)
mod.init_optimizer(kvstore='local', optimizer='sgd', optimizer_params=(('learning_rate', 0.01),),
force_init=False)
return di, mod
di, svrg_mod = create_network()
# Calculates the average of full gradients over number batches
full_grads_weights = mx.nd.zeros(shape=svrg_mod.get_params()[0]['fc1_weight'].shape)
arg, aux = svrg_mod.get_params()
svrg_mod._mod_aux.set_params(arg_params=arg, aux_params=aux)
num_batch = 2
for batch in di:
svrg_mod.forward(batch)
svrg_mod.backward()
full_grads_weights = mx.nd.broadcast_add(svrg_mod._exec_group.grad_arrays[0][0], full_grads_weights, axis=0)
full_grads_weights /= num_batch
di.reset()
svrg_mod.update_full_grads(di)
assert same(full_grads_weights, svrg_mod._param_dict[0]['fc1_weight'])
@unittest.skip("Flaky test https://github.com/apache/incubator-mxnet/issues/12510")
@with_seed()
def test_svrg_with_sgd():
def create_module_with_sgd():
train_data = np.random.randint(1, 5, [100, 2])
weights = np.array([1.0, 2.0])
train_label = train_data.dot(weights)
di = mx.io.NDArrayIter(train_data, train_label, batch_size=10, shuffle=True, label_name='lin_reg_label')
X = mx.sym.Variable('data')
Y = mx.symbol.Variable('lin_reg_label')
fully_connected_layer = mx.sym.FullyConnected(data=X, name='fc1', num_hidden=1)
lro = mx.sym.LinearRegressionOutput(data=fully_connected_layer, label=Y, name="lro")
reg_mod = mx.mod.Module(
symbol=lro,
data_names=['data'],
label_names=['lin_reg_label'])
reg_mod.bind(data_shapes=di.provide_data, label_shapes=di.provide_label)
reg_mod.init_params(initializer=mx.init.One(), allow_missing=False, force_init=False, allow_extra=False)
reg_mod.init_optimizer(kvstore='local', optimizer='sgd', optimizer_params=(('learning_rate', 0.01),))
svrg_mod = SVRGModule(symbol=lro,
data_names=['data'],
label_names=['lin_reg_label'],
update_freq=2)
svrg_mod.bind(data_shapes=di.provide_data, label_shapes=di.provide_label)
svrg_mod.init_params(initializer=mx.init.One(), allow_missing=False, force_init=False, allow_extra=False)
svrg_mod.init_optimizer(kvstore='local', optimizer='sgd', optimizer_params=(('learning_rate', 0.01),))
return di,reg_mod, svrg_mod
di, reg_mod, svrg_mod = create_module_with_sgd()
num_epoch = 10
# Use metric MSE
metrics = mx.metric.create("mse")
# Train with SVRGModule
for e in range(num_epoch):
metrics.reset()
if e % svrg_mod.update_freq == 0:
svrg_mod.update_full_grads(di)
di.reset()
for batch in di:
svrg_mod.forward_backward(data_batch=batch)
svrg_mod.update()
svrg_mod.update_metric(metrics, batch.label)
svrg_mse = metrics.get()[1]
# Train with SGD standard Module
di.reset()
for e in range(num_epoch):
metrics.reset()
di.reset()
for batch in di:
reg_mod.forward_backward(data_batch=batch)
reg_mod.update()
reg_mod.update_metric(metrics, batch.label)
sgd_mse = metrics.get()[1]
assert svrg_mse < sgd_mse
@unittest.skip("Flaky test https://github.com/apache/incubator-mxnet/issues/12510")
@with_seed()
def test_accumulate_kvstore():
# Test KVStore behavior when push a list of values
kv = mx.kv.create('local')
kv.init("fc1_weight", mx.nd.zeros(shape=(1, 2)))
kv.init("fc1_weight_full", mx.nd.zeros(shape=(1, 2)))
b = [mx.nd.ones(shape=(1, 2)) for i in range(4)]
a = mx.nd.zeros(shape=(1, 2))
kv.push("fc1_weight_full", b)
kv.pull("fc1_weight_full", out=a)
assert same(a, [mx.nd.array([4, 4])])
assert kv.num_workers == 1
# Test accumulate in KVStore and allocate gradients
kv_test = mx.kv.create('local')
_, svrg_mod = setup()
svrg_mod.init_optimizer(kvstore=kv_test, optimizer='sgd', optimizer_params=(('learning_rate', 0.01),),
force_init=False)
svrg_mod._accumulate_kvstore("fc1_weight", b)
assert len(svrg_mod._param_dict) == svrg_mod._ctx_len
assert same(svrg_mod._param_dict[0]["fc1_weight"], b[0])
@unittest.skip("Flaky test https://github.com/apache/incubator-mxnet/issues/12510")
@with_seed()
def test_fit():
di, mod = setup()
num_epoch = 100
metric = mx.metric.create("mse")
mod.fit(di, eval_metric=metric, optimizer='sgd', optimizer_params=(('learning_rate', 0.025),), num_epoch=num_epoch,
kvstore='local')
# Estimated MSE for using SGD optimizer of lr = 0.025, SVRG MSE should be smaller
estimated_mse = 1e-5
assert metric.get()[1] < estimated_mse
if __name__ == "__main__":
import nose
nose.runmodule()
| 38.216561 | 119 | 0.672 |
79407ec5732d2c521c813d686bec0cbe0e639c3d | 2,683 | py | Python | launch/no_roof_small_warehouse_launch.py | Adlink-ROS/aws-robomaker-small-warehouse-world | 17c805aa48879ac879a159058187910180640b4a | [
"MIT-0"
] | null | null | null | launch/no_roof_small_warehouse_launch.py | Adlink-ROS/aws-robomaker-small-warehouse-world | 17c805aa48879ac879a159058187910180640b4a | [
"MIT-0"
] | null | null | null | launch/no_roof_small_warehouse_launch.py | Adlink-ROS/aws-robomaker-small-warehouse-world | 17c805aa48879ac879a159058187910180640b4a | [
"MIT-0"
] | null | null | null | # Copyright (c) 2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from ament_index_python.packages import get_package_share_directory
from launch import LaunchDescription
from launch.actions import DeclareLaunchArgument, ExecuteProcess, IncludeLaunchDescription
from launch.conditions import IfCondition
from launch.launch_description_sources import PythonLaunchDescriptionSource
from launch.substitutions import LaunchConfiguration, PythonExpression
from launch_ros.actions import Node
def generate_launch_description():
# Get the launch directory
aws_small_warehouse_dir = get_package_share_directory('aws_robomaker_small_warehouse_world')
# Launch configuration variables specific to simulation
use_sim_time = LaunchConfiguration('use_sim_time')
use_simulator = LaunchConfiguration('use_simulator')
headless = LaunchConfiguration('headless')
world = LaunchConfiguration('world')
declare_use_sim_time_cmd = DeclareLaunchArgument(
'use_sim_time',
default_value='True',
description='Use simulation (Gazebo) clock if true')
declare_simulator_cmd = DeclareLaunchArgument(
'headless',
default_value='False',
        description='Whether to execute gzclient')
declare_world_cmd = DeclareLaunchArgument(
'world',
default_value=os.path.join(aws_small_warehouse_dir, 'worlds', 'no_roof_small_warehouse', 'no_roof_small_warehouse.world'),
description='Full path to world model file to load')
# Specify the actions
start_gazebo_server_cmd = ExecuteProcess(
cmd=['gzserver', '--verbose', '-s', 'libgazebo_ros_init.so', '-s', 'libgazebo_ros_factory.so', world],
cwd=[aws_small_warehouse_dir], output='screen')
start_gazebo_client_cmd = ExecuteProcess(
condition=IfCondition(PythonExpression(['not ', headless])),
cmd=['gzclient'],
cwd=[aws_small_warehouse_dir], output='screen')
# Create the launch description and populate
ld = LaunchDescription()
# Declare the launch options
ld.add_action(declare_use_sim_time_cmd)
ld.add_action(declare_simulator_cmd)
ld.add_action(declare_world_cmd)
# Add any conditioned actions
ld.add_action(start_gazebo_server_cmd)
ld.add_action(start_gazebo_client_cmd)
return ld
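# Example invocation (sketch; assumes this file is installed as part of the
# aws_robomaker_small_warehouse_world package referenced above):
#   ros2 launch aws_robomaker_small_warehouse_world no_roof_small_warehouse_launch.py headless:=True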
| 35.773333 | 124 | 0.800596 |
79407f37642da2b0752564df5636422bc37dcabf | 15,648 | py | Python | esrally/utils/io.py | darrodri/rally | 235ab99f08056699e8a105ec6df2158c7f79d5f7 | [
"Apache-2.0"
] | null | null | null | esrally/utils/io.py | darrodri/rally | 235ab99f08056699e8a105ec6df2158c7f79d5f7 | [
"Apache-2.0"
] | null | null | null | esrally/utils/io.py | darrodri/rally | 235ab99f08056699e8a105ec6df2158c7f79d5f7 | [
"Apache-2.0"
] | null | null | null | # Licensed to Elasticsearch B.V. under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch B.V. licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import bz2
import gzip
import os
import re
import subprocess
import tarfile
import zipfile
from esrally.utils import console
class FileSource:
"""
FileSource is a wrapper around a plain file which simplifies testing of file I/O calls.
"""
def __init__(self, file_name, mode, encoding="utf-8"):
self.file_name = file_name
self.mode = mode
self.encoding = encoding
self.f = None
def open(self):
self.f = open(self.file_name, mode=self.mode, encoding=self.encoding)
# allow for chaining
return self
def seek(self, offset):
self.f.seek(offset)
def read(self):
return self.f.read()
def readline(self):
return self.f.readline()
def close(self):
self.f.close()
self.f = None
def __enter__(self):
self.open()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
return False
def __str__(self, *args, **kwargs):
return self.file_name
class DictStringFileSourceFactory:
"""
Factory that can create `StringAsFileSource` for tests. Based on the provided dict, it will create a proper `StringAsFileSource`.
It is intended for scenarios where multiple files may be read by client code.
"""
def __init__(self, name_to_contents):
self.name_to_contents = name_to_contents
def __call__(self, name, mode, encoding="utf-8"):
return StringAsFileSource(self.name_to_contents[name], mode, encoding)
class StringAsFileSource:
"""
Implementation of ``FileSource`` intended for tests. It's kept close to ``FileSource`` to simplify maintenance but it is not meant to
be used in production code.
"""
def __init__(self, contents, mode, encoding="utf-8"):
"""
:param contents: The file contents as an array of strings. Each item in the array should correspond to one line.
:param mode: The file mode. It is ignored in this implementation but kept to implement the same interface as ``FileSource``.
:param encoding: The file encoding. It is ignored in this implementation but kept to implement the same interface as ``FileSource``.
"""
self.contents = contents
self.current_index = 0
self.opened = False
def open(self):
self.opened = True
return self
def seek(self, offset):
self._assert_opened()
if offset != 0:
raise AssertionError("StringAsFileSource does not support random seeks")
def read(self):
self._assert_opened()
return "\n".join(self.contents)
def readline(self):
self._assert_opened()
if self.current_index >= len(self.contents):
return ""
line = self.contents[self.current_index]
self.current_index += 1
return line
def close(self):
self._assert_opened()
self.contents = None
self.opened = False
def _assert_opened(self):
assert self.opened
def __enter__(self):
self.open()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
return False
def __str__(self, *args, **kwargs):
return "StringAsFileSource"
def ensure_dir(directory, mode=0o777):
"""
Ensure that the provided directory and all of its parent directories exist.
This function is safe to execute on existing directories (no op).
:param directory: The directory to create (if it does not exist).
:param mode: The permission flags to use (if it does not exist).
"""
if directory:
os.makedirs(directory, mode, exist_ok=True)
def _zipdir(source_directory, archive):
for root, dirs, files in os.walk(source_directory):
for file in files:
archive.write(
filename=os.path.join(root, file),
arcname=os.path.relpath(os.path.join(root, file), os.path.join(source_directory, "..")))
def is_archive(name):
"""
:param name: File name to check. Can be either just the file name or optionally also an absolute path.
:return: True iff the given file name is an archive that is also recognized for decompression by Rally.
"""
_, ext = splitext(name)
return ext in [".zip", ".bz2", ".gz", ".tar", ".tar.gz", ".tgz", ".tar.bz2"]
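# For instance (illustrative names): is_archive("documents.json.bz2") is True,
# while is_archive("/data/tracks/documents.json") is False.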
def compress(source_directory, archive_name):
"""
Compress a directory tree.
:param source_directory: The source directory to compress. Must be readable.
:param archive_name: The absolute path including the file name of the archive. Must have the extension .zip.
"""
archive = zipfile.ZipFile(archive_name, "w", zipfile.ZIP_DEFLATED)
_zipdir(source_directory, archive)
def decompress(zip_name, target_directory):
"""
Decompresses the provided archive to the target directory. The following file extensions are supported:
* zip
* bz2
* gz
* tar
* tar.gz
* tgz
* tar.bz2
The decompression method is chosen based on the file extension.
:param zip_name: The full path name to the file that should be decompressed.
:param target_directory: The directory to which files should be decompressed. May or may not exist prior to calling
this function.
"""
path_without_extension, extension = splitext(zip_name)
filename = basename(path_without_extension)
if extension == ".zip":
_do_decompress(target_directory, zipfile.ZipFile(zip_name))
elif extension == ".bz2":
_do_decompress_manually(target_directory, filename, bz2.open(zip_name))
elif extension == ".gz":
_do_decompress_manually(target_directory, filename, gzip.open(zip_name))
elif extension in [".tar", ".tar.gz", ".tgz", ".tar.bz2"]:
_do_decompress(target_directory, tarfile.open(zip_name))
else:
raise RuntimeError("Unsupported file extension [%s]. Cannot decompress [%s]" % (extension, zip_name))
def _do_decompress_manually(target_directory, filename, compressed_file):
ensure_dir(target_directory)
try:
with open("%s/%s" % (target_directory, filename), 'wb') as new_file:
for data in iter(lambda: compressed_file.read(100 * 1024), b''):
new_file.write(data)
finally:
compressed_file.close()
def _do_decompress(target_directory, compressed_file):
try:
compressed_file.extractall(path=target_directory)
except BaseException:
raise RuntimeError("Could not decompress provided archive [%s]" % compressed_file.filename)
finally:
compressed_file.close()
# just in a dedicated method to ease mocking
def dirname(path):
return os.path.dirname(path)
def basename(path):
return os.path.basename(path)
def exists(path):
return os.path.exists(path)
def normalize_path(path, cwd="."):
"""
Normalizes a path by removing redundant "../" and also expanding the "~" character to the user home directory.
:param path: A possibly non-normalized path.
:param cwd: The current working directory. "." by default.
:return: A normalized path.
"""
normalized = os.path.normpath(os.path.expanduser(path))
# user specified only a file name? -> treat as relative to the current directory
if dirname(normalized) == "":
return os.path.join(cwd, normalized)
else:
return normalized
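# For instance (illustrative paths): normalize_path("~/.rally/../benchmarks") resolves to
# "<home>/benchmarks", and normalize_path("track.json", cwd="/tmp") becomes "/tmp/track.json"
# because a bare file name is treated as relative to the current working directory.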
def escape_path(path):
"""
Escapes any characters that might be problematic in shell interactions.
:param path: The original path.
:return: A potentially modified version of the path with all problematic characters escaped.
"""
return path.replace("\\", "\\\\")
def splitext(file_name):
if file_name.endswith(".tar.gz"):
return file_name[0:-7], file_name[-7:]
elif file_name.endswith(".tar.bz2"):
return file_name[0:-8], file_name[-8:]
else:
return os.path.splitext(file_name)
def has_extension(file_name, extension):
"""
Checks whether the given file name has the given extension.
:param file_name: A file name to check (either just the name or an absolute path name).
:param extension: The extension including the leading dot (i.e. it is ".txt", not "txt").
:return: True iff the given ``file_name`` has the given ``extension``.
"""
_, ext = splitext(file_name)
return ext == extension
def prepare_file_offset_table(data_file_path):
"""
Creates a file that contains a mapping from line numbers to file offsets for the provided path. This file is used internally by
#skip_lines(data_file_path, data_file) to speed up line skipping.
:param data_file_path: The path to a text file that is readable by this process.
:return The number of lines read or ``None`` if it did not have to build the file offset table.
"""
offset_file_path = "%s.offset" % data_file_path
# recreate only if necessary as this can be time-consuming
if not os.path.exists(offset_file_path) or os.path.getmtime(offset_file_path) < os.path.getmtime(data_file_path):
console.info("Preparing file offset table for [%s] ... " % data_file_path, end="", flush=True)
line_number = 0
with open(offset_file_path, mode="wt", encoding="utf-8") as offset_file:
with open(data_file_path, mode="rt", encoding="utf-8") as data_file:
while True:
line = data_file.readline()
if len(line) == 0:
break
line_number += 1
if line_number % 50000 == 0:
print("%d;%d" % (line_number, data_file.tell()), file=offset_file)
console.println("[OK]")
return line_number
else:
return None
def remove_file_offset_table(data_file_path):
"""
Attempts to remove the file offset table for the provided data path.
:param data_file_path: The path to a text file that is readable by this process.
"""
offset_file_path = "%s.offset" % data_file_path
os.remove(offset_file_path)
def skip_lines(data_file_path, data_file, number_of_lines_to_skip):
"""
Skips the first `number_of_lines_to_skip` lines in `data_file` as a side effect.
:param data_file_path: The full path to the data file.
:param data_file: The data file. It is assumed that this file is already open for reading and its file pointer is at position zero.
:param number_of_lines_to_skip: A non-negative number of lines that should be skipped.
"""
if number_of_lines_to_skip == 0:
return
offset_file_path = "%s.offset" % data_file_path
offset = 0
remaining_lines = number_of_lines_to_skip
# can we fast forward?
if os.path.exists(offset_file_path):
with open(offset_file_path, mode="rt", encoding="utf-8") as offsets:
for line in offsets:
line_number, offset_in_bytes = [int(i) for i in line.strip().split(";")]
if line_number <= number_of_lines_to_skip:
offset = offset_in_bytes
remaining_lines = number_of_lines_to_skip - line_number
else:
break
# fast forward to the last known file offset
data_file.seek(offset)
# forward the last remaining lines if needed
if remaining_lines > 0:
for line in range(remaining_lines):
data_file.readline()
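# Example of combining the two helpers above (illustrative path only):
#   prepare_file_offset_table("/data/documents.json")
#   with open("/data/documents.json", mode="rt", encoding="utf-8") as f:
#       skip_lines("/data/documents.json", f, 100000)  # fast-forwards via the .offset file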
def get_size(start_path="."):
total_size = 0
for dirpath, dirnames, filenames in os.walk(start_path):
for f in filenames:
fp = os.path.join(dirpath, f)
total_size += os.path.getsize(fp)
return total_size
def _run(args, fallback=None, only_first_line=False):
# noinspection PyBroadException
try:
lines = subprocess.Popen(args, stdout=subprocess.PIPE).communicate()[0].splitlines()
result_lines = [line.decode("utf-8") for line in lines]
if only_first_line:
return result_lines[0]
else:
return result_lines
# pylint: disable=W0702
except:
return fallback
def _read_symlink(path):
try:
return os.path.realpath(path)
except FileNotFoundError:
return None
def guess_install_location(binary_name, fallback=None):
"""
Checks whether a given binary is available on the user's path.
:param binary_name: The name of the binary, e.g. tail, gradle, mvn.
:param fallback: A fallback to return if the binary could not be found on the path.
:return: The full path to the provided binary or the provided fallback.
"""
return _run(["which", binary_name], fallback=fallback, only_first_line=True)
def guess_java_home(major_version=8, fallback=None, runner=_run, read_symlink=_read_symlink):
"""
Tries to find the JDK root directory for the provided version.
:param major_version: The JDK major version that is expected.
:param fallback: The fallback if the JDK home could not be found.
:return: The full path to the JDK root directory or the fallback.
"""
# Mac OS X
if major_version < 9:
java_home = runner(["/usr/libexec/java_home", "-F", "-v", "1.%d" % major_version])
else:
java_home = runner(["/usr/libexec/java_home", "-F", "-v", str(major_version)])
if java_home:
return java_home[0]
else:
# Debian based distributions:
#
# update-alternatives --list java
# /usr/lib/jvm/java-7-openjdk-amd64/jre/bin/java
# /usr/lib/jvm/java-7-oracle/jre/bin/java
# /usr/lib/jvm/java-8-oracle/jre/bin/java
# /usr/lib/jvm/java-9-openjdk-amd64/bin/java
java_home = runner(["update-alternatives", "--list", "java"])
if java_home:
debian_jdk_pattern = re.compile(r"(/.*/(java-%d)[^/]*)/(jre/)?bin/java" % major_version)
for j in java_home:
m = debian_jdk_pattern.match(j)
if m:
return m.group(1)
# pylint: disable=line-too-long
# Red Hat based distributions
#
# ls -l /etc/alternatives/jre_1.[789].0
# lrwxrwxrwx. 1 root root 63 Sep 10 13:57 /etc/alternatives/jre_1.8.0 -> /usr/lib/jvm/java-1.8.0-openjdk-1.8.0.144-5.b01.fc25.x86_64/jre
# lrwxrwxrwx. 1 root root 51 Sep 13 15:04 /etc/alternatives/jre_1.9.0 -> /usr/lib/jvm/java-9-openjdk-9.0.0.163-1.fc25.x86_64 #
#
# We could also use the output of "alternatives --display java" on Red Hat but the output is so
# verbose that it's easier to use the links.
path = read_symlink("/etc/alternatives/java_sdk_1.%d.0" % major_version)
# return path if and only if it is a proper directory
if path and os.path.isdir(path) and not os.path.islink(path):
return path
else:
return fallback
| 35.085202 | 144 | 0.658998 |
79407f65e216bcc639f51f24c0e2539f2f385bcb | 1,961 | py | Python | setup.py | oPromessa/flickr-rsync | a7893e199e237f99b4a447a923c09cabb4090d17 | [
"MIT"
] | null | null | null | setup.py | oPromessa/flickr-rsync | a7893e199e237f99b4a447a923c09cabb4090d17 | [
"MIT"
] | null | null | null | setup.py | oPromessa/flickr-rsync | a7893e199e237f99b4a447a923c09cabb4090d17 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""
Python setuptools install script.
Run with "python setup.py install" to install FlickrAPI
"""
from __future__ import print_function
import os
import sys
import unittest
# Check the Python version
(major, minor) = sys.version_info[:2]
if (major, minor) < (2, 7) or (major == 3 and minor < 6):
raise SystemExit("Sorry, Python 2.7, or 3.6 or newer required")
# Load version number into __version__
with open(os.path.join('flickr_rsync', '_version.py')) as f:
exec(f.read())
from setuptools import setup
def readme():
with open('README.md') as f:
return f.read()
def test_suite():
test_loader = unittest.TestLoader()
test_suite = test_loader.discover('tests', pattern='*_test.py')
return test_suite
additional_requires = []
if os.name == 'nt':
additional_requires.append('win_unicode_console~=0.5')
setup(
name='flickr-rsync',
version=__version__,
description='A python script to manage synchronising a local directory of photos with flickr based on an rsync interaction pattern',
long_description=readme(),
author='Paul Heasley',
author_email='[email protected]',
url='http://www.phdesign.com.au/flickr-rsync',
download_url='https://github.com/phdesign/flickr-rsync/archive/v{}.tar.gz'.format(__version__),
packages=['flickr_rsync'],
license='MIT',
keywords=['flickr', 'sync', 'rsync'],
classifiers='',
install_requires=[
'flickr_api>=0.5beta',
'argparse~=1.4.0',
'rx~=1.5.9',
'futures~=3.1.1',
'backoff~=1.3.1'
] + additional_requires,
dependency_links=[
'https://github.com/alexis-mignon/python-flickr-api/tarball/6f3163b#egg=flickr_api-0.5beta'
],
tests_require=[
'mock~=2.0.0'
],
test_suite='setup.test_suite',
zip_safe=True,
entry_points={
'console_scripts': ['flickr-rsync=flickr_rsync:main'],
},
include_package_data=True
)
| 26.863014 | 136 | 0.667517 |
79408124f194fe88a666c4bcfc498f812f80c3e4 | 1,685 | py | Python | irobot/tests/commands_test.py | bhargav-nunna/irobot | 9251f71267fa8cf5e9620f70aa892ae1b7a182f9 | [
"Unlicense"
] | 13 | 2018-08-19T07:09:36.000Z | 2021-08-10T22:52:31.000Z | irobot/tests/commands_test.py | bhargav-nunna/irobot | 9251f71267fa8cf5e9620f70aa892ae1b7a182f9 | [
"Unlicense"
] | null | null | null | irobot/tests/commands_test.py | bhargav-nunna/irobot | 9251f71267fa8cf5e9620f70aa892ae1b7a182f9 | [
"Unlicense"
] | 4 | 2019-01-21T20:03:36.000Z | 2021-01-26T23:59:00.000Z | __author__ = 'Matthew Witherwax (lemoneer)'
import unittest
from irobot.openinterface.commands import *
def to_str(data):
return '[' + '|'.join((('0x%0.2X' % b) for b in data)) + ']'
class TestCommands(unittest.TestCase):
def test_drive(self):
cmd = drive(-200, 500)
self.assertEqual(to_str(cmd), '[0x89|0xFF|0x38|0x01|0xF4]')
def test_get_days(self):
cmd = get_days(sun_hour=0, sun_min=0, mon_hour=0, mon_min=0, tues_hour=0, tues_min=0, wed_hour=15, wed_min=0,
thurs_hour=0, thurs_min=0, fri_hour=10, fri_min=36, sat_hour=0, sat_min=0)
        self.assertEqual(40, cmd)
def test_set_schedule(self):
cmd = set_schedule(sun_hour=0, sun_min=0, mon_hour=0, mon_min=0, tues_hour=0, tues_min=0, wed_hour=15,
wed_min=0, thurs_hour=0, thurs_min=0, fri_hour=10, fri_min=36, sat_hour=0, sat_min=0)
self.assertEqual(to_str(cmd),
'[0xA7|0x28|0x00|0x00|0x00|0x00|0x00|0x00|0x0F|0x00|0x00|0x00|0x0A|0x24|0x00|0x00]')
def test_set_motors(self):
cmd = set_motors(True, False, True, True, False)
self.assertEqual(to_str(cmd), '[0x8A|0x0D]')
def test_set_leds(self):
cmd = set_leds(False, False, True, False, 0, 128)
self.assertEqual(to_str(cmd), '[0x8B|0x04|0x00|0x80]')
def test_set_ascii_leds(self):
cmd = set_ascii_leds(65, 66, 67, 68)
self.assertEqual(to_str(cmd), '[0xA4|0x41|0x42|0x43|0x44]')
def test_set_song(self):
cmd = set_song(0, [(31, 32), (85, 100)])
self.assertEqual(to_str(cmd), '[0x8C|0x00|0x02|0x1F|0x20|0x55|0x64]')
| 39.186047 | 118 | 0.616024 |
7940826070eb362f82a378bdf82d9c1a7acfa5c8 | 164 | py | Python | CourseExampleAssignements/primes.py | Dazpoet/Learning-python | 60550fc95a1be6e454e688f89644adca2316d164 | [
"BSD-3-Clause"
] | null | null | null | CourseExampleAssignements/primes.py | Dazpoet/Learning-python | 60550fc95a1be6e454e688f89644adca2316d164 | [
"BSD-3-Clause"
] | null | null | null | CourseExampleAssignements/primes.py | Dazpoet/Learning-python | 60550fc95a1be6e454e688f89644adca2316d164 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
for n in range (2,25):
for i in range(2, n):
if n % i == 0:
break
else:
        print(n)
| 18.222222 | 25 | 0.445122 |
794082c50f813c49511b9893139cc290bb8e2395 | 14,616 | py | Python | lambda_functions/odigo/odigo.py | Robyo12121/rightcall | c2dd5d6b01f44a0299e19d67b148f802a86c30fa | [
"Unlicense"
] | null | null | null | lambda_functions/odigo/odigo.py | Robyo12121/rightcall | c2dd5d6b01f44a0299e19d67b148f802a86c30fa | [
"Unlicense"
] | 2 | 2019-02-12T14:10:16.000Z | 2019-02-13T13:54:13.000Z | lambda_functions/odigo/odigo.py | Robyo12121/rightcall | c2dd5d6b01f44a0299e19d67b148f802a86c30fa | [
"Unlicense"
] | null | null | null | #!/usr/bin/env python3.7
# -*- coding: utf-8 -*-
import os
import sys
import time
import datetime
import pandas as pd
from bs4 import BeautifulSoup
from requestium import Session
from selenium.webdriver.common.action_chains import ActionChains
from dotenv import load_dotenv
from lxml import html
load_dotenv()
username = os.environ.get('PROSODIE_USERNAME')
passwd = os.environ.get('PROSODIE_PASSWORD')
print(sys.version)
def change_date_format(date):
try:
correct_string = date.strptime(str(date.date()), '%Y-%m-%d').strftime('%m-%d-%Y')
return correct_string
except Exception as e:
raise e
def change_time_format(date):
try:
correct_string = date.strptime(str(date.hour) + ':' + str(date.minute), "%H:%M").strftime("%I:%M %p")
if correct_string[0] == "0":
return correct_string[1::]
else:
return correct_string
except Exception as e:
raise e
def set_range(now):
"""
Takes current datetime and finds the nearest, previous half hour.
Returns the appropriate start and end times and date
"""
# Format: '10-19-2018'
# Format: '12:00 AM'
def ceil_dt(dt, delta):
"""Round up to the nearest half hour"""
return dt + (datetime.datetime.min - dt) % delta
hour_ago = now - datetime.timedelta(minutes=60)
rounded = ceil_dt(hour_ago, datetime.timedelta(minutes=30))
start_date = change_date_format(rounded)
start_time = change_time_format(rounded)
thirty_mins = datetime.timedelta(minutes=30)
end_date = start_date
end_time = change_time_format(rounded + thirty_mins)
return (start_date, start_time, end_date, end_time)
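# For instance (illustrative timestamp): set_range(datetime.datetime(2018, 10, 19, 14, 50))
# returns ('10-19-2018', '2:00 PM', '10-19-2018', '2:30 PM'), i.e. the half-hour slot
# starting one hour back, rounded up to the next half hour.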
def setup():
driver = r'C:\Users\RSTAUNTO\Desktop\chromedriver.exe'
s = Session(webdriver_path=driver,
browser='chrome',
default_timeout=15,
webdriver_options={'arguments': ['headless']})
return s
def download_mp3(s, path=None, ref=None, xpath=None):
"""Download mp3 file from www.prosodie.com page and return session.
Input:
s -- Requestium session (required |
type: requestium.requestium.Session);
path -- mp3 file absolute path (not required | type: str);
ref -- ref number (not required | type: str).
Example: '3905beTOd10339';
Output:
s -- Requestium session (type: requestium.requestium.Session).
"""
if ref is not None and xpath is None:
s.driver.ensure_element_by_class_name('x-action-col-icon').click()
elif xpath is not None and ref is None:
s.driver.ensure_element_by_xpath(xpath).click()
else:
print("Cannot use both reference number and xpath")
return
s.driver.switch_to.frame('result_frame')
time.sleep(1)
# Get URL of mp3 file
src = s.driver.ensure_element_by_id('messagePlayer').get_attribute('src')
# Selenium --> Requests
s.transfer_driver_cookies_to_session()
# Download
r = s.get(src, stream=True)
if path is None:
if ref is None:
# Get ref number
soap = BeautifulSoup(s.driver.page_source, 'lxml')
ref = soap.findAll('div', class_='x-grid-cell-inner')[1].text
path = '%s.mp3' % ref
if r.status_code == 200:
with open(path, 'wb') as f:
for chunk in r.iter_content(1024*2014):
f.write(chunk)
else:
return 1
# Requests --> Selenium
s.transfer_session_cookies_to_driver()
s.driver.switch_to.default_content()
return s
def download_mp3_by_ref(s, username, passwd, ref, path=None):
"""Download mp3 file from www.prosodie.com page by ref number.
Input:
s -- Requestium session (required |
type: requestium.requestium.Session);
username -- username on www.prosodie.com (required | type: str);
passwd -- password for username on www.prosodie.com (required |
type: str);
ref -- ref number (required | type: str). Example: '3905beTOd10339';
path -- mp3 file absolute path (not required | type: str).
"""
s = login(s, username, passwd)
s = search_by_ref(s, ref)
result = download_mp3(s, path, ref)
if result == 1:
return 1
s.driver.close()
def download_mp3_by_csv(s, username, passwd, csv_path, download_dir=None):
"""Download mp3 file/files from www.prosodie.com page by input csv file.
Input:
s -- Requestium session (required |
type: requestium.requestium.Session);
username -- username on www.prosodie.com (required | type: str);
passwd -- password for username on www.prosodie.com (required |
type: str);
csv_path -- csv file absolute path (required | type: str);
download_dir -- download directory for mp3 file/files (not required |
type: str).
"""
s = login(s, username, passwd)
refs = pd.read_csv(csv_path, sep=';').Name
length = len(refs)
for i, ref in enumerate(refs):
sys.stdout.write('\r')
sys.stdout.write('downloading: %s/%s' % (i+1, length))
sys.stdout.flush()
s = search_by_ref(s, ref)
mp3_path = None
if download_dir is not None:
file_name = '%s.mp3' % ref
mp3_path = os.path.join(download_dir, file_name)
result = download_mp3(s, mp3_path, ref)
if result == 1:
return 1
sys.stdout.write('\n')
sys.stdout.flush()
s.driver.close()
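# The CSV handed to download_mp3_by_csv is read with sep=';' and only its
# 'Name' column is used, so a minimal input file (contents illustrative)
# could look like:
#
#   Name;Notes
#   3905beTOd10339;first call
#   b76993TOd10547;second call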
def download_all_csv(s, username, passwd, download_dir=None):
s = setup()
d = datetime.datetime.now()
s = login(s, username, passwd)
search_range = set_range(d)
print(search_range[0], search_range[1], search_range[2], search_range[3])
s = search_by_range(s, search_range[0],
search_range[1],
search_range[2],
search_range[3])
s = search_by_language(s, language="_EN")
# s.driver.execute_script("zipItems();")
csvB = s.driver.ensure_element_by_id('csvButton')
if csvB.is_displayed():
print("csvB is visible")
csvB.ensure_click()
else:
print("Not Visible")
yes = s.driver.ensure_element_by_id("button-1006")
# yes = s.driver.ensure_element_by_css_selector("#button-1006")
# yes.ensure_click()
# full_xpath = """//div[@id='messagebox-1001']/div[@id='messagebox-1001-toolbar']/div[@id='messagebox-1001-toolbar-innerCt']/div[@id='messagebox-1001-toolbar-targetEl']/a[@id='button-1006'])"""
# xpath_messagebox = "//div[@id='messagebox-1001']"
# css_sel_messagebox = '.x-css-shadow'
# yes = s.driver.ensure_element_by_css_selector(css_sel_messagebox)
# if yes.is_displayed():
# print("Yes is visible")
# yes.ensure_click()
# else:
# print("Yes button not visible")
# s.driver.ensure_element_by_id('button-1006').ensure_click()
s.driver.close()
# return element
def check_num_results(s):
url = 'https://enregistreur.prosodie.com/odigo4isRecorder/' \
'EntryPoint?serviceName=CriteresMessagesHandler&lang=en'
s.driver.get(url)
result = s.driver.ensure_element_by_id('resultLabelId').get_attribute("innerText")
return result
def login(s, username, passwd):
"""Login to www.prosodie.com with username/passwd pair and return session.
Input:
s -- Requestium session (required |
type: requestium.requestium.Session);
username -- username on www.prosodie.com (required | type: str);
passwd -- password for username on www.prosodie.com (required |
type: str).
Output:
s -- Requestium session (type: requestium.requestium.Session).
"""
url = 'https://enregistreur.prosodie.com/odigo4isRecorder/' \
'EntryPoint?serviceName=LoginHandler'
s.driver.get(url)
s.driver.ensure_element_by_name('mail').send_keys(username)
s.driver.ensure_element_by_name('password').send_keys(passwd)
s.driver.ensure_element_by_name('valider').click()
return s
def search_by_range(s, start_date=None, start_time=None, end_date=None,
end_time=None):
""" Doesn't work correctly. Date seems to work but time not so much.
Search records on www.prosodie.com by date range and return session.
Input:
s -- Requestium session (required |
type: requestium.requestium.Session);
start_date -- start date (not required | type: str). Format:
                      'mm-dd-yyyy'. Example: '03-05-1991';
start_time -- start time (not required | type: str). Example:
'12:00 AM';
end_date -- end date (not required | type: str). Format:
                    'mm-dd-yyyy'. Example: '03-05-1991';
end_time -- end time (not required | type: str). Example: '12:00 PM'.
Output:
s -- Requestium session (type: requestium.requestium.Session).
"""
url = 'https://enregistreur.prosodie.com/odigo4isRecorder/' \
'EntryPoint?serviceName=CriteresMessagesHandler&lang=en'
s.driver.get(url)
if start_date:
s.driver.ensure_element_by_name('dateDebut').send_keys(start_date)
if start_time:
s.driver.ensure_element_by_name('heureDebut').send_keys(start_time)
if end_date:
s.driver.ensure_element_by_name('dateFin').send_keys(end_date)
if end_time:
s.driver.ensure_element_by_name('heureFin').send_keys(end_time)
s.driver.ensure_element_by_id('button-1009').ensure_click()
return s
def search_by_language(s, language=None):
"""
Filter results by language
Options: "_EN" : English, "_ES" : Spanish etc.
"""
url = 'https://enregistreur.prosodie.com/odigo4isRecorder/' \
'EntryPoint?serviceName=CriteresMessagesHandler&lang=en'
s.driver.get(url)
if language:
s.driver.ensure_element_by_name('criteres').send_keys(language)
s.driver.ensure_element_by_id('button-1009').click()
return s
def search_by_ref(s, ref):
"""Search record on www.prosodie.com by ref number and return session.
Input:
s -- Requestium session (required |
type: requestium.requestium.Session);
ref -- ref number (required | type: str). Example: '3905beTOd10339'.
Output:
s -- Requestium session (type: requestium.requestium.Session).
"""
url = 'https://enregistreur.prosodie.com/odigo4isRecorder/' \
'EntryPoint?serviceName=CriteresMessagesHandler&lang=en'
s.driver.get(url)
s.driver.ensure_element_by_name('refEr').send_keys(ref)
s.driver.ensure_element_by_id('button-1009').click()
return s
def messages_per_page(s):
pass
def count_recordings(s):
"""Doesn't Work"""
refs = []
items = s.driver.find_elements_by_css_selector('#gridview-1064-body')
## items = s.driver.ensure_elements_by_xpath('//*[@id="gridview-1064-body"]')
for item in items:
ref_num = s.driver.ensure_element_by_css_selector('#ext-gen1440 > div:nth-child(1)')
refs.append(ref_num)
# refs = s.driver.find_elements_by_xpath('//*[@id="gridview-1064-record-6203746"]')
return refs
def get_num_results(s):
    """Return the results label text (may be just 'RESULTS : ' if the count
    has not been populated yet)."""
    text = s.driver.ensure_element_by_css_selector('#resultLabelId').text
    return text
def get_text(s, element_xpath):
"""Given the xpath returns the web element text"""
text = s.driver.ensure_element_by_xpath(element_xpath)
return text.text
def xpath_generator(common, ending, loop_element, count=10):
"""Given common xpath and ending xpath:
return a generator to loop through that xpath element
"""
for i in range(count):
yield (common + loop_element + '[' + str(i+1)+ ']' + ending)
def loop_through_table(s):
table = s.driver.ensure_element_by_xpath('//*[@id="gridview-1064-body"]')
for row in table.find_elements_by_xpath('.//tr'):
        print([td.text for td in row.find_elements_by_xpath(
            './/*[contains(@class, "x-grid-cell-col")][text()]')])
if __name__ == '__main__':
s = setup()
d = datetime.datetime.now()
s = login(s, username, passwd)
search_range = set_range(d)
print(search_range)
s = search_by_range(s, search_range[0],
search_range[1],
search_range[2],
search_range[3])
time.sleep(0.5)
s = search_by_language(s, language="_EN")
time.sleep(0.5)
table = loop_through_table(s)
## common = '/html/body/div[2]/div[3]/div[2]/div[5]/div/div/span/div/div/div[2]/div/div/div[1]/div[2]/div/table/tbody/'
## body = '//*[@id="gridview-1064-body"]'
## ref_num_path = 'tr[1]/td[3]/div'
## time_path = '/td[4]/div'
## mp3_path = '/td[2]/div/img'
## time_gen = xpath_generator(common, time_path, 'tr')
## mp3_gen = xpath_generator(common, mp3_path, 'tr')
## while True:
## try:
## rec_time = get_text(s, next(time_gen))
## print(rec_time)
## s = download_mp3(s, xpath=next(mp3_gen))
##
## except Exception as e:
## print(f"Exception occured: {e}")
## raise e
## text, text2 = get_num_results(s)
## refs = count_recordings(s)
## download_mp3(s)
s.driver.close()
# element = download_all_csv(s, username, passwd)
# print(element)
# print(type(element))
# download_mp3_by_csv(s, username, passwd, )
# download_mp3_by_ref(s, username, passwd, 'bda551TVd00927',
# r'C:\Users\RSTAUNTO\Desktop\Python\projects\rightcall_robin\lambda_functions\myMP3.mp3')
# download_mp3_by_ref(s, username, passwd, 'b76993TOd10547')
# download_mp3_by_csv(s, username, passwd,
# 'csvs/toget.csv', download_dir='mp3s')
# Example. Download mp3 file from www.prosodie.com by '3905beTOd10339'
# ref number
# download_mp3_by_ref(s, username, passwd, '3905beTOd10339')
# Example. Download mp3 file from www.prosodie.com by '3905beTOd10339'
# ref number as /tmp/example.mp3
# download_mp3_by_ref(s, username, passwd,
# '3905beTOd10339', '/tmp/example.mp3')
# Example. Download mp3 file/files from www.prosodie.com
# page by input csv file
# download_mp3_by_csv(s, username, passwd, 'input.csv')
# Example. Download mp3 file/files from www.prosodie.com page
# by input csv file
# to dedicated directory
# download_mp3_by_csv(s, username, passwd, 'input.csv', download_dir='/tmp')
| 35.389831 | 197 | 0.638273 |
7940831e92f4c1b77f11c637a38a596f09102845 | 1,083 | py | Python | boneik/io.py | cheind/bone-solve-ik | e95ad73e438923c7e63e668b109e8a4734b62a96 | ["MIT"] | 1 | 2022-01-08T12:14:03.000Z | 2022-01-08T12:14:03.000Z | boneik/io.py | cheind/bone-solve-ik | e95ad73e438923c7e63e668b109e8a4734b62a96 | ["MIT"] | null | null | null | boneik/io.py | cheind/bone-solve-ik | e95ad73e438923c7e63e668b109e8a4734b62a96 | ["MIT"] | null | null | null |
import json
import numpy as np
from pathlib import Path
from . import kinematics
from . import utils
def load_json(jsonpath: Path) -> kinematics.Body:
jsonpath = Path(jsonpath)
assert jsonpath.is_file()
    with open(jsonpath, "r") as f:
        data = json.load(f)
b = kinematics.BodyBuilder()
def _convert_interval(i):
if i is None:
return None
else:
return np.deg2rad(i)
for bone in data["bones"]:
dofs = None
if "dofs" in bone:
dofs = {n: _convert_interval(i) for n, i in bone["dofs"].items()}
b.add_bone(
bone["u"],
bone["v"],
tip_to_base=utils.make_tip_to_base(bone["length"], bone["axes"]),
dofs=dofs,
)
order = None
if "order" in data:
order = data["order"]
return b.finalize(order)
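# Sketch of an input file the loader above would accept (field values and
# the exact shape of "axes"/"dofs" are illustrative guesses based on the
# keys read here):
#
#   {
#     "bones": [
#       {"u": "pelvis", "v": "spine", "length": 1.0, "axes": "xyz",
#        "dofs": {"rx": [-45, 45]}}
#     ],
#     "order": ["pelvis", "spine"]
#   }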
if __name__ == "__main__":
from . import draw
import matplotlib.pyplot as plt
body = load_json("etc/body.json")
fig, ax = draw.create_figure3d()
draw.draw_kinematics(ax, body=body, draw_root=True)
    plt.show()
| 23.543478 | 77 | 0.588181 |
794084ddc3f4bffafd53731c0fb7c04056de43e7 | 15,334 | py | Python | donphan/_insertable.py | bijij/Donphan | fabe2ea77fed0c4921dc370124b338ab8c1420ce | ["MIT"] | 17 | 2019-11-09T18:22:31.000Z | 2021-08-24T22:40:03.000Z | donphan/_insertable.py | bijij/Donphan | fabe2ea77fed0c4921dc370124b338ab8c1420ce | ["MIT"] | 2 | 2021-05-30T17:03:01.000Z | 2021-10-13T09:51:45.000Z | donphan/_insertable.py | bijij/Donphan | fabe2ea77fed0c4921dc370124b338ab8c1420ce | ["MIT"] | 3 | 2020-09-28T16:30:53.000Z | 2022-02-06T03:21:34.000Z |
"""
MIT License
Copyright (c) 2019-present Josh B
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from __future__ import annotations
from collections.abc import Iterable
from typing import TYPE_CHECKING, Any, ClassVar, Optional, TypeVar, Union, cast, overload
from ._column import Column, SQLType
from ._selectable import Selectable
from .utils import optional_pool, query_builder, resolve_annotation
if TYPE_CHECKING:
from asyncpg import Connection, Record # type: ignore
__all__ = ("Insertable",)
T = TypeVar("T")
class Insertable(Selectable):
if TYPE_CHECKING:
_primary_keys: ClassVar[list[Column]]
@classmethod
def _setup_column(
cls,
name: str,
type: Any,
globals: dict[str, Any],
locals: dict[str, Any],
cache: dict[str, Any],
) -> None:
type = resolve_annotation(type, globals, locals, cache)
if getattr(type, "__origin__", None) is not Column:
raise TypeError("Column typings must be of type Column.")
type = type.__args__[0]
is_array = False
if getattr(type, "__origin__", None) is list:
is_array = True
type = type.__args__[0]
try:
if not issubclass(type, SQLType):
type = SQLType._from_type(list[type] if is_array else type) # type: ignore
elif is_array:
type = SQLType._from_type(list[type.py_type]) # type: ignore
except TypeError:
if getattr(type, "__origin__", None) is not SQLType:
raise TypeError("Column typing generics must be a valid SQLType.")
type = type.__args__[0] # type: ignore
type = SQLType._from_type(list[type] if is_array else type) # type: ignore
if not hasattr(cls, name):
column = Column._with_type(type) # type: ignore
setattr(cls, name, column)
else:
column = getattr(cls, name)
if not isinstance(column, Column):
raise ValueError("Column values must be an instance of Column.")
column._sql_type = type
column.name = name
column.table = cls
cls._columns_dict[name] = column
if column.primary_key:
cls._primary_keys.append(column)
def __init_subclass__(cls, **kwargs: Any) -> None:
cls._primary_keys = []
super().__init_subclass__(**kwargs)
# region: query generation
@classmethod
def _get_primary_keys(
cls,
record: Record,
) -> dict[str, Any]:
return {column.name: record[column.name] for column in cls._primary_keys}
@classmethod
@query_builder
def _build_query_insert(
cls,
columns: Union[Iterable[Union[Column, str]], str],
ignore_on_conflict: bool,
update_on_conflict: Union[Iterable[Union[Column, str]], str],
returning: Union[Iterable[Union[Column, str]], str],
) -> list[str]:
builder = [f"INSERT INTO", cls._name, "("]
if isinstance(columns, str):
builder.append(columns)
else:
for column in columns:
if isinstance(column, Column):
column = column.name
builder.append(column)
builder.append(",")
builder.pop(-1)
builder.append(") VALUES (")
for i, _ in enumerate(columns, 1):
builder.append(f"${i}")
builder.append(",")
builder.pop(-1)
builder.append(")")
if ignore_on_conflict and update_on_conflict:
            raise ValueError("ignore_on_conflict and update_on_conflict are mutually exclusive")
elif ignore_on_conflict:
builder.append("ON CONFLICT DO NOTHING")
elif update_on_conflict:
builder.append("ON CONFLICT (")
for column in cls._primary_keys:
builder.append(column.name)
builder.append(",")
builder.pop(-1)
builder.append(") DO UPDATE SET")
if isinstance(update_on_conflict, str):
builder.append(update_on_conflict)
else:
for column in update_on_conflict:
if isinstance(column, Column):
column = column.name
builder.append(f"{column} = EXCLUDED.{column}")
builder.append(",")
builder.pop(-1)
if returning:
builder.append("RETURNING")
if isinstance(returning, str):
builder.append(returning)
else:
for column in returning:
if isinstance(column, Column):
column = column.name
builder.append(column)
builder.append(",")
builder.pop(-1)
return builder
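    # For a hypothetical table "users" with primary key "id", a call like
    # _build_query_insert(["id", "name"], False, [], ["id"]) should produce
    # SQL along these lines (exact whitespace depends on query_builder):
    #
    #   INSERT INTO users ( id , name ) VALUES ( $1 , $2 ) RETURNING id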
@classmethod
@query_builder
def _build_query_update(
cls,
where: str,
offset: int,
columns: Union[Iterable[Union[Column, str]], str],
) -> list[str]:
builder = [f"UPDATE", cls._name, "SET"]
if isinstance(columns, str):
columns = [column.strip() for column in columns.split(",")]
for i, column in enumerate(columns, offset):
if isinstance(column, Column):
column = column.name
builder.append(column)
builder.append(f"= ${i}")
builder.append(",")
builder.pop(-1)
builder.append("WHERE")
builder.append(where)
return builder
@classmethod
@query_builder
def _build_query_delete(
cls,
where: str,
) -> list[str]:
builder = ["DELETE FROM", cls._name]
if where:
builder.append("WHERE")
builder.append(where)
return builder
# endregion
# region: public methods
@overload
@classmethod
@optional_pool
async def insert(
cls,
connection: Connection,
/,
*,
ignore_on_conflict: bool = ...,
update_on_conflict: Optional[Union[Iterable[Union[Column, str]], str]] = ...,
returning: Union[Iterable[Union[Column, str]], str] = ...,
**values: Any,
) -> Record:
...
@overload
@classmethod
@optional_pool
async def insert(
cls,
connection: Connection,
*,
ignore_on_conflict: bool = ...,
update_on_conflict: Optional[Union[Iterable[Union[Column, str]], str]] = ...,
returning: None = ...,
**values: Any,
) -> None:
...
@classmethod
@optional_pool
async def insert(
cls,
connection: Connection,
/,
*,
ignore_on_conflict: bool = False,
update_on_conflict: Optional[Union[Iterable[Union[Column, str]], str]] = None,
returning: Optional[Union[Iterable[Union[Column, str]], str]] = None,
**values: Any,
) -> Optional[Record]:
r"""|coro|
Inserts a new record into the database.
Parameters
----------
connection: :class:`asyncpg.Connection <asyncpg.connection.Connection>`
The database connection to use for transactions.
ignore_on_conflict: :class:`bool`
Sets whether to ignore errors when inserting, defaults to ``False``.
update_on_conflict: Optional[Union[Iterable[Union[:class:`~Column`, :class:`str`]], :class:`str`]]
An Optional list of or string representing columns to update with new data if a conflict occurs.
returning: Optional[Union[Iterable[Union[:class:`~Column`, :class:`str`]], :class:`str`]]
An optional list of or string representing columns to return from the inserted record.
\*\*values: Any
The column to value mapping for the record to insert.
Returns
-------
Optional[:class:`asyncpg.Record`]
A record containing information from the inserted record.
"""
columns = cls._get_columns(values)
query = cls._build_query_insert(columns, ignore_on_conflict, update_on_conflict or [], returning or [])
if returning is not None:
return await connection.fetchrow(query, *values.values())
await connection.execute(query, *values.values())
@overload
@classmethod
@optional_pool
async def insert_many(
cls,
connection: Connection,
/,
columns: Union[Iterable[Union[Column, str]], str],
*values: Iterable[Any],
ignore_on_conflict: bool = False,
update_on_conflict: Optional[Union[Iterable[Union[Column, str]], str]] = None,
) -> None:
...
@overload
@classmethod
@optional_pool
async def insert_many(
cls,
connection: Connection,
/,
columns: None,
*values: dict[str, Any],
ignore_on_conflict: bool = False,
update_on_conflict: Optional[Union[Iterable[Union[Column, str]], str]] = None,
) -> None:
...
@classmethod
@optional_pool
async def insert_many(
cls,
connection: Connection,
/,
columns: Optional[Union[Iterable[Union[Column, str]], str]],
*values: Union[Iterable[Any], dict[str, Any]],
ignore_on_conflict: bool = False,
update_on_conflict: Optional[Union[Iterable[Union[Column, str]], str]] = None,
) -> None:
r"""|coro|
Inserts a set of new records into the database.
Parameters
----------
connection: :class:`asyncpg.Connection <asyncpg.connection.Connection>`
The database connection to use for transactions.
\*values: Dict[:class:`str`, Any]
The column to value mappings for each record to insert.
ignore_on_conflict: :class:`bool`
Sets whether to ignore errors when inserting, defaults to ``False``.
update_on_conflict: Optional[Union[Iterable[Union[:class:`~Column`, :class:`str`]], :class:`str`]]
An Optional list of or string representing columns to update with new data if a conflict occurs.
"""
if columns is None:
values = cast(tuple[dict[str, Any], ...], values)
columns = cls._get_columns(values[0])
values = cast(tuple[list[Any]], (list(value.values()) for value in values))
query = cls._build_query_insert(columns, ignore_on_conflict, update_on_conflict or [], [])
await connection.executemany(query, values)
@classmethod
@optional_pool
async def update_where(
cls,
connection: Connection,
/,
where: str,
*values: Any,
**_values: Any,
) -> None:
r"""|coro|
Updates records in the database which match a given WHERE clause.
Parameters
----------
connection: :class:`asyncpg.Connection <asyncpg.connection.Connection>`
The database connection to use for transactions.
where: :class:`str`
An SQL WHERE clause.
\*values: Any
Values to be substituted into the WHERE clause.
\*\*values: Any
The column to value mapping to assign to updated records.
"""
columns = cls._get_columns(_values)
query = cls._build_query_update(where, len(values) + 1, columns)
await connection.execute(query, *values, *_values.values())
@classmethod
@optional_pool
async def update_record(
cls,
connection: Connection,
/,
record: Record,
**values: Any,
) -> None:
r"""|coro|
Updates a record in the database.
Parameters
----------
connection: :class:`asyncpg.Connection <asyncpg.connection.Connection>`
The database connection to use for transactions.
record: :class:`asyncpg.Record`
The record to update.
\*\*values: Any
The column to value mapping to assign to updated record.
"""
primary_keys = cls._get_primary_keys(record)
where = cls._build_where_clause(primary_keys)
return await cls.update_where(connection, where, *primary_keys.values(), **values)
@classmethod
@optional_pool
async def delete_where(
cls,
connection: Connection,
/,
where: str,
*values: Any,
) -> None:
"""|coro|
Deletes records in the database which match the given WHERE clause.
Parameters
----------
connection: :class:`asyncpg.Connection <asyncpg.connection.Connection>`
The database connection to use for transactions.
where: :class:`str`
An SQL WHERE clause.
*values: Any
Values to be substituted into the WHERE clause.
"""
query = cls._build_query_delete(where)
await connection.execute(query, *values)
@classmethod
@optional_pool
async def delete(
cls,
connection: Connection,
/,
**values: Any,
) -> None:
r"""|coro|
Deletes records in the database which contain the given values.
Parameters
----------
connection: :class:`asyncpg.Connection <asyncpg.connection.Connection>`
The database connection to use for transactions.
\*\*values: Any
The column to value mapping to filter records with.
"""
where = cls._build_where_clause(values)
return await cls.delete_where(connection, where, *filter(lambda v: v is not None, values.values()))
@classmethod
@optional_pool
async def delete_record(
cls,
connection: Connection,
/,
record: Record,
) -> None:
"""|coro|
Deletes a given record from the database.
Parameters
----------
connection: :class:`asyncpg.Connection <asyncpg.connection.Connection>`
The database connection to use for transactions.
record: :class:`asyncpg.Record`
The record to delete.
"""
primary_keys = cls._get_primary_keys(record)
where = cls._build_where_clause(primary_keys)
return await cls.delete_where(connection, where, *primary_keys.values())
# endregion
| 31.230143 | 111 | 0.594039 |
794085d0032e6a1a732c0ea0bc9c288f99ad3b57 | 2,473 | py | Python | sdks/python/apache_beam/internal/http_client.py | shitanshu-google/beam | 9cd959f61d377874ee1839c2de4bb8f65a948ecc | ["Apache-2.0"] | 35 | 2016-09-22T22:53:14.000Z | 2020-02-13T15:12:21.000Z | sdks/python/apache_beam/internal/http_client.py | shitanshu-google/beam | 9cd959f61d377874ee1839c2de4bb8f65a948ecc | ["Apache-2.0"] | 71 | 2018-05-23T22:20:02.000Z | 2019-04-30T15:37:46.000Z | sdks/python/apache_beam/internal/http_client.py | shitanshu-google/beam | 9cd959f61d377874ee1839c2de4bb8f65a948ecc | ["Apache-2.0"] | 88 | 2016-11-27T02:16:11.000Z | 2020-02-28T05:10:26.000Z |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Utility functions used for creating a common Http client from httplib2.
For internal use only. No backwards compatibility guarantees.
"""
# pytype: skip-file
from __future__ import absolute_import
import logging
import os
import re
import httplib2
# This is the number of seconds the library will wait for GCS operations to
# complete.
DEFAULT_HTTP_TIMEOUT_SECONDS = 60
def proxy_info_from_environment_var(proxy_env_var):
"""Reads proxy info from the environment and converts to httplib2.ProxyInfo.
Args:
proxy_env_var: environment variable string to read, http_proxy or
https_proxy (in lower case).
Example: http://myproxy.domain.com:8080
Returns:
httplib2.ProxyInfo constructed from the environment string.
"""
proxy_url = os.environ.get(proxy_env_var)
if not proxy_url:
return None
proxy_protocol = proxy_env_var.lower().split('_')[0]
if not re.match('^https?://', proxy_url, flags=re.IGNORECASE):
logging.warning(
"proxy_info_from_url requires a protocol, which is always "
"http or https.")
proxy_url = proxy_protocol + '://' + proxy_url
return httplib2.proxy_info_from_url(proxy_url, method=proxy_protocol)
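# Example (the proxy host below is a placeholder): with
# os.environ['http_proxy'] = 'myproxy.corp.example:8080', calling
# proxy_info_from_environment_var('http_proxy') logs a warning about the
# missing protocol and builds the ProxyInfo as if
# 'http://myproxy.corp.example:8080' had been configured.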
def get_new_http():
"""Creates and returns a new httplib2.Http instance.
Returns:
An initialized httplib2.Http instance.
"""
proxy_info = None
for proxy_env_var in ['http_proxy', 'https_proxy']:
if os.environ.get(proxy_env_var):
proxy_info = proxy_info_from_environment_var(proxy_env_var)
break
# Use a non-infinite SSL timeout to avoid hangs during network flakiness.
return httplib2.Http(
proxy_info=proxy_info, timeout=DEFAULT_HTTP_TIMEOUT_SECONDS)
| 33.418919 | 78 | 0.754145 |
794086c00bdb488d58e9465d3bbae4142b8fd280 | 435 | py | Python | src/parser.py | dotfortun/wumpys | 01eac1045ee0844972d01b6d75ee842390c60d26 | [
"MIT"
] | null | null | null | src/parser.py | dotfortun/wumpys | 01eac1045ee0844972d01b6d75ee842390c60d26 | [
"MIT"
] | null | null | null | src/parser.py | dotfortun/wumpys | 01eac1045ee0844972d01b6d75ee842390c60d26 | [
"MIT"
] | null | null | null | import csv
class Parser(object):
"""docstring for Parser."""
def __init__(self, _lang = 'en_us'):
super(Parser, self).__init__()
reader = []
self.strings = {}
with open(_lang + '.csv', 'rb') as localization:
reader = csv.reader(localization)
for row in reader:
self.strings[row[0]] = row[1]
def render(self, _player, _wumpus):
return NotImplemented
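# The localization CSV is assumed (based on the reader loop above) to hold
# one string id and its text per row, e.g.:
#
#   intro,You enter a dark cave...
#   win,You shot the wumpus!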
| 27.1875 | 56 | 0.570115 |
794086ce36a97fec1f377ce2158aa0d7289e1295 | 21,980 | py | Python | pw_env_setup/py/pw_env_setup/env_setup.py | silvergasp/pigweed | b095218bcd7064ddcc5af5f689ce235fc9e4cc91 | ["Apache-2.0"] | null | null | null | pw_env_setup/py/pw_env_setup/env_setup.py | silvergasp/pigweed | b095218bcd7064ddcc5af5f689ce235fc9e4cc91 | ["Apache-2.0"] | null | null | null | pw_env_setup/py/pw_env_setup/env_setup.py | silvergasp/pigweed | b095218bcd7064ddcc5af5f689ce235fc9e4cc91 | ["Apache-2.0"] | null | null | null |
#!/usr/bin/env python
# Copyright 2020 The Pigweed Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""Environment setup script for Pigweed.
This script installs everything and writes out a file for the user's shell
to source.
For now, this is valid Python 2 and Python 3. Once we switch to running this
with PyOxidizer it can be upgraded to recent Python 3.
"""
from __future__ import print_function
import argparse
import copy
import glob
import inspect
import json
import os
import shutil
import subprocess
import sys
# TODO(pwbug/67): Remove import hacks once the oxidized prebuilt binaries are
# proven stable for first-time bootstrapping. For now, continue to support
# running directly from source without assuming a functioning Python
# environment when running for the first time.
# If we're running oxidized, filesystem-centric import hacks won't work. In that
# case, jump straight to the imports and assume oxidation brought in the deps.
if not getattr(sys, 'oxidized', False):
old_sys_path = copy.deepcopy(sys.path)
filename = None
if hasattr(sys.modules[__name__], '__file__'):
filename = __file__
else:
# Try introspection in environments where __file__ is not populated.
frame = inspect.currentframe()
if frame is not None:
filename = inspect.getfile(frame)
# If none of our strategies worked, we're in a strange runtime environment.
# The imports are almost certainly going to fail.
if filename is None:
raise RuntimeError(
'Unable to locate pw_env_setup module; cannot continue.\n'
'\n'
'Try updating to one of the standard Python implemetations:\n'
' https://www.python.org/downloads/')
sys.path = [
os.path.abspath(os.path.join(filename, os.path.pardir, os.path.pardir))
]
import pw_env_setup # pylint: disable=unused-import
sys.path = old_sys_path
# pylint: disable=wrong-import-position
from pw_env_setup.cipd_setup import update as cipd_update
from pw_env_setup.cipd_setup import wrapper as cipd_wrapper
from pw_env_setup.colors import Color, enable_colors
from pw_env_setup import cargo_setup
from pw_env_setup import environment
from pw_env_setup import spinner
from pw_env_setup import virtualenv_setup
from pw_env_setup import windows_env_start
# TODO(pwbug/67, pwbug/68) switch to shutil.which().
def _which(executable,
pathsep=os.pathsep,
use_pathext=None,
case_sensitive=None):
if use_pathext is None:
use_pathext = (os.name == 'nt')
if case_sensitive is None:
case_sensitive = (os.name != 'nt' and sys.platform != 'darwin')
if not case_sensitive:
executable = executable.lower()
exts = None
if use_pathext:
exts = frozenset(os.environ['PATHEXT'].split(pathsep))
if not case_sensitive:
exts = frozenset(x.lower() for x in exts)
if not exts:
raise ValueError('empty PATHEXT')
paths = os.environ['PATH'].split(pathsep)
for path in paths:
try:
entries = frozenset(os.listdir(path))
if not case_sensitive:
entries = frozenset(x.lower() for x in entries)
except OSError:
continue
if exts:
for ext in exts:
if executable + ext in entries:
return os.path.join(path, executable + ext)
else:
if executable in entries:
return os.path.join(path, executable)
return None
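# Illustrative lookups (actual results depend on the local PATH):
#
#   _which('git')                       # e.g. '/usr/bin/git' on Linux
#   _which('definitely-not-installed')  # None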
class _Result:
class Status:
DONE = 'done'
SKIPPED = 'skipped'
FAILED = 'failed'
def __init__(self, status, *messages):
self._status = status
self._messages = list(messages)
def ok(self):
return self._status in {_Result.Status.DONE, _Result.Status.SKIPPED}
def status_str(self):
return self._status
def messages(self):
return self._messages
def _process_globs(globs):
unique_globs = []
for pat in globs:
if pat and pat not in unique_globs:
unique_globs.append(pat)
files = []
warnings = []
for pat in unique_globs:
if pat:
matches = glob.glob(pat)
if not matches:
warnings.append(
'warning: pattern "{}" matched 0 files'.format(pat))
files.extend(matches)
if globs and not files:
warnings.append('warning: matched 0 total files')
return files, warnings
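# Illustrative use (patterns are made up): given
# _process_globs(['*.json', 'missing/*.txt']), 'files' collects every match
# of the patterns that hit, while 'warnings' gains one entry per pattern
# that matched nothing, plus a final warning if no files matched at all.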
def result_func(glob_warnings):
def result(status, *args):
return _Result(status, *([str(x) for x in glob_warnings] + list(args)))
return result
class ConfigFileError(Exception):
pass
# TODO(mohrr) remove disable=useless-object-inheritance once in Python 3.
# pylint: disable=useless-object-inheritance
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-arguments
class EnvSetup(object):
"""Run environment setup for Pigweed."""
def __init__(self, pw_root, cipd_cache_dir, shell_file, quiet, install_dir,
use_pigweed_defaults, cipd_package_file, virtualenv_root,
virtualenv_requirements, virtualenv_gn_target,
virtualenv_gn_out_dir, cargo_package_file, enable_cargo,
json_file, project_root, config_file):
self._env = environment.Environment()
self._project_root = project_root
self._pw_root = pw_root
self._setup_root = os.path.join(pw_root, 'pw_env_setup', 'py',
'pw_env_setup')
self._cipd_cache_dir = cipd_cache_dir
self._shell_file = shell_file
self._is_windows = os.name == 'nt'
self._quiet = quiet
self._install_dir = install_dir
self._virtualenv_root = (virtualenv_root
or os.path.join(install_dir, 'pigweed-venv'))
if os.path.isfile(shell_file):
os.unlink(shell_file)
if isinstance(self._pw_root, bytes) and bytes != str:
self._pw_root = self._pw_root.decode()
self._cipd_package_file = []
self._virtualenv_requirements = []
self._virtualenv_gn_targets = []
self._cargo_package_file = []
self._enable_cargo = enable_cargo
if config_file:
self._parse_config_file(config_file)
self._json_file = json_file
setup_root = os.path.join(pw_root, 'pw_env_setup', 'py',
'pw_env_setup')
# TODO(pwbug/67, pwbug/68) Investigate pulling these files into an
# oxidized env setup executable instead of referring to them in the
# source tree. Note that this could be error-prone because users expect
# changes to the files in the source tree to affect bootstrap.
if use_pigweed_defaults:
# If updating this section make sure to update
# $PW_ROOT/pw_env_setup/docs.rst as well.
self._cipd_package_file.append(
os.path.join(setup_root, 'cipd_setup', 'pigweed.json'))
self._cipd_package_file.append(
os.path.join(setup_root, 'cipd_setup', 'luci.json'))
# Only set if no other GN target is provided.
if not virtualenv_gn_target:
self._virtualenv_gn_targets.append(
virtualenv_setup.GnTarget(
'{}#pw_env_setup:python.install'.format(pw_root)))
self._cargo_package_file.append(
os.path.join(setup_root, 'cargo_setup', 'packages.txt'))
self._cipd_package_file.extend(cipd_package_file)
self._virtualenv_requirements.extend(virtualenv_requirements)
self._virtualenv_gn_targets.extend(virtualenv_gn_target)
self._virtualenv_gn_out_dir = virtualenv_gn_out_dir
self._cargo_package_file.extend(cargo_package_file)
self._env.set('PW_PROJECT_ROOT', project_root)
self._env.set('PW_ROOT', pw_root)
self._env.set('_PW_ACTUAL_ENVIRONMENT_ROOT', install_dir)
self._env.add_replacement('_PW_ACTUAL_ENVIRONMENT_ROOT', install_dir)
self._env.add_replacement('PW_ROOT', pw_root)
def _parse_config_file(self, config_file):
config = json.load(config_file)
self._cipd_package_file.extend(
os.path.join(self._project_root, x)
for x in config.pop('cipd_package_files', ()))
virtualenv = config.pop('virtualenv', {})
if virtualenv.get('gn_root'):
root = os.path.join(self._project_root, virtualenv.pop('gn_root'))
else:
root = self._project_root
for target in virtualenv.pop('gn_targets', ()):
self._virtualenv_gn_targets.append(
virtualenv_setup.GnTarget('{}#{}'.format(root, target)))
if virtualenv:
raise ConfigFileError(
'unrecognized option in {}: "virtualenv.{}"'.format(
config_file.name, next(iter(virtualenv))))
if config:
raise ConfigFileError('unrecognized option in {}: "{}"'.format(
config_file.name, next(iter(config))))
def _log(self, *args, **kwargs):
# Not using logging module because it's awkward to flush a log handler.
if self._quiet:
return
flush = kwargs.pop('flush', False)
print(*args, **kwargs)
if flush:
sys.stdout.flush()
def setup(self):
"""Runs each of the env_setup steps."""
if os.name == 'nt':
windows_env_start.print_banner(bootstrap=True, no_shell_file=False)
else:
enable_colors()
steps = [
('CIPD package manager', self.cipd),
('Python environment', self.virtualenv),
('Host tools', self.host_tools),
]
# TODO(pwbug/63): Add a Windows version of cargo to CIPD.
if not self._is_windows and self._enable_cargo:
steps.append(("Rust cargo", self.cargo))
if self._is_windows:
steps.append(("Windows scripts", self.win_scripts))
self._log(
Color.bold('Downloading and installing packages into local '
'source directory:\n'))
max_name_len = max(len(name) for name, _ in steps)
self._env.comment('''
This file is automatically generated. DO NOT EDIT!
For details, see $PW_ROOT/pw_env_setup/py/pw_env_setup/env_setup.py and
$PW_ROOT/pw_env_setup/py/pw_env_setup/environment.py.
'''.strip())
if not self._is_windows:
self._env.comment('''
For help debugging errors in this script, uncomment the next line.
set -x
Then use `set +x` to go back to normal.
'''.strip())
self._env.echo(
Color.bold(
'Activating environment (setting environment variables):'))
self._env.echo('')
for name, step in steps:
self._log(' Setting up {name:.<{width}}...'.format(
name=name, width=max_name_len),
end='',
flush=True)
self._env.echo(
' Setting environment variables for {name:.<{width}}...'.
format(name=name, width=max_name_len),
newline=False,
)
spin = spinner.Spinner()
with spin():
result = step(spin)
self._log(result.status_str())
self._env.echo(result.status_str())
for message in result.messages():
sys.stderr.write('{}\n'.format(message))
self._env.echo(message)
if not result.ok():
return -1
self._log('')
self._env.echo('')
self._env.finalize()
self._env.echo(Color.bold('Checking the environment:'))
self._env.echo()
self._env.doctor()
self._env.echo()
self._env.echo(
Color.bold('Environment looks good, you are ready to go!'))
self._env.echo()
with open(self._shell_file, 'w') as outs:
self._env.write(outs)
deactivate = os.path.join(
self._install_dir,
'deactivate{}'.format(os.path.splitext(self._shell_file)[1]))
with open(deactivate, 'w') as outs:
self._env.write_deactivate(outs)
config = {
# Skipping sysname and nodename in os.uname(). nodename could change
# based on the current network. sysname won't change, but is
# redundant because it's contained in release or version, and
# skipping it here simplifies logic.
'uname': ' '.join(getattr(os, 'uname', lambda: ())()[2:]),
'os': os.name,
}
with open(os.path.join(self._install_dir, 'config.json'), 'w') as outs:
outs.write(
json.dumps(config, indent=4, separators=(',', ': ')) + '\n')
if self._json_file is not None:
with open(self._json_file, 'w') as outs:
self._env.json(outs)
return 0
def cipd(self, spin):
install_dir = os.path.join(self._install_dir, 'cipd')
try:
cipd_client = cipd_wrapper.init(install_dir, silent=True)
except cipd_wrapper.UnsupportedPlatform as exc:
return result_func((' {!r}'.format(exc), ))(
_Result.Status.SKIPPED,
' abandoning CIPD setup',
)
package_files, glob_warnings = _process_globs(self._cipd_package_file)
result = result_func(glob_warnings)
if not package_files:
return result(_Result.Status.SKIPPED)
if not cipd_update.update(cipd=cipd_client,
root_install_dir=install_dir,
package_files=package_files,
cache_dir=self._cipd_cache_dir,
env_vars=self._env,
spin=spin):
return result(_Result.Status.FAILED)
return result(_Result.Status.DONE)
def virtualenv(self, unused_spin):
"""Setup virtualenv."""
requirements, req_glob_warnings = _process_globs(
self._virtualenv_requirements)
result = result_func(req_glob_warnings)
orig_python3 = _which('python3')
with self._env():
new_python3 = _which('python3')
# There is an issue with the virtualenv module on Windows where it
# expects sys.executable to be called "python.exe" or it fails to
# properly execute. If we installed Python 3 in the CIPD step we need
# to address this. Detect if we did so and if so create a copy of
# python3.exe called python.exe so that virtualenv works.
if orig_python3 != new_python3 and self._is_windows:
python3_copy = os.path.join(os.path.dirname(new_python3),
'python.exe')
if not os.path.exists(python3_copy):
shutil.copyfile(new_python3, python3_copy)
new_python3 = python3_copy
if not requirements and not self._virtualenv_gn_targets:
return result(_Result.Status.SKIPPED)
if not virtualenv_setup.install(
project_root=self._project_root,
venv_path=self._virtualenv_root,
requirements=requirements,
gn_targets=self._virtualenv_gn_targets,
gn_out_dir=self._virtualenv_gn_out_dir,
python=new_python3,
env=self._env,
):
return result(_Result.Status.FAILED)
return result(_Result.Status.DONE)
def host_tools(self, unused_spin):
# The host tools are grabbed from CIPD, at least initially. If the
# user has a current host build, that build will be used instead.
# TODO(mohrr) find a way to do stuff like this for all projects.
host_dir = os.path.join(self._pw_root, 'out', 'host')
self._env.prepend('PATH', os.path.join(host_dir, 'host_tools'))
return _Result(_Result.Status.DONE)
def win_scripts(self, unused_spin):
# These scripts act as a compatibility layer for windows.
env_setup_dir = os.path.join(self._pw_root, 'pw_env_setup')
self._env.prepend('PATH', os.path.join(env_setup_dir,
'windows_scripts'))
return _Result(_Result.Status.DONE)
def cargo(self, unused_spin):
install_dir = os.path.join(self._install_dir, 'cargo')
package_files, glob_warnings = _process_globs(self._cargo_package_file)
result = result_func(glob_warnings)
if not package_files:
return result(_Result.Status.SKIPPED)
if not cargo_setup.install(install_dir=install_dir,
package_files=package_files,
env=self._env):
return result(_Result.Status.FAILED)
return result(_Result.Status.DONE)
def parse(argv=None):
"""Parse command-line arguments."""
parser = argparse.ArgumentParser()
pw_root = os.environ.get('PW_ROOT', None)
if not pw_root:
try:
with open(os.devnull, 'w') as outs:
pw_root = subprocess.check_output(
['git', 'rev-parse', '--show-toplevel'],
stderr=outs).strip()
except subprocess.CalledProcessError:
pw_root = None
parser.add_argument(
'--pw-root',
default=pw_root,
required=not pw_root,
)
project_root = os.environ.get('PW_PROJECT_ROOT', None) or pw_root
parser.add_argument(
'--project-root',
default=project_root,
required=not project_root,
)
parser.add_argument(
'--cipd-cache-dir',
default=os.environ.get('CIPD_CACHE_DIR',
os.path.expanduser('~/.cipd-cache-dir')),
)
parser.add_argument(
'--shell-file',
help='Where to write the file for shells to source.',
required=True,
)
parser.add_argument(
'--quiet',
help='Reduce output.',
action='store_true',
default='PW_ENVSETUP_QUIET' in os.environ,
)
parser.add_argument(
'--install-dir',
help='Location to install environment.',
required=True,
)
parser.add_argument(
'--config-file',
help='JSON file describing CIPD and virtualenv requirements.',
type=argparse.FileType('r'),
)
parser.add_argument(
'--use-pigweed-defaults',
help='Use Pigweed default values in addition to the given environment '
'variables.',
action='store_true',
)
parser.add_argument(
'--cipd-package-file',
help='CIPD package file. JSON file consisting of a list of dicts with '
'"path" and "tags" keys, where "tags" a list of str.',
default=[],
action='append',
)
parser.add_argument(
'--virtualenv-requirements',
help='Pip requirements file. Compiled with pip-compile.',
default=[],
action='append',
)
parser.add_argument(
'--virtualenv-gn-target',
help=('GN targets that build and install Python packages. Format: '
'path/to/gn_root#target'),
default=[],
action='append',
type=virtualenv_setup.GnTarget,
)
parser.add_argument(
'--virtualenv-gn-out-dir',
help=('Output directory to use when building and installing Python '
'packages with GN; defaults to a unique path in the environment '
'directory.'))
parser.add_argument(
'--virtualenv-root',
help=('Root of virtualenv directory. Default: '
'<install_dir>/pigweed-venv'),
default=None,
)
parser.add_argument(
'--cargo-package-file',
help='Rust cargo packages to install. Lines with package name and '
'version separated by a space.',
default=[],
action='append',
)
parser.add_argument(
'--enable-cargo',
help='Enable cargo installation.',
action='store_true',
)
parser.add_argument(
'--json-file',
help='Dump environment variable operations to a JSON file.',
default=None,
)
args = parser.parse_args(argv)
others = (
'use_pigweed_defaults',
'cipd_package_file',
'virtualenv_requirements',
'virtualenv_gn_target',
'cargo_package_file',
)
one_required = others + ('config_file', )
if not any(getattr(args, x) for x in one_required):
parser.error('At least one of ({}) is required'.format(', '.join(
'"--{}"'.format(x.replace('_', '-')) for x in one_required)))
if args.config_file and any(getattr(args, x) for x in others):
parser.error('Cannot combine --config-file with any of {}'.format(
', '.join('"--{}"'.format(x.replace('_', '-'))
for x in one_required)))
return args
def main():
try:
return EnvSetup(**vars(parse())).setup()
except subprocess.CalledProcessError as err:
print()
print(err.output)
raise
if __name__ == '__main__':
sys.exit(main())
| 33.660031 | 80 | 0.608098 |
794087a7086892c12de4f1ca32b5366b45b9d017 | 2,006 | py | Python | scripts/generate_README.py | nmningmei/melody_embedding | e82b4c897b421e61a1909e5b4f44523e3cb7aa61 | ["MIT"] | null | null | null | scripts/generate_README.py | nmningmei/melody_embedding | e82b4c897b421e61a1909e5b4f44523e3cb7aa61 | ["MIT"] | null | null | null | scripts/generate_README.py | nmningmei/melody_embedding | e82b4c897b421e61a1909e5b4f44523e3cb7aa61 | ["MIT"] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Oct 12 10:40:48 2019
@author: nmei
"""
import os
import pandas as pd
import torch
from torchvision import models
from torchvision.transforms import functional as TF
from torch import optim
from torch.utils.data import Dataset,DataLoader
from torch.autograd import Variable
from torch import nn
from models import CNN_path,RNN_path
if __name__ == '__main__':
experiment = 'train_one_by_one'
weight_dir = '../weights/{}'.format(experiment)
working_dir = '../results/{}'.format(experiment)
results = pd.read_csv(os.path.join(working_dir,'scores.csv'))
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
learning_rate = 1e-4
n_epochs = 1000
batch_size = 5
beginning = """
# melody_embedding
## Self-supervised Learning
[insert text]
## Goals:
- [ ] Train the 2 pathways to extract informative embeddings
- [ ] Examine the embeddings via decoding
## Convolutional Neural Network -- re-train mobileNetV2:
```
{CNN_mobilenet}
```
## Convolutional Recurrent Neural Network:
```
{CRNN}
```
"""
computer_vision_net = CNN_path(device = device)
sound_net = RNN_path(device = device,batch_size = batch_size)
beginning = beginning.format(**{'best_score': results['distance'].min(),
"CNN_mobilenet":computer_vision_net.forward,
"CRNN":sound_net.forward})
computer_vision_net.load_state_dict(torch.load(os.path.join(weight_dir,'CNN_path.pth')))
sound_net.load_state_dict( torch.load(os.path.join(weight_dir,'RNN_path.pth')))
if os.path.exists('../README.md'):
os.remove('../README.md')
with open('../README.md','w') as f:
f.close()
with open('../README.md', 'w') as f:
f.write(beginning)
f.close()
| 27.108108 | 92 | 0.61665 |
79408870623797bbab98ea5420ab6c0a60facbeb | 12,690 | py | Python | setup.py | hwangdeyu/onnxruntime | 989fe2498f4dd0beaf1b4324bf74a39abaf86a29 | ["MIT"] | null | null | null | setup.py | hwangdeyu/onnxruntime | 989fe2498f4dd0beaf1b4324bf74a39abaf86a29 | ["MIT"] | null | null | null | setup.py | hwangdeyu/onnxruntime | 989fe2498f4dd0beaf1b4324bf74a39abaf86a29 | ["MIT"] | 1 | 2020-11-09T07:51:33.000Z | 2020-11-09T07:51:33.000Z |
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
#--------------------------------------------------------------------------
from setuptools import setup, find_packages, Extension
from distutils import log as logger
from distutils.command.build_ext import build_ext as _build_ext
from glob import glob
from os import path, getcwd, environ, remove, walk, makedirs, listdir
from shutil import copyfile, copytree, rmtree
import platform
import subprocess
import sys
import datetime
nightly_build = False
featurizers_build = False
package_name = 'onnxruntime'
wheel_name_suffix = None
# Any combination of the following arguments can be applied
if '--use_featurizers' in sys.argv:
featurizers_build = True
sys.argv.remove('--use_featurizers')
if '--nightly_build' in sys.argv:
package_name = 'ort-nightly'
nightly_build = True
sys.argv.remove('--nightly_build')
for arg in sys.argv[1:]:
if arg.startswith("--wheel_name_suffix="):
wheel_name_suffix = arg[len("--wheel_name_suffix="):]
nightly_build = True
sys.argv.remove(arg)
break
# The following arguments are mutually exclusive
if '--use_tensorrt' in sys.argv:
package_name = 'onnxruntime-gpu-tensorrt'
sys.argv.remove('--use_tensorrt')
if '--nightly_build' in sys.argv:
package_name = 'ort-trt-nightly'
nightly_build = True
sys.argv.remove('--nightly_build')
elif '--use_cuda' in sys.argv:
package_name = 'onnxruntime-gpu'
sys.argv.remove('--use_cuda')
if '--nightly_build' in sys.argv:
package_name = 'ort-gpu-nightly'
nightly_build = True
sys.argv.remove('--nightly_build')
elif '--use_ngraph' in sys.argv:
package_name = 'onnxruntime-ngraph'
sys.argv.remove('--use_ngraph')
elif '--use_openvino' in sys.argv:
package_name = 'onnxruntime-openvino'
sys.argv.remove('--use_openvino')
elif '--use_dnnl' in sys.argv:
package_name = 'onnxruntime-dnnl'
sys.argv.remove('--use_dnnl')
elif '--use_nuphar' in sys.argv:
package_name = 'onnxruntime-nuphar'
sys.argv.remove('--use_nuphar')
elif '--use_vitisai' in sys.argv:
package_name = 'onnxruntime-vitisai'
sys.argv.remove('--use_vitisai')
# --use_acl is specified in build.py, but not parsed here
# PEP 513 defined manylinux1_x86_64 and manylinux1_i686
# PEP 571 defined manylinux2010_x86_64 and manylinux2010_i686
# PEP 599 defines the following platform tags:
# manylinux2014_x86_64
# manylinux2014_i686
# manylinux2014_aarch64
# manylinux2014_armv7l
# manylinux2014_ppc64
# manylinux2014_ppc64le
# manylinux2014_s390x
manylinux_tags = [
'manylinux1_x86_64',
'manylinux1_i686',
'manylinux2010_x86_64',
'manylinux2010_i686',
'manylinux2014_x86_64',
'manylinux2014_i686',
'manylinux2014_aarch64',
'manylinux2014_armv7l',
'manylinux2014_ppc64',
'manylinux2014_ppc64le',
'manylinux2014_s390x',
]
is_manylinux = environ.get('AUDITWHEEL_PLAT', None) in manylinux_tags
class build_ext(_build_ext):
def build_extension(self, ext):
dest_file = self.get_ext_fullpath(ext.name)
logger.info('copying %s -> %s', ext.sources[0], dest_file)
copyfile(ext.sources[0], dest_file)
try:
from wheel.bdist_wheel import bdist_wheel as _bdist_wheel
class bdist_wheel(_bdist_wheel):
def finalize_options(self):
_bdist_wheel.finalize_options(self)
if not is_manylinux:
self.root_is_pure = False
def _rewrite_ld_preload(self, to_preload):
with open('onnxruntime/capi/_ld_preload.py', 'rt') as f:
ld_preload = f.read().splitlines()
with open('onnxruntime/capi/_ld_preload.py', 'wt') as f:
for line in ld_preload:
f.write(line)
f.write('\n')
if 'LD_PRELOAD_BEGIN_MARK' in line:
break
if len(to_preload) > 0:
f.write('from ctypes import CDLL, RTLD_GLOBAL\n')
for library in to_preload:
f.write('_{} = CDLL("{}", mode=RTLD_GLOBAL)\n'.format(library.split('.')[0], library))
def run(self):
if is_manylinux:
source = 'onnxruntime/capi/onnxruntime_pybind11_state.so'
dest = 'onnxruntime/capi/onnxruntime_pybind11_state_manylinux1.so'
logger.info('copying %s -> %s', source, dest)
copyfile(source, dest)
result = subprocess.run(['patchelf', '--print-needed', dest], check=True, stdout=subprocess.PIPE, universal_newlines=True)
cuda_dependencies = ['libcublas.so', 'libcudnn.so', 'libcudart.so', 'libcurand.so', 'libcufft.so']
to_preload = []
args = ['patchelf', '--debug']
for line in result.stdout.split('\n'):
for dependency in cuda_dependencies:
if dependency in line:
to_preload.append(line)
args.extend(['--remove-needed', line])
args.append(dest)
if len(to_preload) > 0:
subprocess.run(args, check=True, stdout=subprocess.PIPE)
self._rewrite_ld_preload(to_preload)
_bdist_wheel.run(self)
if is_manylinux:
file = glob(path.join(self.dist_dir, '*linux*.whl'))[0]
logger.info('repairing %s for manylinux1', file)
try:
subprocess.run(['auditwheel', 'repair', '-w', self.dist_dir, file], check=True, stdout=subprocess.PIPE)
finally:
logger.info('removing %s', file)
remove(file)
except ImportError as error:
print("Error importing dependencies:")
print(error)
bdist_wheel = None
# Additional binaries
if platform.system() == 'Linux':
libs = ['onnxruntime_pybind11_state.so', 'libdnnl.so.1', 'libmklml_intel.so', 'libiomp5.so', 'mimalloc.so']
# dnnl EP is built as shared lib
libs.extend(['libonnxruntime_providers_dnnl.so'])
# nGraph Libs
libs.extend(['libngraph.so', 'libcodegen.so', 'libcpu_backend.so', 'libmkldnn.so', 'libtbb_debug.so', 'libtbb_debug.so.2', 'libtbb.so', 'libtbb.so.2'])
# OpenVINO Libs
if package_name == 'onnxruntime-openvino':
if platform.system() == 'Linux':
libs.extend(['libovep_ngraph.so'])
# Nuphar Libs
libs.extend(['libtvm.so.0.5.1'])
if nightly_build:
libs.extend(['libonnxruntime_pywrapper.so'])
elif platform.system() == "Darwin":
libs = ['onnxruntime_pybind11_state.so', 'libdnnl.1.dylib', 'mimalloc.so'] # TODO add libmklml and libiomp5 later.
# dnnl EP is built as shared lib
libs.extend(['libonnxruntime_providers_dnnl.dylib'])
if nightly_build:
libs.extend(['libonnxruntime_pywrapper.dylib'])
else:
libs = ['onnxruntime_pybind11_state.pyd', 'dnnl.dll', 'mklml.dll', 'libiomp5md.dll']
# dnnl EP is built as dll
libs.extend(['onnxruntime_providers_dnnl.dll'])
libs.extend(['ngraph.dll', 'cpu_backend.dll', 'tbb.dll', 'mimalloc-override.dll', 'mimalloc-redirect.dll', 'mimalloc-redirect32.dll'])
# Nuphar Libs
libs.extend(['tvm.dll'])
if nightly_build:
libs.extend(['onnxruntime_pywrapper.dll'])
if is_manylinux:
data = ['capi/libonnxruntime_pywrapper.so'] if nightly_build else []
ext_modules = [
Extension(
'onnxruntime.capi.onnxruntime_pybind11_state',
['onnxruntime/capi/onnxruntime_pybind11_state_manylinux1.so'],
),
]
else:
data = [path.join('capi', x) for x in libs if path.isfile(path.join('onnxruntime', 'capi', x))]
ext_modules = []
# Additional examples
examples_names = ["mul_1.onnx", "logreg_iris.onnx", "sigmoid.onnx"]
examples = [path.join('datasets', x) for x in examples_names]
# Extra files such as EULA and ThirdPartyNotices
extra = ["LICENSE", "ThirdPartyNotices.txt", "Privacy.md"]
# Description
README = path.join(getcwd(), "docs/python/README.rst")
if not path.exists(README):
this = path.dirname(__file__)
README = path.join(this, "docs/python/README.rst")
if not path.exists(README):
raise FileNotFoundError("Unable to find 'README.rst'")
with open(README) as f:
long_description = f.read()
packages = [
'onnxruntime',
'onnxruntime.backend',
'onnxruntime.capi',
'onnxruntime.capi.training',
'onnxruntime.datasets',
'onnxruntime.tools',
]
package_data = {}
data_files = []
if package_name == 'onnxruntime-nuphar':
packages += ["onnxruntime.nuphar"]
extra += [path.join('nuphar', 'NUPHAR_CACHE_VERSION')]
if featurizers_build:
# Copy the featurizer data from its current directory into the onnx runtime directory so that the
# content can be included as module data.
# Apparently, the root_dir is different based on how the script is invoked
source_root_dir = None
dest_root_dir = None
for potential_source_prefix, potential_dest_prefix in [
(getcwd(), getcwd()),
(path.dirname(__file__), path.dirname(__file__)),
(path.join(getcwd(), ".."), getcwd()),
]:
potential_dir = path.join(potential_source_prefix, "external", "FeaturizersLibrary", "Data")
if path.isdir(potential_dir):
source_root_dir = potential_source_prefix
dest_root_dir = potential_dest_prefix
break
if source_root_dir is None:
raise Exception("Unable to find the build root dir")
assert dest_root_dir is not None
featurizer_source_dir = path.join(source_root_dir, "external", "FeaturizersLibrary", "Data")
assert path.isdir(featurizer_source_dir), featurizer_source_dir
featurizer_dest_dir = path.join(dest_root_dir, "onnxruntime", "FeaturizersLibrary", "Data")
if path.isdir(featurizer_dest_dir):
rmtree(featurizer_dest_dir)
for item in listdir(featurizer_source_dir):
this_featurizer_source_fullpath = path.join(featurizer_source_dir)
assert path.isdir(this_featurizer_source_fullpath), this_featurizer_source_fullpath
copytree(this_featurizer_source_fullpath, featurizer_dest_dir)
packages.append("onnxruntime.FeaturizersLibrary.Data.{}".format(item))
package_data[packages[-1]] = listdir(path.join(featurizer_dest_dir, item))
package_data["onnxruntime"] = data + examples + extra
version_number = ''
with open('VERSION_NUMBER') as f:
version_number = f.readline().strip()
if nightly_build:
#https://docs.microsoft.com/en-us/azure/devops/pipelines/build/variables
build_suffix = environ.get('BUILD_BUILDNUMBER')
if build_suffix is None:
#The following line is only for local testing
build_suffix = str(datetime.datetime.now().date().strftime("%Y%m%d"))
else:
build_suffix = build_suffix.replace('.','')
version_number = version_number + ".dev" + build_suffix
if wheel_name_suffix:
package_name = "{}_{}".format(package_name, wheel_name_suffix)
cmd_classes = {}
if bdist_wheel is not None :
cmd_classes['bdist_wheel'] = bdist_wheel
cmd_classes['build_ext'] = build_ext
requirements_path = path.join(getcwd(), "requirements.txt")
if not path.exists(requirements_path):
this = path.dirname(__file__)
requirements_path = path.join(this, "requirements.txt")
if not path.exists(requirements_path):
raise FileNotFoundError("Unable to find 'requirements.txt'")
with open(requirements_path) as f:
install_requires = f.read().splitlines()
# Setup
setup(
name=package_name,
version=version_number,
description='ONNX Runtime Python bindings',
long_description=long_description,
author='Microsoft Corporation',
author_email='[email protected]',
cmdclass=cmd_classes,
license="MIT License",
packages=packages,
ext_modules=ext_modules,
package_data=package_data,
data_files=data_files,
install_requires=install_requires,
entry_points= {
'console_scripts': [
'onnxruntime_test = onnxruntime.tools.onnxruntime_test:main',
]
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python',
'Programming Language :: Python :: 3 :: Only',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7'],
)
| 36.889535 | 153 | 0.660126 |
794089c7037c1c328f01dc0db6fa6994316830a5 | 6,449 | py | Python | 3d-tracking/lib/utils/net_utils.py | sadjadasghari/3d-vehicle-tracking | af05d52be81db32fc6a21bf60a757ebc46557998 | [
"BSD-3-Clause"
] | 2 | 2019-10-11T02:19:58.000Z | 2019-10-11T02:20:22.000Z | 3d-tracking/lib/utils/net_utils.py | reinforcementdriving/3d-vehicle-tracking | f8433f72a51dd1a7190570e63e9fda4a924a81f0 | [
"BSD-3-Clause"
] | null | null | null | 3d-tracking/lib/utils/net_utils.py | reinforcementdriving/3d-vehicle-tracking | f8433f72a51dd1a7190570e63e9fda4a924a81f0 | [
"BSD-3-Clause"
] | null | null | null | import random
import cv2
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from model.roi_crop.functions.roi_crop import RoICropFunction
from model.utils.config import cfg
from torch.autograd import Variable
def save_net(fname, net):
import h5py
h5f = h5py.File(fname, mode='w')
for k, v in net.state_dict().items():
h5f.create_dataset(k, data=v.cpu().numpy())
def load_net(fname, net):
import h5py
h5f = h5py.File(fname, mode='r')
for k, v in net.state_dict().items():
param = torch.from_numpy(np.asarray(h5f[k]))
v.copy_(param)
def weights_normal_init(model, dev=0.01):
if isinstance(model, list):
for m in model:
weights_normal_init(m, dev)
else:
for m in model.modules():
if isinstance(m, nn.Conv2d):
m.weight.data.normal_(0.0, dev)
elif isinstance(m, nn.Linear):
m.weight.data.normal_(0.0, dev)
def clip_gradient(model, clip_norm):
"""Computes a gradient clipping coefficient based on gradient norm."""
totalnorm = 0
for p in model.parameters():
if p.requires_grad:
modulenorm = p.grad.data.norm()
totalnorm += modulenorm ** 2
totalnorm = np.sqrt(totalnorm)
norm = clip_norm / max(totalnorm, clip_norm)
for p in model.parameters():
if p.requires_grad:
p.grad.mul_(norm)
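# Illustrative usage sketch (added, not part of the original module): clip_gradient is
# meant to be called after backpropagation and before the optimizer step, e.g.
#
#   loss.backward()
#   clip_gradient(model, clip_norm=10.0)
#   optimizer.step()
#
# where `model`, `loss` and `optimizer` are placeholders for the caller's objects.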
def vis_detections(im, class_name, dets, thresh=0.8):
"""Visual debugging of detections."""
for i in range(np.minimum(10, dets.shape[0])):
bbox = tuple(int(np.round(x)) for x in dets[i, :4])
score = dets[i, -1]
if score > thresh:
cv2.rectangle(im, bbox[0:2], bbox[2:4], (0, 204, 0), 2)
cv2.putText(im, '%s: %.3f' % (class_name, score),
(bbox[0], bbox[1] + 15), cv2.FONT_HERSHEY_PLAIN,
1.0, (0, 0, 255), thickness=1)
return im
def adjust_learning_rate(optimizer, decay=0.1):
"""Sets the learning rate to the initial LR decayed by 0.5 every 20
epochs"""
for param_group in optimizer.param_groups:
param_group['lr'] = decay * param_group['lr']
def save_checkpoint(state, filename):
torch.save(state, filename)
def _smooth_l1_loss(bbox_pred, bbox_targets, bbox_inside_weights,
bbox_outside_weights, sigma=1.0, dim=[1]):
sigma_2 = sigma ** 2
box_diff = bbox_pred - bbox_targets
in_box_diff = bbox_inside_weights * box_diff
abs_in_box_diff = torch.abs(in_box_diff)
smoothL1_sign = (abs_in_box_diff < 1. / sigma_2).detach().float()
in_loss_box = torch.pow(in_box_diff, 2) * (sigma_2 / 2.) * smoothL1_sign \
+ (abs_in_box_diff - (0.5 / sigma_2)) * (1. - smoothL1_sign)
out_loss_box = bbox_outside_weights * in_loss_box
loss_box = out_loss_box
for i in sorted(dim, reverse=True):
loss_box = loss_box.sum(i)
loss_box = loss_box.mean()
return loss_box
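# Added note (illustrative): element-wise on the weighted box difference x, the loss above
# equals 0.5 * sigma^2 * x^2 when |x| < 1 / sigma^2 and |x| - 0.5 / sigma^2 otherwise,
# i.e. the standard smooth-L1 (Huber-style) loss used in Fast/Faster R-CNN.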
def _crop_pool_layer(bottom, rois, max_pool=True):
# code modified from
# https://github.com/ruotianluo/pytorch-faster-rcnn
# implement it using stn
# box to affine
# input (x1,y1,x2,y2)
"""
[ x2-x1 x1 + x2 - W + 1 ]
[ ----- 0 --------------- ]
[ W - 1 W - 1 ]
[ ]
[ y2-y1 y1 + y2 - H + 1 ]
[ 0 ----- --------------- ]
[ H - 1 H - 1 ]
"""
rois = rois.detach()
batch_size = bottom.size(0)
D = bottom.size(1)
H = bottom.size(2)
W = bottom.size(3)
roi_per_batch = rois.size(0) // batch_size  # integer division so the .expand() calls below receive an int
x1 = rois[:, 1::4] / 16.0
y1 = rois[:, 2::4] / 16.0
x2 = rois[:, 3::4] / 16.0
y2 = rois[:, 4::4] / 16.0
height = bottom.size(2)
width = bottom.size(3)
# affine theta
zero = Variable(rois.data.new(rois.size(0), 1).zero_())
theta = torch.cat([ \
(x2 - x1) / (width - 1),
zero,
(x1 + x2 - width + 1) / (width - 1),
zero,
(y2 - y1) / (height - 1),
(y1 + y2 - height + 1) / (height - 1)], 1).view(-1, 2, 3)
if max_pool:
pre_pool_size = cfg.POOLING_SIZE * 2
grid = F.affine_grid(theta, torch.Size(
(rois.size(0), 1, pre_pool_size, pre_pool_size)))
bottom = bottom.view(1, batch_size, D, H, W).contiguous().expand(
roi_per_batch, batch_size, D, H, W) \
.contiguous().view(-1, D, H, W)
crops = F.grid_sample(bottom, grid)
crops = F.max_pool2d(crops, 2, 2)
else:
grid = F.affine_grid(theta, torch.Size(
(rois.size(0), 1, cfg.POOLING_SIZE, cfg.POOLING_SIZE)))
bottom = bottom.view(1, batch_size, D, H, W).contiguous().expand(
roi_per_batch, batch_size, D, H, W) \
.contiguous().view(-1, D, H, W)
crops = F.grid_sample(bottom, grid)
return crops, grid
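# Illustrative usage sketch (added): `bottom` is a feature map of shape (batch, C, H, W)
# and `rois` has shape (num_rois, 5) with columns (batch_index, x1, y1, x2, y2) in
# input-image coordinates; the values below are placeholders.
#
#   feats = torch.randn(1, 256, 38, 50)
#   rois = torch.tensor([[0., 16., 16., 160., 160.]])
#   crops, grid = _crop_pool_layer(feats, rois)  # crops: (1, 256, cfg.POOLING_SIZE, cfg.POOLING_SIZE)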
def _affine_grid_gen(rois, input_size, grid_size):
rois = rois.detach()
x1 = rois[:, 1::4] / 16.0
y1 = rois[:, 2::4] / 16.0
x2 = rois[:, 3::4] / 16.0
y2 = rois[:, 4::4] / 16.0
height = input_size[0]
width = input_size[1]
zero = Variable(rois.data.new(rois.size(0), 1).zero_())
theta = torch.cat([ \
(x2 - x1) / (width - 1),
zero,
(x1 + x2 - width + 1) / (width - 1),
zero,
(y2 - y1) / (height - 1),
(y1 + y2 - height + 1) / (height - 1)], 1).view(-1, 2, 3)
grid = F.affine_grid(theta,
torch.Size((rois.size(0), 1, grid_size, grid_size)))
return grid
def _affine_theta(rois, input_size):
rois = rois.detach()
x1 = rois[:, 1::4] / 16.0
y1 = rois[:, 2::4] / 16.0
x2 = rois[:, 3::4] / 16.0
y2 = rois[:, 4::4] / 16.0
height = input_size[0]
width = input_size[1]
zero = Variable(rois.data.new(rois.size(0), 1).zero_())
# theta = torch.cat([\
# (x2 - x1) / (width - 1),
# zero,
# (x1 + x2 - width + 1) / (width - 1),
# zero,
# (y2 - y1) / (height - 1),
# (y1 + y2 - height + 1) / (height - 1)], 1).view(-1, 2, 3)
theta = torch.cat([ \
(y2 - y1) / (height - 1),
zero,
(y1 + y2 - height + 1) / (height - 1),
zero,
(x2 - x1) / (width - 1),
(x1 + x2 - width + 1) / (width - 1)], 1).view(-1, 2, 3)
return theta
| 30.709524 | 78 | 0.540394 |
79408a451c0657a529f04c32b42c3f4958c0bb35 | 7,031 | py | Python | BSSN/BSSN_stress_energy_source_terms.py | ksible/nrpytutorial | 4ca6e9da22def2a9c9bcbcad75847fd1db159f4b | [
"BSD-2-Clause"
] | 1 | 2021-12-13T05:51:18.000Z | 2021-12-13T05:51:18.000Z | BSSN/BSSN_stress_energy_source_terms.py | ksible/nrpytutorial | 4ca6e9da22def2a9c9bcbcad75847fd1db159f4b | [
"BSD-2-Clause"
] | null | null | null | BSSN/BSSN_stress_energy_source_terms.py | ksible/nrpytutorial | 4ca6e9da22def2a9c9bcbcad75847fd1db159f4b | [
"BSD-2-Clause"
] | null | null | null | # As documented in the NRPy+ tutorial module
# Tutorial-BSSN_stress_energy_source_terms.ipynb,
# this module will construct expressions for
# BSSN stress-energy source terms, in terms of
# elements of T^{mu nu}.
# Author: Zachariah B. Etienne
# zachetie **at** gmail **dot* com
import sympy as sp # SymPy: The Python computer algebra package upon which NRPy+ depends
import NRPy_param_funcs as par # NRPy+: Parameter interface
import indexedexp as ixp # NRPy+: Symbolic indexed expression (e.g., tensors, vectors, etc.) support
import reference_metric as rfm # NRPy+: Reference metric support
import BSSN.ADMBSSN_tofrom_4metric as AB4m # NRPy+: ADM/BSSN <-> 4-metric conversions
import sys # Standard Python modules for multiplatform OS-level functions
thismodule = __name__
# Define BSSN source terms in terms of T^{mu nu} or T_{mu nu}
def stress_energy_source_terms_ito_T4UU_and_ADM_or_BSSN_metricvars(inputvars,custom_T4UU=None):
# Step 1: Check if rfm.reference_metric() already called. If not, BSSN
# quantities are not yet defined, so cannot proceed!
if rfm.have_already_called_reference_metric_function == False:
print("BSSN_source_terms_ito_T4UU(): Must call reference_metric() first!")
sys.exit(1)
# Step 2.a: Define gamma4DD[mu][nu] = g_{mu nu} + n_{mu} n_{nu}
alpha = sp.symbols("alpha", real=True)
zero = sp.sympify(0)
n4D = [sp.sympify(-1)*alpha, zero, zero, zero]
AB4m.g4DD_ito_BSSN_or_ADM(inputvars)
gamma4DD = ixp.zerorank2(DIM=4)
for mu in range(4):
for nu in range(4):
gamma4DD[mu][nu] = AB4m.g4DD[mu][nu] + n4D[mu] * n4D[nu]
# Step 2.b: If expression for components of T4UU not given, declare T4UU here
if custom_T4UU is None: # Use "is None" instead of "==None", as the former is more correct.
T4UU = ixp.declarerank2("T4UU","sym01",DIM=4)
else:
T4UU = custom_T4UU
# Step 2.c: Define BSSN source terms
global SDD,SD,S,rho
# Step 2.c.i: S_{ij} = gamma_{i mu} gamma_{j nu} T^{mu nu}
SDD = ixp.zerorank2()
for i in range(3):
for j in range(3):
for mu in range(4):
for nu in range(4):
SDD[i][j] += gamma4DD[i+1][mu] * gamma4DD[j+1][nu] * T4UU[mu][nu]
# Step 2.c.ii: S_{i} = -gamma_{i mu} n_{nu} T^{mu nu}
SD = ixp.zerorank1()
for i in range(3):
for mu in range(4):
for nu in range(4):
SD[i] += - gamma4DD[i+1][mu] * n4D[nu] * T4UU[mu][nu]
# Step 2.c.iii: S = gamma^{ij} S_{ij}
if inputvars == "ADM":
gammaDD = ixp.declarerank2("gammaDD", "sym01")
gammaUU, dummydet = ixp.symm_matrix_inverter3x3(gammaDD) # Set gammaUU
elif inputvars == "BSSN":
import BSSN.ADM_in_terms_of_BSSN as AitoB # NRPy+: ADM quantities in terms of BSSN quantities
AitoB.ADM_in_terms_of_BSSN()
gammaUU = AitoB.gammaUU
S = zero
for i in range(3):
for j in range(3):
S += gammaUU[i][j] * SDD[i][j]
# Step 2.c.iv: rho = n_{mu} n_{nu} T^{mu nu}
rho = zero
for mu in range(4):
for nu in range(4):
rho += n4D[mu] * n4D[nu] * T4UU[mu][nu]
return SDD,SD,S,rho
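# Illustrative usage sketch (added, not part of the original module): in a typical NRPy+
# workflow the reference metric is set up first, then the source terms are built from a
# declared T^{mu nu}, e.g.
#
#   import NRPy_param_funcs as par
#   import reference_metric as rfm
#   par.set_parval_from_str("reference_metric::CoordSystem", "Spherical")
#   rfm.reference_metric()
#   SDD, SD, S, rho = stress_energy_source_terms_ito_T4UU_and_ADM_or_BSSN_metricvars("BSSN")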
# Step 3: Add BSSN stress-energy source terms to BSSN RHSs
def BSSN_source_terms_for_BSSN_RHSs(custom_T4UU=None):
global sourceterm_trK_rhs, sourceterm_a_rhsDD, sourceterm_lambda_rhsU, sourceterm_Lambdabar_rhsU
# Step 3.a: Call BSSN_source_terms_ito_T4UU to get SDD, SD, S, & rho
if custom_T4UU == "unrescaled BSSN source terms already given":
SDD = ixp.declarerank2("SDD", "sym01")
SD = ixp.declarerank1("SD")
S = sp.symbols("S", real=True)
rho = sp.symbols("rho", real=True)
else:
SDD,SD,S,rho = stress_energy_source_terms_ito_T4UU_and_ADM_or_BSSN_metricvars("BSSN", custom_T4UU)
PI = par.Cparameters("REAL", thismodule, ["PI"], "3.14159265358979323846264338327950288")
alpha = sp.symbols("alpha", real=True)
# Step 3.b: trK_rhs
sourceterm_trK_rhs = 4 * PI * alpha * (rho + S)
# Step 3.c: Abar_rhsDD:
# Step 3.c.i: Compute trace-free part of S_{ij}:
import BSSN.BSSN_quantities as Bq
Bq.BSSN_basic_tensors() # Sets gammabarDD
gammabarUU, dummydet = ixp.symm_matrix_inverter3x3(Bq.gammabarDD) # Set gammabarUU
tracefree_SDD = ixp.zerorank2()
for i in range(3):
for j in range(3):
tracefree_SDD[i][j] = SDD[i][j]
for i in range(3):
for j in range(3):
for k in range(3):
for m in range(3):
tracefree_SDD[i][j] += -sp.Rational(1, 3) * Bq.gammabarDD[i][j] * gammabarUU[k][m] * SDD[k][m]
# Step 3.c.ii: Define exp_m4phi = e^{-4 phi}
Bq.phi_and_derivs()
# Step 3.c.iii: Evaluate stress-energy part of AbarDD's RHS
sourceterm_a_rhsDD = ixp.zerorank2()
for i in range(3):
for j in range(3):
Abar_rhsDDij = -8 * PI * alpha * Bq.exp_m4phi * tracefree_SDD[i][j]
sourceterm_a_rhsDD[i][j] = Abar_rhsDDij / rfm.ReDD[i][j]
# Step 3.d: Stress-energy part of Lambdabar_rhsU = stressenergy_Lambdabar_rhsU
sourceterm_Lambdabar_rhsU = ixp.zerorank1()
for i in range(3):
for j in range(3):
sourceterm_Lambdabar_rhsU[i] += -16 * PI * alpha * gammabarUU[i][j] * SD[j]
sourceterm_lambda_rhsU = ixp.zerorank1()
for i in range(3):
sourceterm_lambda_rhsU[i] = sourceterm_Lambdabar_rhsU[i] / rfm.ReU[i]
# Step 4: Add BSSN stress-energy source terms to BSSN constraints
def BSSN_source_terms_for_BSSN_constraints(custom_T4UU=None):
global sourceterm_H, sourceterm_MU
# Step 4.a: Call BSSN_source_terms_ito_T4UU to get SDD, SD, S, & rho
if custom_T4UU == "unrescaled BSSN source terms already given":
# SDD and S unused, so we ignore their return values from ixp.declarerankN() below
ixp.declarerank2("SDD", "sym01")
SD = ixp.declarerank1("SD")
sp.symbols("S", real=True)
rho = sp.symbols("rho", real=True)
else:
_SDD,SD,_S,rho = stress_energy_source_terms_ito_T4UU_and_ADM_or_BSSN_metricvars("BSSN", custom_T4UU) #_SDD,_S unused.
PI = par.Cparameters("REAL", thismodule, ["PI"], "3.14159265358979323846264338327950288")
# Step 4.b: Add source term to the Hamiltonian constraint H
sourceterm_H = -16 * PI * rho
# Step 4.c: Add source term to the momentum constraint M^i
# Step 4.c.i: Compute gammaUU in terms of BSSN quantities
import BSSN.ADM_in_terms_of_BSSN as AitoB
AitoB.ADM_in_terms_of_BSSN() # Provides gammaUU
# Step 4.c.ii: Raise S_i
SU = ixp.zerorank1()
for i in range(3):
for j in range(3):
SU[i] += AitoB.gammaUU[i][j] * SD[j]
# Step 4.c.iii: Add source term to momentum constraint & rescale:
sourceterm_MU = ixp.zerorank1()
for i in range(3):
sourceterm_MU[i] = -8 * PI * SU[i] / rfm.ReU[i]
| 43.401235 | 125 | 0.637747 |
79408bc3c9a32ddcd3c5c6049cd695e96e409be8 | 4,764 | py | Python | model.py | rashidhaffadi/EGT | 827eab656548ba47a07cfe21c1e0f4335c74f52a | [
"MIT"
] | null | null | null | model.py | rashidhaffadi/EGT | 827eab656548ba47a07cfe21c1e0f4335c74f52a | [
"MIT"
] | null | null | null | model.py | rashidhaffadi/EGT | 827eab656548ba47a07cfe21c1e0f4335c74f52a | [
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
from torch.functional import F
def load_model(output_size, num_of_ngrams=200000,pretrained=False, path="/", name="checkpoint1.state_dict"):
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = SequenceModel().to(device).eval()
if pretrained:
state_dict = torch.load(path + name, map_location=device)
model.load_state_dict(state_dict)
model.chars_embedding = nn.Embedding(num_embeddings=num_of_ngrams, padding_idx=0, embedding_dim=embedding_dim)
return model
class GeneralReLU(nn.Module):
"""docstring for GeneralReLU"""
def __init__(self, leak=None, sub=None, maxv=None):
super(GeneralReLU, self).__init__()
self.leak, self.sub, self.maxv = leak, sub, maxv
def forward(self, x):
x = F.leaky_relu(x, self.leak) if self.leak is not None else F.relu(x)
if self.sub is not None: x.sub_(self.sub)
if self.maxv is not None: x.clamp_max_(self.maxv)
return x
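# Illustrative example (added): GeneralReLU generalises ReLU with an optional leaky slope,
# a constant subtraction and an upper clamp, e.g.
#
#   act = GeneralReLU(leak=0.1, sub=0.4, maxv=6.0)
#   act(torch.tensor([-2.0, 0.0, 10.0]))  # -> tensor([-0.6000, -0.4000, 6.0000])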
class AdaptiveConcatPool2d(nn.Module):
"Layer that concats `AdaptiveAvgPool2d` and `AdaptiveMaxPool2d`."
def __init__(self, sz=1):
"Output will be 2*sz"
super(AdaptiveConcatPool2d, self).__init__()
self.output_size = sz
self.ap = nn.AdaptiveAvgPool2d(self.output_size)
self.mp = nn.AdaptiveMaxPool2d(self.output_size)
def forward(self, x): return torch.cat([self.mp(x), self.ap(x)], 1)
def conv_layer(ni, nf, ks, stride=2, padding=0, leak=None, sub=None, maxv=None, bn=False, dp=None):
conv = nn.Conv2d(ni, nf, kernel_size=ks, stride=stride, padding=padding)
layers = [conv]
if bn is not None: layers.append(nn.BatchNorm2d(nf))
if dp is not None: layers.append(nn.Dropout(dp))
relu = GeneralReLU(leak, sub, maxv)
layers.append(relu)
return nn.Sequential(*layers)
def linear_layer(ni, no, leak=None, sub=None, maxv=None):
return nn.Sequential(nn.Linear(ni, no),
GeneralReLU(leak, sub, maxv))
# params = [[3, 96, 11, 4], ...]
class CNNModel(nn.Module):
"""docstring for EyeMosel"""
def __init__(self, params=[], **kwargs):
super(CNNModel, self).__init__()
self.params = params
self.layers = [conv_layer(*param, **kwargs) for param in self.params]
self.features = nn.Sequential(*self.layers)
self.pool = AdaptiveConcatPool2d(1)
self.flatten = nn.Flatten()
def forward(self, x):
x = self.features(x)
x = self.pool(x)
x = self.flatten(x)
# x = x.view(x.size(0), -1)
return x
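# Illustrative sketch (added): `params` holds per-layer conv_layer arguments
# (ni, nf, kernel_size, stride); the values below are placeholders (an AlexNet-style stem).
#
#   cnn = CNNModel(params=[[3, 96, 11, 4], [96, 256, 5, 1], [256, 384, 3, 1]])
#   cnn(torch.randn(1, 3, 224, 224)).shape  # -> (1, 2 * 384) after AdaptiveConcatPool2d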
# eye_params=[]
class EyeModel(nn.Module):
"""docstring for EyeModel"""
def __init__(self, params=[[]]):
super(EyeModel, self).__init__()
self.params = params
self.cnn = CNNModel(self.params[:-1])
self.fc = linear_layer(*self.params[-1])
def forward(self, xl, xr):
xl = self.cnn(xl)
xr = self.cnn(xr)
x = torch.cat((xl, xr), 1)
x = self.fc(x)
return x
class FaceModel(nn.Module):
"""docstring for FaceModel"""
def __init__(self, params=[[]]):
super(FaceModel, self).__init__()
self.params = params
self.cnn = CNNModel(self.params[:-1])
self.fc = linear_layer(*params[-1])
def forward(self, x):
x = self.cnn(x)
x = self.fc(x)
return x
class FaceGridModel(nn.Module):
# Model for the face grid pathway
def __init__(self, params=[[]]):
super(FaceGridModel, self).__init__()
self.params = params
self.cnn = CNNModel(self.params[:-1])
self.fc = linear_layer(*params[-1])
def forward(self, x):
x = self.cnn(x)
x = self.fc(x)
return x
class StaticModel(nn.Module):
"""docstring for EncoderCNN"""
def __init__(self, eye_params=[[]], face_params=[[]], face_grid_params=[[]], pretrained=False):
super(StaticModel, self).__init__()
self.eye_params = eye_params
self.face_params = face_params
self.face_grid_params = face_grid_params
self.eye_model = EyeModel(self.eye_params)
self.face_model = FaceModel(self.face_params)
self.face_grid_model = FaceGridModel(self.face_grid_params)
self.fc1 = linear_layer(512 + 64, 128)  # input size inferred (assumption) from the face (512) and grid (64) outputs noted in forward()
self.fc2 = linear_layer(1024 + 128, 128 * 2)  # sizes inferred (assumption) so the eye/face concatenation feeds self.out below
self.out = nn.Linear(128*2, 2)
def forward(self, xf, xl, xr, xg):
# eyes
xe = self.eye_model(xl, xr)#out: 1024
#face and grid
xf = self.face_model(xf)#out: 512
xg = self.face_grid_model(xg)#out: 64
xf = torch.cat((xf, xg), 1)
xf = self.fc1(xf)
x = torch.cat((xe, xf), 1)
x = self.fc2(x)
x = self.out(x)
return x
def freeze(m):
pass
class SequenceModel(nn.Module):
"""docstring for SequenceModel"""
def __init__(self, arg=None):  # default added so the no-argument SequenceModel() call above works (assumption)
super(SequenceModel, self).__init__()
self.encoder = StaticModel(pretrained=True)
freeze(self.encoder)
# load
def forward(self, x):
features = self.encoder(x)
| 29.590062 | 118 | 0.65932 |
79408cb104f56eb379d7c716fedaab025c28d03c | 2,409 | py | Python | python/snippets/stt_streaming_recognize_gender_identification.py | dchebakov/voicekit-examples | bb77eddafcb50d409fb8e6a1c80331d92dbc2be9 | [
"Apache-2.0"
] | 24 | 2019-11-01T22:31:32.000Z | 2021-10-08T10:30:36.000Z | python/snippets/stt_streaming_recognize_gender_identification.py | dchebakov/voicekit-examples | bb77eddafcb50d409fb8e6a1c80331d92dbc2be9 | [
"Apache-2.0"
] | 19 | 2019-10-23T21:24:33.000Z | 2021-12-08T21:06:06.000Z | python/snippets/stt_streaming_recognize_gender_identification.py | dchebakov/voicekit-examples | bb77eddafcb50d409fb8e6a1c80331d92dbc2be9 | [
"Apache-2.0"
] | 24 | 2019-12-03T09:48:53.000Z | 2021-12-10T10:50:15.000Z | #!/usr/bin/env python3
import sys
sys.path.append("..")
from tinkoff.cloud.stt.v1 import stt_pb2_grpc, stt_pb2
from auth import authorization_metadata
import grpc
import os
import wave
endpoint = os.environ.get("VOICEKIT_ENDPOINT") or "api.tinkoff.ai:443"
api_key = os.environ["VOICEKIT_API_KEY"]
secret_key = os.environ["VOICEKIT_SECRET_KEY"]
def build_first_request(sample_rate_hertz, num_channels):
request = stt_pb2.StreamingRecognizeRequest()
request.streaming_config.config.encoding = stt_pb2.AudioEncoding.LINEAR16
request.streaming_config.config.sample_rate_hertz = sample_rate_hertz
request.streaming_config.config.num_channels = num_channels
request.streaming_config.config.enable_gender_identification = True
return request
def generate_requests():
try:
with wave.open("../../audio/stierlitz.wav") as f:
yield build_first_request(f.getframerate(), f.getnchannels())
frame_samples = f.getframerate()//10 # Send 100ms at a time
for data in iter(lambda:f.readframes(frame_samples), b''):
request = stt_pb2.StreamingRecognizeRequest()
request.audio_content = data
yield request
except Exception as e:
print("Got exception in generate_requests", e)
raise
def print_streaming_recognition_responses(responses):
for response in responses:
for result in response.results:
print("Channel", result.recognition_result.channel)
print("Phrase start:", result.recognition_result.start_time.ToTimedelta())
print("Phrase end: ", result.recognition_result.end_time.ToTimedelta())
for alternative in result.recognition_result.alternatives:
print('"' + alternative.transcript + '"')
print("------------------")
print("gender_identification_results:")
print(f"\tmale_proba={result.recognition_result.gender_identification_result.male_proba:.2f}")
print(f"\tfemale_proba={result.recognition_result.gender_identification_result.female_proba:.2f}")
stub = stt_pb2_grpc.SpeechToTextStub(grpc.secure_channel(endpoint, grpc.ssl_channel_credentials()))
metadata = authorization_metadata(api_key, secret_key, "tinkoff.cloud.stt")
responses = stub.StreamingRecognize(generate_requests(), metadata=metadata)
print_streaming_recognition_responses(responses)
| 44.611111 | 110 | 0.723537 |
79408cc9e791305bf6465c4f1b14ed37d820447d | 186 | py | Python | src/api_rutas/routers/catalogo.py | PythonistaMX/py261 | 614de0c8c78f26f10d485f1f46fc2c673fc79b6f | [
"MIT"
] | null | null | null | src/api_rutas/routers/catalogo.py | PythonistaMX/py261 | 614de0c8c78f26f10d485f1f46fc2c673fc79b6f | [
"MIT"
] | null | null | null | src/api_rutas/routers/catalogo.py | PythonistaMX/py261 | 614de0c8c78f26f10d485f1f46fc2c673fc79b6f | [
"MIT"
] | null | null | null | from fastapi import APIRouter
from typing import List
from data import CARRERAS
router = APIRouter()
@router.get("/carreras")
def consulta_carreras():
return {'carreras': CARRERAS} | 20.666667 | 33 | 0.763441 |
79408d48586f955811db76842dda8219b438116f | 3,384 | py | Python | django/contrib/gis/gdal/libgdal.py | ni-ning/django | 2e7ba6057cfc82a15a22b6021cd60cf307152e2d | [
"CNRI-Python-GPL-Compatible",
"BSD-3-Clause"
] | 23 | 2020-09-07T02:32:09.000Z | 2022-01-29T06:49:43.000Z | virtual/lib/python3.6/site-packages/django/contrib/gis/gdal/libgdal.py | kahenya-anita/Insta-Clone | 4894e959c17170505e73aee6dc497aeb29d55a71 | [
"MIT"
] | 61 | 2021-01-10T12:59:01.000Z | 2021-06-24T09:19:20.000Z | virtual/lib/python3.6/site-packages/django/contrib/gis/gdal/libgdal.py | kahenya-anita/Insta-Clone | 4894e959c17170505e73aee6dc497aeb29d55a71 | [
"MIT"
] | 11 | 2020-07-31T08:20:43.000Z | 2020-08-21T04:08:29.000Z | import logging
import os
import re
from ctypes import CDLL, CFUNCTYPE, c_char_p, c_int
from ctypes.util import find_library
from django.contrib.gis.gdal.error import GDALException
from django.core.exceptions import ImproperlyConfigured
logger = logging.getLogger('django.contrib.gis')
# Custom library path set?
try:
from django.conf import settings
lib_path = settings.GDAL_LIBRARY_PATH
except (AttributeError, ImportError, ImproperlyConfigured, OSError):
lib_path = None
if lib_path:
lib_names = None
elif os.name == 'nt':
# Windows NT shared libraries
lib_names = ['gdal301', 'gdal300', 'gdal204', 'gdal203', 'gdal202', 'gdal201', 'gdal20']
elif os.name == 'posix':
# *NIX library names.
lib_names = [
'gdal', 'GDAL',
'gdal3.1.0', 'gdal3.0.0',
'gdal2.4.0', 'gdal2.3.0', 'gdal2.2.0', 'gdal2.1.0', 'gdal2.0.0',
]
else:
raise ImproperlyConfigured('GDAL is unsupported on OS "%s".' % os.name)
# Using the ctypes `find_library` utility to find the
# path to the GDAL library from the list of library names.
if lib_names:
for lib_name in lib_names:
lib_path = find_library(lib_name)
if lib_path is not None:
break
if lib_path is None:
raise ImproperlyConfigured(
'Could not find the GDAL library (tried "%s"). Is GDAL installed? '
'If it is, try setting GDAL_LIBRARY_PATH in your settings.'
% '", "'.join(lib_names)
)
# This loads the GDAL/OGR C library
lgdal = CDLL(lib_path)
# On Windows, the GDAL binaries have some OSR routines exported with
# STDCALL, while others are not. Thus, the library will also need to
# be loaded up as WinDLL for said OSR functions that require the
# different calling convention.
if os.name == 'nt':
from ctypes import WinDLL
lwingdal = WinDLL(lib_path)
def std_call(func):
"""
Return the correct STDCALL function for certain OSR routines on Win32
platforms.
"""
if os.name == 'nt':
return lwingdal[func]
else:
return lgdal[func]
# #### Version-information functions. ####
# Return GDAL library version information with the given key.
_version_info = std_call('GDALVersionInfo')
_version_info.argtypes = [c_char_p]
_version_info.restype = c_char_p
def gdal_version():
"Return only the GDAL version number information."
return _version_info(b'RELEASE_NAME')
def gdal_full_version():
"Return the full GDAL version information."
return _version_info(b'')
def gdal_version_info():
ver = gdal_version()
m = re.match(br'^(?P<major>\d+)\.(?P<minor>\d+)(?:\.(?P<subminor>\d+))?', ver)
if not m:
raise GDALException('Could not parse GDAL version string "%s"' % ver)
major, minor, subminor = m.groups()
return (int(major), int(minor), subminor and int(subminor))
GDAL_VERSION = gdal_version_info()
# Set library error handling so as errors are logged
CPLErrorHandler = CFUNCTYPE(None, c_int, c_int, c_char_p)
def err_handler(error_class, error_number, message):
logger.error('GDAL_ERROR %d: %s', error_number, message)
err_handler = CPLErrorHandler(err_handler)
def function(name, args, restype):
func = std_call(name)
func.argtypes = args
func.restype = restype
return func
set_error_handler = function('CPLSetErrorHandler', [CPLErrorHandler], CPLErrorHandler)
set_error_handler(err_handler)
| 27.966942 | 92 | 0.695331 |
79408e3c3fd329be9483d69a8d6e7964c98b998e | 4,986 | py | Python | allennlp/allennlp/modules/stacked_alternating_lstm.py | rahular/joint-coref-srl | cd85fb4e11af1a1ea400ed657d0a4511c1d6c6be | [
"MIT"
] | null | null | null | allennlp/allennlp/modules/stacked_alternating_lstm.py | rahular/joint-coref-srl | cd85fb4e11af1a1ea400ed657d0a4511c1d6c6be | [
"MIT"
] | null | null | null | allennlp/allennlp/modules/stacked_alternating_lstm.py | rahular/joint-coref-srl | cd85fb4e11af1a1ea400ed657d0a4511c1d6c6be | [
"MIT"
] | null | null | null | """
A stacked LSTM with LSTM layers which alternate between going forwards over
the sequence and going backwards.
"""
from typing import Optional, Tuple, Union, List
import torch
from torch.nn.utils.rnn import PackedSequence
from allennlp.modules.augmented_lstm import AugmentedLstm
from allennlp.common.checks import ConfigurationError
TensorPair = Tuple[torch.Tensor, torch.Tensor]
class StackedAlternatingLstm(torch.nn.Module):
"""
A stacked LSTM with LSTM layers which alternate between going forwards over
the sequence and going backwards. This implementation is based on the
description in [Deep Semantic Role Labelling - What works and what's next]
(https://homes.cs.washington.edu/~luheng/files/acl2017_hllz.pdf).
# Parameters
input_size : `int`, required
The dimension of the inputs to the LSTM.
hidden_size : `int`, required
The dimension of the outputs of the LSTM.
num_layers : `int`, required
The number of stacked LSTMs to use.
recurrent_dropout_probability : `float`, optional (default = 0.0)
The dropout probability to be used in a dropout scheme as stated in
[A Theoretically Grounded Application of Dropout in Recurrent Neural Networks]
(https://arxiv.org/abs/1512.05287).
use_input_projection_bias : `bool`, optional (default = True)
Whether or not to use a bias on the input projection layer. This is mainly here
for backwards compatibility reasons and will be removed (and set to False)
in future releases.
# Returns
output_accumulator : PackedSequence
The outputs of the interleaved LSTMs per timestep. A tensor of shape
(batch_size, max_timesteps, hidden_size) where for a given batch
element, all outputs past the sequence length for that batch are
zero tensors.
"""
def __init__(
self,
input_size: int,
hidden_size: int,
num_layers: int,
recurrent_dropout_probability: float = 0.0,
use_highway: bool = True,
use_input_projection_bias: bool = True,
) -> None:
super().__init__()
# Required to be wrapped with a `PytorchSeq2SeqWrapper`.
self.input_size = input_size
self.hidden_size = hidden_size
self.num_layers = num_layers
layers = []
lstm_input_size = input_size
for layer_index in range(num_layers):
go_forward = layer_index % 2 == 0
layer = AugmentedLstm(
lstm_input_size,
hidden_size,
go_forward,
recurrent_dropout_probability=recurrent_dropout_probability,
use_highway=use_highway,
use_input_projection_bias=use_input_projection_bias,
)
lstm_input_size = hidden_size
self.add_module("layer_{}".format(layer_index), layer)
layers.append(layer)
self.lstm_layers = layers
def forward(
self, inputs: PackedSequence, initial_state: Optional[TensorPair] = None
) -> Tuple[Union[torch.Tensor, PackedSequence], TensorPair]:
"""
# Parameters
inputs : `PackedSequence`, required.
A batch first `PackedSequence` to run the stacked LSTM over.
initial_state : Tuple[torch.Tensor, torch.Tensor], optional, (default = None)
A tuple (state, memory) representing the initial hidden state and memory
of the LSTM. Each tensor has shape (1, batch_size, output_dimension).
# Returns
output_sequence : PackedSequence
The encoded sequence of shape (batch_size, sequence_length, hidden_size)
final_states: Tuple[torch.Tensor, torch.Tensor]
The per-layer final (state, memory) states of the LSTM, each with shape
(num_layers, batch_size, hidden_size).
"""
if not initial_state:
hidden_states: List[Optional[TensorPair]] = [None] * len(self.lstm_layers)
elif initial_state[0].size()[0] != len(self.lstm_layers):
raise ConfigurationError(
"Initial states were passed to forward() but the number of "
"initial states does not match the number of layers."
)
else:
hidden_states = list(
zip(initial_state[0].split(1, 0), initial_state[1].split(1, 0))
)
output_sequence = inputs
final_states = []
for i, state in enumerate(hidden_states):
layer = getattr(self, "layer_{}".format(i))
# The state is duplicated to mirror the Pytorch API for LSTMs.
output_sequence, final_state = layer(output_sequence, state)
final_states.append(final_state)
final_hidden_state, final_cell_state = tuple(
torch.cat(state_list, 0) for state_list in zip(*final_states)
)
return output_sequence, (final_hidden_state, final_cell_state)
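if __name__ == "__main__":
    # Added illustration (not part of the original AllenNLP module): a minimal forward
    # pass through the stacked alternating LSTM on a padded batch of two sequences.
    from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence

    lstm = StackedAlternatingLstm(input_size=8, hidden_size=16, num_layers=4)
    inputs = torch.randn(2, 5, 8)  # (batch, max_len, input_size)
    packed = pack_padded_sequence(inputs, [5, 3], batch_first=True)
    output, (hidden, cell) = lstm(packed)
    unpacked, _ = pad_packed_sequence(output, batch_first=True)
    print(unpacked.shape)  # torch.Size([2, 5, 16])
    print(hidden.shape)    # torch.Size([4, 2, 16])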
| 39.888 | 87 | 0.654232 |
79408e7bad606937443c0ef22182273ec2de712e | 5,024 | py | Python | project_1_2/src/bbox.py | tillaczel/Deep-Learning-in-Computer-Vision | 792e90a3ad5bafdb30e0267226c2c75b8afd01e3 | [
"MIT"
] | null | null | null | project_1_2/src/bbox.py | tillaczel/Deep-Learning-in-Computer-Vision | 792e90a3ad5bafdb30e0267226c2c75b8afd01e3 | [
"MIT"
] | null | null | null | project_1_2/src/bbox.py | tillaczel/Deep-Learning-in-Computer-Vision | 792e90a3ad5bafdb30e0267226c2c75b8afd01e3 | [
"MIT"
] | 1 | 2021-06-08T09:28:01.000Z | 2021-06-08T09:28:01.000Z | import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import os
import torch
import wandb
from torch import nn
from tqdm import tqdm
from project_1_2.src.data import get_data_raw
def plt_bboxes(img, bboxes, labels, filename=None):
fig, axs = plt.subplots(1, 1, figsize=(10, 10))
axs.imshow(img)
for bbox, label in zip(bboxes, labels):
# TODO: add colors
l, t, w, h = bbox[0], bbox[2], bbox[1] - bbox[0], bbox[3] - bbox[2]
plt.text(l,t, str(label), fontsize=30)
rect = patches.Rectangle((l, t), w, h, linewidth=1, edgecolor='r', facecolor='none')
axs.add_patch(rect)
if filename is not None:
plt.savefig(filename)
def bbox_intersection_over_union(boxA, boxB):
max_l = max(boxA[0], boxB[0])
min_r = min(boxA[1], boxB[1])
max_t = max(boxA[2], boxB[2])
min_b = min(boxA[3], boxB[3])
interArea = max(0, min_r - max_l + 1) * max(0, min_b - max_t + 1)
boxAArea = (boxA[1] - boxA[0] + 1) * (boxA[3] - boxA[2] + 1)
boxBArea = (boxB[1] - boxB[0] + 1) * (boxB[3] - boxB[2] + 1)
return interArea / float(boxAArea + boxBArea - interArea)
def non_maximum_suppression(bboxes, p_classes, p_threshold=0.6, iou_threshold=0.5):
class_idx, probs = np.argmax(p_classes, axis=1), np.max(p_classes, axis=1)
idx = np.argwhere(p_threshold < probs).flatten()
bboxes, probs, class_idx = bboxes[idx], probs[idx], class_idx[idx]
final_bboxes, final_probs, final_class_idx = list(), list(), list()
while probs.shape[0] > 0:
max_idx = np.argmax(probs)
final_bboxes.append(bboxes[max_idx]), np.delete(bboxes, max_idx)
final_probs.append(probs[max_idx]), np.delete(probs, max_idx)
final_class_idx.append(class_idx[max_idx]), np.delete(class_idx, max_idx)
ious = list()
for bbox in bboxes:
ious.append(bbox_intersection_over_union(final_bboxes[-1], bbox))
idx = np.argwhere(np.array(ious) < iou_threshold).flatten()
bboxes, probs, class_idx = bboxes[idx], probs[idx], class_idx[idx]
return final_bboxes, final_probs, final_class_idx
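# Illustrative example (added): boxes are (left, right, top, bottom) and p_classes holds
# per-box class probabilities; the highest-probability box is kept and boxes overlapping
# it beyond iou_threshold are dropped, e.g.
#
#   boxes = np.array([[0, 100, 0, 100], [5, 105, 5, 105], [200, 300, 200, 300]])
#   p_cls = np.array([[0.9, 0.1], [0.8, 0.2], [0.7, 0.3]])
#   kept_boxes, kept_probs, kept_cls = non_maximum_suppression(
#       boxes, p_cls, p_threshold=0.5, iou_threshold=0.5)
#   # keeps the first and third boxes; the second overlaps the first with IoU ~0.82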
def filter_bboxes(result, img, filename=None, p_threshold=0.1, iou_threshold=0.5):
bboxes, p_classes = map(np.array, zip(*result))
bboxes, probs, class_idx = non_maximum_suppression(bboxes, p_classes, p_threshold=p_threshold, iou_threshold=iou_threshold)
plt_bboxes(img, bboxes, class_idx, filename=filename)
return bboxes, probs, class_idx
def coord_to_boxes(coord, ratio): # TODO: this is probably wrong...
center = (coord * 32 + 112)
coord = ( # who knows where is up or left
int((center[0] - 112) / ratio),
int((center[0] + 112) / ratio),
int((center[1] - 112) / ratio),
int((center[1] + 112) / ratio),
)
return coord
def run_detection(engine):
engine.model.resnet[-1] = nn.AvgPool2d(kernel_size=(7, 7), stride=(1, 1))
n_images_to_process = 10
thresholds = [(0.1, 0.5), (0.2, 0.5), (0.1, 0.3), (0.25, 0.25)]
resize_ratios = (0.55, 0.7, 0.8) # todo: config
# dataset = get_data_raw(split='test', resize_ratios=resize_ratios)
dataset = get_data_raw(split='train', resize_ratios=resize_ratios)
for i in tqdm(range(n_images_to_process)):
im_dir = os.path.join(wandb.run.dir, f'img_{i}')
if not os.path.isdir(im_dir):
os.mkdir(im_dir)
result2 = []
resized, original_image, ratios, meta = dataset[i]
# this is ugly but should work
for img, ratio in zip(resized, ratios):
result = []
y = engine(img.unsqueeze(dim=0))
probs = torch.softmax(y, 1).detach().cpu().numpy()
for i in range(probs.shape[-1]): # x-axis
for j in range(probs.shape[-2]): # y-axis
p = probs[0, :, j, i]
coord = coord_to_boxes(np.array([i, j]), ratio)
result.append((
coord,
p[:10]
))
result2.append((
coord,
p[:10]
))
for th in thresholds:
th_dir = os.path.join(im_dir, f'thresh_{th[0]:.2f}_{th[1]:.2f}')
if not os.path.isdir(th_dir):
os.mkdir(th_dir)
p_threshold, iou_threshold = th
filename = os.path.join(th_dir, f'bbox_{ratio:.2f}.png')
filter_bboxes(result, original_image, filename=filename, p_threshold=p_threshold, iou_threshold=iou_threshold)
for th in thresholds:
th_dir = os.path.join(im_dir, f'thresh_{th[0]:.2f}_{th[1]:.2f}')
if not os.path.isdir(th_dir):
os.mkdir(th_dir)
p_threshold, iou_threshold = th
filename = os.path.join(th_dir, f'bbox_.png')
filter_bboxes(result2, original_image, filename=filename, p_threshold=p_threshold, iou_threshold=iou_threshold)
| 40.845528 | 127 | 0.600916 |
79408fd7b054da64da452849eb1ca6b7ec8962c9 | 1,141 | py | Python | Plugins/Aspose-Cells-Java-for-Jython/asposecells/WorkingWithWorksheets/ProtectingWorksheet.py | mnote/Aspose.Cells-for-Java | bf71d5a86806effd279af93fd511dbefd90106e5 | [
"MIT"
] | 90 | 2016-04-14T10:14:58.000Z | 2022-03-29T07:40:21.000Z | Plugins/Aspose-Cells-Java-for-Jython/asposecells/WorkingWithWorksheets/ProtectingWorksheet.py | mnote/Aspose.Cells-for-Java | bf71d5a86806effd279af93fd511dbefd90106e5 | [
"MIT"
] | 19 | 2018-03-23T09:50:42.000Z | 2021-11-01T09:37:38.000Z | Plugins/Aspose-Cells-Java-for-Jython/asposecells/WorkingWithWorksheets/ProtectingWorksheet.py | mnote/Aspose.Cells-for-Java | bf71d5a86806effd279af93fd511dbefd90106e5 | [
"MIT"
] | 72 | 2016-04-09T07:16:12.000Z | 2022-03-23T20:28:09.000Z | from asposecells import Settings
from com.aspose.cells import Workbook
from com.aspose.cells import SaveFormat
class ProtectingWorksheet:
def __init__(self):
dataDir = Settings.dataDir + 'WorkingWithWorksheets/ProtectingWorksheet/'
#Instantiating a Excel object by excel file path
excel = Workbook(dataDir + "Book1.xls")
#Accessing the first worksheet in the Excel file
worksheets = excel.getWorksheets()
worksheet = worksheets.get(0)
protection = worksheet.getProtection()
#The following 3 methods are only for Excel 2000 and earlier formats
protection.setAllowEditingContent(False)
protection.setAllowEditingObject(False)
protection.setAllowEditingScenario(False)
#Protects the first worksheet with a password "1234"
protection.setPassword("1234")
#Saving the modified Excel file in default format
excel.save(dataDir + "output.xls")
#Print Message
print "Sheet protected successfully."
if __name__ == '__main__':
ProtectingWorksheet() | 32.6 | 82 | 0.672217 |
794090e5dbc49acfbf7cdb2b57dd503ec9221c82 | 719 | py | Python | test/test_images_audits_api.py | p-fruck/python-contabo | c3abd362a0b90783118f36bec0e557bdbe5a8f2c | [
"Apache-2.0"
] | 2 | 2022-01-27T10:36:33.000Z | 2022-03-09T14:21:12.000Z | test/test_images_audits_api.py | p-fruck/python-contabo | c3abd362a0b90783118f36bec0e557bdbe5a8f2c | [
"Apache-2.0"
] | 7 | 2022-01-13T10:44:19.000Z | 2022-02-15T23:44:44.000Z | test/test_images_audits_api.py | p-fruck/python-contabo | c3abd362a0b90783118f36bec0e557bdbe5a8f2c | [
"Apache-2.0"
] | null | null | null | """
Contabo API
The version of the OpenAPI document: 1.0.0
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import unittest
import pfruck_contabo
from pfruck_contabo.api.images_audits_api import ImagesAuditsApi # noqa: E501
class TestImagesAuditsApi(unittest.TestCase):
"""ImagesAuditsApi unit test stubs"""
def setUp(self):
self.api = ImagesAuditsApi() # noqa: E501
def tearDown(self):
pass
def test_retrieve_image_audits_list(self):
"""Test case for retrieve_image_audits_list
List history about your custom images (audit) # noqa: E501
"""
pass
if __name__ == '__main__':
unittest.main()
| 19.972222 | 78 | 0.67733 |
794092c7c6167735d303d2de57e5b852a5a23c19 | 897 | py | Python | examples/undocumented/python_modular/kernel_sparse_linear_modular.py | srgnuclear/shogun | 33c04f77a642416376521b0cd1eed29b3256ac13 | [
"Ruby",
"MIT"
] | 1 | 2015-11-05T18:31:14.000Z | 2015-11-05T18:31:14.000Z | examples/undocumented/python_modular/kernel_sparse_linear_modular.py | waderly/shogun | 9288b6fa38e001d63c32188f7f847dadea66e2ae | [
"Ruby",
"MIT"
] | null | null | null | examples/undocumented/python_modular/kernel_sparse_linear_modular.py | waderly/shogun | 9288b6fa38e001d63c32188f7f847dadea66e2ae | [
"Ruby",
"MIT"
] | null | null | null | #!/usr/bin/env python
from tools.load import LoadMatrix
lm=LoadMatrix()
traindat = lm.load_numbers('../data/fm_train_real.dat')
testdat = lm.load_numbers('../data/fm_test_real.dat')
parameter_list = [[traindat,testdat,1.1],[traindat,testdat,1.2]]
def kernel_sparse_linear_modular (fm_train_real=traindat,fm_test_real=testdat,scale=1.1):
from modshogun import SparseRealFeatures
from modshogun import LinearKernel, AvgDiagKernelNormalizer
feats_train=SparseRealFeatures(fm_train_real)
feats_test=SparseRealFeatures(fm_test_real)
kernel=LinearKernel()
kernel.set_normalizer(AvgDiagKernelNormalizer(scale))
kernel.init(feats_train, feats_train)
km_train=kernel.get_kernel_matrix()
kernel.init(feats_train, feats_test)
km_test=kernel.get_kernel_matrix()
return km_train,km_test,kernel
if __name__=='__main__':
print('SparseLinear')
kernel_sparse_linear_modular(*parameter_list[0])
| 30.931034 | 89 | 0.816054 |
794096875d29da17c61cd2ad7231b29f1af38cba | 12,482 | py | Python | boto/emr/emrobject.py | scriptsrc/boto | 5506aba84638f921bc84b125f78d5e1c600d14fb | [
"MIT"
] | null | null | null | boto/emr/emrobject.py | scriptsrc/boto | 5506aba84638f921bc84b125f78d5e1c600d14fb | [
"MIT"
] | 1 | 2021-04-30T21:35:51.000Z | 2021-04-30T21:35:51.000Z | boto/emr/emrobject.py | scriptsrc/boto | 5506aba84638f921bc84b125f78d5e1c600d14fb | [
"MIT"
] | null | null | null | # Copyright (c) 2010 Spotify AB
# Copyright (c) 2010 Jeremy Thurgood <[email protected]>
# Copyright (c) 2010-2011 Yelp
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
This module contains EMR response objects
"""
from boto.resultset import ResultSet
class EmrObject(object):
Fields = set()
def __init__(self, connection=None):
self.connection = connection
def startElement(self, name, attrs, connection):
pass
def endElement(self, name, value, connection):
if name in self.Fields:
setattr(self, name.lower(), value)
class RunJobFlowResponse(EmrObject):
Fields = set(['JobFlowId'])
class AddInstanceGroupsResponse(EmrObject):
Fields = set(['InstanceGroupIds', 'JobFlowId'])
class ModifyInstanceGroupsResponse(EmrObject):
Fields = set(['RequestId'])
class Arg(EmrObject):
def __init__(self, connection=None):
self.value = None
def endElement(self, name, value, connection):
self.value = value
class StepId(Arg):
pass
class SupportedProduct(Arg):
pass
class JobFlowStepList(EmrObject):
def __init__(self, connection=None):
self.connection = connection
self.stepids = None
def startElement(self, name, attrs, connection):
if name == 'StepIds':
self.stepids = ResultSet([('member', StepId)])
return self.stepids
else:
return None
class BootstrapAction(EmrObject):
Fields = set([
'Args',
'Name',
'Path',
'ScriptPath',
])
def startElement(self, name, attrs, connection):
if name == 'Args':
self.args = ResultSet([('member', Arg)])
return self.args
class KeyValue(EmrObject):
Fields = set([
'Key',
'Value',
])
class Step(EmrObject):
Fields = set([
'ActionOnFailure',
'CreationDateTime',
'EndDateTime',
'Jar',
'LastStateChangeReason',
'MainClass',
'Name',
'StartDateTime',
'State',
])
def __init__(self, connection=None):
self.connection = connection
self.args = None
def startElement(self, name, attrs, connection):
if name == 'Args':
self.args = ResultSet([('member', Arg)])
return self.args
if name == 'Properties':
self.properties = ResultSet([('member', KeyValue)])
return self.properties
class InstanceGroup(EmrObject):
Fields = set([
'BidPrice',
'CreationDateTime',
'EndDateTime',
'InstanceGroupId',
'InstanceRequestCount',
'InstanceRole',
'InstanceRunningCount',
'InstanceType',
'LastStateChangeReason',
'LaunchGroup',
'Market',
'Name',
'ReadyDateTime',
'StartDateTime',
'State',
])
class JobFlow(EmrObject):
Fields = set([
'AmiVersion',
'AvailabilityZone',
'CreationDateTime',
'Ec2KeyName',
'EndDateTime',
'HadoopVersion',
'Id',
'InstanceCount',
'JobFlowId',
'KeepJobFlowAliveWhenNoSteps',
'LastStateChangeReason',
'LogUri',
'MasterInstanceId',
'MasterInstanceType',
'MasterPublicDnsName',
'Name',
'NormalizedInstanceHours',
'ReadyDateTime',
'RequestId',
'SlaveInstanceType',
'StartDateTime',
'State',
'TerminationProtected',
'Type',
'Value',
'VisibleToAllUsers',
])
def __init__(self, connection=None):
self.connection = connection
self.steps = None
self.instancegroups = None
self.bootstrapactions = None
def startElement(self, name, attrs, connection):
if name == 'Steps':
self.steps = ResultSet([('member', Step)])
return self.steps
elif name == 'InstanceGroups':
self.instancegroups = ResultSet([('member', InstanceGroup)])
return self.instancegroups
elif name == 'BootstrapActions':
self.bootstrapactions = ResultSet([('member', BootstrapAction)])
return self.bootstrapactions
elif name == 'SupportedProducts':
self.supported_products = ResultSet([('member', SupportedProduct)])
return self.supported_products
else:
return None
class ClusterTimeline(EmrObject):
Fields = set([
'CreationDateTime',
'ReadyDateTime',
'EndDateTime'
])
class ClusterStateChangeReason(EmrObject):
Fields = set([
'Code',
'Message'
])
class ClusterStatus(EmrObject):
Fields = set([
'State',
'StateChangeReason',
'Timeline'
])
def __init__(self, connection=None):
self.connection = connection
self.timeline = None
def startElement(self, name, attrs, connection):
if name == 'Timeline':
self.timeline = ClusterTimeline()
return self.timeline
elif name == 'StateChangeReason':
self.statechangereason = ClusterStateChangeReason()
return self.statechangereason
else:
return None
class Ec2InstanceAttributes(EmrObject):
Fields = set([
'Ec2KeyName',
'Ec2SubnetId',
'Ec2AvailabilityZone',
'IamInstanceProfile'
])
class Application(EmrObject):
Fields = set([
'Name',
'Version',
'Args',
'AdditionalInfo'
])
class Cluster(EmrObject):
Fields = set([
'Id',
'Name',
'LogUri',
'RequestedAmiVersion',
'RunningAmiVersion',
'AutoTerminate',
'TerminationProtected',
'VisibleToAllUsers'
])
def __init__(self, connection=None):
self.connection = connection
self.status = None
self.ec2instanceattributes = None
self.applications = None
self.tags = None
def startElement(self, name, attrs, connection):
if name == 'Status':
self.status = ClusterStatus()
return self.status
elif name == 'Ec2InstanceAttributes':
self.ec2instanceattributes = Ec2InstanceAttributes()
return self.ec2instanceattributes
elif name == 'Applications':
self.applications = ResultSet([('member', Application)])
return self.applications
elif name == 'Tags':
self.tags = ResultSet([('member', KeyValue)])
return self.tags
else:
return None
class ClusterSummary(Cluster):
Fields = set([
'Id',
'Name'
])
class ClusterSummaryList(EmrObject):
Fields = set([
'Marker'
])
def __init__(self, connection):
self.connection = connection
self.clusters = None
def startElement(self, name, attrs, connection):
if name == 'Clusters':
self.clusters = ResultSet([('member', ClusterSummary)])
return self.clusters
else:
return None
class StepConfig(EmrObject):
Fields = set([
'Jar',
'MainClass'
])
def __init__(self, connection=None):
self.connection = connection
self.properties = None
self.args = None
def startElement(self, name, attrs, connection):
if name == 'Properties':
self.properties = ResultSet([('member', KeyValue)])
return self.properties
elif name == 'Args':
self.args = ResultSet([('member', Arg)])
return self.args
else:
return None
class HadoopStep(EmrObject):
Fields = set([
'Id',
'Name',
'ActionOnFailure'
])
def __init__(self, connection=None):
self.connection = connection
self.config = None
self.status = None
def startElement(self, name, attrs, connection):
if name == 'Config':
self.config = StepConfig()
return self.config
elif name == 'Status':
self.status = ClusterStatus()
return self.status
else:
return None
class InstanceGroupInfo(EmrObject):
Fields = set([
'Id',
'Name',
'Market',
'InstanceGroupType',
'BidPrice',
'InstanceType',
'RequestedInstanceCount',
'RunningInstanceCount'
])
def __init__(self, connection=None):
self.connection = connection
self.status = None
def startElement(self, name, attrs, connection):
if name == 'Status':
self.status = ClusterStatus()
return self.status
else:
return None
class InstanceGroupList(EmrObject):
Fields = set([
'Marker'
])
def __init__(self, connection=None):
self.connection = connection
self.instancegroups = None
def startElement(self, name, attrs, connection):
if name == 'InstanceGroups':
self.instancegroups = ResultSet([('member', InstanceGroupInfo)])
return self.instancegroups
else:
return None
class InstanceInfo(EmrObject):
Fields = set([
'Id',
'Ec2InstanceId',
'PublicDnsName',
'PublicIpAddress',
'PrivateDnsName',
'PrivateIpAddress'
])
def __init__(self, connection=None):
self.connection = connection
self.status = None
def startElement(self, name, attrs, connection):
if name == 'Status':
self.status = ClusterStatus()
return self.status
else:
return None
class InstanceList(EmrObject):
Fields = set([
'Marker'
])
def __init__(self, connection=None):
self.connection = connection
self.instances = None
def startElement(self, name, attrs, connection):
if name == 'Instances':
self.instances = ResultSet([('member', InstanceInfo)])
return self.instances
else:
return None
class StepSummary(EmrObject):
Fields = set([
'Id',
'Name'
])
def __init__(self, connection=None):
self.connection = connection
self.status = None
self.config = None
def startElement(self, name, attrs, connection):
if name == 'Status':
self.status = ClusterStatus()
return self.status
elif name == 'Config':
self.config = StepConfig()
return self.config
else:
return None
class StepSummaryList(EmrObject):
Fields = set([
'Marker'
])
def __init__(self, connection=None):
self.connection = connection
self.steps = None
def startElement(self, name, attrs, connection):
if name == 'Steps':
self.steps = ResultSet([('member', StepSummary)])
return self.steps
else:
return None
class BootstrapActionList(EmrObject):
Fields = set([
'Marker'
])
def __init__(self, connection=None):
self.connection = connection
self.actions = None
def startElement(self, name, attrs, connection):
if name == 'BootstrapActions':
self.actions = ResultSet([('member', BootstrapAction)])
return self.actions
else:
return None
| 25.114688 | 79 | 0.585563 |
794096e126e50af4d55931eb91660da94428a383 | 5,554 | py | Python | deep CNN features/xception_extract.py | subhankar01/Covid-Chestxray-lambda-fuzzy | dff45f39a6cacda2848319ce192d2bd88d4a0d13 | [
"MIT"
] | 5 | 2021-09-21T05:36:38.000Z | 2022-03-27T11:46:31.000Z | deep CNN features/xception_extract.py | kirti031/covidfs-aihc | 188d766569d768332ff0f0ee392bdf4d438e3da2 | [
"MIT"
] | 1 | 2022-03-28T05:35:23.000Z | 2022-03-28T05:35:23.000Z | deep CNN features/xception_extract.py | kirti031/covidfs-aihc | 188d766569d768332ff0f0ee392bdf4d438e3da2 | [
"MIT"
] | 5 | 2021-07-26T07:18:09.000Z | 2022-02-08T20:11:31.000Z | import os
from os.path import basename, join, exists
import numpy as np
import math
np.random.seed(777)
import tensorflow_addons as tfa
import tensorflow as tf  # needed for the tf.keras.Model call further below
from tensorflow.keras import models
from tensorflow.keras import layers
from tensorflow.keras import optimizers
from tensorflow.keras import callbacks
from tensorflow.keras.applications.xception import Xception
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from numpy import array
from numpy import argmax
from numpy import mean
from numpy import std
from sklearn.metrics import accuracy_score
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings('always')
warnings.filterwarnings('ignore')
os.chdir(r"COVID_Xray/")
train_dir="aug/"
test_dir="test/"
total=0
print('---Training set details----')
for sub_folder in os.listdir(train_dir):
no_of_images=len(os.listdir(train_dir + sub_folder))
total+=no_of_images
print(str(no_of_images) + " " + sub_folder + " images")
print("Total no. of Chest Xray training images=",total)
total=0
print('---Test set details----')
for sub_folder in os.listdir(test_dir):
no_of_images=len(os.listdir(test_dir + sub_folder))
total+=no_of_images
print(str(no_of_images) + " " + sub_folder + " images")
print("Total no. of Chest Xray test images=",total)
extracted_features_dir="COVID_Xray/extracted_features/"
img_height =512
img_width = 512
batch_size =32
input_shape = (img_width, img_height, 3)
print("-----------------Image Augmentation for Xception--------------")
random_seed = np.random.seed(1142)
train_datagen = ImageDataGenerator(
rescale=1./255,
featurewise_center=True,
featurewise_std_normalization=True,
rotation_range=15,
width_shift_range=0.2,
height_shift_range=0.2,
validation_split= 0.2,
zoom_range=0.1,
shear_range=0.2)
train_generator_xcep = train_datagen.flow_from_directory(
train_dir,
target_size=(img_height, img_width),
batch_size=batch_size,
seed = random_seed,
shuffle=False,
subset = 'training',
class_mode='categorical')
val_generator_xcep = train_datagen.flow_from_directory(
train_dir,
target_size=(img_height, img_width),
batch_size=batch_size,
seed = random_seed,
shuffle=False,
subset = 'validation',
class_mode='categorical')
test_datagen=ImageDataGenerator(rescale=1./255)
test_generator_xcep=test_datagen.flow_from_directory(test_dir,
target_size=(img_height, img_width),
batch_size=batch_size,
seed=random_seed,
shuffle=False,
class_mode='categorical')
nb_train_samples = len(train_generator_xcep.filenames)
nb_validation_samples = len(val_generator_xcep.filenames)
predict_size_train = int(math.ceil(nb_train_samples / batch_size))
predict_size_validation = int(math.ceil(nb_validation_samples / batch_size))
nb_test_samples = len(test_generator_xcep.filenames)
predict_size_test = int(math.ceil(nb_test_samples / batch_size))
model_name="Xception"
model = Xception(include_top=False, weights="imagenet",pooling='avg',input_shape=input_shape)
image_input =model.input
x1 = layers.GlobalAveragePooling2D()(model.get_layer("block4_sepconv1").output) #layer_27
x2 = layers.GlobalAveragePooling2D()(model.get_layer("block5_sepconv1").output) #layer 37
x3 = layers.GlobalAveragePooling2D()(model.get_layer("block14_sepconv1").output) #layer_126
out= layers.Concatenate()([x1,x2,x3])
out=layers.Dense(512,activation='relu')(out)
out=layers.Dropout(0.5)(out)
out=layers.Dense(3,activation='softmax',name= 'output')(out)
custom_xcep_model = models.Model(image_input , out)
custom_xcep_model.summary()
for layer in custom_xcep_model.layers[:115]:
layer.trainable = False
custom_xcep_model.summary()
nEpochs=100
base_lr=1e-3
opt = optimizers.Adam(lr=base_lr, beta_1=0.6, beta_2=0.8,amsgrad=True)
custom_xcep_model.compile(optimizer = opt, loss='categorical_crossentropy', metrics=['accuracy'])
checkpoint1 = callbacks.ModelCheckpoint('saved models/Xception/xception_weights.h5', monitor='val_accuracy', verbose=1, save_best_only=True, mode='max')
callbacks_list=[checkpoint1]
# Training the modified Xception network for refining the deep feature embedding
history =custom_xcep_model.fit(train_generator_xcep,
epochs=nEpochs,
validation_data=val_generator_xcep,
callbacks=callbacks_list)
bottleneck= tf.keras.Model(inputs=custom_xcep_model.input, outputs=custom_xcep_model.layers[130].output)
#Saving features of the training images
features_train =bottleneck.predict_generator(train_generator_xcep, predict_size_train)
np.save(extracted_features_dir+model_name+'_train_features.npy', features_train)
# Saving features of the validation images
features_validation =bottleneck.predict_generator(val_generator_xcep, predict_size_validation)
np.save(extracted_features_dir+model_name+'_val_features.npy', features_validation)
# Saving features of the test images
features_test = bottleneck.predict_generator(test_generator_xcep, predict_size_test)
np.save(extracted_features_dir+model_name+'_test_features.npy', features_test)
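# Illustrative follow-up (added, not part of the original script): the saved .npy feature
# files can be reloaded later for a downstream classifier or feature-selection step, with
# labels taken from the non-shuffled generators, e.g.
#
#   train_feats = np.load(extracted_features_dir + model_name + '_train_features.npy')
#   train_labels = train_generator_xcep.classes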
| 39.112676 | 153 | 0.725423 |
794097a539c0527c4d1eddbba8dada197ad99e92 | 136 | py | Python | New folder/Re5.py | piyushparastiwari/python-project | 5dba0ef4e77f1d2528f510327de4224b60b1d4ba | [
"Apache-2.0"
] | null | null | null | New folder/Re5.py | piyushparastiwari/python-project | 5dba0ef4e77f1d2528f510327de4224b60b1d4ba | [
"Apache-2.0"
] | null | null | null | New folder/Re5.py | piyushparastiwari/python-project | 5dba0ef4e77f1d2528f510327de4224b60b1d4ba | [
"Apache-2.0"
] | null | null | null | import re
st=re.findall("amit","amit and amita belongs to same family")
print(st)
print(type(st))
for val in st:
print(val)
| 17 | 62 | 0.661765 |
794099b79690a01fc125091849af22f3f674a9b8 | 12,764 | py | Python | cryptoapis/model/get_transaction_details_by_transaction_idribsl_vout.py | Crypto-APIs/Crypto_APIs_2.0_SDK_Python | c59ebd914850622b2c6500c4c30af31fb9cecf0e | [
"MIT"
] | 5 | 2021-05-17T04:45:03.000Z | 2022-03-23T12:51:46.000Z | cryptoapis/model/get_transaction_details_by_transaction_idribsl_vout.py | Crypto-APIs/Crypto_APIs_2.0_SDK_Python | c59ebd914850622b2c6500c4c30af31fb9cecf0e | [
"MIT"
] | null | null | null | cryptoapis/model/get_transaction_details_by_transaction_idribsl_vout.py | Crypto-APIs/Crypto_APIs_2.0_SDK_Python | c59ebd914850622b2c6500c4c30af31fb9cecf0e | [
"MIT"
] | 2 | 2021-06-02T07:32:26.000Z | 2022-02-12T02:36:23.000Z | """
CryptoAPIs
Crypto APIs 2.0 is a complex and innovative infrastructure layer that radically simplifies the development of any Blockchain and Crypto related applications. Organized around REST, Crypto APIs 2.0 can assist both novice Bitcoin/Ethereum enthusiasts and crypto experts with the development of their blockchain applications. Crypto APIs 2.0 provides unified endpoints and data, raw data, automatic tokens and coins forwardings, callback functionalities, and much more. # noqa: E501
The version of the OpenAPI document: 2.0.0
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from cryptoapis.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from cryptoapis.exceptions import ApiAttributeError
def lazy_import():
from cryptoapis.model.get_transaction_details_by_transaction_idribsl_script_pub_key import GetTransactionDetailsByTransactionIDRIBSLScriptPubKey
globals()['GetTransactionDetailsByTransactionIDRIBSLScriptPubKey'] = GetTransactionDetailsByTransactionIDRIBSLScriptPubKey
class GetTransactionDetailsByTransactionIDRIBSLVout(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'is_spent': (bool,), # noqa: E501
'script_pub_key': (GetTransactionDetailsByTransactionIDRIBSLScriptPubKey,), # noqa: E501
'value': (str,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'is_spent': 'isSpent', # noqa: E501
'script_pub_key': 'scriptPubKey', # noqa: E501
'value': 'value', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, is_spent, script_pub_key, value, *args, **kwargs): # noqa: E501
"""GetTransactionDetailsByTransactionIDRIBSLVout - a model defined in OpenAPI
Args:
is_spent (bool): Defines whether the output is spent or not.
script_pub_key (GetTransactionDetailsByTransactionIDRIBSLScriptPubKey):
value (str): Represents the sent/received amount.
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.is_spent = is_spent
self.script_pub_key = script_pub_key
self.value = value
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, is_spent, script_pub_key, value, *args, **kwargs): # noqa: E501
"""GetTransactionDetailsByTransactionIDRIBSLVout - a model defined in OpenAPI
Args:
is_spent (bool): Defines whether the output is spent or not.
script_pub_key (GetTransactionDetailsByTransactionIDRIBSLScriptPubKey):
value (str): Represents the sent/received amount.
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.is_spent = is_spent
self.script_pub_key = script_pub_key
self.value = value
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
| 45.423488 | 484 | 0.595425 |
794099f3733d9f29ae16bb1fe07e87791b9c79f9 | 83,138 | py | Python | lib/tests/test_transaction.py | keepkeyjon/electrum | 71c5e4f6bcfcfbf8944b1a1d84693a1a65b694bd | ["MIT"] | null | null | null | lib/tests/test_transaction.py | keepkeyjon/electrum | 71c5e4f6bcfcfbf8944b1a1d84693a1a65b694bd | ["MIT"] | null | null | null | lib/tests/test_transaction.py | keepkeyjon/electrum | 71c5e4f6bcfcfbf8944b1a1d84693a1a65b694bd | ["MIT"] | null | null | null |
import unittest
from lib import transaction
from lib.bitcoin import TYPE_ADDRESS
from lib.keystore import xpubkey_to_address
from lib.util import bh2u
unsigned_blob = '01000000012a5c9a94fcde98f5581cd00162c60a13936ceb75389ea65bf38633b424eb4031000000005701ff4c53ff0488b21e03ef2afea18000000089689bff23e1e7fb2f161daa37270a97a3d8c2e537584b2d304ecb47b86d21fc021b010d3bd425f8cf2e04824bfdf1f1f5ff1d51fadd9a41f9e3fb8dd3403b1bfe00000000ffffffff0140420f00000000001976a914230ac37834073a42146f11ef8414ae929feaafc388ac00000000'
signed_blob = '01000000012a5c9a94fcde98f5581cd00162c60a13936ceb75389ea65bf38633b424eb4031000000006c493046022100a82bbc57a0136751e5433f41cf000b3f1a99c6744775e76ec764fb78c54ee100022100f9e80b7de89de861dc6fb0c1429d5da72c2b6b2ee2406bc9bfb1beedd729d985012102e61d176da16edd1d258a200ad9759ef63adf8e14cd97f53227bae35cdb84d2f6ffffffff0140420f00000000001976a914230ac37834073a42146f11ef8414ae929feaafc388ac00000000'
v2_blob = "0200000001191601a44a81e061502b7bfbc6eaa1cef6d1e6af5308ef96c9342f71dbf4b9b5000000006b483045022100a6d44d0a651790a477e75334adfb8aae94d6612d01187b2c02526e340a7fd6c8022028bdf7a64a54906b13b145cd5dab21a26bd4b85d6044e9b97bceab5be44c2a9201210253e8e0254b0c95776786e40984c1aa32a7d03efa6bdacdea5f421b774917d346feffffff026b20fa04000000001976a914024db2e87dd7cfd0e5f266c5f212e21a31d805a588aca0860100000000001976a91421919b94ae5cefcdf0271191459157cdb41c4cbf88aca6240700"
signed_segwit_blob = "01000000000101b66d722484f2db63e827ebf41d02684fed0c6550e85015a6c9d41ef216a8a6f00000000000fdffffff0280c3c90100000000160014b65ce60857f7e7892b983851c2a8e3526d09e4ab64bac30400000000160014c478ebbc0ab2097706a98e10db7cf101839931c4024730440220789c7d47f876638c58d98733c30ae9821c8fa82b470285dcdf6db5994210bf9f02204163418bbc44af701212ad42d884cc613f3d3d831d2d0cc886f767cca6e0235e012103083a6dc250816d771faa60737bfe78b23ad619f6b458e0a1f1688e3a0605e79c00000000"
class TestBCDataStream(unittest.TestCase):
def test_compact_size(self):
s = transaction.BCDataStream()
values = [0, 1, 252, 253, 2**16-1, 2**16, 2**32-1, 2**32, 2**64-1]
for v in values:
s.write_compact_size(v)
with self.assertRaises(transaction.SerializationError):
s.write_compact_size(-1)
self.assertEqual(bh2u(s.input),
'0001fcfdfd00fdfffffe00000100feffffffffff0000000001000000ffffffffffffffffff')
for v in values:
self.assertEqual(s.read_compact_size(), v)
with self.assertRaises(transaction.SerializationError):
s.read_compact_size()
def test_string(self):
s = transaction.BCDataStream()
with self.assertRaises(transaction.SerializationError):
s.read_string()
msgs = ['Hello', ' ', 'World', '', '!']
for msg in msgs:
s.write_string(msg)
for msg in msgs:
self.assertEqual(s.read_string(), msg)
with self.assertRaises(transaction.SerializationError):
s.read_string()
def test_bytes(self):
s = transaction.BCDataStream()
s.write(b'foobar')
self.assertEqual(s.read_bytes(3), b'foo')
self.assertEqual(s.read_bytes(2), b'ba')
self.assertEqual(s.read_bytes(4), b'r')
self.assertEqual(s.read_bytes(1), b'')
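For context on what test_compact_size is asserting: Bitcoin's CompactSize (varint) encoding stores values below 253 in a single byte and larger values as a 0xfd/0xfe/0xff marker followed by 2, 4 or 8 little-endian bytes. A stand-alone sketch of that rule (written here for illustration, not taken from the Electrum sources):

def compact_size(n: int) -> bytes:
    # single byte for 0..252
    if n < 0:
        raise ValueError("negative values cannot be encoded")
    if n < 253:
        return bytes([n])
    # marker byte + fixed-width little-endian payload
    if n <= 0xffff:
        return b'\xfd' + n.to_bytes(2, 'little')
    if n <= 0xffffffff:
        return b'\xfe' + n.to_bytes(4, 'little')
    return b'\xff' + n.to_bytes(8, 'little')

# e.g. compact_size(253).hex() == 'fdfd00' and compact_size(2**16).hex() == 'fe00000100';
# concatenating the encodings of the `values` list reproduces the hex string asserted above.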
class TestTransaction(unittest.TestCase):
def test_tx_unsigned(self):
expected = {
'inputs': [{
'type': 'p2pkh',
'address': '1446oU3z268EeFgfcwJv6X2VBXHfoYxfuD',
'num_sig': 1,
'prevout_hash': '3140eb24b43386f35ba69e3875eb6c93130ac66201d01c58f598defc949a5c2a',
'prevout_n': 0,
'pubkeys': ['02e61d176da16edd1d258a200ad9759ef63adf8e14cd97f53227bae35cdb84d2f6'],
'scriptSig': '01ff4c53ff0488b21e03ef2afea18000000089689bff23e1e7fb2f161daa37270a97a3d8c2e537584b2d304ecb47b86d21fc021b010d3bd425f8cf2e04824bfdf1f1f5ff1d51fadd9a41f9e3fb8dd3403b1bfe00000000',
'sequence': 4294967295,
'signatures': [None],
'x_pubkeys': ['ff0488b21e03ef2afea18000000089689bff23e1e7fb2f161daa37270a97a3d8c2e537584b2d304ecb47b86d21fc021b010d3bd425f8cf2e04824bfdf1f1f5ff1d51fadd9a41f9e3fb8dd3403b1bfe00000000']}],
'lockTime': 0,
'outputs': [{
'address': '14CHYaaByjJZpx4oHBpfDMdqhTyXnZ3kVs',
'prevout_n': 0,
'scriptPubKey': '76a914230ac37834073a42146f11ef8414ae929feaafc388ac',
'type': TYPE_ADDRESS,
'value': 1000000}],
'version': 1
}
tx = transaction.Transaction(unsigned_blob)
self.assertEqual(tx.deserialize(), expected)
self.assertEqual(tx.deserialize(), None)
self.assertEqual(tx.as_dict(), {'hex': unsigned_blob, 'complete': False, 'final': True})
self.assertEqual(tx.get_outputs(), [('14CHYaaByjJZpx4oHBpfDMdqhTyXnZ3kVs', 1000000)])
self.assertEqual(tx.get_output_addresses(), ['14CHYaaByjJZpx4oHBpfDMdqhTyXnZ3kVs'])
self.assertTrue(tx.has_address('14CHYaaByjJZpx4oHBpfDMdqhTyXnZ3kVs'))
self.assertTrue(tx.has_address('1446oU3z268EeFgfcwJv6X2VBXHfoYxfuD'))
self.assertFalse(tx.has_address('1CQj15y1N7LDHp7wTt28eoD1QhHgFgxECH'))
self.assertEqual(tx.serialize(), unsigned_blob)
tx.update_signatures(signed_blob)
self.assertEqual(tx.raw, signed_blob)
tx.update(unsigned_blob)
tx.raw = None
blob = str(tx)
self.assertEqual(transaction.deserialize(blob), expected)
def test_tx_signed(self):
expected = {
'inputs': [{
'type': 'p2pkh',
'address': '1446oU3z268EeFgfcwJv6X2VBXHfoYxfuD',
'num_sig': 1,
'prevout_hash': '3140eb24b43386f35ba69e3875eb6c93130ac66201d01c58f598defc949a5c2a',
'prevout_n': 0,
'pubkeys': ['02e61d176da16edd1d258a200ad9759ef63adf8e14cd97f53227bae35cdb84d2f6'],
'scriptSig': '493046022100a82bbc57a0136751e5433f41cf000b3f1a99c6744775e76ec764fb78c54ee100022100f9e80b7de89de861dc6fb0c1429d5da72c2b6b2ee2406bc9bfb1beedd729d985012102e61d176da16edd1d258a200ad9759ef63adf8e14cd97f53227bae35cdb84d2f6',
'sequence': 4294967295,
'signatures': ['3046022100a82bbc57a0136751e5433f41cf000b3f1a99c6744775e76ec764fb78c54ee100022100f9e80b7de89de861dc6fb0c1429d5da72c2b6b2ee2406bc9bfb1beedd729d98501'],
'x_pubkeys': ['02e61d176da16edd1d258a200ad9759ef63adf8e14cd97f53227bae35cdb84d2f6']}],
'lockTime': 0,
'outputs': [{
'address': '14CHYaaByjJZpx4oHBpfDMdqhTyXnZ3kVs',
'prevout_n': 0,
'scriptPubKey': '76a914230ac37834073a42146f11ef8414ae929feaafc388ac',
'type': TYPE_ADDRESS,
'value': 1000000}],
'version': 1
}
tx = transaction.Transaction(signed_blob)
self.assertEqual(tx.deserialize(), expected)
self.assertEqual(tx.deserialize(), None)
self.assertEqual(tx.as_dict(), {'hex': signed_blob, 'complete': True, 'final': True})
self.assertEqual(tx.serialize(), signed_blob)
tx.update_signatures(signed_blob)
self.assertEqual(tx.estimated_total_size(), 193)
self.assertEqual(tx.estimated_base_size(), 193)
self.assertEqual(tx.estimated_witness_size(), 0)
self.assertEqual(tx.estimated_weight(), 772)
self.assertEqual(tx.estimated_size(), 193)
def test_estimated_output_size(self):
estimated_output_size = transaction.Transaction.estimated_output_size
self.assertEqual(estimated_output_size('14gcRovpkCoGkCNBivQBvw7eso7eiNAbxG'), 34)
self.assertEqual(estimated_output_size('35ZqQJcBQMZ1rsv8aSuJ2wkC7ohUCQMJbT'), 32)
self.assertEqual(estimated_output_size('bc1q3g5tmkmlvxryhh843v4dz026avatc0zzr6h3af'), 31)
self.assertEqual(estimated_output_size('bc1qnvks7gfdu72de8qv6q6rhkkzu70fqz4wpjzuxjf6aydsx7wxfwcqnlxuv3'), 43)
# TODO other tests for segwit tx
def test_tx_signed_segwit(self):
tx = transaction.Transaction(signed_segwit_blob)
self.assertEqual(tx.estimated_total_size(), 222)
self.assertEqual(tx.estimated_base_size(), 113)
self.assertEqual(tx.estimated_witness_size(), 109)
self.assertEqual(tx.estimated_weight(), 561)
self.assertEqual(tx.estimated_size(), 141)
def test_errors(self):
with self.assertRaises(TypeError):
transaction.Transaction.pay_script(output_type=None, addr='')
with self.assertRaises(BaseException):
xpubkey_to_address('')
def test_parse_xpub(self):
res = xpubkey_to_address('fe4e13b0f311a55b8a5db9a32e959da9f011b131019d4cebe6141b9e2c93edcbfc0954c358b062a9f94111548e50bde5847a3096b8b7872dcffadb0e9579b9017b01000200')
self.assertEqual(res, ('04ee98d63800824486a1cf5b4376f2f574d86e0a3009a6448105703453f3368e8e1d8d090aaecdd626a45cc49876709a3bbb6dc96a4311b3cac03e225df5f63dfc', '19h943e4diLc68GXW7G75QNe2KWuMu7BaJ'))
def test_version_field(self):
tx = transaction.Transaction(v2_blob)
self.assertEqual(tx.txid(), "b97f9180173ab141b61b9f944d841e60feec691d6daab4d4d932b24dd36606fe")
#####
def _run_naive_tests_on_tx(self, raw_tx, txid):
tx = transaction.Transaction(raw_tx)
self.assertEqual(txid, tx.txid())
self.assertEqual(raw_tx, tx.serialize())
self.assertTrue(tx.estimated_size() >= 0)
def test_txid_coinbase_to_p2pk(self):
raw_tx = '01000000010000000000000000000000000000000000000000000000000000000000000000ffffffff4103400d0302ef02062f503253482f522cfabe6d6dd90d39663d10f8fd25ec88338295d4c6ce1c90d4aeb368d8bdbadcc1da3b635801000000000000000474073e03ffffffff013c25cf2d01000000434104b0bd634234abbb1ba1e986e884185c61cf43e001f9137f23c2c409273eb16e6537a576782eba668a7ef8bd3b3cfb1edb7117ab65129b8a2e681f3c1e0908ef7bac00000000'
txid = 'dbaf14e1c476e76ea05a8b71921a46d6b06f0a950f17c5f9f1a03b8fae467f10'
self._run_naive_tests_on_tx(raw_tx, txid)
def test_txid_coinbase_to_p2pkh(self):
raw_tx = '01000000010000000000000000000000000000000000000000000000000000000000000000ffffffff25033ca0030400001256124d696e656420627920425443204775696c640800000d41000007daffffffff01c00d1298000000001976a91427a1f12771de5cc3b73941664b2537c15316be4388ac00000000'
txid = '4328f9311c6defd9ae1bd7f4516b62acf64b361eb39dfcf09d9925c5fd5c61e8'
self._run_naive_tests_on_tx(raw_tx, txid)
def test_txid_segwit_coinbase_to_p2pk(self):
raw_tx = '020000000001010000000000000000000000000000000000000000000000000000000000000000ffffffff0502cd010101ffffffff0240be402500000000232103f4e686cdfc96f375e7c338c40c9b85f4011bb843a3e62e46a1de424ef87e9385ac0000000000000000266a24aa21a9ede2f61c3f71d1defd3fa999dfa36953755c690689799962b48bebd836974e8cf90120000000000000000000000000000000000000000000000000000000000000000000000000'
txid = 'fb5a57c24e640a6d8d831eb6e41505f3d54363c507da3733b098d820e3803301'
self._run_naive_tests_on_tx(raw_tx, txid)
def test_txid_segwit_coinbase_to_p2pkh(self):
raw_tx = '020000000001010000000000000000000000000000000000000000000000000000000000000000ffffffff0502c3010101ffffffff0240be4025000000001976a9141ea896d897483e0eb33dd6423f4a07970d0a0a2788ac0000000000000000266a24aa21a9ede2f61c3f71d1defd3fa999dfa36953755c690689799962b48bebd836974e8cf90120000000000000000000000000000000000000000000000000000000000000000000000000'
txid = 'ed3d100577477d799107eba97e76770b3efa253c7200e9abfb43da5d2b33513e'
self._run_naive_tests_on_tx(raw_tx, txid)
def test_txid_p2pk_to_p2pkh(self):
raw_tx = '010000000118231a31d2df84f884ced6af11dc24306319577d4d7c340124a7e2dd9c314077000000004847304402200b6c45891aed48937241907bc3e3868ee4c792819821fcde33311e5a3da4789a02205021b59692b652a01f5f009bd481acac2f647a7d9c076d71d85869763337882e01fdffffff016c95052a010000001976a9149c4891e7791da9e622532c97f43863768264faaf88ac00000000'
txid = '90ba90a5b115106d26663fce6c6215b8699c5d4b2672dd30756115f3337dddf9'
self._run_naive_tests_on_tx(raw_tx, txid)
def test_txid_p2pk_to_p2sh(self):
raw_tx = '0100000001e4643183d6497823576d17ac2439fb97eba24be8137f312e10fcc16483bb2d070000000048473044022032bbf0394dfe3b004075e3cbb3ea7071b9184547e27f8f73f967c4b3f6a21fa4022073edd5ae8b7b638f25872a7a308bb53a848baa9b9cc70af45fcf3c683d36a55301fdffffff011821814a0000000017a9143c640bc28a346749c09615b50211cb051faff00f8700000000'
txid = '172bdf5a690b874385b98d7ab6f6af807356f03a26033c6a65ab79b4ac2085b5'
self._run_naive_tests_on_tx(raw_tx, txid)
def test_txid_p2pk_to_p2wpkh(self):
raw_tx = '01000000015e5e2bf15f5793fdfd01e0ccd380033797ed2d4dba9498426ca84904176c26610000000049483045022100c77aff69f7ab4bb148f9bccffc5a87ee893c4f7f7f96c97ba98d2887a0f632b9022046367bdb683d58fa5b2e43cfc8a9c6d57724a27e03583942d8e7b9afbfeea5ab01fdffffff017289824a00000000160014460fc70f208bffa9abf3ae4abbd2f629d9cdcf5900000000'
txid = 'ca554b1014952f900aa8cf6e7ab02137a6fdcf933ad6a218de3891a2ef0c350d'
self._run_naive_tests_on_tx(raw_tx, txid)
def test_txid_p2pkh_to_p2pkh(self):
raw_tx = '0100000001f9dd7d33f315617530dd72264b5d9c69b815626cce3f66266d1015b1a590ba90000000006a4730440220699bfee3d280a499daf4af5593e8750b54fef0557f3c9f717bfa909493a84f60022057718eec7985b7796bb8630bf6ea2e9bf2892ac21bd6ab8f741a008537139ffe012103b4289890b40590447b57f773b5843bf0400e9cead08be225fac587b3c2a8e973fdffffff01ec24052a010000001976a914ce9ff3d15ed5f3a3d94b583b12796d063879b11588ac00000000'
txid = '24737c68f53d4b519939119ed83b2a8d44d716d7f3ca98bcecc0fbb92c2085ce'
self._run_naive_tests_on_tx(raw_tx, txid)
def test_txid_p2pkh_to_p2sh(self):
raw_tx = '010000000195232c30f6611b9f2f82ec63f5b443b132219c425e1824584411f3d16a7a54bc000000006b4830450221009f39ac457dc8ff316e5cc03161c9eff6212d8694ccb88d801dbb32e85d8ed100022074230bb05e99b85a6a50d2b71e7bf04d80be3f1d014ea038f93943abd79421d101210317be0f7e5478e087453b9b5111bdad586038720f16ac9658fd16217ffd7e5785fdffffff0200e40b540200000017a914d81df3751b9e7dca920678cc19cac8d7ec9010b08718dfd63c2c0000001976a914303c42b63569ff5b390a2016ff44651cd84c7c8988acc7010000'
txid = '155e4740fa59f374abb4e133b87247dccc3afc233cb97c2bf2b46bba3094aedc'
self._run_naive_tests_on_tx(raw_tx, txid)
def test_txid_p2pkh_to_p2wpkh(self):
raw_tx = '0100000001ce85202cb9fbc0ecbc98caf3d716d7448d2a3bd89e113999514b3df5687c7324000000006b483045022100adab7b6cb1179079c9dfc0021f4db0346730b7c16555fcc4363059dcdd95f653022028bcb816f4fb98615fb8f4b18af3ad3708e2d72f94a6466cc2736055860422cf012102a16a25148dd692462a691796db0a4a5531bcca970a04107bf184a2c9f7fd8b12fdffffff012eb6042a010000001600147d0170de18eecbe84648979d52b666dddee0b47400000000'
txid = 'ed29e100499e2a3a64a2b0cb3a68655b9acd690d29690fa541be530462bf3d3c'
self._run_naive_tests_on_tx(raw_tx, txid)
def test_txid_p2sh_to_p2pkh(self):
raw_tx = '01000000000101f9823f87af35d158e7dc81a67011f4e511e3f6cab07ac108e524b0ff8b950b39000000002322002041f0237866eb72e4a75cd6faf5ccd738703193907d883aa7b3a8169c636706a9fdffffff020065cd1d000000001976a9148150cd6cf729e7e262699875fec1f760b0aab3cc88acc46f9a3b0000000017a91433ccd0f95a7b9d8eef68be40bb59c64d6e14d87287040047304402205ca97126a5956c2deaa956a2006d79a348775d727074a04b71d9c18eb5e5525402207b9353497af15881100a2786adab56c8930c02d46cc1a8b55496c06e22d3459b01483045022100b4fa898057927c2d920ae79bca752dda58202ea8617d3e6ed96cbd5d1c0eb2fc02200824c0e742d1b4d643cec439444f5d8779c18d4f42c2c87cce24044a3babf2df0147522102db78786b3c214826bd27010e3c663b02d67144499611ee3f2461c633eb8f1247210377082028c124098b59a5a1e0ea7fd3ebca72d59c793aecfeedd004304bac15cd52aec9010000'
txid = '17e1d498ba82503e3bfa81ac4897a57e33f3d36b41bcf4765ba604466c478986'
self._run_naive_tests_on_tx(raw_tx, txid)
def test_txid_p2sh_to_p2sh(self):
raw_tx = '01000000000101b58520acb479ab656a3c03263af0567380aff6b67a8db98543870b695adf2b170000000017160014cfd2b9f7ed9d4d4429ed6946dbb3315f75e85f14fdffffff020065cd1d0000000017a91485f5681bec38f9f07ae9790d7f27c2bb90b5b63c87106ab32c0000000017a914ff402e164dfce874435641ae9ac41fc6fb14c4e18702483045022100b3d1c89c7c92151ed1df78815924569446782776b6a2c170ca5d74c5dd1ad9b102201d7bab1974fd2aa66546dd15c1f1e276d787453cec31b55a2bd97b050abf20140121024a1742ece86df3dbce4717c228cf51e625030cef7f5e6dde33a4fffdd17569eac7010000'
txid = 'ead0e7abfb24ddbcd6b89d704d7a6091e43804a458baa930adf6f1cb5b6b42f7'
self._run_naive_tests_on_tx(raw_tx, txid)
def test_txid_p2sh_to_p2wpkh(self):
raw_tx = '010000000001018689476c4604a65b76f4bc416bd3f3337ea59748ac81fa3b3e5082ba98d4e1170100000023220020ae40340707f9726c0f453c3d47c96e7f3b7b4b85608eb3668b69bbef9c7ab374fdffffff0218b2cc1d0000000017a914f2fdd81e606ff2ab804d7bb46bf8838a711c277b870065cd1d0000000016001496ad8959c1f0382984ecc4da61c118b4c8751e5104004730440220387b9e7d402fbcada9ba55a27a8d0563eafa9904ebd2f8f7e3d86e4b45bc0ec202205f37fa0e2bf8cbd384f804562651d7c6f69adce5db4c1a5b9103250a47f73e6b01473044022074903f4dd4fd6b32289be909eb5109924740daa55e79be6dbd728687683f9afa02205d934d981ca12cbec450611ca81dc4127f8da5e07dd63d41049380502de3f15401475221025c3810b37147105106cef970f9b91d3735819dee4882d515c1187dbd0b8f0c792103e007c492323084f1c103beff255836408af89bb9ae7f2fcf60502c28ff4b0c9152aeca010000'
txid = '6f294c84cbd0241650931b4c1be3dfb2f175d682c7a9538b30b173e1083deed3'
self._run_naive_tests_on_tx(raw_tx, txid)
def test_txid_p2wpkh_to_p2pkh(self):
raw_tx = '0100000000010197e6bf4a70bc118e3a8d9842ed80422e335679dfc29b5ba0f9123f6a5863b8470000000000fdffffff02402bca7f130000001600146f579c953d9e7e7719f2baa20bde22eb5f24119200e87648170000001976a9140cd8fa5fd81c3acf33f93efd179b388de8dd693388ac0247304402204ff33b3ea8fb270f62409bfc257457ca5eb1fec5e4d3a7c11aa487207e131d4d022032726b998e338e5245746716e5cd0b40d32b69d1535c3d841f049d98a5d819b1012102dc3ce3220363aff579eb2c45c973e8b186a829c987c3caea77c61975666e7d1bc8010000'
txid = 'c721ed35767a3a209b688e68e3bb136a72d2b631fe81c56be8bdbb948c343dbc'
self._run_naive_tests_on_tx(raw_tx, txid)
def test_txid_p2wpkh_to_p2sh(self):
raw_tx = '010000000001013c3dbf620453be41a50f69290d69cd9a5b65683acbb0a2643a2a9e4900e129ed0000000000fdffffff02002f68590000000017a914c7c4dcd0ddf70f15c6df13b4a4d56e9f13c49b2787a0429cd000000000160014e514e3ecf89731e7853e4f3a20983484c569d3910247304402205368cc548209303db5a8f2ebc282bd0f7af0d080ce0f7637758587f94d3971fb0220098cec5752554758bc5fa4de332b980d5e0054a807541581dc5e4de3ed29647501210233717cd73d95acfdf6bd72c4fb5df27cd6bd69ce947daa3f4a442183a97877efc8010000'
txid = '390b958bffb024e508c17ab0caf6e311e5f41170a681dce758d135af873f82f9'
self._run_naive_tests_on_tx(raw_tx, txid)
def test_txid_p2wpkh_to_p2wpkh(self):
raw_tx = '010000000001010d350cefa29138de18a2d63a93cffda63721b07a6ecfa80a902f9514104b55ca0000000000fdffffff012a4a824a00000000160014b869999d342a5d42d6dc7af1efc28456da40297a024730440220475bb55814a52ea1036919e4408218c693b8bf93637b9f54c821b5baa3b846e102207276ed7a79493142c11fb01808a4142bbdd525ae7bdccdf8ecb7b8e3c856b4d90121024cdeaca7a53a7e23a1edbe9260794eaa83063534b5f111ee3c67d8b0cb88f0eec8010000'
txid = '51087ece75c697cc872d2e643d646b0f3e1f2666fa1820b7bff4343d50dd680e'
self._run_naive_tests_on_tx(raw_tx, txid)
def test_txid_input_p2wsh_p2sh_not_multisig(self):
raw_tx = '0100000000010160f84fdcda039c3ca1b20038adea2d49a53db92f7c467e8def13734232bb610804000000232200202814720f16329ab81cb8867c4d447bd13255931f23e6655944c9ada1797fcf88ffffffff0ba3dcfc04000000001976a91488124a57c548c9e7b1dd687455af803bd5765dea88acc9f44900000000001976a914da55045a0ccd40a56ce861946d13eb861eb5f2d788ac49825e000000000017a914ca34d4b190e36479aa6e0023cfe0a8537c6aa8dd87680c0d00000000001976a914651102524c424b2e7c44787c4f21e4c54dffafc088acf02fa9000000000017a914ee6c596e6f7066466d778d4f9ba633a564a6e95d874d250900000000001976a9146ca7976b48c04fd23867748382ee8401b1d27c2988acf5119600000000001976a914cf47d5dcdba02fd547c600697097252d38c3214a88ace08a12000000000017a914017bef79d92d5ec08c051786bad317e5dd3befcf87e3d76201000000001976a9148ec1b88b66d142bcbdb42797a0fd402c23e0eec288ac718f6900000000001976a914e66344472a224ce6f843f2989accf435ae6a808988ac65e51300000000001976a914cad6717c13a2079066f876933834210ebbe68c3f88ac0347304402201a4907c4706104320313e182ecbb1b265b2d023a79586671386de86bb47461590220472c3db9fc99a728ebb9b555a72e3481d20b181bd059a9c1acadfb853d90c96c01210338a46f2a54112fef8803c8478bc17e5f8fc6a5ec276903a946c1fafb2e3a8b181976a914eda8660085bf607b82bd18560ca8f3a9ec49178588ac00000000'
txid = 'e9933221a150f78f9f224899f8568ff6422ffcc28ca3d53d87936368ff7c4b1d'
self._run_naive_tests_on_tx(raw_tx, txid)
# input: p2sh, not multisig
def test_txid_regression_issue_3899(self):
raw_tx = '0100000004328685b0352c981d3d451b471ae3bfc78b82565dc2a54049a81af273f0a9fd9c010000000b0009630330472d5fae685bffffffff328685b0352c981d3d451b471ae3bfc78b82565dc2a54049a81af273f0a9fd9c020000000b0009630359646d5fae6858ffffffff328685b0352c981d3d451b471ae3bfc78b82565dc2a54049a81af273f0a9fd9c030000000b000963034bd4715fae6854ffffffff328685b0352c981d3d451b471ae3bfc78b82565dc2a54049a81af273f0a9fd9c040000000b000963036de8705fae6860ffffffff0130750000000000001976a914b5abca61d20f9062fb1fdbb880d9d93bac36675188ac00000000'
txid = 'f570d5d1e965ee61bcc7005f8fefb1d3abbed9d7ddbe035e2a68fa07e5fc4a0d'
self._run_naive_tests_on_tx(raw_tx, txid)
def test_txid_negative_version_num(self):
raw_tx = 'f0b47b9a01ecf5e5c3bbf2cf1f71ecdc7f708b0b222432e914b394e24aad1494a42990ddfc000000008b483045022100852744642305a99ad74354e9495bf43a1f96ded470c256cd32e129290f1fa191022030c11d294af6a61b3da6ed2c0c296251d21d113cfd71ec11126517034b0dcb70014104a0fe6e4a600f859a0932f701d3af8e0ecd4be886d91045f06a5a6b931b95873aea1df61da281ba29cadb560dad4fc047cf47b4f7f2570da4c0b810b3dfa7e500ffffffff0240420f00000000001976a9147eeacb8a9265cd68c92806611f704fc55a21e1f588ac05f00d00000000001976a914eb3bd8ccd3ba6f1570f844b59ba3e0a667024a6a88acff7f0000'
txid = 'c659729a7fea5071361c2c1a68551ca2bf77679b27086cc415adeeb03852e369'
self._run_naive_tests_on_tx(raw_tx, txid)
# these transactions are from Bitcoin Core unit tests --->
# https://github.com/bitcoin/bitcoin/blob/11376b5583a283772c82f6d32d0007cdbf5b8ef0/src/test/data/tx_valid.json
def test_txid_bitcoin_core_0001(self):
raw_tx = '0100000001b14bdcbc3e01bdaad36cc08e81e69c82e1060bc14e518db2b49aa43ad90ba26000000000490047304402203f16c6f40162ab686621ef3000b04e75418a0c0cb2d8aebeac894ae360ac1e780220ddc15ecdfc3507ac48e1681a33eb60996631bf6bf5bc0a0682c4db743ce7ca2b01ffffffff0140420f00000000001976a914660d4ef3a743e3e696ad990364e555c271ad504b88ac00000000'
txid = '23b397edccd3740a74adb603c9756370fafcde9bcc4483eb271ecad09a94dd63'
self._run_naive_tests_on_tx(raw_tx, txid)
def test_txid_bitcoin_core_0002(self):
raw_tx = '0100000001b14bdcbc3e01bdaad36cc08e81e69c82e1060bc14e518db2b49aa43ad90ba260000000004a0048304402203f16c6f40162ab686621ef3000b04e75418a0c0cb2d8aebeac894ae360ac1e780220ddc15ecdfc3507ac48e1681a33eb60996631bf6bf5bc0a0682c4db743ce7ca2bab01ffffffff0140420f00000000001976a914660d4ef3a743e3e696ad990364e555c271ad504b88ac00000000'
txid = 'fcabc409d8e685da28536e1e5ccc91264d755cd4c57ed4cae3dbaa4d3b93e8ed'
self._run_naive_tests_on_tx(raw_tx, txid)
def test_txid_bitcoin_core_0003(self):
raw_tx = '0100000001b14bdcbc3e01bdaad36cc08e81e69c82e1060bc14e518db2b49aa43ad90ba260000000004a01ff47304402203f16c6f40162ab686621ef3000b04e75418a0c0cb2d8aebeac894ae360ac1e780220ddc15ecdfc3507ac48e1681a33eb60996631bf6bf5bc0a0682c4db743ce7ca2b01ffffffff0140420f00000000001976a914660d4ef3a743e3e696ad990364e555c271ad504b88ac00000000'
txid = 'c9aa95f2c48175fdb70b34c23f1c3fc44f869b073a6f79b1343fbce30c3cb575'
self._run_naive_tests_on_tx(raw_tx, txid)
def test_txid_bitcoin_core_0004(self):
raw_tx = '0100000001b14bdcbc3e01bdaad36cc08e81e69c82e1060bc14e518db2b49aa43ad90ba26000000000495147304402203f16c6f40162ab686621ef3000b04e75418a0c0cb2d8aebeac894ae360ac1e780220ddc15ecdfc3507ac48e1681a33eb60996631bf6bf5bc0a0682c4db743ce7ca2b01ffffffff0140420f00000000001976a914660d4ef3a743e3e696ad990364e555c271ad504b88ac00000000'
txid = 'da94fda32b55deb40c3ed92e135d69df7efc4ee6665e0beb07ef500f407c9fd2'
self._run_naive_tests_on_tx(raw_tx, txid)
def test_txid_bitcoin_core_0005(self):
raw_tx = '0100000001b14bdcbc3e01bdaad36cc08e81e69c82e1060bc14e518db2b49aa43ad90ba26000000000494f47304402203f16c6f40162ab686621ef3000b04e75418a0c0cb2d8aebeac894ae360ac1e780220ddc15ecdfc3507ac48e1681a33eb60996631bf6bf5bc0a0682c4db743ce7ca2b01ffffffff0140420f00000000001976a914660d4ef3a743e3e696ad990364e555c271ad504b88ac00000000'
txid = 'f76f897b206e4f78d60fe40f2ccb542184cfadc34354d3bb9bdc30cc2f432b86'
self._run_naive_tests_on_tx(raw_tx, txid)
def test_txid_bitcoin_core_0006(self):
raw_tx = '01000000010276b76b07f4935c70acf54fbf1f438a4c397a9fb7e633873c4dd3bc062b6b40000000008c493046022100d23459d03ed7e9511a47d13292d3430a04627de6235b6e51a40f9cd386f2abe3022100e7d25b080f0bb8d8d5f878bba7d54ad2fda650ea8d158a33ee3cbd11768191fd004104b0e2c879e4daf7b9ab68350228c159766676a14f5815084ba166432aab46198d4cca98fa3e9981d0a90b2effc514b76279476550ba3663fdcaff94c38420e9d5000000000100093d00000000001976a9149a7b0f3b80c6baaeedce0a0842553800f832ba1f88ac00000000'
txid = 'c99c49da4c38af669dea436d3e73780dfdb6c1ecf9958baa52960e8baee30e73'
self._run_naive_tests_on_tx(raw_tx, txid)
def test_txid_bitcoin_core_0007(self):
raw_tx = '01000000010001000000000000000000000000000000000000000000000000000000000000000000006a473044022067288ea50aa799543a536ff9306f8e1cba05b9c6b10951175b924f96732555ed022026d7b5265f38d21541519e4a1e55044d5b9e17e15cdbaf29ae3792e99e883e7a012103ba8c8b86dea131c22ab967e6dd99bdae8eff7a1f75a2c35f1f944109e3fe5e22ffffffff010000000000000000015100000000'
txid = 'e41ffe19dff3cbedb413a2ca3fbbcd05cb7fd7397ffa65052f8928aa9c700092'
self._run_naive_tests_on_tx(raw_tx, txid)
def test_txid_bitcoin_core_0008(self):
raw_tx = '01000000023d6cf972d4dff9c519eff407ea800361dd0a121de1da8b6f4138a2f25de864b4000000008a4730440220ffda47bfc776bcd269da4832626ac332adfca6dd835e8ecd83cd1ebe7d709b0e022049cffa1cdc102a0b56e0e04913606c70af702a1149dc3b305ab9439288fee090014104266abb36d66eb4218a6dd31f09bb92cf3cfa803c7ea72c1fc80a50f919273e613f895b855fb7465ccbc8919ad1bd4a306c783f22cd3227327694c4fa4c1c439affffffff21ebc9ba20594737864352e95b727f1a565756f9d365083eb1a8596ec98c97b7010000008a4730440220503ff10e9f1e0de731407a4a245531c9ff17676eda461f8ceeb8c06049fa2c810220c008ac34694510298fa60b3f000df01caa244f165b727d4896eb84f81e46bcc4014104266abb36d66eb4218a6dd31f09bb92cf3cfa803c7ea72c1fc80a50f919273e613f895b855fb7465ccbc8919ad1bd4a306c783f22cd3227327694c4fa4c1c439affffffff01f0da5200000000001976a914857ccd42dded6df32949d4646dfa10a92458cfaa88ac00000000'
txid = 'f7fdd091fa6d8f5e7a8c2458f5c38faffff2d3f1406b6e4fe2c99dcc0d2d1cbb'
self._run_naive_tests_on_tx(raw_tx, txid)
def test_txid_bitcoin_core_0009(self):
raw_tx = '01000000020002000000000000000000000000000000000000000000000000000000000000000000000151ffffffff0001000000000000000000000000000000000000000000000000000000000000000000006b483045022100c9cdd08798a28af9d1baf44a6c77bcc7e279f47dc487c8c899911bc48feaffcc0220503c5c50ae3998a733263c5c0f7061b483e2b56c4c41b456e7d2f5a78a74c077032102d5c25adb51b61339d2b05315791e21bbe80ea470a49db0135720983c905aace0ffffffff010000000000000000015100000000'
txid = 'b56471690c3ff4f7946174e51df68b47455a0d29344c351377d712e6d00eabe5'
self._run_naive_tests_on_tx(raw_tx, txid)
def test_txid_bitcoin_core_0010(self):
raw_tx = '010000000100010000000000000000000000000000000000000000000000000000000000000000000009085768617420697320ffffffff010000000000000000015100000000'
txid = '99517e5b47533453cc7daa332180f578be68b80370ecfe84dbfff7f19d791da4'
self._run_naive_tests_on_tx(raw_tx, txid)
def test_txid_bitcoin_core_0011(self):
raw_tx = '01000000010001000000000000000000000000000000000000000000000000000000000000000000006e493046022100c66c9cdf4c43609586d15424c54707156e316d88b0a1534c9e6b0d4f311406310221009c0fe51dbc9c4ab7cc25d3fdbeccf6679fe6827f08edf2b4a9f16ee3eb0e438a0123210338e8034509af564c62644c07691942e0c056752008a173c89f60ab2a88ac2ebfacffffffff010000000000000000015100000000'
txid = 'ab097537b528871b9b64cb79a769ae13c3c3cd477cc9dddeebe657eabd7fdcea'
self._run_naive_tests_on_tx(raw_tx, txid)
def test_txid_bitcoin_core_0012(self):
raw_tx = '01000000010001000000000000000000000000000000000000000000000000000000000000000000006e493046022100e1eadba00d9296c743cb6ecc703fd9ddc9b3cd12906176a226ae4c18d6b00796022100a71aef7d2874deff681ba6080f1b278bac7bb99c61b08a85f4311970ffe7f63f012321030c0588dc44d92bdcbf8e72093466766fdc265ead8db64517b0c542275b70fffbacffffffff010040075af0750700015100000000'
txid = '4d163e00f1966e9a1eab8f9374c3e37f4deb4857c247270e25f7d79a999d2dc9'
self._run_naive_tests_on_tx(raw_tx, txid)
def test_txid_bitcoin_core_0013(self):
raw_tx = '01000000010001000000000000000000000000000000000000000000000000000000000000000000006d483045022027deccc14aa6668e78a8c9da3484fbcd4f9dcc9bb7d1b85146314b21b9ae4d86022100d0b43dece8cfb07348de0ca8bc5b86276fa88f7f2138381128b7c36ab2e42264012321029bb13463ddd5d2cc05da6e84e37536cb9525703cfd8f43afdb414988987a92f6acffffffff020040075af075070001510000000000000000015100000000'
txid = '9fe2ef9dde70e15d78894a4800b7df3bbfb1addb9a6f7d7c204492fdb6ee6cc4'
self._run_naive_tests_on_tx(raw_tx, txid)
def test_txid_bitcoin_core_0014(self):
raw_tx = '01000000010000000000000000000000000000000000000000000000000000000000000000ffffffff025151ffffffff010000000000000000015100000000'
txid = '99d3825137602e577aeaf6a2e3c9620fd0e605323dc5265da4a570593be791d4'
self._run_naive_tests_on_tx(raw_tx, txid)
def test_txid_bitcoin_core_0015(self):
raw_tx = '01000000010000000000000000000000000000000000000000000000000000000000000000ffffffff6451515151515151515151515151515151515151515151515151515151515151515151515151515151515151515151515151515151515151515151515151515151515151515151515151515151515151515151515151515151515151515151515151515151ffffffff010000000000000000015100000000'
txid = 'c0d67409923040cc766bbea12e4c9154393abef706db065ac2e07d91a9ba4f84'
self._run_naive_tests_on_tx(raw_tx, txid)
def test_txid_bitcoin_core_0016(self):
raw_tx = '010000000200010000000000000000000000000000000000000000000000000000000000000000000049483045022100d180fd2eb9140aeb4210c9204d3f358766eb53842b2a9473db687fa24b12a3cc022079781799cd4f038b85135bbe49ec2b57f306b2bb17101b17f71f000fcab2b6fb01ffffffff0002000000000000000000000000000000000000000000000000000000000000000000004847304402205f7530653eea9b38699e476320ab135b74771e1c48b81a5d041e2ca84b9be7a802200ac8d1f40fb026674fe5a5edd3dea715c27baa9baca51ed45ea750ac9dc0a55e81ffffffff010100000000000000015100000000'
txid = 'c610d85d3d5fdf5046be7f123db8a0890cee846ee58de8a44667cfd1ab6b8666'
self._run_naive_tests_on_tx(raw_tx, txid)
def test_txid_bitcoin_core_0017(self):
raw_tx = '01000000020001000000000000000000000000000000000000000000000000000000000000000000004948304502203a0f5f0e1f2bdbcd04db3061d18f3af70e07f4f467cbc1b8116f267025f5360b022100c792b6e215afc5afc721a351ec413e714305cb749aae3d7fee76621313418df101010000000002000000000000000000000000000000000000000000000000000000000000000000004847304402205f7530653eea9b38699e476320ab135b74771e1c48b81a5d041e2ca84b9be7a802200ac8d1f40fb026674fe5a5edd3dea715c27baa9baca51ed45ea750ac9dc0a55e81ffffffff010100000000000000015100000000'
txid = 'a647a7b3328d2c698bfa1ee2dd4e5e05a6cea972e764ccb9bd29ea43817ca64f'
self._run_naive_tests_on_tx(raw_tx, txid)
def test_txid_bitcoin_core_0018(self):
raw_tx = '010000000370ac0a1ae588aaf284c308d67ca92c69a39e2db81337e563bf40c59da0a5cf63000000006a4730440220360d20baff382059040ba9be98947fd678fb08aab2bb0c172efa996fd8ece9b702201b4fb0de67f015c90e7ac8a193aeab486a1f587e0f54d0fb9552ef7f5ce6caec032103579ca2e6d107522f012cd00b52b9a65fb46f0c57b9b8b6e377c48f526a44741affffffff7d815b6447e35fbea097e00e028fb7dfbad4f3f0987b4734676c84f3fcd0e804010000006b483045022100c714310be1e3a9ff1c5f7cacc65c2d8e781fc3a88ceb063c6153bf950650802102200b2d0979c76e12bb480da635f192cc8dc6f905380dd4ac1ff35a4f68f462fffd032103579ca2e6d107522f012cd00b52b9a65fb46f0c57b9b8b6e377c48f526a44741affffffff3f1f097333e4d46d51f5e77b53264db8f7f5d2e18217e1099957d0f5af7713ee010000006c493046022100b663499ef73273a3788dea342717c2640ac43c5a1cf862c9e09b206fcb3f6bb8022100b09972e75972d9148f2bdd462e5cb69b57c1214b88fc55ca638676c07cfc10d8032103579ca2e6d107522f012cd00b52b9a65fb46f0c57b9b8b6e377c48f526a44741affffffff0380841e00000000001976a914bfb282c70c4191f45b5a6665cad1682f2c9cfdfb88ac80841e00000000001976a9149857cc07bed33a5cf12b9c5e0500b675d500c81188ace0fd1c00000000001976a91443c52850606c872403c0601e69fa34b26f62db4a88ac00000000'
txid = 'afd9c17f8913577ec3509520bd6e5d63e9c0fd2a5f70c787993b097ba6ca9fae'
self._run_naive_tests_on_tx(raw_tx, txid)
def test_txid_bitcoin_core_0019(self):
raw_tx = '01000000012312503f2491a2a97fcd775f11e108a540a5528b5d4dee7a3c68ae4add01dab300000000fdfe0000483045022100f6649b0eddfdfd4ad55426663385090d51ee86c3481bdc6b0c18ea6c0ece2c0b0220561c315b07cffa6f7dd9df96dbae9200c2dee09bf93cc35ca05e6cdf613340aa0148304502207aacee820e08b0b174e248abd8d7a34ed63b5da3abedb99934df9fddd65c05c4022100dfe87896ab5ee3df476c2655f9fbe5bd089dccbef3e4ea05b5d121169fe7f5f4014c695221031d11db38972b712a9fe1fc023577c7ae3ddb4a3004187d41c45121eecfdbb5b7210207ec36911b6ad2382860d32989c7b8728e9489d7bbc94a6b5509ef0029be128821024ea9fac06f666a4adc3fc1357b7bec1fd0bdece2b9d08579226a8ebde53058e453aeffffffff0180380100000000001976a914c9b99cddf847d10685a4fabaa0baf505f7c3dfab88ac00000000'
txid = 'f4b05f978689c89000f729cae187dcfbe64c9819af67a4f05c0b4d59e717d64d'
self._run_naive_tests_on_tx(raw_tx, txid)
def test_txid_bitcoin_core_0020(self):
raw_tx = '0100000001f709fa82596e4f908ee331cb5e0ed46ab331d7dcfaf697fe95891e73dac4ebcb000000008c20ca42095840735e89283fec298e62ac2ddea9b5f34a8cbb7097ad965b87568100201b1b01dc829177da4a14551d2fc96a9db00c6501edfa12f22cd9cefd335c227f483045022100a9df60536df5733dd0de6bc921fab0b3eee6426501b43a228afa2c90072eb5ca02201c78b74266fac7d1db5deff080d8a403743203f109fbcabf6d5a760bf87386d20100ffffffff01c075790000000000232103611f9a45c18f28f06f19076ad571c344c82ce8fcfe34464cf8085217a2d294a6ac00000000'
txid = 'cc60b1f899ec0a69b7c3f25ddf32c4524096a9c5b01cbd84c6d0312a0c478984'
self._run_naive_tests_on_tx(raw_tx, txid)
def test_txid_bitcoin_core_0021(self):
raw_tx = '01000000012c651178faca83be0b81c8c1375c4b0ad38d53c8fe1b1c4255f5e795c25792220000000049483045022100d6044562284ac76c985018fc4a90127847708c9edb280996c507b28babdc4b2a02203d74eca3f1a4d1eea7ff77b528fde6d5dc324ec2dbfdb964ba885f643b9704cd01ffffffff010100000000000000232102c2410f8891ae918cab4ffc4bb4a3b0881be67c7a1e7faa8b5acf9ab8932ec30cac00000000'
txid = '1edc7f214659d52c731e2016d258701911bd62a0422f72f6c87a1bc8dd3f8667'
self._run_naive_tests_on_tx(raw_tx, txid)
def test_txid_bitcoin_core_0022(self):
raw_tx = '0100000001f725ea148d92096a79b1709611e06e94c63c4ef61cbae2d9b906388efd3ca99c000000000100ffffffff0101000000000000002321028a1d66975dbdf97897e3a4aef450ebeb5b5293e4a0b4a6d3a2daaa0b2b110e02ac00000000'
txid = '018adb7133fde63add9149a2161802a1bcf4bdf12c39334e880c073480eda2ff'
self._run_naive_tests_on_tx(raw_tx, txid)
def test_txid_bitcoin_core_0023(self):
raw_tx = '0100000001be599efaa4148474053c2fa031c7262398913f1dc1d9ec201fd44078ed004e44000000004900473044022022b29706cb2ed9ef0cb3c97b72677ca2dfd7b4160f7b4beb3ba806aa856c401502202d1e52582412eba2ed474f1f437a427640306fd3838725fab173ade7fe4eae4a01ffffffff010100000000000000232103ac4bba7e7ca3e873eea49e08132ad30c7f03640b6539e9b59903cf14fd016bbbac00000000'
txid = '1464caf48c708a6cc19a296944ded9bb7f719c9858986d2501cf35068b9ce5a2'
self._run_naive_tests_on_tx(raw_tx, txid)
def test_txid_bitcoin_core_0024(self):
raw_tx = '010000000112b66d5e8c7d224059e946749508efea9d66bf8d0c83630f080cf30be8bb6ae100000000490047304402206ffe3f14caf38ad5c1544428e99da76ffa5455675ec8d9780fac215ca17953520220779502985e194d84baa36b9bd40a0dbd981163fa191eb884ae83fc5bd1c86b1101ffffffff010100000000000000232103905380c7013e36e6e19d305311c1b81fce6581f5ee1c86ef0627c68c9362fc9fac00000000'
txid = '1fb73fbfc947d52f5d80ba23b67c06a232ad83fdd49d1c0a657602f03fbe8f7a'
self._run_naive_tests_on_tx(raw_tx, txid)
def test_txid_bitcoin_core_0025(self):
raw_tx = '0100000001b0ef70cc644e0d37407e387e73bfad598d852a5aa6d691d72b2913cebff4bceb000000004a00473044022068cd4851fc7f9a892ab910df7a24e616f293bcb5c5fbdfbc304a194b26b60fba022078e6da13d8cb881a22939b952c24f88b97afd06b4c47a47d7f804c9a352a6d6d0100ffffffff0101000000000000002321033bcaa0a602f0d44cc9d5637c6e515b0471db514c020883830b7cefd73af04194ac00000000'
txid = '24cecfce0fa880b09c9b4a66c5134499d1b09c01cc5728cd182638bea070e6ab'
self._run_naive_tests_on_tx(raw_tx, txid)
def test_txid_bitcoin_core_0026(self):
raw_tx = '0100000001c188aa82f268fcf08ba18950f263654a3ea6931dabc8bf3ed1d4d42aaed74cba000000004b0000483045022100940378576e069aca261a6b26fb38344e4497ca6751bb10905c76bb689f4222b002204833806b014c26fd801727b792b1260003c55710f87c5adbd7a9cb57446dbc9801ffffffff0101000000000000002321037c615d761e71d38903609bf4f46847266edc2fb37532047d747ba47eaae5ffe1ac00000000'
txid = '9eaa819e386d6a54256c9283da50c230f3d8cd5376d75c4dcc945afdeb157dd7'
self._run_naive_tests_on_tx(raw_tx, txid)
def test_txid_bitcoin_core_0027(self):
raw_tx = '01000000012432b60dc72cebc1a27ce0969c0989c895bdd9e62e8234839117f8fc32d17fbc000000004a493046022100a576b52051962c25e642c0fd3d77ee6c92487048e5d90818bcf5b51abaccd7900221008204f8fb121be4ec3b24483b1f92d89b1b0548513a134e345c5442e86e8617a501ffffffff010000000000000000016a00000000'
txid = '46224764c7870f95b58f155bce1e38d4da8e99d42dbb632d0dd7c07e092ee5aa'
self._run_naive_tests_on_tx(raw_tx, txid)
def test_txid_bitcoin_core_0028(self):
raw_tx = '01000000014710b0e7cf9f8930de259bdc4b84aa5dfb9437b665a3e3a21ff26e0bf994e183000000004a493046022100a166121a61b4eeb19d8f922b978ff6ab58ead8a5a5552bf9be73dc9c156873ea02210092ad9bc43ee647da4f6652c320800debcf08ec20a094a0aaf085f63ecb37a17201ffffffff010000000000000000016a00000000'
txid = '8d66836045db9f2d7b3a75212c5e6325f70603ee27c8333a3bce5bf670d9582e'
self._run_naive_tests_on_tx(raw_tx, txid)
def test_txid_bitcoin_core_0029(self):
raw_tx = '01000000015ebaa001d8e4ec7a88703a3bcf69d98c874bca6299cca0f191512bf2a7826832000000004948304502203bf754d1c6732fbf87c5dcd81258aefd30f2060d7bd8ac4a5696f7927091dad1022100f5bcb726c4cf5ed0ed34cc13dadeedf628ae1045b7cb34421bc60b89f4cecae701ffffffff010000000000000000016a00000000'
txid = 'aab7ef280abbb9cc6fbaf524d2645c3daf4fcca2b3f53370e618d9cedf65f1f8'
self._run_naive_tests_on_tx(raw_tx, txid)
def test_txid_bitcoin_core_0030(self):
raw_tx = '010000000144490eda355be7480f2ec828dcc1b9903793a8008fad8cfe9b0c6b4d2f0355a900000000924830450221009c0a27f886a1d8cb87f6f595fbc3163d28f7a81ec3c4b252ee7f3ac77fd13ffa02203caa8dfa09713c8c4d7ef575c75ed97812072405d932bd11e6a1593a98b679370148304502201e3861ef39a526406bad1e20ecad06be7375ad40ddb582c9be42d26c3a0d7b240221009d0a3985e96522e59635d19cc4448547477396ce0ef17a58e7d74c3ef464292301ffffffff010000000000000000016a00000000'
txid = '6327783a064d4e350c454ad5cd90201aedf65b1fc524e73709c52f0163739190'
self._run_naive_tests_on_tx(raw_tx, txid)
def test_txid_bitcoin_core_0031(self):
raw_tx = '010000000144490eda355be7480f2ec828dcc1b9903793a8008fad8cfe9b0c6b4d2f0355a9000000004a48304502207a6974a77c591fa13dff60cabbb85a0de9e025c09c65a4b2285e47ce8e22f761022100f0efaac9ff8ac36b10721e0aae1fb975c90500b50c56e8a0cc52b0403f0425dd0100ffffffff010000000000000000016a00000000'
txid = '892464645599cc3c2d165adcc612e5f982a200dfaa3e11e9ce1d228027f46880'
self._run_naive_tests_on_tx(raw_tx, txid)
def test_txid_bitcoin_core_0032(self):
raw_tx = '010000000144490eda355be7480f2ec828dcc1b9903793a8008fad8cfe9b0c6b4d2f0355a9000000004a483045022100fa4a74ba9fd59c59f46c3960cf90cbe0d2b743c471d24a3d5d6db6002af5eebb02204d70ec490fd0f7055a7c45f86514336e3a7f03503dacecabb247fc23f15c83510151ffffffff010000000000000000016a00000000'
txid = '578db8c6c404fec22c4a8afeaf32df0e7b767c4dda3478e0471575846419e8fc'
self._run_naive_tests_on_tx(raw_tx, txid)
def test_txid_bitcoin_core_0033(self):
raw_tx = '0100000001e0be9e32f1f89c3d916c4f21e55cdcd096741b895cc76ac353e6023a05f4f7cc00000000d86149304602210086e5f736a2c3622ebb62bd9d93d8e5d76508b98be922b97160edc3dcca6d8c47022100b23c312ac232a4473f19d2aeb95ab7bdf2b65518911a0d72d50e38b5dd31dc820121038479a0fa998cd35259a2ef0a7a5c68662c1474f88ccb6d08a7677bbec7f22041ac4730440220508fa761865c8abd81244a168392876ee1d94e8ed83897066b5e2df2400dad24022043f5ee7538e87e9c6aef7ef55133d3e51da7cc522830a9c4d736977a76ef755c0121038479a0fa998cd35259a2ef0a7a5c68662c1474f88ccb6d08a7677bbec7f22041ffffffff010000000000000000016a00000000'
txid = '974f5148a0946f9985e75a240bb24c573adbbdc25d61e7b016cdbb0a5355049f'
self._run_naive_tests_on_tx(raw_tx, txid)
def test_txid_bitcoin_core_0034(self):
raw_tx = '01000000013c6f30f99a5161e75a2ce4bca488300ca0c6112bde67f0807fe983feeff0c91001000000e608646561646265656675ab61493046022100ce18d384221a731c993939015e3d1bcebafb16e8c0b5b5d14097ec8177ae6f28022100bcab227af90bab33c3fe0a9abfee03ba976ee25dc6ce542526e9b2e56e14b7f10121038479a0fa998cd35259a2ef0a7a5c68662c1474f88ccb6d08a7677bbec7f22041ac493046022100c3b93edcc0fd6250eb32f2dd8a0bba1754b0f6c3be8ed4100ed582f3db73eba2022100bf75b5bd2eff4d6bf2bda2e34a40fcc07d4aa3cf862ceaa77b47b81eff829f9a01ab21038479a0fa998cd35259a2ef0a7a5c68662c1474f88ccb6d08a7677bbec7f22041ffffffff010000000000000000016a00000000'
txid = 'b0097ec81df231893a212657bf5fe5a13b2bff8b28c0042aca6fc4159f79661b'
self._run_naive_tests_on_tx(raw_tx, txid)
def test_txid_bitcoin_core_0035(self):
raw_tx = '01000000016f3dbe2ca96fa217e94b1017860be49f20820dea5c91bdcb103b0049d5eb566000000000fd1d0147304402203989ac8f9ad36b5d0919d97fa0a7f70c5272abee3b14477dc646288a8b976df5022027d19da84a066af9053ad3d1d7459d171b7e3a80bc6c4ef7a330677a6be548140147304402203989ac8f9ad36b5d0919d97fa0a7f70c5272abee3b14477dc646288a8b976df5022027d19da84a066af9053ad3d1d7459d171b7e3a80bc6c4ef7a330677a6be548140121038479a0fa998cd35259a2ef0a7a5c68662c1474f88ccb6d08a7677bbec7f22041ac47304402203757e937ba807e4a5da8534c17f9d121176056406a6465054bdd260457515c1a02200f02eccf1bec0f3a0d65df37889143c2e88ab7acec61a7b6f5aa264139141a2b0121038479a0fa998cd35259a2ef0a7a5c68662c1474f88ccb6d08a7677bbec7f22041ffffffff010000000000000000016a00000000'
txid = 'feeba255656c80c14db595736c1c7955c8c0a497622ec96e3f2238fbdd43a7c9'
self._run_naive_tests_on_tx(raw_tx, txid)
def test_txid_bitcoin_core_0036(self):
raw_tx = '01000000012139c555ccb81ee5b1e87477840991ef7b386bc3ab946b6b682a04a621006b5a01000000fdb40148304502201723e692e5f409a7151db386291b63524c5eb2030df652b1f53022fd8207349f022100b90d9bbf2f3366ce176e5e780a00433da67d9e5c79312c6388312a296a5800390148304502201723e692e5f409a7151db386291b63524c5eb2030df652b1f53022fd8207349f022100b90d9bbf2f3366ce176e5e780a00433da67d9e5c79312c6388312a296a5800390121038479a0fa998cd35259a2ef0a7a5c68662c1474f88ccb6d08a7677bbec7f2204148304502201723e692e5f409a7151db386291b63524c5eb2030df652b1f53022fd8207349f022100b90d9bbf2f3366ce176e5e780a00433da67d9e5c79312c6388312a296a5800390175ac4830450220646b72c35beeec51f4d5bc1cbae01863825750d7f490864af354e6ea4f625e9c022100f04b98432df3a9641719dbced53393022e7249fb59db993af1118539830aab870148304502201723e692e5f409a7151db386291b63524c5eb2030df652b1f53022fd8207349f022100b90d9bbf2f3366ce176e5e780a00433da67d9e5c79312c6388312a296a580039017521038479a0fa998cd35259a2ef0a7a5c68662c1474f88ccb6d08a7677bbec7f22041ffffffff010000000000000000016a00000000'
txid = 'a0c984fc820e57ddba97f8098fa640c8a7eb3fe2f583923da886b7660f505e1e'
self._run_naive_tests_on_tx(raw_tx, txid)
def test_txid_bitcoin_core_0037(self):
raw_tx = '0100000002f9cbafc519425637ba4227f8d0a0b7160b4e65168193d5af39747891de98b5b5000000006b4830450221008dd619c563e527c47d9bd53534a770b102e40faa87f61433580e04e271ef2f960220029886434e18122b53d5decd25f1f4acb2480659fea20aabd856987ba3c3907e0121022b78b756e2258af13779c1a1f37ea6800259716ca4b7f0b87610e0bf3ab52a01ffffffff42e7988254800876b69f24676b3e0205b77be476512ca4d970707dd5c60598ab00000000fd260100483045022015bd0139bcccf990a6af6ec5c1c52ed8222e03a0d51c334df139968525d2fcd20221009f9efe325476eb64c3958e4713e9eefe49bf1d820ed58d2112721b134e2a1a53034930460221008431bdfa72bc67f9d41fe72e94c88fb8f359ffa30b33c72c121c5a877d922e1002210089ef5fc22dd8bfc6bf9ffdb01a9862d27687d424d1fefbab9e9c7176844a187a014c9052483045022015bd0139bcccf990a6af6ec5c1c52ed8222e03a0d51c334df139968525d2fcd20221009f9efe325476eb64c3958e4713e9eefe49bf1d820ed58d2112721b134e2a1a5303210378d430274f8c5ec1321338151e9f27f4c676a008bdf8638d07c0b6be9ab35c71210378d430274f8c5ec1321338151e9f27f4c676a008bdf8638d07c0b6be9ab35c7153aeffffffff01a08601000000000017a914d8dacdadb7462ae15cd906f1878706d0da8660e68700000000'
txid = '5df1375ffe61ac35ca178ebb0cab9ea26dedbd0e96005dfcee7e379fa513232f'
self._run_naive_tests_on_tx(raw_tx, txid)
def test_txid_bitcoin_core_0038(self):
raw_tx = '0100000002dbb33bdf185b17f758af243c5d3c6e164cc873f6bb9f40c0677d6e0f8ee5afce000000006b4830450221009627444320dc5ef8d7f68f35010b4c050a6ed0d96b67a84db99fda9c9de58b1e02203e4b4aaa019e012e65d69b487fdf8719df72f488fa91506a80c49a33929f1fd50121022b78b756e2258af13779c1a1f37ea6800259716ca4b7f0b87610e0bf3ab52a01ffffffffdbb33bdf185b17f758af243c5d3c6e164cc873f6bb9f40c0677d6e0f8ee5afce010000009300483045022015bd0139bcccf990a6af6ec5c1c52ed8222e03a0d51c334df139968525d2fcd20221009f9efe325476eb64c3958e4713e9eefe49bf1d820ed58d2112721b134e2a1a5303483045022015bd0139bcccf990a6af6ec5c1c52ed8222e03a0d51c334df139968525d2fcd20221009f9efe325476eb64c3958e4713e9eefe49bf1d820ed58d2112721b134e2a1a5303ffffffff01a0860100000000001976a9149bc0bbdd3024da4d0c38ed1aecf5c68dd1d3fa1288ac00000000'
txid = 'ded7ff51d89a4e1ec48162aee5a96447214d93dfb3837946af2301a28f65dbea'
self._run_naive_tests_on_tx(raw_tx, txid)
def test_txid_bitcoin_core_0039(self):
raw_tx = '010000000100010000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000'
txid = '3444be2e216abe77b46015e481d8cc21abd4c20446aabf49cd78141c9b9db87e'
self._run_naive_tests_on_tx(raw_tx, txid)
def test_txid_bitcoin_core_0040(self):
raw_tx = '0100000001000100000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000ff64cd1d'
txid = 'abd62b4627d8d9b2d95fcfd8c87e37d2790637ce47d28018e3aece63c1d62649'
self._run_naive_tests_on_tx(raw_tx, txid)
def test_txid_bitcoin_core_0041(self):
raw_tx = '01000000010001000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000065cd1d'
txid = '58b6de8413603b7f556270bf48caedcf17772e7105f5419f6a80be0df0b470da'
self._run_naive_tests_on_tx(raw_tx, txid)
def test_txid_bitcoin_core_0042(self):
raw_tx = '0100000001000100000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000ffffffff'
txid = '5f99c0abf511294d76cbe144d86b77238a03e086974bc7a8ea0bdb2c681a0324'
self._run_naive_tests_on_tx(raw_tx, txid)
def test_txid_bitcoin_core_0043(self):
raw_tx = '010000000100010000000000000000000000000000000000000000000000000000000000000000000000feffffff0100000000000000000000000000'
txid = '25d35877eaba19497710666473c50d5527d38503e3521107a3fc532b74cd7453'
self._run_naive_tests_on_tx(raw_tx, txid)
def test_txid_bitcoin_core_0044(self):
raw_tx = '0100000001000100000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000feffffff'
txid = '1b9aef851895b93c62c29fbd6ca4d45803f4007eff266e2f96ff11e9b6ef197b'
self._run_naive_tests_on_tx(raw_tx, txid)
def test_txid_bitcoin_core_0045(self):
raw_tx = '010000000100010000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000'
txid = '3444be2e216abe77b46015e481d8cc21abd4c20446aabf49cd78141c9b9db87e'
self._run_naive_tests_on_tx(raw_tx, txid)
def test_txid_bitcoin_core_0046(self):
raw_tx = '01000000010001000000000000000000000000000000000000000000000000000000000000000000000251b1000000000100000000000000000001000000'
txid = 'f53761038a728b1f17272539380d96e93f999218f8dcb04a8469b523445cd0fd'
self._run_naive_tests_on_tx(raw_tx, txid)
def test_txid_bitcoin_core_0047(self):
raw_tx = '0100000001000100000000000000000000000000000000000000000000000000000000000000000000030251b1000000000100000000000000000001000000'
txid = 'd193f0f32fceaf07bb25c897c8f99ca6f69a52f6274ca64efc2a2e180cb97fc1'
self._run_naive_tests_on_tx(raw_tx, txid)
def test_txid_bitcoin_core_0048(self):
raw_tx = '010000000132211bdd0d568506804eef0d8cc3db68c3d766ab9306cdfcc0a9c89616c8dbb1000000006c493045022100c7bb0faea0522e74ff220c20c022d2cb6033f8d167fb89e75a50e237a35fd6d202203064713491b1f8ad5f79e623d0219ad32510bfaa1009ab30cbee77b59317d6e30001210237af13eb2d84e4545af287b919c2282019c9691cc509e78e196a9d8274ed1be0ffffffff0100000000000000001976a914f1b3ed2eda9a2ebe5a9374f692877cdf87c0f95b88ac00000000'
txid = '50a1e0e6a134a564efa078e3bd088e7e8777c2c0aec10a752fd8706470103b89'
self._run_naive_tests_on_tx(raw_tx, txid)
def test_txid_bitcoin_core_0049(self):
raw_tx = '020000000100010000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000'
txid = 'e2207d1aaf6b74e5d98c2fa326d2dc803b56b30a3f90ce779fa5edb762f38755'
self._run_naive_tests_on_tx(raw_tx, txid)
def test_txid_bitcoin_core_0050(self):
raw_tx = '020000000100010000000000000000000000000000000000000000000000000000000000000000000000ffff00000100000000000000000000000000'
txid = 'f335864f7c12ec7946d2c123deb91eb978574b647af125a414262380c7fbd55c'
self._run_naive_tests_on_tx(raw_tx, txid)
def test_txid_bitcoin_core_0051(self):
raw_tx = '020000000100010000000000000000000000000000000000000000000000000000000000000000000000ffffbf7f0100000000000000000000000000'
txid = 'd1edbcde44691e98a7b7f556bd04966091302e29ad9af3c2baac38233667e0d2'
self._run_naive_tests_on_tx(raw_tx, txid)
def test_txid_bitcoin_core_0052(self):
raw_tx = '020000000100010000000000000000000000000000000000000000000000000000000000000000000000000040000100000000000000000000000000'
txid = '3a13e1b6371c545147173cc4055f0ed73686a9f73f092352fb4b39ca27d360e6'
self._run_naive_tests_on_tx(raw_tx, txid)
def test_txid_bitcoin_core_0053(self):
raw_tx = '020000000100010000000000000000000000000000000000000000000000000000000000000000000000ffff40000100000000000000000000000000'
txid = 'bffda23e40766d292b0510a1b556453c558980c70c94ab158d8286b3413e220d'
self._run_naive_tests_on_tx(raw_tx, txid)
def test_txid_bitcoin_core_0054(self):
raw_tx = '020000000100010000000000000000000000000000000000000000000000000000000000000000000000ffffff7f0100000000000000000000000000'
txid = '01a86c65460325dc6699714d26df512a62a854a669f6ed2e6f369a238e048cfd'
self._run_naive_tests_on_tx(raw_tx, txid)
def test_txid_bitcoin_core_0055(self):
raw_tx = '020000000100010000000000000000000000000000000000000000000000000000000000000000000000000000800100000000000000000000000000'
txid = 'f6d2359c5de2d904e10517d23e7c8210cca71076071bbf46de9fbd5f6233dbf1'
self._run_naive_tests_on_tx(raw_tx, txid)
def test_txid_bitcoin_core_0056(self):
raw_tx = '020000000100010000000000000000000000000000000000000000000000000000000000000000000000feffffff0100000000000000000000000000'
txid = '19c2b7377229dae7aa3e50142a32fd37cef7171a01682f536e9ffa80c186f6c9'
self._run_naive_tests_on_tx(raw_tx, txid)
def test_txid_bitcoin_core_0057(self):
raw_tx = '020000000100010000000000000000000000000000000000000000000000000000000000000000000000ffffffff0100000000000000000000000000'
txid = 'c9dda3a24cc8a5acb153d1085ecd2fecf6f87083122f8cdecc515b1148d4c40d'
self._run_naive_tests_on_tx(raw_tx, txid)
def test_txid_bitcoin_core_0058(self):
raw_tx = '020000000100010000000000000000000000000000000000000000000000000000000000000000000000ffffbf7f0100000000000000000000000000'
txid = 'd1edbcde44691e98a7b7f556bd04966091302e29ad9af3c2baac38233667e0d2'
self._run_naive_tests_on_tx(raw_tx, txid)
def test_txid_bitcoin_core_0059(self):
raw_tx = '020000000100010000000000000000000000000000000000000000000000000000000000000000000000ffffff7f0100000000000000000000000000'
txid = '01a86c65460325dc6699714d26df512a62a854a669f6ed2e6f369a238e048cfd'
self._run_naive_tests_on_tx(raw_tx, txid)
def test_txid_bitcoin_core_0060(self):
raw_tx = '02000000010001000000000000000000000000000000000000000000000000000000000000000000000251b2010000000100000000000000000000000000'
txid = '4b5e0aae1251a9dc66b4d5f483f1879bf518ea5e1765abc5a9f2084b43ed1ea7'
self._run_naive_tests_on_tx(raw_tx, txid)
def test_txid_bitcoin_core_0061(self):
raw_tx = '0200000001000100000000000000000000000000000000000000000000000000000000000000000000030251b2010000000100000000000000000000000000'
txid = '5f16eb3ca4581e2dfb46a28140a4ee15f85e4e1c032947da8b93549b53c105f5'
self._run_naive_tests_on_tx(raw_tx, txid)
def test_txid_bitcoin_core_0062(self):
raw_tx = '0100000000010100010000000000000000000000000000000000000000000000000000000000000000000000ffffffff01e8030000000000001976a9144c9c3dfac4207d5d8cb89df5722cb3d712385e3f88ac02483045022100cfb07164b36ba64c1b1e8c7720a56ad64d96f6ef332d3d37f9cb3c96477dc44502200a464cd7a9cf94cd70f66ce4f4f0625ef650052c7afcfe29d7d7e01830ff91ed012103596d3451025c19dbbdeb932d6bf8bfb4ad499b95b6f88db8899efac102e5fc7100000000'
txid = 'b2ce556154e5ab22bec0a2f990b2b843f4f4085486c0d2cd82873685c0012004'
self._run_naive_tests_on_tx(raw_tx, txid)
def test_txid_bitcoin_core_0063(self):
raw_tx = '0100000000010100010000000000000000000000000000000000000000000000000000000000000000000000ffffffff01e8030000000000001976a9144c9c3dfac4207d5d8cb89df5722cb3d712385e3f88ac02483045022100aa5d8aa40a90f23ce2c3d11bc845ca4a12acd99cbea37de6b9f6d86edebba8cb022022dedc2aa0a255f74d04c0b76ece2d7c691f9dd11a64a8ac49f62a99c3a05f9d01232103596d3451025c19dbbdeb932d6bf8bfb4ad499b95b6f88db8899efac102e5fc71ac00000000'
txid = 'b2ce556154e5ab22bec0a2f990b2b843f4f4085486c0d2cd82873685c0012004'
self._run_naive_tests_on_tx(raw_tx, txid)
def test_txid_bitcoin_core_0064(self):
raw_tx = '01000000000101000100000000000000000000000000000000000000000000000000000000000000000000171600144c9c3dfac4207d5d8cb89df5722cb3d712385e3fffffffff01e8030000000000001976a9144c9c3dfac4207d5d8cb89df5722cb3d712385e3f88ac02483045022100cfb07164b36ba64c1b1e8c7720a56ad64d96f6ef332d3d37f9cb3c96477dc44502200a464cd7a9cf94cd70f66ce4f4f0625ef650052c7afcfe29d7d7e01830ff91ed012103596d3451025c19dbbdeb932d6bf8bfb4ad499b95b6f88db8899efac102e5fc7100000000'
txid = 'fee125c6cd142083fabd0187b1dd1f94c66c89ec6e6ef6da1374881c0c19aece'
self._run_naive_tests_on_tx(raw_tx, txid)
def test_txid_bitcoin_core_0065(self):
raw_tx = '0100000000010100010000000000000000000000000000000000000000000000000000000000000000000023220020ff25429251b5a84f452230a3c75fd886b7fc5a7865ce4a7bb7a9d7c5be6da3dbffffffff01e8030000000000001976a9144c9c3dfac4207d5d8cb89df5722cb3d712385e3f88ac02483045022100aa5d8aa40a90f23ce2c3d11bc845ca4a12acd99cbea37de6b9f6d86edebba8cb022022dedc2aa0a255f74d04c0b76ece2d7c691f9dd11a64a8ac49f62a99c3a05f9d01232103596d3451025c19dbbdeb932d6bf8bfb4ad499b95b6f88db8899efac102e5fc71ac00000000'
txid = '5f32557914351fee5f89ddee6c8983d476491d29e601d854e3927299e50450da'
self._run_naive_tests_on_tx(raw_tx, txid)
def test_txid_bitcoin_core_0066(self):
raw_tx = '0100000000010400010000000000000000000000000000000000000000000000000000000000000200000000ffffffff00010000000000000000000000000000000000000000000000000000000000000100000000ffffffff00010000000000000000000000000000000000000000000000000000000000000000000000ffffffff00010000000000000000000000000000000000000000000000000000000000000300000000ffffffff05540b0000000000000151d0070000000000000151840300000000000001513c0f00000000000001512c010000000000000151000248304502210092f4777a0f17bf5aeb8ae768dec5f2c14feabf9d1fe2c89c78dfed0f13fdb86902206da90a86042e252bcd1e80a168c719e4a1ddcc3cebea24b9812c5453c79107e9832103596d3451025c19dbbdeb932d6bf8bfb4ad499b95b6f88db8899efac102e5fc71000000000000'
txid = '07dfa2da3d67c8a2b9f7bd31862161f7b497829d5da90a88ba0f1a905e7a43f7'
self._run_naive_tests_on_tx(raw_tx, txid)
def test_txid_bitcoin_core_0067(self):
raw_tx = '0100000000010300010000000000000000000000000000000000000000000000000000000000000000000000ffffffff00010000000000000000000000000000000000000000000000000000000000000100000000ffffffff00010000000000000000000000000000000000000000000000000000000000000200000000ffffffff03e8030000000000000151d0070000000000000151b80b0000000000000151000248304502210092f4777a0f17bf5aeb8ae768dec5f2c14feabf9d1fe2c89c78dfed0f13fdb86902206da90a86042e252bcd1e80a168c719e4a1ddcc3cebea24b9812c5453c79107e9832103596d3451025c19dbbdeb932d6bf8bfb4ad499b95b6f88db8899efac102e5fc710000000000'
txid = '8a1bddf924d24570074b09d7967c145e54dc4cee7972a92fd975a2ad9e64b424'
self._run_naive_tests_on_tx(raw_tx, txid)
def test_txid_bitcoin_core_0068(self):
raw_tx = '0100000000010300010000000000000000000000000000000000000000000000000000000000000000000000ffffffff00010000000000000000000000000000000000000000000000000000000000000100000000ffffffff00010000000000000000000000000000000000000000000000000000000000000200000000ffffffff0484030000000000000151d0070000000000000151540b0000000000000151c800000000000000015100024730440220699e6b0cfe015b64ca3283e6551440a34f901ba62dd4c72fe1cb815afb2e6761022021cc5e84db498b1479de14efda49093219441adc6c543e5534979605e273d80b032103596d3451025c19dbbdeb932d6bf8bfb4ad499b95b6f88db8899efac102e5fc710000000000'
txid = 'f92bb6e4f3ff89172f23ef647f74c13951b665848009abb5862cdf7a0412415a'
self._run_naive_tests_on_tx(raw_tx, txid)
def test_txid_bitcoin_core_0069(self):
raw_tx = '0100000000010300010000000000000000000000000000000000000000000000000000000000000000000000ffffffff00010000000000000000000000000000000000000000000000000000000000000100000000ffffffff00010000000000000000000000000000000000000000000000000000000000000200000000ffffffff03e8030000000000000151d0070000000000000151b80b000000000000015100024730440220699e6b0cfe015b64ca3283e6551440a34f901ba62dd4c72fe1cb815afb2e6761022021cc5e84db498b1479de14efda49093219441adc6c543e5534979605e273d80b032103596d3451025c19dbbdeb932d6bf8bfb4ad499b95b6f88db8899efac102e5fc710000000000'
txid = '8a1bddf924d24570074b09d7967c145e54dc4cee7972a92fd975a2ad9e64b424'
self._run_naive_tests_on_tx(raw_tx, txid)
def test_txid_bitcoin_core_0070(self):
raw_tx = '0100000000010400010000000000000000000000000000000000000000000000000000000000000200000000ffffffff00010000000000000000000000000000000000000000000000000000000000000000000000ffffffff00010000000000000000000000000000000000000000000000000000000000000100000000ffffffff00010000000000000000000000000000000000000000000000000000000000000300000000ffffffff04b60300000000000001519e070000000000000151860b00000000000001009600000000000000015100000248304502210091b32274295c2a3fa02f5bce92fb2789e3fc6ea947fbe1a76e52ea3f4ef2381a022079ad72aefa3837a2e0c033a8652a59731da05fa4a813f4fc48e87c075037256b822103596d3451025c19dbbdeb932d6bf8bfb4ad499b95b6f88db8899efac102e5fc710000000000'
txid = 'e657e25fc9f2b33842681613402759222a58cf7dd504d6cdc0b69a0b8c2e7dcb'
self._run_naive_tests_on_tx(raw_tx, txid)
def test_txid_bitcoin_core_0071(self):
raw_tx = '0100000000010300010000000000000000000000000000000000000000000000000000000000000000000000ffffffff00010000000000000000000000000000000000000000000000000000000000000100000000ffffffff00010000000000000000000000000000000000000000000000000000000000000200000000ffffffff03e8030000000000000151d0070000000000000151b80b0000000000000151000248304502210091b32274295c2a3fa02f5bce92fb2789e3fc6ea947fbe1a76e52ea3f4ef2381a022079ad72aefa3837a2e0c033a8652a59731da05fa4a813f4fc48e87c075037256b822103596d3451025c19dbbdeb932d6bf8bfb4ad499b95b6f88db8899efac102e5fc710000000000'
txid = '8a1bddf924d24570074b09d7967c145e54dc4cee7972a92fd975a2ad9e64b424'
self._run_naive_tests_on_tx(raw_tx, txid)
def test_txid_bitcoin_core_0072(self):
raw_tx = '0100000000010300010000000000000000000000000000000000000000000000000000000000000000000000ffffffff00010000000000000000000000000000000000000000000000000000000000000100000000ffffffff00010000000000000000000000000000000000000000000000000000000000000200000000ffffffff04b60300000000000001519e070000000000000151860b0000000000000100960000000000000001510002473044022022fceb54f62f8feea77faac7083c3b56c4676a78f93745adc8a35800bc36adfa022026927df9abcf0a8777829bcfcce3ff0a385fa54c3f9df577405e3ef24ee56479022103596d3451025c19dbbdeb932d6bf8bfb4ad499b95b6f88db8899efac102e5fc710000000000'
txid = '4ede5e22992d43d42ccdf6553fb46e448aa1065ba36423f979605c1e5ab496b8'
self._run_naive_tests_on_tx(raw_tx, txid)
def test_txid_bitcoin_core_0073(self):
raw_tx = '0100000000010300010000000000000000000000000000000000000000000000000000000000000000000000ffffffff00010000000000000000000000000000000000000000000000000000000000000100000000ffffffff00010000000000000000000000000000000000000000000000000000000000000200000000ffffffff03e8030000000000000151d0070000000000000151b80b00000000000001510002473044022022fceb54f62f8feea77faac7083c3b56c4676a78f93745adc8a35800bc36adfa022026927df9abcf0a8777829bcfcce3ff0a385fa54c3f9df577405e3ef24ee56479022103596d3451025c19dbbdeb932d6bf8bfb4ad499b95b6f88db8899efac102e5fc710000000000'
txid = '8a1bddf924d24570074b09d7967c145e54dc4cee7972a92fd975a2ad9e64b424'
self._run_naive_tests_on_tx(raw_tx, txid)
def test_txid_bitcoin_core_0074(self):
raw_tx = '01000000000103000100000000000000000000000000000000000000000000000000000000000000000000000200000000010000000000000000000000000000000000000000000000000000000000000100000000ffffffff000100000000000000000000000000000000000000000000000000000000000002000000000200000003e8030000000000000151d0070000000000000151b80b00000000000001510002473044022022fceb54f62f8feea77faac7083c3b56c4676a78f93745adc8a35800bc36adfa022026927df9abcf0a8777829bcfcce3ff0a385fa54c3f9df577405e3ef24ee56479022103596d3451025c19dbbdeb932d6bf8bfb4ad499b95b6f88db8899efac102e5fc710000000000'
txid = 'cfe9f4b19f52b8366860aec0d2b5815e329299b2e9890d477edd7f1182be7ac8'
self._run_naive_tests_on_tx(raw_tx, txid)
def test_txid_bitcoin_core_0075(self):
raw_tx = '0100000000010400010000000000000000000000000000000000000000000000000000000000000200000000ffffffff00010000000000000000000000000000000000000000000000000000000000000000000000ffffffff00010000000000000000000000000000000000000000000000000000000000000100000000ffffffff00010000000000000000000000000000000000000000000000000000000000000300000000ffffffff03e8030000000000000151d0070000000000000151b80b0000000000000151000002483045022100a3cec69b52cba2d2de623eeef89e0ba1606184ea55476c0f8189fda231bc9cbb022003181ad597f7c380a7d1c740286b1d022b8b04ded028b833282e055e03b8efef812103596d3451025c19dbbdeb932d6bf8bfb4ad499b95b6f88db8899efac102e5fc710000000000'
txid = 'aee8f4865ca40fa77ff2040c0d7de683bea048b103d42ca406dc07dd29d539cb'
self._run_naive_tests_on_tx(raw_tx, txid)
def test_txid_bitcoin_core_0076(self):
raw_tx = '0100000000010300010000000000000000000000000000000000000000000000000000000000000000000000ffffffff00010000000000000000000000000000000000000000000000000000000000000100000000ffffffff00010000000000000000000000000000000000000000000000000000000000000200000000ffffffff03e8030000000000000151d0070000000000000151b80b00000000000001510002483045022100a3cec69b52cba2d2de623eeef89e0ba1606184ea55476c0f8189fda231bc9cbb022003181ad597f7c380a7d1c740286b1d022b8b04ded028b833282e055e03b8efef812103596d3451025c19dbbdeb932d6bf8bfb4ad499b95b6f88db8899efac102e5fc710000000000'
txid = '8a1bddf924d24570074b09d7967c145e54dc4cee7972a92fd975a2ad9e64b424'
self._run_naive_tests_on_tx(raw_tx, txid)
def test_txid_bitcoin_core_0077(self):
raw_tx = '0100000000010300010000000000000000000000000000000000000000000000000000000000000000000000ffffffff00010000000000000000000000000000000000000000000000000000000000000100000000ffffffff00010000000000000000000000000000000000000000000000000000000000000200000000ffffffff03e8030000000000000151d0070000000000000151b80b00000000000001510002483045022100a3cec69b52cba2d2de623ffffffffff1606184ea55476c0f8189fda231bc9cbb022003181ad597f7c380a7d1c740286b1d022b8b04ded028b833282e055e03b8efef812103596d3451025c19dbbdeb932d6bf8bfb4ad499b95b6f88db8899efac102e5fc710000000000'
txid = '8a1bddf924d24570074b09d7967c145e54dc4cee7972a92fd975a2ad9e64b424'
self._run_naive_tests_on_tx(raw_tx, txid)
def test_txid_bitcoin_core_0078(self):
raw_tx = '0100000000010100010000000000000000000000000000000000000000000000000000000000000000000000ffffffff010000000000000000015102fd08020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002755100000000'
txid = 'd93ab9e12d7c29d2adc13d5cdf619d53eec1f36eb6612f55af52be7ba0448e97'
self._run_naive_tests_on_tx(raw_tx, txid)
def test_txid_bitcoin_core_0079(self):
        raw_tx = '0100000000010c00010000000000000000000000000000000000000000000000000000000000000000000000ffffffff00010000000000000000000000000000000000000000000000000000000000000100000000ffffffff0001000000000000000000000000000000000000000000000000000000000000020000006a473044022026c2e65b33fcd03b2a3b0f25030f0244bd23cc45ae4dec0f48ae62255b1998a00220463aa3982b718d593a6b9e0044513fd67a5009c2fdccc59992cffc2b167889f4012103596d3451025c19dbbdeb932d6bf8bfb4ad499b95b6f88db8899efac102e5fc71ffffffff0001000000000000000000000000000000000000000000000000000000000000030000006a4730440220008bd8382911218dcb4c9f2e75bf5c5c3635f2f2df49b36994fde85b0be21a1a02205a539ef10fb4c778b522c1be852352ea06c67ab74200977c722b0bc68972575a012103596d3451025c19dbbdeb932d6bf8bfb4ad499b95b6f88db8899efac102e5fc71ffffffff0001000000000000000000000000000000000000000000000000000000000000040000006b483045022100d9436c32ff065127d71e1a20e319e4fe0a103ba0272743dbd8580be4659ab5d302203fd62571ee1fe790b182d078ecfd092a509eac112bea558d122974ef9cc012c7012103596d3451025c19dbbdeb932d6bf8bfb4ad499b95b6f88db8899efac102e5fc71ffffffff0001000000000000000000000000000000000000000000000000000000000000050000006a47304402200e2c149b114ec546015c13b2b464bbcb0cdc5872e6775787527af6cbc4830b6c02207e9396c6979fb15a9a2b96ca08a633866eaf20dc0ff3c03e512c1d5a1654f148012103596d3451025c19dbbdeb932d6bf8bfb4ad499b95b6f88db8899efac102e5fc71ffffffff0001000000000000000000000000000000000000000000000000000000000000060000006b483045022100b20e70d897dc15420bccb5e0d3e208d27bdd676af109abbd3f88dbdb7721e6d6022005836e663173fbdfe069f54cde3c2decd3d0ea84378092a5d9d85ec8642e8a41012103596d3451025c19dbbdeb932d6bf8bfb4ad499b95b6f88db8899efac102e5fc71ffffffff00010000000000000000000000000000000000000000000000000000000000000700000000ffffffff00010000000000000000000000000000000000000000000000000000000000000800000000ffffffff00010000000000000000000000000000000000000000000000000000000000000900000000ffffffff00010000000000000000000000000000000000000000000000000000000000000a00000000ffffffff00010000000000000000000000000000000000000000000000000000000000000b0000006a47304402206639c6e05e3b9d2675a7f3876286bdf7584fe2bbd15e0ce52dd4e02c0092cdc60220757d60b0a61fc95ada79d23746744c72bac1545a75ff6c2c7cdb6ae04e7e9592012103596d3451025c19dbbdeb932d6bf8bfb4ad499b95b6f88db8899efac102e5fc71ffffffff0ce8030000000000000151e9030000000000000151ea030000000000000151eb030000000000000151ec030000000000000151ed030000000000000151ee030000000000000151ef030000000000000151f0030000000000000151f1030000000000000151f2030000000000000151f30300000000000001510248304502210082219a54f61bf126bfc3fa068c6e33831222d1d7138c6faa9d33ca87fd4202d6022063f9902519624254d7c2c8ea7ba2d66ae975e4e229ae38043973ec707d5d4a83012103596d3451025c19dbbdeb932d6bf8bfb4ad499b95b6f88db8899efac102e5fc7102473044022017fb58502475848c1b09f162cb1688d0920ff7f142bed0ef904da2ccc88b168f02201798afa61850c65e77889cbcd648a5703b487895517c88f85cdd18b021ee246a012103596d3451025c19dbbdeb932d6bf8bfb4ad499b95b6f88db8899efac102e5fc7100000000000247304402202830b7926e488da75782c81a54cd281720890d1af064629ebf2e31bf9f5435f30220089afaa8b455bbeb7d9b9c3fe1ed37d07685ade8455c76472cda424d93e4074a012103596d3451025c19dbbdeb932d6bf8bfb4ad499b95b6f88db8899efac102e5fc7102473044022026326fcdae9207b596c2b05921dbac11d81040c4d40378513670f19d9f4af893022034ecd7a282c0163b89aaa62c22ec202cef4736c58cd251649bad0d8139bcbf55012103596d3451025c19dbbdeb932d6bf8bfb4ad499b95b6f88db8899efac102e5fc71024730440220214978daeb2f38cd426ee6e2f44131a33d6b191af1c216247f1dd7d74c16d84a02205fdc05529b0bc0c430b4d5987264d9d075351c4f4484c16e91662e90a72aab24012103596d3451025\
c19dbbdeb932d6bf8bfb4ad499b95b6f88db8899efac102e5fc710247304402204a6e9f199dc9672cf2ff8094aaa784363be1eb62b679f7ff2df361124f1dca3302205eeb11f70fab5355c9c8ad1a0700ea355d315e334822fa182227e9815308ee8f012103596d3451025c19dbbdeb932d6bf8bfb4ad499b95b6f88db8899efac102e5fc710000000000'
txid = 'b83579db5246aa34255642768167132a0c3d2932b186cd8fb9f5490460a0bf91'
self._run_naive_tests_on_tx(raw_tx, txid)
def test_txid_bitcoin_core_0080(self):
raw_tx = '010000000100010000000000000000000000000000000000000000000000000000000000000000000000ffffffff01e803000000000000015100000000'
txid = '2b1e44fff489d09091e5e20f9a01bbc0e8d80f0662e629fd10709cdb4922a874'
self._run_naive_tests_on_tx(raw_tx, txid)
def test_txid_bitcoin_core_0081(self):
raw_tx = '0100000000010200010000000000000000000000000000000000000000000000000000000000000000000000ffffffff00010000000000000000000000000000000000000000000000000000000000000100000000ffffffff01d00700000000000001510003483045022100e078de4e96a0e05dcdc0a414124dd8475782b5f3f0ed3f607919e9a5eeeb22bf02201de309b3a3109adb3de8074b3610d4cf454c49b61247a2779a0bcbf31c889333032103596d3451025c19dbbdeb932d6bf8bfb4ad499b95b6f88db8899efac102e5fc711976a9144c9c3dfac4207d5d8cb89df5722cb3d712385e3f88ac00000000'
txid = '60ebb1dd0b598e20dd0dd462ef6723dd49f8f803b6a2492926012360119cfdd7'
self._run_naive_tests_on_tx(raw_tx, txid)
def test_txid_bitcoin_core_0082(self):
raw_tx = '0100000000010200010000000000000000000000000000000000000000000000000000000000000000000000ffffffff00010000000000000000000000000000000000000000000000000000000000000100000000ffffffff02e8030000000000000151e90300000000000001510247304402206d59682663faab5e4cb733c562e22cdae59294895929ec38d7c016621ff90da0022063ef0af5f970afe8a45ea836e3509b8847ed39463253106ac17d19c437d3d56b832103596d3451025c19dbbdeb932d6bf8bfb4ad499b95b6f88db8899efac102e5fc710248304502210085001a820bfcbc9f9de0298af714493f8a37b3b354bfd21a7097c3e009f2018c022050a8b4dbc8155d4d04da2f5cdd575dcf8dd0108de8bec759bd897ea01ecb3af7832103596d3451025c19dbbdeb932d6bf8bfb4ad499b95b6f88db8899efac102e5fc7100000000'
txid = 'ed0c7f4163e275f3f77064f471eac861d01fdf55d03aa6858ebd3781f70bf003'
self._run_naive_tests_on_tx(raw_tx, txid)
def test_txid_bitcoin_core_0083(self):
raw_tx = '0100000000010200010000000000000000000000000000000000000000000000000000000000000100000000ffffffff00010000000000000000000000000000000000000000000000000000000000000000000000ffffffff02e9030000000000000151e80300000000000001510248304502210085001a820bfcbc9f9de0298af714493f8a37b3b354bfd21a7097c3e009f2018c022050a8b4dbc8155d4d04da2f5cdd575dcf8dd0108de8bec759bd897ea01ecb3af7832103596d3451025c19dbbdeb932d6bf8bfb4ad499b95b6f88db8899efac102e5fc710247304402206d59682663faab5e4cb733c562e22cdae59294895929ec38d7c016621ff90da0022063ef0af5f970afe8a45ea836e3509b8847ed39463253106ac17d19c437d3d56b832103596d3451025c19dbbdeb932d6bf8bfb4ad499b95b6f88db8899efac102e5fc7100000000'
txid = 'f531ddf5ce141e1c8a7fdfc85cc634e5ff686f446a5cf7483e9dbe076b844862'
self._run_naive_tests_on_tx(raw_tx, txid)
def test_txid_bitcoin_core_0084(self):
raw_tx = '01000000020001000000000000000000000000000000000000000000000000000000000000000000004847304402202a0b4b1294d70540235ae033d78e64b4897ec859c7b6f1b2b1d8a02e1d46006702201445e756d2254b0f1dfda9ab8e1e1bc26df9668077403204f32d16a49a36eb6983ffffffff00010000000000000000000000000000000000000000000000000000000000000100000049483045022100acb96cfdbda6dc94b489fd06f2d720983b5f350e31ba906cdbd800773e80b21c02200d74ea5bdf114212b4bbe9ed82c36d2e369e302dff57cb60d01c428f0bd3daab83ffffffff02e8030000000000000151e903000000000000015100000000'
txid = '98229b70948f1c17851a541f1fe532bf02c408267fecf6d7e174c359ae870654'
self._run_naive_tests_on_tx(raw_tx, txid)
def test_txid_bitcoin_core_0085(self):
raw_tx = '01000000000102fe3dc9208094f3ffd12645477b3dc56f60ec4fa8e6f5d67c565d1c6b9216b36e000000004847304402200af4e47c9b9629dbecc21f73af989bdaa911f7e6f6c2e9394588a3aa68f81e9902204f3fcf6ade7e5abb1295b6774c8e0abd94ae62217367096bc02ee5e435b67da201ffffffff0815cf020f013ed6cf91d29f4202e8a58726b1ac6c79da47c23d1bee0a6925f80000000000ffffffff0100f2052a010000001976a914a30741f8145e5acadf23f751864167f32e0963f788ac000347304402200de66acf4527789bfda55fc5459e214fa6083f936b430a762c629656216805ac0220396f550692cd347171cbc1ef1f51e15282e837bb2b30860dc77c8f78bc8501e503473044022027dc95ad6b740fe5129e7e62a75dd00f291a2aeb1200b84b09d9e3789406b6c002201a9ecd315dd6a0e632ab20bbb98948bc0c6fb204f2c286963bb48517a7058e27034721026dccc749adc2a9d0d89497ac511f760f45c47dc5ed9cf352a58ac706453880aeadab210255a9626aebf5e29c0e6538428ba0d1dcf6ca98ffdf086aa8ced5e0d0215ea465ac00000000'
txid = '570e3730deeea7bd8bc92c836ccdeb4dd4556f2c33f2a1f7b889a4cb4e48d3ab'
self._run_naive_tests_on_tx(raw_tx, txid)
def test_txid_bitcoin_core_0086(self):
raw_tx = '01000000000102e9b542c5176808107ff1df906f46bb1f2583b16112b95ee5380665ba7fcfc0010000000000ffffffff80e68831516392fcd100d186b3c2c7b95c80b53c77e77c35ba03a66b429a2a1b0000000000ffffffff0280969800000000001976a914de4b231626ef508c9a74a8517e6783c0546d6b2888ac80969800000000001976a9146648a8cd4531e1ec47f35916de8e259237294d1e88ac02483045022100f6a10b8604e6dc910194b79ccfc93e1bc0ec7c03453caaa8987f7d6c3413566002206216229ede9b4d6ec2d325be245c5b508ff0339bf1794078e20bfe0babc7ffe683270063ab68210392972e2eb617b2388771abe27235fd5ac44af8e61693261550447a4c3e39da98ac024730440220032521802a76ad7bf74d0e2c218b72cf0cbc867066e2e53db905ba37f130397e02207709e2188ed7f08f4c952d9d13986da504502b8c3be59617e043552f506c46ff83275163ab68210392972e2eb617b2388771abe27235fd5ac44af8e61693261550447a4c3e39da98ac00000000'
txid = 'e0b8142f587aaa322ca32abce469e90eda187f3851043cc4f2a0fff8c13fc84e'
self._run_naive_tests_on_tx(raw_tx, txid)
def test_txid_bitcoin_core_0087(self):
raw_tx = '0100000000010280e68831516392fcd100d186b3c2c7b95c80b53c77e77c35ba03a66b429a2a1b0000000000ffffffffe9b542c5176808107ff1df906f46bb1f2583b16112b95ee5380665ba7fcfc0010000000000ffffffff0280969800000000001976a9146648a8cd4531e1ec47f35916de8e259237294d1e88ac80969800000000001976a914de4b231626ef508c9a74a8517e6783c0546d6b2888ac024730440220032521802a76ad7bf74d0e2c218b72cf0cbc867066e2e53db905ba37f130397e02207709e2188ed7f08f4c952d9d13986da504502b8c3be59617e043552f506c46ff83275163ab68210392972e2eb617b2388771abe27235fd5ac44af8e61693261550447a4c3e39da98ac02483045022100f6a10b8604e6dc910194b79ccfc93e1bc0ec7c03453caaa8987f7d6c3413566002206216229ede9b4d6ec2d325be245c5b508ff0339bf1794078e20bfe0babc7ffe683270063ab68210392972e2eb617b2388771abe27235fd5ac44af8e61693261550447a4c3e39da98ac00000000'
txid = 'b9ecf72df06b8f98f8b63748d1aded5ffc1a1186f8a302e63cf94f6250e29f4d'
self._run_naive_tests_on_tx(raw_tx, txid)
def test_txid_bitcoin_core_0088(self):
raw_tx = '0100000000010136641869ca081e70f394c6948e8af409e18b619df2ed74aa106c1ca29787b96e0100000023220020a16b5755f7f6f96dbd65f5f0d6ab9418b89af4b1f14a1bb8a09062c35f0dcb54ffffffff0200e9a435000000001976a914389ffce9cd9ae88dcc0631e88a821ffdbe9bfe2688acc0832f05000000001976a9147480a33f950689af511e6e84c138dbbd3c3ee41588ac080047304402206ac44d672dac41f9b00e28f4df20c52eeb087207e8d758d76d92c6fab3b73e2b0220367750dbbe19290069cba53d096f44530e4f98acaa594810388cf7409a1870ce01473044022068c7946a43232757cbdf9176f009a928e1cd9a1a8c212f15c1e11ac9f2925d9002205b75f937ff2f9f3c1246e547e54f62e027f64eefa2695578cc6432cdabce271502473044022059ebf56d98010a932cf8ecfec54c48e6139ed6adb0728c09cbe1e4fa0915302e022007cd986c8fa870ff5d2b3a89139c9fe7e499259875357e20fcbb15571c76795403483045022100fbefd94bd0a488d50b79102b5dad4ab6ced30c4069f1eaa69a4b5a763414067e02203156c6a5c9cf88f91265f5a942e96213afae16d83321c8b31bb342142a14d16381483045022100a5263ea0553ba89221984bd7f0b13613db16e7a70c549a86de0cc0444141a407022005c360ef0ae5a5d4f9f2f87a56c1546cc8268cab08c73501d6b3be2e1e1a8a08824730440220525406a1482936d5a21888260dc165497a90a15669636d8edca6b9fe490d309c022032af0c646a34a44d1f4576bf6a4a74b67940f8faa84c7df9abe12a01a11e2b4783cf56210307b8ae49ac90a048e9b53357a2354b3334e9c8bee813ecb98e99a7e07e8c3ba32103b28f0c28bfab54554ae8c658ac5c3e0ce6e79ad336331f78c428dd43eea8449b21034b8113d703413d57761b8b9781957b8c0ac1dfe69f492580ca4195f50376ba4a21033400f6afecb833092a9a21cfdf1ed1376e58c5d1f47de74683123987e967a8f42103a6d48b1131e94ba04d9737d61acdaa1322008af9602b3b14862c07a1789aac162102d8b661b0b3302ee2f162b09e07a55ad5dfbe673a9f01d9f0c19617681024306b56ae00000000'
txid = '27eae69aff1dd4388c0fa05cbbfe9a3983d1b0b5811ebcd4199b86f299370aac'
self._run_naive_tests_on_tx(raw_tx, txid)
def test_txid_bitcoin_core_0089(self):
raw_tx = '010000000169c12106097dc2e0526493ef67f21269fe888ef05c7a3a5dacab38e1ac8387f1581b0000b64830450220487fb382c4974de3f7d834c1b617fe15860828c7f96454490edd6d891556dcc9022100baf95feb48f845d5bfc9882eb6aeefa1bc3790e39f59eaa46ff7f15ae626c53e0121037a3fb04bcdb09eba90f69961ba1692a3528e45e67c85b200df820212d7594d334aad4830450220487fb382c4974de3f7d834c1b617fe15860828c7f96454490edd6d891556dcc9022100baf95feb48f845d5bfc9882eb6aeefa1bc3790e39f59eaa46ff7f15ae626c53e01ffffffff0101000000000000000000000000'
txid = '22d020638e3b7e1f2f9a63124ac76f5e333c74387862e3675f64b25e960d3641'
self._run_naive_tests_on_tx(raw_tx, txid)
def test_txid_bitcoin_core_0090(self):
raw_tx = '0100000000010169c12106097dc2e0526493ef67f21269fe888ef05c7a3a5dacab38e1ac8387f14c1d000000ffffffff01010000000000000000034830450220487fb382c4974de3f7d834c1b617fe15860828c7f96454490edd6d891556dcc9022100baf95feb48f845d5bfc9882eb6aeefa1bc3790e39f59eaa46ff7f15ae626c53e012102a9781d66b61fb5a7ef00ac5ad5bc6ffc78be7b44a566e3c87870e1079368df4c4aad4830450220487fb382c4974de3f7d834c1b617fe15860828c7f96454490edd6d891556dcc9022100baf95feb48f845d5bfc9882eb6aeefa1bc3790e39f59eaa46ff7f15ae626c53e0100000000'
txid = '2862bc0c69d2af55da7284d1b16a7cddc03971b77e5a97939cca7631add83bf5'
self._run_naive_tests_on_tx(raw_tx, txid)
def test_txid_bitcoin_core_0091(self):
raw_tx = '01000000019275cb8d4a485ce95741c013f7c0d28722160008021bb469a11982d47a662896581b0000fd6f01004830450220487fb382c4974de3f7d834c1b617fe15860828c7f96454490edd6d891556dcc9022100baf95feb48f845d5bfc9882eb6aeefa1bc3790e39f59eaa46ff7f15ae626c53e0148304502205286f726690b2e9b0207f0345711e63fa7012045b9eb0f19c2458ce1db90cf43022100e89f17f86abc5b149eba4115d4f128bcf45d77fb3ecdd34f594091340c03959601522102cd74a2809ffeeed0092bc124fd79836706e41f048db3f6ae9df8708cefb83a1c2102e615999372426e46fd107b76eaf007156a507584aa2cc21de9eee3bdbd26d36c4c9552af4830450220487fb382c4974de3f7d834c1b617fe15860828c7f96454490edd6d891556dcc9022100baf95feb48f845d5bfc9882eb6aeefa1bc3790e39f59eaa46ff7f15ae626c53e0148304502205286f726690b2e9b0207f0345711e63fa7012045b9eb0f19c2458ce1db90cf43022100e89f17f86abc5b149eba4115d4f128bcf45d77fb3ecdd34f594091340c0395960175ffffffff0101000000000000000000000000'
txid = '1aebf0c98f01381765a8c33d688f8903e4d01120589ac92b78f1185dc1f4119c'
self._run_naive_tests_on_tx(raw_tx, txid)
def test_txid_bitcoin_core_0092(self):
raw_tx = '010000000001019275cb8d4a485ce95741c013f7c0d28722160008021bb469a11982d47a6628964c1d000000ffffffff0101000000000000000007004830450220487fb382c4974de3f7d834c1b617fe15860828c7f96454490edd6d891556dcc9022100baf95feb48f845d5bfc9882eb6aeefa1bc3790e39f59eaa46ff7f15ae626c53e0148304502205286f726690b2e9b0207f0345711e63fa7012045b9eb0f19c2458ce1db90cf43022100e89f17f86abc5b149eba4115d4f128bcf45d77fb3ecdd34f594091340c0395960101022102966f109c54e85d3aee8321301136cedeb9fc710fdef58a9de8a73942f8e567c021034ffc99dd9a79dd3cb31e2ab3e0b09e0e67db41ac068c625cd1f491576016c84e9552af4830450220487fb382c4974de3f7d834c1b617fe15860828c7f96454490edd6d891556dcc9022100baf95feb48f845d5bfc9882eb6aeefa1bc3790e39f59eaa46ff7f15ae626c53e0148304502205286f726690b2e9b0207f0345711e63fa7012045b9eb0f19c2458ce1db90cf43022100e89f17f86abc5b149eba4115d4f128bcf45d77fb3ecdd34f594091340c039596017500000000'
txid = '45d17fb7db86162b2b6ca29fa4e163acf0ef0b54110e49b819bda1f948d423a3'
self._run_naive_tests_on_tx(raw_tx, txid)
# txns from Bitcoin Core ends <---
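# Minimal test double for the wallet's network interface: synchronous_get ignores
# its request argument and simply returns the preconfigured list of unspent outputs.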
class NetworkMock(object):
def __init__(self, unspent):
self.unspent = unspent
def synchronous_get(self, arg):
return self.unspent
| 111.29585 | 3,841 | 0.899144 |
79409a19e31a597a2549497e81e88c1c1590a076 | 143 | py | Python | languages/python/django-oso/tests/test_app/__init__.py | connec/oso | a12d94206807b69beb6fe7a9070b9afcacdfc845 | ["Apache-2.0"] | 2,167 | 2020-07-28T15:49:48.000Z | 2022-03-31T06:11:28.000Z | languages/python/django-oso/tests/test_app/__init__.py | connec/oso | a12d94206807b69beb6fe7a9070b9afcacdfc845 | ["Apache-2.0"] | 1,060 | 2020-07-25T18:37:07.000Z | 2022-03-30T05:49:44.000Z | languages/python/django-oso/tests/test_app/__init__.py | connec/oso | a12d94206807b69beb6fe7a9070b9afcacdfc845 | ["Apache-2.0"] | 118 | 2020-08-05T19:27:14.000Z | 2022-03-31T16:37:39.000Z |
from django.apps import AppConfig
default_app_config = "test_app.TestAppAppConfig"
class TestAppAppConfig(AppConfig):
name = "test_app"
| 17.875 | 48 | 0.79021 |
79409a515f5f170bb6ab591d1f25de3a58b77860 | 273 | py | Python | .idea/fileTemplates/internal/Python Script.py | AndyZ-Salz/Photo_adjust | edfc629d22b48d9d77e7b18f9a7fceabd413e85f | ["MIT"] | null | null | null | .idea/fileTemplates/internal/Python Script.py | AndyZ-Salz/Photo_adjust | edfc629d22b48d9d77e7b18f9a7fceabd413e85f | ["MIT"] | null | null | null | .idea/fileTemplates/internal/Python Script.py | AndyZ-Salz/Photo_adjust | edfc629d22b48d9d77e7b18f9a7fceabd413e85f | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
"""
---------------------------------------
@file : ${NAME}
@Version : ??
@Author : Andy Zang
@software: ${PRODUCT_NAME}
@For :
---------------------------------------
"""
# History:
# ${DATE}: Create
if __name__ == '__main__':
    pass
 | 15.166667 | 39 | 0.369963 |
79409a7464792823835489041f33726d7bbe7745 | 5,817 | py | Python | PECNet/social_eth_ucy_utils.py | Mykko/Human-Path-Prediction | 956fcf16b98c81cf8e23133f9a766192e17e63e0 | ["MIT"] | null | null | null | PECNet/social_eth_ucy_utils.py | Mykko/Human-Path-Prediction | 956fcf16b98c81cf8e23133f9a766192e17e63e0 | ["MIT"] | null | null | null | PECNet/social_eth_ucy_utils.py | Mykko/Human-Path-Prediction | 956fcf16b98c81cf8e23133f9a766192e17e63e0 | ["MIT"] | null | null | null |
import pickle
import numpy as np
from IPython import embed
import csv
import torch
from torch import nn
from torch.utils import data
import os
import sys
sys.path.append('../')
from eth_ucy_utils import *
def get_pickle_path(set_name, set_type):
_dir = os.path.dirname(__file__)
if _dir:
return _dir + '/datasets/{0}/{1}/{0}_{1}.p'.format(set_name, set_type)
else:
return './datasets/{0}/{1}/{0}_{1}.p'.format(set_name, set_type)
def find_min_time(t1, t2):
    '''Given two frame-time arrays, find the smallest gap between either track's starting time and the other track's frame times.'''
    min_d = 999999999
    for t in t2:
        if abs(t1[0]-t)<min_d:
            min_d = abs(t1[0]-t)
    for t in t1:
        if abs(t2[0]-t)<min_d:
            min_d = abs(t2[0]-t)
    return min_d
def find_min_dist(p1x, p1y, p2x, p2y):
    '''Given the x/y coordinate arrays of two tracks, find the minimum pairwise distance between their points.'''
    min_d = 999999999
    for i in range(len(p2x)):  # p2 points are indexed by i, p1 points by j
        for j in range(len(p1x)):
            if ((p2x[i]-p1x[j])**2 + (p2y[i]-p1y[j])**2)**0.5 < min_d:
                min_d = ((p2x[i]-p1x[j])**2 + (p2y[i]-p1y[j])**2)**0.5
    return min_d
def social_and_temporal_filter(p1_key, p2_key, all_data_dict, time_thresh=10, dist_tresh=2):
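    # Two tracks may share a batch only if they come from the same scene, belong to
    # different pedestrians, start within time_thresh frames of each other, and pass
    # within dist_tresh of each other at some point.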
p1_traj, p2_traj = np.array(all_data_dict[p1_key]), np.array(all_data_dict[p2_key])
p1_time, p2_time = p1_traj[:,0], p2_traj[:,0]
p1_x, p2_x = p1_traj[:,2], p2_traj[:,2]
p1_y, p2_y = p1_traj[:,3], p2_traj[:,3]
if all_data_dict[p1_key][0][4]!=all_data_dict[p2_key][0][4]: #adding the condition that they must be from the same environment
return False
if all_data_dict[p1_key][0][1]==all_data_dict[p2_key][0][1]: #if they are the same person id, no self loops
return False
if find_min_time(p1_time, p2_time)>time_thresh:
return False
if find_min_dist(p1_x, p1_y, p2_x, p2_y)>dist_tresh:
return False
return True
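# Mark every index pair from sim_list as mutually related in the square adjacency mask.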
def mark_similar(mask, sim_list):
for i in range(len(sim_list)):
for j in range(len(sim_list)):
mask[sim_list[i]][sim_list[j]] = 1
def socially_pickle_data(batch_size=512, time_thresh=0, dist_tresh=10):
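    # Greedy batching per scene: seed each batch with one remaining pedestrian, pull in
    # every remaining track that passes the social/temporal filter against that seed, and
    # store a pairwise mask recording which tracks in the batch form a social group.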
print("pickling...")
for scene in ['eth', 'univ', 'zara1', 'zara2', 'hotel']:
for j in ['test']:
path = get_pickle_path(scene, j)
data = pickle.load(open(path, "rb"))
full_dataset = []
full_masks = []
current_batch = []
mask_batch = [[0 for i in range(int(batch_size*2))] for j in range(int(batch_size*2))]
current_size = 0
social_id = 0
data_by_id = {}
person_id = 0
for d in data:
data_by_id[person_id] = d
person_id += 1
all_data_dict = data_by_id.copy()
print("Total People: ", len(list(data_by_id.keys())))
while len(list(data_by_id.keys()))>0:
print(len(list(data_by_id.keys())))
related_list = []
curr_keys = list(data_by_id.keys())
if current_size<batch_size:
pass
else:
full_dataset.append(current_batch.copy())
mask_batch = np.array(mask_batch)
full_masks.append(mask_batch[0:len(current_batch), 0:len(current_batch)])
current_size = 0
social_id = 0
current_batch = []
mask_batch = [[0 for i in range(int(batch_size*2))] for j in range(int(batch_size*2))]
current_batch.append((all_data_dict[curr_keys[0]]))
related_list.append(current_size)
current_size+=1
del data_by_id[curr_keys[0]]
for i in range(1, len(curr_keys)):
if social_and_temporal_filter(curr_keys[0], curr_keys[i], all_data_dict, time_thresh=time_thresh, dist_tresh=dist_tresh):
current_batch.append((all_data_dict[curr_keys[i]]))
related_list.append(current_size)
current_size+=1
del data_by_id[curr_keys[i]]
mark_similar(mask_batch, related_list)
social_id +=1
full_dataset.append(current_batch)
mask_batch = np.array(mask_batch)
full_masks.append(mask_batch[0:len(current_batch),0:len(current_batch)])
all_data = [full_dataset, full_masks]
save_name = "social_eth_ucy_dataset/social_" + str(scene) + "_" + str(j) + "_" + str(batch_size) + "_" + str(time_thresh) + "_" + str(dist_tresh) + ".pickle"
with open(save_name, 'wb') as f:
pickle.dump(all_data, f)
# socially_pickle_data(batch_size=4096, time_thresh=0, dist_tresh=50)
# socially_pickle_data(batch_size=256, time_thresh=0, dist_tresh=50)
def initial_pos(traj_batches):
batches = []
for b in traj_batches:
starting_pos = b[:,7,:].copy()/1000 #starting pos is end of past, start of future. scaled down.
batches.append(starting_pos)
return batches
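# CVAE training loss pieces: destination reconstruction error, KL divergence of the
# latent distribution, and an auxiliary error on the interpolated future trajectory.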
def calculate_loss(x, reconstructed_x, mean, log_var, criterion, future, interpolated_future):
# reconstruction loss
RCL_dest = criterion(x, reconstructed_x)
ADL_traj = criterion(future, interpolated_future) # better with l2 loss
# kl divergence loss
KLD = -0.5 * torch.sum(1 + log_var - mean.pow(2) - log_var.exp())
return RCL_dest, KLD, ADL_traj
class SocialDatasetETHUCY(data.Dataset):
def __init__(self, set_name=None, set_type=None, b_size=512, t_tresh=0, d_tresh=10):
'Initialization'
load_name = "../eth_ucy/social_eth_ucy_dataset/social_" + set_name + "_" + set_type + "_" + str(b_size) + "_" + str(t_tresh) + "_" + str(d_tresh) + ".pickle"
with open(load_name, 'rb') as f:
data = pickle.load(f)
traj, masks = data
traj_new = []
for t in traj:
t = np.array(t)
t = t[:,:,2:4]
traj_new.append(t)
if set_name=="train":
#augment training set with reversed tracklets...
reverse_t = np.flip(t, axis=1).copy()
traj_new.append(reverse_t)
#comment
masks_new = []
for m in masks:
masks_new.append(m)
if set_name=="train":
#add second time for the reversed tracklets...
masks_new.append(m)
traj_new = np.array(traj_new)
masks_new = np.array(masks_new)
self.trajectory_batches = traj_new.copy()
self.mask_batches = masks_new.copy()
self.initial_pos_batches = np.array(initial_pos(self.trajectory_batches)) #for relative positioning
print("Initialized social dataloader for ucy/eth...")
| 31.106952 | 160 | 0.691937 |
79409aef19d0ff8b8d84f29dd09951c8689529fc | 808 | py | Python | ex101.py | ezequielwish/Python3 | a4489d49e6919649437cb9e682614240701e2b68 | ["MIT"] | 1 | 2022-01-24T02:01:32.000Z | 2022-01-24T02:01:32.000Z | ex101.py | ezequielwish/Python3 | a4489d49e6919649437cb9e682614240701e2b68 | ["MIT"] | null | null | null | ex101.py | ezequielwish/Python3 | a4489d49e6919649437cb9e682614240701e2b68 | ["MIT"] | null | null | null |
# Write a program with a function called voto() that receives a person's year of birth
# as a parameter and returns a literal value indicating whether that person's vote is
# DENIED, OPTIONAL or MANDATORY in the elections.
def vote(birth_year):
"""
    Checks whether the person is allowed to vote
    :param birth_year: the person's year of birth
    :return: prints on screen whether the vote is denied, optional or mandatory
"""
from datetime import datetime
year = datetime.now().year
age = year - birth_year
    if age < 16:
        print(f'\033[32mAt {age} years old you cannot vote.\033[m')
    elif 60 > age > 17:
        print(f'\033[31mAt {age} years old voting is mandatory.\033[m')
    else:
        print(f'\033[33mAt {age} years old voting is optional.\033[m')
birth = int(input('What year were you born? '))
vote(birth)
| 32.32 | 105 | 0.670792 |
79409c7cc45ee6b8a1f165303ea3cb10bfff085e | 2,969 | py | Python | lib/bes/system/console.py | reconstruir/bes | 82ff54b2dadcaef6849d7de424787f1dedace85c | ["Apache-2.0"] | null | null | null | lib/bes/system/console.py | reconstruir/bes | 82ff54b2dadcaef6849d7de424787f1dedace85c | ["Apache-2.0"] | null | null | null | lib/bes/system/console.py | reconstruir/bes | 82ff54b2dadcaef6849d7de424787f1dedace85c | ["Apache-2.0"] | null | null | null |
#-*- coding:utf-8; mode:python; indent-tabs-mode: nil; c-basic-offset: 2; tab-width: 2 -*-
from os import path
import os, sys
from .host import host
from .execute import execute
class console(object):
'Class to deal with the console in a cross platform manner.'
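  # Messages go either to stdout or straight to the terminal device (con: on Windows,
  # /dev/tty or /dev/console on Unix); a platform line break is appended when missing.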
@classmethod
def output(clazz, message, console = False):
if console:
clazz._output_console(message)
else:
clazz._output_stream(sys.stdout, message)
@classmethod
def _output_console(clazz, message):
devices = clazz._possible_devices()
fout = clazz._open_console(devices)
if fout:
try:
clazz._output_stream(fout, message)
except Exception as ex:
pass
finally:
fout.close()
_WINDOWS_DEVICES = [ 'con:' ]
_UNIX_DEVICES = [ '/dev/tty', '/dev/console' ]
@classmethod
def _possible_devices(clazz):
if host.is_windows():
return clazz._WINDOWS_DEVICES
elif host.is_unix():
return clazz._UNIX_DEVICES
return []
@classmethod
def _open_console(clazz, devices):
for dev in devices:
try:
f = open(dev, 'w')
return f
except Exception as ex:
pass
return None
@classmethod
def _output_stream(clazz, stream, message):
stream.write(message)
if not clazz._ends_with_line_break(message):
stream.write(clazz._DEFAULT_LINE_BREAK)
stream.flush()
_LINE_BREAKS = [ '\n', '\r\n' ]
_DEFAULT_LINE_BREAK = '\n' if host.is_unix() else '\r\n'
@classmethod
def _ends_with_line_break(clazz, s):
for lb in clazz._LINE_BREAKS:
if s.endswith(lb):
return True
return False
@classmethod
def terminal_device(clazz):
'Return the current terminal device or None if not a terminal.'
if host.is_windows():
return False
elif host.is_unix():
try:
dev = execute.execute('tty').stdout.strip()
with open(dev, 'r') as f:
if os.isatty(f.fileno()):
return dev
else:
return None
except Exception as ex:
print(ex)
return None
else:
host.raise_unsupported_system()
@classmethod
def terminal_size(clazz):
'Return a 2-tuple ( width, height ) size of the current terminal or None if not a terminal.'
dev = clazz.terminal_device()
if not dev:
return None
assert host.is_unix() # for now only unix
try:
with open(dev, 'r') as f:
cmd = 'stty size < {}'.format(dev)
s = os.popen(cmd, 'r').read().split()
return int(s[1]), int(s[0])
except Exception as ex:
print(ex)
return None
@classmethod
def terminal_width(clazz, default = 80):
    'Return the terminal width, or the given default if not a terminal.'
s = clazz.terminal_size()
return s[0] if s else default
@classmethod
def terminal_heigh(clazz, default = 36):
    'Return the terminal height, or the given default if not a terminal.'
s = clazz.terminal_size()
return s[1] if s else default
| 26.04386 | 96 | 0.634557 |
79409d5e7350cdb70ba95cd381679143f629cef5 | 887 | py | Python | aversiestofunciona.py | juanitopereza/Granulacion | 07e455fee338f86bf4dc5eae068b677e87a534d0 | ["MIT"] | null | null | null | aversiestofunciona.py | juanitopereza/Granulacion | 07e455fee338f86bf4dc5eae068b677e87a534d0 | ["MIT"] | null | null | null | aversiestofunciona.py | juanitopereza/Granulacion | 07e455fee338f86bf4dc5eae068b677e87a534d0 | ["MIT"] | null | null | null |
import numpy as np
import matplotlib.pyplot as plt
from os import listdir
#%%
def find_nearest(array,value):
idx = np.argsort(np.abs(array-value))[0:2]
return idx
#%%
mibisec = {}
archivos = listdir("./lineas")
NAVE = np.loadtxt("lineas_NAVE.txt")
c = 299792458.0 # CODATA
grav_red = 625.0 # gravitational redshift
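# For each measured line list, find the closest NAVE reference wavelength and convert
# the offsets into Doppler velocities (m/s), subtracting the gravitational redshift.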
for cosa in archivos:
mibisec[cosa] = np.loadtxt("./lineas/{}".format(cosa))
linea_nave = NAVE[find_nearest(NAVE,mibisec[cosa][1][0])][0]
mibisec[cosa][:,0] = c*(mibisec[cosa][:,0] - linea_nave)/linea_nave - grav_red
len(mibisec)
#%%
plt.figure(figsize=(15,15))
for cosa in archivos:
plt.scatter(mibisec[cosa][0,0],mibisec[cosa][0,1])
plt.plot(mibisec[cosa][2:,0],mibisec[cosa][2:,1])
plt.xlim(-1000,1000)
plt.ticklabel_format(useOffset=False)
plt.xlabel("$Velocidad\ [m/s]$")
plt.ylabel("$F/F_c$")
#plt.savefig("plots.pdf")
plt.show()
| 23.342105 | 82 | 0.677565 |
79409dc6f34bba687c1e515aab356eb244aeca8b | 828 | py | Python | mp_demo11.py | xinhui94/hades | aed748df9b163337151093b54eebb78d66d74d81 | ["MIT"] | null | null | null | mp_demo11.py | xinhui94/hades | aed748df9b163337151093b54eebb78d66d74d81 | ["MIT"] | null | null | null | mp_demo11.py | xinhui94/hades | aed748df9b163337151093b54eebb78d66d74d81 | ["MIT"] | null | null | null |
from multiprocessing import Pool
import time
def f(x):
return x*x
if __name__ == '__main__':
with Pool(processes=4) as pool: # start 4 worker processes
# evaluate "f(10)" asynchronously in a single process
result = pool.apply_async(f, (10,))
# prints "100" unless your computer is *very* slow
print(result.get(timeout=1))
print(pool.map(f, range(10))) # prints "[0, 1, 4,..., 81]"
it = pool.imap(f, range(10))
print(next(it)) # prints "0"
print(next(it)) # prints "1"
# prints "4" unless your computer is *very* slow
print(it.next(timeout=1))
result = pool.apply_async(time.sleep, (10,))
# raises multiprocessing.TimeoutError
print(result.get(timeout=1))
| 30.666667 | 72 | 0.561594 |
79409e7c7c3a9a38f7992535f3a9c59fc9d41539 | 19,625 | py | Python | service/cluster_service.py | reportportal/service-auto-analyzer | 38508e012d0ceeb621c508ad09f16ad6b5a88602 | ["Apache-2.0"] | 8 | 2020-06-04T10:32:27.000Z | 2022-02-17T08:11:00.000Z | service/cluster_service.py | reportportal/service-auto-analyzer | 38508e012d0ceeb621c508ad09f16ad6b5a88602 | ["Apache-2.0"] | 9 | 2019-12-12T11:18:37.000Z | 2022-02-19T16:17:28.000Z | service/cluster_service.py | reportportal/service-auto-analyzer | 38508e012d0ceeb621c508ad09f16ad6b5a88602 | ["Apache-2.0"] | 12 | 2020-04-01T15:19:40.000Z | 2022-03-03T14:41:55.000Z |
"""
* Copyright 2019 EPAM Systems
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
from commons.esclient import EsClient
import elasticsearch
import elasticsearch.helpers
from commons import clusterizer
from utils import utils
from commons.launch_objects import ClusterResult, ClusterInfo
from commons.log_preparation import LogPreparation
from commons.log_merger import LogMerger
from sklearn.feature_extraction.text import CountVectorizer
import numpy as np
from amqp.amqp import AmqpClient
import json
import logging
from time import time
from datetime import datetime
import hashlib
logger = logging.getLogger("analyzerApp.clusterService")
class ClusterService:
def __init__(self, app_config={}, search_cfg={}):
self.app_config = app_config
self.search_cfg = search_cfg
self.es_client = EsClient(app_config=app_config, search_cfg=search_cfg)
self.log_preparation = LogPreparation()
self.log_merger = LogMerger()
def build_search_similar_items_query(self, queried_log, message,
launch_info,
min_should_match="95%"):
"""Build search query"""
query = {
"_source": ["whole_message", "test_item",
"detected_message", "stacktrace", "launch_id", "cluster_id",
"cluster_message", "potential_status_codes", "found_exceptions"],
"size": 10,
"query": {
"bool": {
"filter": [
{"range": {"log_level": {"gte": utils.ERROR_LOGGING_LEVEL}}},
{"exists": {"field": "issue_type"}},
{"term": {"is_merged": False}},
],
"must_not": [{
"term": {"test_item": {"value": queried_log["_source"]["test_item"],
"boost": 1.0}}
}],
"should": [],
"must": [
{"wildcard": {"cluster_message": "*"}},
utils.build_more_like_this_query(
min_should_match, message,
field_name="whole_message", boost=1.0,
override_min_should_match=None,
max_query_terms=self.search_cfg["MaxQueryTerms"])
]}}}
if launch_info.forUpdate:
query["query"]["bool"]["should"].append(
{"term": {"launch_id": queried_log["_source"]["launch_id"]}})
else:
query["query"]["bool"]["must_not"].append(
{"term": {"launch_id": queried_log["_source"]["launch_id"]}})
query["query"]["bool"]["should"].append(
{"term": {"launch_name": launch_info.launchName}})
if queried_log["_source"]["found_exceptions"].strip():
query["query"]["bool"]["must"].append(
utils.build_more_like_this_query(
"1",
queried_log["_source"]["found_exceptions"],
field_name="found_exceptions", boost=1.0,
override_min_should_match="1",
max_query_terms=self.search_cfg["MaxQueryTerms"]))
if queried_log["_source"]["potential_status_codes"].strip():
number_of_status_codes = str(len(set(
queried_log["_source"]["potential_status_codes"].split())))
query["query"]["bool"]["must"].append(
utils.build_more_like_this_query(
"1",
queried_log["_source"]["potential_status_codes"],
field_name="potential_status_codes", boost=1.0,
override_min_should_match=number_of_status_codes,
max_query_terms=self.search_cfg["MaxQueryTerms"]))
return self.add_query_with_start_time_decay(query)
def add_query_with_start_time_decay(self, main_query):
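        # Wrap the query in a function_score: an exponential decay on start_time favours
        # recent logs, with a constant 0.2 floor (score_mode=max, boost_mode=multiply).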
return {
"size": main_query["size"],
"query": {
"function_score": {
"query": main_query["query"],
"functions": [
{
"exp": {
"start_time": {
"origin": datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
"scale": "7d",
"offset": "1d",
"decay": self.search_cfg["TimeWeightDecay"]
}
}
},
{
"script_score": {"script": {"source": "0.2"}}
}],
"score_mode": "max",
"boost_mode": "multiply"
}
}
}
def find_similar_items_from_es(
self, groups, log_dict,
log_messages, log_ids, launch_info,
additional_results):
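        # For every locally found group, search the index for similar logs (excluding the
        # current test item and, unless updating, the current launch); candidates that
        # cluster together with the seed log contribute their ids and any existing
        # cluster_id / cluster_message to the group.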
new_clusters = {}
_clusterizer = clusterizer.Clusterizer()
for global_group in groups:
first_item_ind = groups[global_group][0]
min_should_match = utils.calculate_threshold_for_text(
log_messages[first_item_ind],
self.search_cfg["ClusterLogsMinSimilarity"])
query = self.build_search_similar_items_query(
log_dict[first_item_ind],
log_messages[first_item_ind],
launch_info,
min_should_match=utils.prepare_es_min_should_match(
min_should_match))
search_results = self.es_client.es_client.search(
index=log_dict[first_item_ind]["_index"],
body=query)
log_messages_part = [log_messages[first_item_ind]]
log_dict_part = {0: log_dict[first_item_ind]}
ind = 1
for res in search_results["hits"]["hits"]:
if int(res["_id"]) in log_ids:
continue
log_dict_part[ind] = res
log_message = res["_source"]["whole_message"]
if launch_info.cleanNumbers:
log_message = utils.sanitize_text(log_message)
log_message = utils.prepare_message_for_clustering(
log_message, launch_info.numberOfLogLines)
equal = True
for column in ["found_exceptions", "potential_status_codes"]:
candidate_text = " ".join(sorted(res["_source"][column].split())).strip()
text_to_compare = " ".join(
sorted(log_dict[first_item_ind]["_source"][column].split())).strip()
if candidate_text != text_to_compare:
equal = False
break
if not log_message.strip() or not equal:
continue
log_messages_part.append(log_message)
ind += 1
groups_part = _clusterizer.find_clusters(log_messages_part, threshold=min_should_match)
new_group = None
for group in groups_part:
if 0 in groups_part[group]: # and len(groups_part[group]) > 1:
cluster_id = 0
cluster_message = ""
for ind in groups_part[group]:
if log_dict_part[ind]["_source"]["cluster_id"].strip() and int(
log_dict_part[ind]["_source"]["cluster_id"].strip()) != 0:
cluster_id = int(log_dict_part[ind]["_source"]["cluster_id"].strip())
if log_dict_part[ind]["_source"]["cluster_message"].strip():
cluster_message = log_dict_part[ind]["_source"]["cluster_message"]
new_group_log_ids = []
for ind in groups_part[group]:
if ind == 0:
continue
if log_dict_part[ind]["_source"]["launch_id"] != launch_info.launchId:
continue
log_ids.add(int(log_dict_part[ind]["_id"]))
new_group_log_ids.append(log_dict_part[ind]["_id"])
new_group = ClusterInfo(
logIds=new_group_log_ids,
clusterMessage=cluster_message,
clusterId=cluster_id)
break
if new_group:
new_clusters[global_group] = new_group
for group in new_clusters:
if group in additional_results:
additional_results[group].logIds.extend(new_clusters[group].logIds)
else:
additional_results[group] = new_clusters[group]
return additional_results
def calculate_hash(self, group_ids, log_dict, log_messages, number_of_lines):
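        # Derive a stable cluster id from the bigrams shared by up to 100 logs of the
        # group (SHA-1 of the joined bigrams reduced mod 10**16); the cluster message is
        # the first `number_of_lines` lines of the group's first log.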
group_logs = []
log_message = ""
for i in range(min(100, len(group_ids))):
ind = group_ids[i]
group_logs.append(log_messages[ind])
if not log_message:
log_message = utils.first_lines(
log_dict[ind]["_source"]["whole_message"], number_of_lines).strip()
_cnt_vectorizer = CountVectorizer(
binary=True, analyzer="word", token_pattern="[^ ]+", ngram_range=(2, 2))
group_res = _cnt_vectorizer.fit_transform(group_logs).astype(np.int8)
res_bitwise = np.bitwise_and.reduce(group_res.toarray(), axis=0)
bigrams_list = []
for i, feature_name in enumerate(_cnt_vectorizer.get_feature_names()):
if res_bitwise[i] == 1:
bigrams_list.append(feature_name)
hash_message = int(
hashlib.sha1(" ".join(bigrams_list).encode("utf-8")).hexdigest(), 16) % (10 ** 16)
return hash_message, log_message
def gather_cluster_results(
self, groups, additional_results, log_dict, log_messages, number_of_lines):
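        # Only groups with more than one log (including ES additions) become clusters;
        # reuse the id/message found in Elasticsearch when available, otherwise compute a
        # fresh bigram-hash id and first-lines message, and collect all member log ids.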
results_to_return = []
cluster_num = 0
for group in groups:
cnt_items = len(groups[group])
cluster_id = 0
cluster_message = ""
if group in additional_results:
cnt_items += len(additional_results[group].logIds)
cluster_id = additional_results[group].clusterId
cluster_message = additional_results[group].clusterMessage
if cnt_items > 1:
cluster_num += 1
if not cluster_id:
cluster_id, cluster_message = self.calculate_hash(
groups[group], log_dict, log_messages, number_of_lines)
log_ids = []
for ind in groups[group]:
log_ids.append(log_dict[ind]["_id"])
if group in additional_results:
log_ids.extend(additional_results[group].logIds)
results_to_return.append(ClusterInfo(
clusterId=cluster_id,
clusterMessage=cluster_message,
logIds=log_ids))
return results_to_return, cluster_num
def regroup_by_error_ans_status_codes(self, log_messages, log_dict):
regroupped_by_error = {}
for i in range(len(log_messages)):
found_exceptions = " ".join(
sorted(log_dict[i]["_source"]["found_exceptions"].split()))
potential_status_codes = " ".join(
sorted(log_dict[i]["_source"]["potential_status_codes"].split()))
group_key = (found_exceptions, potential_status_codes)
if group_key not in regroupped_by_error:
regroupped_by_error[group_key] = []
regroupped_by_error[group_key].append(i)
return regroupped_by_error
def cluster_messages_with_groupping_by_error(self, log_messages, log_dict):
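        # Split logs by identical (found_exceptions, potential_status_codes) first, run
        # text clustering inside each bucket, and offset group ids so they stay unique
        # across buckets.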
regroupped_by_error = self.regroup_by_error_ans_status_codes(
log_messages, log_dict)
_clusterizer = clusterizer.Clusterizer()
all_groups = {}
start_group_id = 0
for group_key in regroupped_by_error:
log_messages_part = []
log_messages_idx_dict = {}
for i, idx in enumerate(regroupped_by_error[group_key]):
log_messages_part.append(log_messages[idx])
log_messages_idx_dict[i] = idx
groups = _clusterizer.find_clusters(
log_messages_part,
threshold=self.search_cfg["ClusterLogsMinSimilarity"])
max_group_id = max(groups.keys())
for group_id in groups:
global_idx = start_group_id + group_id
if global_idx not in all_groups:
all_groups[global_idx] = []
for i in groups[group_id]:
all_groups[global_idx].append(log_messages_idx_dict[i])
start_group_id = start_group_id + max_group_id + 1
return all_groups
def get_logs_for_clustering_query(self, launch_info):
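        # Fetch every error-level, non-merged log of the launch; for update runs, only
        # logs that do not yet carry a cluster_message are requested.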
query = {
"_source": ["whole_message", "test_item",
"detected_message", "stacktrace", "launch_id", "cluster_id",
"cluster_message", "message", "log_level", "original_message_lines",
"original_message_words_number", "potential_status_codes", "found_exceptions"],
"size": self.app_config["esChunkNumber"],
"query": {
"bool": {
"filter": [
{"range": {"log_level": {"gte": utils.ERROR_LOGGING_LEVEL}}},
{"exists": {"field": "issue_type"}},
{"term": {"is_merged": False}},
],
"must_not": [],
"should": [],
"must": [{"term": {"launch_id": launch_info.launchId}}]}}}
if launch_info.forUpdate:
query["query"]["bool"]["must_not"].append(
{"wildcard": {"cluster_message": "*"}})
return query
def query_logs(self, launch_info, index_name):
logs_by_test_item = {}
for log in elasticsearch.helpers.scan(self.es_client.es_client,
query=self.get_logs_for_clustering_query(launch_info),
index=index_name):
if log["_source"]["test_item"] not in logs_by_test_item:
logs_by_test_item[log["_source"]["test_item"]] = []
logs_by_test_item[log["_source"]["test_item"]].append(log)
return logs_by_test_item
def find_logs_to_cluster(self, launch_info, index_name):
logs_by_test_item = self.query_logs(launch_info, index_name)
log_messages = []
log_dict = {}
ind = 0
for test_item in logs_by_test_item:
merged_logs = self.log_merger.decompose_logs_merged_and_without_duplicates(
logs_by_test_item[test_item])
new_merged_logs = []
for log in merged_logs:
if not log["_source"]["stacktrace"].strip():
continue
new_merged_logs.append(log)
if len(new_merged_logs) > 0:
merged_logs = new_merged_logs
for log in merged_logs:
if log["_source"]["is_merged"]:
continue
log_message = utils.prepare_message_for_clustering(
log["_source"]["whole_message"], launch_info.numberOfLogLines)
if not log_message.strip():
continue
log_messages.append(log_message)
log_dict[ind] = log
ind += 1
return log_messages, log_dict
@utils.ignore_warnings
def find_clusters(self, launch_info):
logger.info("Started clusterizing logs")
index_name = utils.unite_project_name(
str(launch_info.project), self.app_config["esProjectIndexPrefix"])
if not self.es_client.index_exists(index_name):
logger.info("Project %d doesn't exist", index_name)
logger.info("Finished clustering log with 0 clusters.")
return []
t_start = time()
errors_found = []
errors_count = 0
cluster_num = 0
clusters = []
log_ids = []
try:
log_messages, log_dict = self.find_logs_to_cluster(launch_info, index_name)
log_ids = set([int(log["_id"]) for log in log_dict.values()])
            groups = self.cluster_messages_with_grouping_by_error(log_messages, log_dict)
additional_results = self.find_similar_items_from_es(
groups, log_dict, log_messages,
log_ids, launch_info,
{})
clusters, cluster_num = self.gather_cluster_results(
groups, additional_results, log_dict, log_messages, launch_info.numberOfLogLines)
if clusters:
bodies = []
for result in clusters:
for log_id in result.logIds:
bodies.append({
"_op_type": "update",
"_id": log_id,
"_index": index_name,
"doc": {"cluster_id": str(result.clusterId),
"cluster_message": result.clusterMessage}})
self.es_client._bulk_index(bodies)
except Exception as err:
logger.error(err)
errors_found.append(utils.extract_exception(err))
errors_count += 1
results_to_share = {launch_info.launchId: {
"not_found": int(cluster_num == 0), "items_to_process": len(log_ids),
"processed_time": time() - t_start, "found_clusters": cluster_num,
"launch_id": launch_info.launchId, "launch_name": launch_info.launchName,
"project_id": launch_info.project, "method": "find_clusters",
"gather_date": datetime.now().strftime("%Y-%m-%d"),
"gather_datetime": datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
"module_version": [self.app_config["appVersion"]],
"model_info": [],
"errors": errors_found,
"errors_count": errors_count}}
if "amqpUrl" in self.app_config and self.app_config["amqpUrl"].strip():
AmqpClient(self.app_config["amqpUrl"]).send_to_inner_queue(
self.app_config["exchangeName"], "stats_info", json.dumps(results_to_share))
logger.debug("Stats info %s", results_to_share)
logger.info("Processed the launch. It took %.2f sec.", time() - t_start)
logger.info("Finished clustering for the launch with %d clusters.", cluster_num)
return ClusterResult(
project=launch_info.project,
launchId=launch_info.launchId,
clusters=clusters)
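# ---------------------------------------------------------------------------
# Editor's note (added example, not part of the original module): a standalone
# sketch of the shared-bigram hashing used by calculate_hash() above. The
# message texts and vectorizer settings below are assumptions for illustration.
#
#   import hashlib
#   import numpy as np
#   from sklearn.feature_extraction.text import CountVectorizer
#
#   messages = ["error connecting to db host", "error connecting to db node"]
#   vectorizer = CountVectorizer(binary=True, ngram_range=(2, 2))
#   matrix = vectorizer.fit_transform(messages).toarray()
#   shared = np.bitwise_and.reduce(matrix, axis=0)  # bigrams present in every message
#   bigrams = [name for name, keep in zip(vectorizer.get_feature_names_out(), shared) if keep]
#   cluster_id = int(hashlib.sha1(" ".join(bigrams).encode("utf-8")).hexdigest(), 16) % (10 ** 16)
# ---------------------------------------------------------------------------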
| 47.175481 | 103 | 0.551643 |
79409e7ec7ce04a51c73b38a7219038811ba556c | 10,395 | py | Python | mop/ui/module.py | HolisticCoders/mop | dc464021c7e69975fa9fcc06595cc91113768e5e | ["MIT"] | 8 | 2019-09-21T07:17:54.000Z | 2022-02-09T03:33:24.000Z | mop/ui/module.py | ProjectBorealis/master-of-puppets | dc464021c7e69975fa9fcc06595cc91113768e5e | ["MIT"] | 102 | 2019-01-10T21:00:28.000Z | 2019-03-28T11:32:45.000Z | mop/ui/module.py | HolisticCoders/mop | dc464021c7e69975fa9fcc06595cc91113768e5e | ["MIT"] | 3 | 2020-01-12T01:37:34.000Z | 2021-10-08T11:34:08.000Z |
import logging
from collections import defaultdict
from functools import partial
from operator import attrgetter
from weakref import WeakValueDictionary
import maya.cmds as cmds
import maya.api.OpenMaya as om2
from mop.vendor.Qt import QtCore, QtWidgets
from mop.ui.signals import publish, subscribe, unsubscribe
from mop.ui.utils import clear_layout
from mop.utils.undo import undoable
from mop.ui.fieldwidgets import map_field_to_widget
from mop.core.rig import Rig
import mop.metadata
from mop.core.fields import ObjectField, ObjectListField
logger = logging.getLogger(__name__)
class ModulePanel(QtWidgets.QDockWidget):
def __init__(self, parent=None):
super(ModulePanel, self).__init__(parent)
self.setAttribute(QtCore.Qt.WA_DeleteOnClose)
self.setObjectName("mop_settings_panel")
self.setWindowTitle("Module Panel")
self._module_widgets = WeakValueDictionary()
self._modified_fields = set()
self._initial_values = {}
self.setWidget(QtWidgets.QWidget())
self.settings_group = QtWidgets.QGroupBox("Settings")
self.form = QtWidgets.QFormLayout()
self.apply_button = QtWidgets.QPushButton("Apply")
self.reset_button = QtWidgets.QPushButton("Reset")
self.actions_group = QtWidgets.QGroupBox("Actions")
self.mirror_button = QtWidgets.QPushButton("Mirror")
self.update_mirror_button = QtWidgets.QPushButton("Update Mirror")
self.duplicate_button = QtWidgets.QPushButton("Duplicate")
self.delete_button = QtWidgets.QPushButton("Delete")
layout = QtWidgets.QVBoxLayout()
self.widget().setLayout(layout)
layout.addWidget(self.settings_group)
layout.addStretch()
layout.addWidget(self.actions_group)
settings_layout = QtWidgets.QVBoxLayout()
self.settings_group.setLayout(settings_layout)
settings_layout.addLayout(self.form)
settings_actions_layout = QtWidgets.QHBoxLayout()
settings_layout.addLayout(settings_actions_layout)
settings_actions_layout.addWidget(self.apply_button)
settings_actions_layout.addWidget(self.reset_button)
actions_layout = QtWidgets.QVBoxLayout()
self.actions_group.setLayout(actions_layout)
actions_layout.addWidget(self.mirror_button)
actions_layout.addWidget(self.update_mirror_button)
actions_layout.addWidget(self.duplicate_button)
actions_layout.addWidget(self.delete_button)
self.apply_button.hide()
self.reset_button.hide()
self.mirror_button.hide()
self.update_mirror_button.hide()
self.duplicate_button.hide()
self.delete_button.hide()
self.apply_button.released.connect(self._update_module)
self.reset_button.released.connect(self._update_ui)
self.mirror_button.released.connect(self._mirror_module)
self.update_mirror_button.released.connect(self._update_mirror)
self.duplicate_button.released.connect(self._duplicate_module)
self.delete_button.released.connect(self._delete_module)
subscribe("selected-modules-changed", self._on_selection_changed)
def closeEvent(self, event):
unsubscribe("selected-modules-changed", self._on_selection_changed)
def _on_selection_changed(self, modules):
"""Update the module to edit.
``modules`` argument is a :class:`list` of
:class:`mop.core.module.RigModule` and/or :class:`str`
instances.
        :param modules: the selected modules and/or joints to edit.
        :type modules: list
"""
if not modules:
return
self.modules = modules
self._update_ui()
def _on_field_edited(self, widget, *args):
label = self.form.labelForField(widget)
if widget.get() != self._initial_values[widget]:
self._modified_fields.add(widget)
label.setStyleSheet("font-weight: bold")
else:
self._modified_fields.remove(widget)
label.setStyleSheet("")
if self._modified_fields:
self.apply_button.setEnabled(True)
self.reset_button.setEnabled(True)
else:
self.apply_button.setEnabled(False)
self.reset_button.setEnabled(False)
def _update_module(self):
"""Update the Maya module."""
if not self.modules:
return
modified_fields = defaultdict(dict)
for module in self.modules:
old_name = module.node_name
            for name, widget in self._module_widgets.items():
if widget not in self._modified_fields:
continue
field = getattr(module, name)
old_value = field.get()
value = widget.get()
field.set(value)
label = self.form.labelForField(widget)
label.setStyleSheet("")
self._initial_values[widget] = value
modified_fields[module][name] = (old_value, value)
module.update()
new_name = module.node_name
if new_name != old_name:
modified_fields[module]["node_name"] = (old_name, new_name)
self.apply_button.setEnabled(False)
self.reset_button.setEnabled(False)
self._modified_fields.clear()
publish("modules-updated", modified_fields)
def _delete_module(self):
"""Delete the selected module."""
if not self.modules:
return
button = QtWidgets.QMessageBox.warning(
self,
"mop - Delete Module",
"You are about to delete %d module(s). Continue ?" % len(self.modules),
QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No,
)
if button != QtWidgets.QMessageBox.Yes:
return
rig = Rig()
modules = self.modules[:]
for module in self.modules:
if module.name.get() == "root":
logger.warning("Cannot delete root module.")
modules.remove(module)
continue
rig.delete_module(module.node_name)
publish("modules-deleted", modules)
def _duplicate_module(self):
"""Duplicate the selected module."""
if not self.modules:
return
rig = Rig()
new_modules = []
for module in self.modules:
new_module = rig.duplicate_module(module)
new_modules.append(new_module)
publish("modules-created", new_modules)
@undoable
def _mirror_module(self):
if not self.modules:
return
rig = Rig()
new_modules = []
for module in self.modules:
new_module = rig.mirror_module(module)
if new_module is not None:
new_modules.append(new_module)
publish("modules-created", new_modules)
@undoable
def _update_mirror(self):
if not self.modules:
return
for module in self.modules:
mirror_mod = module.module_mirror
if not mirror_mod:
continue
if mirror_mod in self.modules:
self.modules.remove(mirror_mod)
module.update_mirror()
def _update_ui(self):
self._modified_fields.clear()
self._initial_values.clear()
clear_layout(self.form)
if not self.modules:
self.apply_button.hide()
self.reset_button.hide()
self.mirror_button.hide()
self.update_mirror_button.hide()
self.duplicate_button.hide()
self.delete_button.hide()
return
# If one of the module is built, disable actions.
is_built = False
for module in self.modules:
if module.is_built.get():
is_built = True
if is_built:
self.mirror_button.setEnabled(False)
self.update_mirror_button.setEnabled(False)
self.duplicate_button.setEnabled(False)
self.delete_button.setEnabled(False)
else:
self.mirror_button.setEnabled(True)
self.update_mirror_button.setEnabled(True)
self.duplicate_button.setEnabled(True)
self.delete_button.setEnabled(True)
# Enable apply and reset button only when a field has
# been modified.
self.apply_button.setEnabled(False)
self.reset_button.setEnabled(False)
self.apply_button.show()
self.reset_button.show()
self.mirror_button.show()
self.update_mirror_button.show()
self.duplicate_button.show()
self.delete_button.show()
# Only show fields shared by all selected modules.
field_names = set([f.name for f in self.modules[-1].fields])
for other in self.modules[:-1]:
other_names = set([f.name for f in other.fields])
field_names = field_names.intersection(other_names)
# Filter out fields that must be unique, so users cannot
# edit them on multiple modules at once.
for field in self.modules[-1].fields:
if not field.unique:
continue
if field.name in field_names and len(self.modules) > 1:
field_names.remove(field.name)
fields = [f for f in self.modules[-1].fields if f.name in field_names]
ordered_fields = sorted(fields, key=attrgetter("gui_order"))
for field in ordered_fields:
if not field.displayable:
continue
class_name = field.__class__.__name__
widget_data = map_field_to_widget.get(
class_name, map_field_to_widget["StringField"]
)
widget = widget_data(field)
if field.tooltip:
widget.setToolTip(field.tooltip)
value = getattr(self.modules[-1], field.name).get()
widget.set(value)
self._initial_values[widget] = value
self._module_widgets[field.name] = widget
widget.signal().connect(partial(self._on_field_edited, widget))
self.form.addRow(field.display_name, widget)
if not field.editable or is_built:
widget.setEnabled(False)
| 35.968858 | 83 | 0.631457 |
7940a13699228f8aca62b7b55dc8a66cd172d41c | 1,706 | py | Python | parsers/AW.py | oscarmrom/electricitymap-contrib | 1fcbb9a9003a50aa0dccc4e4ca703126f76f2c89 | ["MIT"] | 1,582 | 2018-07-16T10:52:36.000Z | 2021-12-06T06:03:32.000Z | parsers/AW.py | oscarmrom/electricitymap-contrib | 1fcbb9a9003a50aa0dccc4e4ca703126f76f2c89 | ["MIT"] | 1,463 | 2018-07-09T12:23:35.000Z | 2021-12-06T08:11:37.000Z | parsers/AW.py | oscarmrom/electricitymap-contrib | 1fcbb9a9003a50aa0dccc4e4ca703126f76f2c89 | ["MIT"] | 650 | 2018-07-10T02:07:17.000Z | 2021-12-03T11:05:45.000Z |
#!/usr/bin/env python3
import arrow
import requests
import datetime
def fetch_production(zone_key='AW', session=None, target_datetime=None, logger=None):
if target_datetime:
raise NotImplementedError('This parser is not yet able to parse past dates')
r = session or requests.session()
url = 'https://www.webaruba.com/renewable-energy-dashboard/app/rest/results.json'
# User agent is mandatory or services answers 404
headers = {'user-agent': 'electricitymap.org'}
response = r.get(url, headers=headers)
aruba_json = response.json()
top_data = aruba_json['dashboard_top_data']
# Values currenlty used from service
fossil = top_data['Fossil']
wind = top_data['Wind']
solar = top_data['TotalSolar']
# biogas live value is 0 MW all the time (2021)
biogas = top_data['total_bio_gas']
total = top_data['TotalPower']
# We're using Fossil data to get timestamp in correct time zone
local_date_time = datetime.datetime.strptime(fossil['timestamp'], "%Y-%m-%d %H:%M:%S.%f")
zone_date_time = arrow.Arrow.fromdatetime(local_date_time, 'America/Aruba')
data = {
'zoneKey': zone_key,
'datetime': zone_date_time.datetime,
'production': {
'oil': float(fossil['value']),
'wind': float(wind['value']),
'solar': float(solar['value']),
'biomass': float(biogas['value']),
'unknown': float(total['value']) - float(fossil['value']) - float(wind['value']) - float(solar['value']) - float(biogas['value'])
},
'storage': {},
'source': 'webaruba.com',
}
return data
if __name__ == '__main__':
print(fetch_production())
| 33.45098 | 141 | 0.641266 |
7940a226c621c69d22ee06321ae53cb525180953 | 1,603 | py | Python | etl/parsers/etw/Microsoft_Windows_GenericRoaming.py | IMULMUL/etl-parser | 76b7c046866ce0469cd129ee3f7bb3799b34e271 | ["Apache-2.0"] | 104 | 2020-03-04T14:31:31.000Z | 2022-03-28T02:59:36.000Z | etl/parsers/etw/Microsoft_Windows_GenericRoaming.py | IMULMUL/etl-parser | 76b7c046866ce0469cd129ee3f7bb3799b34e271 | ["Apache-2.0"] | 7 | 2020-04-20T09:18:39.000Z | 2022-03-19T17:06:19.000Z | etl/parsers/etw/Microsoft_Windows_GenericRoaming.py | IMULMUL/etl-parser | 76b7c046866ce0469cd129ee3f7bb3799b34e271 | ["Apache-2.0"] | 16 | 2020-03-05T18:55:59.000Z | 2022-03-01T10:19:28.000Z |
# -*- coding: utf-8 -*-
"""
Microsoft-Windows-GenericRoaming
GUID : 4eacb4d0-263b-4b93-8cd6-778a278e5642
"""
from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct
from etl.utils import WString, CString, SystemTime, Guid
from etl.dtyp import Sid
from etl.parsers.etw.core import Etw, declare, guid
@declare(guid=guid("4eacb4d0-263b-4b93-8cd6-778a278e5642"), event_id=3, version=0)
class Microsoft_Windows_GenericRoaming_3_0(Etw):
pattern = Struct(
"CollectionId" / WString,
"UnitId" / WString,
"HResultFailure" / Int32sl
)
@declare(guid=guid("4eacb4d0-263b-4b93-8cd6-778a278e5642"), event_id=4, version=0)
class Microsoft_Windows_GenericRoaming_4_0(Etw):
pattern = Struct(
"CollectionId" / WString,
"UnitId" / WString,
"HResultFailure" / Int32sl
)
@declare(guid=guid("4eacb4d0-263b-4b93-8cd6-778a278e5642"), event_id=5, version=0)
class Microsoft_Windows_GenericRoaming_5_0(Etw):
pattern = Struct(
"CollectionId" / WString,
"UnitId" / WString,
"HResultFailure" / Int32sl
)
@declare(guid=guid("4eacb4d0-263b-4b93-8cd6-778a278e5642"), event_id=6, version=0)
class Microsoft_Windows_GenericRoaming_6_0(Etw):
pattern = Struct(
"CollectionId" / WString,
"HResultFailure" / Int32sl
)
@declare(guid=guid("4eacb4d0-263b-4b93-8cd6-778a278e5642"), event_id=7, version=0)
class Microsoft_Windows_GenericRoaming_7_0(Etw):
pattern = Struct(
"Username" / WString,
"HResultFailure" / Int32sl
)
| 29.685185 | 123 | 0.699314 |
7940a25c8f9fb37e1103fd4dd28d18e241213cfc | 7,193 | py | Python | airflow/api_connexion/endpoints/dag_endpoint.py | MoBagel/airflow | 0905e386f17e34d96f6ee575404c62b13242c75d | ["Apache-2.0"] | null | null | null | airflow/api_connexion/endpoints/dag_endpoint.py | MoBagel/airflow | 0905e386f17e34d96f6ee575404c62b13242c75d | ["Apache-2.0"] | null | null | null | airflow/api_connexion/endpoints/dag_endpoint.py | MoBagel/airflow | 0905e386f17e34d96f6ee575404c62b13242c75d | ["Apache-2.0"] | null | null | null |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from http import HTTPStatus
from typing import Collection, Optional
from connexion import NoContent
from flask import g, request
from marshmallow import ValidationError
from sqlalchemy.orm import Session
from sqlalchemy.sql.expression import or_
from airflow import DAG
from airflow.api_connexion import security
from airflow.api_connexion.exceptions import AlreadyExists, BadRequest, NotFound
from airflow.api_connexion.parameters import check_limit, format_parameters
from airflow.api_connexion.schemas.dag_schema import (
DAGCollection,
dag_detail_schema,
dag_schema,
dags_collection_schema,
)
from airflow.api_connexion.types import APIResponse, UpdateMask
from airflow.exceptions import AirflowException, DagNotFound
from airflow.models.dag import DagModel, DagTag
from airflow.security import permissions
from airflow.utils.airflow_flask_app import get_airflow_app
from airflow.utils.session import NEW_SESSION, provide_session
@security.requires_access([(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG)])
@provide_session
def get_dag(*, dag_id: str, session: Session = NEW_SESSION) -> APIResponse:
"""Get basic information about a DAG."""
dag = session.query(DagModel).filter(DagModel.dag_id == dag_id).one_or_none()
if dag is None:
raise NotFound("DAG not found", detail=f"The DAG with dag_id: {dag_id} was not found")
return dag_schema.dump(dag)
@security.requires_access([(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG)])
def get_dag_details(*, dag_id: str) -> APIResponse:
"""Get details of DAG."""
dag: DAG = get_airflow_app().dag_bag.get_dag(dag_id)
if not dag:
raise NotFound("DAG not found", detail=f"The DAG with dag_id: {dag_id} was not found")
return dag_detail_schema.dump(dag)
@security.requires_access([(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG)])
@format_parameters({'limit': check_limit})
@provide_session
def get_dags(
*,
limit: int,
offset: int = 0,
tags: Optional[Collection[str]] = None,
dag_id_pattern: Optional[str] = None,
only_active: bool = True,
session: Session = NEW_SESSION,
) -> APIResponse:
"""Get all DAGs."""
if only_active:
dags_query = session.query(DagModel).filter(~DagModel.is_subdag, DagModel.is_active)
else:
dags_query = session.query(DagModel).filter(~DagModel.is_subdag)
if dag_id_pattern:
dags_query = dags_query.filter(DagModel.dag_id.ilike(f'%{dag_id_pattern}%'))
readable_dags = get_airflow_app().appbuilder.sm.get_accessible_dag_ids(g.user)
dags_query = dags_query.filter(DagModel.dag_id.in_(readable_dags))
if tags:
cond = [DagModel.tags.any(DagTag.name == tag) for tag in tags]
dags_query = dags_query.filter(or_(*cond))
total_entries = dags_query.count()
dags = dags_query.order_by(DagModel.dag_id).offset(offset).limit(limit).all()
return dags_collection_schema.dump(DAGCollection(dags=dags, total_entries=total_entries))
@security.requires_access([(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG)])
@provide_session
def patch_dag(*, dag_id: str, update_mask: UpdateMask = None, session: Session = NEW_SESSION) -> APIResponse:
"""Update the specific DAG"""
try:
patch_body = dag_schema.load(request.json, session=session)
except ValidationError as err:
raise BadRequest(detail=str(err.messages))
if update_mask:
patch_body_ = {}
if update_mask != ['is_paused']:
raise BadRequest(detail="Only `is_paused` field can be updated through the REST API")
patch_body_[update_mask[0]] = patch_body[update_mask[0]]
patch_body = patch_body_
dag = session.query(DagModel).filter(DagModel.dag_id == dag_id).one_or_none()
if not dag:
raise NotFound(f"Dag with id: '{dag_id}' not found")
dag.is_paused = patch_body['is_paused']
session.flush()
return dag_schema.dump(dag)
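# ---------------------------------------------------------------------------
# Editor's note (added example, not part of the original module): with the
# endpoint above, only ``is_paused`` can be patched. A request along these
# lines (the path and DAG id are illustrative assumptions) would pause a DAG:
#
#   PATCH /api/v1/dags/example_dag?update_mask=is_paused
#   {"is_paused": true}
#
# Any other field named in ``update_mask`` is rejected with a 400 BadRequest.
# ---------------------------------------------------------------------------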
@security.requires_access([(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG)])
@format_parameters({'limit': check_limit})
@provide_session
def patch_dags(limit, session, offset=0, only_active=True, tags=None, dag_id_pattern=None, update_mask=None):
"""Patch multiple DAGs."""
try:
patch_body = dag_schema.load(request.json, session=session)
except ValidationError as err:
raise BadRequest(detail=str(err.messages))
if update_mask:
patch_body_ = {}
if update_mask != ['is_paused']:
raise BadRequest(detail="Only `is_paused` field can be updated through the REST API")
update_mask = update_mask[0]
patch_body_[update_mask] = patch_body[update_mask]
patch_body = patch_body_
if only_active:
dags_query = session.query(DagModel).filter(~DagModel.is_subdag, DagModel.is_active)
else:
dags_query = session.query(DagModel).filter(~DagModel.is_subdag)
if dag_id_pattern == '~':
dag_id_pattern = '%'
dags_query = dags_query.filter(DagModel.dag_id.ilike(f'%{dag_id_pattern}%'))
editable_dags = get_airflow_app().appbuilder.sm.get_editable_dag_ids(g.user)
dags_query = dags_query.filter(DagModel.dag_id.in_(editable_dags))
if tags:
cond = [DagModel.tags.any(DagTag.name == tag) for tag in tags]
dags_query = dags_query.filter(or_(*cond))
total_entries = dags_query.count()
dags = dags_query.order_by(DagModel.dag_id).offset(offset).limit(limit).all()
dags_to_update = {dag.dag_id for dag in dags}
session.query(DagModel).filter(DagModel.dag_id.in_(dags_to_update)).update(
{DagModel.is_paused: patch_body['is_paused']}, synchronize_session='fetch'
)
session.flush()
return dags_collection_schema.dump(DAGCollection(dags=dags, total_entries=total_entries))
@security.requires_access([(permissions.ACTION_CAN_DELETE, permissions.RESOURCE_DAG)])
@provide_session
def delete_dag(dag_id: str, session: Session = NEW_SESSION) -> APIResponse:
"""Delete the specific DAG."""
from airflow.api.common import delete_dag as delete_dag_module
try:
delete_dag_module.delete_dag(dag_id, session=session)
except DagNotFound:
raise NotFound(f"Dag with id: '{dag_id}' not found")
except AirflowException:
raise AlreadyExists(detail=f"Task instances of dag with id: '{dag_id}' are still running")
return NoContent, HTTPStatus.NO_CONTENT
| 39.521978 | 109 | 0.735993 |
7940a292ef846ac7fd692755bef29a3a90c05154 | 1,975 | py | Python | neutron/agent/ovsdb/impl_idl.py | ilay09/neutron | b7f9803c88b17a6ebd40fd44d15d4336bea7b394 | ["Apache-2.0"] | null | null | null | neutron/agent/ovsdb/impl_idl.py | ilay09/neutron | b7f9803c88b17a6ebd40fd44d15d4336bea7b394 | ["Apache-2.0"] | null | null | null | neutron/agent/ovsdb/impl_idl.py | ilay09/neutron | b7f9803c88b17a6ebd40fd44d15d4336bea7b394 | ["Apache-2.0"] | null | null | null |
# Copyright (c) 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from debtcollector import moves
from oslo_config import cfg
from ovsdbapp.backend.ovs_idl import transaction
from ovsdbapp.schema.open_vswitch import impl_idl
from neutron.agent.ovsdb.native import connection
from neutron.agent.ovsdb.native import vlog
from neutron.conf.agent import ovs_conf
NeutronOVSDBTransaction = moves.moved_class(
impl_idl.OvsVsctlTransaction,
'NeutronOVSDBTransaction',
__name__)
VswitchdInterfaceAddException = moves.moved_class(
impl_idl.VswitchdInterfaceAddException,
'VswitchdInterfaceAddException',
__name__)
Transaction = moves.moved_class(transaction.Transaction,
'Transaction', __name__)
ovs_conf.register_ovs_agent_opts()
_connection = None
def api_factory(context):
global _connection
if _connection is None:
try:
_connection = connection.Connection(
idl=connection.idl_factory(),
timeout=cfg.CONF.ovs_vsctl_timeout)
except TypeError:
_connection = connection.Connection(
idl_factory=connection.idl_factory,
timeout=cfg.CONF.ovs_vsctl_timeout)
return NeutronOvsdbIdl(_connection)
class NeutronOvsdbIdl(impl_idl.OvsdbIdl):
def __init__(self, connection):
vlog.use_python_logger()
super(NeutronOvsdbIdl, self).__init__(connection)
| 33.474576 | 78 | 0.72557 |
7940a38cdb7906cf408040552ebfd1f441fd2105 | 7,469 | py | Python | extraPackages/matplotlib-3.0.3/examples/specialty_plots/leftventricle_bulleye.py | dolboBobo/python3_ios | 877f8c2c5890f26292ddd14909bea62a04fe2889 | ["BSD-3-Clause"] | 130 | 2018-02-03T10:25:54.000Z | 2022-03-25T22:27:22.000Z | extraPackages/matplotlib-3.0.2/examples/specialty_plots/leftventricle_bulleye.py | spacetime314/python3_ios | e149f1bc2e50046c8810f83dae7739a8dea939ee | ["BSD-3-Clause"] | 9 | 2018-12-14T07:31:42.000Z | 2020-12-09T20:29:28.000Z | extraPackages/matplotlib-3.0.2/examples/specialty_plots/leftventricle_bulleye.py | spacetime314/python3_ios | e149f1bc2e50046c8810f83dae7739a8dea939ee | ["BSD-3-Clause"] | 64 | 2018-04-25T08:51:57.000Z | 2022-01-29T14:13:57.000Z |
"""
=======================
Left Ventricle Bullseye
=======================
This example demonstrates how to create the 17 segment model for the left
ventricle recommended by the American Heart Association (AHA).
"""
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
def bullseye_plot(ax, data, segBold=None, cmap=None, norm=None):
"""
Bullseye representation for the left ventricle.
Parameters
----------
ax : axes
data : list of int and float
The intensity values for each of the 17 segments
segBold: list of int, optional
A list with the segments to highlight
cmap : ColorMap or None, optional
Optional argument to set the desired colormap
norm : Normalize or None, optional
Optional argument to normalize data into the [0.0, 1.0] range
Notes
-----
    This function creates the 17 segment model for the left ventricle according
to the American Heart Association (AHA) [1]_
References
----------
.. [1] M. D. Cerqueira, N. J. Weissman, V. Dilsizian, A. K. Jacobs,
S. Kaul, W. K. Laskey, D. J. Pennell, J. A. Rumberger, T. Ryan,
and M. S. Verani, "Standardized myocardial segmentation and
nomenclature for tomographic imaging of the heart",
Circulation, vol. 105, no. 4, pp. 539-542, 2002.
"""
if segBold is None:
segBold = []
linewidth = 2
data = np.array(data).ravel()
if cmap is None:
cmap = plt.cm.viridis
if norm is None:
norm = mpl.colors.Normalize(vmin=data.min(), vmax=data.max())
theta = np.linspace(0, 2 * np.pi, 768)
r = np.linspace(0.2, 1, 4)
# Create the bound for the segment 17
for i in range(r.shape[0]):
ax.plot(theta, np.repeat(r[i], theta.shape), '-k', lw=linewidth)
# Create the bounds for the segments 1-12
for i in range(6):
theta_i = np.deg2rad(i * 60)
ax.plot([theta_i, theta_i], [r[1], 1], '-k', lw=linewidth)
# Create the bounds for the segments 13-16
for i in range(4):
theta_i = np.deg2rad(i * 90 - 45)
ax.plot([theta_i, theta_i], [r[0], r[1]], '-k', lw=linewidth)
# Fill the segments 1-6
r0 = r[2:4]
r0 = np.repeat(r0[:, np.newaxis], 128, axis=1).T
for i in range(6):
# First segment start at 60 degrees
theta0 = theta[i * 128:i * 128 + 128] + np.deg2rad(60)
theta0 = np.repeat(theta0[:, np.newaxis], 2, axis=1)
z = np.ones((128, 2)) * data[i]
ax.pcolormesh(theta0, r0, z, cmap=cmap, norm=norm)
if i + 1 in segBold:
ax.plot(theta0, r0, '-k', lw=linewidth + 2)
ax.plot(theta0[0], [r[2], r[3]], '-k', lw=linewidth + 1)
ax.plot(theta0[-1], [r[2], r[3]], '-k', lw=linewidth + 1)
# Fill the segments 7-12
r0 = r[1:3]
r0 = np.repeat(r0[:, np.newaxis], 128, axis=1).T
for i in range(6):
# First segment start at 60 degrees
theta0 = theta[i * 128:i * 128 + 128] + np.deg2rad(60)
theta0 = np.repeat(theta0[:, np.newaxis], 2, axis=1)
z = np.ones((128, 2)) * data[i + 6]
ax.pcolormesh(theta0, r0, z, cmap=cmap, norm=norm)
if i + 7 in segBold:
ax.plot(theta0, r0, '-k', lw=linewidth + 2)
ax.plot(theta0[0], [r[1], r[2]], '-k', lw=linewidth + 1)
ax.plot(theta0[-1], [r[1], r[2]], '-k', lw=linewidth + 1)
# Fill the segments 13-16
r0 = r[0:2]
r0 = np.repeat(r0[:, np.newaxis], 192, axis=1).T
for i in range(4):
# First segment start at 45 degrees
theta0 = theta[i * 192:i * 192 + 192] + np.deg2rad(45)
theta0 = np.repeat(theta0[:, np.newaxis], 2, axis=1)
z = np.ones((192, 2)) * data[i + 12]
ax.pcolormesh(theta0, r0, z, cmap=cmap, norm=norm)
if i + 13 in segBold:
ax.plot(theta0, r0, '-k', lw=linewidth + 2)
ax.plot(theta0[0], [r[0], r[1]], '-k', lw=linewidth + 1)
ax.plot(theta0[-1], [r[0], r[1]], '-k', lw=linewidth + 1)
# Fill the segments 17
if data.size == 17:
r0 = np.array([0, r[0]])
r0 = np.repeat(r0[:, np.newaxis], theta.size, axis=1).T
theta0 = np.repeat(theta[:, np.newaxis], 2, axis=1)
z = np.ones((theta.size, 2)) * data[16]
ax.pcolormesh(theta0, r0, z, cmap=cmap, norm=norm)
if 17 in segBold:
ax.plot(theta0, r0, '-k', lw=linewidth + 2)
ax.set_ylim([0, 1])
ax.set_yticklabels([])
ax.set_xticklabels([])
# Create the fake data
data = np.array(range(17)) + 1
# Make a figure and axes with dimensions as desired.
fig, ax = plt.subplots(figsize=(12, 8), nrows=1, ncols=3,
subplot_kw=dict(projection='polar'))
fig.canvas.set_window_title('Left Ventricle Bulls Eyes (AHA)')
# Create the axis for the colorbars
axl = fig.add_axes([0.14, 0.15, 0.2, 0.05])
axl2 = fig.add_axes([0.41, 0.15, 0.2, 0.05])
axl3 = fig.add_axes([0.69, 0.15, 0.2, 0.05])
# Set the colormap and norm to correspond to the data for which
# the colorbar will be used.
cmap = mpl.cm.viridis
norm = mpl.colors.Normalize(vmin=1, vmax=17)
# ColorbarBase derives from ScalarMappable and puts a colorbar
# in a specified axes, so it has everything needed for a
# standalone colorbar. There are many more kwargs, but the
# following gives a basic continuous colorbar with ticks
# and labels.
cb1 = mpl.colorbar.ColorbarBase(axl, cmap=cmap, norm=norm,
orientation='horizontal')
cb1.set_label('Some Units')
# Set the colormap and norm to correspond to the data for which
# the colorbar will be used.
cmap2 = mpl.cm.cool
norm2 = mpl.colors.Normalize(vmin=1, vmax=17)
# ColorbarBase derives from ScalarMappable and puts a colorbar
# in a specified axes, so it has everything needed for a
# standalone colorbar. There are many more kwargs, but the
# following gives a basic continuous colorbar with ticks
# and labels.
cb2 = mpl.colorbar.ColorbarBase(axl2, cmap=cmap2, norm=norm2,
orientation='horizontal')
cb2.set_label('Some other units')
# The second example illustrates the use of a ListedColormap, a
# BoundaryNorm, and extended ends to show the "over" and "under"
# value colors.
cmap3 = mpl.colors.ListedColormap(['r', 'g', 'b', 'c'])
cmap3.set_over('0.35')
cmap3.set_under('0.75')
# If a ListedColormap is used, the length of the bounds array must be
# one greater than the length of the color list. The bounds must be
# monotonically increasing.
bounds = [2, 3, 7, 9, 15]
norm3 = mpl.colors.BoundaryNorm(bounds, cmap3.N)
cb3 = mpl.colorbar.ColorbarBase(axl3, cmap=cmap3, norm=norm3,
# to use 'extend', you must
# specify two extra boundaries:
boundaries=[0] + bounds + [18],
extend='both',
ticks=bounds, # optional
spacing='proportional',
orientation='horizontal')
cb3.set_label('Discrete intervals, some other units')
# Create the 17 segment model
bullseye_plot(ax[0], data, cmap=cmap, norm=norm)
ax[0].set_title('Bulls Eye (AHA)')
bullseye_plot(ax[1], data, cmap=cmap2, norm=norm2)
ax[1].set_title('Bulls Eye (AHA)')
bullseye_plot(ax[2], data, segBold=[3, 5, 6, 11, 12, 16],
cmap=cmap3, norm=norm3)
ax[2].set_title('Segments [3,5,6,11,12,16] in bold')
plt.show()
| 35.398104 | 78 | 0.59834 |
7940a5fc7c9e4761c2a3cb7c5baac81300af08c7 | 406 | py | Python | 36_Valid Sudoku.py | Alvin1994/leetcode-python3- | ba2bde873c925554cc39f2bd13be81967713477d | ["Apache-2.0"] | null | null | null | 36_Valid Sudoku.py | Alvin1994/leetcode-python3- | ba2bde873c925554cc39f2bd13be81967713477d | ["Apache-2.0"] | null | null | null | 36_Valid Sudoku.py | Alvin1994/leetcode-python3- | ba2bde873c925554cc39f2bd13be81967713477d | ["Apache-2.0"] | null | null | null |
from typing import List  # required for the List type annotation below
class Solution:
def isValidSudoku(self, board: List[List[str]]) -> bool:
if not board or not board[0]:
return
tmp = []
for r in range(len(board)):
for c in range(len(board[0])):
if board[r][c] != ".":
tmp += [(board[r][c], r), (c,board[r][c]), (r//3,c//3,board[r][c])]
        return len(tmp) == len(set(tmp))
| 33.833333 | 87 | 0.448276 |
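# ---------------------------------------------------------------------------
# Editor's note (added example, not part of the original file): quick check of
# the Solution class above. A duplicate "5" in row 0 makes the board invalid.
#
#   board = [["."] * 9 for _ in range(9)]
#   board[0][0], board[0][5] = "5", "5"
#   print(Solution().isValidSudoku(board))   # False (duplicate in the same row)
#   board[0][5] = "3"
#   print(Solution().isValidSudoku(board))   # True
# ---------------------------------------------------------------------------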
7940a71c3d1f64e0c68ac003a49ba6ca705cc5c5 | 14,362 | py | Python | google/cloud/pubsublite_v1/services/cursor_service/transports/grpc_asyncio.py | renovate-bot/python-pubsublite | 9da1e81ba8565c220fae2a58768315e87c57b5bd | ["Apache-2.0"] | null | null | null | google/cloud/pubsublite_v1/services/cursor_service/transports/grpc_asyncio.py | renovate-bot/python-pubsublite | 9da1e81ba8565c220fae2a58768315e87c57b5bd | ["Apache-2.0"] | null | null | null | google/cloud/pubsublite_v1/services/cursor_service/transports/grpc_asyncio.py | renovate-bot/python-pubsublite | 9da1e81ba8565c220fae2a58768315e87c57b5bd | ["Apache-2.0"] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import gapic_v1 # type: ignore
from google.api_core import grpc_helpers_async # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import packaging.version
import grpc # type: ignore
from grpc.experimental import aio # type: ignore
from google.cloud.pubsublite_v1.types import cursor
from .base import CursorServiceTransport, DEFAULT_CLIENT_INFO
from .grpc import CursorServiceGrpcTransport
class CursorServiceGrpcAsyncIOTransport(CursorServiceTransport):
"""gRPC AsyncIO backend transport for CursorService.
The service that a subscriber client application uses to
    manage committed cursors while receiving messages. A cursor
represents a subscriber's progress within a topic partition for
a given subscription.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_grpc_channel: aio.Channel
_stubs: Dict[str, Callable] = {}
@classmethod
def create_channel(
cls,
host: str = "pubsublite.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs,
) -> aio.Channel:
"""Create and return a gRPC AsyncIO channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
aio.Channel: A gRPC AsyncIO channel object.
"""
return grpc_helpers_async.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs,
)
def __init__(
self,
*,
host: str = "pubsublite.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
channel: aio.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id=None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
channel (Optional[aio.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for the grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure a mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
Raises:
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
credentials=self._credentials,
credentials_file=credentials_file,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@property
def grpc_channel(self) -> aio.Channel:
"""Create the channel designed to connect to this service.
This property caches on the instance; repeated calls return
the same channel.
"""
# Return the channel from cache.
return self._grpc_channel
@property
def streaming_commit_cursor(
self,
) -> Callable[
[cursor.StreamingCommitCursorRequest],
Awaitable[cursor.StreamingCommitCursorResponse],
]:
r"""Return a callable for the streaming commit cursor method over gRPC.
Establishes a stream with the server for managing
committed cursors.
Returns:
Callable[[~.StreamingCommitCursorRequest],
Awaitable[~.StreamingCommitCursorResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "streaming_commit_cursor" not in self._stubs:
self._stubs["streaming_commit_cursor"] = self.grpc_channel.stream_stream(
"/google.cloud.pubsublite.v1.CursorService/StreamingCommitCursor",
request_serializer=cursor.StreamingCommitCursorRequest.serialize,
response_deserializer=cursor.StreamingCommitCursorResponse.deserialize,
)
return self._stubs["streaming_commit_cursor"]
@property
def commit_cursor(
self,
) -> Callable[[cursor.CommitCursorRequest], Awaitable[cursor.CommitCursorResponse]]:
r"""Return a callable for the commit cursor method over gRPC.
Updates the committed cursor.
Returns:
Callable[[~.CommitCursorRequest],
Awaitable[~.CommitCursorResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "commit_cursor" not in self._stubs:
self._stubs["commit_cursor"] = self.grpc_channel.unary_unary(
"/google.cloud.pubsublite.v1.CursorService/CommitCursor",
request_serializer=cursor.CommitCursorRequest.serialize,
response_deserializer=cursor.CommitCursorResponse.deserialize,
)
return self._stubs["commit_cursor"]
@property
def list_partition_cursors(
self,
) -> Callable[
[cursor.ListPartitionCursorsRequest],
Awaitable[cursor.ListPartitionCursorsResponse],
]:
r"""Return a callable for the list partition cursors method over gRPC.
Returns all committed cursor information for a
subscription.
Returns:
Callable[[~.ListPartitionCursorsRequest],
Awaitable[~.ListPartitionCursorsResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_partition_cursors" not in self._stubs:
self._stubs["list_partition_cursors"] = self.grpc_channel.unary_unary(
"/google.cloud.pubsublite.v1.CursorService/ListPartitionCursors",
request_serializer=cursor.ListPartitionCursorsRequest.serialize,
response_deserializer=cursor.ListPartitionCursorsResponse.deserialize,
)
return self._stubs["list_partition_cursors"]
def close(self):
return self.grpc_channel.close()
__all__ = ("CursorServiceGrpcAsyncIOTransport",)
| 44.190769 | 88 | 0.64239 |
7940a75991b3edb04750627106718ce47a98b548 | 67 | py | Python | nvvm/typedefs.py | uchytilc/PyCu | 9ba25281611bf4dbd70d37f4eba0574f817d6928 | ["MIT"] | null | null | null | nvvm/typedefs.py | uchytilc/PyCu | 9ba25281611bf4dbd70d37f4eba0574f817d6928 | ["MIT"] | null | null | null | nvvm/typedefs.py | uchytilc/PyCu | 9ba25281611bf4dbd70d37f4eba0574f817d6928 | ["MIT"] | null | null | null |
from ctypes import c_void_p
nvvmProgram = c_void_p #opaque handle
| 16.75 | 37 | 0.820896 |
7940a7aef274305e2bdd828262502a1de769a9b0 | 3,455 | py | Python | projects/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/scripts/panda_ros.py | makistsantekidis/opendr | 07dee3b59d3487b9c5a93d6946317178a02c9890 | ["Apache-2.0"] | 3 | 2021-06-24T01:54:25.000Z | 2021-12-12T16:21:24.000Z | projects/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/scripts/panda_ros.py | makistsantekidis/opendr | 07dee3b59d3487b9c5a93d6946317178a02c9890 | ["Apache-2.0"] | 79 | 2021-06-23T10:40:10.000Z | 2021-12-16T07:59:42.000Z | projects/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/scripts/panda_ros.py | makistsantekidis/opendr | 07dee3b59d3487b9c5a93d6946317178a02c9890 | ["Apache-2.0"] | 5 | 2021-07-04T07:38:50.000Z | 2021-12-12T16:18:47.000Z |
#!/usr/bin/env python
# Copyright 1996-2020 Cyberbotics Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""universal_robot_ros controller."""
import argparse
import rospy
from controller import Robot
from joint_state_publisher import JointStatePublisher
from gripper_command import GripperCommander
from trajectory_follower import TrajectoryFollower
from camera_publisher import CameraPublisher
from rosgraph_msgs.msg import Clock
class ROSController():
def __init__(self, robot=None):
if not robot:
self.robot = Robot()
else:
self.robot = robot
jointPrefix = rospy.get_param('prefix', '')
if jointPrefix:
print('Setting prefix to %s' % jointPrefix)
self.jointStatePublisher = JointStatePublisher(self.robot, jointPrefix, '/')
self.gripperCommander = GripperCommander(self.robot, self.jointStatePublisher, jointPrefix, 'panda')
self.trajectoryFollower = TrajectoryFollower(self.robot, self.jointStatePublisher, jointPrefix, '/')
self.cameraPublisher = CameraPublisher(self.robot, jointPrefix, '/')
self.gripperCommander.start()
self.trajectoryFollower.start()
init_pos = {
'panda_joint1': 0.000,
'panda_joint2': -0.785,
'panda_joint3': 0.0,
'panda_joint4': -1.570,
'panda_joint5': 0.0,
'panda_joint6': 1.047,
'panda_joint7': 0.0
}
for jt in init_pos:
self.robot.getDevice(jt).setPosition(init_pos[jt])
print("Robot sent to init pose")
# we want to use simulation time for ROS
self.clockPublisher = rospy.Publisher('clock', Clock, queue_size=1)
if not rospy.get_param('use_sim_time', False):
rospy.logwarn('use_sim_time is not set!')
print("Clock publisher created")
def run(self):
timestep = int(self.robot.getBasicTimeStep())
print("Entered thread")
while self.robot.step(timestep) != -1 and not rospy.is_shutdown():
self.jointStatePublisher.publish()
self.cameraPublisher.publish()
self.gripperCommander.update()
self.trajectoryFollower.update()
            # publish simulation clock
msg = Clock()
time = self.robot.getTime()
msg.clock.secs = int(time)
# round prevents precision issues that can cause problems with ROS timers
msg.clock.nsecs = int(round(1000 * (time - msg.clock.secs)) * 1.0e+6)
self.clockPublisher.publish(msg)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--node-name', dest='nodeName', default='panda', help='Specifies the name of the node.')
arguments, unknown = parser.parse_known_args()
rospy.init_node(arguments.nodeName, disable_signals=True)
controller = ROSController()
controller.run()
rospy.spin()
| 35.989583 | 112 | 0.665991 |
7940a8d50b22e8fe5c61d1e2647a3ed618ee660d | 262 | py | Python | orders/urls.py | garcia116/ecommerce-django | 6c416b20446f0daba5774040715313ae28501cf9 | ["PostgreSQL"] | null | null | null | orders/urls.py | garcia116/ecommerce-django | 6c416b20446f0daba5774040715313ae28501cf9 | ["PostgreSQL"] | null | null | null | orders/urls.py | garcia116/ecommerce-django | 6c416b20446f0daba5774040715313ae28501cf9 | ["PostgreSQL"] | null | null | null |
from django.urls import path
from . import views
urlpatterns = [
path('place_order/', views.place_order, name='place_order'),
path('payments/', views.payments, name='payments'),
path('order_complete/', views.order_complete, name='order_complete')
]
| 29.111111 | 72 | 0.717557 |
7940a8d96137e806db1ba8d535858d3f1c0cef53 | 3,875 | py | Python | mercury_ml/common/providers/artifact_storage/local.py | gabrieloexle/mercury-ml | cc663f84a26ee66ae105bbfc0cd1cbd5629031cd | ["MIT"] | null | null | null | mercury_ml/common/providers/artifact_storage/local.py | gabrieloexle/mercury-ml | cc663f84a26ee66ae105bbfc0cd1cbd5629031cd | ["MIT"] | null | null | null | mercury_ml/common/providers/artifact_storage/local.py | gabrieloexle/mercury-ml | cc663f84a26ee66ae105bbfc0cd1cbd5629031cd | ["MIT"] | null | null | null |
import os
import json
def store_h2o_frame(data, directory, filename, force=False, parts=1):
"""
Export a given H2OFrame to a path on the machine this python session is currently connected to.
:param data: the Frame to save to disk.
:param directory: the directory to the save point on disk.
:param filename: the name to save the frame to.
:param force: if True, overwrite any preexisting file with the same path
:param parts: enables export to multiple 'part' files instead of just a single file.
Convenient for large datasets that take too long to store in a single file.
Use parts=-1 to instruct H2O to determine the optimal number of part files or
specify your desired maximum number of part files. Path needs to be a directory
when exporting to multiple files, also that directory must be empty.
Default is ``parts = 1``, which is to export to a single file.
:return string filepath: the path to which the file was stored.
"""
if not os.path.isdir(directory):
os.makedirs(directory)
filepath = _make_local_path(os.path.join(directory, filename))
from h2o.job import H2OJob
from h2o.utils.typechecks import assert_is_type
from h2o.frame import H2OFrame
from h2o import api
assert_is_type(data, H2OFrame)
assert_is_type(filepath, str)
assert_is_type(force, bool)
assert_is_type(parts, int)
H2OJob(api("POST /3/Frames/%s/export" % (data.frame_id), data={"path": filepath, "num_parts": parts, "force": force}),
"Export File").poll()
return filepath
def store_pandas_pickle(data, directory, filename, compression=None):
"""
Saves data to Disk as Pickle file.
:param DataFrame data: The data to be saved
    :param string directory: directory where the data file should be saved
:param string filename: Name that the data file should be saved with
:return string filepath: the path to which the file was stored.
"""
if not compression:
compression_postfix = ""
else:
compression_postfix = "." + compression
if not os.path.exists(directory):
os.makedirs(directory)
    # Include the compression suffix in the returned path so it matches the file written.
    filepath = os.path.join(directory, filename + ".pkl" + compression_postfix)
    data.to_pickle(path=filepath, compression=compression)
return filepath
def store_pandas_json(data, directory, filename, orient="table", compression=None):
"""
Saves data to Disk as JSON file.
:param DataFrame data: The data to be saved
    :param string directory: directory where the data file should be saved
:param string filename: Name that the data file should be saved with
:return string filepath: the path to which the file was stored.
"""
if compression is None:
compression_postfix = ""
else:
compression_postfix = "." + compression
if not os.path.exists(directory):
os.makedirs(directory)
    # Apply the compression suffix (if any) and pass the compression option through to pandas.
    filepath = os.path.join(directory, filename + ".json" + compression_postfix)
    data.to_json(path_or_buf=filepath, orient=orient, compression=compression)
return filepath
def store_dict_json(data, directory, filename):
"""
Saves data to Disk as JSON file.
:param dict data: The data to be saved
    :param string directory: directory where the data file should be saved
:param string filename: Name that the data file should be saved with
:return string filepath: the path to which the file was stored.
"""
if not os.path.isdir(directory):
os.makedirs(directory)
filepath = os.path.join(directory, filename + ".json")
with open(filepath, "w") as f:
json.dump(data, f, indent=2)
return filepath
def _make_local_path(path_name):
if path_name[0] == ".":
path_name = os.path.join(os.getcwd(), path_name)
path_name = os.path.abspath(path_name)
return path_name
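# ---------------------------------------------------------------------------
# Editor's note: minimal usage example added for illustration; the path and
# values below are arbitrary and not part of the original module.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    # Store a small metrics dict as JSON and print the resulting filepath.
    print(store_dict_json({"accuracy": 0.93}, "./example_artifacts", "metrics"))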
| 33.119658 | 122 | 0.682065 |
7940a924acab9a8a069c85212db65869632c064f | 770 | py | Python | foxtrot/fconstyle.py | dsnezhkov/foxtrot | 7b9adc68d111ffbe21ed5fc91bd7042721d741ff | ["MIT"] | 46 | 2018-02-27T20:22:11.000Z | 2021-03-06T04:01:29.000Z | foxtrot/fconstyle.py | claudijd/foxtrot | 94a68d3a54a6d13642d401b160c38a0036c7c839 | ["MIT"] | 1 | 2021-06-01T21:47:52.000Z | 2021-06-01T21:47:52.000Z | foxtrot/fconstyle.py | dsnezhkov/foxtrot | 7b9adc68d111ffbe21ed5fc91bd7042721d741ff | ["MIT"] | 4 | 2018-05-29T07:44:43.000Z | 2020-02-22T17:54:12.000Z |
from pygments.style import Style
from pygments.token import Token
from pygments.styles.default import DefaultStyle
class AgentStyle(Style):
styles = {
Token.Menu.Completions.Completion.Current: 'bg:#00aaaa #000000',
Token.Menu.Completions.Completion: 'bg:#008888 #ffffff',
Token.Menu.Completions.ProgressButton: 'bg:#003333',
Token.Menu.Completions.ProgressBar: 'bg:#00aaaa',
# User input.
Token: '#ffffcc',
Token.Toolbar: '#ffffff bg:#000000',
# Prompt.
Token.Username: '#884444',
Token.At: '#00aa00',
Token.Marker: '#00aa00',
Token.Host: '#008888',
Token.DTime: '#884444 underline',
}
styles.update(DefaultStyle.styles)
| 27.5 | 72 | 0.612987 |
7940aa1f6db9066e2f4a4d9acced1e39879ec6bf | 19,321 | py | Python | BERT/tokenization.py | justusmattern/dist-embeds | 2a5fd97bcfc3eed5c7f11e76d82c4ff49709cbe8 | ["MIT"] | 8 | 2021-06-18T10:32:27.000Z | 2022-01-16T06:46:25.000Z | adversarial-transfer-nlp/BERT/tokenization.py | AI-secure/Does-Adversairal-Transferability-Indicate-Knowledge-Transferability | a2fb10f56618c6d6dd1638967d59c4a83ffa1c05 | ["CC0-1.0"] | 2 | 2021-08-25T15:14:12.000Z | 2022-02-09T23:55:46.000Z | adversarial-transfer-nlp/BERT/tokenization.py | AI-secure/Does-Adversairal-Transferability-Indicate-Knowledge-Transferability | a2fb10f56618c6d6dd1638967d59c4a83ffa1c05 | ["CC0-1.0"] | null | null | null |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes."""
from __future__ import absolute_import, division, print_function, unicode_literals
import collections
import logging
import os
import unicodedata
from io import open
import six
from .file_utils import cached_path
logger = logging.getLogger(__name__)
PRETRAINED_VOCAB_ARCHIVE_MAP = {
'bert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-vocab.txt",
'bert-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-vocab.txt",
'bert-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-vocab.txt",
'bert-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-vocab.txt",
'bert-base-multilingual-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased-vocab.txt",
'bert-base-multilingual-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased-vocab.txt",
'bert-base-chinese': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese-vocab.txt",
'bert-base-german-cased': "https://int-deepset-models-bert.s3.eu-central-1.amazonaws.com/pytorch/bert-base-german-cased-vocab.txt",
'bert-large-uncased-whole-word-masking': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-vocab.txt",
'bert-large-cased-whole-word-masking': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-vocab.txt",
'bert-large-uncased-whole-word-masking-finetuned-squad': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-finetuned-squad-vocab.txt",
'bert-large-cased-whole-word-masking-finetuned-squad': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-finetuned-squad-vocab.txt",
'bert-base-cased-finetuned-mrpc': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-finetuned-mrpc-vocab.txt",
}
PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP = {
'bert-base-uncased': 512,
'bert-large-uncased': 512,
'bert-base-cased': 512,
'bert-large-cased': 512,
'bert-base-multilingual-uncased': 512,
'bert-base-multilingual-cased': 512,
'bert-base-chinese': 512,
'bert-base-german-cased': 512,
'bert-large-uncased-whole-word-masking': 512,
'bert-large-cased-whole-word-masking': 512,
'bert-large-uncased-whole-word-masking-finetuned-squad': 512,
'bert-large-cased-whole-word-masking-finetuned-squad': 512,
'bert-base-cased-finetuned-mrpc': 512,
}
VOCAB_NAME = 'vocab.txt'
def load_vocab(vocab_file):
"""Loads a vocabulary file into a dictionary."""
vocab = collections.OrderedDict()
index = 0
with open(vocab_file, "r", encoding="utf-8") as reader:
while True:
token = reader.readline()
if not token:
break
token = token.strip()
vocab[token] = index
index += 1
return vocab
def whitespace_tokenize(text):
"""Runs basic whitespace cleaning and splitting on a piece of text."""
text = text.strip()
if not text:
return []
tokens = text.split()
return tokens
class BertTokenizer(object):
"""Runs end-to-end tokenization: punctuation splitting + wordpiece"""
def __init__(self, vocab_file, do_lower_case=True, max_len=None, do_basic_tokenize=True,
never_split=("[UNK]", "[SEP]", "[PAD]", "[CLS]", "[MASK]")):
"""Constructs a BertTokenizer.
Args:
vocab_file: Path to a one-wordpiece-per-line vocabulary file
do_lower_case: Whether to lower case the input
                         Only has an effect when do_basic_tokenize=True
do_basic_tokenize: Whether to do basic tokenization before wordpiece.
max_len: An artificial maximum length to truncate tokenized sequences to;
Effective maximum length is always the minimum of this
value (if specified) and the underlying BERT model's
sequence length.
never_split: List of tokens which will never be split during tokenization.
                         Only has an effect when do_basic_tokenize=True
"""
if not os.path.isfile(vocab_file):
raise ValueError(
"Can't find a vocabulary file at path '{}'. To load the vocabulary from a Google pretrained "
"model use `tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`".format(vocab_file))
self.vocab = load_vocab(vocab_file)
self.ids_to_tokens = collections.OrderedDict(
[(ids, tok) for tok, ids in self.vocab.items()])
self.do_basic_tokenize = do_basic_tokenize
if do_basic_tokenize:
self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case,
never_split=never_split)
self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab)
self.max_len = max_len if max_len is not None else int(1e12)
def tokenize(self, text):
split_tokens = []
if self.do_basic_tokenize:
for token in self.basic_tokenizer.tokenize(text):
for sub_token in self.wordpiece_tokenizer.tokenize(token):
split_tokens.append(sub_token)
else:
split_tokens = self.wordpiece_tokenizer.tokenize(text)
return split_tokens
def convert_tokens_to_ids(self, tokens):
"""Converts a sequence of tokens into ids using the vocab."""
ids = []
for token in tokens:
ids.append(self.vocab[token])
if len(ids) > self.max_len:
logger.warning(
"Token indices sequence length is longer than the specified maximum "
" sequence length for this BERT model ({} > {}). Running this"
" sequence through BERT will result in indexing errors".format(len(ids), self.max_len)
)
return ids
def convert_ids_to_tokens(self, ids):
"""Converts a sequence of ids in wordpiece tokens using the vocab."""
tokens = []
for i in ids:
tokens.append(self.ids_to_tokens[i])
return tokens
def save_vocabulary(self, vocab_path):
"""Save the tokenizer vocabulary to a directory or file."""
index = 0
if os.path.isdir(vocab_path):
vocab_file = os.path.join(vocab_path, VOCAB_NAME)
with open(vocab_file, "w", encoding="utf-8") as writer:
for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
if index != token_index:
logger.warning("Saving vocabulary to {}: vocabulary indices are not consecutive."
" Please check that the vocabulary is not corrupted!".format(vocab_file))
index = token_index
writer.write(token + u'\n')
index += 1
return vocab_file
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, cache_dir=None, *inputs, **kwargs):
"""
Instantiate a PreTrainedBertModel from a pre-trained model file.
Download and cache the pre-trained model file if needed.
"""
if pretrained_model_name_or_path in PRETRAINED_VOCAB_ARCHIVE_MAP:
vocab_file = PRETRAINED_VOCAB_ARCHIVE_MAP[pretrained_model_name_or_path]
if '-cased' in pretrained_model_name_or_path and kwargs.get('do_lower_case', True):
logger.warning("The pre-trained model you are loading is a cased model but you have not set "
"`do_lower_case` to False. We are setting `do_lower_case=False` for you but "
"you may want to check this behavior.")
kwargs['do_lower_case'] = False
elif '-cased' not in pretrained_model_name_or_path and not kwargs.get('do_lower_case', True):
logger.warning("The pre-trained model you are loading is an uncased model but you have set "
"`do_lower_case` to False. We are setting `do_lower_case=True` for you "
"but you may want to check this behavior.")
kwargs['do_lower_case'] = True
else:
vocab_file = pretrained_model_name_or_path
if os.path.isdir(vocab_file):
vocab_file = os.path.join(vocab_file, VOCAB_NAME)
# redirect to the cache, if necessary
try:
resolved_vocab_file = cached_path(vocab_file, cache_dir=cache_dir)
except EnvironmentError:
if pretrained_model_name_or_path in PRETRAINED_VOCAB_ARCHIVE_MAP:
logger.error(
"Couldn't reach server at '{}' to download vocabulary.".format(
vocab_file))
else:
logger.error(
"Model name '{}' was not found in model name list ({}). "
"We assumed '{}' was a path or url but couldn't find any file "
"associated to this path or url.".format(
pretrained_model_name_or_path,
', '.join(PRETRAINED_VOCAB_ARCHIVE_MAP.keys()),
vocab_file))
return None
if resolved_vocab_file == vocab_file:
logger.info("loading vocabulary file {}".format(vocab_file))
else:
logger.info("loading vocabulary file {} from cache at {}".format(
vocab_file, resolved_vocab_file))
if pretrained_model_name_or_path in PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP:
            # if we're using a pretrained model, ensure the tokenizer won't index sequences longer
# than the number of positional embeddings
max_len = PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP[pretrained_model_name_or_path]
kwargs['max_len'] = min(kwargs.get('max_len', int(1e12)), max_len)
# Instantiate tokenizer.
tokenizer = cls(resolved_vocab_file, *inputs, **kwargs)
return tokenizer
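# Illustrative only: a hedged sketch of typical end-to-end use of the
# BertTokenizer defined above. The model name is one of the keys in
# PRETRAINED_VOCAB_ARCHIVE_MAP; from_pretrained() needs network access or a
# cached vocabulary, so the example is wrapped in a function instead of
# running at import time.
def _example_bert_tokenizer_usage():
    tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
    tokens = tokenizer.tokenize("unaffable behaviour")
    ids = tokenizer.convert_tokens_to_ids(tokens)
    # Round-trip the ids back to wordpiece tokens for inspection.
    return tokenizer.convert_ids_to_tokens(ids)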
class BasicTokenizer(object):
"""Runs basic tokenization (punctuation splitting, lower casing, etc.)."""
def __init__(self,
do_lower_case=True,
never_split=("[UNK]", "[SEP]", "[PAD]", "[CLS]", "[MASK]")):
"""Constructs a BasicTokenizer.
Args:
do_lower_case: Whether to lower case the input.
"""
self.do_lower_case = do_lower_case
self.never_split = never_split
def tokenize(self, text):
"""Tokenizes a piece of text."""
text = self._clean_text(text)
# This was added on November 1st, 2018 for the multilingual and Chinese
# models. This is also applied to the English models now, but it doesn't
# matter since the English models were not trained on any Chinese data
# and generally don't have any Chinese data in them (there are Chinese
# characters in the vocabulary because Wikipedia does have some Chinese
# words in the English Wikipedia.).
text = self._tokenize_chinese_chars(text)
orig_tokens = whitespace_tokenize(text)
split_tokens = []
for token in orig_tokens:
if self.do_lower_case and token not in self.never_split:
token = token.lower()
token = self._run_strip_accents(token)
split_tokens.extend(self._run_split_on_punc(token))
output_tokens = whitespace_tokenize(" ".join(split_tokens))
return output_tokens
def _run_strip_accents(self, text):
"""Strips accents from a piece of text."""
text = unicodedata.normalize("NFD", text)
output = []
for char in text:
cat = unicodedata.category(char)
if cat == "Mn":
continue
output.append(char)
return "".join(output)
def _run_split_on_punc(self, text):
"""Splits punctuation on a piece of text."""
if text in self.never_split:
return [text]
chars = list(text)
i = 0
start_new_word = True
output = []
while i < len(chars):
char = chars[i]
if _is_punctuation(char):
output.append([char])
start_new_word = True
else:
if start_new_word:
output.append([])
start_new_word = False
output[-1].append(char)
i += 1
return ["".join(x) for x in output]
def _tokenize_chinese_chars(self, text):
"""Adds whitespace around any CJK character."""
output = []
for char in text:
cp = ord(char)
if self._is_chinese_char(cp):
output.append(" ")
output.append(char)
output.append(" ")
else:
output.append(char)
return "".join(output)
def _is_chinese_char(self, cp):
"""Checks whether CP is the codepoint of a CJK character."""
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
        # like all of the other languages.
if ((cp >= 0x4E00 and cp <= 0x9FFF) or #
(cp >= 0x3400 and cp <= 0x4DBF) or #
(cp >= 0x20000 and cp <= 0x2A6DF) or #
(cp >= 0x2A700 and cp <= 0x2B73F) or #
(cp >= 0x2B740 and cp <= 0x2B81F) or #
(cp >= 0x2B820 and cp <= 0x2CEAF) or
(cp >= 0xF900 and cp <= 0xFAFF) or #
(cp >= 0x2F800 and cp <= 0x2FA1F)): #
return True
return False
def _clean_text(self, text):
"""Performs invalid character removal and whitespace cleanup on text."""
output = []
for char in text:
cp = ord(char)
if cp == 0 or cp == 0xfffd or _is_control(char):
continue
if _is_whitespace(char):
output.append(" ")
else:
output.append(char)
return "".join(output)
class WordpieceTokenizer(object):
"""Runs WordPiece tokenization."""
def __init__(self, vocab, unk_token="[UNK]", max_input_chars_per_word=100):
self.vocab = vocab
self.unk_token = unk_token
self.max_input_chars_per_word = max_input_chars_per_word
def tokenize(self, text):
"""Tokenizes a piece of text into its word pieces.
This uses a greedy longest-match-first algorithm to perform tokenization
using the given vocabulary.
For example:
input = "unaffable"
output = ["un", "##aff", "##able"]
Args:
text: A single token or whitespace separated tokens. This should have
already been passed through `BasicTokenizer`.
Returns:
A list of wordpiece tokens.
"""
output_tokens = []
for token in whitespace_tokenize(text):
chars = list(token)
if len(chars) > self.max_input_chars_per_word:
output_tokens.append(self.unk_token)
continue
is_bad = False
start = 0
sub_tokens = []
while start < len(chars):
end = len(chars)
cur_substr = None
while start < end:
substr = "".join(chars[start:end])
if start > 0:
substr = "##" + substr
if substr in self.vocab:
cur_substr = substr
break
end -= 1
if cur_substr is None:
is_bad = True
break
sub_tokens.append(cur_substr)
start = end
if is_bad:
output_tokens.append(self.unk_token)
else:
output_tokens.extend(sub_tokens)
return output_tokens
def _is_whitespace(char):
"""Checks whether `chars` is a whitespace character."""
    # \t, \n, and \r are technically control characters but we treat them
# as whitespace since they are generally considered as such.
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
cat = unicodedata.category(char)
if cat == "Zs":
return True
return False
def _is_control(char):
"""Checks whether `chars` is a control character."""
# These are technically control characters but we count them as whitespace
# characters.
if char == "\t" or char == "\n" or char == "\r":
return False
cat = unicodedata.category(char)
if cat.startswith("C"):
return True
return False
def _is_punctuation(char):
"""Checks whether `chars` is a punctuation character."""
cp = ord(char)
# We treat all non-letter/number ASCII as punctuation.
# Characters such as "^", "$", and "`" are not in the Unicode
# Punctuation class but we treat them as punctuation anyways, for
# consistency.
if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or
(cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)):
return True
cat = unicodedata.category(char)
if cat.startswith("P"):
return True
return False
def convert_to_unicode(text):
"""Converts `text` to Unicode (if it's not already), assuming utf-8 input."""
if six.PY3:
if isinstance(text, str):
return text
elif isinstance(text, bytes):
return text.decode("utf-8", "ignore")
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
elif six.PY2:
if isinstance(text, str):
return text.decode("utf-8", "ignore")
elif isinstance(text, unicode):
return text
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
else:
raise ValueError("Not running on Python2 or Python 3?") | 43.223714 | 179 | 0.610579 |
7940abbb7fe5a31650988a120367193e7b568baa | 87 | py | Python | HackerRank/Python/Built_Ins/Input().py | AdityaChirravuri/CompetitiveProgramming | 642550e8916b3f7939a1fdd52d10f5f8ae43f161 | [
"MIT"
] | 1 | 2021-07-13T01:49:25.000Z | 2021-07-13T01:49:25.000Z | HackerRank/Python/Built_Ins/Input().py | AdityaChirravuri/CompetitiveProgramming | 642550e8916b3f7939a1fdd52d10f5f8ae43f161 | [
"MIT"
] | null | null | null | HackerRank/Python/Built_Ins/Input().py | AdityaChirravuri/CompetitiveProgramming | 642550e8916b3f7939a1fdd52d10f5f8ae43f161 | [
"MIT"
] | null | null | null | x, k = map(int, input().split())
s = input()
print(eval(s) == k)
| 14.5 | 32 | 0.551724 |
7940abf8418fe66ef7f37e42d8af98581cdee924 | 6,967 | py | Python | core/dbt/graph/selector_spec.py | f1fe/dbt | e943b9fc842535e958ef4fd0b8703adc91556bc6 | [
"Apache-2.0"
] | 3,156 | 2017-03-05T09:59:23.000Z | 2021-06-30T01:27:52.000Z | core/dbt/graph/selector_spec.py | f1fe/dbt | e943b9fc842535e958ef4fd0b8703adc91556bc6 | [
"Apache-2.0"
] | 2,608 | 2017-02-27T15:39:40.000Z | 2021-06-30T01:49:20.000Z | core/dbt/graph/selector_spec.py | f1fe/dbt | e943b9fc842535e958ef4fd0b8703adc91556bc6 | [
"Apache-2.0"
] | 693 | 2017-03-13T03:04:49.000Z | 2021-06-25T15:57:41.000Z | import os
import re
from abc import ABCMeta, abstractmethod
from dataclasses import dataclass
from dbt.dataclass_schema import StrEnum
from typing import (
Set, Iterator, List, Optional, Dict, Union, Any, Iterable, Tuple
)
from .graph import UniqueId
from .selector_methods import MethodName
from dbt.exceptions import RuntimeException, InvalidSelectorException
RAW_SELECTOR_PATTERN = re.compile(
r'\A'
r'(?P<childrens_parents>(\@))?'
r'(?P<parents>((?P<parents_depth>(\d*))\+))?'
r'((?P<method>([\w.]+)):)?(?P<value>(.*?))'
r'(?P<children>(\+(?P<children_depth>(\d*))))?'
r'\Z'
)
SELECTOR_METHOD_SEPARATOR = '.'
class IndirectSelection(StrEnum):
Eager = 'eager'
Cautious = 'cautious'
def _probably_path(value: str):
"""Decide if value is probably a path. Windows has two path separators, so
we should check both sep ('\\') and altsep ('/') there.
"""
if os.path.sep in value:
return True
elif os.path.altsep is not None and os.path.altsep in value:
return True
else:
return False
def _match_to_int(match: Dict[str, str], key: str) -> Optional[int]:
raw = match.get(key)
# turn the empty string into None, too.
if not raw:
return None
try:
return int(raw)
except ValueError as exc:
raise RuntimeException(
f'Invalid node spec - could not handle parent depth {raw}'
) from exc
SelectionSpec = Union[
'SelectionCriteria',
'SelectionIntersection',
'SelectionDifference',
'SelectionUnion',
]
@dataclass
class SelectionCriteria:
raw: Any
method: MethodName
method_arguments: List[str]
value: Any
childrens_parents: bool
parents: bool
parents_depth: Optional[int]
children: bool
children_depth: Optional[int]
indirect_selection: IndirectSelection = IndirectSelection.Eager
def __post_init__(self):
if self.children and self.childrens_parents:
raise RuntimeException(
f'Invalid node spec {self.raw} - "@" prefix and "+" suffix '
'are incompatible'
)
@classmethod
def default_method(cls, value: str) -> MethodName:
if _probably_path(value):
return MethodName.Path
else:
return MethodName.FQN
@classmethod
def parse_method(
cls, groupdict: Dict[str, Any]
) -> Tuple[MethodName, List[str]]:
raw_method = groupdict.get('method')
if raw_method is None:
return cls.default_method(groupdict['value']), []
method_parts: List[str] = raw_method.split(SELECTOR_METHOD_SEPARATOR)
try:
method_name = MethodName(method_parts[0])
except ValueError as exc:
raise InvalidSelectorException(
f"'{method_parts[0]}' is not a valid method name"
) from exc
method_arguments: List[str] = method_parts[1:]
return method_name, method_arguments
@classmethod
def selection_criteria_from_dict(
cls, raw: Any, dct: Dict[str, Any],
indirect_selection: IndirectSelection = IndirectSelection.Eager
) -> 'SelectionCriteria':
if 'value' not in dct:
raise RuntimeException(
f'Invalid node spec "{raw}" - no search value!'
)
method_name, method_arguments = cls.parse_method(dct)
parents_depth = _match_to_int(dct, 'parents_depth')
children_depth = _match_to_int(dct, 'children_depth')
# If defined field in selector, override CLI flag
indirect_selection = IndirectSelection(
dct.get('indirect_selection', None) or indirect_selection
)
return cls(
raw=raw,
method=method_name,
method_arguments=method_arguments,
value=dct['value'],
childrens_parents=bool(dct.get('childrens_parents')),
parents=bool(dct.get('parents')),
parents_depth=parents_depth,
children=bool(dct.get('children')),
children_depth=children_depth,
indirect_selection=indirect_selection
)
@classmethod
def dict_from_single_spec(cls, raw: str):
result = RAW_SELECTOR_PATTERN.match(raw)
if result is None:
return {'error': 'Invalid selector spec'}
dct: Dict[str, Any] = result.groupdict()
method_name, method_arguments = cls.parse_method(dct)
meth_name = str(method_name)
if method_arguments:
meth_name += '.' + '.'.join(method_arguments)
dct['method'] = meth_name
dct = {k: v for k, v in dct.items() if (v is not None and v != '')}
if 'childrens_parents' in dct:
dct['childrens_parents'] = bool(dct.get('childrens_parents'))
if 'parents' in dct:
dct['parents'] = bool(dct.get('parents'))
if 'children' in dct:
dct['children'] = bool(dct.get('children'))
return dct
@classmethod
def from_single_spec(
cls, raw: str,
indirect_selection: IndirectSelection = IndirectSelection.Eager
) -> 'SelectionCriteria':
result = RAW_SELECTOR_PATTERN.match(raw)
if result is None:
# bad spec!
raise RuntimeException(f'Invalid selector spec "{raw}"')
return cls.selection_criteria_from_dict(
raw,
result.groupdict(),
indirect_selection=indirect_selection
)
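# Illustrative only: a hedged sketch of how a raw selector string is parsed by
# RAW_SELECTOR_PATTERN into a SelectionCriteria. The selector value below is
# hypothetical; the attribute meanings follow the groupdict handling above.
def _example_selector_parse():
    # "2+tag:nightly+" means: nodes matched by the tag method with value
    # "nightly", plus their parents up to depth 2, plus all of their children.
    criteria = SelectionCriteria.from_single_spec("2+tag:nightly+")
    return (criteria.method, criteria.value,
            criteria.parents_depth, criteria.children)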
class BaseSelectionGroup(Iterable[SelectionSpec], metaclass=ABCMeta):
def __init__(
self,
components: Iterable[SelectionSpec],
expect_exists: bool = False,
raw: Any = None,
):
self.components: List[SelectionSpec] = list(components)
self.expect_exists = expect_exists
self.raw = raw
def __iter__(self) -> Iterator[SelectionSpec]:
for component in self.components:
yield component
@abstractmethod
def combine_selections(
self,
selections: List[Set[UniqueId]],
) -> Set[UniqueId]:
raise NotImplementedError(
'_combine_selections not implemented!'
)
def combined(self, selections: List[Set[UniqueId]]) -> Set[UniqueId]:
if not selections:
return set()
return self.combine_selections(selections)
class SelectionIntersection(BaseSelectionGroup):
def combine_selections(
self,
selections: List[Set[UniqueId]],
) -> Set[UniqueId]:
return set.intersection(*selections)
class SelectionDifference(BaseSelectionGroup):
def combine_selections(
self,
selections: List[Set[UniqueId]],
) -> Set[UniqueId]:
return set.difference(*selections)
class SelectionUnion(BaseSelectionGroup):
def combine_selections(
self,
selections: List[Set[UniqueId]],
) -> Set[UniqueId]:
return set.union(*selections)
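# Illustrative only: a hedged sketch of combining result sets with the group
# classes above. The unique ids are hypothetical placeholders; UniqueId is
# imported from .graph at the top of this module.
def _example_selection_union():
    union = SelectionUnion(components=[])
    # Combining two result sets yields the union of their unique ids.
    return union.combined([{UniqueId('model.a')}, {UniqueId('model.b')}])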
| 29.901288 | 78 | 0.624803 |
7940ace3e6257b46f5b10eb8cca349068f46729d | 1,340 | py | Python | REF/login/SoftDev/2.py | Apizz789/Project_SOFTDEV | b2e389289e201b284cb1b1b545be25279b677fe0 | [
"MIT"
] | 2 | 2021-11-17T04:27:26.000Z | 2022-01-20T06:17:17.000Z | REF/login/SoftDev/2.py | Apizz789/Project_SOFTDEV | b2e389289e201b284cb1b1b545be25279b677fe0 | [
"MIT"
] | null | null | null | REF/login/SoftDev/2.py | Apizz789/Project_SOFTDEV | b2e389289e201b284cb1b1b545be25279b677fe0 | [
"MIT"
] | null | null | null | alp = ['A','B','C','D','E','F','G','H','I','J','K','L','M','N','O','P','Q','R','S','T','U','V','W','X','Y','Z']
inp = int(input("Enter Number : "))
i = 0
j = 0
copy_inp = inp
while inp > 0:
    inp -= i + 1
    i += 1
inp = copy_inp
while inp > 0:
    j += 1
    for k in range(j + 1):
        inp -= 1
        if inp <= 0:
            break
cur_alp = 0
number_of_print = j + j - 2
middle = j - 1
cur_value = 0
for row in range(i):
    cur_value = row
    for column in range(number_of_print):
        if cur_alp > 25:
            cur_alp = 0
        if row % 2 != 0:
            if column >= middle - row and column <= middle and copy_inp > 0:
                if cur_alp + cur_value < 26:
                    print(alp[cur_alp + cur_value], end='')
                else:
                    print(alp[cur_alp + cur_value - 26], end='')
                cur_value -= 2
                cur_alp += 1
                copy_inp -= 1
            else:
                print(".", end='')
        elif row % 2 == 0:
            if column <= middle + row and column >= middle and copy_inp > 0:
                print(alp[cur_alp], end='')
                cur_alp += 1
                copy_inp -= 1
            else:
                print(".", end='')
    print('')
| 25.283019 | 112 | 0.393284 |
7940ad2a069a396e744feb0c59cbd7dfdf5d76c5 | 66,696 | py | Python | nova/tests/fixtures/libvirt_data.py | oshiro3/nova | a6983621d40261adca9e4ab9feb14910881e9f92 | [
"Apache-2.0"
] | null | null | null | nova/tests/fixtures/libvirt_data.py | oshiro3/nova | a6983621d40261adca9e4ab9feb14910881e9f92 | [
"Apache-2.0"
] | null | null | null | nova/tests/fixtures/libvirt_data.py | oshiro3/nova | a6983621d40261adca9e4ab9feb14910881e9f92 | [
"Apache-2.0"
] | null | null | null | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
from lxml import etree
from oslo_utils import units
from nova.objects.fields import Architecture
from nova.virt.libvirt import config
def fake_kvm_guest():
obj = config.LibvirtConfigGuest()
obj.virt_type = "kvm"
obj.memory = 100 * units.Mi
obj.vcpus = 2
obj.cpuset = set([0, 1, 3, 4, 5])
obj.cputune = config.LibvirtConfigGuestCPUTune()
obj.cputune.shares = 100
obj.cputune.quota = 50000
obj.cputune.period = 25000
obj.membacking = config.LibvirtConfigGuestMemoryBacking()
page1 = config.LibvirtConfigGuestMemoryBackingPage()
page1.size_kb = 2048
page1.nodeset = [0, 1, 2, 3, 5]
page2 = config.LibvirtConfigGuestMemoryBackingPage()
page2.size_kb = 1048576
page2.nodeset = [4]
obj.membacking.hugepages.append(page1)
obj.membacking.hugepages.append(page2)
obj.memtune = config.LibvirtConfigGuestMemoryTune()
obj.memtune.hard_limit = 496
obj.memtune.soft_limit = 672
obj.memtune.swap_hard_limit = 1638
obj.memtune.min_guarantee = 2970
obj.numatune = config.LibvirtConfigGuestNUMATune()
numamemory = config.LibvirtConfigGuestNUMATuneMemory()
numamemory.mode = "preferred"
numamemory.nodeset = [0, 1, 2, 3, 8]
obj.numatune.memory = numamemory
numamemnode0 = config.LibvirtConfigGuestNUMATuneMemNode()
numamemnode0.cellid = 0
numamemnode0.mode = "preferred"
numamemnode0.nodeset = [0, 1]
numamemnode1 = config.LibvirtConfigGuestNUMATuneMemNode()
numamemnode1.cellid = 1
numamemnode1.mode = "preferred"
numamemnode1.nodeset = [2, 3]
numamemnode2 = config.LibvirtConfigGuestNUMATuneMemNode()
numamemnode2.cellid = 2
numamemnode2.mode = "preferred"
numamemnode2.nodeset = [8]
obj.numatune.memnodes.extend([numamemnode0,
numamemnode1,
numamemnode2])
obj.name = "demo"
obj.uuid = "b38a3f43-4be2-4046-897f-b67c2f5e0147"
obj.os_type = "linux"
obj.os_boot_dev = ["hd", "cdrom", "fd"]
obj.os_smbios = config.LibvirtConfigGuestSMBIOS()
obj.features = [
config.LibvirtConfigGuestFeatureACPI(),
config.LibvirtConfigGuestFeatureAPIC(),
config.LibvirtConfigGuestFeatureKvmHidden()
]
obj.sysinfo = config.LibvirtConfigGuestSysinfo()
obj.sysinfo.bios_vendor = "Acme"
obj.sysinfo.system_version = "1.0.0"
# obj.devices[0]
disk = config.LibvirtConfigGuestDisk()
disk.source_type = "file"
disk.source_path = "/tmp/disk-img"
disk.target_dev = "vda"
disk.target_bus = "virtio"
obj.add_device(disk)
# obj.devices[1]
disk = config.LibvirtConfigGuestDisk()
disk.source_device = "cdrom"
disk.source_type = "file"
disk.source_path = "/tmp/cdrom-img"
disk.target_dev = "sda"
disk.target_bus = "sata"
obj.add_device(disk)
# obj.devices[2]
intf = config.LibvirtConfigGuestInterface()
intf.net_type = "network"
intf.mac_addr = "52:54:00:f6:35:8f"
intf.model = "virtio"
intf.source_dev = "virbr0"
obj.add_device(intf)
# obj.devices[3]
balloon = config.LibvirtConfigMemoryBalloon()
balloon.model = 'virtio'
balloon.period = 11
obj.add_device(balloon)
# obj.devices[4]
mouse = config.LibvirtConfigGuestInput()
mouse.type = "mouse"
mouse.bus = "virtio"
obj.add_device(mouse)
# obj.devices[5]
gfx = config.LibvirtConfigGuestGraphics()
gfx.type = "vnc"
gfx.autoport = True
gfx.keymap = "en_US"
gfx.listen = "127.0.0.1"
obj.add_device(gfx)
# obj.devices[6]
video = config.LibvirtConfigGuestVideo()
video.type = 'virtio'
obj.add_device(video)
# obj.devices[7]
serial = config.LibvirtConfigGuestSerial()
serial.type = "file"
serial.source_path = "/tmp/vm.log"
obj.add_device(serial)
# obj.devices[8]
rng = config.LibvirtConfigGuestRng()
rng.backend = '/dev/urandom'
rng.rate_period = '12'
rng.rate_bytes = '34'
obj.add_device(rng)
# obj.devices[9]
controller = config.LibvirtConfigGuestController()
controller.type = 'scsi'
controller.model = 'virtio-scsi' # usually set from image meta
controller.index = 0
obj.add_device(controller)
return obj
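# Illustrative only: a hedged sketch of turning the fixture above into XML,
# assuming the to_xml() helper exposed by nova's libvirt config objects. The
# FAKE_KVM_GUEST literal below holds the serialised form that tests compare
# against.
def _example_fake_kvm_guest_xml():
    guest = fake_kvm_guest()
    return guest.to_xml()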
FAKE_KVM_GUEST = """
<domain type="kvm">
<uuid>b38a3f43-4be2-4046-897f-b67c2f5e0147</uuid>
<name>demo</name>
<memory>104857600</memory>
<memoryBacking>
<hugepages>
<page size="2048" unit="KiB" nodeset="0-3,5"/>
<page size="1048576" unit="KiB" nodeset="4"/>
</hugepages>
</memoryBacking>
<memtune>
<hard_limit unit="KiB">496</hard_limit>
<soft_limit unit="KiB">672</soft_limit>
<swap_hard_limit unit="KiB">1638</swap_hard_limit>
<min_guarantee unit="KiB">2970</min_guarantee>
</memtune>
<numatune>
<memory mode="preferred" nodeset="0-3,8"/>
<memnode cellid="0" mode="preferred" nodeset="0-1"/>
<memnode cellid="1" mode="preferred" nodeset="2-3"/>
<memnode cellid="2" mode="preferred" nodeset="8"/>
</numatune>
<vcpu cpuset="0-1,3-5">2</vcpu>
<sysinfo type='smbios'>
<bios>
<entry name="vendor">Acme</entry>
</bios>
<system>
<entry name="version">1.0.0</entry>
</system>
</sysinfo>
<os>
<type>linux</type>
<boot dev="hd"/>
<boot dev="cdrom"/>
<boot dev="fd"/>
<smbios mode="sysinfo"/>
</os>
<features>
<acpi/>
<apic/>
<kvm>
<hidden state='on'/>
</kvm>
</features>
<cputune>
<shares>100</shares>
<quota>50000</quota>
<period>25000</period>
</cputune>
<devices>
<disk type="file" device="disk">
<source file="/tmp/disk-img"/>
<target bus="virtio" dev="vda"/>
</disk>
<disk type="file" device="cdrom">
<source file="/tmp/cdrom-img"/>
<target bus="sata" dev="sda"/>
</disk>
<interface type='network'>
<mac address='52:54:00:f6:35:8f'/>
<model type='virtio'/>
<source bridge='virbr0'/>
</interface>
<memballoon model='virtio'>
<stats period='11'/>
</memballoon>
<input type="mouse" bus="virtio"/>
<graphics type="vnc" autoport="yes" keymap="en_US" listen="127.0.0.1"/>
<video>
<model type='virtio'/>
</video>
<serial type="file">
<source path="/tmp/vm.log"/>
</serial>
<rng model='virtio'>
<rate period='12' bytes='34'/>
<backend model='random'>/dev/urandom</backend>
</rng>
<controller type='scsi' index='0' model='virtio-scsi'>
</controller>
</devices>
<launchSecurity type="sev">
<policy>0x0033</policy>
<cbitpos>47</cbitpos>
<reducedPhysBits>1</reducedPhysBits>
</launchSecurity>
</domain>"""
CAPABILITIES_HOST_X86_64_TEMPLATE = """
<host>
<uuid>cef19ce0-0ca2-11df-855d-b19fbce37686</uuid>
<cpu>
<arch>x86_64</arch>
<model>Penryn</model>
<vendor>Intel</vendor>
<topology sockets='%(sockets)s' cores='%(cores)s' threads='%(threads)s'/>
<feature name='xtpr'/>
<feature name='tm2'/>
<feature name='est'/>
<feature name='vmx'/>
<feature name='ds_cpl'/>
<feature name='monitor'/>
<feature name='pbe'/>
<feature name='tm'/>
<feature name='ht'/>
<feature name='ss'/>
<feature name='acpi'/>
<feature name='ds'/>
<feature name='vme'/>
<pages unit='KiB' size='4'/>
<pages unit='KiB' size='2048'/>
<pages unit='KiB' size='1048576'/>
</cpu>
<migration_features>
<live/>
<uri_transports>
<uri_transport>tcp</uri_transport>
</uri_transports>
</migration_features>
%(topology)s
<secmodel>
<model>apparmor</model>
<doi>0</doi>
</secmodel>
</host>"""
# NOTE(stephenfin): This is incomplete
CAPABILITIES_HOST_I686_TEMPLATE = """
<host>
<uuid>cef19ce0-0ca2-11df-855d-b19fbce37686</uuid>
<cpu>
<arch>i686</arch>
</cpu>
<power_management/>
<iommu support='no'/>
</host>"""
CAPABILITIES_HOST_AARCH64_TEMPLATE = """
<host>
<uuid>cef19ce0-0ca2-11df-855d-b19fbce37686</uuid>
<cpu>
<arch>aarch64</arch>
<model>host</model>
<topology sockets='1' cores='48' threads='1'/>
<pages unit='KiB' size='4'/>
<pages unit='KiB' size='2048'/>
</cpu>
<power_management/>
<migration_features>
<live/>
<uri_transports>
<uri_transport>tcp</uri_transport>
<uri_transport>rdma</uri_transport>
</uri_transports>
</migration_features>
%(topology)s
<secmodel>
<model>apparmor</model>
<doi>0</doi>
</secmodel>
<secmodel>
<model>dac</model>
<doi>0</doi>
<baselabel type='kvm'>+0:+0</baselabel>
<baselabel type='qemu'>+0:+0</baselabel>
</secmodel>
</host>"""
# NOTE(stephenfin): This is incomplete
CAPABILITIES_HOST_ARMV7_TEMPLATE = """
<host>
<cpu>
<arch>armv7l</arch>
</cpu>
<power_management/>
<iommu support='no'/>
</host>"""
# NOTE(stephenfin): This is incomplete
CAPABILITIES_HOST_PPC_TEMPLATE = """
<host>
<uuid>cef19ce0-0ca2-11df-855d-b19fbce37686</uuid>
<cpu>
<arch>ppc</arch>
</cpu>
<power_management/>
<iommu support='no'/>
</host>"""
# NOTE(stephenfin): This is incomplete
CAPABILITIES_HOST_PPC64_TEMPLATE = """
<host>
<uuid>cef19ce0-0ca2-11df-855d-b19fbce37686</uuid>
<cpu>
<arch>ppc64</arch>
</cpu>
<power_management/>
<iommu support='no'/>
</host>"""
# NOTE(stephenfin): This is incomplete
CAPABILITIES_HOST_PPC64LE_TEMPLATE = """
<host>
<uuid>cef19ce0-0ca2-11df-855d-b19fbce37686</uuid>
<cpu>
<arch>ppc64le</arch>
</cpu>
<power_management/>
<iommu support='no'/>
</host>"""
# NOTE(stephenfin): This is incomplete
CAPABILITIES_HOST_S390X_TEMPLATE = """
<host>
<uuid>cef19ce0-0ca2-11df-855d-b19fbce37686</uuid>
<cpu>
<arch>s390x</arch>
</cpu>
<power_management/>
<iommu support='no'/>
</host>"""
CAPABILITIES_HOST_TEMPLATES = {
Architecture.X86_64: CAPABILITIES_HOST_X86_64_TEMPLATE,
Architecture.I686: CAPABILITIES_HOST_I686_TEMPLATE,
Architecture.AARCH64: CAPABILITIES_HOST_AARCH64_TEMPLATE,
Architecture.ARMV7: CAPABILITIES_HOST_ARMV7_TEMPLATE,
Architecture.PPC: CAPABILITIES_HOST_PPC_TEMPLATE,
Architecture.PPC64: CAPABILITIES_HOST_PPC64_TEMPLATE,
Architecture.PPC64LE: CAPABILITIES_HOST_PPC64LE_TEMPLATE,
Architecture.S390X: CAPABILITIES_HOST_S390X_TEMPLATE,
}
# NOTE(aspiers): HostTestCase has tests which assert that for any
# given (arch, domain) listed in the guest capabilities here, all
# canonical machine types (e.g. 'pc' or 'q35') must be a substring of
# the expanded machine type returned in the <machine> element of the
# corresponding fake getDomainCapabilities response for that (arch,
# domain, canonical_machine_type) combination. Those responses are
# defined by the DOMCAPABILITIES_* variables below. While
# DOMCAPABILITIES_X86_64_TEMPLATE can return multiple values for the
# <machine> element, DOMCAPABILITIES_I686 is fixed to fake a response
# of the 'pc-i440fx-2.11' machine type, therefore
# CAPABILITIES_GUEST['i686'] should return 'pc' as the only canonical
# machine type.
#
# CAPABILITIES_GUEST does not include canonical machine types for
# other non-x86 architectures, so these test assertions on apply to
# x86.
CAPABILITIES_GUEST = {
'i686': '''
<guest>
<os_type>hvm</os_type>
<arch name='i686'>
<wordsize>32</wordsize>
<emulator>/usr/bin/qemu-system-i386</emulator>
<machine maxCpus='255'>pc-i440fx-2.11</machine>
<machine canonical='pc-i440fx-2.11' maxCpus='255'>pc</machine>
<machine maxCpus='1'>isapc</machine>
<machine maxCpus='255'>pc-1.1</machine>
<machine maxCpus='255'>pc-1.2</machine>
<machine maxCpus='255'>pc-1.3</machine>
<machine maxCpus='255'>pc-i440fx-2.8</machine>
<machine maxCpus='255'>pc-1.0</machine>
<machine maxCpus='255'>pc-i440fx-2.9</machine>
<machine maxCpus='255'>pc-i440fx-2.6</machine>
<machine maxCpus='255'>pc-i440fx-2.7</machine>
<machine maxCpus='128'>xenfv</machine>
<machine maxCpus='255'>pc-i440fx-2.3</machine>
<machine maxCpus='255'>pc-i440fx-2.4</machine>
<machine maxCpus='255'>pc-i440fx-2.5</machine>
<machine maxCpus='255'>pc-i440fx-2.1</machine>
<machine maxCpus='255'>pc-i440fx-2.2</machine>
<machine maxCpus='255'>pc-i440fx-2.0</machine>
<machine maxCpus='288'>pc-q35-2.11</machine>
<machine maxCpus='288'>q35</machine>
<machine maxCpus='1'>xenpv</machine>
<machine maxCpus='288'>pc-q35-2.10</machine>
<machine maxCpus='255'>pc-i440fx-1.7</machine>
<machine maxCpus='288'>pc-q35-2.9</machine>
<machine maxCpus='255'>pc-0.15</machine>
<machine maxCpus='255'>pc-i440fx-1.5</machine>
<machine maxCpus='255'>pc-q35-2.7</machine>
<machine maxCpus='255'>pc-i440fx-1.6</machine>
<machine maxCpus='288'>pc-q35-2.8</machine>
<machine maxCpus='255'>pc-0.13</machine>
<machine maxCpus='255'>pc-0.14</machine>
<machine maxCpus='255'>pc-q35-2.4</machine>
<machine maxCpus='255'>pc-q35-2.5</machine>
<machine maxCpus='255'>pc-q35-2.6</machine>
<machine maxCpus='255'>pc-i440fx-1.4</machine>
<machine maxCpus='255'>pc-i440fx-2.10</machine>
<machine maxCpus='255'>pc-0.11</machine>
<machine maxCpus='255'>pc-0.12</machine>
<machine maxCpus='255'>pc-0.10</machine>
<domain type='qemu'/>
<domain type='kvm'>
<emulator>/usr/bin/qemu-kvm</emulator>
<machine maxCpus='255'>pc-i440fx-2.11</machine>
<machine canonical='pc-i440fx-2.11' maxCpus='255'>pc</machine>
<machine maxCpus='1'>isapc</machine>
<machine maxCpus='255'>pc-1.1</machine>
<machine maxCpus='255'>pc-1.2</machine>
<machine maxCpus='255'>pc-1.3</machine>
<machine maxCpus='255'>pc-i440fx-2.8</machine>
<machine maxCpus='255'>pc-1.0</machine>
<machine maxCpus='255'>pc-i440fx-2.9</machine>
<machine maxCpus='255'>pc-i440fx-2.6</machine>
<machine maxCpus='255'>pc-i440fx-2.7</machine>
<machine maxCpus='128'>xenfv</machine>
<machine maxCpus='255'>pc-i440fx-2.3</machine>
<machine maxCpus='255'>pc-i440fx-2.4</machine>
<machine maxCpus='255'>pc-i440fx-2.5</machine>
<machine maxCpus='255'>pc-i440fx-2.1</machine>
<machine maxCpus='255'>pc-i440fx-2.2</machine>
<machine maxCpus='255'>pc-i440fx-2.0</machine>
<machine maxCpus='288'>pc-q35-2.11</machine>
<machine maxCpus='288'>q35</machine>
<machine maxCpus='1'>xenpv</machine>
<machine maxCpus='288'>pc-q35-2.10</machine>
<machine maxCpus='255'>pc-i440fx-1.7</machine>
<machine maxCpus='288'>pc-q35-2.9</machine>
<machine maxCpus='255'>pc-0.15</machine>
<machine maxCpus='255'>pc-i440fx-1.5</machine>
<machine maxCpus='255'>pc-q35-2.7</machine>
<machine maxCpus='255'>pc-i440fx-1.6</machine>
<machine maxCpus='288'>pc-q35-2.8</machine>
<machine maxCpus='255'>pc-0.13</machine>
<machine maxCpus='255'>pc-0.14</machine>
<machine maxCpus='255'>pc-q35-2.4</machine>
<machine maxCpus='255'>pc-q35-2.5</machine>
<machine maxCpus='255'>pc-q35-2.6</machine>
<machine maxCpus='255'>pc-i440fx-1.4</machine>
<machine maxCpus='255'>pc-i440fx-2.10</machine>
<machine maxCpus='255'>pc-0.11</machine>
<machine maxCpus='255'>pc-0.12</machine>
<machine maxCpus='255'>pc-0.10</machine>
</domain>
</arch>
<features>
<cpuselection/>
<deviceboot/>
<disksnapshot default='on' toggle='no'/>
<acpi default='on' toggle='yes'/>
<apic default='on' toggle='no'/>
<pae/>
<nonpae/>
</features>
</guest>''',
'x86_64': '''
<guest>
<os_type>hvm</os_type>
<arch name='x86_64'>
<wordsize>64</wordsize>
<emulator>/usr/bin/qemu-system-x86_64</emulator>
<machine maxCpus='255'>pc-i440fx-2.11</machine>
<machine canonical='pc-i440fx-2.11' maxCpus='255'>pc</machine>
<machine maxCpus='1'>isapc</machine>
<machine maxCpus='255'>pc-1.1</machine>
<machine maxCpus='255'>pc-1.2</machine>
<machine maxCpus='255'>pc-1.3</machine>
<machine maxCpus='255'>pc-i440fx-2.8</machine>
<machine maxCpus='255'>pc-1.0</machine>
<machine maxCpus='255'>pc-i440fx-2.9</machine>
<machine maxCpus='255'>pc-i440fx-2.6</machine>
<machine maxCpus='255'>pc-i440fx-2.7</machine>
<machine maxCpus='128'>xenfv</machine>
<machine maxCpus='255'>pc-i440fx-2.3</machine>
<machine maxCpus='255'>pc-i440fx-2.4</machine>
<machine maxCpus='255'>pc-i440fx-2.5</machine>
<machine maxCpus='255'>pc-i440fx-2.1</machine>
<machine maxCpus='255'>pc-i440fx-2.2</machine>
<machine maxCpus='255'>pc-i440fx-2.0</machine>
<machine maxCpus='288'>pc-q35-2.11</machine>
<machine canonical='pc-q35-2.11' maxCpus='288'>q35</machine>
<machine maxCpus='1'>xenpv</machine>
<machine maxCpus='288'>pc-q35-2.10</machine>
<machine maxCpus='255'>pc-i440fx-1.7</machine>
<machine maxCpus='288'>pc-q35-2.9</machine>
<machine maxCpus='255'>pc-0.15</machine>
<machine maxCpus='255'>pc-i440fx-1.5</machine>
<machine maxCpus='255'>pc-q35-2.7</machine>
<machine maxCpus='255'>pc-i440fx-1.6</machine>
<machine maxCpus='288'>pc-q35-2.8</machine>
<machine maxCpus='255'>pc-0.13</machine>
<machine maxCpus='255'>pc-0.14</machine>
<machine maxCpus='255'>pc-q35-2.4</machine>
<machine maxCpus='255'>pc-q35-2.5</machine>
<machine maxCpus='255'>pc-q35-2.6</machine>
<machine maxCpus='255'>pc-i440fx-1.4</machine>
<machine maxCpus='255'>pc-i440fx-2.10</machine>
<machine maxCpus='255'>pc-0.11</machine>
<machine maxCpus='255'>pc-0.12</machine>
<machine maxCpus='255'>pc-0.10</machine>
<domain type='qemu'/>
<domain type='kvm'>
<emulator>/usr/bin/qemu-kvm</emulator>
<machine maxCpus='255'>pc-i440fx-2.11</machine>
<machine canonical='pc-i440fx-2.11' maxCpus='255'>pc</machine>
<machine maxCpus='1'>isapc</machine>
<machine maxCpus='255'>pc-1.1</machine>
<machine maxCpus='255'>pc-1.2</machine>
<machine maxCpus='255'>pc-1.3</machine>
<machine maxCpus='255'>pc-i440fx-2.8</machine>
<machine maxCpus='255'>pc-1.0</machine>
<machine maxCpus='255'>pc-i440fx-2.9</machine>
<machine maxCpus='255'>pc-i440fx-2.6</machine>
<machine maxCpus='255'>pc-i440fx-2.7</machine>
<machine maxCpus='128'>xenfv</machine>
<machine maxCpus='255'>pc-i440fx-2.3</machine>
<machine maxCpus='255'>pc-i440fx-2.4</machine>
<machine maxCpus='255'>pc-i440fx-2.5</machine>
<machine maxCpus='255'>pc-i440fx-2.1</machine>
<machine maxCpus='255'>pc-i440fx-2.2</machine>
<machine maxCpus='255'>pc-i440fx-2.0</machine>
<machine maxCpus='288'>pc-q35-2.11</machine>
<machine canonical='pc-q35-2.11' maxCpus='288'>q35</machine>
<machine maxCpus='1'>xenpv</machine>
<machine maxCpus='288'>pc-q35-2.10</machine>
<machine maxCpus='255'>pc-i440fx-1.7</machine>
<machine maxCpus='288'>pc-q35-2.9</machine>
<machine maxCpus='255'>pc-0.15</machine>
<machine maxCpus='255'>pc-i440fx-1.5</machine>
<machine maxCpus='255'>pc-q35-2.7</machine>
<machine maxCpus='255'>pc-i440fx-1.6</machine>
<machine maxCpus='288'>pc-q35-2.8</machine>
<machine maxCpus='255'>pc-0.13</machine>
<machine maxCpus='255'>pc-0.14</machine>
<machine maxCpus='255'>pc-q35-2.4</machine>
<machine maxCpus='255'>pc-q35-2.5</machine>
<machine maxCpus='255'>pc-q35-2.6</machine>
<machine maxCpus='255'>pc-i440fx-1.4</machine>
<machine maxCpus='255'>pc-i440fx-2.10</machine>
<machine maxCpus='255'>pc-0.11</machine>
<machine maxCpus='255'>pc-0.12</machine>
<machine maxCpus='255'>pc-0.10</machine>
</domain>
</arch>
<features>
<cpuselection/>
<deviceboot/>
<disksnapshot default='on' toggle='no'/>
<acpi default='on' toggle='yes'/>
<apic default='on' toggle='no'/>
</features>
</guest>''',
'aarch64': '''
<guest>
<os_type>hvm</os_type>
<arch name='aarch64'>
<wordsize>64</wordsize>
<emulator>/usr/bin/qemu-system-aarch64</emulator>
<machine maxCpus='1'>integratorcp</machine>
<machine maxCpus='2'>ast2600-evb</machine>
<machine maxCpus='1'>borzoi</machine>
<machine maxCpus='1'>spitz</machine>
<machine maxCpus='255'>virt-2.7</machine>
<machine maxCpus='2'>nuri</machine>
<machine maxCpus='2'>mcimx7d-sabre</machine>
<machine maxCpus='1'>romulus-bmc</machine>
<machine maxCpus='512'>virt-3.0</machine>
<machine maxCpus='512'>virt-5.0</machine>
<machine maxCpus='255'>virt-2.10</machine>
<machine maxCpus='255'>virt-2.8</machine>
<machine maxCpus='2'>musca-b1</machine>
<machine maxCpus='4'>realview-pbx-a9</machine>
<machine maxCpus='1'>versatileab</machine>
<machine maxCpus='1'>kzm</machine>
<machine maxCpus='2'>musca-a</machine>
<machine maxCpus='512'>virt-3.1</machine>
<machine maxCpus='1'>mcimx6ul-evk</machine>
<machine maxCpus='512'>virt-5.1</machine>
<machine canonical='virt-5.1' maxCpus='512'>virt</machine>
<machine maxCpus='2'>smdkc210</machine>
<machine maxCpus='1'>sx1</machine>
<machine maxCpus='4'>raspi2</machine>
<machine maxCpus='255'>virt-2.11</machine>
<machine maxCpus='1'>imx25-pdk</machine>
<machine maxCpus='255'>virt-2.9</machine>
<machine maxCpus='4'>orangepi-pc</machine>
<machine maxCpus='1'>z2</machine>
<machine maxCpus='1'>xilinx-zynq-a9</machine>
<machine maxCpus='6'>xlnx-zcu102</machine>
<machine maxCpus='4'>raspi3</machine>
<machine maxCpus='1'>tosa</machine>
<machine maxCpus='255'>virt-2.12</machine>
<machine maxCpus='2'>mps2-an521</machine>
<machine maxCpus='4'>sabrelite</machine>
<machine maxCpus='1'>mps2-an511</machine>
<machine maxCpus='1'>canon-a1100</machine>
<machine maxCpus='1'>realview-eb</machine>
<machine maxCpus='1'>emcraft-sf2</machine>
<machine maxCpus='1'>realview-pb-a8</machine>
<machine maxCpus='512'>sbsa-ref</machine>
<machine maxCpus='512'>virt-4.0</machine>
<machine maxCpus='1'>palmetto-bmc</machine>
<machine maxCpus='1'>sx1-v1</machine>
<machine maxCpus='1'>n810</machine>
<machine maxCpus='2'>tacoma-bmc</machine>
<machine maxCpus='1'>n800</machine>
<machine maxCpus='512'>virt-4.1</machine>
<machine maxCpus='1'>versatilepb</machine>
<machine maxCpus='1'>terrier</machine>
<machine maxCpus='1'>mainstone</machine>
<machine maxCpus='4'>realview-eb-mpcore</machine>
<machine maxCpus='512'>virt-4.2</machine>
<machine maxCpus='1'>witherspoon-bmc</machine>
<machine maxCpus='1'>swift-bmc</machine>
<machine maxCpus='4'>vexpress-a9</machine>
<machine maxCpus='4'>midway</machine>
<machine maxCpus='1'>musicpal</machine>
<machine maxCpus='1'>lm3s811evb</machine>
<machine maxCpus='1'>lm3s6965evb</machine>
<machine maxCpus='1'>microbit</machine>
<machine maxCpus='1'>mps2-an505</machine>
<machine maxCpus='1'>mps2-an385</machine>
<machine maxCpus='1'>cubieboard</machine>
<machine maxCpus='1'>verdex</machine>
<machine maxCpus='1'>netduino2</machine>
<machine maxCpus='2'>xlnx-versal-virt</machine>
<machine maxCpus='4'>vexpress-a15</machine>
<machine maxCpus='1'>sonorapass-bmc</machine>
<machine maxCpus='1'>cheetah</machine>
<machine maxCpus='255'>virt-2.6</machine>
<machine maxCpus='1'>ast2500-evb</machine>
<machine maxCpus='4'>highbank</machine>
<machine maxCpus='1'>akita</machine>
<machine maxCpus='1'>connex</machine>
<machine maxCpus='1'>netduinoplus2</machine>
<machine maxCpus='1'>collie</machine>
<domain type='qemu'/>
</arch>
<features>
<acpi default='on' toggle='yes'/>
<cpuselection/>
<deviceboot/>
<disksnapshot default='on' toggle='no'/>
</features>
</guest>''',
'armv7l': '''
<guest>
<os_type>hvm</os_type>
<arch name='armv7l'>
<wordsize>32</wordsize>
<emulator>/usr/bin/qemu-system-arm</emulator>
<machine>integratorcp</machine>
<machine>vexpress-a9</machine>
<machine>syborg</machine>
<machine>musicpal</machine>
<machine>mainstone</machine>
<machine>n800</machine>
<machine>n810</machine>
<machine>n900</machine>
<machine>cheetah</machine>
<machine>sx1</machine>
<machine>sx1-v1</machine>
<machine>beagle</machine>
<machine>beaglexm</machine>
<machine>tosa</machine>
<machine>akita</machine>
<machine>spitz</machine>
<machine>borzoi</machine>
<machine>terrier</machine>
<machine>connex</machine>
<machine>verdex</machine>
<machine>lm3s811evb</machine>
<machine>lm3s6965evb</machine>
<machine>realview-eb</machine>
<machine>realview-eb-mpcore</machine>
<machine>realview-pb-a8</machine>
<machine>realview-pbx-a9</machine>
<machine>versatilepb</machine>
<machine>versatileab</machine>
<domain type='qemu'>
</domain>
</arch>
<features>
<deviceboot/>
</features>
</guest>''',
'mips': '''
<guest>
<os_type>hvm</os_type>
<arch name='mips'>
<wordsize>32</wordsize>
<emulator>/usr/bin/qemu-system-mips</emulator>
<machine>malta</machine>
<machine>mipssim</machine>
<machine>magnum</machine>
<machine>pica61</machine>
<machine>mips</machine>
<domain type='qemu'>
</domain>
</arch>
<features>
<deviceboot/>
</features>
</guest>''',
'mipsel': '''
<guest>
<os_type>hvm</os_type>
<arch name='mipsel'>
<wordsize>32</wordsize>
<emulator>/usr/bin/qemu-system-mipsel</emulator>
<machine>malta</machine>
<machine>mipssim</machine>
<machine>magnum</machine>
<machine>pica61</machine>
<machine>mips</machine>
<domain type='qemu'>
</domain>
</arch>
<features>
<deviceboot/>
</features>
</guest>''',
'sparc': '''
<guest>
<os_type>hvm</os_type>
<arch name='sparc'>
<wordsize>32</wordsize>
<emulator>/usr/bin/qemu-system-sparc</emulator>
<machine>SS-5</machine>
<machine>leon3_generic</machine>
<machine>SS-10</machine>
<machine>SS-600MP</machine>
<machine>SS-20</machine>
<machine>Voyager</machine>
<machine>LX</machine>
<machine>SS-4</machine>
<machine>SPARCClassic</machine>
<machine>SPARCbook</machine>
<machine>SS-1000</machine>
<machine>SS-2000</machine>
<machine>SS-2</machine>
<domain type='qemu'>
</domain>
</arch>
</guest>''',
'ppc': '''
<guest>
<os_type>hvm</os_type>
<arch name='ppc'>
<wordsize>32</wordsize>
<emulator>/usr/bin/qemu-system-ppc</emulator>
<machine>g3beige</machine>
<machine>virtex-ml507</machine>
<machine>mpc8544ds</machine>
<machine>bamboo-0.13</machine>
<machine>bamboo-0.12</machine>
<machine>ref405ep</machine>
<machine>taihu</machine>
<machine>mac99</machine>
<machine>prep</machine>
<domain type='qemu'>
</domain>
</arch>
<features>
<deviceboot/>
</features>
</guest>'''
}
DOMCAPABILITIES_SPARC = """
<domainCapabilities>
<path>/usr/bin/qemu-system-sparc</path>
<domain>qemu</domain>
<machine>SS-5</machine>
<arch>sparc</arch>
<vcpu max='1'/>
<os supported='yes'>
<loader supported='yes'>
<enum name='type'>
<value>rom</value>
<value>pflash</value>
</enum>
<enum name='readonly'>
<value>yes</value>
<value>no</value>
</enum>
<enum name='secure'>
<value>yes</value>
<value>no</value>
</enum>
</loader>
</os>
<cpu>
<mode name='host-passthrough' supported='no'/>
<mode name='host-model' supported='no'/>
<mode name='custom' supported='no'/>
</cpu>
<devices>
<disk supported='yes'>
<enum name='diskDevice'>
<value>disk</value>
<value>cdrom</value>
<value>floppy</value>
<value>lun</value>
</enum>
<enum name='bus'>
<value>fdc</value>
<value>scsi</value>
<value>virtio</value>
</enum>
</disk>
<graphics supported='yes'>
<enum name='type'>
<value>sdl</value>
<value>vnc</value>
<value>spice</value>
</enum>
</graphics>
<video supported='yes'>
<enum name='modelType'/>
</video>
<hostdev supported='yes'>
<enum name='mode'>
<value>subsystem</value>
</enum>
<enum name='startupPolicy'>
<value>default</value>
<value>mandatory</value>
<value>requisite</value>
<value>optional</value>
</enum>
<enum name='subsysType'>
<value>usb</value>
<value>pci</value>
<value>scsi</value>
</enum>
<enum name='capsType'/>
<enum name='pciBackend'/>
</hostdev>
</devices>
<features>
<gic supported='no'/>
</features>
</domainCapabilities>
"""
DOMCAPABILITIES_ARMV7 = """
<domainCapabilities>
<path>/usr/bin/qemu-system-arm</path>
<domain>qemu</domain>
<machine>virt-2.11</machine>
<arch>armv7l</arch>
<vcpu max='255'/>
<os supported='yes'>
<loader supported='yes'>
<enum name='type'>
<value>rom</value>
<value>pflash</value>
</enum>
<enum name='readonly'>
<value>yes</value>
<value>no</value>
</enum>
<enum name='secure'>
<value>yes</value>
<value>no</value>
</enum>
</loader>
</os>
<cpu>
<mode name='host-passthrough' supported='no'/>
<mode name='host-model' supported='no'/>
<mode name='custom' supported='yes'>
<model usable='unknown'>pxa262</model>
<model usable='unknown'>pxa270-a0</model>
<model usable='unknown'>arm1136</model>
<model usable='unknown'>cortex-a15</model>
<model usable='unknown'>pxa260</model>
<model usable='unknown'>arm1136-r2</model>
<model usable='unknown'>pxa261</model>
<model usable='unknown'>pxa255</model>
<model usable='unknown'>arm926</model>
<model usable='unknown'>arm11mpcore</model>
<model usable='unknown'>pxa250</model>
<model usable='unknown'>ti925t</model>
<model usable='unknown'>sa1110</model>
<model usable='unknown'>arm1176</model>
<model usable='unknown'>sa1100</model>
<model usable='unknown'>pxa270-c5</model>
<model usable='unknown'>cortex-a9</model>
<model usable='unknown'>cortex-a8</model>
<model usable='unknown'>pxa270-c0</model>
<model usable='unknown'>cortex-a7</model>
<model usable='unknown'>arm1026</model>
<model usable='unknown'>pxa270-b1</model>
<model usable='unknown'>cortex-m3</model>
<model usable='unknown'>cortex-m4</model>
<model usable='unknown'>pxa270-b0</model>
<model usable='unknown'>arm946</model>
<model usable='unknown'>cortex-r5</model>
<model usable='unknown'>pxa270-a1</model>
<model usable='unknown'>pxa270</model>
</mode>
</cpu>
<devices>
<disk supported='yes'>
<enum name='diskDevice'>
<value>disk</value>
<value>cdrom</value>
<value>floppy</value>
<value>lun</value>
</enum>
<enum name='bus'>
<value>fdc</value>
<value>scsi</value>
<value>virtio</value>
<value>usb</value>
<value>sata</value>
</enum>
</disk>
<graphics supported='yes'>
<enum name='type'>
<value>sdl</value>
<value>vnc</value>
<value>spice</value>
</enum>
</graphics>
<video supported='yes'>
<enum name='modelType'>
<value>vga</value>
<value>virtio</value>
</enum>
</video>
<hostdev supported='yes'>
<enum name='mode'>
<value>subsystem</value>
</enum>
<enum name='startupPolicy'>
<value>default</value>
<value>mandatory</value>
<value>requisite</value>
<value>optional</value>
</enum>
<enum name='subsysType'>
<value>usb</value>
<value>pci</value>
<value>scsi</value>
</enum>
<enum name='capsType'/>
<enum name='pciBackend'/>
</hostdev>
</devices>
<features>
<gic supported='yes'>
<enum name='version'>
<value>2</value>
<value>3</value>
</enum>
</gic>
</features>
</domainCapabilities>
"""
DOMCAPABILITIES_AARCH64 = """
<domainCapabilities>
<path>/usr/bin/qemu-system-aarch64</path>
<domain>qemu</domain>
<machine>virt-5.1</machine>
<arch>aarch64</arch>
<vcpu max='512'/>
<iothreads supported='yes'/>
<os supported='yes'>
<enum name='firmware'>
<value>efi</value>
</enum>
<loader supported='yes'>
<value>/usr/share/AAVMF/AAVMF_CODE.fd</value>
<enum name='type'>
<value>rom</value>
<value>pflash</value>
</enum>
<enum name='readonly'>
<value>yes</value>
<value>no</value>
</enum>
<enum name='secure'>
<value>no</value>
<value>yes</value>
</enum>
</loader>
</os>
<cpu>
<mode name='host-passthrough' supported='no'/>
<mode name='host-model' supported='no'/>
<mode name='custom' supported='yes'>
<model usable='unknown'>pxa270-c0</model>
<model usable='unknown'>cortex-a15</model>
<model usable='unknown'>pxa270-b0</model>
<model usable='unknown'>cortex-a57</model>
<model usable='unknown'>cortex-m4</model>
<model usable='unknown'>pxa270-a0</model>
<model usable='unknown'>arm1176</model>
<model usable='unknown'>pxa270-b1</model>
<model usable='unknown'>cortex-a7</model>
<model usable='unknown'>pxa270-a1</model>
<model usable='unknown'>cortex-a8</model>
<model usable='unknown'>cortex-r5</model>
<model usable='unknown'>ti925t</model>
<model usable='unknown'>cortex-r5f</model>
<model usable='unknown'>arm1026</model>
<model usable='unknown'>cortex-a9</model>
<model usable='unknown'>cortex-m7</model>
<model usable='unknown'>pxa270</model>
<model usable='unknown'>pxa260</model>
<model usable='unknown'>pxa250</model>
<model usable='unknown'>pxa270-c5</model>
<model usable='unknown'>pxa261</model>
<model usable='unknown'>pxa262</model>
<model usable='unknown'>sa1110</model>
<model usable='unknown'>sa1100</model>
<model usable='unknown'>max</model>
<model usable='unknown'>cortex-a53</model>
<model usable='unknown'>cortex-m0</model>
<model usable='unknown'>cortex-m33</model>
<model usable='unknown'>cortex-a72</model>
<model usable='unknown'>arm946</model>
<model usable='unknown'>pxa255</model>
<model usable='unknown'>arm11mpcore</model>
<model usable='unknown'>arm926</model>
<model usable='unknown'>arm1136</model>
<model usable='unknown'>arm1136-r2</model>
<model usable='unknown'>cortex-m3</model>
</mode>
</cpu>
<devices>
<disk supported='yes'>
<enum name='diskDevice'>
<value>disk</value>
<value>cdrom</value>
<value>floppy</value>
<value>lun</value>
</enum>
<enum name='bus'>
<value>fdc</value>
<value>scsi</value>
<value>virtio</value>
<value>usb</value>
<value>sata</value>
</enum>
<enum name='model'>
<value>virtio</value>
<value>virtio-transitional</value>
<value>virtio-non-transitional</value>
</enum>
</disk>
<graphics supported='yes'>
<enum name='type'>
<value>sdl</value>
<value>vnc</value>
<value>spice</value>
</enum>
</graphics>
<video supported='yes'>
<enum name='modelType'>
<value>vga</value>
<value>cirrus</value>
<value>vmvga</value>
<value>qxl</value>
<value>virtio</value>
<value>none</value>
<value>bochs</value>
<value>ramfb</value>
</enum>
</video>
<hostdev supported='yes'>
<enum name='mode'>
<value>subsystem</value>
</enum>
<enum name='startupPolicy'>
<value>default</value>
<value>mandatory</value>
<value>requisite</value>
<value>optional</value>
</enum>
<enum name='subsysType'>
<value>usb</value>
<value>pci</value>
<value>scsi</value>
</enum>
<enum name='capsType'/>
<enum name='pciBackend'/>
</hostdev>
<rng supported='yes'>
<enum name='model'>
<value>virtio</value>
<value>virtio-transitional</value>
<value>virtio-non-transitional</value>
</enum>
<enum name='backendModel'>
<value>random</value>
<value>egd</value>
<value>builtin</value>
</enum>
</rng>
</devices>
<features>
<gic supported='yes'>
<enum name='version'>
<value>2</value>
<value>3</value>
</enum>
</gic>
<vmcoreinfo supported='yes'/>
<genid supported='no'/>
<backingStoreInput supported='yes'/>
<backup supported='no'/>
<sev supported='no'/>
</features>
</domainCapabilities>
"""
DOMCAPABILITIES_PPC = """
<domainCapabilities>
<path>/usr/bin/qemu-system-ppc</path>
<domain>qemu</domain>
<machine>g3beige</machine>
<arch>ppc</arch>
<vcpu max='1'/>
<os supported='yes'>
<loader supported='yes'>
<enum name='type'>
<value>rom</value>
<value>pflash</value>
</enum>
<enum name='readonly'>
<value>yes</value>
<value>no</value>
</enum>
<enum name='secure'>
<value>yes</value>
<value>no</value>
</enum>
</loader>
</os>
<cpu>
<mode name='host-passthrough' supported='no'/>
<mode name='host-model' supported='no'/>
<mode name='custom' supported='no'/>
</cpu>
<devices>
<disk supported='yes'>
<enum name='diskDevice'>
<value>disk</value>
<value>cdrom</value>
<value>floppy</value>
<value>lun</value>
</enum>
<enum name='bus'>
<value>ide</value>
<value>fdc</value>
<value>scsi</value>
<value>virtio</value>
<value>usb</value>
<value>sata</value>
</enum>
</disk>
<graphics supported='yes'>
<enum name='type'>
<value>sdl</value>
<value>vnc</value>
<value>spice</value>
</enum>
</graphics>
<video supported='yes'>
<enum name='modelType'>
<value>vga</value>
<value>virtio</value>
</enum>
</video>
<hostdev supported='yes'>
<enum name='mode'>
<value>subsystem</value>
</enum>
<enum name='startupPolicy'>
<value>default</value>
<value>mandatory</value>
<value>requisite</value>
<value>optional</value>
</enum>
<enum name='subsysType'>
<value>usb</value>
<value>pci</value>
<value>scsi</value>
</enum>
<enum name='capsType'/>
<enum name='pciBackend'/>
</hostdev>
</devices>
<features>
<gic supported='no'/>
</features>
</domainCapabilities>
"""
DOMCAPABILITIES_MIPS = """
<domainCapabilities>
<path>/usr/bin/qemu-system-mips</path>
<domain>qemu</domain>
<machine>malta</machine>
<arch>mips</arch>
<vcpu max='16'/>
<os supported='yes'>
<loader supported='yes'>
<enum name='type'>
<value>rom</value>
<value>pflash</value>
</enum>
<enum name='readonly'>
<value>yes</value>
<value>no</value>
</enum>
<enum name='secure'>
<value>yes</value>
<value>no</value>
</enum>
</loader>
</os>
<cpu>
<mode name='host-passthrough' supported='no'/>
<mode name='host-model' supported='no'/>
<mode name='custom' supported='no'/>
</cpu>
<devices>
<disk supported='yes'>
<enum name='diskDevice'>
<value>disk</value>
<value>cdrom</value>
<value>floppy</value>
<value>lun</value>
</enum>
<enum name='bus'>
<value>ide</value>
<value>fdc</value>
<value>scsi</value>
<value>virtio</value>
<value>usb</value>
<value>sata</value>
</enum>
</disk>
<graphics supported='yes'>
<enum name='type'>
<value>sdl</value>
<value>vnc</value>
<value>spice</value>
</enum>
</graphics>
<video supported='yes'>
<enum name='modelType'>
<value>vga</value>
<value>cirrus</value>
<value>vmvga</value>
<value>virtio</value>
</enum>
</video>
<hostdev supported='yes'>
<enum name='mode'>
<value>subsystem</value>
</enum>
<enum name='startupPolicy'>
<value>default</value>
<value>mandatory</value>
<value>requisite</value>
<value>optional</value>
</enum>
<enum name='subsysType'>
<value>usb</value>
<value>pci</value>
<value>scsi</value>
</enum>
<enum name='capsType'/>
<enum name='pciBackend'/>
</hostdev>
</devices>
<features>
<gic supported='no'/>
</features>
</domainCapabilities>
"""
DOMCAPABILITIES_MIPSEL = """
<domainCapabilities>
<path>/usr/bin/qemu-system-mipsel</path>
<domain>qemu</domain>
<machine>malta</machine>
<arch>mipsel</arch>
<vcpu max='16'/>
<os supported='yes'>
<loader supported='yes'>
<enum name='type'>
<value>rom</value>
<value>pflash</value>
</enum>
<enum name='readonly'>
<value>yes</value>
<value>no</value>
</enum>
<enum name='secure'>
<value>yes</value>
<value>no</value>
</enum>
</loader>
</os>
<cpu>
<mode name='host-passthrough' supported='no'/>
<mode name='host-model' supported='no'/>
<mode name='custom' supported='no'/>
</cpu>
<devices>
<disk supported='yes'>
<enum name='diskDevice'>
<value>disk</value>
<value>cdrom</value>
<value>floppy</value>
<value>lun</value>
</enum>
<enum name='bus'>
<value>ide</value>
<value>fdc</value>
<value>scsi</value>
<value>virtio</value>
<value>usb</value>
<value>sata</value>
</enum>
</disk>
<graphics supported='yes'>
<enum name='type'>
<value>sdl</value>
<value>vnc</value>
<value>spice</value>
</enum>
</graphics>
<video supported='yes'>
<enum name='modelType'>
<value>vga</value>
<value>cirrus</value>
<value>vmvga</value>
<value>virtio</value>
</enum>
</video>
<hostdev supported='yes'>
<enum name='mode'>
<value>subsystem</value>
</enum>
<enum name='startupPolicy'>
<value>default</value>
<value>mandatory</value>
<value>requisite</value>
<value>optional</value>
</enum>
<enum name='subsysType'>
<value>usb</value>
<value>pci</value>
<value>scsi</value>
</enum>
<enum name='capsType'/>
<enum name='pciBackend'/>
</hostdev>
</devices>
<features>
<gic supported='no'/>
</features>
</domainCapabilities>
"""
# NOTE(sean-k-mooney): yes i686 is actually the i386 emulator
# the qemu-system-i386 binary is used for all 32bit x86
# instruction sets.
DOMCAPABILITIES_I686 = """
<domainCapabilities>
<path>/usr/bin/qemu-system-i386</path>
<domain>kvm</domain>
<machine>pc-i440fx-2.11</machine>
<arch>i686</arch>
<vcpu max='255'/>
<os supported='yes'>
<loader supported='yes'>
<enum name='type'>
<value>rom</value>
<value>pflash</value>
</enum>
<enum name='readonly'>
<value>yes</value>
<value>no</value>
</enum>
<enum name='secure'>
<value>yes</value>
<value>no</value>
</enum>
</loader>
</os>
<cpu>
<mode name='host-passthrough' supported='yes'/>
<mode name='host-model' supported='yes'>
<model fallback='forbid'>Skylake-Client-IBRS</model>
<vendor>Intel</vendor>
<feature policy='require' name='ss'/>
<feature policy='require' name='vmx'/>
<feature policy='require' name='hypervisor'/>
<feature policy='require' name='tsc_adjust'/>
<feature policy='require' name='clflushopt'/>
<feature policy='require' name='md-clear'/>
<feature policy='require' name='ssbd'/>
<feature policy='require' name='xsaves'/>
<feature policy='require' name='pdpe1gb'/>
</mode>
<mode name='custom' supported='yes'>
<model usable='no'>qemu64</model>
<model usable='yes'>qemu32</model>
<model usable='no'>phenom</model>
<model usable='yes'>pentium3</model>
<model usable='yes'>pentium2</model>
<model usable='yes'>pentium</model>
<model usable='yes'>n270</model>
<model usable='yes'>kvm64</model>
<model usable='yes'>kvm32</model>
<model usable='yes'>coreduo</model>
<model usable='yes'>core2duo</model>
<model usable='no'>athlon</model>
<model usable='yes'>Westmere</model>
<model usable='yes'>Westmere-IBRS</model>
<model usable='no'>Skylake-Server</model>
<model usable='no'>Skylake-Server-IBRS</model>
<model usable='yes'>Skylake-Client</model>
<model usable='yes'>Skylake-Client-IBRS</model>
<model usable='yes'>SandyBridge</model>
<model usable='yes'>SandyBridge-IBRS</model>
<model usable='yes'>Penryn</model>
<model usable='no'>Opteron_G5</model>
<model usable='no'>Opteron_G4</model>
<model usable='no'>Opteron_G3</model>
<model usable='no'>Opteron_G2</model>
<model usable='yes'>Opteron_G1</model>
<model usable='yes'>Nehalem</model>
<model usable='yes'>Nehalem-IBRS</model>
<model usable='yes'>IvyBridge</model>
<model usable='yes'>IvyBridge-IBRS</model>
<model usable='yes'>Haswell-noTSX</model>
<model usable='yes'>Haswell-noTSX-IBRS</model>
<model usable='yes'>Haswell</model>
<model usable='yes'>Haswell-IBRS</model>
<model usable='no'>EPYC</model>
<model usable='no'>EPYC-IBPB</model>
<model usable='yes'>Conroe</model>
<model usable='yes'>Broadwell-noTSX</model>
<model usable='yes'>Broadwell-noTSX-IBRS</model>
<model usable='yes'>Broadwell</model>
<model usable='yes'>Broadwell-IBRS</model>
<model usable='yes'>486</model>
</mode>
</cpu>
<devices>
<disk supported='yes'>
<enum name='diskDevice'>
<value>disk</value>
<value>cdrom</value>
<value>floppy</value>
<value>lun</value>
</enum>
<enum name='bus'>
<value>ide</value>
<value>fdc</value>
<value>scsi</value>
<value>virtio</value>
<value>usb</value>
<value>sata</value>
</enum>
</disk>
<graphics supported='yes'>
<enum name='type'>
<value>sdl</value>
<value>vnc</value>
<value>spice</value>
</enum>
</graphics>
<video supported='yes'>
<enum name='modelType'>
<value>vga</value>
<value>cirrus</value>
<value>vmvga</value>
<value>qxl</value>
<value>virtio</value>
</enum>
</video>
<hostdev supported='yes'>
<enum name='mode'>
<value>subsystem</value>
</enum>
<enum name='startupPolicy'>
<value>default</value>
<value>mandatory</value>
<value>requisite</value>
<value>optional</value>
</enum>
<enum name='subsysType'>
<value>usb</value>
<value>pci</value>
<value>scsi</value>
</enum>
<enum name='capsType'/>
<enum name='pciBackend'/>
</hostdev>
</devices>
<features>
<gic supported='no'/>
</features>
</domainCapabilities>
"""
STATIC_DOMCAPABILITIES = {
Architecture.ARMV7: DOMCAPABILITIES_ARMV7,
Architecture.AARCH64: DOMCAPABILITIES_AARCH64,
Architecture.SPARC: DOMCAPABILITIES_SPARC,
Architecture.PPC: DOMCAPABILITIES_PPC,
Architecture.MIPS: DOMCAPABILITIES_MIPS,
Architecture.MIPSEL: DOMCAPABILITIES_MIPSEL,
Architecture.I686: DOMCAPABILITIES_I686
}
# NOTE(aspiers): see the above note for CAPABILITIES_GUEST which
# explains why the <machine> element here needs to be parametrised.
#
# The <features> element needs to be parametrised for emulating
# environments with and without the SEV feature.
DOMCAPABILITIES_X86_64_TEMPLATE = """
<domainCapabilities>
<path>/usr/bin/qemu-kvm</path>
<domain>kvm</domain>
<machine>%(mtype)s</machine>
<arch>x86_64</arch>
<vcpu max='255'/>
<os supported='yes'>
<enum name='firmware'>
<value>efi</value>
</enum>
<loader supported='yes'>
<value>/usr/share/edk2/ovmf/OVMF_CODE.fd</value>
<value>/usr/share/edk2/ovmf/OVMF_CODE.secboot.fd</value>
<enum name='type'>
<value>rom</value>
<value>pflash</value>
</enum>
<enum name='readonly'>
<value>yes</value>
<value>no</value>
</enum>
<enum name='secure'>
<value>yes</value>
<value>no</value>
</enum>
</loader>
</os>
<cpu>
<mode name='host-passthrough' supported='yes'/>
<mode name='host-model' supported='yes'>
<model fallback='forbid'>EPYC-IBPB</model>
<vendor>AMD</vendor>
<feature policy='require' name='x2apic'/>
<feature policy='require' name='tsc-deadline'/>
<feature policy='require' name='hypervisor'/>
<feature policy='require' name='tsc_adjust'/>
<feature policy='require' name='cmp_legacy'/>
<feature policy='require' name='invtsc'/>
<feature policy='require' name='virt-ssbd'/>
<feature policy='disable' name='monitor'/>
</mode>
<mode name='custom' supported='yes'>
<model usable='yes'>qemu64</model>
<model usable='yes'>qemu32</model>
<model usable='no'>phenom</model>
<model usable='yes'>pentium3</model>
<model usable='yes'>pentium2</model>
<model usable='yes'>pentium</model>
<model usable='no'>n270</model>
<model usable='yes'>kvm64</model>
<model usable='yes'>kvm32</model>
<model usable='no'>coreduo</model>
<model usable='no'>core2duo</model>
<model usable='no'>athlon</model>
<model usable='yes'>Westmere</model>
<model usable='no'>Westmere-IBRS</model>
<model usable='no'>Skylake-Server</model>
<model usable='no'>Skylake-Server-IBRS</model>
<model usable='no'>Skylake-Client</model>
<model usable='no'>Skylake-Client-IBRS</model>
<model usable='yes'>SandyBridge</model>
<model usable='no'>SandyBridge-IBRS</model>
<model usable='yes'>Penryn</model>
<model usable='no'>Opteron_G5</model>
<model usable='no'>Opteron_G4</model>
<model usable='yes'>Opteron_G3</model>
<model usable='yes'>Opteron_G2</model>
<model usable='yes'>Opteron_G1</model>
<model usable='yes'>Nehalem</model>
<model usable='no'>Nehalem-IBRS</model>
<model usable='no'>IvyBridge</model>
<model usable='no'>IvyBridge-IBRS</model>
<model usable='no'>Haswell</model>
<model usable='no'>Haswell-noTSX</model>
<model usable='no'>Haswell-noTSX-IBRS</model>
<model usable='no'>Haswell-IBRS</model>
<model usable='yes'>EPYC</model>
<model usable='yes'>EPYC-IBPB</model>
<model usable='yes'>Conroe</model>
<model usable='no'>Broadwell</model>
<model usable='no'>Broadwell-noTSX</model>
<model usable='no'>Broadwell-noTSX-IBRS</model>
<model usable='no'>Broadwell-IBRS</model>
<model usable='yes'>486</model>
</mode>
</cpu>
<devices>
<disk supported='yes'>
<enum name='diskDevice'>
<value>disk</value>
<value>cdrom</value>
<value>floppy</value>
<value>lun</value>
</enum>
<enum name='bus'>
<value>ide</value>
<value>fdc</value>
<value>scsi</value>
<value>virtio</value>
<value>usb</value>
<value>sata</value>
</enum>
</disk>
<graphics supported='yes'>
<enum name='type'>
<value>sdl</value>
<value>vnc</value>
<value>spice</value>
</enum>
</graphics>
<video supported='yes'>
<enum name='modelType'>
<value>vga</value>
<value>cirrus</value>
<value>vmvga</value>
<value>qxl</value>
<value>virtio</value>
</enum>
</video>
<hostdev supported='yes'>
<enum name='mode'>
<value>subsystem</value>
</enum>
<enum name='startupPolicy'>
<value>default</value>
<value>mandatory</value>
<value>requisite</value>
<value>optional</value>
</enum>
<enum name='subsysType'>
<value>usb</value>
<value>pci</value>
<value>scsi</value>
</enum>
<enum name='capsType'/>
<enum name='pciBackend'>
<value>default</value>
<value>vfio</value>
</enum>
</hostdev>
</devices>
%(features)s
</domainCapabilities>
"""
_fake_NodeDevXml = {
"pci_0000_04_00_3": """
<device>
<name>pci_0000_04_00_3</name>
<parent>pci_0000_00_01_1</parent>
<driver>
<name>igb</name>
</driver>
<capability type='pci'>
<domain>0</domain>
<bus>4</bus>
<slot>0</slot>
<function>3</function>
<product id='0x1521'>I350 Gigabit Network Connection</product>
<vendor id='0x8086'>Intel Corporation</vendor>
<capability type='virt_functions'>
<address domain='0x0000' bus='0x04' slot='0x10' function='0x3'/>
<address domain='0x0000' bus='0x04' slot='0x10' function='0x7'/>
<address domain='0x0000' bus='0x04' slot='0x11' function='0x3'/>
<address domain='0x0000' bus='0x04' slot='0x11' function='0x7'/>
</capability>
</capability>
</device>""",
"pci_0000_04_10_7": """
<device>
<name>pci_0000_04_10_7</name>
<parent>pci_0000_04_00_3</parent>
<driver>
<name>igbvf</name>
</driver>
<capability type='pci'>
<domain>0</domain>
<bus>4</bus>
<slot>16</slot>
<function>7</function>
<product id='0x1520'>I350 Ethernet Controller Virtual Function
</product>
<vendor id='0x8086'>Intel Corporation</vendor>
<capability type='phys_function'>
<address domain='0x0000' bus='0x04' slot='0x00' function='0x3'/>
</capability>
<capability type='virt_functions'>
</capability>
</capability>
</device>""",
"pci_0000_04_11_7": """
<device>
<name>pci_0000_04_11_7</name>
<parent>pci_0000_04_00_3</parent>
<driver>
<name>igbvf</name>
</driver>
<capability type='pci'>
<domain>0</domain>
<bus>4</bus>
<slot>17</slot>
<function>7</function>
<product id='0x1520'>I350 Ethernet Controller Virtual Function
</product>
<vendor id='0x8086'>Intel Corporation</vendor>
<numa node='0'/>
<capability type='phys_function'>
<address domain='0x0000' bus='0x04' slot='0x00' function='0x3'/>
</capability>
<capability type='virt_functions'>
</capability>
</capability>
</device>""",
"pci_0000_04_00_1": """
<device>
<name>pci_0000_04_00_1</name>
<path>/sys/devices/pci0000:00/0000:00:02.0/0000:04:00.1</path>
<parent>pci_0000_00_02_0</parent>
<driver>
<name>mlx5_core</name>
</driver>
<capability type='pci'>
<domain>0</domain>
<bus>4</bus>
<slot>0</slot>
<function>1</function>
<product id='0x1013'>MT27700 Family [ConnectX-4]</product>
<vendor id='0x15b3'>Mellanox Technologies</vendor>
<iommuGroup number='15'>
<address domain='0x0000' bus='0x03' slot='0x00' function='0x0'/>
<address domain='0x0000' bus='0x03' slot='0x00' function='0x1'/>
</iommuGroup>
<numa node='0'/>
<pci-express>
<link validity='cap' port='0' speed='8' width='16'/>
<link validity='sta' speed='8' width='16'/>
</pci-express>
</capability>
</device>""",
# libvirt >= 1.3.0 nodedev-dumpxml
"pci_0000_03_00_0": """
<device>
<name>pci_0000_03_00_0</name>
<path>/sys/devices/pci0000:00/0000:00:02.0/0000:03:00.0</path>
<parent>pci_0000_00_02_0</parent>
<driver>
<name>mlx5_core</name>
</driver>
<capability type='pci'>
<domain>0</domain>
<bus>3</bus>
<slot>0</slot>
<function>0</function>
<product id='0x1013'>MT27700 Family [ConnectX-4]</product>
<vendor id='0x15b3'>Mellanox Technologies</vendor>
<capability type='virt_functions' maxCount='16'>
<address domain='0x0000' bus='0x03' slot='0x00' function='0x2'/>
<address domain='0x0000' bus='0x03' slot='0x00' function='0x3'/>
<address domain='0x0000' bus='0x03' slot='0x00' function='0x4'/>
<address domain='0x0000' bus='0x03' slot='0x00' function='0x5'/>
</capability>
<iommuGroup number='15'>
<address domain='0x0000' bus='0x03' slot='0x00' function='0x0'/>
<address domain='0x0000' bus='0x03' slot='0x00' function='0x1'/>
</iommuGroup>
<numa node='0'/>
<pci-express>
<link validity='cap' port='0' speed='8' width='16'/>
<link validity='sta' speed='8' width='16'/>
</pci-express>
</capability>
</device>""",
"pci_0000_03_00_1": """
<device>
<name>pci_0000_03_00_1</name>
<path>/sys/devices/pci0000:00/0000:00:02.0/0000:03:00.1</path>
<parent>pci_0000_00_02_0</parent>
<driver>
<name>mlx5_core</name>
</driver>
<capability type='pci'>
<domain>0</domain>
<bus>3</bus>
<slot>0</slot>
<function>1</function>
<product id='0x1013'>MT27700 Family [ConnectX-4]</product>
<vendor id='0x15b3'>Mellanox Technologies</vendor>
<capability type='virt_functions' maxCount='16'/>
<iommuGroup number='15'>
<address domain='0x0000' bus='0x03' slot='0x00' function='0x0'/>
<address domain='0x0000' bus='0x03' slot='0x00' function='0x1'/>
</iommuGroup>
<numa node='0'/>
<pci-express>
<link validity='cap' port='0' speed='8' width='16'/>
<link validity='sta' speed='8' width='16'/>
</pci-express>
</capability>
</device>""",
"net_enp2s1_02_9a_a1_37_be_54": """
<device>
<name>net_enp2s1_02_9a_a1_37_be_54</name>
<path>/sys/devices/pci0000:00/0000:04:00.3/0000:04:10.7/net/enp2s1</path>
<parent>pci_0000_04_10_7</parent>
<capability type='net'>
<interface>enp2s1</interface>
<address>02:9a:a1:37:be:54</address>
<link state='down'/>
<feature name='rx'/>
<feature name='tx'/>
<feature name='sg'/>
<feature name='tso'/>
<feature name='gso'/>
<feature name='gro'/>
<feature name='rxvlan'/>
<feature name='txvlan'/>
<capability type='80203'/>
</capability>
</device>""",
"net_enp2s2_02_9a_a1_37_be_54": """
<device>
<name>net_enp2s2_02_9a_a1_37_be_54</name>
<path>/sys/devices/pci0000:00/0000:00:02.0/0000:02:02.0/net/enp2s2</path>
<parent>pci_0000_04_11_7</parent>
<capability type='net'>
<interface>enp2s2</interface>
<address>02:9a:a1:37:be:54</address>
<link state='down'/>
<feature name='rx'/>
<feature name='tx'/>
<feature name='sg'/>
<feature name='tso'/>
<feature name='gso'/>
<feature name='gro'/>
<feature name='rxvlan'/>
<feature name='txvlan'/>
<capability type='80203'/>
</capability>
</device>""",
"pci_0000_06_00_0": """
<device>
<name>pci_0000_06_00_0</name>
<path>/sys/devices/pci0000:00/0000:00:06.0</path>
<parent></parent>
<driver>
<name>nvidia</name>
</driver>
<capability type="pci">
<domain>0</domain>
<bus>10</bus>
<slot>1</slot>
<function>5</function>
<product id="0x0FFE">GRID M60-0B</product>
<vendor id="0x10DE">Nvidia</vendor>
<numa node="8"/>
<capability type='mdev_types'>
<type id='nvidia-11'>
<name>GRID M60-0B</name>
<deviceAPI>vfio-pci</deviceAPI>
<availableInstances>16</availableInstances>
</type>
</capability>
</capability>
</device>""",
"pci_0000_06_00_1": """
<device>
<name>pci_0000_06_00_1</name>
<path>/sys/devices/pci0000:00/0000:00:06.1</path>
<parent></parent>
<driver>
<name>i915</name>
</driver>
<capability type="pci">
<domain>0</domain>
<bus>6</bus>
<slot>0</slot>
<function>1</function>
<product id="0x591d">HD Graphics P630</product>
<vendor id="0x8086">Intel Corporation</vendor>
<capability type='mdev_types'>
<type id='i915-GVTg_V5_8'>
<deviceAPI>vfio-pci</deviceAPI>
<availableInstances>2</availableInstances>
</type>
</capability>
</capability>
</device>""",
"mdev_4b20d080_1b54_4048_85b3_a6a62d165c01": """
<device>
<name>mdev_4b20d080_1b54_4048_85b3_a6a62d165c01</name>
<path>/sys/devices/pci0000:00/0000:00:02.0/4b20d080-1b54-4048-85b3-a6a62d165c01</path>
<parent>pci_0000_00_02_0</parent>
<driver>
<name>vfio_mdev</name>
</driver>
<capability type='mdev'>
<type id='nvidia-11'/>
<iommuGroup number='12'/>
</capability>
</device>
""",
}
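# Derive each fake node device's parent from the XML definitions above and
# build the reverse parent -> children lookup.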
_fake_NodeDevXml_parents = {
name: etree.fromstring(xml).find("parent").text
for name, xml in _fake_NodeDevXml.items()
}
_fake_NodeDevXml_children = collections.defaultdict(list)
for key, val in _fake_NodeDevXml_parents.items():
_fake_NodeDevXml_children[val].append(key)
| 33.132638 | 92 | 0.571309 |
7940aea0fe34deaa2168095ff2772bcff833d572 | 2,914 | py | Python | src/kumo.py | rekyuu/kumo-manga | 81c061f13fe678f05bb70e5b57bb7fa049b59d19 | [
"MIT"
] | 3 | 2015-09-15T23:56:39.000Z | 2016-09-04T22:57:50.000Z | src/kumo.py | bo1g/kumo-manga | 81c061f13fe678f05bb70e5b57bb7fa049b59d19 | [
"MIT"
] | 8 | 2015-09-16T02:35:18.000Z | 2016-10-16T04:39:32.000Z | src/kumo.py | bo1g/kumo-manga | 81c061f13fe678f05bb70e5b57bb7fa049b59d19 | [
"MIT"
] | 4 | 2015-09-16T01:29:08.000Z | 2016-11-30T04:27:53.000Z | import io, json, imp
from lists import folder, manga
from flask import Flask, render_template, send_file
from flask_httpauth import HTTPBasicAuth
app = Flask(__name__)
auth = HTTPBasicAuth()
using_redis = True
try:
import redis
except ImportError:
using_redis = False
r = redis.Redis(host='localhost', port=6379, db=0)
try:
# There's probably a better way to do this, but
# this should suffice for now
r.get("test")
except redis.exceptions.ConnectionError:
using_redis = False
"""
Helper definitions.
"""
def url_parent (url):
try:
fork = url.split('/')
del fork[-1]
path = '/'.join(fork)
except IndexError:
path = ''
return path
def get_title (url):
fork = url.split('/')
title = fork[-1]
if len(title.split('.')) >= 2:
title = title.split('.')[0]
return title
"""
Authentication
Add @auth.login_required below the last @app.route()
line to add simple HTML auth.
"""
with open('./config.json', encoding='utf8') as file_:
config = json.loads(file_.read())
users = config['auth']
@auth.get_password
def get_pw (username):
if username in users:
return users.get(username)
return None
"""
Routing
"""
@app.route('/')
def index ():
return render_template('index.html')
@app.route('/list')
@app.route('/list/')
def manga_list ():
return render_template(
'list.html',
items = folder.return_listing()
)
@app.route('/list/<path:subfolder>')
def manga_list_folder (subfolder):
return render_template(
'list.html',
current = subfolder,
parent = url_parent(subfolder),
items = folder.return_listing(subfolder)
)
@app.route('/read/<path:filename>')
@app.route('/read/<path:filename>/<int:pagenum>')
def manga_read (filename, pagenum=1):
return render_template(
'read.html',
current = filename,
current_page = pagenum,
manga_title = get_title(filename),
total_pages = manga.get_total_pages(filename),
parent = url_parent(filename)
)
# Image handling
@app.route('/cover/<path:filepath>')
def manga_cover (filepath):
if using_redis:
cover = r.get(filepath)
if not cover:
cover = manga.get_cover(filepath)
r.set(filepath, cover)
else:
cover = manga.get_cover(filepath)
return send_file(io.BytesIO(cover))
@app.route('/page/<path:filepath>/<int:pagenum>')
def manga_page (filepath, pagenum=1):
if using_redis:
page = r.get(filepath + str(pagenum))
if not page:
page = manga.get_page(filepath, pagenum)
r.set(filepath + str(pagenum), page)
else:
page = manga.get_page(filepath, pagenum)
return send_file(io.BytesIO(page))
# Error handling
@app.errorhandler(404)
def page_not_found(e):
return render_template('404.html'), 404
"""
Server
"""
if __name__ == '__main__':
app.run(host = '0.0.0.0', port = 5266, debug = False)
| 21.115942 | 56 | 0.653054 |
7940aee65ddab890ecf88b2c88b4fdf1e07ce0f7 | 3,140 | py | Python | src/models/record.py | MarkHershey/Networks_Lab2 | 9c2d6b4fad9dfe237aacae955d5d782ac16dbe71 | [
"MIT"
] | null | null | null | src/models/record.py | MarkHershey/Networks_Lab2 | 9c2d6b4fad9dfe237aacae955d5d782ac16dbe71 | [
"MIT"
] | null | null | null | src/models/record.py | MarkHershey/Networks_Lab2 | 9c2d6b4fad9dfe237aacae955d5d782ac16dbe71 | [
"MIT"
] | null | null | null | from datetime import datetime
from typing import List, Optional, Tuple
from puts.logger import logger
from pydantic import BaseModel, validator
from ..functional import uid_gen
class Record(BaseModel):
    uid: Optional[str] = None # implies time of record creation
username: Optional[str] = None
date_time: Optional[datetime] = None # time of purchase
account_id: Optional[str] = None # account
amount: float = None # debit amount or credit amount
amount_abs: Optional[float] = None # absolute amount >= 0
merchant: Optional[str] = None # seller
label: Optional[str] = None # seller label
bank_ref_code: Optional[str] = None # bank ref code
category: Optional[str] = None
subcategory: Optional[str] = None
location: Optional[str] = None # geographic location of the transaction
link: Optional[str] = None # online product link
tags: List[str] = [] # user-defined tags
reference: Optional[str] = None # reference number from the receipt / bank record
remarks: Optional[str] = None
imported: bool = False
starred: bool = False # init to false
confirmed: bool = False # init to false if record is auto generated
excluded: bool = False # init to false, excluded from amount total computation
archived: bool = False # init to false
@validator("uid", pre=True, always=True)
def default_uid(cls, v):
return v or uid_gen("R")
@validator("date_time", pre=True, always=True)
def default_date_time(cls, v):
return v or datetime.now()
@validator("amount_abs", pre=True, always=True)
def absolute_amount(cls, v, *, values, **kwargs):
if values.get("amount") is not None:
return v or abs(values["amount"])
else:
return None
class RecordEdit(BaseModel):
date_time: Optional[datetime] = None
account_id: Optional[str] = None
amount: Optional[float] = None
amount_abs: Optional[float] = None
merchant: Optional[str] = None
label: Optional[str] = None
bank_ref_code: Optional[str] = None
category: Optional[str] = None
subcategory: Optional[str] = None
location: Optional[str] = None
link: Optional[str] = None
tags: Optional[List[str]] = None
reference: Optional[str] = None
remarks: Optional[str] = None
imported: Optional[bool] = None
starred: Optional[bool] = None
confirmed: Optional[bool] = None
excluded: Optional[bool] = None
archived: Optional[bool] = None
@validator("amount_abs", pre=True, always=True)
def absolute_amount(cls, v, *, values, **kwargs):
if values.get("amount") is not None:
return v or abs(values["amount"])
else:
return None
class RecordsQueryResponse(BaseModel):
query_time: datetime = None
count: int = 0
username: str = ""
date_range_start: datetime = None
date_range_end: datetime = None
total_amount: float = 0
sorted_by: str = ""
records: List[Record] = []
@validator("query_time", pre=True, always=True)
def default_query_time(cls, v):
return v or datetime.now()
| 34.505495 | 86 | 0.661465 |
7940afb762dd41e104d6dd3f4408ab3dd625b60f | 2,456 | py | Python | lib/googlecloudsdk/api_lib/bigtable/clusters.py | kustodian/google-cloud-sdk | b6bae4137d4b58030adb3dcb1271216dfb19f96d | [
"Apache-2.0"
] | null | null | null | lib/googlecloudsdk/api_lib/bigtable/clusters.py | kustodian/google-cloud-sdk | b6bae4137d4b58030adb3dcb1271216dfb19f96d | [
"Apache-2.0"
] | 11 | 2020-02-29T02:51:12.000Z | 2022-03-30T23:20:08.000Z | lib/googlecloudsdk/api_lib/bigtable/clusters.py | kustodian/google-cloud-sdk | b6bae4137d4b58030adb3dcb1271216dfb19f96d | [
"Apache-2.0"
] | 1 | 2020-07-24T18:47:35.000Z | 2020-07-24T18:47:35.000Z | # -*- coding: utf-8 -*- #
# Copyright 2018 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Bigtable clusters API helper."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.bigtable import util
def Delete(cluster_ref):
"""Delete a cluster.
Args:
cluster_ref: A resource reference to the cluster to delete.
"""
client = util.GetAdminClient()
msgs = util.GetAdminMessages()
msg = msgs.BigtableadminProjectsInstancesClustersDeleteRequest(
name=cluster_ref.RelativeName())
client.projects_instances_clusters.Delete(msg)
def Create(cluster_ref, zone, serve_nodes=3):
"""Create a cluster.
Args:
cluster_ref: A resource reference to the cluster to create.
zone: string, The zone this cluster should be inside.
serve_nodes: int, The number of nodes in this cluster.
Returns:
Long running operation.
"""
client = util.GetAdminClient()
msgs = util.GetAdminMessages()
storage_type = (
msgs.Cluster.DefaultStorageTypeValueValuesEnum.STORAGE_TYPE_UNSPECIFIED)
cluster_msg = msgs.Cluster(
serveNodes=serve_nodes,
location=util.LocationUrl(zone),
defaultStorageType=storage_type)
msg = msgs.BigtableadminProjectsInstancesClustersCreateRequest(
cluster=cluster_msg,
clusterId=cluster_ref.Name(),
parent=cluster_ref.Parent().RelativeName())
return client.projects_instances_clusters.Create(msg)
def Update(cluster_ref, serve_nodes):
"""Update a cluster.
Args:
cluster_ref: A resource reference to the cluster to update.
serve_nodes: int, The number of nodes in this cluster.
Returns:
Long running operation.
"""
client = util.GetAdminClient()
msgs = util.GetAdminMessages()
msg = msgs.Cluster(name=cluster_ref.RelativeName(), serveNodes=serve_nodes)
return client.projects_instances_clusters.Update(msg)
| 30.7 | 78 | 0.751629 |
7940b12a836409ac5e37a59260e28a2a75037c66 | 7,262 | py | Python | test.py | starlightdreamer/CommonRegex | 7c40ebd86331bd550e415687d5f4338df1ccc358 | [
"MIT"
] | null | null | null | test.py | starlightdreamer/CommonRegex | 7c40ebd86331bd550e415687d5f4338df1ccc358 | [
"MIT"
] | null | null | null | test.py | starlightdreamer/CommonRegex | 7c40ebd86331bd550e415687d5f4338df1ccc358 | [
"MIT"
] | null | null | null | # coding: utf-8
from commonregex import CommonRegex
import unittest
class RegexTestCase(unittest.TestCase):
def setUp(self):
self.parser = CommonRegex()
class TestDates(RegexTestCase):
def test_numeric(self):
matching = ["1-19-14", "1.19.14", "1.19.14", "01.19.14"]
for s in matching:
self.assertEqual(self.parser.dates(s), [s])
def test_verbose(self):
matching = ["January 19th, 2014", "Jan. 19th, 2014", "Jan 19 2014", "19 Jan 2014"]
for s in matching:
self.assertEqual(self.parser.dates(s), [s])
class TestTimes(RegexTestCase):
def test_times(self):
matching = ["09:45", "9:45", "23:45", "9:00am", "9am", "9:00 A.M.", "9:00 pm"]
for s in matching:
self.assertEqual(self.parser.times(s), [s])
class TestPhones(RegexTestCase):
def test_phones(self):
matching = ["12345678900", "1234567890", "+1 234 567 8900", "234-567-8900",
"1-234-567-8900", "1.234.567.8900", "5678900", "567-8900",
"(123) 456 7890", "+41 22 730 5989", "(+41) 22 730 5989",
"+442345678900"]
for s in matching:
self.assertEqual(self.parser.phones(s), [s])
class TestPhonesWithExts(RegexTestCase):
def test_phones_with_extensions(self):
matching = ["(523)222-8888 ext 527", "(523)222-8888x623", "(523)222-8888 x623",
"(523)222-8888 x 623", "(523)222-8888EXT623", "523-222-8888EXT623",
"(523) 222-8888 x 623"]
non_matching = ["222-5555", "222-8888 x 623", '333-333-5555 dial 3']
for s in matching:
self.assertEqual(self.parser.phones_with_exts(s), [s])
for s in non_matching:
            self.assertNotEqual(self.parser.phones_with_exts(s), [s])
class TestLinks(RegexTestCase):
def test_links(self):
matching = ["www.google.com", "http://www.google.com", "www.google.com/?query=dog"
"sub.example.com", "http://www.google.com/%&#/?q=dog", "google.com"]
non_matching = ["www.google.con"]
for s in matching:
self.assertEqual(self.parser.links(s), [s])
for s in non_matching:
self.assertNotEqual(self.parser.links(s), [s])
class TestEmails(RegexTestCase):
def test_emails(self):
matching = ["[email protected]", "[email protected]", "[email protected]"]
non_matching = ["[email protected]"]
for s in matching:
self.assertEqual(self.parser.emails(s), [s])
for s in non_matching:
self.assertNotEqual(self.parser.emails(s), [s])
class TestIPs(RegexTestCase):
def test_ips(self):
matching = ["127.0.0.1", "192.168.1.1", "8.8.8.8"]
for s in matching:
self.assertEqual(self.parser.ips(s), [s])
class TestIPv6s(RegexTestCase):
def test_ipv6s(self):
matching = ["fe80:0000:0000:0000:0204:61ff:fe9d:f156", "fe80:0:0:0:204:61ff:fe9d:f156",
"fe80::204:61ff:fe9d:f156", "fe80:0000:0000:0000:0204:61ff:254.157.241.86",
"fe80:0:0:0:0204:61ff:254.157.241.86", "fe80::204:61ff:254.157.241.86", "::1"]
for s in matching:
self.assertEqual(self.parser.ipv6s(s), [s])
class TestMacAddresses(RegexTestCase):
def test_mac_addresses(self):
matching = ["01-23-45-67-89-AB", "FE:23:45:67:89:ab", "0123.4567.89aB"]
non_matching = ["01-23-45-67-89-FG", "GF:23:45:67:89:AB", "G123.4567.89AB",
"101-23-45-67-89-AA", "1FF:23:45:67:89:AB", "F123.4567.89ABC",
"1123-45-67-89-AA", "11:23::45:67:89:AB", "F123..4567.1111"]
for s in matching:
self.assertEqual(self.parser.mac_addresses(s), [s])
for s in non_matching:
self.assertNotEqual(self.parser.mac_addresses(s), [s])
class TestPrices(RegexTestCase):
def test_prices(self):
matching = ["$1.23", "$1", "$1,000", "$10,000.00"]
non_matching = ["$1,10,0", "$100.000"]
for s in matching:
self.assertEqual(self.parser.prices(s), [s])
for s in non_matching:
self.assertNotEqual(self.parser.prices(s), [s])
class TestHexColors(RegexTestCase):
def test_hexcolors(self):
matching = ["#fff", "#123", "#4e32ff", "#12345678"]
for s in matching:
self.assertEqual(self.parser.hex_colors(s), [s])
class TestCreditCards(RegexTestCase):
def test_creditcards(self):
matching = ["0000-0000-0000-0000", "0123456789012345",
"0000 0000 0000 0000", "012345678901234"]
for s in matching:
self.assertEqual(self.parser.credit_cards(s), [s])
class TestBTCAddresses(RegexTestCase):
def test_btc_addresses(self):
matching = ["1LgqButDNV2rVHe9DATt6WqD8tKZEKvaK2", "19P6EYhu6kZzRy9Au4wRRZVE8RemrxPbZP",
"1bones8KbQge9euDn523z5wVhwkTP3uc1", "1Bow5EMqtDGV5n5xZVgdpRPJiiDK6XSjiC"]
non_matching = ["2LgqButDNV2rVHe9DATt6WqD8tKZEKvaK2", "19Ry9Au4wRRZVE8RemrxPbZP",
"1bones8KbQge9euDn523z5wVhwkTP3uc12939", "1Bow5EMqtDGV5n5xZVgdpR"]
for s in matching:
self.assertEqual(self.parser.btc_addresses(s), [s])
for s in non_matching:
self.assertNotEqual(self.parser.btc_addresses(s), [s])
class TestStreetAddresses(RegexTestCase):
def test_street_addresses(self):
matching = ["checkout the new place at 101 main st.", "504 parkwood drive", "3 elm boulevard",
"500 elm street "]
non_matching = ["101 main straight"]
for s in matching:
self.assertTrue(self.parser.street_addresses(s))
for s in non_matching:
self.assertFalse(self.parser.street_addresses(s))
class TestPoBoxes(RegexTestCase):
def test_po_boxes(self):
matching = ["PO Box 123456",
"hey p.o. box 234234 hey"]
non_matching = ["101 main straight"]
for s in matching:
self.assertTrue(self.parser.po_boxes(s))
for s in non_matching:
self.assertFalse(self.parser.po_boxes(s))
class TestZipCodes(RegexTestCase):
def test_zip_codes(self):
matching = ["02540", "02540-4119"]
non_matching = ["101 main straight", "123456"]
for s in matching:
self.assertTrue(self.parser.zip_codes(s))
for s in non_matching:
self.assertFalse(self.parser.zip_codes(s))
class TestSSN(RegexTestCase):
def test_ssn(self):
matching = ["523 23 4566", "523-04-1234"]
non_matching = ["774 00 1245", "666-12-7856"]
for s in matching:
self.assertEqual(self.parser.ssn_number(s), [s])
for s in non_matching:
self.assertFalse(self.parser.ssn_number(s), [s])
if __name__ == '__main__':
# Auto-detect test classes to reduce friction of adding a new one.
test_cases = [clas for name, clas in list(locals().items()) if name.startswith('Test')]
suites = []
for case in test_cases:
suites.append(unittest.TestLoader().loadTestsFromTestCase(case))
all_tests = unittest.TestSuite(suites)
unittest.TextTestRunner(verbosity=2).run(all_tests)
| 35.598039 | 102 | 0.605205 |
7940b1f21f13a2395d436196b9a0d8fe4901563e | 1,384 | py | Python | superset/migrations/versions/1a48a5411020_adding_slug_to_dash.py | franksam007/incubator-superset | a0f572eb3ea4b89cb435a8af20436f8e1d34814e | [
"Apache-2.0"
] | 108 | 2018-01-22T11:09:59.000Z | 2021-01-15T10:53:04.000Z | superset/migrations/versions/1a48a5411020_adding_slug_to_dash.py | franksam007/incubator-superset | a0f572eb3ea4b89cb435a8af20436f8e1d34814e | [
"Apache-2.0"
] | 112 | 2018-01-25T22:57:21.000Z | 2019-08-22T20:08:48.000Z | superset/migrations/versions/1a48a5411020_adding_slug_to_dash.py | zhwXF/Superset | 14c3488c79f8f3cdbd1123e8f7a92f8746c2db09 | [
"Apache-2.0",
"CC-BY-4.0",
"MIT"
] | 24 | 2018-01-19T22:54:39.000Z | 2020-11-12T13:04:25.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""adding slug to dash
Revision ID: 1a48a5411020
Revises: 289ce07647b
Create Date: 2015-12-04 09:42:16.973264
"""
# revision identifiers, used by Alembic.
revision = '1a48a5411020'
down_revision = '289ce07647b'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.add_column('dashboards', sa.Column('slug', sa.String(length=255), nullable=True))
try:
op.create_unique_constraint('idx_unique_slug', 'dashboards', ['slug'])
except:
pass
def downgrade():
op.drop_constraint(None, 'dashboards', type_='unique')
op.drop_column('dashboards', 'slug')
| 32.186047 | 88 | 0.74422 |
7940b23a55bee5e8b155568f62db07fedbbcdd49 | 5,018 | py | Python | cli/src/pcluster/validators/s3_validators.py | rdimaio/aws-parallelcluster | b12f6fb3c6887a9040ff7b03d6c12216e6cc8577 | [
"Apache-2.0"
] | 415 | 2018-11-13T15:02:15.000Z | 2022-03-31T15:26:06.000Z | cli/src/pcluster/validators/s3_validators.py | rdimaio/aws-parallelcluster | b12f6fb3c6887a9040ff7b03d6c12216e6cc8577 | [
"Apache-2.0"
] | 2,522 | 2018-11-13T16:16:27.000Z | 2022-03-31T13:57:10.000Z | cli/src/pcluster/validators/s3_validators.py | lukeseawalker/aws-parallelcluster | 16d427429dae8e638f4350fc3369b751afeeb69b | [
"Apache-2.0"
] | 164 | 2018-11-14T22:47:46.000Z | 2022-03-22T11:33:22.000Z | import re
import time
from urllib.error import HTTPError, URLError
from urllib.request import urlopen
from pcluster.aws.aws_api import AWSApi
from pcluster.aws.common import AWSClientError
from pcluster.utils import get_url_scheme
from pcluster.validators.common import FailureLevel, Validator
from pcluster.validators.utils import get_bucket_name_from_s3_url
class UrlValidator(Validator):
"""
Url Validator.
Validate given url with s3 or https prefix.
"""
def _validate(self, url, retries=3, fail_on_https_error: bool = False, fail_on_s3_error: bool = False):
scheme = get_url_scheme(url)
if scheme in ["https", "s3"]:
try:
if scheme == "s3":
self._validate_s3_uri(url, fail_on_error=fail_on_s3_error)
else:
self._validate_https_uri(url, fail_on_error=fail_on_https_error)
except ConnectionError as e:
if retries > 0:
time.sleep(5)
self._validate(url, retries=retries - 1)
else:
self._add_failure(f"The url '{url}' causes ConnectionError: {e}.", FailureLevel.WARNING)
else:
self._add_failure(
f"The value '{url}' is not a valid URL, choose URL with 'https' or 's3' prefix.",
FailureLevel.ERROR,
)
def _validate_s3_uri(self, url: str, fail_on_error: bool):
try:
match = re.match(r"s3://(.*?)/(.*)", url)
if not match or len(match.groups()) < 2:
self._add_failure(f"s3 url '{url}' is invalid.", FailureLevel.ERROR)
else:
bucket_name, object_name = match.group(1), match.group(2)
AWSApi.instance().s3.head_object(bucket_name=bucket_name, object_name=object_name)
except AWSClientError:
# Todo: Check that bucket is in s3_read_resource or s3_read_write_resource.
self._add_failure(
f"The S3 object '{url}' does not exist or you do not have access to it.",
FailureLevel.ERROR if fail_on_error else FailureLevel.WARNING,
)
def _validate_https_uri(self, url: str, fail_on_error: bool):
try:
with urlopen(url): # nosec nosemgrep
pass
except HTTPError as e:
self._add_failure(
f"The url '{url}' causes HTTPError, the error code is '{e.code}',"
f" the error reason is '{e.reason}'.",
FailureLevel.ERROR if fail_on_error else FailureLevel.WARNING,
)
except URLError as e:
self._add_failure(
f"The url '{url}' causes URLError, the error reason is '{e.reason}'.",
FailureLevel.ERROR if fail_on_error else FailureLevel.WARNING,
)
except ValueError:
self._add_failure(
f"The value '{url}' is not a valid URL.",
FailureLevel.ERROR,
)
class S3BucketUriValidator(Validator):
"""S3 Bucket Url Validator."""
def _validate(self, url):
if get_url_scheme(url) == "s3":
try:
bucket = get_bucket_name_from_s3_url(url)
AWSApi.instance().s3.head_bucket(bucket_name=bucket)
except AWSClientError as e:
self._add_failure(str(e), FailureLevel.ERROR)
else:
self._add_failure(f"The value '{url}' is not a valid S3 URI.", FailureLevel.ERROR)
class S3BucketValidator(Validator):
"""S3 Bucket Validator."""
def _validate(self, bucket):
try:
AWSApi.instance().s3.head_bucket(bucket_name=bucket)
# Check versioning is enabled on the bucket
bucket_versioning_status = AWSApi.instance().s3.get_bucket_versioning_status(bucket)
if bucket_versioning_status != "Enabled":
self._add_failure(
"The S3 bucket {0} specified cannot be used by cluster "
"because versioning setting is: {1}, not 'Enabled'. Please enable bucket versioning.".format(
bucket, bucket_versioning_status
),
FailureLevel.ERROR,
)
except AWSClientError as e:
self._add_failure(str(e), FailureLevel.ERROR)
class S3BucketRegionValidator(Validator):
"""Validate S3 bucket is in the same region with the cloudformation stack."""
def _validate(self, bucket, region):
try:
bucket_region = AWSApi.instance().s3.get_bucket_region(bucket)
if bucket_region != region:
self._add_failure(
f"The S3 bucket {bucket} specified cannot be used because "
"it is not in the same region of the cluster.",
FailureLevel.ERROR,
)
except AWSClientError as e:
self._add_failure(str(e), FailureLevel.ERROR)
| 39.203125 | 113 | 0.589876 |
7940b345cffc4b38e2b8ffa015b2374c612f11ba | 1,439 | py | Python | view_get_entries.py | masaminh/notify_sire | e20405de1fce924a841bf105ac99529dfc337ab4 | [
"MIT"
] | null | null | null | view_get_entries.py | masaminh/notify_sire | e20405de1fce924a841bf105ac99529dfc337ab4 | [
"MIT"
] | 1 | 2019-10-28T15:39:21.000Z | 2019-10-28T15:39:21.000Z | view_get_entries.py | masaminh/notify_sire | e20405de1fce924a841bf105ac99529dfc337ab4 | [
"MIT"
] | null | null | null | """View for scheduled race entries."""
from itertools import groupby
import line
import mojimoji
from interface_view_get_entries import IViewGetEntries
from viewmodel_get_entries import ViewModelGetEntries
import settings
class ViewGetEntriesConsole(IViewGetEntries):
"""出走予定のビュー."""
def update(self, viewmodel: ViewModelGetEntries)->None:
"""出力."""
content = get_content(viewmodel)
print(content)
class ViewGetEntriesLine(IViewGetEntries):
"""出走予定のビュー."""
def update(self, viewmodel: ViewModelGetEntries)->None:
"""出力."""
content = get_content(viewmodel)
line.notify(settings.NOTIFY_ACCESS_TOKEN, content)
def get_racename_line(course: str, raceno: int, racename: str) -> str:
"""レース名行の取得."""
racename = mojimoji.zen_to_han(racename, kana=False)
return f'{course}{raceno}R {racename}'
def get_content(viewmodel: ViewModelGetEntries) -> str:
"""出力文字列の取得."""
content = f'{viewmodel.name}産駒の出走予定\n'
if viewmodel.entries:
for date, group in groupby(viewmodel.entries, lambda x: x.date):
content += f'\n{date.strftime("%m/%d")}\n'
for race, group2 in groupby(group, lambda x: (x.course, x.raceno, x.racename)):
content += ' ' + get_racename_line(*race) + '\n'
for scheduled in group2:
content += f' {scheduled.horsename}\n'
else:
content += ' なし\n'
return content
| 28.78 | 91 | 0.650452 |
7940b3b66f93d329f86da4870a43d79432a4fd0d | 328 | py | Python | arcade/examples/t_2.py | philipbergen/arcade | d4741f6fcb82ca72de021bd71df17a8219da8549 | [
"MIT"
] | null | null | null | arcade/examples/t_2.py | philipbergen/arcade | d4741f6fcb82ca72de021bd71df17a8219da8549 | [
"MIT"
] | null | null | null | arcade/examples/t_2.py | philipbergen/arcade | d4741f6fcb82ca72de021bd71df17a8219da8549 | [
"MIT"
] | null | null | null | import arcade
class MyGame(arcade.Window):
""" Our custom Window Class"""
def __init__(self):
super().__init__(800, 600, "t_2")
self.coin_list = arcade.SpriteList()
arcade.set_background_color(arcade.color.AMAZON)
def on_draw(self):
arcade.start_render()
MyGame()
arcade.run()
| 17.263158 | 56 | 0.643293 |
7940b3c9e4d20a830cb2ee6d154e44179ad581df | 1,887 | py | Python | molecule/resources/tests/test_default.py | popstas/ansible-role-kapacitor | d5896000522359f93f3ec55377250badf3e11819 | [
"MIT"
] | null | null | null | molecule/resources/tests/test_default.py | popstas/ansible-role-kapacitor | d5896000522359f93f3ec55377250badf3e11819 | [
"MIT"
] | null | null | null | molecule/resources/tests/test_default.py | popstas/ansible-role-kapacitor | d5896000522359f93f3ec55377250badf3e11819 | [
"MIT"
] | null | null | null | import os
import testinfra.utils.ansible_runner
import pytest
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
def test_kapacitor_running_and_enabled(host):
kapacitor = host.service("kapacitor")
assert kapacitor.is_running
assert kapacitor.is_enabled
@pytest.mark.parametrize("teststring", [
('test_db = \\["rp_test_db"\\]'),
('test_db_2 = \\["rp_test_db_one", "rp_test_db_two"\\]'),
('https-certificate = "/etc/ssl/kapacitor.pem"'),
('log-enabled = true'),
('write-tracing = false'),
('pprof-enabled = false'),
    ('https-enabled = false'),
('stats-interval = "10s"'),
('database = "_kapacitor"'),
('retention-policy= "default"'),
('url = "https://usage.influxdata.com"'),
('dir = "/var/lib/kapacitor/replay"'),
('dir = "/var/lib/kapacitor/tasks"'),
('snapshot-interval = "60s"'),
('boltdb = "/var/lib/kapacitor/kapacitor.db"'),
('file = "/var/log/kapacitor/kapacitor.log"'),
('level = "INFO"'),
('urls = \\["http://localhost:8086"\\]')
])
def test_kapacitor_config(host, teststring):
kap_config = host.file("/etc/kapacitor/kapacitor.conf")
assert kap_config.exists
assert kap_config.contains(teststring)
def test_tick_file(host):
for alert in (
"kapacitor/alert_load_average",
"cpu_alert",
"disk_alert",
"cpu_alert_batch"
):
tick_script = host.file("/tmp/" + alert + ".tick")
assert tick_script.exists
def test_tick_load(host):
tick_load = host.command("kapacitor list tasks")
for alert in (
"alert_load_average",
"cpu_alert",
"disk_alert",
"cpu_alert_batch"
):
assert alert in tick_load.stdout
def test_kapacitor_listener(host):
assert host.socket('tcp://:::9092').is_listening
| 29.030769 | 63 | 0.63328 |
7940b47648cab52ab45cef2d1dd867992913aef2 | 2,445 | py | Python | setup.py | pbaustert/brightway2-data | bcf62306b069e68bb67948874c06bd049f8e8fb5 | [
"BSD-3-Clause"
] | null | null | null | setup.py | pbaustert/brightway2-data | bcf62306b069e68bb67948874c06bd049f8e8fb5 | [
"BSD-3-Clause"
] | null | null | null | setup.py | pbaustert/brightway2-data | bcf62306b069e68bb67948874c06bd049f8e8fb5 | [
"BSD-3-Clause"
] | null | null | null | from setuptools import setup
import os
packages = []
root_dir = os.path.dirname(__file__)
if root_dir:
os.chdir(root_dir)
for dirpath, dirnames, filenames in os.walk('bw2data'):
# Ignore dirnames that start with '.'
if '__init__.py' in filenames:
pkg = dirpath.replace(os.path.sep, '.')
if os.path.altsep:
pkg = pkg.replace(os.path.altsep, '.')
packages.append(pkg)
v_temp = {}
with open("bw2data/version.py") as fp:
exec(fp.read(), v_temp)
version = ".".join((str(x) for x in v_temp["version"]))
setup(
name='bw2data',
version=version,
packages=packages,
python_requires='>=3.5',
author="Chris Mutel",
author_email="[email protected]",
license="3-clause BSD",
install_requires=[
"appdirs",
"bw2parameters",
"bw_processing",
"docopt",
"fasteners",
"lxml",
"numpy",
"peewee>=3.9.4",
"psutil",
"pyprind",
"requests>=1.1.0",
"scipy",
"stats_arrays",
"unicodecsv",
"voluptuous",
"whoosh",
"wrapt",
],
url="https://github.com/brightway-lca/brightway2-data",
long_description=open('README.rst').read(),
description=('Tools for the management of inventory databases '
'and impact assessment methods. Part of the Brightway2 LCA Framework'),
entry_points = {
'console_scripts': [
'bw2-uptodate = bw2data.bin.bw2_uptodate:main',
]
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: End Users/Desktop',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: BSD License',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Topic :: Scientific/Engineering :: Information Analysis',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Scientific/Engineering :: Visualization',
],)
| 31.346154 | 88 | 0.582004 |
7940b49bcef5cf8691b89397cf9d2e91293f0828 | 2,904 | py | Python | ADE Eval Shared Resources/OSE_AE_evaluation_v1_0/scripts/visualize.py | Xiaoyao-Bao/UoM-Y3-ADR_Extractor | 3afe206abe85c7524e502fe5a347d97e43a9ad94 | [
"Apache-2.0"
] | 1 | 2020-03-02T13:36:29.000Z | 2020-03-02T13:36:29.000Z | ADE Eval Shared Resources/OSE_AE_evaluation_v1_0/scripts/visualize.py | Xiaoyao-Bao/UoM-Y3-ADR_Extractor | 3afe206abe85c7524e502fe5a347d97e43a9ad94 | [
"Apache-2.0"
] | null | null | null | ADE Eval Shared Resources/OSE_AE_evaluation_v1_0/scripts/visualize.py | Xiaoyao-Bao/UoM-Y3-ADR_Extractor | 3afe206abe85c7524e502fe5a347d97e43a9ad94 | [
"Apache-2.0"
] | null | null | null | # Copyright (C) 2018 The MITRE Corporation. See the toplevel
# file LICENSE.txt for license terms.
# The goal here is to generate BRAT visualizations, but embed the whole thing
# in non-server HTML.
import os, sys, shutil, csv, json
def Usage():
print >> sys.stderr, "Usage: visualize.py [ 'ose_gold' | 'ose_submission' ] ose_corpus brat_installation outdir"
sys.exit(1)
if len(sys.argv) != 5:
Usage()
# I can't use the QC dir to get the docs because in the most recent version,
# I removed some of the workspaces.
CORPUS_TYPE = sys.argv[1]
if CORPUS_TYPE not in ('ose_gold', 'ose_submission'):
print >> sys.stderr, "Bad value for corpus type."
sys.exit(1)
[XML_DIR, BRAT_INSTALLATION, OUTDIR] = [os.path.abspath(x) for x in sys.argv[2:]]
if os.path.exists(OUTDIR):
shutil.rmtree(OUTDIR)
os.makedirs(OUTDIR)
ROOTDIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
RESOURCE_DIR = os.path.join(ROOTDIR, "resources", "visualization")
sys.path.insert(0, os.path.join(ROOTDIR, "lib", "python"))
from OSE_AE.corpus import Corpus
from OSE_AE.ose_eval_format import OSEGoldEvalFormat, OSESubmissionEvalFormat
if CORPUS_TYPE == "ose_gold":
corpus = Corpus(XML_DIR, OSEGoldEvalFormat())
else:
corpus = Corpus(XML_DIR, OSESubmissionEvalFormat())
corpus.load()
os.makedirs(os.path.join(OUTDIR, "js"))
shutil.copy(os.path.join(RESOURCE_DIR, "labelviz.js"), os.path.join(OUTDIR, "js"))
os.makedirs(os.path.join(OUTDIR, "brat_13"))
shutil.copytree(os.path.join(BRAT_INSTALLATION, "client"), os.path.join(OUTDIR, "brat_13", "client"))
shutil.copytree(os.path.join(BRAT_INSTALLATION, "static", "fonts"), os.path.join(OUTDIR, "brat_13", "static", "fonts"))
shutil.copy(os.path.join(BRAT_INSTALLATION, "style-vis.css"), os.path.join(OUTDIR, "brat_13"))
# I'm going to generate one document per section.
# OSE_AE: "background: limegreen"
# NonOSE_AE: "background: yellow"
# Not_AE_Candidate: "background: red; color: white"
for drugName, label in corpus.labels.items():
for section in label.sections:
mentions = [m for m in label.mentions if section.id == m.section]
docData = {"text": section.text,
"entities": [["T%d" % i, m.type, [[sp[0], sp[1] + sp[0]] for sp in m.spans]] for (i, m) in enumerate(mentions)],
"normalizations": [["N%d" % i, "Reference", "T%d" % i, "MedDRA", m.meddraEntries[0][1], m.meddraEntries[0][0]] for (i, m) in enumerate(mentions) if m.meddraEntries],
"attributes": [["A%d" % i, "Reason", "T%d" % i, m.reason] for (i, m) in enumerate(mentions) if m.reason]}
fp = open(os.path.join(RESOURCE_DIR, "template.html"), "r")
s = fp.read()
fp.close()
fp = open(os.path.join(OUTDIR, drugName + "_" + section.name.replace(" ", "_") + ".html"), "w")
fp.write(s.replace("TPL_DOCDATA", json.dumps(docData)))
fp.close()
| 41.485714 | 184 | 0.671832 |
7940b5389b299b65a3a12362c79acfd6e40ec586 | 1,475 | py | Python | mathematical_model_exercises/main_problem4.py | dreaming-qin/python_algorithm | f5277cec71aad6f62e665e171e0a96f33abd1671 | [
"Apache-2.0"
] | 1 | 2022-01-13T15:21:51.000Z | 2022-01-13T15:21:51.000Z | mathematical_model_exercises/main_problem4.py | dreaming-qin/python_algorithm | f5277cec71aad6f62e665e171e0a96f33abd1671 | [
"Apache-2.0"
] | null | null | null | mathematical_model_exercises/main_problem4.py | dreaming-qin/python_algorithm | f5277cec71aad6f62e665e171e0a96f33abd1671 | [
"Apache-2.0"
] | null | null | null | import pandas as pd
import numpy as np
import math
import sys
import random
from term_influnce import format_data, get_follower, get_music_feature, get_term_MS
filename='../2021_ICM_Problem_D_Data/full_music_data.csv'
df = pd.read_csv(filename)
data = np.array(df.iloc[:,:].values)
# Artist names (influencer and followers)
influnce_name='The Rolling Stones'
follower_name_set=set(['Devo','Alice Cooper'])
# follower_name_set=set(['The Stone Roses','Alice Cooper'])
# Incorrect example, can be used for comparison
influnce_name='Black Box'
follower_name_set=set(['David Guetta','Soda Stereo'])
# Problem 4, part 1
str1='influncer,follower,LTS,STS\n'
for follower_name in follower_name_set:
MS=get_term_MS(data,influnce_name,follower_name,step=3)
delta_MS=[MS[i+1]-MS[i] for i in range(len(MS)-1)]
    STS=np.max(delta_MS)  # largest single-step change in MS
    LTS=np.sum(delta_MS)  # net change in MS over the whole period
str1=str1+"{},{},{},{}\n".format(influnce_name,follower_name,LTS,STS)
print(str1)
file=open('data/problem4_term_influnce.csv','w',errors='ignore')
file.write(str1)
file.close()
# Problem 4, part 2
str1=''
for follower_name in follower_name_set:
attr_name,score=get_music_feature(data,influnce_name,follower_name)
str1=str1+"{},{}".format(influnce_name,follower_name)
for item in score:
str1=str1+',{}'.format(item)
str1=str1+'\n'
str1='\n'+str1
for i in range(len(attr_name)-1,-1,-1):
str1=',{}'.format(attr_name[i])+str1
str1='influnce_name,follower_name'+str1
print(str1)
file=open('data/problem4_music_feature.csv','w',errors='ignore')
file.write(str1)
file.close()
| 27.830189 | 83 | 0.734915 |
7940b54645d8da5f58b9eaa5f2a861dc79455ca9 | 4,367 | py | Python | tgap_ng/edge_simplify/ontriangle.py | erbasualex/tgap-ng | 3e9ec9e93ee8dfd79d9878b34ed12296a8af6301 | [
"MIT"
] | 2 | 2022-01-12T17:11:49.000Z | 2022-01-14T09:23:46.000Z | tgap_ng/edge_simplify/ontriangle.py | erbasualex/tgap-ng | 3e9ec9e93ee8dfd79d9878b34ed12296a8af6301 | [
"MIT"
] | null | null | null | tgap_ng/edge_simplify/ontriangle.py | erbasualex/tgap-ng | 3e9ec9e93ee8dfd79d9878b34ed12296a8af6301 | [
"MIT"
] | 1 | 2022-02-26T16:50:45.000Z | 2022-02-26T16:50:45.000Z | def orient2d(pa, pb, pc):
"""Twice signed area of triangle formed by points a, b and c
Direction from pa to pc, via pb, where returned value is as follows:
left : + [ = ccw ]
straight : 0.
right : - [ = cw ]
:param pa: point
:type pa: sequence
:param pb: point
:type pb: sequence
:param pc: point
:type pc: sequence
:returns: double
"""
acx = pa[0] - pc[0]
bcx = pb[0] - pc[0]
acy = pa[1] - pc[1]
bcy = pb[1] - pc[1]
return acx * bcy - acy * bcx
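# Worked examples (illustrative): the sign encodes the turn direction pa -> pb -> pc.
#   orient2d((0, 0), (1, 0), (1, 1))   ->  1  (left turn / ccw)
#   orient2d((0, 0), (1, 0), (2, 0))   ->  0  (collinear)
#   orient2d((0, 0), (1, 0), (1, -1))  -> -1  (right turn / cw)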
def sign(value):
if value > 0:
return 1
elif value < 0:
return -1
else:
return 0
# def on_triangle(pt, a, b, c):
# """
# Using signed distances to edges (halfplanes)
# Returns true when ``pt'' is on triangle (formed by point ``a'', ``b''
# and ``c''), which means: not on exterior (returns False); On interior or
# on boundary (returns True)
# """
# dists = orient2d(a, b, pt), orient2d(b, c, pt), orient2d(c, a, pt)
# # find a non-zero distance
# for d in dists:
# if d != 0:
# s = sign(d)
# break
# else:
# # all distances zero, so point tested is on the 3 edges
# # of the triangle, assume it is inside
# # (although if degenerate triangle - where triangle is a line segment -
# # the point can be outside as well)
# return True
# # here we have a non-zero distance
# # compare whether other non-zero distances have same sign
# for d in dists:
# if d == 0:
# continue
# elif sign(d) != s:
# # if not, then we are outside
# return False
# else:
# continue
# # otherwise, we are inside
# return True
# def on_triangle(p, p0, p1, p2):
# """
# Using bary-centric logic (twice as fast as halfplane based method)
# Returns true when ``pt'' is on triangle (formed by point ``a'', ``b''
# and ``c''), which means: not on exterior (returns False); On interior or
# on boundary (returns True)
# From: https://stackoverflow.com/a/34093754
# """
# dX = p[0] - p2[0]
# dY = p[1] - p2[1]
# dX21 = p2[0] - p1[0]
# dY12 = p1[1] - p2[1]
# D = dY12 * (p0[0] - p2[0]) + dX21 * (p0[1] - p2[1])
# s = dY12 * dX + dX21 * dY
# t = (p2[1] - p0[1]) * dX + (p0[0] - p2[0]) * dY
# if D < 0:
# return s <= 0 and t <= 0 and s+t >= D
# else:
# return s >= 0 and t >= 0 and s+t <= D
def on_triangle(p_test, p0, p1, p2):
"""
Using bary-centric logic (twice as fast as halfplane based method)
Returns true when ``pt'' is on triangle (formed by point ``a'', ``b''
and ``c''), which means: not on exterior (returns False); On interior or
on boundary (returns True)
From: https://stackoverflow.com/a/34093754
"""
dX = p_test[0] - p0[0]
dY = p_test[1] - p0[1]
dX20 = p2[0] - p0[0]
dY20 = p2[1] - p0[1]
dX10 = p1[0] - p0[0]
dY10 = p1[1] - p0[1]
s_p = (dY20 * dX) - (dX20 * dY)
t_p = (dX10 * dY) - (dY10 * dX)
D = (dX10 * dY20) - (dY10 * dX20)
if D > 0:
return (s_p >= 0) and (t_p >= 0) and (s_p + t_p) <= D
else:
return (s_p <= 0) and (t_p <= 0) and (s_p + t_p) >= D
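# Illustrative usage (added example, not part of the original module): interior
# and boundary points count as "on" the triangle, exterior points do not.
def _example_on_triangle():
    tri = ((10, 10), (25, 10), (25, 25))
    assert on_triangle((15, 12), *tri)    # interior point
    assert on_triangle((25, 10), *tri)    # vertex / boundary point
    assert not on_triangle((0, 0), *tri)  # exterior point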
def test_degenerate():
assert not on_triangle([11, 45], [45, 45], [45, 45], [44, 45])
def _test():
"""unit test for on_triangle"""
a, b, c = (10, 10), (25, 10), (25, 25)
# 6 permutations to form the triangle
T = [(a, b, c), (a, c, b), (b, a, c), (b, c, a), (c, a, b), (c, b, a)]
inside = set(
[
(10, 10),
(15, 10),
(20, 10),
(25, 10),
(15, 15),
(20, 15),
(25, 15),
(20, 20),
(25, 20),
(25, 25),
]
)
for _ in range(10000):
for t in range(6):
p0, p1, p2 = T[t]
for i in range(-10, 50, 5):
for j in range(0, 31, 5):
pt = (i, j)
on = on_triangle(pt, p0, p1, p2)
if pt in inside:
assert on, "p<{0}>, t<{1}, {2}, {3}>".format(pt, p0, p1, p2)
else:
assert not on, "p<{0}>, t<{1}, {2}, {3}>".format(pt, p0, p1, p2)
if __name__ == "__main__":
# _test()
test_degenerate()
| 27.639241 | 88 | 0.481795 |
7940b5d4b064e05d672afe1358a0bee256336b38 | 1,873 | py | Python | src/automation/setup.py | ravithanneeru/azure-cli-extensions | e0de87f3563ae39525370e9912589aac33e7bded | [
"MIT"
] | 207 | 2017-11-29T06:59:41.000Z | 2022-03-31T10:00:53.000Z | src/automation/setup.py | ravithanneeru/azure-cli-extensions | e0de87f3563ae39525370e9912589aac33e7bded | [
"MIT"
] | 4,061 | 2017-10-27T23:19:56.000Z | 2022-03-31T23:18:30.000Z | src/automation/setup.py | ravithanneeru/azure-cli-extensions | e0de87f3563ae39525370e9912589aac33e7bded | [
"MIT"
] | 802 | 2017-10-11T17:36:26.000Z | 2022-03-31T22:24:32.000Z | #!/usr/bin/env python
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from codecs import open
from setuptools import setup, find_packages
# HISTORY.rst entry.
VERSION = '0.1.1'
try:
from azext_automation.manual.version import VERSION
except ImportError:
pass
# The full list of classifiers is available at
# https://pypi.python.org/pypi?%3Aaction=list_classifiers
CLASSIFIERS = [
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'License :: OSI Approved :: MIT License',
]
DEPENDENCIES = []
try:
from azext_automation.manual.dependency import DEPENDENCIES
except ImportError:
pass
with open('README.md', 'r', encoding='utf-8') as f:
README = f.read()
with open('HISTORY.rst', 'r', encoding='utf-8') as f:
HISTORY = f.read()
setup(
name='automation',
version=VERSION,
description='Microsoft Azure Command-Line Tools AutomationClient Extension',
author='Microsoft Corporation',
author_email='[email protected]',
url='https://github.com/Azure/azure-cli-extensions/tree/master/src/automation',
long_description=README + '\n\n' + HISTORY,
license='MIT',
classifiers=CLASSIFIERS,
packages=find_packages(),
install_requires=DEPENDENCIES,
package_data={'azext_automation': ['azext_metadata.json']},
)
| 31.745763 | 94 | 0.632675 |
7940b7b301162ee9b1ab5d50ef09045b29d1f1a3 | 687 | py | Python | pymtl3/dsl/__init__.py | mondO/pymtl3 | 9869dda28c01926cee6da94ebdeac2a210150c62 | [
"BSD-3-Clause"
] | 1 | 2022-01-03T06:22:11.000Z | 2022-01-03T06:22:11.000Z | pymtl3/dsl/__init__.py | mondO/pymtl3 | 9869dda28c01926cee6da94ebdeac2a210150c62 | [
"BSD-3-Clause"
] | null | null | null | pymtl3/dsl/__init__.py | mondO/pymtl3 | 9869dda28c01926cee6da94ebdeac2a210150c62 | [
"BSD-3-Clause"
] | null | null | null | """
These objects are only for developers to use. Other objects are exposed
to users in pymtl/__init__.py
"""
from .Component import Component
from .ComponentLevel3 import connect
from .ComponentLevel5 import method_port
from .ComponentLevel6 import non_blocking
from .ComponentLevel7 import blocking
from .Connectable import (
BlockingIfc,
CalleeIfcCL,
CalleeIfcFL,
CalleeIfcRTL,
CalleePort,
CallerIfcCL,
CallerIfcFL,
CallerIfcRTL,
CallerPort,
CallIfcRTL,
Const,
InPort,
Interface,
MethodPort,
NonBlockingIfc,
OutPort,
Signal,
Wire,
)
from .ConstraintTypes import RD, WR, M, U
from .Placeholder import Placeholder
| 21.46875 | 71 | 0.732169 |
7940b7e896648caad90ad6b5b4813a8069b152d6 | 648 | py | Python | 121-best-time-to-buy-and-sell-stock.py | daicang/Leetcode | 676b05c1222670f73294eb2ed2665433eac148f4 | [
"MIT"
] | null | null | null | 121-best-time-to-buy-and-sell-stock.py | daicang/Leetcode | 676b05c1222670f73294eb2ed2665433eac148f4 | [
"MIT"
] | null | null | null | 121-best-time-to-buy-and-sell-stock.py | daicang/Leetcode | 676b05c1222670f73294eb2ed2665433eac148f4 | [
"MIT"
] | null | null | null | class Solution(object):
def maxProfit(self, prices):
"""
:type prices: List[int]
:rtype: int
"""
if len(prices) < 2:
return 0
size = len(prices)
min_before = [prices[0]] * size
max_after = [prices[-1]] * size
for idx, p in enumerate(prices):
if idx > 0:
min_before[idx] = min(min_before[idx-1], p)
for idx in range(size-2, -1, -1):
max_after[idx] = max(max_after[idx+1], prices[idx])
maxp = 0
for i in range(size):
maxp = max(maxp, max_after[i]-min_before[i])
return maxp
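if __name__ == "__main__":
    # Illustrative usage (added example, not part of the original solution):
    # with prices [7, 1, 5, 3, 6, 4], buying at 1 and selling at 6 gives the
    # maximum profit of 5.
    assert Solution().maxProfit([7, 1, 5, 3, 6, 4]) == 5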
| 25.92 | 63 | 0.492284 |
7940b95428f2bffbd009b433fa2a5e2c28be0c92 | 3,365 | py | Python | zosapi/base.py | sashank27/Aulign | 51e34746afb5ea796d36dcf7dd3ad79673ad633c | [
"MIT"
] | null | null | null | zosapi/base.py | sashank27/Aulign | 51e34746afb5ea796d36dcf7dd3ad79673ad633c | [
"MIT"
] | null | null | null | zosapi/base.py | sashank27/Aulign | 51e34746afb5ea796d36dcf7dd3ad79673ad633c | [
"MIT"
] | null | null | null | from win32com.client.gencache import EnsureDispatch, EnsureModule
from win32com.client import constants
"""
The base application to establish a connection to the
Zemax win32 client using ZOS-API.
"""
class PythonStandaloneApplication(object):
class LicenseException(Exception):
pass
class ConnectionException(Exception):
pass
class InitializationException(Exception):
pass
class SystemNotPresentException(Exception):
pass
def __init__(self):
'''
make sure the Python wrappers are available for the COM client and
interfaces
# EnsureModule('ZOSAPI_Interfaces', 0, 1, 0)
Note - the above can also be accomplished using 'makepy.py' in the
following directory:
        {PythonEnv}\Lib\site-packages\win32com\client\
        Also note that the generated wrappers do not get refreshed when the
COM library changes.
To refresh the wrappers, you can manually delete everything in the
cache directory:
{PythonEnv}\Lib\site-packages\win32com\gen_py\*.*
'''
self.TheConnection = EnsureDispatch("ZOSAPI.ZOSAPI_Connection")
if self.TheConnection is None:
raise PythonStandaloneApplication.ConnectionException(
"Unable to intialize COM connection to ZOSAPI")
self.TheApplication = self.TheConnection.CreateNewApplication()
if self.TheApplication is None:
raise PythonStandaloneApplication.InitializationException(
"Unable to acquire ZOSAPI application")
if self.TheApplication.IsValidLicenseForAPI is False:
raise PythonStandaloneApplication.LicenseException(
"License is not valid for ZOSAPI use")
self.TheSystem = self.TheApplication.PrimarySystem
if self.TheSystem is None:
raise PythonStandaloneApplication.SystemNotPresentException(
"Unable to acquire Primary system")
def __del__(self):
if self.TheApplication is not None:
self.TheApplication.CloseApplication()
self.TheApplication = None
self.TheConnection = None
def OpenFile(self, filepath, saveIfNeeded):
if self.TheSystem is None:
raise PythonStandaloneApplication.SystemNotPresentException(
"Unable to acquire Primary system")
self.TheSystem.LoadFile(filepath, saveIfNeeded)
def CloseFile(self, save):
if self.TheSystem is None:
raise PythonStandaloneApplication.SystemNotPresentException(
"Unable to acquire Primary system")
self.TheSystem.Close(save)
def SamplesDir(self):
if self.TheApplication is None:
raise PythonStandaloneApplication.InitializationException(
"Unable to acquire ZOSAPI application")
return self.TheApplication.SamplesDir
def ExampleConstants(self):
if self.TheApplication.LicenseStatus is constants.LicenseStatusType_PremiumEdition:
return "Premium"
elif self.TheApplication.LicenseStatus is constants.LicenseStatusType_ProfessionalEdition:
return "Professional"
elif self.TheApplication.LicenseStatus is constants.LicenseStatusType_StandardEdition:
return "Standard"
else:
return "Invalid"
| 36.576087 | 98 | 0.683804 |
7940b97b6370969e99c1c92252bdc302c750ebd6 | 10,660 | py | Python | Lib/test/test_threadsignals.py | Krrishdhaneja/cpython | 9ae9ad8ba35cdcece7ded73cd2207e4f8cb85578 | [
"0BSD"
] | 1 | 2020-10-25T16:33:22.000Z | 2020-10-25T16:33:22.000Z | Lib/test/test_threadsignals.py | Krrishdhaneja/cpython | 9ae9ad8ba35cdcece7ded73cd2207e4f8cb85578 | [
"0BSD"
] | null | null | null | Lib/test/test_threadsignals.py | Krrishdhaneja/cpython | 9ae9ad8ba35cdcece7ded73cd2207e4f8cb85578 | [
"0BSD"
] | null | null | null | """PyUnit testing that threads honor our signal semantics"""
import unittest
import signal
import os
import sys
from test import support
from test.support import threading_helper
import _thread as thread
import time
if (sys.platform[:3] == 'win'):
raise unittest.SkipTest("Can't test signal on %s" % sys.platform)
process_pid = os.getpid()
signalled_all=thread.allocate_lock()
USING_PTHREAD_COND = (sys.thread_info.name == 'pthread'
and sys.thread_info.lock == 'mutex+cond')
def registerSignals(for_usr1, for_usr2, for_alrm):
usr1 = signal.signal(signal.SIGUSR1, for_usr1)
usr2 = signal.signal(signal.SIGUSR2, for_usr2)
alrm = signal.signal(signal.SIGALRM, for_alrm)
return usr1, usr2, alrm
# The signal handler. Just note that the signal occurred and
# from who.
def handle_signals(sig,frame):
signal_blackboard[sig]['tripped'] += 1
signal_blackboard[sig]['tripped_by'] = thread.get_ident()
# a function that will be spawned as a separate thread.
def send_signals():
os.kill(process_pid, signal.SIGUSR1)
os.kill(process_pid, signal.SIGUSR2)
signalled_all.release()
class ThreadSignals(unittest.TestCase):
def test_signals(self):
with threading_helper.wait_threads_exit():
# Test signal handling semantics of threads.
# We spawn a thread, have the thread send two signals, and
# wait for it to finish. Check that we got both signals
# and that they were run by the main thread.
signalled_all.acquire()
self.spawnSignallingThread()
signalled_all.acquire()
# the signals that we asked the kernel to send
# will come back, but we don't know when.
# (it might even be after the thread exits
# and might be out of order.) If we haven't seen
# the signals yet, send yet another signal and
# wait for it return.
if signal_blackboard[signal.SIGUSR1]['tripped'] == 0 \
or signal_blackboard[signal.SIGUSR2]['tripped'] == 0:
try:
signal.alarm(1)
signal.pause()
finally:
signal.alarm(0)
self.assertEqual( signal_blackboard[signal.SIGUSR1]['tripped'], 1)
self.assertEqual( signal_blackboard[signal.SIGUSR1]['tripped_by'],
thread.get_ident())
self.assertEqual( signal_blackboard[signal.SIGUSR2]['tripped'], 1)
self.assertEqual( signal_blackboard[signal.SIGUSR2]['tripped_by'],
thread.get_ident())
signalled_all.release()
def spawnSignallingThread(self):
thread.start_new_thread(send_signals, ())
def alarm_interrupt(self, sig, frame):
raise KeyboardInterrupt
@unittest.skipIf(USING_PTHREAD_COND,
'POSIX condition variables cannot be interrupted')
@unittest.skipIf(sys.platform.startswith('linux') and
not sys.thread_info.version,
'Issue 34004: musl does not allow interruption of locks '
'by signals.')
# Issue #20564: sem_timedwait() cannot be interrupted on OpenBSD
@unittest.skipIf(sys.platform.startswith('openbsd'),
'lock cannot be interrupted on OpenBSD')
def test_lock_acquire_interruption(self):
# Mimic receiving a SIGINT (KeyboardInterrupt) with SIGALRM while stuck
# in a deadlock.
# XXX this test can fail when the legacy (non-semaphore) implementation
# of locks is used in thread_pthread.h, see issue #11223.
oldalrm = signal.signal(signal.SIGALRM, self.alarm_interrupt)
try:
lock = thread.allocate_lock()
lock.acquire()
signal.alarm(1)
t1 = time.monotonic()
self.assertRaises(KeyboardInterrupt, lock.acquire, timeout=5)
dt = time.monotonic() - t1
# Checking that KeyboardInterrupt was raised is not sufficient.
# We want to assert that lock.acquire() was interrupted because
# of the signal, not that the signal handler was called immediately
# after timeout return of lock.acquire() (which can fool assertRaises).
self.assertLess(dt, 3.0)
finally:
signal.alarm(0)
signal.signal(signal.SIGALRM, oldalrm)
@unittest.skipIf(USING_PTHREAD_COND,
'POSIX condition variables cannot be interrupted')
@unittest.skipIf(sys.platform.startswith('linux') and
not sys.thread_info.version,
'Issue 34004: musl does not allow interruption of locks '
'by signals.')
# Issue #20564: sem_timedwait() cannot be interrupted on OpenBSD
@unittest.skipIf(sys.platform.startswith('openbsd'),
'lock cannot be interrupted on OpenBSD')
def test_rlock_acquire_interruption(self):
# Mimic receiving a SIGINT (KeyboardInterrupt) with SIGALRM while stuck
# in a deadlock.
# XXX this test can fail when the legacy (non-semaphore) implementation
# of locks is used in thread_pthread.h, see issue #11223.
oldalrm = signal.signal(signal.SIGALRM, self.alarm_interrupt)
try:
rlock = thread.RLock()
# For reentrant locks, the initial acquisition must be in another
# thread.
def other_thread():
rlock.acquire()
with threading_helper.wait_threads_exit():
thread.start_new_thread(other_thread, ())
# Wait until we can't acquire it without blocking...
while rlock.acquire(blocking=False):
rlock.release()
time.sleep(0.01)
signal.alarm(1)
t1 = time.monotonic()
self.assertRaises(KeyboardInterrupt, rlock.acquire, timeout=5)
dt = time.monotonic() - t1
# See rationale above in test_lock_acquire_interruption
self.assertLess(dt, 3.0)
finally:
signal.alarm(0)
signal.signal(signal.SIGALRM, oldalrm)
def acquire_retries_on_intr(self, lock):
self.sig_recvd = False
def my_handler(signal, frame):
self.sig_recvd = True
old_handler = signal.signal(signal.SIGUSR1, my_handler)
try:
def other_thread():
# Acquire the lock in a non-main thread, so this test works for
# RLocks.
lock.acquire()
# Wait until the main thread is blocked in the lock acquire, and
# then wake it up with this.
time.sleep(0.5)
os.kill(process_pid, signal.SIGUSR1)
# Let the main thread take the interrupt, handle it, and retry
# the lock acquisition. Then we'll let it run.
time.sleep(0.5)
lock.release()
with threading_helper.wait_threads_exit():
thread.start_new_thread(other_thread, ())
# Wait until we can't acquire it without blocking...
while lock.acquire(blocking=False):
lock.release()
time.sleep(0.01)
result = lock.acquire() # Block while we receive a signal.
self.assertTrue(self.sig_recvd)
self.assertTrue(result)
finally:
signal.signal(signal.SIGUSR1, old_handler)
def test_lock_acquire_retries_on_intr(self):
self.acquire_retries_on_intr(thread.allocate_lock())
def test_rlock_acquire_retries_on_intr(self):
self.acquire_retries_on_intr(thread.RLock())
def test_interrupted_timed_acquire(self):
# Test to make sure we recompute lock acquisition timeouts when we
# receive a signal. Check this by repeatedly interrupting a lock
# acquire in the main thread, and make sure that the lock acquire times
# out after the right amount of time.
# NOTE: this test only behaves as expected if C signals get delivered
# to the main thread. Otherwise lock.acquire() itself doesn't get
# interrupted and the test trivially succeeds.
self.start = None
self.end = None
self.sigs_recvd = 0
done = thread.allocate_lock()
done.acquire()
lock = thread.allocate_lock()
lock.acquire()
def my_handler(signum, frame):
self.sigs_recvd += 1
old_handler = signal.signal(signal.SIGUSR1, my_handler)
try:
def timed_acquire():
self.start = time.monotonic()
lock.acquire(timeout=0.5)
self.end = time.monotonic()
def send_signals():
for _ in range(40):
time.sleep(0.02)
os.kill(process_pid, signal.SIGUSR1)
done.release()
with threading_helper.wait_threads_exit():
# Send the signals from the non-main thread, since the main thread
# is the only one that can process signals.
thread.start_new_thread(send_signals, ())
timed_acquire()
# Wait for thread to finish
done.acquire()
# This allows for some timing and scheduling imprecision
self.assertLess(self.end - self.start, 2.0)
self.assertGreater(self.end - self.start, 0.3)
# If the signal is received several times before PyErr_CheckSignals()
# is called, the handler will get called less than 40 times. Just
# check it's been called at least once.
self.assertGreater(self.sigs_recvd, 0)
finally:
signal.signal(signal.SIGUSR1, old_handler)
def test_main():
global signal_blackboard
signal_blackboard = { signal.SIGUSR1 : {'tripped': 0, 'tripped_by': 0 },
signal.SIGUSR2 : {'tripped': 0, 'tripped_by': 0 },
signal.SIGALRM : {'tripped': 0, 'tripped_by': 0 } }
oldsigs = registerSignals(handle_signals, handle_signals, handle_signals)
try:
support.run_unittest(ThreadSignals)
finally:
registerSignals(*oldsigs)
if __name__ == '__main__':
test_main()
| 42.811245 | 86 | 0.59409 |
7940ba03b8455150ccafa31f7adfc4f5f4ca11ad | 18,515 | py | Python | tests_python/tests_alpha/test_basic.py | d4hines/marigold-tezos | d106ccb9208af16d673aaa4ca48878eeba37527b | [
"MIT"
] | null | null | null | tests_python/tests_alpha/test_basic.py | d4hines/marigold-tezos | d106ccb9208af16d673aaa4ca48878eeba37527b | [
"MIT"
] | 1 | 2022-02-18T09:54:32.000Z | 2022-02-18T09:54:32.000Z | tests_python/tests_alpha/test_basic.py | d4hines/marigold-tezos | d106ccb9208af16d673aaa4ca48878eeba37527b | [
"MIT"
] | null | null | null | from os import path
import pytest
from client.client import Client
from tools.paths import ACCOUNT_PATH
from tools.utils import assert_run_failure
from .contract_paths import CONTRACT_PATH
BAKE_ARGS = ['--max-priority', '512', '--minimal-timestamp']
TRANSFER_ARGS = ['--burn-cap', '0.257']
@pytest.mark.incremental
class TestRawContext:
def test_delegates(self, client: Client):
path = '/chains/main/blocks/head/context/raw/bytes/delegates/?depth=3'
res = client.rpc('get', path)
expected = {
"ed25519": {
"02": {"29": None},
"a9": {"ce": None},
"c5": {"5c": None},
"da": {"c9": None},
"e7": {"67": None},
}
}
assert res == expected
def test_no_service_1(self, client: Client):
path = '/chains/main/blocks/head/context/raw/bytes/non-existent'
with assert_run_failure('No service found at this URL'):
client.rpc('get', path)
def test_no_service_2(self, client: Client):
path = (
'/chains/main/blocks/head/context/raw/bytes/'
'non-existent?depth=-1'
)
expected = 'Command failed : Extraction depth -1 is invalid'
with assert_run_failure(expected):
client.rpc('get', path)
def test_no_service_3(self, client: Client):
path = '/chains/main/blocks/head/context/raw/bytes/non-existent?depth=0'
with assert_run_failure('No service found at this URL'):
client.rpc('get', path)
def test_bake(self, client: Client):
client.bake('bootstrap4', BAKE_ARGS)
@pytest.mark.parametrize(
"identity, message, expected_signature",
[
(
'bootstrap1',
'msg1',
'edsigtz68o4FdbpvycnAMDLaa7hpmmhjDx'
'hx4Zu3QWHLYJtcY1mVhW9m6CCvsciFXwf1'
'zLmah8fJP51cqaeaciBPGy5osH11AnR',
),
(
'bootstrap2',
'msg2',
'edsigtZqhR5SW6vbRSmqwzfS1KiJZLYLe'
'FhLcCEw7WxjBDxotVx83M2rLe4Baq52SUT'
'jxfXhQ5J3TabCwqt78kNpoU8j42GDEk4',
),
(
'bootstrap3',
'msg3',
'edsigu2PvAWxVYY3jQFVfBRW2Dg61xZMN'
'esHiNbwCTmpJSyfcJMW8Ch9WABHqsgHQRB'
'aSs6zZNHVGXfHSBnGCxT9x2b49L2zpMW',
),
(
'bootstrap4',
'msg4',
'edsigu5jieost8eeD3JwVrpPuSnKzLLvR3'
'aqezLPDTvxC3p41qwBEpxuViKriipxig5'
'2NQmJ7AFXTzhM3xgKM2ZaADcSMYWztuJ',
),
],
)
def test_sign_message(self, client, identity, message, expected_signature):
assert client.sign_message(message, identity) == expected_signature
@pytest.mark.parametrize(
"identity, message, signature",
[
(
'bootstrap1',
'msg1',
'edsigtz68o4FdbpvycnAMDLaa7hpmmhjDx'
'hx4Zu3QWHLYJtcY1mVhW9m6CCvsciFXwf1'
'zLmah8fJP51cqaeaciBPGy5osH11AnR',
),
(
'bootstrap2',
'msg2',
'edsigtZqhR5SW6vbRSmqwzfS1KiJZLYLe'
'FhLcCEw7WxjBDxotVx83M2rLe4Baq52SUT'
'jxfXhQ5J3TabCwqt78kNpoU8j42GDEk4',
),
(
'bootstrap3',
'msg3',
'edsigu2PvAWxVYY3jQFVfBRW2Dg61xZMN'
'esHiNbwCTmpJSyfcJMW8Ch9WABHqsgHQRB'
'aSs6zZNHVGXfHSBnGCxT9x2b49L2zpMW',
),
(
'bootstrap4',
'msg4',
'edsigu5jieost8eeD3JwVrpPuSnKzLLvR3'
'aqezLPDTvxC3p41qwBEpxuViKriipxig5'
'2NQmJ7AFXTzhM3xgKM2ZaADcSMYWztuJ',
),
],
)
def test_check_message(self, client, identity, message, signature):
assert client.check_message(message, identity, signature)
@pytest.mark.parametrize(
"identity, message, head_block",
[
("bootstrap1", "msg1", False),
("bootstrap2", "msg2", False),
("bootstrap3", "msg3", True),
("bootstrap4", "msg4", True),
],
)
def test_fail_inject_signed_arbitrary_ope(
self, client, identity, message, head_block
):
if head_block:
signature = client.sign_message(message, identity, block="head")
else:
signature = client.sign_message(message, identity)
chain_id = client.rpc('get', '/chains/main/chain_id')
head_hash = client.rpc('get', '/chains/main/blocks/head/hash')
run_json = {
'operation': {
"branch": head_hash,
"contents": [{"kind": "failing_noop", "arbitrary": message}],
'signature': signature,
},
'chain_id': chain_id,
}
run_operation_path = (
'/chains/main/blocks/head/helpers/scripts/run_operation'
)
with assert_run_failure(
'The failing_noop operation cannot be executed by the protocol'
):
client.rpc('post', run_operation_path, data=run_json)
def test_gen_keys(self, client: Client, session):
session['keys'] = ['foo', 'bar', 'boo']
sigs = [None, 'secp256k1', 'ed25519']
for key, sig in zip(session['keys'], sigs):
args = [] if sig is None else ['--sig', sig]
client.gen_key(key, args)
def test_transfers(self, client: Client, session):
client.transfer(1000, 'bootstrap1', session['keys'][0], TRANSFER_ARGS)
client.bake('bootstrap1', BAKE_ARGS)
client.transfer(2000, 'bootstrap1', session['keys'][1], TRANSFER_ARGS)
client.bake('bootstrap1', BAKE_ARGS)
client.transfer(3000, 'bootstrap1', session['keys'][2], TRANSFER_ARGS)
client.bake('bootstrap1', BAKE_ARGS)
def test_balances(self, client: Client, session):
assert client.get_balance(session['keys'][0]) == 1000
assert client.get_balance(session['keys'][1]) == 2000
assert client.get_balance(session['keys'][2]) == 3000
def test_transfer_bar_foo(self, client: Client, session):
client.transfer(
1000,
session['keys'][1],
session['keys'][0],
['--fee', '0', '--force-low-fee'],
)
client.bake(
'bootstrap1',
BAKE_ARGS
+ [
'--minimal-fees',
'0',
'--minimal-nanotez-per-byte',
'0',
'--minimal-nanotez-per-gas-unit',
'0',
],
)
def test_balances_bar_foo(self, client: Client, session):
assert client.get_balance(session['keys'][0]) == 2000
assert client.get_balance(session['keys'][1]) == 1000
def test_transfer_foo_bar(self, client: Client, session):
client.transfer(
1000, session['keys'][0], session['keys'][1], ['--fee', '0.05']
)
client.bake('bootstrap1', BAKE_ARGS)
def test_balances_foo_bar(self, client: Client, session):
assert client.get_balance(session['keys'][0]) == 999.95
assert client.get_balance(session['keys'][1]) == 2000
def test_transfer_failure(self, client: Client, session):
with pytest.raises(Exception):
client.transfer(999.95, session['keys'][0], session['keys'][1])
def test_originate_contract_noop(self, client: Client):
contract = path.join(CONTRACT_PATH, 'opcodes', 'noop.tz')
client.remember('noop', contract)
client.typecheck(contract)
client.originate(
'noop', 1000, 'bootstrap1', contract, ['--burn-cap', '0.295']
)
client.bake('bootstrap1', BAKE_ARGS)
def test_transfer_to_noop(self, client: Client):
client.transfer(10, 'bootstrap1', 'noop', ['--arg', 'Unit'])
client.bake('bootstrap1', BAKE_ARGS)
def test_contract_hardlimit(self, client: Client):
contract = path.join(CONTRACT_PATH, 'mini_scenarios', 'hardlimit.tz')
client.originate(
'hardlimit',
1000,
'bootstrap1',
contract,
['--init', '3', '--burn-cap', '0.341'],
)
client.bake('bootstrap1', BAKE_ARGS)
client.transfer(10, 'bootstrap1', 'hardlimit', ['--arg', 'Unit'])
client.bake('bootstrap1', BAKE_ARGS)
client.transfer(10, 'bootstrap1', 'hardlimit', ['--arg', 'Unit'])
client.bake('bootstrap1', BAKE_ARGS)
def test_transfers_bootstraps5_bootstrap1(self, client: Client):
assert client.get_balance('bootstrap5') == 4000000
client.transfer(
400000,
'bootstrap5',
'bootstrap1',
['--fee', '0', '--force-low-fee'],
)
client.bake('bootstrap1', BAKE_ARGS)
client.transfer(
400000,
'bootstrap1',
'bootstrap5',
['--fee', '0', '--force-low-fee'],
)
client.bake('bootstrap1', BAKE_ARGS)
assert client.get_balance('bootstrap5') == 4000000
def test_activate_accounts(self, client: Client, session):
account = f"{ACCOUNT_PATH}/king_commitment.json"
session['keys'] += ['king', 'queen']
client.activate_account(session['keys'][3], account)
client.bake('bootstrap1', BAKE_ARGS)
account = f"{ACCOUNT_PATH}/queen_commitment.json"
client.activate_account(session['keys'][4], account)
client.bake('bootstrap1', BAKE_ARGS)
assert client.get_balance(session['keys'][3]) == 23932454.669343
assert client.get_balance(session['keys'][4]) == 72954577.464032
def test_transfer_king_queen(self, client: Client, session):
keys = session['keys']
client.transfer(10, keys[3], keys[4], TRANSFER_ARGS)
client.bake('bootstrap1', BAKE_ARGS)
def test_duplicate_alias(self, client: Client):
client.add_address("baz", "foo", force=True)
show_foo = client.show_address("foo", show_secret=True)
assert show_foo.secret_key is not None
class TestRememberContract:
@pytest.mark.parametrize(
"contract_name,non_originated_contract_address",
[
("test", "KT1BuEZtb68c1Q4yjtckcNjGELqWt56Xyesc"),
("test-2", "KT1TZCh8fmUbuDqFxetPWC2fsQanAHzLx4W9"),
],
)
def test_non_originated_contract_no_forcing_not_saved_before(
self,
client,
contract_name,
non_originated_contract_address,
):
client.remember_contract(contract_name, non_originated_contract_address)
# As it is always the same client, the contracts have been saved
# before
@pytest.mark.parametrize(
"contract_name,non_originated_contract_address",
[
("test", "KT1BuEZtb68c1Q4yjtckcNjGELqWt56Xyesc"),
("test-2", "KT1TZCh8fmUbuDqFxetPWC2fsQanAHzLx4W9"),
],
)
def test_non_originated_contract_with_forcing_and_saved_before(
self,
client,
contract_name,
non_originated_contract_address,
):
client.remember_contract(
contract_name, non_originated_contract_address, force=True
)
# As it is always the same client, the contracts have been saved
# before
@pytest.mark.parametrize(
"contract_name,non_originated_contract_address",
[
("test", "KT1BuEZtb68c1Q4yjtckcNjGELqWt56Xyesc"),
("test-2", "KT1TZCh8fmUbuDqFxetPWC2fsQanAHzLx4W9"),
],
)
def test_non_originated_contract_no_forcing_and_saved_before(
self,
client,
contract_name,
non_originated_contract_address,
):
expected_error = f"The contract alias {contract_name} already exists"
with assert_run_failure(expected_error):
client.remember_contract(
contract_name, non_originated_contract_address, force=False
)
# Test operation size.
def test_operation_size_originate_byte_contract(self, client: Client):
contract = path.join(CONTRACT_PATH, 'opcodes', 'bytes.tz')
client.remember('bytes', contract)
client.typecheck(contract)
client.originate(
'bytes', 1000, 'bootstrap1', contract, ['--burn-cap', '0.295']
)
client.bake('bootstrap1', BAKE_ARGS)
# Test that operations under 16KB can be injected in the node.
def test_operation_size_small(self, client: Client):
bytes_arg = "0x" + ("00" * 6 * 1024) # 6 KB of data.
client.transfer(10, 'bootstrap1', 'bytes', ['--arg', bytes_arg])
client.bake('bootstrap1', BAKE_ARGS)
# Test that operations between 16KB and 32KB can be injected in the node.
def test_operation_size_medium(self, client: Client):
bytes_arg = "0x" + ("00" * 24 * 1024) # 24 KB of data.
client.transfer(10, 'bootstrap1', 'bytes', ['--arg', bytes_arg])
client.bake('bootstrap1', BAKE_ARGS)
# Test that operations above 32KB fail to be injected.
def test_operation_size_oversized(self, client: Client):
bytes_arg = "0x" + ("00" * 36 * 1024) # 36 KB of data.
expected_error = "Oversized operation"
with assert_run_failure(expected_error):
client.transfer(10, 'bootstrap1', 'bytes', ['--arg', bytes_arg])
# Test operation size with various data types.
def test_operation_size_originate_munch_contract(self, client: Client):
contract = path.join(CONTRACT_PATH, 'opcodes', 'munch.tz')
client.remember('munch', contract)
client.typecheck(contract)
client.originate(
'munch', 1000, 'bootstrap1', contract, ['--burn-cap', '0.295']
)
client.bake('bootstrap1', BAKE_ARGS)
# Test that a large operation under 32KB can be injected in the node
# (variant using a lambda with deep nesting).
def test_operation_size_with_lambda_ok(self, client: Client):
# Each pair of braces is encoded on 5 bytes so this takes
# 5 * 6 * 1024 = 30 KB < 32KB
big_arg = ("{" * 6 * 1024) + ("}" * 6 * 1024)
client.transfer(
10,
'bootstrap1',
'munch',
['--arg', big_arg, "--entrypoint", "lambda"],
)
client.bake('bootstrap1', BAKE_ARGS)
# Test that a large operation over 32KB cannot be injected in the node,
# and the error is not a stack overflow
# (variant using a lambda with deep nesting).
def test_operation_size_with_lambda_fail(self, client: Client):
# Each pair of braces is encoded on 5 bytes so this takes
# 5 * 7 * 1024 = 35 KB > 32KB
big_arg = ("{" * 7 * 1024) + ("}" * 7 * 1024)
expected_error = "Oversized operation"
with assert_run_failure(expected_error):
client.transfer(
10,
'bootstrap1',
'munch',
['--arg', big_arg, "--entrypoint", "lambda"],
)
# Test that a large operation under 32KB can be injected in the node
# (variant using a long list).
def test_operation_size_with_list_ok(self, client: Client):
# Each element in the list takes 2 bytes so about 30KB in total
big_arg = "{" + ("0; " * 15 * 1024) + "}"
client.transfer(
10,
'bootstrap1',
'munch',
['--arg', big_arg, "--entrypoint", "list_nat"],
)
client.bake('bootstrap1', BAKE_ARGS)
def test_operation_size_with_list_syntax_error(self, client: Client):
# Each element in the list takes 2 bytes so about 30KB in total
big_arg = "{" + ("0; " * 15 * 1024) + "'foo;'" + "}"
expected_error = "transfer simulation failed"
with assert_run_failure(expected_error):
client.transfer(
10,
'bootstrap1',
'munch',
['--arg', big_arg, "--entrypoint", "list_nat"],
)
def test_operation_size_with_list_ill_typed(self, client: Client):
# Each element in the list takes 2 bytes so about 30KB in total
big_arg = "{" + ("0; " * 15 * 1024) + "Unit;" + "}"
expected_error = "transfer simulation failed"
with assert_run_failure(expected_error):
client.transfer(
10,
'bootstrap1',
'munch',
['--arg', big_arg, "--entrypoint", "list_nat"],
)
# Test that a large operation over 32KB cannot be injected in the node,
# and the error is not a stack overflow
# (variant using a long list).
def test_operation_size_with_list_fail(self, client: Client):
# Each element in the list takes 2 bytes so about 34KB in total
big_arg = "{" + ("0; " * 17 * 1024) + "}"
expected_error = "Oversized operation"
with assert_run_failure(expected_error):
client.transfer(
10,
'bootstrap1',
'munch',
['--arg', big_arg, "--entrypoint", "list_nat"],
)
# Test that a large operation under 32KB can be injected in the node
# (variant using a big nat).
def test_operation_size_with_nat_ok(self, client: Client):
# The encoding for nat uses a byte to encode 7 bits of the number
# so the size of 2 ** (7 * n) is about n bytes
big_arg = 2 ** (7 * 30 * 1024)
client.transfer(
10,
'bootstrap1',
'munch',
['--arg', f"{big_arg}", "--entrypoint", "nat"],
)
client.bake('bootstrap1', BAKE_ARGS)
# Test that a large operation over 32KB cannot be injected in the node,
# and the error is not a stack overflow
# (variant using a big nat).
def test_operation_size_with_nat_fail(self, client: Client):
# The encoding for nat uses a byte to encode 7 bits of the number
# so the size of 2 ** (7 * n) is about n bytes
big_arg = 2 ** (7 * 33 * 1024)
expected_error = "Oversized operation"
with assert_run_failure(expected_error):
client.transfer(
10,
'bootstrap1',
'munch',
['--arg', f"{big_arg}", "--entrypoint", "nat"],
)
| 36.956088 | 80 | 0.576344 |
7940ba49d235f001cc224d6d6230a4a4241fad3e | 9,675 | py | Python | tests/torch/sparsity/rb/test_algo.py | MaximProshin/nncf | 2290d2f4cebcf6749e419dc76850e7bd8b7d8da1 | [
"Apache-2.0"
] | 136 | 2020-06-01T14:03:31.000Z | 2020-10-28T06:10:50.000Z | tests/torch/sparsity/rb/test_algo.py | MaximProshin/nncf | 2290d2f4cebcf6749e419dc76850e7bd8b7d8da1 | [
"Apache-2.0"
] | 133 | 2020-05-26T13:48:04.000Z | 2020-10-28T05:25:55.000Z | tests/torch/sparsity/rb/test_algo.py | MaximProshin/nncf | 2290d2f4cebcf6749e419dc76850e7bd8b7d8da1 | [
"Apache-2.0"
] | 36 | 2020-05-28T08:18:39.000Z | 2020-10-27T14:46:58.000Z | """
Copyright (c) 2019-2022 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import pytest
import torch
from copy import deepcopy
from pytest import approx
from torch import nn
from nncf.api.compression import CompressionStage
from nncf.config import NNCFConfig
from nncf.torch.module_operations import UpdateWeight
from nncf.torch.sparsity.rb.algo import RBSparsityController
from nncf.torch.sparsity.rb.layers import RBSparsifyingWeight
from nncf.torch.sparsity.rb.loss import SparseLoss, SparseLossForPerLayerSparsity
from nncf.common.sparsity.schedulers import PolynomialSparsityScheduler
from tests.torch.helpers import MockModel, BasicConvTestModel, TwoConvTestModel, \
create_compressed_model_and_algo_for_test, check_correct_nncf_modules_replacement, get_empty_config
def get_basic_sparsity_config(input_sample_size=None,
sparsity_init=0.02, sparsity_target=0.5, sparsity_target_epoch=2,
sparsity_freeze_epoch=3):
if input_sample_size is None:
input_sample_size = [1, 1, 4, 4]
config = NNCFConfig()
config.update({
"model": "basic_sparse_conv",
"input_info":
{
"sample_size": input_sample_size,
},
"compression":
{
"algorithm": "rb_sparsity",
"sparsity_init": sparsity_init,
"params":
{
"schedule": "polynomial",
"sparsity_target": sparsity_target,
"sparsity_target_epoch": sparsity_target_epoch,
"sparsity_freeze_epoch": sparsity_freeze_epoch
},
}
})
return config
def test_can_load_sparse_algo__with_defaults():
model = BasicConvTestModel()
config = get_basic_sparsity_config()
sparse_model, compression_ctrl = create_compressed_model_and_algo_for_test(deepcopy(model), config)
assert isinstance(compression_ctrl, RBSparsityController)
_, sparse_model_conv = check_correct_nncf_modules_replacement(model, sparse_model)
for sparse_module in sparse_model_conv.values():
store = []
for op in sparse_module.pre_ops.values():
if isinstance(op, UpdateWeight) and isinstance(op.operand, RBSparsifyingWeight):
assert torch.allclose(op.operand.binary_mask, torch.ones_like(sparse_module.weight))
assert not op.operand.frozen
assert op.__class__.__name__ not in store
store.append(op.__class__.__name__)
def test_can_set_sparse_layers_to_loss():
model = BasicConvTestModel()
config = get_basic_sparsity_config()
_, compression_ctrl = create_compressed_model_and_algo_for_test(model, config)
loss = compression_ctrl.loss
assert isinstance(loss, SparseLoss)
# pylint: disable=protected-access
for layer in loss._sparse_layers:
assert isinstance(layer, RBSparsifyingWeight)
def test_sparse_algo_does_not_replace_not_conv_layer():
class TwoLayersTestModel(nn.Module):
def __init__(self):
super().__init__()
self.conv = nn.Conv2d(1, 1, 1)
self.bn = nn.BatchNorm2d(1)
def forward(self, x):
return self.bn(self.conv(x))
model = TwoLayersTestModel()
config = get_basic_sparsity_config()
_, compression_ctrl = create_compressed_model_and_algo_for_test(model, config)
assert isinstance(compression_ctrl, RBSparsityController)
for m in compression_ctrl.sparsified_module_info:
assert isinstance(m.operand, RBSparsifyingWeight)
def test_can_create_sparse_loss_and_scheduler():
model = BasicConvTestModel()
config = get_basic_sparsity_config()
_, compression_ctrl = create_compressed_model_and_algo_for_test(model, config)
scheduler = compression_ctrl.scheduler
scheduler.epoch_step()
loss = compression_ctrl.loss
assert isinstance(loss, SparseLoss)
assert not loss.disabled
assert loss.target_sparsity_rate == approx(0.02)
assert loss.p == approx(0.05)
assert isinstance(scheduler, PolynomialSparsityScheduler)
assert scheduler.current_sparsity_level == approx(0.02)
assert scheduler.target_level == approx(0.5)
assert scheduler.target_epoch == 2
assert scheduler.freeze_epoch == 3
def test_sparse_algo_can_calc_sparsity_rate__for_basic_model():
model = BasicConvTestModel()
config = get_basic_sparsity_config()
_, compression_ctrl = create_compressed_model_and_algo_for_test(model, config)
nncf_stats = compression_ctrl.statistics()
sparse_model_stats = nncf_stats.rb_sparsity.model_statistics
assert sparse_model_stats.sparsity_level == (
1 - (model.nz_weights_num + model.nz_bias_num) / (model.weights_num + model.bias_num)
)
assert sparse_model_stats.sparsity_level_for_layers == 1 - model.nz_weights_num / model.weights_num
assert len(compression_ctrl.sparsified_module_info) == 1
def test_sparse_algo_can_collect_sparse_layers():
model = TwoConvTestModel()
config = get_basic_sparsity_config()
_, compression_ctrl = create_compressed_model_and_algo_for_test(model, config)
assert len(compression_ctrl.sparsified_module_info) == 2
def test_sparse_algo_can_calc_sparsity_rate__for_2_conv_model():
model = TwoConvTestModel()
config = get_basic_sparsity_config()
_, compression_ctrl = create_compressed_model_and_algo_for_test(model, config)
nncf_stats = compression_ctrl.statistics()
sparse_model_stats = nncf_stats.rb_sparsity.model_statistics
assert pytest.approx(sparse_model_stats.sparsity_level) == (
1 - (model.nz_weights_num + model.nz_bias_num) / (model.weights_num + model.bias_num)
)
assert sparse_model_stats.sparsity_level_for_layers == 1 - model.nz_weights_num / model.weights_num
def test_scheduler_can_do_epoch_step__with_rb_algo():
config = NNCFConfig()
config['input_info'] = [{"sample_size": [1, 1, 32, 32]}]
config['compression'] = {
'algorithm': 'rb_sparsity',
'sparsity_init': 0.2,
"params": {
'schedule': 'polynomial',
'power': 1,
'sparsity_target_epoch': 2,
'sparsity_target': 0.6,
'sparsity_freeze_epoch': 3
}
}
_, compression_ctrl = create_compressed_model_and_algo_for_test(BasicConvTestModel(), config)
scheduler = compression_ctrl.scheduler
loss = compression_ctrl.loss
assert pytest.approx(loss.target_sparsity_rate) == 0.2
assert not loss.disabled
for module_info in compression_ctrl.sparsified_module_info:
assert not module_info.operand.frozen
scheduler.epoch_step()
assert pytest.approx(loss.target_sparsity_rate, abs=1e-3) == 0.2
assert pytest.approx(loss().item(), abs=1e-3) == 16
assert not loss.disabled
scheduler.epoch_step()
assert pytest.approx(loss.target_sparsity_rate, abs=1e-3) == 0.4
assert pytest.approx(loss().item(), abs=1e-3) == 64
assert not loss.disabled
scheduler.epoch_step()
assert pytest.approx(loss.target_sparsity_rate, abs=1e-3) == 0.6
assert pytest.approx(loss().item(), abs=1e-3) == 144
assert not loss.disabled
scheduler.epoch_step()
assert loss.disabled
assert loss.target_sparsity_rate == 0.6
assert loss() == 0
for module_info in compression_ctrl.sparsified_module_info:
assert module_info.operand.frozen
def test_create_rb_algo_with_per_layer_loss():
config = get_empty_config()
config['compression'] = {'algorithm': 'rb_sparsity', "params": {"sparsity_level_setting_mode": 'local'}}
_, compression_ctrl = create_compressed_model_and_algo_for_test(MockModel(), config)
# pylint: disable=protected-access
assert isinstance(compression_ctrl._loss, SparseLossForPerLayerSparsity)
def test_rb_sparsity__can_set_sparsity_level_for_module():
config = get_empty_config()
config['compression'] = {'algorithm': 'rb_sparsity', "params": {"sparsity_level_setting_mode": 'local'}}
_, compression_ctrl = create_compressed_model_and_algo_for_test(BasicConvTestModel(), config)
# pylint: disable=protected-access
assert list(compression_ctrl._loss.per_layer_target.values())[0] == 1
compression_ctrl.set_sparsity_level(0.7, compression_ctrl.sparsified_module_info[0])
assert list(compression_ctrl._loss.per_layer_target.values())[0] == pytest.approx(0.3)
def test_create_rb_algo_with_local_sparsity_mode():
config = get_empty_config()
config['compression'] = {'algorithm': 'rb_sparsity', "params": {"sparsity_level_setting_mode": 'local'}}
_, compression_ctrl = create_compressed_model_and_algo_for_test(MockModel(), config)
assert compression_ctrl.compression_stage() == CompressionStage.FULLY_COMPRESSED
def test_can_set_compression_rate_for_rb_sparsity_algo():
config = get_basic_sparsity_config()
_, compression_ctrl = create_compressed_model_and_algo_for_test(BasicConvTestModel(), config)
compression_ctrl.compression_rate = 0.65
assert pytest.approx(compression_ctrl.compression_rate, 1e-2) == 0.65
| 38.855422 | 108 | 0.725995 |