content (string, 7-928k chars) | avg_line_length (float64, 3.5-33.8k) | max_line_length (int64, 6-139k) | alphanum_fraction (float64, 0.08-0.96) | licenses (sequence) | repository_name (string, 7-104 chars) | path (string, 4-230 chars) | size (int64, 7-928k) | lang (1 class: Python)
---|---|---|---|---|---|---|---|---|
#!/usr/bin/env python3
import os
from aws_cdk import core as cdk
# For consistency with TypeScript code, `cdk` is the preferred import name for
# the CDK's core module. The following line also imports it as `core` for use
# with examples from the CDK Developer's Guide, which are in the process of
# being updated to use `cdk`. You may delete this import if you don't need it.
from aws_cdk import core
from aws_securityhub_falco_ecs_eks_integration.aws_securityhub_falco_ecs_eks_integration_stack import AwsSecurityhubFalcoEcsEksIntegrationStack
app = core.App()
AwsSecurityhubFalcoEcsEksIntegrationStack(app, "AwsSecurityhubFalcoEcsEksIntegrationStack",
# If you don't specify 'env', this stack will be environment-agnostic.
# Account/Region-dependent features and context lookups will not work,
# but a single synthesized template can be deployed anywhere.
# Uncomment the next line to specialize this stack for the AWS Account
# and Region that are implied by the current CLI configuration.
#env=core.Environment(account=os.getenv('CDK_DEFAULT_ACCOUNT'), region=os.getenv('CDK_DEFAULT_REGION')),
# Uncomment the next line if you know exactly what Account and Region you
# want to deploy the stack to.
#env=core.Environment(account='123456789012', region='us-east-1'),
# For more information, see https://docs.aws.amazon.com/cdk/latest/guide/environments.html
)
app.synth()
| 41.028571 | 143 | 0.769499 | ["MIT-0"] | aws-samples/aws-securityhub-falco-ecs-eks-integration | app.py | 1,436 | Python |
#
# Copyright(c) 2019-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
import logging
from tests import conftest
from core.test_run import TestRun
from api.cas import git
from api.cas import cas_module
from test_utils import os_utils
from test_utils.output import CmdException
def rsync_opencas_sources():
TestRun.LOGGER.info("Copying Open CAS repository to DUT")
TestRun.executor.rsync_to(
f"{TestRun.usr.repo_dir}/",
f"{TestRun.usr.working_dir}/",
exclude_list=["test/functional/results/"],
delete=True)
def _clean_opencas_repo():
TestRun.LOGGER.info("Cleaning Open CAS repo")
output = TestRun.executor.run(
f"cd {TestRun.usr.working_dir} && "
"make distclean")
if output.exit_code != 0:
raise CmdException("make distclean command executed with nonzero status", output)
def build_opencas():
TestRun.LOGGER.info("Building Open CAS")
output = TestRun.executor.run(
f"cd {TestRun.usr.working_dir} && "
"./configure && "
"make -j")
if output.exit_code != 0:
raise CmdException("Make command executed with nonzero status", output)
def install_opencas():
TestRun.LOGGER.info("Installing Open CAS")
output = TestRun.executor.run(
f"cd {TestRun.usr.working_dir} && "
f"make install")
if output.exit_code != 0:
raise CmdException("Error while installing Open CAS", output)
TestRun.LOGGER.info("Check if casadm is properly installed.")
output = TestRun.executor.run("casadm -V")
if output.exit_code != 0:
raise CmdException("'casadm -V' command returned an error", output)
else:
TestRun.LOGGER.info(output.stdout)
def set_up_opencas(version=None):
_clean_opencas_repo()
if version:
git.checkout_cas_version(version)
build_opencas()
install_opencas()
def uninstall_opencas():
TestRun.LOGGER.info("Uninstalling Open CAS")
output = TestRun.executor.run("casadm -V")
if output.exit_code != 0:
raise CmdException("Open CAS is not properly installed", output)
else:
        output = TestRun.executor.run(
f"cd {TestRun.usr.working_dir} && "
f"make uninstall")
if output.exit_code != 0:
raise CmdException("There was an error during uninstall process", output)
def reinstall_opencas(version=None):
if check_if_installed():
uninstall_opencas()
set_up_opencas(version)
def check_if_installed():
TestRun.LOGGER.info("Check if Open-CAS-Linux is installed")
output = TestRun.executor.run("which casadm")
modules_loaded = os_utils.is_kernel_module_loaded(cas_module.CasModule.cache.value)
if output.exit_code == 0 and modules_loaded:
TestRun.LOGGER.info("CAS is installed")
return True
TestRun.LOGGER.info("CAS not installed")
return False
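# Editorial sketch (not part of the original module): a typical test-setup flow,
# assuming a configured TestRun executor and repo paths, would be
#   rsync_opencas_sources()
#   reinstall_opencas(version=None)   # uninstalls first when casadm and the cache module are present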
| 28.303922 | 89 | 0.68133 | ["BSD-3-Clause"] | ArkadiuszNeumann/open-cas-linux | test/functional/api/cas/installer.py | 2,887 | Python |
#
# Pyserini: Reproducible IR research with sparse and dense representations
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import json
import os
import sys
from tqdm import tqdm
from pyserini.dsearch import SimpleDenseSearcher
from pyserini.query_iterator import get_query_iterator, TopicsFormat
from pyserini.output_writer import get_output_writer, OutputFormat
from pyserini.search import ImpactSearcher, SimpleSearcher
from pyserini.hsearch import HybridSearcher
from pyserini.dsearch.__main__ import define_dsearch_args, init_query_encoder
from pyserini.search.__main__ import define_search_args, set_bm25_parameters
# Fixes this error: "OMP: Error #15: Initializing libomp.a, but found libomp.dylib already initialized."
# https://stackoverflow.com/questions/53014306/error-15-initializing-libiomp5-dylib-but-found-libiomp5-dylib-already-initial
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'
def define_fusion_args(parser):
parser.add_argument('--alpha', type=float, metavar='num', required=False, default=0.1,
help="alpha for hybrid search")
parser.add_argument('--hits', type=int, required=False, default=10, help='number of hits from dense and sparse')
parser.add_argument('--normalization', action='store_true', required=False, help='hybrid score with normalization')
parser.add_argument('--weight-on-dense', action='store_true', required=False, help='weight on dense part')
def parse_args(parser, commands):
# Divide argv by commands
split_argv = [[]]
for c in sys.argv[1:]:
if c in commands.choices:
split_argv.append([c])
else:
split_argv[-1].append(c)
# Initialize namespace
args = argparse.Namespace()
for c in commands.choices:
setattr(args, c, None)
# Parse each command
parser.parse_args(split_argv[0], namespace=args) # Without command
for argv in split_argv[1:]: # Commands
n = argparse.Namespace()
setattr(args, argv[0], n)
parser.parse_args(argv, namespace=n)
return args
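# Editorial sketch: parse_args() splits sys.argv on the sub-command names, so a
# hypothetical invocation such as
#   python -m pyserini.hsearch dense  --index <dense-index> \
#                              sparse --index <sparse-index> \
#                              fusion --alpha 0.1 \
#                              run    --topics <topics> --output run.hybrid.txt
# yields one argparse.Namespace per sub-command, reachable below as args.dense,
# args.sparse, args.fusion and args.run. Index and topic names above are placeholders.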
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Conduct a hybrid search on dense+sparse indexes.')
commands = parser.add_subparsers(title='sub-commands')
dense_parser = commands.add_parser('dense')
define_dsearch_args(dense_parser)
sparse_parser = commands.add_parser('sparse')
define_search_args(sparse_parser)
fusion_parser = commands.add_parser('fusion')
define_fusion_args(fusion_parser)
run_parser = commands.add_parser('run')
run_parser.add_argument('--topics', type=str, metavar='topic_name', required=False,
help="Name of topics. Available: msmarco-passage-dev-subset.")
run_parser.add_argument('--hits', type=int, metavar='num', required=False, default=1000, help="Number of hits.")
run_parser.add_argument('--topics-format', type=str, metavar='format', default=TopicsFormat.DEFAULT.value,
help=f"Format of topics. Available: {[x.value for x in list(TopicsFormat)]}")
run_parser.add_argument('--output-format', type=str, metavar='format', default=OutputFormat.TREC.value,
help=f"Format of output. Available: {[x.value for x in list(OutputFormat)]}")
run_parser.add_argument('--output', type=str, metavar='path', required=False, help="Path to output file.")
run_parser.add_argument('--max-passage', action='store_true',
default=False, help="Select only max passage from document.")
run_parser.add_argument('--max-passage-hits', type=int, metavar='num', required=False, default=100,
help="Final number of hits when selecting only max passage.")
run_parser.add_argument('--max-passage-delimiter', type=str, metavar='str', required=False, default='#',
help="Delimiter between docid and passage id.")
run_parser.add_argument('--batch-size', type=int, metavar='num', required=False,
default=1, help="Specify batch size to search the collection concurrently.")
run_parser.add_argument('--threads', type=int, metavar='num', required=False,
default=1, help="Maximum number of threads to use.")
args = parse_args(parser, commands)
query_iterator = get_query_iterator(args.run.topics, TopicsFormat(args.run.topics_format))
topics = query_iterator.topics
query_encoder = init_query_encoder(args.dense.encoder,
args.dense.tokenizer,
args.run.topics,
args.dense.encoded_queries,
args.dense.device,
args.dense.query_prefix)
if os.path.exists(args.dense.index):
# create searcher from index directory
dsearcher = SimpleDenseSearcher(args.dense.index, query_encoder)
else:
# create searcher from prebuilt index name
dsearcher = SimpleDenseSearcher.from_prebuilt_index(args.dense.index, query_encoder)
if not dsearcher:
exit()
if os.path.exists(args.sparse.index):
# create searcher from index directory
if args.sparse.impact:
ssearcher = ImpactSearcher(args.sparse.index, args.sparse.encoder, args.sparse.min_idf)
else:
ssearcher = SimpleSearcher(args.sparse.index)
else:
# create searcher from prebuilt index name
if args.sparse.impact:
ssearcher = ImpactSearcher.from_prebuilt_index(args.sparse.index, args.sparse.encoder, args.sparse.min_idf)
else:
ssearcher = SimpleSearcher.from_prebuilt_index(args.sparse.index)
if not ssearcher:
exit()
set_bm25_parameters(ssearcher, args.sparse.index, args.sparse.k1, args.sparse.b)
if args.sparse.language != 'en':
ssearcher.set_language(args.sparse.language)
hsearcher = HybridSearcher(dsearcher, ssearcher)
if not hsearcher:
exit()
# build output path
output_path = args.run.output
print(f'Running {args.run.topics} topics, saving to {output_path}...')
tag = 'hybrid'
output_writer = get_output_writer(output_path, OutputFormat(args.run.output_format), 'w',
max_hits=args.run.hits, tag=tag, topics=topics,
use_max_passage=args.run.max_passage,
max_passage_delimiter=args.run.max_passage_delimiter,
max_passage_hits=args.run.max_passage_hits)
with output_writer:
batch_topics = list()
batch_topic_ids = list()
for index, (topic_id, text) in enumerate(tqdm(query_iterator, total=len(topics.keys()))):
if args.run.batch_size <= 1 and args.run.threads <= 1:
hits = hsearcher.search(text, args.fusion.hits, args.run.hits, args.fusion.alpha, args.fusion.normalization, args.fusion.weight_on_dense)
results = [(topic_id, hits)]
else:
batch_topic_ids.append(str(topic_id))
batch_topics.append(text)
if (index + 1) % args.run.batch_size == 0 or \
index == len(topics.keys()) - 1:
results = hsearcher.batch_search(
batch_topics, batch_topic_ids, args.fusion.hits, args.run.hits, args.run.threads,
args.fusion.alpha, args.fusion.normalization, args.fusion.weight_on_dense)
results = [(id_, results[id_]) for id_ in batch_topic_ids]
batch_topic_ids.clear()
batch_topics.clear()
else:
continue
for topic, hits in results:
output_writer.write(topic, hits)
results.clear()
| 45.691892 | 153 | 0.655507 | ["Apache-2.0"] | ArthurChen189/pyserini | pyserini/hsearch/__main__.py | 8,453 | Python |
import tensorflow_federated as tff
def download_and_save_stackoverflow():
tff.simulation.datasets.stackoverflow.load_data(cache_dir='./')
def download_and_save_word_counts():
tff.simulation.datasets.stackoverflow.load_word_counts(cache_dir='./')
def download_and_save_tag_counts():
tff.simulation.datasets.stackoverflow.load_tag_counts(cache_dir='./')
"""
#with Tensorflow dependencies, you can run this python script to process the data from Tensorflow Federated locally:
python dataset.py
Before downloading, please install TFF as its official instruction:
pip install --upgrade tensorflow_federated
"""
if __name__ == "__main__":
download_and_save_stackoverflow()
download_and_save_word_counts()
download_and_save_tag_counts()
| 29.307692 | 116 | 0.799213 | ["Apache-2.0"] | 11asdad/FedML | data/stackoverflow/dataset.py | 762 | Python |
from django.conf import settings
from django.urls import re_path
from django.http import HttpResponsePermanentRedirect as perma_redirect
from django.urls import reverse
from django.views.generic.base import TemplateView
urlpatterns = [
re_path(
r'^about$',
TemplateView.as_view(template_name='pages/about.lhtml'),
name='pages.about',
),
re_path(
r'^google1f3e37b7351799a5\.html$',
TemplateView.as_view(template_name='pages/google_webmaster_verification.html'),
),
re_path(
r'^google231a41e803e464e9\.html$',
TemplateView.as_view(template_name='pages/google_search_console.html'),
),
re_path(
r'^review_guide$',
TemplateView.as_view(template_name='pages/review_guide.html'),
name='pages.review_guide',
),
re_path(
r'^shield-study-2/',
lambda req: perma_redirect(settings.SHIELD_STUDIES_SUPPORT_URL),
),
re_path(
r'^shield_study_\d{1,2}$',
lambda req: perma_redirect(settings.SHIELD_STUDIES_SUPPORT_URL),
),
re_path(
r'^pages/review_guide$',
lambda req: perma_redirect(reverse('pages.review_guide')),
),
re_path(
r'^pages/developer_agreement$',
lambda req: perma_redirect(reverse('devhub.docs', args=['policies/agreement'])),
),
re_path(
r'^pages/validation$', lambda req: perma_redirect(settings.VALIDATION_FAQ_URL)
),
re_path(
r'^pioneer$',
TemplateView.as_view(template_name='pages/pioneer.html'),
name='pages.pioneer',
),
]
| 30.461538 | 88 | 0.659091 | ["BSD-3-Clause"] | Exhorder6/addons-server | src/olympia/pages/urls.py | 1,584 | Python |
from __future__ import absolute_import
from datetime import timedelta
from django.utils import timezone
from sentry.models import GroupTagValue, TagKey, TagValue
from sentry.testutils import TestCase
class GroupTagExportTest(TestCase):
def test_simple(self):
key, value = 'foo', 'bar'
# Drop microsecond value for MySQL
now = timezone.now().replace(microsecond=0)
project = self.create_project()
group = self.create_group(project=project)
TagKey.objects.create(project=project, key=key)
TagValue.objects.create(
project=project,
key=key,
value=value,
)
group_tag_value = GroupTagValue.objects.create(
project=project,
group=group,
key=key,
value=value,
times_seen=1,
first_seen=now - timedelta(hours=1),
last_seen=now,
)
self.login_as(user=self.user)
url = '/{}/{}/issues/{}/tags/{}/export/'.format(
project.organization.slug, project.slug, group.id, key
)
response = self.client.get(url)
assert response.status_code == 200
assert response.streaming
assert response['Content-Type'] == 'text/csv'
rows = list(response.streaming_content)
for idx, row in enumerate(rows):
row = row.decode('utf-8')
assert row.endswith(u'\r\n')
bits = row[:-2].split(',')
if idx == 0:
assert bits == ['value', 'times_seen', 'last_seen', 'first_seen']
else:
assert bits[0] == value
assert bits[1] == '1'
assert bits[2] == group_tag_value.last_seen.strftime('%Y-%m-%dT%H:%M:%S.%fZ')
assert bits[3] == group_tag_value.first_seen.strftime('%Y-%m-%dT%H:%M:%S.%fZ')
| 32.362069 | 94 | 0.571124 | ["BSD-3-Clause"] | GSA/sentry | tests/sentry/web/frontend/test_group_tag_export.py | 1,877 | Python |
ifconfig -a | grep PROMISC
cat /var/log/messages |grep promisc
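# Editorial note: scapy's promiscping() sends ARP who-has probes addressed to a MAC that
# a non-promiscuous NIC would normally filter out; hosts that answer anyway are likely
# sniffing in promiscuous mode.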
#!/usr/bin/python

import sys
from scapy.all import promiscping

if len(sys.argv) < 2:
    print sys.argv[0] + " <net>"
    sys.exit()

promiscping(sys.argv[1])
| 47.4 | 171 | 0.691983 | ["MIT"] | mumbo-pro/network-penetration | 05_tcp_ip_tricks/Sniffer Detection.py | 237 | Python |
# -*- coding: utf-8 -*-
from functools import cache
INPUT = 33100000
def sigma_pentagonal_numbers(limit):
"""
>>> list(sigma_pentagonal_numbers(16))
[1, 2, 5, 7, 12, 15]
"""
n = 1
p = 1
while p <= limit:
yield p
if n > 0:
n = -n
else:
n = -n + 1
p = (3 * n * n - n) // 2
def sigma_sign_generator():
while True:
yield 1
yield 1
yield -1
yield -1
@cache
def presents_for_house(house):
"""
https://math.stackexchange.com/a/22744
>>> presents_for_house(1)
10
>>> presents_for_house(2)
30
>>> presents_for_house(3)
40
>>> presents_for_house(8)
150
>>> presents_for_house(9)
130
"""
if house == 1:
return 10
presents = 0
sign = sigma_sign_generator()
for p in sigma_pentagonal_numbers(house):
n = house - p
if n == 0:
presents += house * next(sign) * 10
else:
presents += presents_for_house(n) * next(sign)
return presents
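# Editorial note: presents_for_house() applies Euler's pentagonal-number recurrence for
# the divisor-sum function sigma(n):
#   sigma(n) = sum_k s_k * sigma(n - g_k),  g_k = 1, 2, 5, 7, 12, 15, ...  with signs +, +, -, -, ...
# where a sigma(0) term contributes n itself; that convention is why the `n == 0` branch
# above adds `house * 10` (presents are 10 * sigma(house)).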
def part1(data):
"""
# Takes too long so commented out
# >>> part1(INPUT)
# 776160
"""
house = 0
presents = 0
max = 0
while presents < data:
house += 1
presents = presents_for_house(house)
if presents > max:
max = presents
print(max)
return house
def part2(data):
"""
>>> part2(INPUT)
786240
"""
upper_limit = INPUT
house = [0] * (upper_limit + 1)
elf = 1
while elf <= upper_limit:
elf_end = min(elf * 50, upper_limit)
for number in range(elf, elf_end + 1, elf):
index = number - 1
house[index] += 11 * elf
if house[index] >= data:
upper_limit = min(number, upper_limit)
elf += 1
for i, value in enumerate(house):
if value >= data:
return i + 1
raise ValueError()
def main():
print(part1(INPUT))
print(part2(INPUT))
if __name__ == "__main__":
main()
| 16.076923 | 58 | 0.507177 | ["Unlicense"] | davweb/advent-of-code | advent/year2015/day20.py | 2,091 | Python |
import datetime
import functools
import os
import subprocess
def get_version(version=None):
"""Return a PEP 440-compliant version number from VERSION."""
version = get_complete_version(version)
# Now build the two parts of the version number:
# main = X.Y[.Z]
# sub = .devN - for pre-alpha releases
# | {a|b|rc}N - for alpha, beta, and rc releases
main = get_main_version(version)
sub = ''
if version[3] == 'alpha' and version[4] == 0:
git_changeset = get_git_changeset()
if git_changeset:
sub = '.dev%s' % git_changeset
elif version[3] != 'final':
mapping = {'alpha': 'a', 'beta': 'b', 'rc': 'rc'}
sub = mapping[version[3]] + str(version[4])
return main + sub
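# Editorial examples (derived from the logic above): get_version((2, 1, 3, 'rc', 1))
# returns '2.1.3rc1', while get_version((2, 1, 0, 'alpha', 0)) returns something like
# '2.1.dev20180901123456', where the .devN suffix is the git changeset timestamp.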
def get_main_version(version=None):
"""Return main version (X.Y[.Z]) from VERSION."""
version = get_complete_version(version)
parts = 2 if version[2] == 0 else 3
return '.'.join(str(x) for x in version[:parts])
def get_complete_version(version=None):
"""
Return a tuple of the django version. If version argument is non-empty,
check for correctness of the tuple provided.
"""
if version is None:
from django import VERSION as version
else:
assert len(version) == 5
assert version[3] in ('alpha', 'beta', 'rc', 'final')
return version
def get_docs_version(version=None):
version = get_complete_version(version)
if version[3] != 'final':
return 'dev'
else:
return '%d.%d' % version[:2]
@functools.lru_cache()
def get_git_changeset():
"""Return a numeric identifier of the latest git changeset.
The result is the UTC timestamp of the changeset in YYYYMMDDHHMMSS format.
This value isn't guaranteed to be unique, but collisions are very unlikely,
so it's sufficient for generating the development version numbers.
"""
repo_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
git_log = subprocess.Popen(
'git log --pretty=format:%ct --quiet -1 HEAD',
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
shell=True, cwd=repo_dir, universal_newlines=True,
)
timestamp = git_log.communicate()[0]
try:
timestamp = datetime.datetime.utcfromtimestamp(int(timestamp))
except ValueError:
return None
return timestamp.strftime('%Y%m%d%H%M%S')
| 29.9125 | 79 | 0.64814 | ["MIT"] | ch1huizong/Scode | django-src/utils/version.py | 2,393 | Python |
import django_filters
from netaddr.core import AddrFormatError
from django.db.models import Q
from extras.filters import CustomFieldFilterSet
from tenancy.models import Tenant
from utilities.filters import NullableModelMultipleChoiceFilter
from .models import (
ConsolePort, ConsoleServerPort, Device, DeviceRole, DeviceType, Interface, InterfaceConnection, Manufacturer,
Platform, PowerOutlet, PowerPort, Rack, RackGroup, RackRole, Site,
)
class SiteFilter(CustomFieldFilterSet, django_filters.FilterSet):
q = django_filters.MethodFilter(
action='search',
label='Search',
)
tenant_id = NullableModelMultipleChoiceFilter(
name='tenant',
queryset=Tenant.objects.all(),
label='Tenant (ID)',
)
tenant = NullableModelMultipleChoiceFilter(
name='tenant',
queryset=Tenant.objects.all(),
to_field_name='slug',
label='Tenant (slug)',
)
class Meta:
model = Site
fields = ['q', 'name', 'facility', 'asn']
def search(self, queryset, value):
qs_filter = Q(name__icontains=value) | Q(facility__icontains=value) | Q(physical_address__icontains=value) | \
Q(shipping_address__icontains=value) | Q(comments__icontains=value)
try:
qs_filter |= Q(asn=int(value.strip()))
except ValueError:
pass
return queryset.filter(qs_filter)
class RackGroupFilter(django_filters.FilterSet):
site_id = django_filters.ModelMultipleChoiceFilter(
name='site',
queryset=Site.objects.all(),
label='Site (ID)',
)
site = django_filters.ModelMultipleChoiceFilter(
name='site__slug',
queryset=Site.objects.all(),
to_field_name='slug',
label='Site (slug)',
)
class Meta:
model = RackGroup
class RackFilter(CustomFieldFilterSet, django_filters.FilterSet):
q = django_filters.MethodFilter(
action='search',
label='Search',
)
site_id = django_filters.ModelMultipleChoiceFilter(
name='site',
queryset=Site.objects.all(),
label='Site (ID)',
)
site = django_filters.ModelMultipleChoiceFilter(
name='site__slug',
queryset=Site.objects.all(),
to_field_name='slug',
label='Site (slug)',
)
group_id = NullableModelMultipleChoiceFilter(
name='group',
queryset=RackGroup.objects.all(),
label='Group (ID)',
)
group = NullableModelMultipleChoiceFilter(
name='group',
queryset=RackGroup.objects.all(),
to_field_name='slug',
label='Group',
)
tenant_id = NullableModelMultipleChoiceFilter(
name='tenant',
queryset=Tenant.objects.all(),
label='Tenant (ID)',
)
tenant = NullableModelMultipleChoiceFilter(
name='tenant',
queryset=Tenant.objects.all(),
to_field_name='slug',
label='Tenant (slug)',
)
role_id = NullableModelMultipleChoiceFilter(
name='role',
queryset=RackRole.objects.all(),
label='Role (ID)',
)
role = NullableModelMultipleChoiceFilter(
name='role',
queryset=RackRole.objects.all(),
to_field_name='slug',
label='Role (slug)',
)
class Meta:
model = Rack
fields = ['u_height']
def search(self, queryset, value):
return queryset.filter(
Q(name__icontains=value) |
Q(facility_id__icontains=value) |
Q(comments__icontains=value)
)
class DeviceTypeFilter(CustomFieldFilterSet, django_filters.FilterSet):
q = django_filters.MethodFilter(
action='search',
label='Search',
)
manufacturer_id = django_filters.ModelMultipleChoiceFilter(
name='manufacturer',
queryset=Manufacturer.objects.all(),
label='Manufacturer (ID)',
)
manufacturer = django_filters.ModelMultipleChoiceFilter(
name='manufacturer__slug',
queryset=Manufacturer.objects.all(),
to_field_name='slug',
label='Manufacturer (slug)',
)
class Meta:
model = DeviceType
fields = ['model', 'part_number', 'u_height', 'is_console_server', 'is_pdu', 'is_network_device',
'subdevice_role']
def search(self, queryset, value):
return queryset.filter(
Q(manufacturer__name__icontains=value) |
Q(model__icontains=value) |
Q(part_number__icontains=value) |
Q(comments__icontains=value)
)
class DeviceFilter(CustomFieldFilterSet, django_filters.FilterSet):
q = django_filters.MethodFilter(
action='search',
label='Search',
)
mac_address = django_filters.MethodFilter(
action='_mac_address',
label='MAC address',
)
site_id = django_filters.ModelMultipleChoiceFilter(
name='rack__site',
queryset=Site.objects.all(),
label='Site (ID)',
)
site = django_filters.ModelMultipleChoiceFilter(
name='rack__site__slug',
queryset=Site.objects.all(),
to_field_name='slug',
label='Site name (slug)',
)
rack_group_id = django_filters.ModelMultipleChoiceFilter(
name='rack__group',
queryset=RackGroup.objects.all(),
label='Rack group (ID)',
)
rack_id = django_filters.ModelMultipleChoiceFilter(
name='rack',
queryset=Rack.objects.all(),
label='Rack (ID)',
)
role_id = django_filters.ModelMultipleChoiceFilter(
name='device_role',
queryset=DeviceRole.objects.all(),
label='Role (ID)',
)
role = django_filters.ModelMultipleChoiceFilter(
name='device_role__slug',
queryset=DeviceRole.objects.all(),
to_field_name='slug',
label='Role (slug)',
)
tenant_id = NullableModelMultipleChoiceFilter(
name='tenant',
queryset=Tenant.objects.all(),
label='Tenant (ID)',
)
tenant = NullableModelMultipleChoiceFilter(
name='tenant',
queryset=Tenant.objects.all(),
to_field_name='slug',
label='Tenant (slug)',
)
device_type_id = django_filters.ModelMultipleChoiceFilter(
name='device_type',
queryset=DeviceType.objects.all(),
label='Device type (ID)',
)
manufacturer_id = django_filters.ModelMultipleChoiceFilter(
name='device_type__manufacturer',
queryset=Manufacturer.objects.all(),
label='Manufacturer (ID)',
)
manufacturer = django_filters.ModelMultipleChoiceFilter(
name='device_type__manufacturer__slug',
queryset=Manufacturer.objects.all(),
to_field_name='slug',
label='Manufacturer (slug)',
)
model = django_filters.ModelMultipleChoiceFilter(
name='device_type__slug',
queryset=DeviceType.objects.all(),
to_field_name='slug',
label='Device model (slug)',
)
platform_id = NullableModelMultipleChoiceFilter(
name='platform',
queryset=Platform.objects.all(),
label='Platform (ID)',
)
platform = NullableModelMultipleChoiceFilter(
name='platform',
queryset=Platform.objects.all(),
to_field_name='slug',
label='Platform (slug)',
)
status = django_filters.BooleanFilter(
name='status',
label='Status',
)
is_console_server = django_filters.BooleanFilter(
name='device_type__is_console_server',
label='Is a console server',
)
is_pdu = django_filters.BooleanFilter(
name='device_type__is_pdu',
label='Is a PDU',
)
is_network_device = django_filters.BooleanFilter(
name='device_type__is_network_device',
label='Is a network device',
)
class Meta:
model = Device
fields = ['name', 'serial', 'asset_tag']
def search(self, queryset, value):
return queryset.filter(
Q(name__icontains=value) |
Q(serial__icontains=value.strip()) |
Q(modules__serial__icontains=value.strip()) |
Q(asset_tag=value.strip()) |
Q(comments__icontains=value)
).distinct()
def _mac_address(self, queryset, value):
value = value.strip()
if not value:
return queryset
try:
return queryset.filter(interfaces__mac_address=value).distinct()
except AddrFormatError:
return queryset.none()
class ConsolePortFilter(django_filters.FilterSet):
device_id = django_filters.ModelMultipleChoiceFilter(
name='device',
queryset=Device.objects.all(),
label='Device (ID)',
)
device = django_filters.ModelMultipleChoiceFilter(
name='device',
queryset=Device.objects.all(),
to_field_name='name',
label='Device (name)',
)
class Meta:
model = ConsolePort
fields = ['name']
class ConsoleServerPortFilter(django_filters.FilterSet):
device_id = django_filters.ModelMultipleChoiceFilter(
name='device',
queryset=Device.objects.all(),
label='Device (ID)',
)
device = django_filters.ModelMultipleChoiceFilter(
name='device',
queryset=Device.objects.all(),
to_field_name='name',
label='Device (name)',
)
class Meta:
model = ConsoleServerPort
fields = ['name']
class PowerPortFilter(django_filters.FilterSet):
device_id = django_filters.ModelMultipleChoiceFilter(
name='device',
queryset=Device.objects.all(),
label='Device (ID)',
)
device = django_filters.ModelMultipleChoiceFilter(
name='device',
queryset=Device.objects.all(),
to_field_name='name',
label='Device (name)',
)
class Meta:
model = PowerPort
fields = ['name']
class PowerOutletFilter(django_filters.FilterSet):
device_id = django_filters.ModelMultipleChoiceFilter(
name='device',
queryset=Device.objects.all(),
label='Device (ID)',
)
device = django_filters.ModelMultipleChoiceFilter(
name='device',
queryset=Device.objects.all(),
to_field_name='name',
label='Device (name)',
)
class Meta:
model = PowerOutlet
fields = ['name']
class InterfaceFilter(django_filters.FilterSet):
device_id = django_filters.ModelMultipleChoiceFilter(
name='device',
queryset=Device.objects.all(),
label='Device (ID)',
)
device = django_filters.ModelMultipleChoiceFilter(
name='device',
queryset=Device.objects.all(),
to_field_name='name',
label='Device (name)',
)
class Meta:
model = Interface
fields = ['name']
class ConsoleConnectionFilter(django_filters.FilterSet):
site = django_filters.MethodFilter(
action='filter_site',
label='Site (slug)',
)
class Meta:
model = ConsoleServerPort
def filter_site(self, queryset, value):
value = value.strip()
if not value:
return queryset
return queryset.filter(cs_port__device__rack__site__slug=value)
class PowerConnectionFilter(django_filters.FilterSet):
site = django_filters.MethodFilter(
action='filter_site',
label='Site (slug)',
)
class Meta:
model = PowerOutlet
def filter_site(self, queryset, value):
value = value.strip()
if not value:
return queryset
return queryset.filter(power_outlet__device__rack__site__slug=value)
class InterfaceConnectionFilter(django_filters.FilterSet):
site = django_filters.MethodFilter(
action='filter_site',
label='Site (slug)',
)
class Meta:
model = InterfaceConnection
def filter_site(self, queryset, value):
value = value.strip()
if not value:
return queryset
return queryset.filter(
Q(interface_a__device__rack__site__slug=value) |
Q(interface_b__device__rack__site__slug=value)
)
| 28.95 | 118 | 0.628999 | ["Apache-2.0"] | BILDQUADRAT/netbox | netbox/dcim/filters.py | 12,159 | Python |
#!/usr/bin/env python
#-*- coding: UTF-8 -*-
###########################################################################
#
# Copyright (c) 2018 www.codingchen.com, Inc. All Rights Reserved
#
##########################################################################
'''
@brief leetcode algorithm
@author chenhui([email protected])
@date 2018/11/07 21:30:33
'''
class Solution:
def findMedianSortedArrays(self, nums1, nums2):
"""
:type nums1: List[int]
:type nums2: List[int]
:rtype: float
"""
odd = (len(nums1) + len(nums2)) % 2
if odd:
half = (len(nums1) + len(nums2)) // 2
else:
half = (len(nums1) + len(nums2)) // 2 - 1
for _ in range(half):
__ = self.pop_num(nums1, nums2)
if odd:
return float(self.pop_num(nums1, nums2))
else:
t1 = self.pop_num(nums1, nums2)
t2 = self.pop_num(nums1, nums2)
return (t1 + t2) / 2
def pop_num(self, nums1, nums2):
if len(nums1) == 0:
return nums2.pop(0)
elif len(nums2) == 0:
return nums1.pop(0)
elif nums1[0] > nums2[0]:
return nums2.pop(0)
elif nums1[0] <= nums2[0]:
return nums1.pop(0)
if __name__ == '__main__':
s = Solution()
nums1 = [1, 2]
nums2 = [3, 4]
    print(s.findMedianSortedArrays(nums1, nums2))
| 29.163265 | 75 | 0.46746 | ["MIT"] | xiaoh12/leetcode | 4+Median+of+Two+Sorted+Arrays/alg.py | 1,429 | Python |
# Copyright (c) 2011 Citrix Systems, Inc.
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""VMRC console drivers."""
import base64
from oslo.config import cfg
from nova import exception
from nova.openstack.common import jsonutils
from nova.virt.vmwareapi import vim_util
vmrc_opts = [
cfg.IntOpt('console_vmrc_port',
default=443,
help="Port for VMware VMRC connections"),
cfg.IntOpt('console_vmrc_error_retries',
default=10,
help="Number of retries for retrieving VMRC information"),
]
CONF = cfg.CONF
CONF.register_opts(vmrc_opts)
class VMRCConsole(object):
"""VMRC console driver with ESX credentials."""
def __init__(self):
super(VMRCConsole, self).__init__()
@property
def console_type(self):
return 'vmrc+credentials'
def get_port(self, context):
"""Get available port for consoles."""
return CONF.console_vmrc_port
def setup_console(self, context, console):
"""Sets up console."""
pass
def teardown_console(self, context, console):
"""Tears down console."""
pass
def init_host(self):
"""Perform console initialization."""
pass
def fix_pool_password(self, password):
"""Encode password."""
# TODO(sateesh): Encrypt pool password
return password
def generate_password(self, vim_session, pool, instance_name):
"""Returns VMRC Connection credentials.
Return string is of the form '<VM PATH>:<ESX Username>@<ESX Password>'.
"""
username, password = pool['username'], pool['password']
vms = vim_session._call_method(vim_util, 'get_objects',
'VirtualMachine', ['name', 'config.files.vmPathName'])
vm_ds_path_name = None
vm_ref = None
for vm in vms:
vm_name = None
ds_path_name = None
for prop in vm.propSet:
if prop.name == 'name':
vm_name = prop.val
elif prop.name == 'config.files.vmPathName':
ds_path_name = prop.val
if vm_name == instance_name:
vm_ref = vm.obj
vm_ds_path_name = ds_path_name
break
if vm_ref is None:
raise exception.InstanceNotFound(instance_id=instance_name)
json_data = jsonutils.dumps({'vm_id': vm_ds_path_name,
'username': username,
'password': password})
return base64.b64encode(json_data)
def is_otp(self):
"""Is one time password or not."""
return False
class VMRCSessionConsole(VMRCConsole):
"""VMRC console driver with VMRC One Time Sessions."""
def __init__(self):
super(VMRCSessionConsole, self).__init__()
@property
def console_type(self):
return 'vmrc+session'
def generate_password(self, vim_session, pool, instance_name):
"""Returns a VMRC Session.
Return string is of the form '<VM MOID>:<VMRC Ticket>'.
"""
vms = vim_session._call_method(vim_util, 'get_objects',
'VirtualMachine', ['name'])
vm_ref = None
for vm in vms:
if vm.propSet[0].val == instance_name:
vm_ref = vm.obj
if vm_ref is None:
raise exception.InstanceNotFound(instance_id=instance_name)
virtual_machine_ticket = vim_session._call_method(
vim_session._get_vim(),
'AcquireCloneTicket',
vim_session._get_vim().get_service_content().sessionManager)
json_data = jsonutils.dumps({'vm_id': str(vm_ref.value),
'username': virtual_machine_ticket,
'password': virtual_machine_ticket})
return base64.b64encode(json_data)
def is_otp(self):
"""Is one time password or not."""
return True
| 32.309859 | 79 | 0.607672 | ["Apache-2.0"] | ONOP/nova | nova/console/vmrc.py | 4,588 | Python |
# Copyright 2017 ZTE Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "res.settings")
if __name__ == "__main__":
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| 34.956522 | 74 | 0.767413 | ["Apache-2.0"] | onap/vfc-gvnfm-vnfres | res/manage.py | 804 | Python |
#
# Copyright (c) 2017 Intel Corporation
# SPDX-License-Identifier: BSD-2-Clause
#
import numba
import numpy as np
import argparse
import time
@numba.njit()
def linear_regression(Y, X, w, iterations, alphaN):
for i in range(iterations):
w -= alphaN * np.dot(X.T, np.dot(X,w)-Y)
return w
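# Editorial note: the loop above is plain batch gradient descent on the least-squares
# objective, using the update w <- w - (alpha / N) * X^T (X w - Y); `alphaN` already
# folds the 1/N factor into the learning rate.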
def main():
parser = argparse.ArgumentParser(description='Linear Regression.')
parser.add_argument('--samples', dest='samples', type=int, default=200000)
parser.add_argument('--features', dest='features', type=int, default=10)
parser.add_argument('--functions', dest='functions', type=int, default=4)
parser.add_argument('--iterations', dest='iterations', type=int, default=20)
args = parser.parse_args()
N = args.samples
D = args.features
p = args.functions
iterations = args.iterations
alphaN = 0.01/N
w = np.zeros((D,p))
np.random.seed(0)
points = np.random.random((N,D))
labels = np.random.random((N,p))
t1 = time.time()
w = linear_regression(labels, points, w, iterations, alphaN)
selftimed = time.time()-t1
print("SELFTIMED ", selftimed)
print("checksum: ", np.sum(w))
if __name__ == '__main__':
main()
| 29.268293 | 80 | 0.661667 | ["BSD-2-Clause"] | ARF1/numba | examples/linear_regression/linear_regression_numba.py | 1,200 | Python |
from interface import *
class M2:
interface = Interface()
def __init__(self, tamanhoDaLista, tempoDeAtraso, charPixel = ' '):
self.guardarNumero = 0
self.interface.set_tamanhoDaLista(tamanhoDaLista)
self.interface.set_tempoDeAtraso(tempoDeAtraso)
self.interface.set_charPixel(charPixel)
def Maneira2(self):
self.guardarNumero
for c in range(len(self.interface.lista)):
for i in range(len(self.interface.lista)):
if i+1 == len(self.interface.lista):
continue
else:
if self.interface.lista[i] > self.interface.lista[i+1]:
guardarNumero = self.interface.lista[i]
self.interface.lista[i] = self.interface.lista[i+1]
self.interface.lista[i+1] = guardarNumero
self.interface.converterPMostrar(i+1)
for i in reversed(range(len(self.interface.lista))):
if i+1 == len(self.interface.lista):
continue
else:
if self.interface.lista[i] > self.interface.lista[i+1]:
guardarNumero = self.interface.lista[i]
self.interface.lista[i] = self.interface.lista[i+1]
self.interface.lista[i+1] = guardarNumero
self.interface.converterPMostrar(i)
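# Editorial note: Maneira2() is a cocktail-shaker (bidirectional bubble) sort; each outer
# iteration performs one forward and one backward pass of adjacent swaps, redrawing the
# list after every swap via self.interface.converterPMostrar().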
| 37.560976 | 83 | 0.518831 | ["MIT"] | jonasht/Python | 07-programasDeAlgoritmo/1-programaDeAlgoritmoTipo1/0versoesAntigas/9-programa/algoritmo2.py | 1,540 | Python |
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
import re
import sphinx_rtd_theme
import subprocess as sp
# -- Project information -----------------------------------------------------
project = 'CubismNova'
copyright = 'ETH Zurich'
author = 'Fabian Wermelinger'
sp.run('(cd .. && doxygen)', shell=True) # compile the xml source
v = str(sp.check_output('git describe --abbrev=0', shell=True)) # get version
# The short X.Y version
version = '.'.join(v.split('.')[:2])
# The full version, including alpha/beta/rc tags
release = v
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.todo',
'sphinx.ext.mathjax',
'sphinx_rtd_theme',
'sphinxcontrib.bibtex',
'breathe',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# breathe extension
breathe_default_project = "CubismNova"
breathe_projects = {
"CubismNova": "../doxygen/xml"
}
breathe_domain_by_extension = { "h" : "cpp", "cu" : "cpp" }
cpp_id_attributes = ['__device__', '__global__', '__host__']
cpp_paren_attributes = ['__align__']
# Tell sphinx what the primary language being documented is
primary_domain = 'cpp'
# Tell sphinx what the pygments highlight language should be
highlight_language = 'cpp'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
html_title = "CubismNova Documentation"
# If false, no module index is generated.
html_domain_indices = True
# If false, no index is generated.
html_use_index = True
# If true, the index is split into individual pages for each letter.
html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = False
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = True
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'CubismNovadoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'CubismNova.tex', 'CubismNova Documentation',
'Fabian Wermelinger', 'manual'),
]
# BibTeX files
bibtex_bibfiles = ['bibtex/references.bib']
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'cubismnova', 'CubismNova Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'CubismNova', 'CubismNova Documentation',
author, 'CubismNova', 'One line description of project.',
'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# -- Extension configuration -------------------------------------------------
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
| 30.034934 | 79 | 0.664728 | ["BSD-2-Clause"] | cselab/CubismNova | docs/source/conf.py | 6,878 | Python |
# coding: utf-8
"""
ProcessMaker API
This ProcessMaker I/O API provides access to a BPMN 2.0 compliant workflow engine api that is designed to be used as a microservice to support enterprise cloud applications. The current Alpha 1.0 version supports most of the descriptive class of the BPMN 2.0 specification.
OpenAPI spec version: 1.0.0
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import os
import sys
import unittest
import ProcessMaker_PMIO
from ProcessMaker_PMIO.rest import ApiException
from ProcessMaker_PMIO.models.input_output import InputOutput
class TestInputOutput(unittest.TestCase):
""" InputOutput unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testInputOutput(self):
"""
Test InputOutput
"""
model = ProcessMaker_PMIO.models.input_output.InputOutput()
if __name__ == '__main__':
unittest.main()
| 29.537037 | 278 | 0.731661 | ["Apache-2.0"] | ProcessMaker/pmio-sdk-python | test/test_input_output.py | 1,599 | Python |
from kivy.lang import Builder
from kivy.uix.widget import Widget
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.gridlayout import GridLayout
from kivy.graphics import *
from kivy.graphics.texture import Texture
from kivy.properties import ListProperty
from .gardenGraph import Plot
from .ExdLabel import *
import math
class PolarGraph(Widget):
def __init__(self, radial_tick=4, linear_tick=10, scale=10, **kwargs):
self.bind(pos=self.draw,
size=self.draw)
self.tick_color= [0.51, 0.51, 0.51, 1]
self.plots= []
self.nb_radial_tick= radial_tick
self.nb_linear_tick= linear_tick
self.scale= scale
super(PolarGraph, self).__init__(**kwargs)
self.ratio= 1
if self.size_hint[0] != None:
self.ratio= self.size_hint[0]
def draw(self, *args):
self.canvas.clear()
if hasattr(self, "parent"):
self.ratio= min(self.parent.size_hint_x, self.parent.size_hint_y)
self.dim= min(self.width, self.height)
self.update_ticks(*args)
self.update_plots(*args)
def update_ticks(self, *args):
with self.canvas:
Color(*self.tick_color)
for i in range(1,self.nb_radial_tick+1):
Line(circle=(self.center_x,
self.center_y,
self.ratio*i*(self.height/self.nb_radial_tick)/2))
for i in range(1,self.nb_linear_tick+1):
tick_len = self.dim*self.ratio*.5
Line(points=[self.center_x-tick_len*math.cos(i*(3.14/self.nb_linear_tick)),
self.center_y-tick_len*math.sin(i*(3.14/self.nb_linear_tick)),
self.center_x+tick_len*math.cos(i*(3.14/self.nb_linear_tick)),
self.center_y+tick_len*math.sin(i*(3.14/self.nb_linear_tick))],
width=1)
def add_plot(self, plot):
if plot in self.plots:
return
plot.bind(on_clear_plot=self.draw)
self.update_plots()
self.plots.append(plot)
def update_plots(self, *args):
for plot in self.plots:
with self.canvas:
Color(plot.color)
for pt in plot.points:
t= pt[0]
a= math.radians(pt[1][0])
m= pt[1][1]
x= self.center_x + math.cos(a)*min(1,m/self.scale)*(self.dim*self.ratio)*.5
y= self.center_y + math.sin(a)*min(1,m/self.scale)*(self.dim*self.ratio)*.5
Rectangle(pos=(x,y), size=(2,2))
class polarPlot(Plot):
pass
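# Editorial sketch (assumed API, not taken from this file): plot points are stored as
# (timestamp, (angle_in_degrees, magnitude)) tuples and drawn inside a disc scaled by
# `scale`, so a hypothetical usage would look like
#   graph = PolarGraph(scale=10)
#   plot = polarPlot(color=[1, 0, 0, 1])
#   plot.points.append((0, (45, 5)))   # 45 degrees, magnitude 5 (half of scale)
#   graph.add_plot(plot)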
| 31.372093 | 95 | 0.569311 | ["Apache-2.0"] | RenatoTorres/Exode | Exode/UI/polarGraph.py | 2,698 | Python |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.keras models using DistributionStrategy."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl.testing import parameterized
import numpy as np
from tensorflow.contrib.distribute.python import combinations
from tensorflow.contrib.distribute.python import mirrored_strategy
from tensorflow.contrib.distribute.python import tpu_strategy
from tensorflow.contrib.distribute.python import values
from tensorflow.python import keras
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.estimator import keras as keras_lib
from tensorflow.python.estimator import run_config as run_config_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import test_util
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.engine import distributed_training_utils
from tensorflow.python.ops.parsing_ops import gen_parsing_ops
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.summary.writer import writer_cache
from tensorflow.python.training import gradient_descent
from tensorflow.python.training import rmsprop
_RANDOM_SEED = 1337
_TRAIN_SIZE = 200
_INPUT_SIZE = (10,)
_NUM_CLASS = 2
# TODO(anjalisridhar): Add a decorator that will allow us to run these tests as
# part of the tf.keras unit tests suite.
def simple_sequential_model():
model = keras.models.Sequential()
model.add(keras.layers.Dense(16, activation='relu', input_shape=_INPUT_SIZE))
model.add(keras.layers.Dropout(0.1))
model.add(keras.layers.Dense(_NUM_CLASS, activation='softmax'))
return model
def simple_functional_model():
a = keras.layers.Input(shape=_INPUT_SIZE)
b = keras.layers.Dense(16, activation='relu')(a)
b = keras.layers.Dropout(0.1)(b)
b = keras.layers.Dense(_NUM_CLASS, activation='softmax')(b)
model = keras.models.Model(inputs=[a], outputs=[b])
return model
def multi_inputs_multi_outputs_model():
input_a = keras.layers.Input(shape=(16,), name='input_a')
input_b = keras.layers.Input(shape=(16,), name='input_b')
input_m = keras.layers.Input(shape=(8,), dtype='string', name='input_m')
dense = keras.layers.Dense(8, name='dense_1')
interm_a = dense(input_a)
# Read m
interm_m = keras.layers.Lambda(gen_parsing_ops.string_to_number)(input_m)
interm_s = keras.layers.Lambda(lambda k: k[0] * k[1])([interm_m, interm_a])
interm_b = dense(input_b)
merged = keras.layers.concatenate([interm_s, interm_b], name='merge')
output_c = keras.layers.Dense(3, activation='softmax', name='dense_2')(merged)
output_d = keras.layers.Dense(2, activation='softmax', name='dense_3')(merged)
model = keras.models.Model(
inputs=[input_a, input_b, input_m], outputs=[output_c, output_d])
model.compile(
loss='categorical_crossentropy',
optimizer=gradient_descent.GradientDescentOptimizer(0.001),
metrics={
'dense_2': 'categorical_accuracy',
'dense_3': 'categorical_accuracy'
})
return model
def get_ds_train_input_fn():
np.random.seed(_RANDOM_SEED)
(x_train, y_train), _ = testing_utils.get_test_data(
train_samples=_TRAIN_SIZE,
test_samples=50,
input_shape=_INPUT_SIZE,
num_classes=_NUM_CLASS)
y_train = keras.utils.to_categorical(y_train)
dataset = dataset_ops.Dataset.from_tensor_slices((x_train, y_train))
dataset = dataset.batch(32)
return dataset
def get_ds_test_input_fn():
np.random.seed(_RANDOM_SEED)
_, (x_test, y_test) = testing_utils.get_test_data(
train_samples=_TRAIN_SIZE,
test_samples=50,
input_shape=_INPUT_SIZE,
num_classes=_NUM_CLASS)
y_test = keras.utils.to_categorical(y_test)
dataset = dataset_ops.Dataset.from_tensor_slices((x_test, y_test))
dataset = dataset.batch(32)
return dataset
def get_multi_inputs_multi_outputs_data():
(a_train, c_train), (a_test, c_test) = testing_utils.get_test_data(
train_samples=_TRAIN_SIZE,
test_samples=50,
input_shape=(16,),
num_classes=3,
random_seed=_RANDOM_SEED)
(b_train, d_train), (b_test, d_test) = testing_utils.get_test_data(
train_samples=_TRAIN_SIZE,
test_samples=50,
input_shape=(16,),
num_classes=2,
random_seed=_RANDOM_SEED)
(m_train, _), (m_test, _) = testing_utils.get_test_data(
train_samples=_TRAIN_SIZE,
test_samples=50,
input_shape=(8,),
num_classes=2,
random_seed=_RANDOM_SEED)
c_train = keras.utils.to_categorical(c_train)
c_test = keras.utils.to_categorical(c_test)
d_train = keras.utils.to_categorical(d_train)
d_test = keras.utils.to_categorical(d_test)
train_data = {
'input_a': a_train,
'input_b': b_train,
'input_m': m_train,
'output_c': c_train,
'output_d': d_train
}
test_data = {
'input_a': a_test,
'input_b': b_test,
'input_m': m_test,
'output_c': c_test,
'output_d': d_test
}
return (train_data, test_data)
def batch_wrapper(dataset, batch_size, distribution):
# TPUs currently require fully defined input shapes, drop_remainder ensures
# the input will have fully defined shapes.
if isinstance(distribution, tpu_strategy.TPUStrategy):
return dataset.batch(batch_size, drop_remainder=True)
else:
return dataset.batch(batch_size)
def get_model():
x = keras.layers.Input(shape=(3,), name='input')
y = keras.layers.Dense(4, name='dense')(x)
model = keras.Model(x, y)
return model
def get_dataset(distribution):
inputs = np.zeros((10, 3), dtype=np.float32)
targets = np.zeros((10, 4), dtype=np.float32)
dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
dataset = dataset.repeat(100)
dataset = batch_wrapper(dataset, 10, distribution)
return dataset
def get_predict_dataset(distribution):
inputs = np.zeros((10, 3), dtype=np.float32)
dataset = dataset_ops.Dataset.from_tensor_slices(inputs)
dataset = dataset.repeat(100)
dataset = batch_wrapper(dataset, 10, distribution)
return dataset
def multi_input_output_model():
a = keras.layers.Input(shape=(3,), name='input_a')
b = keras.layers.Input(shape=(5,), name='input_b')
# TODO(anjalisridhar): Change the output dimension of the second Dense layer
# once the iterator output validation issue has been fixed.
dense_1 = keras.layers.Dense(7, name='dense_1')
dense_2 = keras.layers.Dense(7, name='dense_2')
c = dense_1(a)
d = dense_2(b)
e = keras.layers.Dropout(0.5, name='dropout')(c)
model = keras.models.Model([a, b], [d, e])
return model
def get_correctness_test_inputs(use_numpy, with_distribution,
x_train, y_train, x_predict):
"""Generates the inputs for correctness check when enable Keras with DS."""
global_batch_size = 64
batch_size = global_batch_size
# TODO(b/118776054): Use global batch size for Keras/DS support.
use_per_core_batch_size = (
with_distribution and
with_distribution.__class__.__name__ != 'TPUStrategy')
if use_per_core_batch_size:
batch_size //= with_distribution.num_replicas_in_sync
if use_numpy:
training_inputs = {
'batch_size': batch_size,
'x': x_train,
'y': y_train,
'epochs': 1,
'shuffle': False,
}
eval_inputs = {
'batch_size': batch_size,
'x': x_train,
'y': y_train,
}
# TODO(b/119318587): We should not require batch_size when distribution
# is enabled.
if with_distribution:
if use_per_core_batch_size:
predict_batch_size = (
len(x_predict) // with_distribution.num_replicas_in_sync)
else:
predict_batch_size = len(x_predict)
else:
predict_batch_size = None
predict_inputs = {
'batch_size': predict_batch_size,
'x': np.array(x_predict, dtype=np.float32),
}
else:
# For dataset inputs, we do not pass batch_size to
# keras.fit/evaluate/predict. The batch size is part of the dataset.
train_dataset = dataset_ops.Dataset.from_tensor_slices(
(x_train, y_train))
x = batch_wrapper(train_dataset, batch_size, with_distribution)
training_inputs = {
'batch_size': None,
'x': x,
'y': None,
'epochs': 1,
'shuffle': False,
'steps_per_epoch': len(x_train) // global_batch_size,
}
eval_inputs = {
'batch_size': None,
'x': x,
'y': None,
'steps': 20,
}
predict_batch_size = len(x_predict)
if use_per_core_batch_size:
predict_batch_size //= with_distribution.num_replicas_in_sync
predict_dataset = dataset_ops.Dataset.from_tensor_slices(x_predict)
predict_dataset = batch_wrapper(predict_dataset,
predict_batch_size, with_distribution)
predict_inputs = {
'batch_size': None,
'steps': 1,
'x': predict_dataset,
}
return training_inputs, eval_inputs, predict_inputs
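# Worked example of the batching logic above: with a 2-replica MirroredStrategy,
# `use_per_core_batch_size` is True, so the fit/eval batch size becomes
# 64 // 2 = 32 per replica, and for the 4-element `x_predict` used in the
# correctness test the predict batch size becomes 4 // 2 = 2.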
strategies = [combinations.default_strategy,
combinations.one_device_strategy,
combinations.mirrored_strategy_with_gpu_and_cpu,
combinations.mirrored_strategy_with_two_gpus,
combinations.tpu_strategy, # steps_per_run=2
combinations.tpu_strategy_one_step]
def strategy_minus_tpu_combinations():
return combinations.combine(
distribution=[combinations.default_strategy,
combinations.one_device_strategy,
combinations.mirrored_strategy_with_gpu_and_cpu,
combinations.mirrored_strategy_with_two_gpus],
mode=['graph'])
def strategy_combinations():
return combinations.combine(
distribution=strategies,
mode=['graph'])
def strategy_and_optimizer_combinations():
return combinations.combine(
distribution=strategies,
optimizer=[combinations.adagrad_optimizer_v1_fn,
combinations.adam_optimizer_v1_fn,
combinations.gradient_descent_optimizer_v1_fn,
combinations.rmsprop_optimizer_v1_fn],
mode=['graph'])
def strategy_and_inputs():
return combinations.combine(
distribution=strategies,
use_numpy=[True, False],
mode=['graph'])
class TestEstimatorDistributionStrategy(test_util.TensorFlowTestCase):
def setUp(self):
self._base_dir = os.path.join(self.get_temp_dir(),
'keras_mirrored_strategy_test')
gfile.MakeDirs(self._base_dir)
self._config = run_config_lib.RunConfig(
tf_random_seed=_RANDOM_SEED, model_dir=self._base_dir)
self._dist = mirrored_strategy.MirroredStrategy(
devices=['/device:GPU:0', '/device:GPU:1'])
def tearDown(self):
writer_cache.FileWriterCache.clear()
if os.path.isdir(self._base_dir):
gfile.DeleteRecursively(self._base_dir)
def test_train_functional_with_distribution_strategy(self):
dist = mirrored_strategy.MirroredStrategy(
devices=['/device:GPU:0', '/device:GPU:1'])
keras_model = simple_functional_model()
keras_model.compile(
loss='categorical_crossentropy',
metrics=[keras.metrics.CategoricalAccuracy()],
optimizer=rmsprop.RMSPropOptimizer(learning_rate=0.01))
config = run_config_lib.RunConfig(tf_random_seed=_RANDOM_SEED,
model_dir=self._base_dir,
train_distribute=dist,
eval_distribute=dist)
with self.cached_session():
est_keras = keras_lib.model_to_estimator(
keras_model=keras_model, config=config)
before_eval_results = est_keras.evaluate(
input_fn=get_ds_test_input_fn, steps=1)
est_keras.train(input_fn=get_ds_train_input_fn, steps=_TRAIN_SIZE / 16)
after_eval_results = est_keras.evaluate(input_fn=get_ds_test_input_fn,
steps=1)
self.assertLess(after_eval_results['loss'], before_eval_results['loss'])
writer_cache.FileWriterCache.clear()
gfile.DeleteRecursively(self._config.model_dir)
def test_train_sequential_with_distribution_strategy(self):
dist = mirrored_strategy.MirroredStrategy(
devices=['/device:GPU:0', '/device:GPU:1'])
keras_model = simple_sequential_model()
keras_model.compile(
loss='categorical_crossentropy',
metrics=[keras.metrics.CategoricalAccuracy()],
optimizer=rmsprop.RMSPropOptimizer(learning_rate=0.01))
config = run_config_lib.RunConfig(tf_random_seed=_RANDOM_SEED,
model_dir=self._base_dir,
train_distribute=dist)
with self.cached_session():
est_keras = keras_lib.model_to_estimator(
keras_model=keras_model, config=config)
before_eval_results = est_keras.evaluate(
input_fn=get_ds_test_input_fn, steps=1)
est_keras.train(input_fn=get_ds_train_input_fn, steps=_TRAIN_SIZE / 16)
after_eval_results = est_keras.evaluate(input_fn=get_ds_test_input_fn,
steps=1)
self.assertLess(after_eval_results['loss'], before_eval_results['loss'])
writer_cache.FileWriterCache.clear()
gfile.DeleteRecursively(self._config.model_dir)
def test_multi_inputs_multi_outputs_with_input_fn_as_dict(self):
train_data, test_data = get_multi_inputs_multi_outputs_data()
def train_input_fn():
input_dict = {
'input_a': train_data['input_a'],
'input_b': train_data['input_b'],
'input_m': train_data['input_m'].astype(np.str)
}
output_dict = {
'dense_2': train_data['output_c'],
'dense_3': train_data['output_d']
}
return dataset_ops.Dataset.from_tensor_slices((input_dict,
output_dict)).batch(16)
def eval_input_fn():
input_dict = {
'input_a': test_data['input_a'],
'input_b': test_data['input_b'],
'input_m': test_data['input_m'].astype(np.str)
}
output_dict = {
'dense_2': test_data['output_c'],
'dense_3': test_data['output_d']
}
return dataset_ops.Dataset.from_tensor_slices((input_dict,
output_dict)).batch(16)
self.do_test_multi_inputs_multi_outputs_with_input_fn(
train_input_fn, eval_input_fn)
def do_test_multi_inputs_multi_outputs_with_input_fn(self, train_input_fn,
eval_input_fn):
config = run_config_lib.RunConfig(
tf_random_seed=_RANDOM_SEED,
model_dir=self._base_dir,
train_distribute=self._dist)
with self.cached_session():
model = multi_inputs_multi_outputs_model()
est_keras = keras_lib.model_to_estimator(keras_model=model, config=config)
baseline_eval_results = est_keras.evaluate(
input_fn=eval_input_fn, steps=1)
est_keras.train(input_fn=train_input_fn, steps=_TRAIN_SIZE / 16)
eval_results = est_keras.evaluate(input_fn=eval_input_fn, steps=1)
self.assertLess(eval_results['loss'], baseline_eval_results['loss'])
def test_keras_optimizer_with_distribution_strategy(self):
dist = mirrored_strategy.MirroredStrategy(
devices=['/device:GPU:0', '/device:GPU:1'])
keras_model = simple_sequential_model()
keras_model.compile(
loss='categorical_crossentropy',
optimizer=keras.optimizers.rmsprop(lr=0.01))
config = run_config_lib.RunConfig(tf_random_seed=_RANDOM_SEED,
model_dir=self._base_dir,
train_distribute=dist)
with self.cached_session():
est_keras = keras_lib.model_to_estimator(keras_model=keras_model,
config=config)
with self.assertRaisesRegexp(ValueError,
'Only TensorFlow native optimizers are '
'supported with DistributionStrategy.'):
est_keras.train(input_fn=get_ds_train_input_fn, steps=_TRAIN_SIZE / 16)
writer_cache.FileWriterCache.clear()
gfile.DeleteRecursively(self._config.model_dir)
class TestDistributionStrategyWithNumpyArrays(test.TestCase,
parameterized.TestCase):
@combinations.generate(strategy_combinations())
def test_creating_var_with_numpy_arrays(self, distribution):
with self.cached_session():
x = np.asarray(np.random.random((64, 3)), dtype=np.float32)
var_x = distributed_training_utils.get_var_for_numpy(distribution, x)
val = self.evaluate(var_x.value())
# Verify that the numpy value is copied to the variable.
self.assertAllEqual(x, val)
def test_calculating_batch_params(self):
# This verifies that we calculate the number of steps when the batch size
# is specified.
with self.cached_session():
# 64 is the number of input samples.
inputs = np.zeros((64, 3), dtype=np.float32)
# The number of replicas is equal to 3.
strategy = mirrored_strategy.MirroredStrategy(['/device:GPU:0',
'/device:CPU:0',
'/device:GPU:1'])
with self.assertRaisesRegexp(ValueError, 'Please specify a batch_size '
'that is smaller than'):
# The batch size(128) is larger than the number of input
# samples(64).
distributed_training_utils.get_input_batch_params(inputs,
128,
strategy)
with self.assertRaisesRegexp(ValueError, 'is smaller than the number '
'of replicas'):
# The batch size(32) * num_replicas_in_sync(3) is 96 which is greater
# than the number of input samples(64).
distributed_training_utils.get_input_batch_params(inputs,
32,
strategy)
# The number of replicas now is equal to 2.
strategy = mirrored_strategy.MirroredStrategy(['/device:GPU:0',
'/device:CPU:0'])
# 32 is the batch size per replica.
steps = distributed_training_utils.get_input_batch_params(inputs,
32,
strategy)
# The number of batches is the ratio of input samples(64) to
# batch size(32) which is 2. The number of steps(1) is the ratio of
# number of batches(2) to the number of replicas(2).
self.assertEqual(steps, 1)
# 16 is the batch size per replica.
steps = distributed_training_utils.get_input_batch_params(inputs,
16,
strategy)
# The number of batches is the ratio of input samples(64) to
# batch size(16) which is 4. The number of steps(2) is the ratio of
# number of batches(4) to the number of replicas(2).
self.assertEqual(steps, 2)
def test_calculating_batch_size(self):
with self.cached_session():
# 64 is the number of input samples.
inputs = np.zeros((64, 3), dtype=np.float32)
targets = np.zeros((64, 4), dtype=np.float32)
model = get_model()
optimizer = gradient_descent.GradientDescentOptimizer(0.001)
loss = 'mse'
strategy = mirrored_strategy.MirroredStrategy(['/device:GPU:0',
'/device:CPU:0'])
strategy._require_static_shapes = True
model.compile(optimizer, loss, distribute=strategy)
iterator = model._distribution_standardize_user_data(inputs,
targets,
batch_size=None,
check_steps=True,
steps_name='steps',
steps=3)
# The global batch size(21) across all replicas is the ratio of the input
# samples(64) to the steps(3).
# The batch size(10) per device is the ratio of the global batch size(21)
# to the number of replicas(2).
# The global batch size and batch size are rounded integer values.
self.assertEqual(10, distributed_training_utils.get_batch_dimension(
iterator._iterator))
@combinations.generate(strategy_combinations())
def test_calling_model_with_numpy_arrays(self, distribution):
with self.cached_session():
model = get_model()
optimizer = gradient_descent.GradientDescentOptimizer(0.001)
loss = 'mse'
metrics = ['mae']
model.compile(optimizer, loss, metrics=metrics, distribute=distribution)
inputs = np.zeros((64, 3), dtype=np.float32)
targets = np.zeros((64, 4), dtype=np.float32)
# Call fit with validation data
model.fit(inputs, targets, epochs=1, batch_size=2, verbose=0,
validation_data=(inputs, targets))
# TODO(anjalisridhar): We need tests for when the batch size and steps are
# smaller and results in a 0 batch_size and steps value.
model.evaluate(inputs, targets)
# with steps
model.evaluate(inputs, targets, steps=2)
# with batch_size
model.evaluate(inputs, targets, batch_size=8)
model.predict(inputs)
# with steps
model.predict(inputs, steps=2)
# with batch_size
model.predict(inputs, batch_size=8)
@combinations.generate(strategy_combinations())
def test_calling_model_with_nested_numpy_arrays(self, distribution):
with self.cached_session():
model = multi_input_output_model()
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=0.001)
loss = 'mse'
model.compile(optimizer, loss, distribute=distribution)
input_a_np = np.asarray(np.random.random((64, 3)), dtype=np.float32)
input_b_np = np.asarray(np.random.random((64, 5)), dtype=np.float32)
inputs = [input_a_np, input_b_np]
output_d_np = np.asarray(np.random.random((64, 7)), dtype=np.float32)
output_e_np = np.asarray(np.random.random((64, 7)), dtype=np.float32)
targets = [output_d_np, output_e_np]
# Call fit with validation data
model.fit(inputs, targets, epochs=1, batch_size=8, verbose=0)
# TODO(anjalisridhar): We need tests for when the batch size and steps are
# smaller and results in a 0 batch_size and steps value.
model.evaluate(inputs, targets)
# with steps
model.evaluate(inputs, targets, steps=2)
# with batch_size
model.evaluate(inputs, targets, batch_size=8)
model.predict(inputs)
# with steps
model.predict(inputs, steps=2)
# with batch_size
model.predict(inputs, batch_size=8)
@combinations.generate(strategy_minus_tpu_combinations())
def test_numpy_with_sample_weights(self, distribution):
model = get_model()
optimizer = rmsprop.RMSPropOptimizer(learning_rate=0.001)
loss = 'mse'
model.compile(optimizer, loss, distribute=distribution)
inputs = np.zeros((10, 3), np.float32)
targets = np.zeros((10, 4), np.float32)
sample_weights = np.ones((10), np.float32)
model.fit(inputs, targets, sample_weight=sample_weights, epochs=1,
steps_per_epoch=2, verbose=1)
@combinations.generate(strategy_combinations())
def test_flatten_predict_outputs(self, distribution):
with self.cached_session():
model = multi_input_output_model()
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=0.001)
loss = 'mse'
model.compile(optimizer, loss, distribute=distribution)
# We take 6 input samples with each input having a dimension of 3 or 5.
input_a_np = np.asarray(np.random.random((6, 3)), dtype=np.float32)
input_b_np = np.asarray(np.random.random((6, 5)), dtype=np.float32)
inputs = [input_a_np, input_b_np]
outs = model.predict(inputs, steps=1)
      # `predict` returns a list that is equal in length to the number of
      # model outputs. In this test our model has two outputs and each element
      # of `outs` corresponds to all the samples of one of the model outputs.
self.assertEqual(2, len(outs))
# Each of the output samples have a dimension of 7. We should process all
# the available input samples(6).
self.assertAllEqual([6, 7], outs[0].shape)
self.assertAllEqual([6, 7], outs[1].shape)
class TestDistributionStrategyWithDatasets(test.TestCase,
parameterized.TestCase):
@combinations.generate(strategy_combinations())
def test_calling_model_on_same_dataset(self, distribution):
with self.cached_session():
model = get_model()
optimizer = gradient_descent.GradientDescentOptimizer(0.001)
loss = 'mse'
metrics = ['mae', keras.metrics.CategoricalAccuracy()]
model.compile(optimizer, loss, metrics=metrics, distribute=distribution)
dataset = get_dataset(distribution)
# Call fit with validation data
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0,
validation_data=dataset, validation_steps=2)
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0,
validation_data=dataset, validation_steps=2)
model.predict(get_predict_dataset(distribution), steps=2)
@combinations.generate(strategy_combinations())
def test_model_interleaved_eval_same_as_direct_eval(self, distribution):
with self.cached_session():
user_controlled_model = get_model()
user_controlled_model.compile(
gradient_descent.GradientDescentOptimizer(0.001),
loss='mse',
metrics=['mae', keras.metrics.CategoricalAccuracy()],
distribute=distribution)
interleaved_model = get_model()
interleaved_model.set_weights(user_controlled_model.get_weights())
interleaved_model.compile(
gradient_descent.GradientDescentOptimizer(0.001),
loss='mse',
metrics=['mae', keras.metrics.CategoricalAccuracy()],
distribute=distribution)
dataset = get_dataset(distribution)
# Call fit with validation interleaved
interleaved_output = interleaved_model.fit(
dataset, epochs=2, steps_per_epoch=2, verbose=1,
validation_data=dataset, validation_steps=2, shuffle=False)
# Manually control the validation running after each epoch.
user_controlled_output = []
for _ in range(2):
user_controlled_model.fit(
dataset, epochs=1, steps_per_epoch=2, verbose=1, shuffle=False)
user_controlled_output.append(
user_controlled_model.evaluate(dataset, steps=2))
self.assertEqual(interleaved_output.history['val_loss'],
[x[0] for x in user_controlled_output])
self.assertEqual(interleaved_output.history['val_mean_absolute_error'],
[x[1] for x in user_controlled_output])
self.assertEqual(interleaved_output.history['val_categorical_accuracy'],
[x[2] for x in user_controlled_output])
  # TODO(priyag): Enable this test for TPU. Currently tuples/dicts don't work
  # as clone_model's input_tensors argument only seems to accept a list and
  # not tuples or dicts.
def test_fit_with_tuple_and_dict_dataset_inputs(self):
with self.cached_session():
model = multi_input_output_model()
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=0.001)
loss = 'mse'
metrics = ['mae', keras.metrics.CategoricalAccuracy()]
strategy = mirrored_strategy.MirroredStrategy(['/device:GPU:0',
'/device:CPU:0'])
model.compile(optimizer, loss, metrics=metrics, distribute=strategy)
input_a_np = np.random.random((10, 3))
input_b_np = np.random.random((10, 5))
output_d_np = np.random.random((10, 7))
output_e_np = np.random.random((10, 7))
# Test with tuples
dataset_tuple = dataset_ops.Dataset.from_tensor_slices((
(input_a_np, input_b_np), (output_d_np, output_e_np)))
dataset_tuple = dataset_tuple.repeat(100)
dataset_tuple = dataset_tuple.batch(10)
model.fit(dataset_tuple, epochs=1, steps_per_epoch=2, verbose=1)
# Test with dict
dataset_dict = dataset_ops.Dataset.from_tensor_slices((
{'input_a': input_a_np, 'input_b': input_b_np},
(output_d_np, output_e_np)))
dataset_dict = dataset_dict.repeat(100)
dataset_dict = dataset_dict.batch(10)
model.fit(dataset_dict, epochs=1, steps_per_epoch=2, verbose=1)
@combinations.generate(strategy_combinations())
def test_fit_eval_and_predict_methods_on_dataset(self, distribution):
with self.cached_session():
model = get_model()
optimizer = gradient_descent.GradientDescentOptimizer(0.001)
loss = 'mse'
metrics = ['mae', keras.metrics.CategoricalAccuracy()]
model.compile(optimizer, loss, metrics=metrics, distribute=distribution)
dataset = get_dataset(distribution)
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=1)
model.evaluate(dataset, steps=2, verbose=1)
model.predict(get_predict_dataset(distribution), steps=2)
@combinations.generate(strategy_and_optimizer_combinations())
def test_fit_eval_and_predict_with_optimizer(self, distribution, optimizer):
with self.cached_session():
model = get_model()
loss = 'mse'
model.compile(optimizer(), loss, distribute=distribution)
dataset = get_dataset(distribution)
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=1)
model.evaluate(dataset, steps=2, verbose=1)
model.predict(get_predict_dataset(distribution), steps=2)
@combinations.generate(strategy_minus_tpu_combinations())
def test_dataset_with_sample_weights(self, distribution):
model = get_model()
optimizer = rmsprop.RMSPropOptimizer(learning_rate=0.001)
loss = 'mse'
model.compile(optimizer, loss, distribute=distribution)
inputs = np.zeros((10, 3), np.float32)
targets = np.zeros((10, 4), np.float32)
sample_weights = np.ones((10), np.float32)
dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets,
sample_weights))
dataset = dataset.repeat()
dataset = dataset.batch(10)
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=1)
model.evaluate(dataset, steps=2, verbose=1)
model.predict(dataset, steps=2)
def test_dataset_input_shape_validation(self):
with self.cached_session():
model = get_model()
optimizer = rmsprop.RMSPropOptimizer(learning_rate=0.001)
loss = 'mse'
strategy = mirrored_strategy.MirroredStrategy(['/device:GPU:1',
'/device:GPU:0'])
model.compile(optimizer, loss, distribute=strategy)
# User forgets to batch the dataset
inputs = np.zeros((10, 3), dtype=np.float32)
targets = np.zeros((10, 4), dtype=np.float32)
dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
dataset = dataset.repeat(100)
with self.assertRaisesRegexp(ValueError, 'expected input to have shape'):
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0)
# Wrong input shape
inputs = np.zeros((10, 5), dtype=np.float32)
targets = np.zeros((10, 4), dtype=np.float32)
dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
dataset = dataset.repeat(100)
dataset = dataset.batch(10)
with self.assertRaisesRegexp(ValueError,
'expected input to have shape'):
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0)
@combinations.generate(combinations.combine(
distribution=[combinations.tpu_strategy_one_step],
mode=['graph']))
def test_dataset_input_shape_fully_defined(self, distribution):
with self.cached_session():
model = get_model()
optimizer = rmsprop.RMSPropOptimizer(learning_rate=0.001)
loss = 'mse'
model.compile(optimizer, loss, distribute=distribution)
dataset = get_dataset(distribution)
# Input shapes are not fully known. Batch dimension is unknown as we are
# not using the drop_remainder argument.
dataset = dataset.repeat(100).batch(10)
with self.assertRaisesRegexp(ValueError, 'requires fully defined shapes'):
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0)
def test_learning_phase_value(self):
# TODO(anjalisridhar): Modify this test to use Lambdas since we can compare
# meaningful values. Currently we don't pass the learning phase if the
# Lambda layer uses the learning phase.
with self.cached_session():
x = keras.layers.Input(shape=(1,), name='input')
y = keras.layers.Dense(1, kernel_initializer='ones')(x)
z = keras.layers.Dropout(0.9999)(y)
model = keras.Model(x, z)
initial_weights = model.get_weights()
optimizer = gradient_descent.GradientDescentOptimizer(0.005)
loss = 'mse'
metrics = ['acc']
strategy = mirrored_strategy.MirroredStrategy(
['/device:GPU:0', '/device:GPU:1'])
model.compile(optimizer, loss, metrics=metrics, distribute=strategy)
inputs = np.ones((10, 1), dtype=np.float32)
targets = np.ones((10, 1), dtype=np.float32)
dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
dataset = dataset.repeat().batch(8)
hist = model.fit(dataset, epochs=1, steps_per_epoch=20, verbose=1)
self.assertAlmostEqual(hist.history['acc'][0], 0, 0)
model.set_weights(initial_weights)
# TODO(psv/anjalisridhar): Enable these lines after we fix b/117431185.
# evaluate_output = model.evaluate(dataset, steps=20)
# self.assertAlmostEqual(evaluate_output[1], 1, 0)
inputs = np.ones((10, 1), dtype=np.float32)
predict_dataset = dataset_ops.Dataset.from_tensor_slices(inputs)
predict_dataset = predict_dataset.repeat().batch(5)
output = model.predict(predict_dataset, steps=10)
# `predict` runs for 10 steps and in each step you process 10 samples.
ref_output = np.ones((100, 1), dtype=np.float32)
self.assertArrayNear(output, ref_output, 1e-1)
class TestDistributionStrategyErrorCases(test.TestCase, parameterized.TestCase):
def test_validating_dataset_input_tensors_with_shape_mismatch(self):
with self.cached_session():
strategy = mirrored_strategy.MirroredStrategy(['/device:GPU:0',
'/device:CPU:0'])
a = constant_op.constant([1, 2], shape=(1, 2))
b = constant_op.constant([[1, 2], [1, 2]], shape=(2, 2))
x = values.DistributedValues({'/device:CPU:0': a, '/device:GPU:0': b})
y = values.DistributedValues({'/device:CPU:0': a, '/device:GPU:0': a})
with strategy.scope():
# Removed device and input tensor shape details from the error message
# since the order of the device and the corresponding input tensor shape
# is not deterministic over different runs.
with self.assertRaisesRegexp(ValueError,
'Input tensor shapes do not match for '
'distributed tensor inputs '
'DistributedValues:.+'):
distributed_training_utils.validate_distributed_dataset_inputs(
strategy, x, y)
def test_validating_dataset_input_tensors_with_dtype_mismatch(self):
with self.cached_session():
strategy = mirrored_strategy.MirroredStrategy(['/device:GPU:0',
'/device:CPU:0'])
a = constant_op.constant([1, 2], shape=(1, 2), dtype=dtypes.int32)
b = constant_op.constant([1, 2], shape=(1, 2), dtype=dtypes.float64)
x = values.DistributedValues({'/device:CPU:0': a, '/device:GPU:0': b})
y = values.DistributedValues({'/device:CPU:0': a, '/device:GPU:0': a})
with strategy.scope():
# Removed device and input tensor dtype details from the error message
# since the order of the device and the corresponding input tensor dtype
# is not deterministic over different runs.
with self.assertRaisesRegexp(ValueError,
'Input tensor dtypes do not match for '
'distributed tensor inputs '
'DistributedValues:.+'):
distributed_training_utils.validate_distributed_dataset_inputs(
strategy, x, y)
def test_unsupported_features(self):
with self.cached_session():
model = get_model()
optimizer = gradient_descent.GradientDescentOptimizer(0.001)
loss = 'mse'
metrics = ['mae']
strategy = mirrored_strategy.MirroredStrategy(['/device:GPU:1',
'/device:GPU:0'])
model.compile(optimizer, loss, metrics=metrics, distribute=strategy)
dataset = get_dataset(strategy)
# Test with validation split
with self.assertRaisesRegexp(
ValueError, '`validation_split` argument is not '
'supported when input `x` is a dataset or a '
'dataset iterator.+'):
model.fit(dataset,
epochs=1, steps_per_epoch=2, verbose=0,
validation_split=0.5, validation_steps=2)
# Test with sample weight.
sample_weight = np.random.random((10,))
with self.assertRaisesRegexp(
ValueError, '`sample_weight` argument is not supported when input '
'`x` is a dataset or a dataset iterator.'):
model.fit(
dataset,
epochs=1,
steps_per_epoch=2,
verbose=0,
sample_weight=sample_weight)
# Test with not specifying the `steps` argument.
with self.assertRaisesRegexp(
ValueError, 'you should specify the `steps_per_epoch` argument'):
model.fit(dataset, epochs=1, verbose=0)
with self.assertRaisesRegexp(ValueError,
'you should specify the `steps` argument'):
model.evaluate(dataset, verbose=0)
with self.assertRaisesRegexp(ValueError,
'you should specify the `steps` argument'):
model.predict(dataset, verbose=0)
def test_calling_with_unsupported_predefined_callbacks(self):
with self.cached_session():
model = get_model()
optimizer = gradient_descent.GradientDescentOptimizer(0.001)
loss = 'mse'
metrics = ['mae']
strategy = mirrored_strategy.MirroredStrategy(['/device:GPU:1',
'/device:GPU:0'])
model.compile(optimizer, loss, metrics=metrics, distribute=strategy)
dataset = get_dataset(strategy)
def schedule(_):
return 0.001
with self.assertRaisesRegexp(ValueError,
'LearningRateScheduler callback is not '
'supported with DistributionStrategy.'):
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0,
callbacks=[keras.callbacks.LearningRateScheduler(schedule)])
with self.assertRaisesRegexp(ValueError,
'ReduceLROnPlateau callback is not '
'supported with DistributionStrategy.'):
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0,
callbacks=[keras.callbacks.ReduceLROnPlateau()])
with self.assertRaisesRegexp(ValueError,
'histogram_freq in the TensorBoard callback '
'is not supported when using '
'DistributionStrategy.'):
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0,
callbacks=[keras.callbacks.TensorBoard(histogram_freq=10)])
class TestDistributionStrategyWithLossMasking(test.TestCase):
# TODO(priyag): Enable all strategies for this test. Currently it does not
# work for TPU due to some invalid datatype.
def test_masking(self):
with self.cached_session():
np.random.seed(1337)
x = np.array([[[1], [1]], [[0], [0]]])
model = keras.models.Sequential()
model.add(keras.layers.Masking(mask_value=0, input_shape=(2, 1)))
model.add(
keras.layers.TimeDistributed(
keras.layers.Dense(1, kernel_initializer='one')))
strategy = mirrored_strategy.MirroredStrategy(['/device:GPU:1',
'/device:GPU:0'])
model.compile(loss='mse',
optimizer=gradient_descent.GradientDescentOptimizer(0.01),
distribute=strategy)
y = np.array([[[1], [1]], [[1], [1]]])
dataset = dataset_ops.Dataset.from_tensor_slices((x, y))
dataset = dataset.repeat(100)
dataset = dataset.batch(10)
hist = model.fit(x=dataset, epochs=1, steps_per_epoch=2)
self.assertEqual(hist.history['loss'][0], 0)
class TestDistributionStrategyWithNormalizationLayer(
test.TestCase, parameterized.TestCase):
@combinations.generate(strategy_combinations())
def test_batchnorm_correctness(self, distribution):
with self.cached_session():
model = keras.models.Sequential()
norm = keras.layers.BatchNormalization(input_shape=(10,), momentum=0.8)
model.add(norm)
model.compile(loss='mse',
optimizer=gradient_descent.GradientDescentOptimizer(0.01),
distribute=distribution)
      # centered on 5.0, standard deviation 10.0
x = np.random.normal(loc=5.0, scale=10.0, size=(1000, 10))
x = x.astype('float32')
dataset = dataset_ops.Dataset.from_tensor_slices((x, x))
dataset = dataset.repeat(100)
dataset = batch_wrapper(dataset, 32, distribution)
predict_dataset = dataset_ops.Dataset.from_tensor_slices(x)
predict_dataset = predict_dataset.repeat(100)
predict_dataset = batch_wrapper(predict_dataset, 32, distribution)
model.fit(dataset, epochs=4, verbose=0, steps_per_epoch=10)
out = model.predict(predict_dataset, steps=2)
out -= keras.backend.eval(norm.beta)
out /= keras.backend.eval(norm.gamma)
np.testing.assert_allclose(out.mean(), 0.0, atol=1e-1)
np.testing.assert_allclose(out.std(), 1.0, atol=1e-1)
class TestDistributionStrategyCorrectness(test.TestCase,
parameterized.TestCase):
@combinations.generate(strategy_combinations())
def test_metric_correctness(self, distribution):
with self.cached_session():
keras.backend.set_image_data_format('channels_last')
num_samples = 10000
x_train = np.random.randint(0, 2, num_samples)
x_train = np.reshape(x_train, (num_samples, 1))
y_train = x_train
x_train = x_train.astype('float32')
y_train = y_train.astype('float32')
# Create identity model.
model = keras.Sequential()
model.add(
keras.layers.Dense(1, input_shape=(1,), kernel_initializer='ones'))
model.compile(
loss=keras.losses.mean_squared_error,
optimizer=gradient_descent.GradientDescentOptimizer(0.5),
metrics=[keras.metrics.BinaryAccuracy()],
distribute=distribution)
batch_size = 64
batch_size //= distribution.num_replicas_in_sync
train_dataset = dataset_ops.Dataset.from_tensor_slices((x_train, y_train))
train_dataset = batch_wrapper(train_dataset, batch_size, distribution)
history = model.fit(x=train_dataset, epochs=1, steps_per_epoch=10)
self.assertEqual(history.history['binary_accuracy'], [1.0])
@combinations.generate(strategy_and_inputs())
def test_correctness(self, distribution, use_numpy):
with self.cached_session():
tolerance = 1e-5
if isinstance(distribution, mirrored_strategy.MirroredStrategy):
        # TODO(b/119257215): use the default one once the flakiness is fixed.
tolerance = 1e-4
keras.backend.set_image_data_format('channels_last')
np.random.seed(_RANDOM_SEED)
random_seed.set_random_seed(_RANDOM_SEED)
# Train, eval, and predict datasets are created with the same input numpy
# arrays.
# TODO(xiejw): Change this back to 10000, once we support final partial
# batch.
num_samples = 9984
x_train = np.random.rand(num_samples, 1)
y_train = 3 * x_train
x_train = x_train.astype('float32')
y_train = y_train.astype('float32')
x_predict = [[1.], [2.], [3.], [4.]]
# The model is built once and the initial weights are saved.
# This is used to initialize the model for both the distribution and
      # non-distribution run. In addition, we add a few non-linear layers to
      # make it non-trivial.
model = keras.Sequential()
model.add(keras.layers.Dense(10, activation='relu', input_shape=(1,)))
model.add(keras.layers.Dense(10, activation='relu'))
model.add(keras.layers.Dense(10, activation='relu'))
model.add(keras.layers.Dense(1))
initial_weights = model.get_weights()
def fit_and_predict(with_distribution=None):
        # We have initialized the model to the same weights for the
        # distribution and non-distribution run.
model.set_weights(initial_weights)
model.compile(
loss=keras.losses.mean_squared_error,
optimizer=gradient_descent.GradientDescentOptimizer(0.5),
distribute=with_distribution)
training_inputs, eval_inputs, predict_inputs = (
get_correctness_test_inputs(use_numpy, with_distribution,
x_train, y_train, x_predict))
model.fit(**training_inputs)
eval_result = model.evaluate(**eval_inputs)
weights = model.get_weights()
predict_result = model.predict(**predict_inputs)
return weights, eval_result, predict_result
wts_with_ds, eval_with_ds, predict_with_ds = fit_and_predict(
with_distribution=distribution)
wts_without_ds, eval_without_ds, predict_without_ds = fit_and_predict(
with_distribution=None)
# Verify that the weights, eval results, predict outputs are the same
# within some limits of tolerance.
self.assertAllClose(
wts_with_ds, wts_without_ds, atol=tolerance, rtol=tolerance)
self.assertAllClose(
eval_with_ds, eval_without_ds, atol=tolerance, rtol=tolerance)
self.assertAllClose(
predict_with_ds, predict_without_ds, atol=tolerance, rtol=tolerance)
# TODO(priyag): Add a test for TPUStrategy with steps_per_run > 1.
if __name__ == '__main__':
test.main()
| 40.847128 | 80 | 0.657507 | [
"Apache-2.0"
] | unnir/tensorflow | tensorflow/contrib/distribute/python/keras_test.py | 48,363 | Python |
#!/bin/python
# -*- coding: utf-8 -*-
# import numpy as np
# define additional functions used in the *.yaml.
# Of course, as this is a trivial function you could have defined it in the *.yaml directly
def calc_nu(nub):
nu = nub / (1 - nub)
return nu
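# For example, calc_nu(0.2) returns 0.2 / (1 - 0.2) == 0.25.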
| 20.153846 | 91 | 0.656489 | [
"MIT"
] | OpenSourceEconomics/ose-scientific-computing-course-pytholisks | pydsge/examples/dfi_funcs.py | 262 | Python |
import numpy as np
# fmt: off
clon, clat = 0.0, 0.0
rad_km = 800.0
area_km2 = np.pi*rad_km**2
nlat, nlon = 17, 17
lat1d = np.linspace(-8.0, 8.0, nlat)
lon1d = np.linspace(-8.0, 8.0, nlon)
lat2d, lon2d = np.meshgrid(lat1d, lon1d)
_, X = 0, 1
mask = np.array([
[_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_],
[_,_,_,_,_,X,X,X,X,X,X,X,_,_,_,_,_],
[_,_,_,X,X,X,X,X,X,X,X,X,X,X,_,_,_],
[_,_,X,X,X,X,X,X,X,X,X,X,X,X,X,_,_],
[_,_,X,X,X,X,X,X,X,X,X,X,X,X,X,_,_],
[_,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,_],
[_,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,_],
[_,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,_],
[_,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,_],
[_,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,_],
[_,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,_],
[_,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,_],
[_,_,X,X,X,X,X,X,X,X,X,X,X,X,X,_,_],
[_,_,X,X,X,X,X,X,X,X,X,X,X,X,X,_,_],
[_,_,_,X,X,X,X,X,X,X,X,X,X,X,_,_,_],
[_,_,_,_,_,X,X,X,X,X,X,X,_,_,_,_,_],
[_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_],
], bool).T[:, ::-1]  # np.bool was removed in NumPy 1.24+; plain bool is equivalent
| 26.911765 | 40 | 0.504918 | [
"MIT"
] | ruestefa/stormtrack | tests/test_stormtrack/test_core/test_features/data/circle_on_globe_clat-00_rad-800_delta-1.0_pyproj.py | 915 | Python |
#!/usr/bin/env python3
# Copyright 2018 Johns Hopkins University (author: Daniel Povey)
# Apache 2.0.
# see get_args() below for usage message.
import argparse
import os
import sys
import math
import re
# The use of latin-1 encoding does not preclude reading utf-8. latin-1
# encoding means "treat words as sequences of bytes", and it is compatible
# with utf-8 encoding as well as other encodings such as gbk, as long as the
# spaces are also spaces in ascii (which we check). It is basically how we
# emulate the behavior of python before python3.
sys.stdout = open(1, 'w', encoding='latin-1', closefd=False)
sys.stderr = open(2, 'w', encoding='latin-1', closefd=False)
def get_args():
parser = argparse.ArgumentParser(description="""This script creates the
text form of a lexicon FST, to be compiled by fstcompile using the
appropriate symbol tables (phones.txt and words.txt) . It will mostly
be invoked indirectly via utils/prepare_lang.sh. The output goes to
the stdout.""")
parser.add_argument('--sil-phone', dest='sil_phone', type=str,
help="""Text form of optional-silence phone, e.g. 'SIL'. See also
                        the --sil-prob option.""")
parser.add_argument('--sil-prob', dest='sil_prob', type=float, default=0.0,
help="""Probability of silence between words (including at the
beginning and end of word sequences). Must be in the range [0.0, 1.0].
This refers to the optional silence inserted by the lexicon; see
                        the --sil-phone option.""")
parser.add_argument('--sil-disambig', dest='sil_disambig', type=str,
help="""Disambiguation symbol to disambiguate silence, e.g. #5.
Will only be supplied if you are creating the version of L.fst
with disambiguation symbols, intended for use with cyclic G.fst.
This symbol was introduced to fix a rather obscure source of
nondeterminism of CLG.fst, that has to do with reordering of
disambiguation symbols and phone symbols.""")
parser.add_argument('--left-context-phones', dest='left_context_phones', type=str,
help="""Only relevant if --nonterminals is also supplied; this relates
to grammar decoding (see http://kaldi-asr.org/doc/grammar.html or
src/doc/grammar.dox). Format is a list of left-context phones,
in text form, one per line. E.g. data/lang/phones/left_context_phones.txt""")
parser.add_argument('--nonterminals', type=str,
help="""If supplied, --left-context-phones must also be supplied.
List of user-defined nonterminal symbols such as #nonterm:contact_list,
one per line. E.g. data/local/dict/nonterminals.txt.""")
parser.add_argument('lexiconp', type=str,
help="""Filename of lexicon with pronunciation probabilities
(normally lexiconp.txt), with lines of the form 'word prob p1 p2...',
e.g. 'a 1.0 ay'""")
args = parser.parse_args()
return args
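# Example invocation (sketch only; file names are placeholders and the
# fstcompile flags assume a standard OpenFst installation):
#   make_lexicon_fst.py --sil-prob=0.5 --sil-phone=SIL lexiconp.txt | \
#     fstcompile --isymbols=phones.txt --osymbols=words.txt > L.fst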
def read_lexiconp(filename):
"""Reads the lexiconp.txt file in 'filename', with lines like 'word pron p1 p2 ...'.
Returns a list of tuples (word, pron_prob, pron), where 'word' is a string,
'pron_prob', a float, is the pronunciation probability (which must be >0.0
and would normally be <=1.0), and 'pron' is a list of strings representing phones.
An element in the returned list might be ('hello', 1.0, ['h', 'eh', 'l', 'ow']).
"""
ans = []
found_empty_prons = False
found_large_pronprobs = False
# See the comment near the top of this file, RE why we use latin-1.
with open(filename, 'r', encoding='latin-1') as f:
whitespace = re.compile("[ \t]+")
for line in f:
a = whitespace.split(line.strip(" \t\r\n"))
if len(a) < 2:
print("{0}: error: found bad line '{1}' in lexicon file {2} ".format(
sys.argv[0], line.strip(" \t\r\n"), filename), file=sys.stderr)
sys.exit(1)
word = a[0]
if word == "<eps>":
# This would clash with the epsilon symbol normally used in OpenFst.
                print("{0}: error: found <eps> as a word in lexicon file "
                      "{1}".format(sys.argv[0], filename), file=sys.stderr)
sys.exit(1)
try:
pron_prob = float(a[1])
except:
print("{0}: error: found bad line '{1}' in lexicon file {2}, 2nd field "
"should be pron-prob".format(sys.argv[0], line.strip(" \t\r\n"), filename),
file=sys.stderr)
sys.exit(1)
prons = a[2:]
if pron_prob <= 0.0:
                print("{0}: error: invalid pron-prob in line '{1}' of lexicon file {2}".format(
sys.argv[0], line.strip(" \t\r\n"), filename), file=sys.stderr)
sys.exit(1)
if len(prons) == 0:
found_empty_prons = True
ans.append( (word, pron_prob, prons) )
if pron_prob > 1.0:
found_large_pronprobs = True
if found_empty_prons:
print("{0}: warning: found at least one word with an empty pronunciation "
"in lexicon file {1}.".format(sys.argv[0], filename),
file=sys.stderr)
if found_large_pronprobs:
print("{0}: warning: found at least one word with pron-prob >1.0 "
"in {1}".format(sys.argv[0], filename), file=sys.stderr)
if len(ans) == 0:
print("{0}: error: found no pronunciations in lexicon file {1}".format(
sys.argv[0], filename), file=sys.stderr)
sys.exit(1)
return ans
def write_nonterminal_arcs(start_state, loop_state, next_state,
nonterminals, left_context_phones):
"""This function relates to the grammar-decoding setup, see
kaldi-asr.org/doc/grammar.html. It is called from write_fst_no_silence
and write_fst_silence, and writes to the stdout some extra arcs
in the lexicon FST that relate to nonterminal symbols.
    See the section "Special symbols in L.fst" at
    kaldi-asr.org/doc/grammar.html#grammar_special_l.
start_state: the start-state of L.fst.
loop_state: the state of high out-degree in L.fst where words leave
and enter.
next_state: the number from which this function can start allocating its
own states. the updated value of next_state will be returned.
nonterminals: the user-defined nonterminal symbols as a list of
strings, e.g. ['#nonterm:contact_list', ... ].
left_context_phones: a list of phones that may appear as left-context,
e.g. ['a', 'ah', ... '#nonterm_bos'].
"""
shared_state = next_state
next_state += 1
final_state = next_state
next_state += 1
print("{src}\t{dest}\t{phone}\t{word}\t{cost}".format(
src=start_state, dest=shared_state,
phone='#nonterm_begin', word='#nonterm_begin',
cost=0.0))
for nonterminal in nonterminals:
print("{src}\t{dest}\t{phone}\t{word}\t{cost}".format(
src=loop_state, dest=shared_state,
phone=nonterminal, word=nonterminal,
cost=0.0))
# this_cost equals log(len(left_context_phones)) but the expression below
# better captures the meaning. Applying this cost to arcs keeps the FST
    # stochastic (sum-to-one, like an HMM), so that if we do weight pushing
# things won't get weird. In the grammar-FST code when we splice things
# together we will cancel out this cost, see the function CombineArcs().
this_cost = -math.log(1.0 / len(left_context_phones))
for left_context_phone in left_context_phones:
print("{src}\t{dest}\t{phone}\t{word}\t{cost}".format(
src=shared_state, dest=loop_state,
phone=left_context_phone, word='<eps>', cost=this_cost))
# arc from loop-state to a final-state with #nonterm_end as ilabel and olabel
print("{src}\t{dest}\t{phone}\t{word}\t{cost}".format(
src=loop_state, dest=final_state,
phone='#nonterm_end', word='#nonterm_end', cost=0.0))
print("{state}\t{final_cost}".format(
state=final_state, final_cost=0.0))
return next_state
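# For example, with 40 left-context phones each shared_state -> loop_state arc
# above gets cost -log(1/40) ~= 3.689, so the probabilities of the outgoing
# arcs of shared_state still sum to one.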
def write_fst_no_silence(lexicon, nonterminals=None, left_context_phones=None):
"""Writes the text format of L.fst to the standard output. This version is for
when --sil-prob=0.0, meaning there is no optional silence allowed.
'lexicon' is a list of 3-tuples (word, pron-prob, prons) as returned by
read_lexiconp().
'nonterminals', which relates to grammar decoding (see kaldi-asr.org/doc/grammar.html),
is either None, or the user-defined nonterminal symbols as a list of
strings, e.g. ['#nonterm:contact_list', ... ].
    'left_context_phones', which also relates to grammar decoding and must be
    supplied if 'nonterminals' is supplied, is either None or a list of
    phones that may appear as left-context, e.g. ['a', 'ah', ... '#nonterm_bos'].
"""
loop_state = 0
next_state = 1 # the next un-allocated state, will be incremented as we go.
for (word, pronprob, pron) in lexicon:
cost = -math.log(pronprob)
cur_state = loop_state
for i in range(len(pron) - 1):
print("{src}\t{dest}\t{phone}\t{word}\t{cost}".format(
src=cur_state,
dest=next_state,
phone=pron[i],
word=(word if i == 0 else '<eps>'),
cost=(cost if i == 0 else 0.0)))
cur_state = next_state
next_state += 1
i = len(pron) - 1 # note: i == -1 if pron is empty.
print("{src}\t{dest}\t{phone}\t{word}\t{cost}".format(
src=cur_state,
dest=loop_state,
phone=(pron[i] if i >= 0 else '<eps>'),
word=(word if i <= 0 else '<eps>'),
cost=(cost if i <= 0 else 0.0)))
    if nonterminals is not None:
        # Without optional silence the FST's start state is the loop state.
        start_state = loop_state
        next_state = write_nonterminal_arcs(
            start_state, loop_state, next_state,
            nonterminals, left_context_phones)
print("{state}\t{final_cost}".format(
state=loop_state,
final_cost=0.0))
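# For a toy lexicon entry ('hi', 1.0, ['h', 'ay']) and no nonterminals, the
# function above prints two tab-separated arcs,
#   loop(0) -> 1 with ilabel 'h',  olabel 'hi',    cost -log(1.0)
#   1 -> loop(0) with ilabel 'ay', olabel '<eps>', cost 0.0
# plus a final-state line for state 0.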
def write_fst_with_silence(lexicon, sil_prob, sil_phone, sil_disambig,
nonterminals=None, left_context_phones=None):
"""Writes the text format of L.fst to the standard output. This version is for
when --sil-prob != 0.0, meaning there is optional silence
'lexicon' is a list of 3-tuples (word, pron-prob, prons)
as returned by read_lexiconp().
    'sil_prob', which is expected to be strictly between 0.0 and 1.0, is the
    probability of silence.
'sil_phone' is the silence phone, e.g. "SIL".
'sil_disambig' is either None, or the silence disambiguation symbol, e.g. "#5".
'nonterminals', which relates to grammar decoding (see kaldi-asr.org/doc/grammar.html),
is either None, or the user-defined nonterminal symbols as a list of
strings, e.g. ['#nonterm:contact_list', ... ].
    'left_context_phones', which also relates to grammar decoding and must be
    supplied if 'nonterminals' is supplied, is either None or a list of
    phones that may appear as left-context, e.g. ['a', 'ah', ... '#nonterm_bos'].
"""
assert sil_prob > 0.0 and sil_prob < 1.0
sil_cost = -math.log(sil_prob)
    no_sil_cost = -math.log(1.0 - sil_prob)
start_state = 0
loop_state = 1 # words enter and leave from here
sil_state = 2 # words terminate here when followed by silence; this state
# has a silence transition to loop_state.
next_state = 3 # the next un-allocated state, will be incremented as we go.
print('{src}\t{dest}\t{phone}\t{word}\t{cost}'.format(
src=start_state, dest=loop_state,
phone='<eps>', word='<eps>', cost=no_sil_cost))
print('{src}\t{dest}\t{phone}\t{word}\t{cost}'.format(
src=start_state, dest=sil_state,
phone='<eps>', word='<eps>', cost=sil_cost))
if sil_disambig is None:
print('{src}\t{dest}\t{phone}\t{word}\t{cost}'.format(
src=sil_state, dest=loop_state,
phone=sil_phone, word='<eps>', cost=0.0))
else:
sil_disambig_state = next_state
next_state += 1
print('{src}\t{dest}\t{phone}\t{word}\t{cost}'.format(
src=sil_state, dest=sil_disambig_state,
phone=sil_phone, word='<eps>', cost=0.0))
print('{src}\t{dest}\t{phone}\t{word}\t{cost}'.format(
src=sil_disambig_state, dest=loop_state,
phone=sil_disambig, word='<eps>', cost=0.0))
for (word, pronprob, pron) in lexicon:
pron_cost = -math.log(pronprob)
cur_state = loop_state
for i in range(len(pron) - 1):
print("{src}\t{dest}\t{phone}\t{word}\t{cost}".format(
src=cur_state, dest=next_state,
phone=pron[i],
word=(word if i == 0 else '<eps>'),
cost=(pron_cost if i == 0 else 0.0)))
cur_state = next_state
next_state += 1
i = len(pron) - 1 # note: i == -1 if pron is empty.
print("{src}\t{dest}\t{phone}\t{word}\t{cost}".format(
src=cur_state,
dest=loop_state,
phone=(pron[i] if i >= 0 else '<eps>'),
word=(word if i <= 0 else '<eps>'),
cost=no_sil_cost + (pron_cost if i <= 0 else 0.0)))
print("{src}\t{dest}\t{phone}\t{word}\t{cost}".format(
src=cur_state,
dest=sil_state,
phone=(pron[i] if i >= 0 else '<eps>'),
word=(word if i <= 0 else '<eps>'),
cost=sil_cost + (pron_cost if i <= 0 else 0.0)))
if nonterminals is not None:
next_state = write_nonterminal_arcs(
start_state, loop_state, next_state,
nonterminals, left_context_phones)
print("{state}\t{final_cost}".format(
state=loop_state,
final_cost=0.0))
def write_words_txt(orig_lines, highest_numbered_symbol, nonterminals, filename):
"""Writes updated words.txt to 'filename'. 'orig_lines' is the original lines
in the words.txt file as a list of strings (without the newlines);
highest_numbered_symbol is the highest numbered symbol in the original
words.txt; nonterminals is a list of strings like '#nonterm:foo'."""
with open(filename, 'w', encoding='latin-1') as f:
for l in orig_lines:
print(l, file=f)
cur_symbol = highest_numbered_symbol + 1
for n in [ '#nonterm_begin', '#nonterm_end' ] + nonterminals:
print("{0} {1}".format(n, cur_symbol), file=f)
cur_symbol = cur_symbol + 1
def read_nonterminals(filename):
"""Reads the user-defined nonterminal symbols in 'filename', checks that
it has the expected format and has no duplicates, and returns the nonterminal
symbols as a list of strings, e.g.
['#nonterm:contact_list', '#nonterm:phone_number', ... ]. """
ans = [line.strip(" \t\r\n") for line in open(filename, 'r', encoding='latin-1')]
if len(ans) == 0:
raise RuntimeError("The file {0} contains no nonterminals symbols.".format(filename))
for nonterm in ans:
if nonterm[:9] != '#nonterm:':
raise RuntimeError("In file '{0}', expected nonterminal symbols to start with '#nonterm:', found '{1}'"
.format(filename, nonterm))
if len(set(ans)) != len(ans):
raise RuntimeError("Duplicate nonterminal symbols are present in file {0}".format(filename))
return ans
def read_left_context_phones(filename):
"""Reads, checks, and returns a list of left-context phones, in text form, one
per line. Returns a list of strings, e.g. ['a', 'ah', ..., '#nonterm_bos' ]"""
ans = [line.strip(" \t\r\n") for line in open(filename, 'r', encoding='latin-1')]
if len(ans) == 0:
raise RuntimeError("The file {0} contains no left-context phones.".format(filename))
whitespace = re.compile("[ \t]+")
for s in ans:
if len(whitespace.split(s)) != 1:
raise RuntimeError("The file {0} contains an invalid line '{1}'".format(filename, s) )
if len(set(ans)) != len(ans):
        raise RuntimeError("Duplicate left-context phones are present in file {0}".format(filename))
return ans
def is_token(s):
"""Returns true if s is a string and is space-free."""
if not isinstance(s, str):
return False
whitespace = re.compile("[ \t\r\n]+")
    split_str = whitespace.split(s)
return len(split_str) == 1 and s == split_str[0]
def main():
args = get_args()
lexicon = read_lexiconp(args.lexiconp)
if args.nonterminals is None:
nonterminals, left_context_phones = None, None
else:
if args.left_context_phones is None:
            print("{0}: if --nonterminals is specified, --left-context-phones must also "
                  "be specified".format(sys.argv[0]), file=sys.stderr)
sys.exit(1)
nonterminals = read_nonterminals(args.nonterminals)
left_context_phones = read_left_context_phones(args.left_context_phones)
if args.sil_prob == 0.0:
write_fst_no_silence(lexicon,
nonterminals=nonterminals,
left_context_phones=left_context_phones)
else:
# Do some checking that the options make sense.
if args.sil_prob < 0.0 or args.sil_prob >= 1.0:
print("{0}: invalid value specified --sil-prob={1}".format(
sys.argv[0], args.sil_prob), file=sys.stderr)
sys.exit(1)
if not is_token(args.sil_phone):
print("{0}: you specified --sil-prob={1} but --sil-phone is set "
"to '{2}'".format(sys.argv[0], args.sil_prob, args.sil_phone),
file=sys.stderr)
sys.exit(1)
if args.sil_disambig is not None and not is_token(args.sil_disambig):
print("{0}: invalid value --sil-disambig='{1}' was specified."
"".format(sys.argv[0], args.sil_disambig), file=sys.stderr)
sys.exit(1)
write_fst_with_silence(lexicon, args.sil_prob, args.sil_phone,
args.sil_disambig,
nonterminals=nonterminals,
left_context_phones=left_context_phones)
# (lines, highest_symbol) = read_words_txt(args.input_words_txt)
# nonterminals = read_nonterminals(args.nonterminal_symbols_list)
# write_words_txt(lines, highest_symbol, nonterminals, args.output_words_txt)
if __name__ == '__main__':
main()
| 46.429612 | 115 | 0.603429 | [
"Apache-2.0"
] | Anusha-G-Rao/kaldi | egs/wsj/s5/utils/lang/make_lexicon_fst.py | 19,129 | Python |
# Copyright 2020 by Federico Caselli
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Inspect utilities for falcon applications."""
from functools import partial
import inspect
from typing import Callable, Dict, List, Optional, Type
from falcon import App, app_helpers
from falcon.routing import CompiledRouter
def inspect_app(app: App) -> 'AppInfo':
"""Inspects an application.
Args:
app (falcon.App): The application to inspect. Works with both
:class:`falcon.App` and :class:`falcon.asgi.App`.
Returns:
AppInfo: The information regarding the application. Call
:meth:`~.AppInfo.to_string` on the result to obtain a human-friendly
representation.
"""
routes = inspect_routes(app)
static = inspect_static_routes(app)
sinks = inspect_sinks(app)
error_handlers = inspect_error_handlers(app)
middleware = inspect_middlewares(app)
return AppInfo(routes, middleware, static, sinks, error_handlers, app._ASGI)
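# Minimal usage sketch (hypothetical app, not part of this module):
#
#     import falcon
#     from falcon import inspect
#
#     app = falcon.App()
#     print(inspect.inspect_app(app).to_string(verbose=True))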
def inspect_routes(app: App) -> 'List[RouteInfo]':
"""Inspects the routes of an application.
Args:
app (falcon.App): The application to inspect. Works with both
:class:`falcon.App` and :class:`falcon.asgi.App`.
Returns:
List[RouteInfo]: A list of route descriptions for the application.
"""
router = app._router
inspect_function = _supported_routers.get(type(router))
if inspect_function is None:
raise TypeError(
'Unsupported router class {}. Use "register_router" '
'to register a function that can inspect the router '
'used by the provided application'.format(type(router))
)
return inspect_function(router)
def register_router(router_class):
"""Register a function to inspect a particular router.
This decorator registers a new function for a custom router
class, so that it can be inspected with the function
:func:`.inspect_routes`.
An inspection function takes the router instance used by the
application and returns a list of :class:`.RouteInfo`. Eg::
@register_router(MyRouterClass)
def inspect_my_router(router):
return [RouteInfo('foo', 'bar', '/path/to/foo.py:42', [])]
Args:
router_class (Type): The router class to register. If
already registered an error will be raised.
"""
def wraps(fn):
if router_class in _supported_routers:
raise ValueError(
'Another function is already registered'
' for the router {}'.format(router_class)
)
_supported_routers[router_class] = fn
return fn
return wraps
# router inspection registry
_supported_routers = {} # type: Dict[Type, Callable]
def inspect_static_routes(app: App) -> 'List[StaticRouteInfo]':
"""Inspects the static routes of an application.
Args:
app (falcon.App): The application to inspect. Works with both
:class:`falcon.App` and :class:`falcon.asgi.App`.
Returns:
List[StaticRouteInfo]: A list of static routes that have
been added to the application.
"""
routes = []
for sr, _, _ in app._static_routes:
info = StaticRouteInfo(sr._prefix, sr._directory, sr._fallback_filename)
routes.append(info)
return routes
def inspect_sinks(app: App) -> 'List[SinkInfo]':
"""Inspects the sinks of an application.
Args:
app (falcon.App): The application to inspect. Works with both
:class:`falcon.App` and :class:`falcon.asgi.App`.
Returns:
List[SinkInfo]: A list of sinks used by the application.
"""
sinks = []
for prefix, sink, _ in app._sinks:
source_info, name = _get_source_info_and_name(sink)
info = SinkInfo(prefix.pattern, name, source_info)
sinks.append(info)
return sinks
def inspect_error_handlers(app: App) -> 'List[ErrorHandlerInfo]':
"""Inspects the error handlers of an application.
Args:
app (falcon.App): The application to inspect. Works with both
:class:`falcon.App` and :class:`falcon.asgi.App`.
Returns:
List[ErrorHandlerInfo]: A list of error handlers used by the
application.
"""
errors = []
for exc, fn in app._error_handlers.items():
source_info, name = _get_source_info_and_name(fn)
info = ErrorHandlerInfo(exc.__name__, name, source_info, _is_internal(fn))
errors.append(info)
return errors
def inspect_middlewares(app: App) -> 'MiddlewareInfo':
"""Inspects the middleware components of an application.
Args:
app (falcon.App): The application to inspect. Works with both
:class:`falcon.App` and :class:`falcon.asgi.App`.
Returns:
MiddlewareInfo: Information about the app's middleware components.
"""
types_ = app_helpers.prepare_middleware(app._unprepared_middleware, True, app._ASGI)
type_infos = []
for stack in types_:
current = []
for method in stack:
_, name = _get_source_info_and_name(method)
cls = type(method.__self__)
_, cls_name = _get_source_info_and_name(cls)
current.append(MiddlewareTreeItemInfo(name, cls_name))
type_infos.append(current)
middlewareTree = MiddlewareTreeInfo(*type_infos)
middlewareClasses = []
names = 'Process request', 'Process resource', 'Process response'
for m in app._unprepared_middleware:
fns = app_helpers.prepare_middleware([m], True, app._ASGI)
class_source_info, cls_name = _get_source_info_and_name(type(m))
methods = []
for method, name in zip(fns, names):
if method:
real_func = method[0]
source_info = _get_source_info(real_func)
methods.append(MiddlewareMethodInfo(real_func.__name__, source_info))
m_info = MiddlewareClassInfo(cls_name, class_source_info, methods)
middlewareClasses.append(m_info)
return MiddlewareInfo(
middlewareTree, middlewareClasses, app._independent_middleware
)
@register_router(CompiledRouter)
def inspect_compiled_router(router: CompiledRouter) -> 'List[RouteInfo]':
"""Walk an instance of :class:`~.CompiledRouter` to return a list of defined routes.
Default route inspector for CompiledRouter.
Args:
router (CompiledRouter): The router to inspect.
Returns:
List[RouteInfo]: A list of :class:`~.RouteInfo`.
"""
def _traverse(roots, parent):
for root in roots:
path = parent + '/' + root.raw_segment
if root.resource is not None:
methods = []
if root.method_map:
for method, func in root.method_map.items():
if isinstance(func, partial):
real_func = func.func
else:
real_func = func
source_info = _get_source_info(real_func)
internal = _is_internal(real_func)
method_info = RouteMethodInfo(
method, source_info, real_func.__name__, internal
)
methods.append(method_info)
source_info, class_name = _get_source_info_and_name(root.resource)
route_info = RouteInfo(path, class_name, source_info, methods)
routes.append(route_info)
if root.children:
_traverse(root.children, path)
routes = [] # type: List[RouteInfo]
_traverse(router._roots, '')
return routes
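# Rough sketch of the result (resource and responder names are hypothetical):
# for an app with app.add_route('/things', ThingsResource()) where the resource
# defines on_get, the returned list contains roughly
#   RouteInfo('/things', 'ThingsResource', '<source path>',
#             [RouteMethodInfo('GET', '<source path>', 'on_get', False), ...])
# where the trailing entries are framework-added internal responders.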
# ------------------------------------------------------------------------
# Inspection classes
# ------------------------------------------------------------------------
class _Traversable:
__visit_name__ = 'N/A'
def to_string(self, verbose=False, internal=False) -> str:
"""Return a string representation of this class.
Args:
verbose (bool, optional): Adds more information. Defaults to False.
internal (bool, optional): Also include internal route methods
and error handlers added by the framework. Defaults to
``False``.
Returns:
str: string representation of this class.
"""
return StringVisitor(verbose, internal).process(self)
def __repr__(self):
return self.to_string()
class RouteMethodInfo(_Traversable):
"""Describes a responder method.
Args:
method (str): The HTTP method of this responder.
source_info (str): The source path of this function.
function_name (str): Name of the function.
internal (bool): Whether or not this was a default responder added
by the framework.
Attributes:
suffix (str): The suffix of this route function. This is set to an empty
string when the function has no suffix.
"""
__visit_name__ = 'route_method'
def __init__(
self, method: str, source_info: str, function_name: str, internal: bool
):
self.method = method
self.source_info = source_info
self.function_name = function_name
self.internal = internal
# NOTE(CaselIT): internal falcon names do not start with on and do not have suffix
if function_name.startswith('on'):
self.suffix = '_'.join(function_name.split('_')[2:])
else:
self.suffix = ''
class RouteInfo(_Traversable):
"""Describes a route.
Args:
path (str): The path of this route.
class_name (str): The class name of the responder of this route.
source_info (str): The source path where this responder was defined.
methods (List[RouteMethodInfo]): List of methods defined in the route.
"""
__visit_name__ = 'route'
def __init__(
self,
path: str,
class_name: str,
source_info: str,
methods: List[RouteMethodInfo],
):
self.path = path
self.class_name = class_name
self.source_info = source_info
self.methods = methods
class StaticRouteInfo(_Traversable):
"""Describes a static route.
Args:
        prefix (str): The prefix of the static route.
directory (str): The directory for the static route.
fallback_filename (str or None): Fallback filename to serve.
"""
__visit_name__ = 'static_route'
def __init__(self, prefix: str, directory: str, fallback_filename: Optional[str]):
self.prefix = prefix
self.directory = directory
self.fallback_filename = fallback_filename
class SinkInfo(_Traversable):
"""Describes a sink.
Args:
prefix (str): The prefix of the sink.
name (str): The name of the sink function or class.
source_info (str): The source path where this sink was defined.
"""
__visit_name__ = 'sink'
def __init__(self, prefix: str, name: str, source_info: str):
self.prefix = prefix
self.name = name
self.source_info = source_info
class ErrorHandlerInfo(_Traversable):
"""Desribes an error handler.
Args:
error (name): The name of the error type.
name (str): The name of the handler.
source_info (str): The source path where this error handler was defined.
internal (bool): Whether or not this is a default error handler added by
the framework.
"""
__visit_name__ = 'error_handler'
def __init__(self, error: str, name: str, source_info: str, internal: bool):
self.error = error
self.name = name
self.source_info = source_info
self.internal = internal
class MiddlewareMethodInfo(_Traversable):
"""Describes a middleware method.
Args:
function_name (str): Name of the method.
source_info (str): The source path of the method.
"""
__visit_name__ = 'middleware_method'
def __init__(self, function_name: str, source_info: str):
self.function_name = function_name
self.source_info = source_info
self.internal = False # added for compatibility with RouteMethodInfo
class MiddlewareClassInfo(_Traversable):
"""Describes a middleware class.
Args:
name (str): The name of the middleware class.
source_info (str): The source path where the middleware was defined.
        methods (List[MiddlewareMethodInfo]): List of methods defined by the middleware class.
"""
__visit_name__ = 'middleware_class'
def __init__(
self, name: str, source_info: str, methods: List[MiddlewareMethodInfo]
):
self.name = name
self.source_info = source_info
self.methods = methods
class MiddlewareTreeItemInfo(_Traversable):
"""Describes a middleware tree entry.
Args:
name (str): The name of the method.
class_name (str): The class name of the method.
"""
__visit_name__ = 'middleware_tree_item'
_symbols = {
'process_request': '→',
'process_resource': '↣',
'process_response': '↢',
}
def __init__(self, name: str, class_name: str):
self.name = name
self.class_name = class_name
class MiddlewareTreeInfo(_Traversable):
"""Describes the middleware methods used by the app.
Args:
request (List[MiddlewareTreeItemInfo]): The `process_request` methods.
resource (List[MiddlewareTreeItemInfo]): The `process_resource` methods.
response (List[MiddlewareTreeItemInfo]): The `process_response` methods.
"""
__visit_name__ = 'middleware_tree'
def __init__(
self,
request: List[MiddlewareTreeItemInfo],
resource: List[MiddlewareTreeItemInfo],
response: List[MiddlewareTreeItemInfo],
):
self.request = request
self.resource = resource
self.response = response
class MiddlewareInfo(_Traversable):
"""Describes the middleware of the app.
Args:
        middleware_tree (MiddlewareTreeInfo): The middleware tree of the app.
        middleware_classes (List[MiddlewareClassInfo]): The middleware classes of the app.
independent (bool): Whether or not the middleware components are executed
independently.
Attributes:
independent_text (str): Text created from the `independent` arg.
"""
__visit_name__ = 'middleware'
def __init__(
self,
middleware_tree: MiddlewareTreeInfo,
middleware_classes: List[MiddlewareClassInfo],
independent: bool,
):
self.middleware_tree = middleware_tree
self.middleware_classes = middleware_classes
self.independent = independent
if independent:
self.independent_text = 'Middleware are independent'
else:
self.independent_text = 'Middleware are dependent'
class AppInfo(_Traversable):
"""Describes an application.
Args:
routes (List[RouteInfo]): The routes of the application.
middleware (MiddlewareInfo): The middleware information in the application.
static_routes (List[StaticRouteInfo]): The static routes of this application.
sinks (List[SinkInfo]): The sinks of this application.
error_handlers (List[ErrorHandlerInfo]): The error handlers of this application.
asgi (bool): Whether or not this is an ASGI application.
"""
__visit_name__ = 'app'
def __init__(
self,
routes: List[RouteInfo],
middleware: MiddlewareInfo,
static_routes: List[StaticRouteInfo],
sinks: List[SinkInfo],
error_handlers: List[ErrorHandlerInfo],
asgi: bool,
):
self.routes = routes
self.middleware = middleware
self.static_routes = static_routes
self.sinks = sinks
self.error_handlers = error_handlers
self.asgi = asgi
def to_string(self, verbose=False, internal=False, name='') -> str:
"""Return a string representation of this class.
Args:
verbose (bool, optional): Adds more information. Defaults to False.
internal (bool, optional): Also include internal falcon route methods
and error handlers. Defaults to ``False``.
name (str, optional): The name of the application, to be output at the
beginning of the text. Defaults to ``'Falcon App'``.
Returns:
str: A string representation of the application.
"""
return StringVisitor(verbose, internal, name).process(self)
# ------------------------------------------------------------------------
# Visitor classes
# ------------------------------------------------------------------------
class InspectVisitor:
"""Base visitor class that implements the `process` method.
Subclasses must implement ``visit_<name>`` methods for each supported class.
"""
def process(self, instance: _Traversable):
"""Process the instance, by calling the appropriate visit method.
Uses the `__visit_name__` attribute of the `instance` to obtain the method to use.
Args:
instance (_Traversable): The instance to process.
"""
try:
return getattr(self, 'visit_{}'.format(instance.__visit_name__))(instance)
except AttributeError as e:
raise RuntimeError(
'This visitor does not support {}'.format(type(instance))
) from e
class StringVisitor(InspectVisitor):
"""Visitor that returns a string representation of the info class.
This is used automatically by calling ``to_string()`` on the info class.
It can also be used directly by calling ``StringVisitor.process(info_instance)``.
Args:
verbose (bool, optional): Adds more information. Defaults to ``False``.
internal (bool, optional): Also include internal route methods
and error handlers added by the framework. Defaults to ``False``.
name (str, optional): The name of the application, to be output at the
beginning of the text. Defaults to ``'Falcon App'``.
"""
def __init__(self, verbose=False, internal=False, name=''):
self.verbose = verbose
self.internal = internal
self.name = name
self.indent = 0
@property
def tab(self):
"""Get the current tabulation."""
return ' ' * self.indent
def visit_route_method(self, route_method: RouteMethodInfo) -> str:
"""Visit a RouteMethodInfo instance. Usually called by `process`."""
text = '{0.method} - {0.function_name}'.format(route_method)
if self.verbose:
text += ' ({0.source_info})'.format(route_method)
return text
def _methods_to_string(self, methods: List):
"""Return a string from the list of methods."""
tab = self.tab + ' ' * 3
methods = _filter_internal(methods, self.internal)
if not methods:
return ''
text_list = [self.process(m) for m in methods]
method_text = ['{}├── {}'.format(tab, m) for m in text_list[:-1]]
method_text += ['{}└── {}'.format(tab, m) for m in text_list[-1:]]
return '\n'.join(method_text)
def visit_route(self, route: RouteInfo) -> str:
"""Visit a RouteInfo instance. Usually called by `process`."""
text = '{0}⇒ {1.path} - {1.class_name}'.format(self.tab, route)
if self.verbose:
text += ' ({0.source_info})'.format(route)
method_text = self._methods_to_string(route.methods)
if not method_text:
return text
return '{}:\n{}'.format(text, method_text)
def visit_static_route(self, static_route: StaticRouteInfo) -> str:
"""Visit a StaticRouteInfo instance. Usually called by `process`."""
text = '{0}↦ {1.prefix} {1.directory}'.format(self.tab, static_route)
if static_route.fallback_filename:
text += ' [{0.fallback_filename}]'.format(static_route)
return text
def visit_sink(self, sink: SinkInfo) -> str:
"""Visit a SinkInfo instance. Usually called by `process`."""
text = '{0}⇥ {1.prefix} {1.name}'.format(self.tab, sink)
if self.verbose:
text += ' ({0.source_info})'.format(sink)
return text
def visit_error_handler(self, error_handler: ErrorHandlerInfo) -> str:
"""Visit a ErrorHandlerInfo instance. Usually called by `process`."""
text = '{0}⇜ {1.error} {1.name}'.format(self.tab, error_handler)
if self.verbose:
text += ' ({0.source_info})'.format(error_handler)
return text
def visit_middleware_method(self, middleware_method: MiddlewareMethodInfo) -> str:
"""Visit a MiddlewareMethodInfo instance. Usually called by `process`."""
text = '{0.function_name}'.format(middleware_method)
if self.verbose:
text += ' ({0.source_info})'.format(middleware_method)
return text
def visit_middleware_class(self, middleware_class: MiddlewareClassInfo) -> str:
"""Visit a ErrorHandlerInfo instance. Usually called by `process`."""
text = '{0}↣ {1.name}'.format(self.tab, middleware_class)
if self.verbose:
text += ' ({0.source_info})'.format(middleware_class)
method_text = self._methods_to_string(middleware_class.methods)
if not method_text:
return text
return '{}:\n{}'.format(text, method_text)
def visit_middleware_tree_item(self, mti: MiddlewareTreeItemInfo) -> str:
"""Visit a MiddlewareTreeItemInfo instance. Usually called by `process`."""
symbol = mti._symbols.get(mti.name, '→')
return '{0}{1} {2.class_name}.{2.name}'.format(self.tab, symbol, mti)
def visit_middleware_tree(self, m_tree: MiddlewareTreeInfo) -> str:
"""Visit a MiddlewareTreeInfo instance. Usually called by `process`."""
before = len(m_tree.request) + len(m_tree.resource)
after = len(m_tree.response)
if before + after == 0:
return ''
each = 2
initial = self.indent
if after > before:
self.indent += each * (after - before)
text = []
for r in m_tree.request:
text.append(self.process(r))
self.indent += each
if text:
text.append('')
for r in m_tree.resource:
text.append(self.process(r))
self.indent += each
if m_tree.resource or not text:
text.append('')
self.indent += each
text.append('{}├── Process route responder'.format(self.tab))
self.indent -= each
if m_tree.response:
text.append('')
for r in m_tree.response:
self.indent -= each
text.append(self.process(r))
self.indent = initial
return '\n'.join(text)
def visit_middleware(self, middleware: MiddlewareInfo) -> str:
"""Visit a MiddlewareInfo instance. Usually called by `process`."""
text = self.process(middleware.middleware_tree)
if self.verbose:
self.indent += 4
m_text = '\n'.join(self.process(m) for m in middleware.middleware_classes)
self.indent -= 4
if m_text:
text += '\n{}- Middlewares classes:\n{}'.format(self.tab, m_text)
return text
def visit_app(self, app: AppInfo) -> str:
"""Visit a AppInfo instance. Usually called by `process`."""
type_ = 'ASGI' if app.asgi else 'WSGI'
self.indent = 4
text = '{} ({})'.format(self.name or 'Falcon App', type_)
if app.routes:
routes = '\n'.join(self.process(r) for r in app.routes)
text += '\n• Routes:\n{}'.format(routes)
middleware_text = self.process(app.middleware)
if middleware_text:
text += '\n• Middleware ({}):\n{}'.format(
app.middleware.independent_text, middleware_text
)
if app.static_routes:
static_routes = '\n'.join(self.process(sr) for sr in app.static_routes)
text += '\n• Static routes:\n{}'.format(static_routes)
if app.sinks:
sinks = '\n'.join(self.process(s) for s in app.sinks)
text += '\n• Sinks:\n{}'.format(sinks)
errors = _filter_internal(app.error_handlers, self.internal)
if errors:
errs = '\n'.join(self.process(e) for e in errors)
text += '\n• Error handlers:\n{}'.format(errs)
return text
# ------------------------------------------------------------------------
# Helper functions
# ------------------------------------------------------------------------
def _get_source_info(obj, default='[unknown file]'):
"""Try to get the definition file and line of obj.
Return default on error.
"""
try:
source_file = inspect.getsourcefile(obj)
source_lines = inspect.findsource(obj)
source_info = '{}:{}'.format(source_file, source_lines[1])
except Exception:
# NOTE(vytas): If Falcon is cythonized, all default
# responders coming from cythonized modules will
# appear as built-in functions, and raise a
# TypeError when trying to locate the source file.
source_info = default
return source_info
def _get_source_info_and_name(obj):
"""Attempt to get the definition file and line of obj and its name."""
source_info = _get_source_info(obj, None)
if source_info is None:
        # NOTE(caselit): class instances return None. Try the type
source_info = _get_source_info(type(obj))
name = getattr(obj, '__name__', None)
if name is None:
name = getattr(type(obj), '__name__', '[unknown]')
return source_info, name
def _is_internal(obj):
"""Check if the module of the object is a falcon module."""
module = inspect.getmodule(obj)
if module:
return module.__name__.startswith('falcon.')
return False
def _filter_internal(iterable, return_internal):
"""Filter the internal elements of an iterable."""
if return_internal:
return iterable
return [el for el in iterable if not el.internal]
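# Example usage (illustrative sketch, not part of the original module). It relies on the
# public ``inspect_app`` helper from this module; ``ThingsResource`` is a hypothetical
# user-defined responder class.
#
#     import falcon
#     from falcon import inspect
#
#     app = falcon.App()
#     app.add_route('/things', ThingsResource())
#     app_info = inspect.inspect_app(app)
#     print(app_info.to_string(verbose=True))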
| 34.040506 | 93 | 0.622564 | ["Apache-2.0"] | hzdwang/falcon-1 | falcon/inspect.py | 26,938 | Python |
# -*- coding: utf-8 -*-
# Copyright 2019 Spotify AB. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import configparser
import datetime
import hashlib
import os
import shutil
import tempfile
import unittest
import medusa.storage.abstract_storage
from medusa.backup import generate_md5_hash
from medusa.config import MedusaConfig, StorageConfig, _namedtuple_from_dict, CassandraConfig
from medusa.index import build_indices
from medusa.storage import Storage
class RestoreNodeTest(unittest.TestCase):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.local_storage_dir = "/tmp/medusa_local_storage"
self.medusa_bucket_dir = "/tmp/medusa_test_bucket"
def setUp(self):
if os.path.isdir(self.local_storage_dir):
shutil.rmtree(self.local_storage_dir)
if os.path.isdir(self.medusa_bucket_dir):
shutil.rmtree(self.medusa_bucket_dir)
os.makedirs(self.local_storage_dir)
config = configparser.ConfigParser(interpolation=None)
config['storage'] = {
'host_file_separator': ',',
'bucket_name': 'medusa_test_bucket',
'key_file': '',
'storage_provider': 'local',
'prefix': '',
'fqdn': '127.0.0.1',
'api_key_or_username': '',
'api_secret_or_password': '',
'base_path': '/tmp'
}
config['cassandra'] = {
'is_ccm': 1
}
self.config = MedusaConfig(
storage=_namedtuple_from_dict(StorageConfig, config['storage']),
cassandra=_namedtuple_from_dict(CassandraConfig, config['cassandra']),
monitoring={},
ssh=None,
restore=None
)
self.storage = Storage(config=self.config.storage)
def test_add_object_from_string(self):
file_content = "content of the test file"
self.storage.storage_driver.upload_blob_from_string("test1/file.txt", file_content)
self.assertEqual(self.storage.storage_driver.get_blob_content_as_string("test1/file.txt"), file_content)
def test_download_blobs(self):
files_to_download = list()
file1_content = "content of the test file1"
file2_content = "content of the test file2"
self.storage.storage_driver.upload_blob_from_string("test_download_blobs1/file1.txt", file1_content)
files_to_download.append("test_download_blobs1/file1.txt")
self.storage.storage_driver.upload_blob_from_string("test_download_blobs2/file2.txt", file2_content)
files_to_download.append("test_download_blobs2/file2.txt")
self.assertEqual(len(os.listdir(self.medusa_bucket_dir)), 2)
self.storage.storage_driver.download_blobs(files_to_download, self.local_storage_dir)
self.assertEqual(len(os.listdir(self.local_storage_dir)), 2)
def test_list_objects(self):
file1_content = "content of the test file1"
file2_content = "content of the test file2"
self.storage.storage_driver.upload_blob_from_string("test_download_blobs1/file1.txt", file1_content)
self.storage.storage_driver.upload_blob_from_string("test_download_blobs2/file2.txt", file2_content)
objects = self.storage.storage_driver.list_objects()
self.assertEqual(len(objects), 2)
one_object = self.storage.storage_driver.list_objects("test_download_blobs2")
self.assertEqual(len(one_object), 1)
def test_read_blob(self):
file1_content = "content of the test file1"
self.storage.storage_driver.upload_blob_from_string("test_download_blobs1/file1.txt", file1_content)
objects = self.storage.storage_driver.list_objects("test_download_blobs1")
object_content = self.storage.storage_driver.read_blob_as_string(objects[0])
self.assertEqual(object_content, file1_content)
def test_get_blob(self):
file1_content = "content of the test file1"
self.storage.storage_driver.upload_blob_from_string("test_download_blobs1/file1.txt", file1_content)
obj = self.storage.storage_driver.get_blob("test_download_blobs1/file1.txt")
self.assertEqual(obj.name, "test_download_blobs1/file1.txt")
def test_read_blob_as_bytes(self):
file1_content = "content of the test file1"
self.storage.storage_driver.upload_blob_from_string("test_download_blobs1/file1.txt", file1_content)
object_content = self.storage.storage_driver.get_blob_content_as_bytes("test_download_blobs1/file1.txt")
self.assertEqual(object_content, b"content of the test file1")
def test_verify_hash(self):
file1_content = "content of the test file1"
manifest = self.storage.storage_driver.upload_blob_from_string("test_download_blobs1/file1.txt", file1_content)
obj = self.storage.storage_driver.get_blob("test_download_blobs1/file1.txt")
self.assertEqual(manifest.MD5, obj.hash)
def test_hashes_match(self):
# Should match
hash1 = "S1EAM/BVMqhbJnAUs/nWlQ=="
hash2 = "4b510033f05532a85b267014b3f9d695"
self.assertTrue(
medusa.storage.abstract_storage.AbstractStorage.hashes_match(hash1, hash2)
)
# Should match
hash1 = "4b510033f05532a85b267014b3f9d695"
hash2 = "4b510033f05532a85b267014b3f9d695"
self.assertTrue(
medusa.storage.abstract_storage.AbstractStorage.hashes_match(hash1, hash2)
)
# Should not match
hash1 = "S1EAM/BVMqhbJnAUs/nWlQsdfsdf=="
hash2 = "4b510033f05532a85b267014b3f9d695"
self.assertFalse(
medusa.storage.abstract_storage.AbstractStorage.hashes_match(hash1, hash2)
)
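        # NOTE: the matching pair above is one and the same 16-byte MD5 digest, written once
        # as base64 ("S1EAM/...") and once as hex ("4b5100..."); hashes_match() accepts both forms.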
def test_generate_md5_hash(self):
with tempfile.NamedTemporaryFile() as tf:
# write random bytes
two_megabytes = 2 * 1024 * 1024
tf.write(os.urandom(two_megabytes))
tf.flush()
# compute checksum of the whole file at once
tf.seek(0)
checksum_full = hashlib.md5(tf.read()).digest()
            digest_full = base64.encodebytes(checksum_full).decode('UTF-8').strip()
# compute checksum using default-size chunks
tf.seek(0)
digest_chunk = generate_md5_hash(tf.name)
# compare the digests
self.assertEqual(digest_chunk, digest_full)
# compute checksum using custom size chunks
tf.seek(0)
self.assertEqual(digest_full, generate_md5_hash(tf.name, block_size=128))
tf.seek(0)
self.assertEqual(digest_full, generate_md5_hash(tf.name, block_size=256))
tf.seek(0)
self.assertEqual(digest_full, generate_md5_hash(tf.name, block_size=1024))
tf.seek(0)
self.assertEqual(digest_full, generate_md5_hash(tf.name, block_size=100000000)) # 100M
tf.seek(0)
self.assertEqual(digest_full, generate_md5_hash(tf.name, block_size=-1))
tf.seek(0)
self.assertNotEqual(digest_full, generate_md5_hash(tf.name, block_size=0))
def test_get_object_datetime(self):
file1_content = "content of the test file1"
self.storage.storage_driver.upload_blob_from_string("test_download_blobs1/file1.txt", file1_content)
obj = self.storage.storage_driver.get_blob("test_download_blobs1/file1.txt")
self.assertEqual(
datetime.datetime.fromtimestamp(int(obj.extra["modify_time"])),
self.storage.storage_driver.get_object_datetime(obj)
)
def test_get_fqdn_from_backup_index_blob(self):
blob_name = "index/backup_index/2019051307/manifest_node1.whatever.com.json"
self.assertEqual(
"node1.whatever.com",
self.storage.get_fqdn_from_any_index_blob(blob_name)
)
blob_name = "index/backup_index/2019051307/schema_node2.whatever.com.cql"
self.assertEqual(
"node2.whatever.com",
self.storage.get_fqdn_from_any_index_blob(blob_name)
)
blob_name = "index/backup_index/2019051307/schema_node3.whatever.com.txt"
self.assertEqual(
"node3.whatever.com",
self.storage.get_fqdn_from_any_index_blob(blob_name)
)
blob_name = "index/backup_index/2019051307/schema_node_with_underscores.whatever.com.txt"
self.assertEqual(
"node_with_underscores.whatever.com",
self.storage.get_fqdn_from_any_index_blob(blob_name)
)
def test_get_fqdn_from_any_index_blob(self):
blob_name = "tokenmap_hostname-with-dashes-and-3-numbers.json"
self.assertEqual(
"hostname-with-dashes-and-3-numbers",
self.storage.get_fqdn_from_any_index_blob(blob_name)
)
blob_name = "tokenmap_hostname-with-dashes.and-dots.json"
self.assertEqual(
"hostname-with-dashes.and-dots",
self.storage.get_fqdn_from_any_index_blob(blob_name)
)
blob_name = "tokenmap_hostname_with-underscores.and-dots-and.dashes.json"
self.assertEqual(
"hostname_with-underscores.and-dots-and.dashes",
self.storage.get_fqdn_from_any_index_blob(blob_name)
)
blob_name = "index/bi/third_backup/finished_localhost_1574343029.timestamp"
self.assertEqual(
"localhost",
self.storage.get_fqdn_from_any_index_blob(blob_name)
)
def test_parse_backup_index(self):
file_content = "content of the test file"
# SSTables for node1 and backup1
self.storage.storage_driver.upload_blob_from_string("node1/backup1/data/ks1/sstable1.db", file_content)
self.storage.storage_driver.upload_blob_from_string("node1/backup1/data/ks1/sstable2.db", file_content)
# Metadata for node1 and backup1
self.storage.storage_driver.upload_blob_from_string("node1/backup1/meta/tokenmap.json", file_content)
self.storage.storage_driver.upload_blob_from_string("node1/backup1/meta/manifest.json", file_content)
self.storage.storage_driver.upload_blob_from_string("node1/backup1/meta/schema.cql", file_content)
# SSTables for node2 and backup1
self.storage.storage_driver.upload_blob_from_string("node2/backup1/data/ks1/sstable1.db", file_content)
self.storage.storage_driver.upload_blob_from_string("node2/backup1/data/ks1/sstable2.db", file_content)
# Metadata for node2 and backup1
self.storage.storage_driver.upload_blob_from_string("node2/backup1/meta/tokenmap.json", file_content)
self.storage.storage_driver.upload_blob_from_string("node2/backup1/meta/manifest.json", file_content)
self.storage.storage_driver.upload_blob_from_string("node2/backup1/meta/schema.cql", file_content)
# SSTables for node1 and backup2
self.storage.storage_driver.upload_blob_from_string("node1/backup2/data/ks1/sstable1.db", file_content)
self.storage.storage_driver.upload_blob_from_string("node1/backup2/data/ks1/sstable2.db", file_content)
# Metadata for node1 and backup2
self.storage.storage_driver.upload_blob_from_string("node1/backup2/meta/tokenmap.json", file_content)
self.storage.storage_driver.upload_blob_from_string("node1/backup2/meta/manifest.json", file_content)
self.storage.storage_driver.upload_blob_from_string("node1/backup2/meta/schema.cql", file_content)
build_indices(self.config, False)
path = 'index/backup_index'
backup_index = self.storage.storage_driver.list_objects(path)
blobs_by_backup = self.storage.group_backup_index_by_backup_and_node(backup_index)
self.assertTrue("backup1" in blobs_by_backup)
self.assertTrue("backup2" in blobs_by_backup)
self.assertTrue("node1" in blobs_by_backup["backup1"])
self.assertTrue("node2" in blobs_by_backup["backup1"])
self.assertTrue("node1" in blobs_by_backup["backup2"])
self.assertFalse("node2" in blobs_by_backup["backup2"])
def test_remove_extension(self):
self.assertEqual(
'localhost',
self.storage.remove_extension('localhost.txt')
)
self.assertEqual(
'localhost',
self.storage.remove_extension('localhost.timestamp')
)
self.assertEqual(
'localhost',
self.storage.remove_extension('localhost.cql')
)
self.assertEqual(
'localhost.foo',
self.storage.remove_extension('localhost.foo')
)
def test_get_timestamp_from_blob_name(self):
self.assertEqual(
1558021519,
self.storage.get_timestamp_from_blob_name('finished_localhost_1558021519.timestamp')
)
self.assertEqual(
1558021519,
self.storage.get_timestamp_from_blob_name('finished_some.host.net_1558021519.timestamp')
)
self.assertEqual(
1558021519,
self.storage.get_timestamp_from_blob_name('finished_some_underscores.host.net_1558021519.timestamp')
)
self.assertEqual(
1574343029,
self.storage.get_timestamp_from_blob_name('index/bi/third_backup/finished_localhost_1574343029.timestamp')
)
if __name__ == '__main__':
unittest.main()
| 45.035599 | 119 | 0.69359 | ["Apache-2.0"] | phanirajl/cassandra-medusa | tests/storage_test.py | 13,916 | Python |
import time
import matplotlib.pyplot as plt
import matplotlib.dates as mdate
import numpy as np
import rrdtool
start = 2628000
end = 0
if int(end) <= 0:
end = 2
if int(start) <= 0:
start = 600
epochTimeNow = int(time.time()-1)
data = rrdtool.fetch('/home/bca/rrdtoolfilesave/powerCapturenew.rrd', 'AVERAGE',
'--start', f'-{start}',
'--end', f'-{end}')
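# rrdtool.fetch returns ((start, end, step), ds_names, rows); data[2] therefore holds the
# sampled values, one tuple per step (None where the RRD has no data).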
values = np.array(data[2])
values[values == None] = 0
epochEndTime = epochTimeNow - int(end)
epochStartTime = epochTimeNow - int(start)
timeseries = np.zeros(shape=((epochEndTime-epochStartTime + 1), 1))
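# Build one timestamp per second of the requested window; the +7200 s offset presumably
# shifts the UTC epoch values to local time (UTC+2) for plotting.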
for i in range (epochEndTime - epochStartTime + 1):
timeseries[i] = epochStartTime + 7200 + i
fig, ax = plt.subplots()
timeseries = mdate.epoch2num(timeseries)
ax.plot_date(timeseries, values, linestyle = '-', marker = '', label=f'AllThePower')
timeseriesFormat = '%d-%m-%y %H:%M:%S'
timeseriesFormatted = mdate.DateFormatter(timeseriesFormat)
ax.xaxis.set_major_formatter(timeseriesFormatted)
fig.autofmt_xdate()
plt.ylim(bottom = 0)
StartTime = time.strftime('%Y-%m-%d [%H:%M:%S]', time.localtime(epochStartTime))
EndTime = time.strftime('%Y-%m-%d [%H:%M:%S]', time.localtime(epochEndTime))
plt.ylabel('Watt')
plt.title(f'Time range: {StartTime} - {EndTime}')
plt.tight_layout()
plt.legend()
plt.show()
plt.close()
| 29.130435 | 84 | 0.680597 | ["MIT"] | SelectLOL1/BeagleBoneBlack_PRU_PowerMeter | RRDGraphs/rrd_1month.py | 1,340 | Python |
# Copyright 2017 Division of Medical Image Computing, German Cancer Research Center (DKFZ)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from builtins import range
import numpy as np
from batchgenerators.augmentations.utils import pad_nd_image
def center_crop(data, crop_size, seg=None):
return crop(data, seg, crop_size, 0, 'center')
def get_lbs_for_random_crop(crop_size, data_shape, margins):
"""
:param crop_size:
:param data_shape: (b,c,x,y(,z)) must be the whole thing!
:param margins:
:return:
"""
lbs = []
for i in range(len(data_shape) - 2):
if data_shape[i+2] - crop_size[i] - margins[i] > margins[i]:
lbs.append(np.random.randint(margins[i], data_shape[i+2] - crop_size[i] - margins[i]))
else:
lbs.append((data_shape[i+2] - crop_size[i]) // 2)
return lbs
def get_lbs_for_center_crop(crop_size, data_shape):
"""
:param crop_size:
:param data_shape: (b,c,x,y(,z)) must be the whole thing!
:return:
"""
lbs = []
for i in range(len(data_shape) - 2):
lbs.append((data_shape[i + 2] - crop_size[i]) // 2)
return lbs
def crop(data, seg=None, crop_size=128, margins=(0, 0, 0), crop_type="center",
pad_mode='constant', pad_kwargs={'constant_values': 0},
pad_mode_seg='constant', pad_kwargs_seg={'constant_values': 0}):
"""
crops data and seg (seg may be None) to crop_size. Whether this will be achieved via center or random crop is
    determined by crop_type. Margin will be respected only for random_crop and will prevent the crops from being closer
than margin to the respective image border. crop_size can be larger than data_shape - margin -> data/seg will be
padded with zeros in that case. margins can be negative -> results in padding of data/seg followed by cropping with
margin=0 for the appropriate axes
:param data: b, c, x, y(, z)
:param seg:
:param crop_size:
:param margins: distance from each border, can be int or list/tuple of ints (one element for each dimension).
Can be negative (data/seg will be padded if needed)
:param crop_type: random or center
:return:
"""
if not isinstance(data, (list, tuple, np.ndarray)):
raise TypeError("data has to be either a numpy array or a list")
data_shape = tuple([len(data)] + list(data[0].shape))
data_dtype = data[0].dtype
dim = len(data_shape) - 2
if seg is not None:
seg_shape = tuple([len(seg)] + list(seg[0].shape))
seg_dtype = seg[0].dtype
if not isinstance(seg, (list, tuple, np.ndarray)):
raise TypeError("data has to be either a numpy array or a list")
assert all([i == j for i, j in zip(seg_shape[2:], data_shape[2:])]), "data and seg must have the same spatial " \
"dimensions. Data: %s, seg: %s" % \
(str(data_shape), str(seg_shape))
if type(crop_size) not in (tuple, list, np.ndarray):
crop_size = [crop_size] * dim
else:
assert len(crop_size) == len(
data_shape) - 2, "If you provide a list/tuple as center crop make sure it has the same dimension as your " \
"data (2d/3d)"
if not isinstance(margins, (np.ndarray, tuple, list)):
margins = [margins] * dim
data_return = np.zeros([data_shape[0], data_shape[1]] + list(crop_size), dtype=data_dtype)
if seg is not None:
seg_return = np.zeros([seg_shape[0], seg_shape[1]] + list(crop_size), dtype=seg_dtype)
else:
seg_return = None
for b in range(data_shape[0]):
data_shape_here = [data_shape[0]] + list(data[b].shape)
if seg is not None:
seg_shape_here = [seg_shape[0]] + list(seg[b].shape)
if crop_type == "center":
lbs = get_lbs_for_center_crop(crop_size, data_shape_here)
elif crop_type == "random":
lbs = get_lbs_for_random_crop(crop_size, data_shape_here, margins)
else:
raise NotImplementedError("crop_type must be either center or random")
need_to_pad = [[0, 0]] + [[abs(min(0, lbs[d])),
abs(min(0, data_shape_here[d + 2] - (lbs[d] + crop_size[d])))]
for d in range(dim)]
# we should crop first, then pad -> reduces i/o for memmaps, reduces RAM usage and improves speed
ubs = [min(lbs[d] + crop_size[d], data_shape_here[d+2]) for d in range(dim)]
lbs = [max(0, lbs[d]) for d in range(dim)]
slicer_data = [slice(0, data_shape_here[1])] + [slice(lbs[d], ubs[d]) for d in range(dim)]
data_cropped = data[b][tuple(slicer_data)]
if seg_return is not None:
slicer_seg = [slice(0, seg_shape_here[1])] + [slice(lbs[d], ubs[d]) for d in range(dim)]
seg_cropped = seg[b][tuple(slicer_seg)]
if any([i > 0 for j in need_to_pad for i in j]):
data_return[b] = np.pad(data_cropped, need_to_pad, pad_mode, **pad_kwargs)
if seg_return is not None:
seg_return[b] = np.pad(seg_cropped, need_to_pad, pad_mode_seg, **pad_kwargs_seg)
else:
data_return[b] = data_cropped
if seg_return is not None:
seg_return[b] = seg_cropped
return data_return, seg_return
def random_crop(data, seg=None, crop_size=128, margins=[0, 0, 0]):
return crop(data, seg, crop_size, margins, 'random')
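# Example (illustrative sketch): center-crop a batch of 2 single-channel 128x128 images
# down to 64x64. The random array stands in for real image data.
#
#     example_data = np.random.rand(2, 1, 128, 128)
#     cropped, _ = center_crop(example_data, (64, 64))
#     assert cropped.shape == (2, 1, 64, 64)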
def pad_nd_image_and_seg(data, seg, new_shape=None, must_be_divisible_by=None, pad_mode_data='constant',
np_pad_kwargs_data=None, pad_mode_seg='constant', np_pad_kwargs_seg=None):
"""
Pads data and seg to new_shape. new_shape is thereby understood as min_shape (if data/seg is already larger then
new_shape the shape stays the same for the dimensions this applies)
:param data:
:param seg:
:param new_shape: if none then only must_be_divisible_by is applied
    :param must_be_divisible_by: UNet like architectures sometimes require the input to be divisible by some number. This
    will modify new_shape if new_shape is not divisible by this (by increasing it accordingly).
must_be_divisible_by should be a list of int (one for each spatial dimension) and this list must have the same
length as new_shape
:param pad_mode_data: see np.pad
:param np_pad_kwargs_data:see np.pad
:param pad_mode_seg:see np.pad
:param np_pad_kwargs_seg:see np.pad
:return:
"""
sample_data = pad_nd_image(data, new_shape, mode=pad_mode_data, kwargs=np_pad_kwargs_data,
return_slicer=False, shape_must_be_divisible_by=must_be_divisible_by)
if seg is not None:
sample_seg = pad_nd_image(seg, new_shape, mode=pad_mode_seg, kwargs=np_pad_kwargs_seg,
return_slicer=False, shape_must_be_divisible_by=must_be_divisible_by)
else:
sample_seg = None
return sample_data, sample_seg
| 44.209302 | 121 | 0.642951 | ["BSD-3-Clause"] | bowang-lab/shape-attentive-unet | data/crop_and_pad_augmentations.py | 7,604 | Python |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This script is used to synthesize generated parts of this library."""
import synthtool as s
import synthtool.gcp as gcp
import synthtool.languages.node as node
import logging
logging.basicConfig(level=logging.DEBUG)
# run the gapic generator
gapic = gcp.GAPICBazel()
versions = ['v1beta2']
name = 'memcache'
for version in versions:
library = gapic.node_library(name, version)
s.copy(library, excludes=['package.json', 'README.md'])
# Copy common templates
common_templates = gcp.CommonTemplates()
templates = common_templates.node_library(source_location='build/src')
s.copy(templates, excludes=[])
node.postprocess_gapic_library()
| 32.891892 | 74 | 0.771569 | ["Apache-2.0"] | Global19/nodejs-memcache | synth.py | 1,217 | Python |
# Data Preprocessing Template
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Importing the dataset
dataset = pd.read_csv('Data.csv')
X = dataset.iloc[:, :-1].values
y = dataset.iloc[:, 3].values
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)
# Feature Scaling
"""from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
X_train = sc_X.fit_transform(X_train)
X_test = sc_X.transform(X_test)
sc_y = StandardScaler()
y_train = sc_y.fit_transform(y_train)"""
| 29.73913 | 92 | 0.773392 | ["MIT"] | BharathKumarRavichandran/ML-A-Z | Part 2 - Regression/Section 4 - Simple Linear Regression/data_preprocessing_template.py | 684 | Python |
import serial
import pynmea2
# Testing with the blue USB dongle
ser = serial.Serial('/dev/ttyUSB0',4800)
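# Example of a $GPGGA fix sentence this loop is expected to parse (illustrative, taken
# from common NMEA documentation):
#   $GPGGA,123519,4807.038,N,01131.000,E,1,08,0.9,545.4,M,46.9,M,,*47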
while 1:
try:
data = ser.readline().decode('utf-8')
if(data.startswith("$GPGGA")):
parse = pynmea2.parse(data)
print(repr(parse))
except UnicodeDecodeError:
        continue
| 23.357143 | 45 | 0.617737 | ["MIT"] | Raniita/Accuatic-Probe | CTD_controller/gps_test1.py | 327 | Python |
####################################################################################################
"""
adres_dataset.py
This module implements several classes to perform dataset-specific downloading, saving and
data-transformation operations.
Written by Swaan Dekkers & Thomas Jongstra
"""
####################################################################################################
#############
## Imports ##
#############
from pathlib import Path
import pandas.io.sql as sqlio
import pandas as pd
import numpy as np
import requests
import psycopg2
import time
import os
import re
# Import own modules.
import datasets, clean
# Define HOME and DATA_PATH on a global level.
HOME = Path.home() # Home path for old VAO.
# USERNAME = os.path.basename(HOME)
# HOME = os.path.join('/data', USERNAME) # Set home for new VAO.
DATA_PATH = os.path.join(HOME, 'Documents/woonfraude/data/')
########################
## AdresDataset class ##
########################
class AdresDataset(datasets.MyDataset):
"""Create a dataset for the adres data."""
# Set the class attributes.
name = 'adres'
table_name = 'import_adres'
id_column = 'adres_id'
def extract_leegstand(self):
"""Create a column indicating leegstand (no inhabitants on the address)."""
self.data['leegstand'] = ~self.data.inwnrs.notnull()
self.version += '_leegstand'
self.save()
def enrich_with_woning_id(self):
"""Add woning ids to the adres dataframe."""
adres_periodes = datasets.download_dataset('bwv_adres_periodes', 'bwv_adres_periodes')
self.data = self.data.merge(adres_periodes[['ads_id', 'wng_id']], how='left', left_on='adres_id', right_on='ads_id')
self.version += '_woningId'
self.save()
def prepare_bag(self, bag):
# To int
bag['huisnummer_nummeraanduiding'] = bag['huisnummer_nummeraanduiding'].astype(int)
bag['huisnummer_nummeraanduiding'] = bag['huisnummer_nummeraanduiding'].replace(0, -1)
# Fillna and replace ''
bag['huisletter_nummeraanduiding'] = bag['huisletter_nummeraanduiding'].replace('', 'None')
# bag['_openbare_ruimte_naam@bag'] = bag['_openbare_ruimte_naam@bag'].fillna('None')
bag['_openbare_ruimte_naam_nummeraanduiding'] = bag['_openbare_ruimte_naam_nummeraanduiding'].replace('', 'None')
# bag['_huisnummer_toevoeging@bag'] = bag['_huisnummer_toevoeging@bag'].fillna('None')
bag['huisnummer_toevoeging_nummeraanduiding'] = bag['huisnummer_toevoeging_nummeraanduiding'].replace('', 'None')
return bag
def prepare_adres(self, adres):
# To int
adres['hsnr'] = adres['hsnr'].astype(int)
adres['hsnr'] = adres['hsnr'].replace(0, -1)
return adres
def replace_string_nan_adres(self, adres):
adres['hsnr'] = adres['hsnr'].replace(-1, np.nan)
adres['sttnaam'] = adres['sttnaam'].replace('None', np.nan)
adres['hsltr'] = adres['hsltr'].replace('None', np.nan)
adres['toev'] = adres['toev'].replace('None', np.nan)
adres['huisnummer_nummeraanduiding'] = adres['huisnummer_nummeraanduiding'].replace(-1, np.nan)
adres['huisletter_nummeraanduiding'] = adres['huisletter_nummeraanduiding'].replace('None', np.nan)
adres['_openbare_ruimte_naam_nummeraanduiding'] = adres['_openbare_ruimte_naam_nummeraanduiding'].replace('None', np.nan)
adres['huisnummer_toevoeging_nummeraanduiding'] = adres['huisnummer_toevoeging_nummeraanduiding'].replace('None', np.nan)
return adres
def match_bwv_bag(self, adres, bag):
# Merge dataframes on adres dataframe.
new_df = pd.merge(adres, bag, how='left', left_on=['sttnaam','hsnr'], right_on = ['_openbare_ruimte_naam_nummeraanduiding', 'huisnummer_nummeraanduiding'])
# Find id's that have a direct match and that have multiple matches.
g = new_df.groupby('adres_id')
df_direct = g.filter(lambda x: len(x) == 1)
df_multiple = g.filter(lambda x: len(x) > 1)
        # Narrow the multiple matches down using huisletter and toevoeging to keep only exact matches.
df_multiple = df_multiple[(df_multiple['hsltr'] == df_multiple['huisletter_nummeraanduiding']) & (df_multiple['toev'] == df_multiple['huisnummer_toevoeging_nummeraanduiding'])]
# Concat df_direct and df_multiple.
df_result = pd.concat([df_direct, df_multiple])
        # Because an object can be split, there may be two matching objects. Keep the oldest object with a definitive coordinate point.
df_result = df_result.sort_values(['adres_id', 'status_coordinaat_code'])
df_result = df_result.drop_duplicates(subset='adres_id', keep='first')
# Add adresses without match.
final_df = pd.merge(adres, df_result, how='left', on='adres_id', suffixes=('', '_y'))
final_df.drop(list(final_df.filter(regex='_y$')), axis=1, inplace=True)
# Set the name of the final adres dataframe again.
final_df.name = 'adres'
return final_df
def impute_values_for_bagless_addresses(self, adres):
"""Impute values for adresses where no BAG-match could be found."""
clean.impute_missing_values(adres)
# clean.impute_missing_values_mode(adres, ['status_coordinaat_code@bag'])
adres.fillna(value={'huisnummer_nummeraanduiding': 0,
'huisletter_nummeraanduiding': 'None',
'_openbare_ruimte_naam_nummeraanduiding': 'None',
'huisnummer_toevoeging_nummeraanduiding': 'None',
'type_woonobject_omschrijving': 'None',
'eigendomsverhouding_id': 'None',
'financieringswijze_id': -1,
'gebruik_id': -1,
'reden_opvoer_id': -1,
'status_id_verblijfsobject': -1,
'toegang_id': 'None'}, inplace=True)
return adres
def enrich_with_bag(self, bag):
"""Enrich the adres data with information from the BAG data. Uses the bag dataframe as input."""
bag = self.prepare_bag(bag)
self.data = self.prepare_adres(self.data)
self.data = self.match_bwv_bag(self.data, bag)
self.data = self.replace_string_nan_adres(self.data)
self.data = self.impute_values_for_bagless_addresses(self.data)
self.version += '_bag'
self.save()
print("The adres dataset is now enriched with BAG data.")
def enrich_with_personen_features(self, personen):
"""Add aggregated features relating to persons to the address dataframe. Uses the personen dataframe as input."""
# Create simple handle to the adres data.
adres = self.data
# Compute age of people in years (float)
today = pd.to_datetime('today')
# Set all dates within range allowed by Pandas (584 years?)
personen['geboortedatum'] = pd.to_datetime(personen['geboortedatum'], errors='coerce')
# Get the most frequent birthdate (mode).
geboortedatum_mode = personen['geboortedatum'].mode()[0]
# Compute the age (result is a TimeDelta).
personen['leeftijd'] = today - personen['geboortedatum']
        # Convert the age to an approximation in years ("smearing out" the leap years).
personen['leeftijd'] = personen['leeftijd'].apply(lambda x: x.days / 365.25)
# Find the matching address ids between the adres df and the personen df.
adres_ids = adres.adres_id
personen_adres_ids = personen.ads_id_wa
intersect = set(adres_ids).intersection(set(personen_adres_ids))
# Iterate over all matching address ids and find all people at each address.
inhabitant_locs = {}
print("Now looping over all address ids that have a link with one or more inhabitants...")
for i, adres_id in enumerate(intersect):
if i % 1000 == 0:
print(i)
inhabitant_locs[adres_id] = personen_adres_ids[personen_adres_ids == adres_id]
# Create a new column in the dataframe showing the amount of people at each address.
# TODO: this step currently takes a few minutes to complete, should still be optimized.
adres['aantal_personen'] = 0
adres['aantal_vertrokken_personen'] = -1
adres['aantal_overleden_personen'] = -1
adres['aantal_niet_uitgeschrevenen'] = -1
adres['leegstand'] = True
adres['leeftijd_jongste_persoon'] = -1.
adres['leeftijd_oudste_persoon'] = -1.
adres['aantal_kinderen'] = 0
adres['percentage_kinderen'] = -1.
adres['aantal_mannen'] = 0
adres['percentage_mannen'] = -1.
adres['gemiddelde_leeftijd'] = -1.
adres['stdev_leeftijd'] = -1.
adres['aantal_achternamen'] = 0
adres['percentage_achternamen'] = -1.
for i in range(1,8):
adres[f'gezinsverhouding_{i}'] = 0
adres[f'percentage_gezinsverhouding_{i}'] = 0.
print("Now looping over all rows in the adres dataframe in order to add person information...")
for i in adres.index:
if i % 1000 == 0:
print(i)
row = adres.iloc[i]
adres_id = row['adres_id']
try:
# Get the inhabitants for the current address.
inhab_locs = inhabitant_locs[adres_id].keys()
inhab = personen.loc[inhab_locs]
# Check whether any registered inhabitants have left Amsterdam or have passed away.
aantal_vertrokken_personen = sum(inhab["vertrekdatum_adam"].notnull())
aantal_overleden_personen = sum(inhab["overlijdensdatum"].notnull())
aantal_niet_uitgeschrevenen = len(inhab[inhab["vertrekdatum_adam"].notnull() | inhab["overlijdensdatum"].notnull()])
                adres.at[i, 'aantal_vertrokken_personen'] = aantal_vertrokken_personen
                adres.at[i, 'aantal_overleden_personen'] = aantal_overleden_personen
                adres.at[i, 'aantal_niet_uitgeschrevenen'] = aantal_niet_uitgeschrevenen
# If there are more inhabitants than people that are incorrectly still registered, then there is no 'leegstand'.
if len(inhab) > aantal_niet_uitgeschrevenen:
                    adres.at[i, 'leegstand'] = False
                # Total number of persons (int).
aantal_personen = len(inhab)
adres.at[i, 'aantal_personen'] = aantal_personen
                # Age of the youngest person (float).
leeftijd_jongste_persoon = min(inhab['leeftijd'])
adres.at[i, 'leeftijd_jongste_persoon'] = leeftijd_jongste_persoon
                # Age of the oldest person (float).
leeftijd_oudste_persoon = max(inhab['leeftijd'])
adres.at[i, 'leeftijd_oudste_persoon'] = leeftijd_oudste_persoon
                # Number of children registered at the address (int/float).
aantal_kinderen = sum(inhab['leeftijd'] < 18)
adres.at[i, 'aantal_kinderen'] = aantal_kinderen
adres.at[i, 'percentage_kinderen'] = aantal_kinderen / aantal_personen
                # Number of men (int/float).
aantal_mannen = sum(inhab.geslacht == 'M')
adres.at[i, 'aantal_mannen'] = aantal_mannen
adres.at[i, 'percentage_mannen'] = aantal_mannen / aantal_personen
                # Average age (float).
gemiddelde_leeftijd = inhab.leeftijd.mean()
adres.at[i, 'gemiddelde_leeftijd'] = gemiddelde_leeftijd
                # Standard deviation of age (float). Set to 0 when the sample size is 1.
stdev_leeftijd = inhab.leeftijd.std()
adres.at[i, 'stdev_leeftijd'] = stdev_leeftijd if aantal_personen > 1 else 0
                # Number of distinct surnames (int/float).
aantal_achternamen = inhab.naam.nunique()
adres.at[i, 'aantal_achternamen'] = aantal_achternamen
adres.at[i, 'percentage_achternamen'] = aantal_achternamen / aantal_personen
                # Family relationship (frequency count per class) (int/float).
gezinsverhouding = inhab.gezinsverhouding.value_counts()
for key in gezinsverhouding.keys():
val = gezinsverhouding[key]
adres.at[i, f'gezinsverhouding_{key}'] = val
adres.at[i, f'percentage_gezinsverhouding_{key}'] = val / aantal_personen
except (KeyError, ValueError) as e:
pass
print("...done!")
self.data = adres
self.version += '_personen'
self.save()
print("The adres dataset is now enriched with personen data.")
def add_hotline_features(self, hotline):
"""Add the hotline features to the adres dataframe."""
# Create a temporary merged df using the adres and hotline dataframes.
merge = self.data.merge(hotline, on='wng_id', how='left')
# Create a group for each adres_id
adres_groups = merge.groupby(by='adres_id')
# Count the number of hotline meldingen per group/adres_id.
# 'id' should be the primary key of hotline df, so it is usable for hotline entry counting.
hotline_counts = adres_groups['id'].agg(['count'])
# Rename column
hotline_counts.columns = ['aantal_hotline_meldingen']
# Enrich the 'adres' dataframe with the computed hotline counts.
self.data = self.data.merge(hotline_counts, on='adres_id', how='left')
self.version += '_hotline'
self.save()
print("The adres dataset is now enriched with hotline data.") | 45.853821 | 184 | 0.624402 | [
"MIT"
] | petercuret/woonfraude | codebase/datasets/adres_dataset.py | 13,802 | Python |
'''
This file is part of PM4Py (More Info: https://pm4py.fit.fraunhofer.de).
PM4Py is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
PM4Py is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with PM4Py. If not, see <https://www.gnu.org/licenses/>.
'''
from pm4py.visualization.decisiontree import variants, visualizer
| 43.555556 | 76 | 0.741071 | [
"MIT"
] | Malekhy/ws2122-lspm | ws2122-lspm/Lib/site-packages/pm4py/visualization/decisiontree/__init__.py | 784 | Python |
from __future__ import division
from keras.callbacks import Callback
from .generic_plot import PlotLosses
metric2printable = {
"acc": "Accuracy",
"mean_squared_error": "Mean squared error",
"mean_absolute_error": "Mean absolute error",
"mean_absolute_percentage_error": "Mean absolute percentage error",
# etc
"categorical_crossentropy": "Log-loss",
"sparse_categorical_crossentropy": "Log-loss",
"binary_crossentropy": "Log-loss",
"kullback_leibler_divergence": "Log-loss"
}
def loss2name(loss):
if hasattr(loss, '__call__'):
# if passed as a function
return loss.__name__
else:
# if passed as a string
return loss
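# Typical usage (illustrative sketch): pass the callback to Keras' fit(); the variable
# names below are placeholders.
#
#     from livelossplot import PlotLossesKeras
#     model.fit(X_train, y_train,
#               validation_data=(X_valid, y_valid),
#               epochs=10,
#               callbacks=[PlotLossesKeras()])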
class PlotLossesKeras(Callback):
def __init__(self, **kwargs):
super(PlotLossesKeras, self).__init__()
self.liveplot = PlotLosses(**kwargs)
def on_train_begin(self, logs={}):
self.liveplot.set_metrics([
metric for metric in self.params['metrics']
if not metric.startswith('val_')
])
        # slightly convoluted due to model.compile(loss=...) handling
# vide https://github.com/keras-team/keras/blob/master/keras/engine/training.py
if isinstance(self.model.loss, list):
losses = self.model.loss
elif isinstance(self.model.loss, dict):
losses = list(self.model.loss.values())
else:
# by far the most common scenario
losses = [self.model.loss]
metric2printable_updated = metric2printable.copy()
loss_name = loss2name(losses[0])
metric2printable_updated['loss'] =\
"{} (cost function)".format(metric2printable_updated.get(loss_name, loss_name))
if len(losses) > 1:
for output_name, loss in zip(self.model.output_names, losses):
loss_name = loss2name(loss)
metric2printable_updated['{}_loss'.format(output_name)] =\
"{} ({})".format(metric2printable_updated.get(loss_name, loss_name), output_name)
else:
for output_name in self.model.output_names:
metric2printable_updated['{}_loss'.format(output_name)] =\
"{} ({})".format(metric2printable_updated.get(loss_name, loss_name), output_name)
self.liveplot.metric2title = metric2printable_updated
self.liveplot.set_max_epoch(self.params['epochs'])
def on_epoch_end(self, epoch, logs={}):
self.liveplot.update(logs.copy())
self.liveplot.draw()
| 36.142857 | 101 | 0.641107 | [
"MIT"
] | kkanska/livelossplot | livelossplot/keras_plot.py | 2,530 | Python |
import numpy as np
from .tensor import Function
# ************* unary ops *************
class ReLU(Function):
@staticmethod
def forward(ctx, input):
ctx.save_for_backward(input)
return np.maximum(input, 0)
@staticmethod
def backward(ctx, grad_output):
input, = ctx.saved_tensors
return grad_output * (input >= 0)
class Log(Function):
@staticmethod
def forward(ctx, input):
ctx.save_for_backward(input)
return np.log(input)
@staticmethod
def backward(ctx, grad_output):
input, = ctx.saved_tensors
return grad_output / input
class Exp(Function):
@staticmethod
def forward(ctx, input):
ret = np.exp(input)
ctx.save_for_backward(ret)
return ret
@staticmethod
def backward(ctx, grad_output):
ret, = ctx.saved_tensors
return grad_output * ret
# ************* reduce ops *************
class Sum(Function):
@staticmethod
def forward(ctx, input, axis=None):
ctx.save_for_backward(input, axis)
return np.array([input.sum()]) if axis is None else input.sum(axis=axis)
@staticmethod
def backward(ctx, grad_output):
input, axis = ctx.saved_tensors
axis = [axis] if type(axis) is int else axis
shape = [1 if axis is None or i in axis else input.shape[i] for i in range(len(input.shape))]
return grad_output.reshape(shape) + np.zeros_like(input)
class Max(Function):
@staticmethod
def forward(ctx, inp, axis=None):
axis = [axis] if type(axis) == int else axis
ret = np.amax(inp, axis=None if axis is None else tuple(axis), keepdims=True)
ctx.save_for_backward(inp, axis, ret)
if axis is not None:
ret = ret.reshape([inp.shape[i] for i in range(len(inp.shape)) if i not in axis])
return ret
@staticmethod
def backward(ctx, grad_output):
input, axis, ret = ctx.saved_tensors
shape = [1 if axis is None or i in axis else input.shape[i] for i in range(len(input.shape))]
ret2 = (input == ret.reshape(shape))
div = ret2.sum(axis=None if axis is None else tuple(axis), keepdims=True)
return ret2 * grad_output.reshape(shape) / div
# ************* binary ops *************
def unbroadcast(out, in_sh):
# adjoint operation to broadcast is sum. Need to sum all axis with 1 = in_sh[i] < out.shape[i]
sum_axis = tuple([i for i in range(len(in_sh)) if in_sh[i] == 1 and out.shape[i] > 1]) if in_sh != (1,) else None
return out.sum(axis=sum_axis).reshape(in_sh)
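# e.g. if an input of shape (1, 3) was broadcast against (4, 3) in the forward pass, the
# incoming gradient has shape (4, 3) and unbroadcast sums it over axis 0 back to (1, 3).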
class Add(Function):
@staticmethod
def forward(ctx, x, y):
ctx.save_for_backward(x.shape, y.shape)
return x + y
@staticmethod
def backward(ctx, grad_output):
shape_x, shape_y = ctx.saved_tensors
return unbroadcast(grad_output, shape_x), unbroadcast(grad_output, shape_y)
class Sub(Function):
@staticmethod
def forward(ctx, x, y):
ctx.save_for_backward(x.shape, y.shape)
return x - y
@staticmethod
def backward(ctx, grad_output):
shape_x, shape_y = ctx.saved_tensors
return unbroadcast(grad_output, shape_x), unbroadcast(-grad_output, shape_y)
class Mul(Function):
@staticmethod
def forward(ctx, x, y):
ctx.save_for_backward(x, y)
return x * y
@staticmethod
def backward(ctx, grad_output):
x, y = ctx.saved_tensors
return unbroadcast(y * grad_output, x.shape), unbroadcast(x * grad_output, y.shape)
class Pow(Function):
@staticmethod
def forward(ctx, x, y):
ctx.save_for_backward(x, y)
return x ** y
@staticmethod
def backward(ctx, grad_output):
x, y = ctx.saved_tensors
return unbroadcast(y * (x ** (y - 1.0)) * grad_output, x.shape), \
unbroadcast((x ** y) * np.log(x) * grad_output, y.shape)
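# Pow.backward applies d(x**y)/dx = y * x**(y - 1) and d(x**y)/dy = (x**y) * ln(x),
# then un-broadcasts each gradient back to its input's shape.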
# ************* movement ops *************
class Reshape(Function):
@staticmethod
def forward(ctx, x, shape):
ctx.save_for_backward(x.shape)
return x.reshape(shape)
@staticmethod
def backward(ctx, grad_output):
in_shape, = ctx.saved_tensors
return grad_output.reshape(in_shape)
class Transpose(Function):
@staticmethod
def forward(ctx, x, order):
ctx.save_for_backward(order)
return np.transpose(x, order)
@staticmethod
def backward(ctx, x):
return np.transpose(x, np.argsort(ctx.order))
def inner_slice(x, arg):
padding = [(max(0, -p[0]), max(0, p[1] - x.shape[i])) for i, p in enumerate(arg)]
x = np.pad(x, padding)
slicee = [(p[0] + padding[i][0], p[1] + padding[i][0]) for i, p in enumerate(arg)]
return x[tuple([slice(x[0], x[1], None) for x in slicee])]
class Slice(Function):
@staticmethod
def forward(ctx, x, arg=None):
ctx.save_for_backward(x.shape)
return inner_slice(x, arg)
@staticmethod
def backward(ctx, grad_output):
shape, = ctx.saved_tensors
narg = [(0 - p[0], grad_output.shape[i] + (shape[i] - p[1])) for i, p in enumerate(ctx.arg)]
return inner_slice(grad_output, narg)
# ************* processing ops *************
class Matmul(Function):
@staticmethod
def forward(ctx, input, weight):
ctx.save_for_backward(input, weight)
return input @ weight
@staticmethod
def backward(ctx, grad_output):
input, weight = ctx.saved_tensors
grad_input = grad_output @ np.swapaxes(weight, -2, -1)
grad_weight = np.swapaxes(input, -2, -1) @ grad_output
return grad_input, grad_weight
class Conv2D(Function):
@staticmethod
def forward(ctx, x, w, stride=1, groups=1):
if type(ctx.stride) == int:
ctx.stride = (ctx.stride, ctx.stride)
cout, cin, H, W = w.shape
ys, xs = ctx.stride
bs, cin_ = x.shape[0], x.shape[1]
oy, ox = (x.shape[2] - (H - ys)) // ys, (x.shape[3] - (W - xs)) // xs
assert cin * ctx.groups == cin_
assert cout % ctx.groups == 0
rcout = cout // ctx.groups
gx = x.reshape(bs, ctx.groups, cin, x.shape[2], x.shape[3])
tx = np.lib.stride_tricks.as_strided(gx,
shape=(bs, ctx.groups, cin, oy, ox, H, W),
strides=(*gx.strides[0:3], gx.strides[3] * ys, gx.strides[4] * xs,
*gx.strides[3:5]),
writeable=False,
)
tw = w.reshape(ctx.groups, rcout, cin, H, W)
ctx.save_for_backward(tx, tw, x.shape)
ret = np.zeros((bs, ctx.groups, oy, ox, rcout), dtype=x.dtype)
for g in range(ctx.groups):
# ijYXyx,kjyx -> iYXk ->ikYX
ret[:, g] += np.tensordot(tx[:, g], tw[g], ((1, 4, 5), (1, 2, 3)))
return np.moveaxis(ret, 4, 2).reshape(bs, cout, oy, ox)
@staticmethod
def backward(ctx, grad_output):
bs, _, oy, ox = grad_output.shape
tx, tw, x_shape = ctx.saved_tensors
_, rcout, cin, H, W = tw.shape
ys, xs = ctx.stride
OY, OX = x_shape[2:4]
ggg = grad_output.reshape(bs, ctx.groups, rcout, oy, ox)
gdw = np.zeros((ctx.groups, rcout, cin, H, W), dtype=tx.dtype)
for g in range(ctx.groups):
# 'ikYX,ijYXyx -> kjyx'
gdw[g] += np.tensordot(ggg[:, g], tx[:, g], ((0, 2, 3), (0, 2, 3)))
# needs to be optimized
gdx = np.zeros((bs, ctx.groups, cin, OY, OX), dtype=tx.dtype)
for k in range(oy * ox):
Y, X = k // ox, k % ox
iY, iX = Y * ys, X * xs
# gdx[:,:,: , iY:iY+H, iX:iX+W] += np.einsum('igk,gkjyx->igjyx', ggg[:,:,:,Y,X], tw)
for g in range(ctx.groups):
tg = np.dot(ggg[:, g, :, Y, X].reshape(bs, -1), tw[g].reshape(rcout, -1))
gdx[:, g, :, iY:iY + H, iX:iX + W] += tg.reshape((bs, cin, H, W))
return gdx.reshape((bs, ctx.groups * cin, OY, OX)), gdw.reshape((ctx.groups * rcout, cin, H, W))
| 32.193676 | 117 | 0.570166 | [
"Apache-2.0"
] | dredwardhyde/tinygrad-universal | tinygrad/ops_cpu.py | 8,145 | Python |
import numpy as np
import tensorflow as tf
# ----------------------------------------------------------------------------
def SubPixel1D_v2(I, r):
"""One-dimensional subpixel upsampling layer
Based on https://github.com/Tetrachrome/subpixel/blob/master/subpixel.py
"""
with tf.compat.v1.name_scope('subpixel'):
bsize, a, r = I.get_shape().as_list()
bsize = tf.shape(input=I)[0] # Handling Dimension(None) type for undefined batch dim
X = tf.split(1, a, I) # a, [bsize, 1, r]
if 'axis' in tf.squeeze.__code__.co_varnames:
X = tf.concat(1, [tf.squeeze(x, axis=1) for x in X]) # bsize, a*r
elif 'squeeze_dims' in tf.squeeze.__code__.co_varnames:
X = tf.concat(1, [tf.squeeze(x, axis=[1]) for x in X]) # bsize, a*r
else:
raise Exception('Unsupported version of tensorflow')
return tf.reshape(X, (bsize, a*r, 1))
def SubPixel1D(I, r):
"""One-dimensional subpixel upsampling layer
Calls a tensorflow function that directly implements this functionality.
We assume input has dim (batch, width, r)
"""
with tf.compat.v1.name_scope('subpixel'):
X = tf.transpose(a=I, perm=[2,1,0]) # (r, w, b)
X = tf.batch_to_space(X, [r], [[0,0]]) # (1, r*w, b)
X = tf.transpose(a=X, perm=[2,1,0])
return X
def SubPixel1D_multichan(I, r):
"""One-dimensional subpixel upsampling layer
Calls a tensorflow function that directly implements this functionality.
We assume input has dim (batch, width, r).
Works with multiple channels: (B,L,rC) -> (B,rL,C)
"""
with tf.compat.v1.name_scope('subpixel'):
_, w, rc = I.get_shape()
assert rc % r == 0
c = rc / r
X = tf.transpose(a=I, perm=[2,1,0]) # (rc, w, b)
X = tf.batch_to_space(X, [r], [[0,0]]) # (c, r*w, b)
X = tf.transpose(a=X, perm=[2,1,0])
return X
# ----------------------------------------------------------------------------
# demonstration
if __name__ == "__main__":
with tf.compat.v1.Session() as sess:
x = np.arange(2*4*2).reshape(2, 4, 2)
X = tf.compat.v1.placeholder("float32", shape=(2, 4, 2), name="X")
Y = SubPixel1D(X, 2)
y = sess.run(Y, feed_dict={X: x})
print('single-channel:')
print('original, element 0 (2 channels):', x[0,:,0], x[0,:,1])
    print('rescaled, element 0:', y[0,:,0])
print()
    print('original, element 1 (2 channels):', x[1,:,0], x[1,:,1])
print('rescaled, element 1:', y[1,:,0])
print()
x = np.arange(2*4*4).reshape(2, 4, 4)
X = tf.compat.v1.placeholder("float32", shape=(2, 4, 4), name="X")
Y = SubPixel1D(X, 2)
y = sess.run(Y, feed_dict={X: x})
print('multichannel:')
print('original, element 0 (4 channels):', x[0,:,0], x[0,:,1], x[0,:,2], x[0,:,3])
    print('rescaled, element 0:', y[0,:,0], y[0,:,1])
print()
    print('original, element 1 (4 channels):', x[1,:,0], x[1,:,1], x[1,:,2], x[1,:,3])
print('rescaled, element 1:', y[1,:,0], y[1,:,1], end=' ')
| 36.17284 | 88 | 0.567918 | [
"MIT"
] | Lootwig/audio-super-res | src/models/layers/subpixel.py | 2,930 | Python |
import logging
import boto3
from botocore.vendored.requests.packages.urllib3.exceptions import ResponseError
from django.core.mail.backends.base import BaseEmailBackend
from django_ses import settings
from datetime import datetime, timedelta
from time import sleep
try:
import importlib.metadata as importlib_metadata
except ModuleNotFoundError:
# Shim for Python 3.7. Remove when support is dropped.
import importlib_metadata
__version__ = importlib_metadata.version(__name__)
__all__ = ('SESBackend',)
# These would be nice to make class-level variables, but the backend is
# re-created for each outgoing email/batch.
# recent_send_times also is not going to work quite right if there are multiple
# email backends with different rate limits returned by SES, but that seems
# like it would be rare.
cached_rate_limits = {}
recent_send_times = []
logger = logging.getLogger('django_ses')
def dkim_sign(message, dkim_domain=None, dkim_key=None, dkim_selector=None, dkim_headers=None):
"""Return signed email message if dkim package and settings are available."""
try:
import dkim
except ImportError:
pass
else:
if dkim_domain and dkim_key:
sig = dkim.sign(message,
dkim_selector,
dkim_domain,
dkim_key,
include_headers=dkim_headers)
message = sig + message
return message
def cast_nonzero_to_float(val):
"""Cast nonzero number to float; on zero or None, return None"""
if not val:
return None
return float(val)
class SESBackend(BaseEmailBackend):
"""A Django Email backend that uses Amazon's Simple Email Service.
"""
def __init__(self, fail_silently=False, aws_access_key=None,
aws_secret_key=None, aws_region_name=None,
aws_region_endpoint=None, aws_auto_throttle=None, aws_config=None,
dkim_domain=None, dkim_key=None, dkim_selector=None, dkim_headers=None,
ses_source_arn=None, ses_from_arn=None, ses_return_path_arn=None,
**kwargs):
super(SESBackend, self).__init__(fail_silently=fail_silently, **kwargs)
self._access_key_id = aws_access_key or settings.ACCESS_KEY
self._access_key = aws_secret_key or settings.SECRET_KEY
self._region_name = aws_region_name if aws_region_name else settings.AWS_SES_REGION_NAME
self._endpoint_url = aws_region_endpoint if aws_region_endpoint else settings.AWS_SES_REGION_ENDPOINT_URL
self._throttle = cast_nonzero_to_float(aws_auto_throttle or settings.AWS_SES_AUTO_THROTTLE)
self._config = aws_config or settings.AWS_SES_CONFIG
self.dkim_domain = dkim_domain or settings.DKIM_DOMAIN
self.dkim_key = dkim_key or settings.DKIM_PRIVATE_KEY
self.dkim_selector = dkim_selector or settings.DKIM_SELECTOR
self.dkim_headers = dkim_headers or settings.DKIM_HEADERS
self.ses_source_arn = ses_source_arn or settings.AWS_SES_SOURCE_ARN
self.ses_from_arn = ses_from_arn or settings.AWS_SES_FROM_ARN
self.ses_return_path_arn = ses_return_path_arn or settings.AWS_SES_RETURN_PATH_ARN
self.connection = None
def open(self):
"""Create a connection to the AWS API server. This can be reused for
sending multiple emails.
"""
if self.connection:
return False
try:
self.connection = boto3.client(
'ses',
aws_access_key_id=self._access_key_id,
aws_secret_access_key=self._access_key,
region_name=self._region_name,
endpoint_url=self._endpoint_url,
config=self._config
)
except Exception:
if not self.fail_silently:
raise
def close(self):
"""Close any open HTTP connections to the API server.
"""
self.connection = None
def send_messages(self, email_messages):
"""Sends one or more EmailMessage objects and returns the number of
email messages sent.
"""
if not email_messages:
return
new_conn_created = self.open()
if not self.connection:
# Failed silently
return
num_sent = 0
source = settings.AWS_SES_RETURN_PATH
for message in email_messages:
# SES Configuration sets. If the AWS_SES_CONFIGURATION_SET setting
# is not None, append the appropriate header to the message so that
# SES knows which configuration set it belongs to.
#
# If settings.AWS_SES_CONFIGURATION_SET is a callable, pass it the
# message object and dkim settings and expect it to return a string
# containing the SES Configuration Set name.
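            # Hedged sketch (not from this project) of such a callable placed in
            # Django settings; the name `ses_configuration_set` is hypothetical:
            #
            #   def ses_configuration_set(message, dkim_domain=None, dkim_key=None,
            #                             dkim_selector=None, dkim_headers=()):
            #       return 'my-configuration-set'
            #
            #   AWS_SES_CONFIGURATION_SET = ses_configuration_set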
if (settings.AWS_SES_CONFIGURATION_SET
and 'X-SES-CONFIGURATION-SET' not in message.extra_headers):
if callable(settings.AWS_SES_CONFIGURATION_SET):
message.extra_headers[
'X-SES-CONFIGURATION-SET'] = settings.AWS_SES_CONFIGURATION_SET(
message,
dkim_domain=self.dkim_domain,
dkim_key=self.dkim_key,
dkim_selector=self.dkim_selector,
dkim_headers=self.dkim_headers
)
else:
message.extra_headers[
'X-SES-CONFIGURATION-SET'] = settings.AWS_SES_CONFIGURATION_SET
# Automatic throttling. Assumes that this is the only SES client
# currently operating. The AWS_SES_AUTO_THROTTLE setting is a
# factor to apply to the rate limit, with a default of 0.5 to stay
# well below the actual SES throttle.
# Set the setting to 0 or None to disable throttling.
if self._throttle:
global recent_send_times
now = datetime.now()
# Get and cache the current SES max-per-second rate limit
# returned by the SES API.
rate_limit = self.get_rate_limit()
logger.debug("send_messages.throttle rate_limit='{}'".format(rate_limit))
# Prune from recent_send_times anything more than a few seconds
# ago. Even though SES reports a maximum per-second, the way
# they enforce the limit may not be on a one-second window.
# To be safe, we use a two-second window (but allow 2 times the
# rate limit) and then also have a default rate limit factor of
# 0.5 so that we really limit the one-second amount in two
# seconds.
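                # Worked example (illustrative numbers only): with a reported
                # rate_limit of 14 msgs/s, window = 2 s and a throttle factor of
                # 0.5, the check below allows 14 * 2 * 0.5 = 14 recent sends in
                # the window before a delay is inserted.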
window = 2.0 # seconds
window_start = now - timedelta(seconds=window)
new_send_times = []
for time in recent_send_times:
if time > window_start:
new_send_times.append(time)
recent_send_times = new_send_times
# If the number of recent send times in the last 1/_throttle
# seconds exceeds the rate limit, add a delay.
# Since I'm not sure how Amazon determines at exactly what
# point to throttle, better be safe than sorry and let in, say,
# half of the allowed rate.
if len(new_send_times) > rate_limit * window * self._throttle:
# Sleep the remainder of the window period.
delta = now - new_send_times[0]
total_seconds = (delta.microseconds + (delta.seconds +
delta.days * 24 * 3600) * 10**6) / 10**6
delay = window - total_seconds
if delay > 0:
sleep(delay)
recent_send_times.append(now)
# end of throttling
kwargs = dict(
Source=source or message.from_email,
Destinations=message.recipients(),
# todo attachments?
RawMessage={'Data': dkim_sign(message.message().as_string(),
dkim_key=self.dkim_key,
dkim_domain=self.dkim_domain,
dkim_selector=self.dkim_selector,
dkim_headers=self.dkim_headers)}
)
if self.ses_source_arn:
kwargs['SourceArn'] = self.ses_source_arn
if self.ses_from_arn:
kwargs['FromArn'] = self.ses_from_arn
if self.ses_return_path_arn:
kwargs['ReturnPathArn'] = self.ses_return_path_arn
try:
response = self.connection.send_raw_email(**kwargs)
message.extra_headers['status'] = 200
message.extra_headers['message_id'] = response['MessageId']
message.extra_headers['request_id'] = response['ResponseMetadata']['RequestId']
num_sent += 1
if 'X-SES-CONFIGURATION-SET' in message.extra_headers:
logger.debug(
"send_messages.sent from='{}' recipients='{}' message_id='{}' request_id='{}' "
"ses-configuration-set='{}'".format(
message.from_email,
", ".join(message.recipients()),
message.extra_headers['message_id'],
message.extra_headers['request_id'],
message.extra_headers['X-SES-CONFIGURATION-SET']
))
else:
logger.debug("send_messages.sent from='{}' recipients='{}' message_id='{}' request_id='{}'".format(
message.from_email,
", ".join(message.recipients()),
message.extra_headers['message_id'],
message.extra_headers['request_id']
))
except ResponseError as err:
# Store failure information so to post process it if required
error_keys = ['status', 'reason', 'body', 'request_id',
'error_code', 'error_message']
for key in error_keys:
message.extra_headers[key] = getattr(err, key, None)
if not self.fail_silently:
raise
if new_conn_created:
self.close()
return num_sent
def get_rate_limit(self):
if self._access_key_id in cached_rate_limits:
return cached_rate_limits[self._access_key_id]
new_conn_created = self.open()
if not self.connection:
raise Exception(
"No connection is available to check current SES rate limit.")
try:
quota_dict = self.connection.get_send_quota()
max_per_second = quota_dict['MaxSendRate']
ret = float(max_per_second)
cached_rate_limits[self._access_key_id] = ret
return ret
finally:
if new_conn_created:
self.close()
| 42.88806 | 119 | 0.583522 | [
"MIT"
] | mlissner/django-ses | django_ses/__init__.py | 11,494 | Python |
# -*- coding=UTF-8 -*-
# pyright: strict
from __future__ import annotations
import os
import sys
import subprocess
def main():
subprocess.call(
["npx", "pyright"],
env={
**os.environ,
"PATH": os.path.pathsep.join(
(
os.path.dirname(sys.executable),
os.getenv("PATH") or "",
)
),
},
shell=True,
)
if __name__ == "__main__":
main()
| 15.806452 | 52 | 0.453061 | [
"MIT"
] | gentle-knight-13/auto-derby | scripts/run_pyright.py | 490 | Python |
u"foo"
| 3.5 | 6 | 0.571429 | [
"MIT"
] | jwilk-forks/python-grammar-changes | examples/py33-0012-uprefix1.py | 7 | Python |
#!/usr/bin/python
#
# linearize-hashes.py: List blocks in a linear, no-fork version of the chain.
#
# Copyright (c) 2013-2014 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from __future__ import print_function
import json
import struct
import re
import base64
import httplib
import sys
settings = {}
class BitcoinRPC:
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def execute(self, obj):
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print("JSON-RPC: no response", file=sys.stderr)
return None
body = resp.read()
resp_obj = json.loads(body)
return resp_obj
@staticmethod
def build_request(idx, method, params):
obj = { 'version' : '1.1',
'method' : method,
'id' : idx }
if params is None:
obj['params'] = []
else:
obj['params'] = params
return obj
@staticmethod
def response_is_error(resp_obj):
return 'error' in resp_obj and resp_obj['error'] is not None
def get_block_hashes(settings, max_blocks_per_call=10000):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpassword'])
height = settings['min_height']
while height < settings['max_height']+1:
num_blocks = min(settings['max_height']+1-height, max_blocks_per_call)
batch = []
for x in range(num_blocks):
batch.append(rpc.build_request(x, 'getblockhash', [height + x]))
reply = rpc.execute(batch)
for x,resp_obj in enumerate(reply):
if rpc.response_is_error(resp_obj):
print('JSON-RPC: error at height', height+x, ': ', resp_obj['error'], file=sys.stderr)
exit(1)
assert(resp_obj['id'] == x) # assume replies are in-sequence
print(resp_obj['result'])
height += num_blocks
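# Example CONFIG-FILE contents (illustrative values): the parser below accepts
# simple key=value lines, skips '#' comments, and falls back to the defaults
# assigned in __main__ for host, port, min_height and max_height.
#
#   host=127.0.0.1
#   port=60100
#   rpcuser=someuser
#   rpcpassword=somepassword
#   min_height=0
#   max_height=313000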
if __name__ == '__main__':
if len(sys.argv) != 2:
print("Usage: linearize-hashes.py CONFIG-FILE")
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 60100
if 'min_height' not in settings:
settings['min_height'] = 0
if 'max_height' not in settings:
settings['max_height'] = 313000
if 'rpcuser' not in settings or 'rpcpassword' not in settings:
print("Missing username and/or password in cfg file", file=stderr)
sys.exit(1)
settings['port'] = int(settings['port'])
settings['min_height'] = int(settings['min_height'])
settings['max_height'] = int(settings['max_height'])
get_block_hashes(settings)
| 26.640351 | 90 | 0.682581 | [
"MIT"
] | listedlinked/sors | contrib/linearize/linearize-hashes.py | 3,037 | Python |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
'''The Azure Command-line tool.
This tools provides a command-line interface to Azure's management and storage
APIs.
'''
import pkg_resources
pkg_resources.declare_namespace(__name__)
__author__ = "Microsoft Corporation <[email protected]>"
__version__ = "2.0.17+dev"
| 41.2 | 94 | 0.548544 | [
"MIT"
] | KTH/azure-cli | src/azure-cli/azure/cli/__init__.py | 618 | Python |
#!/usr/bin/python
import os
import unittest
""" Script to run the Python tests. """
def run_python_tests():
""" Runs the Python tests.
Returns:
True if the tests all succeed, False if there are failures. """
print("Starting tests...")
loader = unittest.TestLoader()
# Get the directory this module is in.
dir_path = os.path.dirname(os.path.realpath(__file__))
suite = loader.discover("rhodopsin/tests", top_level_dir=dir_path)
test_result = unittest.TextTestRunner(verbosity=2).run(suite)
if not test_result.wasSuccessful():
return False
return True
if __name__ == "__main__":
run_python_tests()
| 21.2 | 68 | 0.712264 | [
"MIT"
] | djpetti/rhodopsin | run_tests.py | 636 | Python |
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
# pylint: disable=invalid-name,no-member
"""Change `db_dbnode.type` for base `Data` types.
The base Data types Bool, Float, Int and Str have been moved in the source code, which means that their
module path changes, which determines the plugin type string that is stored in the database.
The type string will now have a type string prefix that is unique to each sub type.
Revision ID: django_0009
Revises: django_0008
"""
from alembic import op
revision = 'django_0009'
down_revision = 'django_0008'
branch_labels = None
depends_on = None
def upgrade():
"""Migrations for the upgrade."""
op.execute(
"""
UPDATE db_dbnode SET type = 'data.bool.Bool.' WHERE type = 'data.base.Bool.';
UPDATE db_dbnode SET type = 'data.float.Float.' WHERE type = 'data.base.Float.';
UPDATE db_dbnode SET type = 'data.int.Int.' WHERE type = 'data.base.Int.';
UPDATE db_dbnode SET type = 'data.str.Str.' WHERE type = 'data.base.Str.';
UPDATE db_dbnode SET type = 'data.list.List.' WHERE type = 'data.base.List.';
"""
)
def downgrade():
"""Migrations for the downgrade."""
op.execute(
"""
UPDATE db_dbnode SET type = 'data.base.Bool.' WHERE type = 'data.bool.Bool.';
UPDATE db_dbnode SET type = 'data.base.Float.' WHERE type = 'data.float.Float.';
UPDATE db_dbnode SET type = 'data.base.Int.' WHERE type = 'data.int.Int.';
UPDATE db_dbnode SET type = 'data.base.Str.' WHERE type = 'data.str.Str.';
UPDATE db_dbnode SET type = 'data.base.List.' WHERE type = 'data.list.List.';
"""
)
| 42.924528 | 103 | 0.566154 | [
"MIT",
"BSD-3-Clause"
] | mkrack/aiida-core | aiida/storage/psql_dos/migrations/versions/django_0009_base_data_plugin_type_string.py | 2,275 | Python |
import logging
from pyvisdk.exceptions import InvalidArgumentError
########################################
# Automatically generated, do not edit.
########################################
log = logging.getLogger(__name__)
def VirtualMachineRuntimeInfo(vim, *args, **kwargs):
'''The RuntimeInfo data object type provides information about the execution state
and history of a virtual machine.'''
obj = vim.client.factory.create('{urn:vim25}VirtualMachineRuntimeInfo')
# do some validation checking...
    if (len(args) + len(kwargs)) < 7:
        raise IndexError('Expected at least 7 arguments got: %d' % len(args))
required = [ 'connectionState', 'consolidationNeeded', 'faultToleranceState',
'numMksConnections', 'powerState', 'recordReplayState', 'toolsInstallerMounted' ]
optional = [ 'bootTime', 'cleanPowerOff', 'dasVmProtection', 'device', 'host',
'maxCpuUsage', 'maxMemoryUsage', 'memoryOverhead', 'minRequiredEVCModeKey',
'needSecondaryReason', 'question', 'suspendInterval', 'suspendTime',
'dynamicProperty', 'dynamicType' ]
for name, arg in zip(required+optional, args):
setattr(obj, name, arg)
for name, value in kwargs.items():
if name in required + optional:
setattr(obj, name, value)
else:
raise InvalidArgumentError("Invalid argument: %s. Expected one of %s" % (name, ", ".join(required + optional)))
return obj
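# Hedged usage sketch (the `vim` connection object and the enum-like values are
# assumptions for illustration, not taken from this module):
#   info = VirtualMachineRuntimeInfo(vim, connectionState='connected',
#       consolidationNeeded=False, faultToleranceState='notConfigured',
#       numMksConnections=0, powerState='poweredOn',
#       recordReplayState='inactive', toolsInstallerMounted=False)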
| 38.315789 | 124 | 0.644231 | [
"MIT"
] | Infinidat/pyvisdk | pyvisdk/do/virtual_machine_runtime_info.py | 1,456 | Python |
# Copyright (C) 2017 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Add non-adjusted next cycle start date
Revision ID: 44047daa31a9
Revises: 1431e7094e26
Create Date: 2015-07-07 14:31:27.780564
"""
# revision identifiers, used by Alembic.
revision = '44047daa31a9'
down_revision = '4840f4760f4b'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
from datetime import date
from ggrc.app import app
from ggrc import settings, db
import ggrc_workflows.models as models
from ggrc_workflows import adjust_next_cycle_start_date
from ggrc_workflows.services.workflow_cycle_calculator import \
get_cycle_calculator
def upgrade():
op.add_column('workflows',
sa.Column('non_adjusted_next_cycle_start_date',
sa.Date(), nullable=True))
# If somebody deleted all the tasks we must clear the next cycle start
# date
workflows = db.session.query(models.Workflow) \
.filter(
models.Workflow.next_cycle_start_date != None,
models.Workflow.recurrences == True,
models.Workflow.status == 'Active',
models.Workflow.next_cycle_start_date < date.today()
).all()
for workflow in workflows:
tasks_start_days = [task.relative_start_day
for tg in workflow.task_groups
for task in tg.task_group_tasks]
tasks_end_days = [task.relative_end_day
for tg in workflow.task_groups
for task in tg.task_group_tasks]
if ((not all(tasks_start_days) and not all(tasks_end_days)) or
(not tasks_start_days and not tasks_end_days)):
app.logger.warning(
"Removing NCSD from expired WF {} because no tasks are "
"set up. Current NCSD: {}".format(
workflow.id,
workflow.next_cycle_start_date
))
workflow.next_cycle_start_date = None
db.session.add(workflow)
workflows = db.session.query(models.Workflow) \
.filter(
models.Workflow.next_cycle_start_date != None,
models.Workflow.non_adjusted_next_cycle_start_date == None,
models.Workflow.recurrences == True,
models.Workflow.status == 'Active',
models.Workflow.next_cycle_start_date >= date.today()
).all()
for workflow in workflows:
tasks_start_days = [task.relative_start_day
for tg in workflow.task_groups
for task in tg.task_group_tasks]
tasks_end_days = [task.relative_end_day
for tg in workflow.task_groups
for task in tg.task_group_tasks]
# We must skip tasks that don't have start days and end days defined
if ((not all(tasks_start_days) and not all(tasks_end_days)) or
(not tasks_start_days and not tasks_end_days)):
append_msg = ""
if workflow.next_cycle_start_date:
workflow.next_cycle_start_date = None
append_msg += (" Removing existing next cycle start date "
"because none are configured.")
db.session.add(workflow)
app.logger.warning(
"Skipping active WF {0} because no tasks "
"are set up.{1}".format(
workflow.id,
append_msg
))
continue
pre_compute_ncsd = workflow.next_cycle_start_date
last_cycle_start_date = None
if workflow.cycles:
last_cycle_start_date = max([c.start_date for c in workflow.cycles])
if last_cycle_start_date:
base_date = last_cycle_start_date
else:
      base_date = date.today()
base_date = max(base_date, workflow.next_cycle_start_date)
calculator = get_cycle_calculator(workflow, base_date=base_date)
if workflow.frequency in {"weekly", "monthly"}:
nancsd_day = min(
v['relative_start'] for v in calculator.reified_tasks.values())
nancsd_month = None
else:
nancsd_month, nancsd_day = min(
v['relative_start'] for v in calculator.reified_tasks.values())
nancsd_date = calculator.relative_day_to_date(
relative_day=nancsd_day,
relative_month=nancsd_month,
base_date=base_date)
if last_cycle_start_date:
while calculator.adjust_date(nancsd_date) <= last_cycle_start_date:
base_date = base_date + calculator.time_delta
nancsd_date = calculator.relative_day_to_date(
relative_day=nancsd_day,
relative_month=nancsd_month,
base_date=base_date
)
else:
base_date = base_date - calculator.time_delta
while calculator.adjust_date(nancsd_date) <= pre_compute_ncsd:
base_date = base_date + calculator.time_delta
nancsd_date = calculator.relative_day_to_date(
relative_day=nancsd_day,
relative_month=nancsd_month,
base_date=base_date
)
workflow.non_adjusted_next_cycle_start_date = nancsd_date
workflow.next_cycle_start_date = calculator.adjust_date(nancsd_date)
post_compute_ncsd = workflow.next_cycle_start_date
start_dates = ["{}/{}".format(
task.relative_start_month,
task.relative_start_day) for tg in workflow.task_groups
for task in tg.task_group_tasks]
end_dates = ["{}/{}".format(
task.relative_end_month,
task.relative_end_day) for tg in workflow.task_groups
for task in tg.task_group_tasks]
if pre_compute_ncsd != post_compute_ncsd:
app.logger.warning(
"Adjusted NCSD for workflow {}. "
"Freq: {}, PRE: {}, Last cycle: {}, POST: {}, NON: {},"
"tasks start: {}, tasks end: {},".format(
workflow.id,
workflow.frequency[:2],
pre_compute_ncsd,
last_cycle_start_date,
post_compute_ncsd,
workflow.non_adjusted_next_cycle_start_date,
start_dates,
end_dates))
db.session.add(workflow)
# Save
db.session.commit()
def downgrade():
op.drop_column('workflows', 'non_adjusted_next_cycle_start_date')
| 38.146893 | 80 | 0.598045 | [
"ECL-2.0",
"Apache-2.0"
] | zidarsk8/ggrc-core | src/ggrc_workflows/migrations/versions/20150707143127_44047daa31a9_add_non_adjusted_next_cycle_start_date.py | 6,752 | Python |
A = 'A'
B = 'B'
Environment = {
A: 'Dirty',
B: 'Dirty',
'Current': A
}
def REFLEX_VACUUM_AGENT(loc_st): # Determine action
if loc_st[1] == 'Dirty':
return 'Suck'
if loc_st[0] == A:
return 'Right'
if loc_st[0] == B:
return 'Left'
def Sensors(): # Sense Environment
location = Environment['Current']
return (location, Environment[location])
def Actuators(action): # Modify Environment
location = Environment['Current']
if action == 'Suck':
Environment[location] = 'Clean'
elif action == 'Right' and location == A:
Environment['Current'] = B
elif action == 'Left' and location == B:
Environment['Current'] = A
def run(n, make_agent): # run the agent through n steps
print(' Current New')
print('location status action location status')
for i in range(1, n):
(location, status) = Sensors() # Sense Environment before action
print("{:12s}{:8s}".format(location, status), end='')
action = make_agent(Sensors())
Actuators(action)
(location, status) = Sensors() # Sense Environment after action
print("{:8s}{:12s}{:8s}".format(action, location, status))
if __name__ == '__main__':
run(10, REFLEX_VACUUM_AGENT)
| 26.571429 | 73 | 0.592934 | [
"MIT"
] | aleksander-GD/AI-F20 | Lecture_3_Agents/Exercise1/Exercises/reflex_vacuum_agent.py | 1,302 | Python |
def average_rating(rating_list):
if not rating_list:
# if rating_list is empty return 0
return 0
return round(sum(rating_list) / len(rating_list)) | 24.571429 | 53 | 0.680233 | [
"MIT"
] | rodrigobmedeiros/Bookr | bookr/reviews/utils.py | 172 | Python |
from .check_nd_array_for_bad import check_nd_array_for_bad
def make_reflecting_grid(grid, reflecting_grid_value, raise_for_bad=True):
check_nd_array_for_bad(grid, raise_for_bad=raise_for_bad)
reflecting_grid = grid.copy()
for i, grid_value in enumerate(reflecting_grid):
if grid_value < reflecting_grid_value:
reflecting_grid[i] += (reflecting_grid_value - grid_value) * 2
else:
reflecting_grid[i] -= (grid_value - reflecting_grid_value) * 2
return reflecting_grid
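# Worked example (illustrative): with reflecting_grid_value = 5, a grid value of
# 3 lies below the reflection point and maps to 3 + (5 - 3) * 2 = 7, while a
# value of 8 lies above it and maps to 8 - (8 - 5) * 2 = 2.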
| 25.285714 | 74 | 0.730697 | [
"MIT"
] | alex-wenzel/ccal | ccal/make_reflecting_grid.py | 531 | Python |
import operator
class Istr:
def count(self, s, k):
letters = {}
for letter in s:
if letter not in letters:
letters[letter] = 1
else:
letters[letter] += 1
for i in range(0, k):
index = max(letters.iteritems(), key=operator.itemgetter(1))[0]
letters[index] -= 1
score = 0
for element in letters:
val = letters[element] * letters[element]
score += val
return score
| 21.615385 | 77 | 0.455516 | [
"MIT"
] | mikefeneley/topcoder | src/SRM-684/istr.py | 562 | Python |
import asyncio
import copy
import random
from typing import Callable
import pytest
from starkware.starknet.apps.starkgate.cairo.contracts import erc20_contract_def
from starkware.starknet.apps.starkgate.conftest import str_to_felt
from starkware.starknet.testing.contract import StarknetContract
from starkware.starknet.testing.starknet import Starknet
from starkware.starkware_utils.error_handling import StarkException
AMOUNT_BOUND = 2 ** 256
GOVERNOR_ADDRESS = str_to_felt("GOVERNOR")
MINTER_ADDRESS = str_to_felt("MINTER")
L1_ACCOUNT = 1
initial_balances = {1: 13, 2: 10}
uninitialized_account = 3
initial_total_supply = sum(initial_balances.values())
initialized_account = random.choice(list(initial_balances.keys()))
another_account = 4 # Not initialized_account and not uninitialized_account.
# 0 < TRANSFER_AMOUNT < APPROVE_AMOUNT < initial_balance < HIGH_APPROVE_AMOUNT.
TRANSFER_AMOUNT = int((initial_balances[initialized_account] + 1) / 2)
APPROVE_AMOUNT = 8
HIGH_APPROVE_AMOUNT = 100
MINT_AMOUNT = 10
BURN_AMOUNT = int((initial_balances[initialized_account] + 1) / 2)
@pytest.fixture(scope="session")
def event_loop():
loop = asyncio.get_event_loop()
yield loop
loop.close()
@pytest.fixture(scope="session")
async def session_starknet() -> Starknet:
return await Starknet.empty()
@pytest.fixture(scope="session")
async def session_empty_token_contract(
session_starknet: Starknet,
token_name: int,
token_symbol: int,
token_decimals: int,
) -> StarknetContract:
return await session_starknet.deploy(
constructor_calldata=[
token_name,
token_symbol,
token_decimals,
MINTER_ADDRESS,
],
contract_def=erc20_contract_def,
)
@pytest.fixture(scope="session")
async def uint256(session_empty_token_contract: StarknetContract) -> Callable:
def convert_int_to_uint256(num: int):
if num < 0:
num += 2 ** 256
return session_empty_token_contract.Uint256(low=num % 2 ** 128, high=num // 2 ** 128)
return convert_int_to_uint256
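# Illustrative values for the fixture above: 2**128 + 5 is split into
# Uint256(low=5, high=1), and -1 is first wrapped to 2**256 - 1, giving
# Uint256(low=2**128 - 1, high=2**128 - 1).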
@pytest.fixture(scope="session")
async def session_token_contract(
session_empty_token_contract: StarknetContract,
uint256: Callable,
) -> StarknetContract:
for account in initial_balances:
await session_empty_token_contract.permissionedMint(
recipient=account, amount=uint256(initial_balances[account])
).invoke(caller_address=MINTER_ADDRESS)
return session_empty_token_contract
@pytest.fixture
async def starknet(session_starknet: Starknet) -> Starknet:
return copy.deepcopy(session_starknet)
@pytest.fixture
async def token_contract(
starknet: Starknet, session_token_contract: StarknetContract
) -> StarknetContract:
return StarknetContract(
state=starknet.state,
abi=erc20_contract_def.abi,
contract_address=session_token_contract.contract_address,
deploy_execution_info=session_token_contract.deploy_execution_info,
)
@pytest.mark.asyncio
async def test_permitted_minter(token_contract: StarknetContract):
execution_info = await token_contract.permittedMinter().call()
assert execution_info.result == (MINTER_ADDRESS,)
@pytest.mark.asyncio
async def test_name(token_contract: StarknetContract, token_name: int):
execution_info = await token_contract.name().call()
assert execution_info.result == (token_name,)
@pytest.mark.asyncio
async def test_symbol(token_contract: StarknetContract, token_symbol: int):
execution_info = await token_contract.symbol().call()
assert execution_info.result == (token_symbol,)
@pytest.mark.asyncio
async def test_decimal(token_contract: StarknetContract, token_decimals: int):
execution_info = await token_contract.decimals().call()
assert execution_info.result == (token_decimals,)
@pytest.mark.asyncio
async def test_total_supply(token_contract: StarknetContract, uint256: Callable):
execution_info = await token_contract.totalSupply().call()
assert execution_info.result == (uint256(initial_total_supply),)
@pytest.mark.asyncio
async def test_balance_of(token_contract: StarknetContract, uint256: Callable):
execution_info = await token_contract.balanceOf(account=initialized_account).call()
assert execution_info.result == (uint256(initial_balances[initialized_account]),)
execution_info = await token_contract.balanceOf(account=uninitialized_account).call()
assert execution_info.result == (uint256(0),)
@pytest.mark.asyncio
async def test_transfer_zero_sender(token_contract: StarknetContract, uint256: Callable):
amount = uint256(TRANSFER_AMOUNT)
with pytest.raises(StarkException, match="assert_not_zero\(sender\)"):
await token_contract.transfer(recipient=uninitialized_account, amount=amount).invoke(
caller_address=0
)
@pytest.mark.asyncio
async def test_transfer_zero_recipient(token_contract: StarknetContract, uint256: Callable):
with pytest.raises(StarkException, match="assert_not_zero\(recipient\)"):
await token_contract.transfer(recipient=0, amount=uint256(TRANSFER_AMOUNT)).invoke(
caller_address=initialized_account
)
@pytest.mark.asyncio
async def test_transfer_amount_bigger_than_balance(
token_contract: StarknetContract, uint256: Callable
):
amount = uint256(initial_balances[initialized_account] + 1)
with pytest.raises(StarkException, match="assert_not_zero\(enough_balance\)"):
await token_contract.transfer(recipient=uninitialized_account, amount=amount).invoke(
caller_address=initialized_account
)
@pytest.mark.asyncio
async def test_transfer_invalid_uint256_amount(token_contract: StarknetContract, uint256: Callable):
amount = uint256(AMOUNT_BOUND)
with pytest.raises(StarkException, match="uint256_check\(amount\)"):
await token_contract.transfer(recipient=uninitialized_account, amount=amount).invoke(
caller_address=initialized_account
)
@pytest.mark.asyncio
async def test_transfer_happy_flow(token_contract: StarknetContract, uint256: Callable):
transfer_amount = uint256(TRANSFER_AMOUNT)
await token_contract.transfer(recipient=uninitialized_account, amount=transfer_amount).invoke(
caller_address=initialized_account
)
expected_balance = uint256(initial_balances[initialized_account] - TRANSFER_AMOUNT)
execution_info = await token_contract.balanceOf(account=initialized_account).call()
assert execution_info.result == (expected_balance,)
execution_info = await token_contract.balanceOf(account=uninitialized_account).call()
assert execution_info.result == (transfer_amount,)
execution_info = await token_contract.totalSupply().call()
assert execution_info.result == (uint256(initial_total_supply),)
await token_contract.transfer(recipient=initialized_account, amount=transfer_amount).invoke(
caller_address=uninitialized_account
)
execution_info = await token_contract.balanceOf(account=initialized_account).call()
assert execution_info.result == (uint256(initial_balances[initialized_account]),)
execution_info = await token_contract.balanceOf(account=uninitialized_account).call()
assert execution_info.result == (uint256(0),)
# Tests the case of sender = recipient.
await token_contract.transfer(recipient=initialized_account, amount=transfer_amount).invoke(
caller_address=initialized_account
)
execution_info = await token_contract.balanceOf(account=initialized_account).call()
assert execution_info.result == (uint256(initial_balances[initialized_account]),)
@pytest.mark.asyncio
async def test_approve_zero_owner(token_contract: StarknetContract, uint256: Callable):
amount = uint256(APPROVE_AMOUNT)
with pytest.raises(StarkException, match="assert_not_zero\(caller\)"):
await token_contract.approve(spender=uninitialized_account, amount=amount).invoke(
caller_address=0
)
@pytest.mark.asyncio
async def test_approve_zero_spender(token_contract: StarknetContract, uint256: Callable):
amount = uint256(APPROVE_AMOUNT)
with pytest.raises(StarkException, match="assert_not_zero\(spender\)"):
await token_contract.approve(spender=0, amount=amount).invoke(
caller_address=initialized_account
)
@pytest.mark.asyncio
async def test_approve_invalid_uint256_amount(token_contract: StarknetContract, uint256: Callable):
amount = uint256(AMOUNT_BOUND)
with pytest.raises(StarkException, match="uint256_check\(amount\)"):
await token_contract.approve(spender=uninitialized_account, amount=amount).invoke(
caller_address=initialized_account
)
@pytest.mark.asyncio
async def test_approve_happy_flow(token_contract: StarknetContract, uint256: Callable):
execution_info = await token_contract.allowance(
owner=initialized_account, spender=uninitialized_account
).call()
assert execution_info.result == (uint256(0),)
await token_contract.approve(
spender=uninitialized_account, amount=uint256(APPROVE_AMOUNT)
).invoke(caller_address=initialized_account)
execution_info = await token_contract.allowance(
owner=initialized_account, spender=uninitialized_account
).call()
assert execution_info.result == (uint256(APPROVE_AMOUNT),)
@pytest.mark.asyncio
async def test_transfer_from_zero_sender(token_contract: StarknetContract, uint256: Callable):
# The contract fails when checking for sufficient allowance of account 0.
# Only because we cannot put a balance for address(0) or approve on its behalf.
# Could we do that, we would have failed on the more sensible error assert_not_zero(sender).
with pytest.raises(StarkException, match="assert_not_zero\(enough_allowance\)"):
await token_contract.transferFrom(
sender=0, recipient=uninitialized_account, amount=uint256(TRANSFER_AMOUNT)
).invoke(caller_address=another_account)
@pytest.mark.asyncio
async def test_transfer_from_zero_recipient(token_contract: StarknetContract, uint256: Callable):
amount = uint256(TRANSFER_AMOUNT)
await token_contract.approve(spender=another_account, amount=uint256(TRANSFER_AMOUNT)).invoke(
caller_address=initialized_account
)
with pytest.raises(StarkException, match="assert_not_zero\(recipient\)"):
await token_contract.transferFrom(
sender=initialized_account, recipient=0, amount=amount
).invoke(caller_address=another_account)
@pytest.mark.asyncio
async def test_transfer_from_amount_bigger_than_balance(
token_contract: StarknetContract, uint256: Callable
):
await token_contract.approve(
spender=another_account, amount=uint256(HIGH_APPROVE_AMOUNT)
).invoke(caller_address=initialized_account)
amount = uint256(initial_balances[initialized_account] + 1)
with pytest.raises(StarkException, match="assert_not_zero\(enough_balance\)"):
await token_contract.transferFrom(
sender=initialized_account, recipient=uninitialized_account, amount=amount
).invoke(caller_address=another_account)
@pytest.mark.asyncio
async def test_transfer_from_amount_bigger_than_allowance(
token_contract: StarknetContract, uint256: Callable
):
await token_contract.approve(spender=another_account, amount=uint256(APPROVE_AMOUNT)).invoke(
caller_address=initialized_account
)
amount = uint256(APPROVE_AMOUNT + 1)
with pytest.raises(StarkException, match="assert_not_zero\(enough_allowance\)"):
await token_contract.transferFrom(
sender=initialized_account, recipient=uninitialized_account, amount=amount
).invoke(caller_address=another_account)
@pytest.mark.asyncio
async def test_transfer_from_invalid_uint256_amount(
token_contract: StarknetContract, uint256: Callable
):
amount = uint256(AMOUNT_BOUND)
with pytest.raises(StarkException, match="assert_not_zero\(enough_allowance\)"):
await token_contract.transferFrom(
sender=initialized_account, recipient=uninitialized_account, amount=amount
).invoke(caller_address=another_account)
@pytest.mark.asyncio
@pytest.mark.parametrize("approve_num", [APPROVE_AMOUNT, HIGH_APPROVE_AMOUNT])
async def test_transfer_from_happy_flow(
token_contract: StarknetContract, uint256: Callable, approve_num: int
):
await token_contract.approve(spender=another_account, amount=uint256(approve_num)).invoke(
caller_address=initialized_account
)
await token_contract.transferFrom(
sender=initialized_account, recipient=uninitialized_account, amount=uint256(TRANSFER_AMOUNT)
).invoke(caller_address=another_account)
@pytest.mark.asyncio
async def test_increase_allowance_zero_spender(token_contract: StarknetContract, uint256: Callable):
with pytest.raises(StarkException, match="assert_not_zero\(spender\)"):
await token_contract.increaseAllowance(
spender=0, added_value=uint256(APPROVE_AMOUNT)
).invoke(caller_address=initialized_account)
@pytest.mark.asyncio
async def test_increase_allowance_invalid_amount(
token_contract: StarknetContract, uint256: Callable
):
with pytest.raises(StarkException, match="uint256_check\(added_value\)"):
await token_contract.increaseAllowance(
spender=uninitialized_account, added_value=uint256(AMOUNT_BOUND)
).invoke(caller_address=initialized_account)
@pytest.mark.asyncio
async def test_increase_allowance_overflow(token_contract: StarknetContract, uint256: Callable):
await token_contract.increaseAllowance(
spender=uninitialized_account, added_value=uint256(APPROVE_AMOUNT)
).invoke(caller_address=initialized_account)
with pytest.raises(StarkException, match="assert \(is_overflow\) = 0"):
await token_contract.increaseAllowance(
spender=uninitialized_account, added_value=uint256(AMOUNT_BOUND - APPROVE_AMOUNT)
).invoke(caller_address=initialized_account)
@pytest.mark.asyncio
async def test_decrease_allowance_zero_spender(token_contract: StarknetContract, uint256: Callable):
approve_amount = uint256(APPROVE_AMOUNT)
with pytest.raises(StarkException, match="assert_not_zero\(enough_allowance\)"):
await token_contract.decreaseAllowance(spender=0, subtracted_value=approve_amount).invoke(
caller_address=initialized_account
)
@pytest.mark.asyncio
async def test_decrease_allowance_bigger_than_allowance(
token_contract: StarknetContract, uint256: Callable
):
await token_contract.increaseAllowance(
spender=uninitialized_account, added_value=uint256(APPROVE_AMOUNT)
).invoke(caller_address=initialized_account)
with pytest.raises(StarkException, match="assert_not_zero\(enough_allowance\)"):
await token_contract.decreaseAllowance(
spender=uninitialized_account, subtracted_value=uint256(APPROVE_AMOUNT + 1)
).invoke(caller_address=initialized_account)
@pytest.mark.asyncio
async def test_decrease_allowance_invalid_amount(
token_contract: StarknetContract, uint256: Callable
):
with pytest.raises(StarkException, match="uint256_check\(subtracted_value\)"):
await token_contract.decreaseAllowance(
spender=uninitialized_account, subtracted_value=uint256(AMOUNT_BOUND)
).invoke(caller_address=initialized_account)
@pytest.mark.asyncio
async def test_increase_and_decrease_allowance_happy_flow(
token_contract: StarknetContract, uint256: Callable
):
execution_info = await token_contract.allowance(
owner=initialized_account, spender=uninitialized_account
).call()
assert execution_info.result == (uint256(0),)
await token_contract.increaseAllowance(
spender=uninitialized_account, added_value=uint256(APPROVE_AMOUNT)
).invoke(caller_address=initialized_account)
execution_info = await token_contract.allowance(
owner=initialized_account, spender=uninitialized_account
).call()
assert execution_info.result == (uint256(APPROVE_AMOUNT),)
await token_contract.decreaseAllowance(
spender=uninitialized_account, subtracted_value=uint256(int(APPROVE_AMOUNT / 2))
).invoke(caller_address=initialized_account)
execution_info = await token_contract.allowance(
owner=initialized_account, spender=uninitialized_account
).call()
assert execution_info.result == (uint256(APPROVE_AMOUNT - int(APPROVE_AMOUNT / 2)),)
@pytest.mark.asyncio
async def test_permissioned_mint_wrong_minter(token_contract: StarknetContract, uint256: Callable):
with pytest.raises(StarkException, match="assert caller_address = permitted_address"):
await token_contract.permissionedMint(
recipient=uninitialized_account, amount=uint256(MINT_AMOUNT)
).invoke(caller_address=MINTER_ADDRESS + 1)
@pytest.mark.asyncio
async def test_permissioned_mint_zero_recipient(
token_contract: StarknetContract, uint256: Callable
):
with pytest.raises(StarkException, match="assert_not_zero\(recipient\)"):
await token_contract.permissionedMint(recipient=0, amount=uint256(MINT_AMOUNT)).invoke(
caller_address=MINTER_ADDRESS
)
@pytest.mark.asyncio
async def test_permissioned_mint_invalid_uint256_amount(
token_contract: StarknetContract, uint256: Callable
):
with pytest.raises(StarkException, match=f"uint256_check\(amount\)"):
await token_contract.permissionedMint(
recipient=uninitialized_account, amount=uint256(AMOUNT_BOUND)
).invoke(caller_address=MINTER_ADDRESS)
@pytest.mark.asyncio
async def test_permissioned_mint_total_supply_out_of_range(
token_contract: StarknetContract, uint256: Callable
):
amount = uint256(AMOUNT_BOUND - initial_total_supply)
with pytest.raises(StarkException, match=f"assert \(is_overflow\) = 0"):
await token_contract.permissionedMint(
recipient=uninitialized_account, amount=amount
).invoke(caller_address=MINTER_ADDRESS)
@pytest.mark.asyncio
async def test_permissioned_mint_happy_flow(token_contract: StarknetContract, uint256: Callable):
await token_contract.permissionedMint(
recipient=uninitialized_account, amount=uint256(MINT_AMOUNT)
).invoke(caller_address=MINTER_ADDRESS)
execution_info = await token_contract.balanceOf(account=uninitialized_account).call()
assert execution_info.result == (uint256(MINT_AMOUNT),)
execution_info = await token_contract.totalSupply().call()
assert execution_info.result == (uint256(initial_total_supply + MINT_AMOUNT),)
@pytest.mark.asyncio
async def test_permissioned_burn_wrong_minter(token_contract: StarknetContract, uint256: Callable):
with pytest.raises(StarkException, match="assert caller_address = permitted_address"):
await token_contract.permissionedBurn(
account=initialized_account, amount=uint256(BURN_AMOUNT)
).invoke(caller_address=MINTER_ADDRESS + 1)
@pytest.mark.asyncio
async def test_permissioned_burn_zero_account(token_contract: StarknetContract, uint256: Callable):
with pytest.raises(StarkException, match="assert_not_zero\(account\)"):
await token_contract.permissionedBurn(account=0, amount=uint256(BURN_AMOUNT)).invoke(
caller_address=MINTER_ADDRESS
)
@pytest.mark.asyncio
async def test_permissioned_burn_invalid_uint256_amount(
token_contract: StarknetContract, uint256: Callable
):
with pytest.raises(StarkException, match=f"uint256_check\(amount\)"):
await token_contract.permissionedBurn(
account=initialized_account, amount=uint256(AMOUNT_BOUND)
).invoke(caller_address=MINTER_ADDRESS)
@pytest.mark.asyncio
async def test_permissioned_burn_amount_bigger_than_balance(
token_contract: StarknetContract, uint256: Callable
):
amount = uint256(initial_balances[initialized_account] + 1)
with pytest.raises(StarkException, match=f"assert_not_zero\(enough_balance\)"):
await token_contract.permissionedBurn(account=initialized_account, amount=amount).invoke(
caller_address=MINTER_ADDRESS
)
@pytest.mark.asyncio
async def test_permissioned_burn_happy_flow(token_contract: StarknetContract, uint256: Callable):
await token_contract.permissionedMint(
recipient=initialized_account, amount=uint256(MINT_AMOUNT)
).invoke(caller_address=MINTER_ADDRESS)
await token_contract.permissionedBurn(
account=initialized_account, amount=uint256(BURN_AMOUNT)
).invoke(caller_address=MINTER_ADDRESS)
expected_balance = uint256(initial_balances[initialized_account] + MINT_AMOUNT - BURN_AMOUNT)
execution_info = await token_contract.balanceOf(account=initialized_account).call()
assert execution_info.result == (expected_balance,)
execution_info = await token_contract.totalSupply().call()
assert execution_info.result == (uint256(initial_total_supply + MINT_AMOUNT - BURN_AMOUNT),)
| 40.885437 | 100 | 0.772226 | [
"Apache-2.0"
] | starkware-libs/starkgate-contracts | src/starkware/starknet/apps/starkgate/cairo/token_test.py | 21,056 | Python |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class SubnetAssociation(Model):
"""Network interface and its custom security rules.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: Subnet ID.
:vartype id: str
:param security_rules: Collection of custom security rules.
:type security_rules:
list[~azure.mgmt.network.v2018_08_01.models.SecurityRule]
"""
_validation = {
'id': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'security_rules': {'key': 'securityRules', 'type': '[SecurityRule]'},
}
def __init__(self, **kwargs):
super(SubnetAssociation, self).__init__(**kwargs)
self.id = None
self.security_rules = kwargs.get('security_rules', None)
| 31.756098 | 77 | 0.596774 | [
"MIT"
] | 00Kai0/azure-cli-extensions | src/virtual-network-tap/azext_vnettap/vendored_sdks/v2018_08_01/models/subnet_association.py | 1,302 | Python |
from flask import render_template, url_for, request, flash, redirect, make_response
import email
from app import app
from werkzeug.utils import secure_filename
from app.predict_email import Prediction
import tempfile
predict_email = Prediction()
def parse_email(email_raw):
parser = email.parser.BytesParser()
email_parsed = parser.parse(email_raw)
return email_parsed
@app.route("/")
def home():
return render_template("home.html")
@app.route("/predict", methods=["POST", "GET"])
def predict():
if request.method == "POST":
email_raw = request.files["email_raw"]
if email_raw.filename != "":
temp_name = next(tempfile._get_candidate_names())
with open(f"./app/data/uploads/{temp_name}.eml", "wb") as f:
f.write(email_raw.read())
            spam, prediction = predict_email.predict_emails([f"./app/data/uploads/{temp_name}.eml"])
# email_parsed = parse_email(email_raw)
# print(email["subject"])
# Features = prepData(textData)
# prediction = int((np.asscalar(loaded_model.predict(Features))) * 100)
if spam:
page = "spam.html"
score = int(round(prediction[0][1]*100))
else:
page = "ham.html"
score = int(round(prediction[0][0]*100))
r = make_response(render_template(page, prediction=score))
r.headers.add('Access-Control-Allow-Origin', '*')
r.headers.add('Access-Control-Expose-Headers', 'Content-Disposition')
return r
else:
return render_template("home.html")
else:
return render_template("home.html")
@app.route("/predict2")
def predict2():
return render_template("ham.html")
# @app.route("/predict", methods=["POST"])
# def predict():
# df = pd.read_csv("spam.csv", encoding="latin-1")
# df.drop(["Unnamed: 2", "Unnamed: 3", "Unnamed: 4"], axis=1, inplace=True)
# # Features and Labels
# df["label"] = df["class"].map({"ham": 0, "spam": 1})
# X = df["message"]
# y = df["label"]
# # Extract Feature With CountVectorizer
# cv = CountVectorizer()
# X = cv.fit_transform(X) # Fit the Data
# from sklearn.model_selection import train_test_split
# X_train, X_test, y_train, y_test = train_test_split(
# X, y, test_size=0.33, random_state=42
# )
# # Naive Bayes Classifier
# from sklearn.naive_bayes import MultinomialNB
# clf = MultinomialNB()
# clf.fit(X_train, y_train)
# clf.score(X_test, y_test)
# # Alternative Usage of Saved Model
# # joblib.dump(clf, 'NB_spam_model.pkl')
# # NB_spam_model = open('NB_spam_model.pkl','rb')
# # clf = joblib.load(NB_spam_model)
# if request.method == "POST":
# message = request.form["message"]
# data = [message]
# vect = cv.transform(data).toarray()
# my_prediction = clf.predict(vect)
# return render_template("result.html", prediction=my_prediction)
| 32.157895 | 99 | 0.614403 | [
"MIT"
] | konstantingoretzki/spamdetection-web | app/routes.py | 3,055 | Python |
class ParticleData(object):
""" Class for holding particle data such as charge.
"""
def __init__(self, charge=0):
self.charge=charge
def __repr__(self):
return "charge="+str(self.charge)
class ParticleDataList(object):
""" Class for generic handling particle ids, names and properties.
Multiple ids can be mapped to multiple names of particle.
First name/id in the list is the default name. But additional names/ids can be given.
An examples can be found in the defaultParticleDataList.
"""
def __init__(self, list=None):
""" A list of particle ids and names can be given to the constructor.
"""
self._list = []
if list != None:
self._list = list
def setList(self, list):
self._list = list
def getList(self):
return self._list
def addParticle(self, ids, names, particleData):
""" Add a paricle with (multiple) ids and names to the list.
"""
if not (isinstance(ids,list) and isinstance(names,list)):
raise TypeError("addParticle needs to lists as input: e.g. [1,-1],['d','dbar']")
self._list += [(ids, names, particleData)]
def getDefaultName(self, name):
""" Return the default (first in list) name given any of the particle's names.
"""
for items in self._list:
if name in items[1]:
return items[1][0]
return name
def getDefaultId(self, id):
""" Return the default (first in list) id given any of the particle's ids.
"""
for items in self._list:
if id in items[0]:
return items[0][0]
return id
def getIdFromName(self, name):
""" Return the default (first in list) id given any of the particle's names.
"""
for items in self._list:
if name in items[1]:
return items[0][0]
return 0
def getNameFromId(self, id):
""" Return the default (first in list) name given any of the particle's ids.
"""
for items in self._list:
if id in items[0]:
return items[1][0]
return "unknown"
def getParticleDataFromId(self, id):
for items in self._list:
if id in items[0]:
return items[2]
def isQuarkId(self, id):
return abs(id) in [1, 2, 3, 4, 5, 6]
def isLeptonId(self, id):
return abs(id) in [11, 12, 13, 14, 15, 16]
def isGluonId(self, id):
return abs(id) in [21, 9]
def isBosonId(self, id):
return abs(id) in [21, 9, 22, 23, 24, 25, 32, 33, 34, 35, 36, 37]
def isPhotonId(self, id):
return id == 22
def isHiggsId(self, id):
return abs(id) in [25, 35, 36, 37]
def isSusyId(self, id):
return abs(id) in [1000001, 1000002, 1000003, 1000004, 1000005, 1000006, 1000011, 1000012, 1000013, 1000014, 1000015, 1000016, 2000001, 2000002, 2000003, 2000004, 2000005, 2000006, 2000011, 2000013, 1000021, 1000022, 1000023, 1000024, 1000025, 1000035, 1000037, 1000039]
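# Minimal usage sketch (hypothetical list, mirroring the defaults defined below):
#   muons = ParticleDataList([([13, -13], ["mu", "Muon", "mu+", "mu-"], ParticleData(1))])
#   muons.getDefaultName("mu-")   # -> "mu"
#   muons.getIdFromName("Muon")   # -> 13
#   muons.getNameFromId(-13)      # -> "mu"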
defaultQuarkDataList = ParticleDataList([
([1, - 1], ["d", "d_quark", "dbar"], ParticleData(1.0/3.0)),
([2, - 2], ["u", "u_quark", "ubar"], ParticleData(2.0/3.0)),
([3, - 3], ["s", "s_quark", "sbar"], ParticleData(1.0/3.0)),
([4, - 4], ["c", "c_quark", "cbar"], ParticleData(2.0/3.0)),
([5, - 5], ["b", "b_quark", "bbar"], ParticleData(1.0/3.0)),
([6, - 6], ["t", "t_quark", "tbar"], ParticleData(2.0/3.0))
])
defaultLeptonDataList = ParticleDataList([
([11, - 11], ["e","electron", "Electron", "e+", "e-"], ParticleData(1)),
([12, - 12], ["nu_e", "Electron_neutrino", "electron_neutrino", "nu_electron"], ParticleData(0)),
([13, - 13], ["mu", "Muon", "muon", "mu+", "mu-"], ParticleData(1)),
([14, - 14], ["nu_mu", "nu_muon", "Muon_neutrino", "muon_neutrino"], ParticleData(0)),
([15, - 15], ["tau", "Tau", "tau+", "tau-"], ParticleData(1)),
([16, - 16], ["nu_tau", "Tau_neutrino", "tau_neutrino"], ParticleData(0))
])
defaultBosonDataList = ParticleDataList([
([21, 9], ["g", "Gluon", "gluon"], ParticleData(0)),
([22], ["gamma", "Photon", "photon"], ParticleData(0)),
([23], ["Z", "Z_boson"], ParticleData(0)),
([24, - 24], ["W", "W_boson", "W+", "W-"], ParticleData(1)),
([25], ["h", "Higgs_boson", "Higgs", "higgs_boson"], ParticleData(0))
])
defaultHadronDataList = ParticleDataList([
([111], ["pi0", "Pi0"], ParticleData(0)),
([112], ["pi+", "Pi+"], ParticleData(1)),
([221], ["eta", "Eta"], ParticleData(0)),
([130], ["K0_L"], ParticleData(0)),
([310], ["K0_S"], ParticleData(0)),
([311], ["K0"], ParticleData(0)),
([321], ["K+"], ParticleData(1)),
([411], ["D0"], ParticleData(0)),
([421], ["D+"], ParticleData(1)),
([511], ["B0"], ParticleData(0)),
([521], ["B+"], ParticleData(1)),
([2212], ["p","Proton","proton"], ParticleData(1)),
([2112], ["n","Neutron","neutron"], ParticleData(0)),
([2224], ["Delta++"], ParticleData(2)),
([2214], ["Delta+"], ParticleData(1)),
([2114], ["Delta0"], ParticleData(0)),
([1114], ["Delta-"], ParticleData(1))
])
defaultExtensionDataList = ParticleDataList([
([32], ["Z'", "Z_prime"], ParticleData(0)),
([33], ["Z''", "Z_primeprime"], ParticleData(0)),
([34, - 34], ["W'", "W_prime", "W'+", "W'-"], ParticleData(1)),
([37, - 37], ["H+", "Charged_Higgs", "H+", "H-"], ParticleData(1)),
([35], ["H0", "Neutral_Higgs_H", "H"], ParticleData(0)),
([36], ["A0", "Neutral_Higgs_A", "A"], ParticleData(0))
])
defaultSusyDataList = ParticleDataList([
([1000001, - 1000001], ["d_squark_L", "d~_L", "d~_L_bar"], ParticleData(1.0/3.0)),
([1000002, - 1000002], ["u_squark_L", "u~_L", "u~_L_bar"], ParticleData(2.0/3.0)),
([1000003, - 1000003], ["s_squark_L", "s~_L", "s~_L_bar"], ParticleData(1.0/3.0)),
([1000004, - 1000004], ["c_squark_L", "c~_L", "c~_L_bar"], ParticleData(2.0/3.0)),
([1000005, - 1000005], ["sbottom_L", "b~_1", "b~_1_bar"], ParticleData(1.0/3.0)),
([1000006, - 1000006], ["stop_L", "t~_1", "t~_1_bar"], ParticleData(2.0/3.0)),
([1000011, - 1000011], ["Selectron_L", "selectron_L", "e~_L", "e~_L+", "e~_L-"], ParticleData(1)),
([1000012, - 1000012], ["Electron_sneutrino", "electron_sneutrino", "nu~_e_L"], ParticleData(0)),
([1000013, - 1000013], ["Smuon_L", "smuon_L", "mu~_L", "mu~_L+", "mu~_L-"], ParticleData(1)),
([1000014, - 1000014], ["Muon_sneutrino", "muon_sneutrino", "nu~_mu_L"], ParticleData(0)),
([1000015, - 1000015], ["Stau_1", "stau_1", "tau~_1+", "tau~_1-"], ParticleData(1)),
([1000016, - 1000016], ["Tau_sneutrino", "tau_sneutrino", "nu~_tau_L"], ParticleData(0)),
    ([2000001, - 2000001], ["d_squark_R", "d~_R", "d~_R_bar"], ParticleData(1.0/3.0)),
    ([2000002, - 2000002], ["u_squark_R", "u~_R", "u~_R_bar"], ParticleData(2.0/3.0)),
    ([2000003, - 2000003], ["s_squark_R", "s~_R", "s~_R_bar"], ParticleData(1.0/3.0)),
    ([2000004, - 2000004], ["c_squark_R", "c~_R", "c~_R_bar"], ParticleData(2.0/3.0)),
([2000005, - 2000005], ["sbottom_R", "b~_2", "b~_2_bar"], ParticleData(1.0/3.0)),
([2000006, - 2000006], ["stop_R", "t~_2", "t~_2_bar"], ParticleData(2.0/3.0)),
([2000011, - 2000011], ["Selectron_R", "selectron_R", "e~_R", "e~_R+", "e~_R-"], ParticleData(1)),
    ([2000013, - 2000013], ["Smuon_R", "smuon_R", "mu~_R", "mu~_R+", "mu~_R-"], ParticleData(1)),
    ([2000015, - 2000015], ["Stau_2", "stau_2", "tau~_2+", "tau~_2-"], ParticleData(1)),
([1000021], ["Gluino", "gluino", "g~"], ParticleData(0)),
([1000022, - 1000022], ["Neutralino_1", "neutralino_1", "chi~_1"], ParticleData(0)),
([1000023, - 1000023], ["Neutralino_2", "neutralino_2", "chi~_2"], ParticleData(0)),
([1000025, - 1000025], ["Neutralino_3", "neutralino_3", "chi~_3"], ParticleData(0)),
    ([1000035, - 1000035], ["Neutralino_4", "neutralino_4", "chi~_4"], ParticleData(0)),
([1000024, - 1000024], ["Chargino_1", "chargino_1", "chi~_1+", "chi~_1-"], ParticleData(1)),
([1000037, - 1000037], ["Chargino_2", "chargino_2", "chi~_2+", "chi~_2-"], ParticleData(1)),
([1000039], ["Gravitino", "gravitino", "G"], ParticleData(0))
])
defaultParticleDataList = ParticleDataList(
defaultQuarkDataList.getList() +
defaultLeptonDataList.getList() +
defaultBosonDataList.getList() +
defaultHadronDataList.getList() +
defaultExtensionDataList.getList() +
defaultSusyDataList.getList())
partonParticleDataList = ParticleDataList([
([1, - 1, 2, - 2, 3, - 3, 4, - 4, 21, 9], ["parton", "d", "dbar", "u", "ubar", "s", "sbar", "c", "cbar", "b", "bbar", "t", "tbar", "gluon", "g"], ParticleData())
] +
defaultLeptonDataList.getList() + [
([22], ["gamma", "Photon", "photon"], ParticleData(0)),
([23], ["Z", "Z_boson"], ParticleData(0)),
([24, - 24], ["W", "W_boson", "W+", "W-"], ParticleData(1)),
    ([25], ["h", "Higgs_boson", "Higgs", "higgs_boson"], ParticleData(0))
])
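# --- Usage sketch (illustrative addition, not part of the original module) ---
# A minimal example of the lookup helpers defined above; guarded so it only
# runs when the module is executed directly.
if __name__ == "__main__":
    print(defaultParticleDataList.getNameFromId(13))      # -> "mu"
    print(defaultParticleDataList.getIdFromName("muon"))  # -> 13
    print(defaultParticleDataList.getDefaultName("e-"))   # -> "e"
    print(defaultParticleDataList.isLeptonId(-11))        # -> True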
| 42.921951 | 278 | 0.585521 | [
"Apache-2.0"
] | 7quantumphysics/cmssw | FWCore/GuiBrowsers/python/Vispa/Plugins/EdmBrowser/ParticleDataList.py | 8,799 | Python |
import torch
from torch import nn
from torch.autograd import Variable
from torch import optim
import torch.nn.functional as F
import random
import numpy as np
import scipy as sp
import gurobipy as gp
from qpthlocal.qp import QPFunction
from qpthlocal.qp import QPSolvers
from qpthlocal.qp import make_gurobi_model
import pickle
import sys
import datetime
from collections import defaultdict
import math
from sklearn import preprocessing
from sklearn.metrics import confusion_matrix
import logging
import datetime
import time
from collections import defaultdict
from sklearn.metrics import mean_squared_error as mse
from scipy.special import expit, logit
import copy
from warnings import warn
from scipy.optimize import OptimizeWarning
sys.path.insert(0,'../Interior/')
sys.path.insert(0,'../..')
# from ip_model import *
from ip_model_whole import *
from remove_redundancy import _remove_redundancy, _remove_redundancy_sparse, _remove_redundancy_dense
from sgd_learner import *
import pandas as pd
def bceloss(inputs,target):
return -(np.log(1-expit(inputs)) + target*inputs).mean()
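# Note (added for clarity): since log(sigmoid(x)) = x + log(1 - sigmoid(x)), the
# expression above equals the usual binary cross-entropy with logits,
#   -(target*log(sigmoid(inputs)) + (1-target)*log(1-sigmoid(inputs))).mean(),
# written in a form that only evaluates log(1 - expit(inputs)) once.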
def _remove_redundant_rows (A_eq):
# remove redundant (linearly dependent) rows from equality constraints
n_rows_A = A_eq.shape[0]
redundancy_warning = ("A_eq does not appear to be of full row rank. To "
"improve performance, check the problem formulation "
"for redundant equality constraints.")
# if (sps.issparse(A_eq)):
# if rr and A_eq.size > 0: # TODO: Fast sparse rank check?
# A_eq, b_eq, status, message = _remove_redundancy_sparse(A_eq, b_eq)
# if A_eq.shape[0] < n_rows_A:
# warn(redundancy_warning, OptimizeWarning, stacklevel=1)
# if status != 0:
# complete = True
# return (c, c0, A_ub, b_ub, A_eq, b_eq, bounds,
# x, x0, undo, complete, status, message)
# This is a wild guess for which redundancy removal algorithm will be
# faster. More testing would be good.
small_nullspace = 5
if A_eq.size > 0:
try: # TODO: instead use results of first SVD in _remove_redundancy
rank = np.linalg.matrix_rank(A_eq)
except Exception: # oh well, we'll have to go with _remove_redundancy_dense
rank = 0
if A_eq.size > 0 and rank < A_eq.shape[0]:
warn(redundancy_warning, OptimizeWarning, stacklevel=3)
dim_row_nullspace = A_eq.shape[0]-rank
if dim_row_nullspace <= small_nullspace:
d_removed, status, message = _remove_redundancy(A_eq)
if dim_row_nullspace > small_nullspace :
d_removed, status, message = _remove_redundancy_dense(A_eq)
if A_eq.shape[0] < rank:
message = ("Due to numerical issues, redundant equality "
"constraints could not be removed automatically. "
"Try providing your constraint matrices as sparse "
"matrices to activate sparse presolve, try turning "
"off redundancy removal, or try turning off presolve "
"altogether.")
status = 4
if status != 0:
complete = True
return d_removed
def get_loss(net,A, X, y,instances):
net.eval()
rslt = []
c_pred = net(torch.from_numpy(X).float()).squeeze().detach().numpy()
c = y
for k,v in instances.items():
source, destination = v
b = np.zeros(len(A))
b [source] =1
b[destination ]=-1
model = gp.Model()
model.setParam('OutputFlag', 0)
x = model.addMVar(shape=A.shape[1], vtype=gp.GRB.BINARY, name="x")
model.setObjective(c_pred @x, gp.GRB.MINIMIZE)
model.addConstr(A @ x == b, name="eq")
model.optimize()
if model.status ==2:
sol =x.X
rslt.append( c.dot(sol))
else:
print(model.status, k,v)
net.train()
return mse(c_pred,c), sum(rslt)
def validation_module(net,A, X,y, training_instances,validation_instances, test_instances,time,
epoch,subepoch,**kwargs):
# return bceloss(c_pred,c), sum(rslt)
dict_validation = {}
losses_test = get_loss(net, A, X,y,test_instances)
dict_validation['test_prediction_loss'] = losses_test[0]
dict_validation['test_task_loss'] = losses_test[1]
losses_train = get_loss(net, A, X,y,training_instances)
dict_validation['train_prediction_loss'] = losses_train[0]
dict_validation['train_task_loss'] = losses_train[1]
losses_validation = get_loss(net, A, X,y,validation_instances)
dict_validation['validation_prediction_loss'] = losses_validation[0]
dict_validation['validation_task_loss'] = losses_validation[1]
dict_validation['batch'] = subepoch
dict_validation['epoch'] = epoch
dict_validation['time'] = time
return dict_validation
def make_fc(num_layers, num_features, num_targets=1,
activation_fn = nn.ReLU,intermediate_size=50, regularizers = True):
net_layers = [nn.Linear(num_features, intermediate_size),
activation_fn()]
for hidden in range(num_layers-2):
net_layers.append(nn.Linear(intermediate_size, intermediate_size))
net_layers.append(activation_fn())
net_layers.append(nn.Linear(intermediate_size, num_targets))
net_layers.append(nn.ReLU())
return nn.Sequential(*net_layers)
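# Example (illustrative): make_fc(num_layers=3, num_features=5) builds
#   Linear(5, 50) -> ReLU -> Linear(50, 50) -> ReLU -> Linear(50, 1) -> ReLU
# i.e. num_layers counts the input, hidden and output linear layers.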
class two_stage_matching:
def __init__(self,A,num_features, num_layers, intermediate_size,
activation_fn = nn.ReLU, num_instance=1,
epochs=10,batchsize= 256, optimizer=optim.Adam,
validation=False,**hyperparams):
self.A = A
self.num_features = num_features
self.num_layers = num_layers
self.activation_fn = activation_fn
self.intermediate_size = intermediate_size
self.epochs = epochs
self.batchsize = batchsize
self.validation = validation
self.net = make_fc(num_layers=num_layers, num_features=num_features,
activation_fn= activation_fn,
intermediate_size= intermediate_size)
self.optimizer = optimizer(self.net.parameters(), **hyperparams)
def fit(self,X,y,instances):
test_instances = instances['test']
validation_instances = instances['validation']
train_instances = instances['train']
time_ = 0
self.model_time = 0
n_train = X.shape[0]
if self.validation:
validation_list = []
indexes = np.arange(n_train)
loss_fn = nn.MSELoss()# nn.KLDivLoss(reduction='batchmean')
for e in range(self.epochs):
start_time = time.time()
np.random.shuffle(indexes)
num_batches = len(indexes) //(self.batchsize)
bi = 0#batch-index
for b in range(num_batches):
self.optimizer.zero_grad()
X_np = X[indexes[bi:(bi+self.batchsize)]]
y_np = y[indexes[bi:(bi+self.batchsize)]]
bi += self.batchsize
X_torch = torch.from_numpy(X_np).float()
y_torch = torch.from_numpy(y_np).float()
c_pred = self.net(X_torch).squeeze()
loss = loss_fn(c_pred,y_torch)
loss.backward()
self.optimizer.step()
end_time = time.time()
time_ += end_time - start_time
if self.validation:
validation_list.append( validation_module(self.net,self.A,
X,y,train_instances,validation_instances, test_instances,time_,e,b))
print("Epoch {} Loss:{} Time: {:%Y-%m-%d %H:%M:%S}".format(e+1,loss.sum().item(),
datetime.datetime.now()))
if self.validation :
dd = defaultdict(list)
for d in validation_list:
for key, value in d.items():
dd[key].append(value)
df = pd.DataFrame.from_dict(dd)
logging.info('Completion Time %s \n' %str(datetime.datetime.now()) )
return df
def predict(self,X):
X_torch = torch.from_numpy(X).float()
self.net.eval()
pred= self.net(X_torch)
self.net.train()
return pred.detach().detach().numpy().squeeze()
def validation_result(self,X,y, instances):
validation_rslt = get_loss(self.net, self.A, X,y,instances)
return validation_rslt[0], validation_rslt[1]
class qptl:
def __init__(self,A,num_features, num_layers, intermediate_size,num_instance= 1,
activation_fn = nn.ReLU, epochs=10,optimizer=optim.Adam,
gamma=1e-5,validation=False,
**hyperparams):
self.num_features = num_features
self.num_layers = num_layers
self.activation_fn = activation_fn
self.intermediate_size = intermediate_size
self.A = A
self.num_instance = num_instance
self.epochs = epochs
self.optimizer = optimizer
self.validation = validation
self.net = make_fc(num_layers=num_layers, num_features=num_features,
activation_fn= activation_fn,
intermediate_size= intermediate_size)
self.optimizer = optimizer(self.net.parameters(), **hyperparams)
self.gamma= gamma
def fit(self,X,y,instances):
test_instances = instances['test']
validation_instances = instances['validation']
train_instances = instances['train']
time_ = 0
self.model_time = 0
n_train = X.shape[0]
if self.validation:
validation_list = []
logging.info("training started")
# rows_to_be_removed = _remove_redundant_rows(self.A)
# A_torch = torch.from_numpy(np.delete(self.A, rows_to_be_removed, axis=0)).float()
A_torch = torch.from_numpy(self.A).float()
Q_torch = self.gamma*torch.eye(A_torch.shape[1])
X_torch = torch.from_numpy(X).float()
y_torch = torch.from_numpy(y).float()
G_torch = -1*torch.eye(A_torch.shape[1])
h_torch = torch.zeros(A_torch.shape[1])
for e in range(self.epochs):
for i in range(self.num_instance):
start_time = time.time()
self.optimizer.zero_grad()
source, dest = train_instances[i]
# b = np.zeros(len(self.A))
# b[source] =1
# b[dest ]=-1
# b= np.delete(b, rows_to_be_removed)
# b_torch = torch.from_numpy(b).float()
b_torch = torch.zeros(len(self.A))
b_torch[source] =1
b_torch[dest ]=-1
model_params_quad = make_gurobi_model(G_torch.detach().numpy(),
h_torch.detach().numpy(),A_torch.detach().numpy(),
b_torch.detach().numpy(), Q_torch.detach().numpy())
# model_params_quad = make_gurobi_model(None,None,
# A_torch.detach().numpy(),
# b_torch.detach().numpy(), Q_torch.detach().numpy())
c_pred = self.net(X_torch)
if any(torch.isnan(torch.flatten(c_pred)).tolist()):
logging.info("**Alert** nan in param c_pred ")
if any(torch.isinf(torch.flatten(c_pred)).tolist()):
logging.info("**Alert** inf in param c_pred ")
logging.info("shapes c {} A {} b {} G {} h {} Q {}".format(c_pred.shape,
A_torch.shape,b_torch.shape,G_torch.shape,h_torch.shape,
Q_torch.shape ))
x = QPFunction(verbose=False, solver=QPSolvers.GUROBI,
model_params= model_params_quad)(Q_torch.expand(1, *Q_torch.shape),
c_pred.squeeze(),G_torch.expand(1, *G_torch.shape),
h_torch.expand(1, *h_torch.shape),
A_torch.expand(1, *A_torch.shape),
b_torch.expand(1, *b_torch.shape))
# x = QPFunction(verbose=False, solver=QPSolvers.GUROBI,
# model_params= model_params_quad)(Q_torch.expand(1, *Q_torch.shape),
# c_pred.squeeze(),torch.Tensor(),
# torch.Tensor(),
# A_torch.expand(1, *A_torch.shape),
# b_torch.expand(1, *b_torch.shape))
c_pred.retain_grad()
loss = (y_torch*x).mean()
loss.backward()
c_grad = copy.deepcopy(c_pred.grad)
if any(torch.isnan(torch.flatten(c_grad)).tolist()):
logging.info("**Alert** nan in param c_grad ")
self.optimizer.step()
# logging.info("bkwd done")
end_time = time.time()
time_ += end_time - start_time
if self.validation:
if ((i+1)%20==0):
validation_list.append( validation_module(self.net,self.A,
X,y,train_instances,validation_instances,
test_instances,time_,e,i))
print("Epoch {} Loss:{} Time: {:%Y-%m-%d %H:%M:%S}".format(e+1,loss.sum().item(),
datetime.datetime.now()))
if self.validation :
dd = defaultdict(list)
for d in validation_list:
for key, value in d.items():
dd[key].append(value)
df = pd.DataFrame.from_dict(dd)
logging.info('Completion Time %s \n' %str(datetime.datetime.now()) )
return df
def predict(self,X):
X_torch = torch.from_numpy(X).float()
self.net.eval()
pred= self.net(X_torch)
self.net.train()
return pred.detach().detach().numpy().squeeze()
def validation_result(self,X,y, instances):
validation_rslt = get_loss(self.net, self.A, X,y,instances)
return validation_rslt[0], validation_rslt[1]
class intopt:
def __init__(self,A, num_features, num_layers, intermediate_size,
num_instance= 1,activation_fn = nn.ReLU,epochs=10,optimizer=optim.Adam,
method=1,max_iter=100,smoothing=False,thr = None,mu0=None,full_row_rank=True,
validation=False,**hyperparams):
self.A = A
self.num_features = num_features
self.num_layers = num_layers
self.activation_fn = activation_fn
self.intermediate_size = intermediate_size
self.num_instance = num_instance
self.method = method
self.epochs = epochs
self.optimizer = optimizer
self.max_iter = max_iter
self.smoothing = smoothing
self.thr = thr
self.mu0 = mu0
self.validation = validation
self.full_row_rank = full_row_rank
self.net = make_fc(num_layers=num_layers, num_features=num_features,
activation_fn= activation_fn,
intermediate_size= intermediate_size)
self.optimizer = optimizer(self.net.parameters(), **hyperparams)
def fit(self,X,y,instances):
#A_torch = torch.from_numpy(self.A).float()
test_instances = instances['test']
validation_instances = instances['validation']
train_instances = instances['train']
time_ = 0
self.model_time = 0
n_train = X.shape[0]
if self.validation:
validation_list = []
# model = gp.Model()
# model.setParam('OutputFlag', 0)
# x = model.addMVar(shape= self.A.shape[1], lb=0.0, vtype=gp.GRB.CONTINUOUS, name="x")
if self.full_row_rank:
rows_to_be_removed = _remove_redundant_rows(self.A)
A_torch = torch.from_numpy(np.delete(self.A, rows_to_be_removed, axis=0)).float()
else:
A_torch = torch.from_numpy(self.A).float()
logging.info("shape of A {} shape of A-torch {}".format(self.A.shape,A_torch.shape))
# A_ = np.delete(A_, rows_to_be_removed, axis=0)
# b_ = np.delete(b_, rows_to_be_removed)
# A_torch = torch.from_numpy(self.A).float()
X_torch = torch.from_numpy(X).float()
y_torch = torch.from_numpy(y).float()
logging.info("training started")
for e in range(self.epochs):
for i in range(self.num_instance):
start_time = time.time()
self.optimizer.zero_grad()
source, dest = train_instances[i]
if self.full_row_rank:
b = np.zeros(len(self.A))
b[source] =1
b[dest ]=-1
b= np.delete(b, rows_to_be_removed)
b_torch = torch.from_numpy(b).float()
else:
b_torch = torch.zeros(len(self.A))
b_torch[source] = 1
b_torch[dest] = -1
c_pred = self.net(X_torch).squeeze()
x = IPOfunc(A_torch,b_torch,torch.Tensor(),torch.Tensor(),
bounds= [(0., None)],
max_iter=self.max_iter,mu0 = self.mu0,
thr=self.thr,method = self.method,
smoothing=self.smoothing)(c_pred)
loss = (y_torch*x).mean()
loss.backward()
self.optimizer.step()
end_time = time.time()
time_ += end_time - start_time
if self.validation:
if ((i+1)%20==0) :
validation_list.append( validation_module(self.net,self.A,
X,y,train_instances,validation_instances,
test_instances,time_,e,i))
print("Epoch {} Loss:{} Time: {:%Y-%m-%d %H:%M:%S}".format(e+1,loss.item(),
datetime.datetime.now()))
if self.validation :
dd = defaultdict(list)
for d in validation_list:
for key, value in d.items():
dd[key].append(value)
df = pd.DataFrame.from_dict(dd)
logging.info('Completion Time %s \n' %str(datetime.datetime.now()) )
return df
def predict(self,X):
X_torch = torch.from_numpy(X).float()
self.net.eval()
pred= self.net(X_torch)
self.net.train()
return pred.detach().detach().numpy().squeeze()
def validation_result(self,X,y, instances):
validation_rslt = get_loss(self.net, self.A, X,y,instances)
return validation_rslt[0], validation_rslt[1]
class SPO:
def __init__(self,A,num_features, num_layers, intermediate_size,num_instance= 1,
activation_fn = nn.ReLU, epochs=10,optimizer=optim.Adam,
validation=False,**hyperparams):
self.A = A
self.num_features = num_features
self.num_layers = num_layers
self.activation_fn = activation_fn
self.intermediate_size = intermediate_size
self.epochs = epochs
self.num_instance = num_instance
self.validation = validation
self.net = make_fc(num_layers=num_layers, num_features=num_features,
activation_fn= activation_fn,
intermediate_size= intermediate_size)
self.optimizer = optimizer(self.net.parameters(), **hyperparams)
def fit(self,X,y,instances):
#A_torch = torch.from_numpy(self.A).float()
test_instances = instances['test']
validation_instances = instances['validation']
train_instances = instances['train']
time_ = 0
self.model_time = 0
n_train = X.shape[0]
if self.validation:
validation_list = []
X_torch = torch.from_numpy(X).float()
y_torch = torch.from_numpy(y).float()
true_solution ={}
logging.info("training started")
for e in range(self.epochs):
for i in range(self.num_instance):
start_time = time.time()
self.optimizer.zero_grad()
source, dest = train_instances[i]
b = np.zeros(len(self.A))
b[source] =1
b[dest ]=-1
if i not in true_solution:
model = gp.Model()
model.setParam('OutputFlag', 0)
x = model.addMVar(shape= self.A.shape[1], lb=0.0, vtype=gp.GRB.CONTINUOUS, name="x")
model.addConstr(self.A @ x == b, name="eq")
model.setObjective((y_torch.detach().numpy())@x, gp.GRB.MINIMIZE)
model.optimize()
x_true = x.X
true_solution[i] = np.copy(x_true)
x_true = true_solution[i]
c_pred = self.net(X_torch).squeeze()
c_spo = (2*c_pred - y_torch)
model = gp.Model()
model.setParam('OutputFlag', 0)
x = model.addMVar(shape= self.A.shape[1], lb=0.0, ub=1.0,vtype=gp.GRB.CONTINUOUS, name="x")
model.addConstr(self.A @ x == b, name="eq")
model.setObjective((c_spo.detach().numpy())@x, gp.GRB.MINIMIZE)
model.optimize()
#print(model.status)
x_spo = x.X
grad = torch.from_numpy( x_true - x_spo).float()
loss = self.net(X_torch).squeeze()
loss.backward(gradient=grad)
self.optimizer.step()
logging.info("bkwd done")
end_time = time.time()
time_ += end_time - start_time
if self.validation:
if ((i+1)%20==0):
validation_list.append( validation_module(self.net,self.A,
X,y,train_instances,validation_instances,
test_instances,time_,e,i))
print("Epoch {} Loss:{} Time: {:%Y-%m-%d %H:%M:%S}".format(e+1,loss.sum().item(),
datetime.datetime.now()))
if self.validation :
dd = defaultdict(list)
for d in validation_list:
for key, value in d.items():
dd[key].append(value)
df = pd.DataFrame.from_dict(dd)
# print(validation_module(self.net,self.A,
# X,y,train_instances,validation_instances,
# test_instances,time_,e,i))
# pred = self.predict(X)
# print(mse(pred,y))
logging.info('Completion Time %s \n' %str(datetime.datetime.now()) )
return df
def validation_result(self,X,y, instances):
validation_rslt = get_loss(self.net, self.A, X,y,instances)
return validation_rslt[0], validation_rslt[1]
def predict(self,X):
X_torch = torch.from_numpy(X).float()
self.net.eval()
pred= self.net(X_torch)
self.net.train()
return pred.detach().detach().numpy().squeeze() | 34.463028 | 101 | 0.678161 | [
"MIT"
] | JayMan91/NeurIPSIntopt | shortespath/shortespath.py | 19,575 | Python |
#!/usr/bin/env python3
# Copyright (c) 2014-2018 The Divi Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the zapwallettxes functionality.
- start two divid nodes
- create two transactions on node 0 - one is confirmed and one is unconfirmed.
- restart node 0 and verify that both the confirmed and the unconfirmed
transactions are still available.
- restart node 0 with zapwallettxes and persistmempool, and verify that both
the confirmed and the unconfirmed transactions are still available.
- restart node 0 with just zapwallettxes and verify that the confirmed
transactions are still available, but that the unconfirmed transaction has
been zapped.
"""
from test_framework.test_framework import DiviTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
wait_until,
)
class ZapWalletTXesTest (DiviTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 2
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
self.log.info("Mining blocks...")
self.nodes[0].generate(1)
self.sync_all()
self.nodes[1].generate(100)
self.sync_all()
# This transaction will be confirmed
txid1 = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 10)
self.nodes[0].generate(1)
self.sync_all()
# This transaction will not be confirmed
txid2 = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 20)
# Confirmed and unconfirmed transactions are now in the wallet.
assert_equal(self.nodes[0].gettransaction(txid1)['txid'], txid1)
assert_equal(self.nodes[0].gettransaction(txid2)['txid'], txid2)
# Stop-start node0. Both confirmed and unconfirmed transactions remain in the wallet.
self.stop_node(0)
self.start_node(0)
assert_equal(self.nodes[0].gettransaction(txid1)['txid'], txid1)
assert_equal(self.nodes[0].gettransaction(txid2)['txid'], txid2)
# Stop node0 and restart with zapwallettxes and persistmempool. The unconfirmed
# transaction is zapped from the wallet, but is re-added when the mempool is reloaded.
self.stop_node(0)
self.start_node(0, ["-persistmempool=1", "-zapwallettxes=2"])
wait_until(lambda: self.nodes[0].getmempoolinfo()['size'] == 1, timeout=3)
self.nodes[0].syncwithvalidationinterfacequeue() # Flush mempool to wallet
assert_equal(self.nodes[0].gettransaction(txid1)['txid'], txid1)
assert_equal(self.nodes[0].gettransaction(txid2)['txid'], txid2)
# Stop node0 and restart with zapwallettxes, but not persistmempool.
# The unconfirmed transaction is zapped and is no longer in the wallet.
self.stop_node(0)
self.start_node(0, ["-zapwallettxes=2"])
        # tx1 is still available because it was confirmed
assert_equal(self.nodes[0].gettransaction(txid1)['txid'], txid1)
# This will raise an exception because the unconfirmed transaction has been zapped
assert_raises_rpc_error(-5, 'Invalid or non-wallet transaction id', self.nodes[0].gettransaction, txid2)
if __name__ == '__main__':
ZapWalletTXesTest().main()
| 40.903614 | 112 | 0.706922 | [
"MIT"
] | gingfinger/divi99 | test/functional/wallet_zapwallettxes.py | 3,395 | Python |
"""
Filename: RobotsParser.py
Author: Maxwell Goldberg
Last modified: 06.09.17
Description: Helper class for parsing individual robots.txt records.
"""
# CONSTANTS
from constants import RECORD_MAX_LEN
# PYTHON BUILTINS
import re, unicodedata, logging
def test_ctrl_chars(s):
return len(s) != len("".join(ch for ch in s if unicodedata.category(ch)[0]!="C"))
class RobotsParser:
valid_fields = [u'user-agent', u'allow', u'disallow']
def __init__(self, record=None):
if record is None:
raise TypeError('Parameter record must not be NoneType')
if not isinstance(record, unicode):
raise TypeError('Parameter record must be a Unicode string')
if len(record) > RECORD_MAX_LEN:
raise ValueError('Parameter record exceeds maximum record num characters')
self.record = record
def parse_field(self, field):
field = field.strip().lower()
if field not in RobotsParser.valid_fields:
raise ValueError('Record contains invalid field')
return field
def parse_path(self, path):
path = path.strip()
if test_ctrl_chars(path):
raise ValueError('Record path contains control characters')
# Get path length prior to parsing
self.init_path_len = len(path)
path = re.escape(path)
path = path.replace('\\*', '.*').replace('\\$', '$')
return path
def parse(self):
# Attempt to separate a record by a colon delimiter.
record_list = self.record.split('#')[0]
record_list = record_list.split(':', 1)
if len(record_list) <= 1:
raise ValueError('Record must contain a delimiter')
if len(record_list) > 2:
raise ValueError('Record contains too many delimited fields')
# Parse the field
self.field = self.parse_field(record_list[0])
# Parse the path
self.path = self.parse_path(record_list[1]) | 30.034483 | 82 | 0.721584 | [
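# --- Usage sketch (illustrative addition, not part of the original module) ---
# Parses a single robots.txt record; the input must be a unicode string, as
# enforced in __init__ above.
if __name__ == "__main__":
    record_parser = RobotsParser(u"Disallow: /private/*   # comment is ignored")
    record_parser.parse()
    print(record_parser.field)  # -> disallow
    print(record_parser.path)   # -> escaped pattern with '*' expanded to '.*'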
"MIT"
] | wilsonsk/Node-React-Python-D3-Crawler-App | crawler/lib/RobotsParser.py | 1,742 | Python |
# -*- coding: utf-8 -*-
# @Time : 2019/5/11 15:12
# @Author : LegenDong
# @User : legendong
# @File : __init__.py.py
# @Software: PyCharm
from .channel_attention_layer import *
from .nan_attention_layer import *
| 22.3 | 38 | 0.654709 | [
"MIT"
] | LegenDong/IQIYI_VID_FACE_2019 | models/layer/__init__.py | 223 | Python |
#!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'django_example.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
| 34.125 | 78 | 0.690476 | [
"Apache-2.0"
] | serverlessplus/django-example | manage.py | 546 | Python |
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
import src.proto.predict_pb2 as predict__pb2
class PredictionServiceStub(object):
"""Missing associated documentation comment in .proto file."""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.Predict = channel.unary_unary(
"/onnxruntime.server.PredictionService/Predict",
request_serializer=predict__pb2.PredictRequest.SerializeToString,
response_deserializer=predict__pb2.PredictResponse.FromString,
)
class PredictionServiceServicer(object):
"""Missing associated documentation comment in .proto file."""
def Predict(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def add_PredictionServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
"Predict": grpc.unary_unary_rpc_method_handler(
servicer.Predict,
request_deserializer=predict__pb2.PredictRequest.FromString,
response_serializer=predict__pb2.PredictResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler("onnxruntime.server.PredictionService", rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class PredictionService(object):
"""Missing associated documentation comment in .proto file."""
@staticmethod
def Predict(
request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None,
):
return grpc.experimental.unary_unary(
request,
target,
"/onnxruntime.server.PredictionService/Predict",
predict__pb2.PredictRequest.SerializeToString,
predict__pb2.PredictResponse.FromString,
options,
channel_credentials,
insecure,
call_credentials,
compression,
wait_for_ready,
timeout,
metadata,
)
| 32.766234 | 119 | 0.67063 | [
"MIT"
] | akiueno/ml-system-in-actions | chapter2_training/cifar10/evaluate/src/proto/prediction_service_pb2_grpc.py | 2,523 | Python |
#Importing Libraries
import os
import csv
import sys, getopt
import uuid
import SimpleITK as sitk
import cv2
import numpy as np
import tensorflow as tf
from flask import Flask, flash, request, redirect, render_template
from flask import jsonify
from flask import send_from_directory
from flask_materialize import Material
from tensorflow.python.keras.backend import set_session
from werkzeug.utils import secure_filename
import shutil
import nibabel as nib
import pandas as pd
import numpy
from sarcopenia_ai.apps.segmentation.segloader import preprocess_test_image
from sarcopenia_ai.apps.server import settings
from sarcopenia_ai.apps.slice_detection.predict import parse_inputs, to256
from sarcopenia_ai.apps.slice_detection.utils import decode_slice_detection_prediction, \
preprocess_sitk_image_for_slice_detection, adjust_detected_position_spacing, place_line_on_img
from sarcopenia_ai.core.model_wrapper import BaseModelWrapper
from sarcopenia_ai.io import load_image
from sarcopenia_ai.preprocessing.preprocessing import blend2d
from sarcopenia_ai.utils import compute_muscle_area, compute_muscle_attenuation
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
graph = tf.get_default_graph()
import cv2
import numpy as np
def normalise_zero_one(image, eps=1e-8):
print("Here 1")
image = image.astype(np.float32)
ret = (image - np.min(image))
ret /= (np.max(image) - np.min(image) + eps)
return ret
def normalise_one_one(image):
print("Here 2")
ret = normalise_zero_one(image)
ret *= 2.
ret -= 1.
return ret
def preprocess_test_image(image):
print("Here")
#image = normalise_one_one(image, -250, 250)
image = normalise_one_one(image)
return image
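# Worked example (illustrative): normalise_one_one maps the minimum of an array
# to -1 and the maximum to +1, e.g.
#   normalise_one_one(np.array([0., 50., 100.])) -> array([-1., 0., 1.])
# (up to the small eps used in normalise_zero_one above).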
##################
def find_max(img):
return np.unravel_index(np.argmax(img, axis=None), img.shape)[0]
#Read arguments
#############################
import argparse
msg = "Adding description"
# Initialize parser
parser = argparse.ArgumentParser(description = msg)
# Reading the input arguments
parser.add_argument("-i", "--Input", help = "Input file or folder")
parser.add_argument('-test_name', type=str, default='Test')
# Read arguments from command line
args = parser.parse_args()
path = args.Input
test_name = args.test_name
#Creating the result structure variables
main = os.getcwd()
directory = os.path.join(main+'/NII_Data/'+path)
if not os.path.exists(main+'/Results/'+path+"/"):
os.mkdir(main+'/Results/'+path+'/')
out = os.path.join(main+'/Results/'+path+"/"+test_name+'/')
if os.path.exists(out):
shutil.rmtree(out)
os.mkdir(out)
if not os.path.exists(out):
os.mkdir(out)
out_yes = os.path.join(out+'/Yes')
if not os.path.exists(out_yes):
os.mkdir(out_yes)
out_no = os.path.join(out+'/No')
if not os.path.exists(out_no):
os.mkdir(out_no)
out_rev = os.path.join(out+'/Review/')
if not os.path.exists(out_rev):
os.mkdir(out_rev)
out_csv = os.path.join(out+'/Pred CSVs/')
if not os.path.exists(out_csv):
os.mkdir(out_csv)
#Load the sarcopenia-ai models
#set_session(sess)
model_wrapper = BaseModelWrapper(settings.SLICE_DETECTION_MODEL_PATH)
model_wrapper.setup_model()
global slice_detection_model
slice_detection_model= model_wrapper.model
slice_detection_model._make_predict_function()
global segmentation_model
model_wrapper = BaseModelWrapper(settings.SEGMENTATION_MODEL_PATH)
model_wrapper.setup_model()
segmentation_model = model_wrapper.model
segmentation_model._make_predict_function()
####Updated functions to replace older versions listed in the sarcopenia-ai enviroment
#Previous research indicates adjusting the HU range can help bone appear better
def reduce_hu_intensity_range(img, minv=100, maxv=1500):
img = np.clip(img, minv, maxv)
img = 255 * normalise_zero_one(img)
return img
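# Worked example (illustrative): values are clipped to [100, 1500] HU and then
# rescaled to [0, 255], e.g.
#   reduce_hu_intensity_range(np.array([-1000., 100., 1500.])) -> approximately array([0., 0., 255.])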
#Setting up the output file name & Prediction counter
pred_id = 0
cols = ['Folder_Path','Patient_Folder','Study_Folder','Serie_Folder','L3_detection','L3_position','Total_slices','Confidence','Slice_Thickness', 'Orientation']
lst = []
#Looping through the input folder and analyzing the images
for folder in os.listdir(directory):
#Patient Folder
if(folder=='.DS_Store'):
continue
#Study Folder
for sub_folder in os.listdir(directory+"/"+folder):
if(sub_folder=='.DS_Store'):
continue
#Series Folder
for sub_sub_folder in os.listdir(directory+"/"+folder+"/"+sub_folder):
#Image Level
for file in os.listdir(directory+"/"+folder+"/"+sub_folder+"/"+sub_sub_folder):
print("IN SUB-SUB-FOLDER: "+sub_sub_folder)
#print(file)
if(file.endswith(".nii.gz") or file.endswith(".nii")):
print("Processing file: "+file)
try:
if(sub_sub_folder=='.DS_Store'):
continue
print("IN SUB-SUB-FOLDER: "+sub_sub_folder)
image_path = directory+"/"+folder+"/"+sub_folder+"/"+sub_sub_folder+"/"+file
prob_threshold_U=settings.THRESHOLD_U
prob_threshold_L=settings.THRESHOLD_L
#Gathering image name
import ntpath
head, tail = ntpath.split(image_path)
image_name = tail or ntpath.basename(head)
pred_id = pred_id +1
print("ID --> "+str(pred_id))
results = {"success": False, "prediction": {'id': pred_id}}
sitk_image, _ = load_image(image_path)
print("-----------------------------image path: "+image_path )
#The code is not set up to analyze 4 dimensional data.
if len(sitk_image.GetSize()) == 4:
print("-------- 4D Image: Grabbing only first volume")
sitk_image = sitk_image[:, :, :, 0]
#Getting image orientation information for output file.
print('-------------- NIB')
nib_image = nib.load(image_path)
orient_nib=nib.orientations.aff2axcodes(nib_image.affine)
print('-------------- Preprocess')
#Preprocessing the image
image2d, image2d_preview= preprocess_sitk_image_for_slice_detection(sitk_image)
image3d = sitk.GetArrayFromImage(sitk_image)
#print(image3d.shape)
#print(image2d.shape)
#print(image2d_preview.shape)
spacing = sitk_image.GetSpacing()
size = list(sitk_image.GetSize())
slice_thickness = spacing[2]
                        #Utilizing the sarcopenia-ai model to predict the L3 vertebra
with graph.as_default():
set_session(sess)
preds = slice_detection_model.predict(image2d)
print('-------------- Predict')
#Processing the model output
pred_z, prob = decode_slice_detection_prediction(preds)
slice_z = adjust_detected_position_spacing(pred_z, spacing)
print('Prob: '+ str(prob))
print('Slice Z: ' +str(slice_z) )
                        print('Pred Z: '+str(pred_z))
                        #Normalizing the prediction so the detected slice lies within 27%-48% of the scan's slices
new_z_calculate = 0
new_pred_z = pred_z
new_slice_z = slice_z
new_prob = prob
print('-------------- Normalize')
if(slice_z < .27*size[2] or slice_z > .48*size[2]):
print("---------------------debug")
print(preds.shape)
print(preds.shape[1])
new_pred_z = find_max(preds[0, int(.27*preds.shape[1]):int(.48*preds.shape[1])])
new_pred_z = new_pred_z + int(.27*preds.shape[1]);
new_slice_z = adjust_detected_position_spacing(new_pred_z, spacing)
print("old position")
print(pred_z)
print(slice_z)
print("new position")
print(new_pred_z)
print(new_slice_z)
new_z_calculate =1;
new_prob = float(preds[0,new_pred_z])
## Outputting prediction data
print('-------------- Predict CSV')
preds_reshaped = preds.reshape(preds.shape[0], -1)
numpy.savetxt(out_csv+"PRED_"+str(pred_id)+".csv", preds_reshaped, delimiter=",")
                        #If the prediction for L3 is above the predefined threshold for acceptance
if (new_prob > prob_threshold_U):
print('-------------- Above')
image = image3d
slice_image = image[new_slice_z,:, :]
image2dA = place_line_on_img(image2d[0], pred_z, pred_z, r=1)
image2dB = place_line_on_img(image2d[0], -new_pred_z, new_pred_z, r=1)
cv2.imwrite(out_yes+"/"+str(pred_id)+'_YES_'+image_name+'_SL.jpg', to256(slice_image))
cv2.imwrite(out_yes+"/"+str(pred_id)+'_YES_'+image_name+'_FR.jpg', to256(image2dA))
cv2.imwrite(out_yes+"/"+str(pred_id)+'_YES_'+image_name+'_FR2.jpg', to256(image2dB))
output = [image_path,folder,sub_folder,sub_sub_folder,'YES',new_slice_z,size[2],new_prob,slice_thickness, orient_nib]
lst.append(output)
                        #Images where the L3 vertebra was not identified
elif (new_prob <= prob_threshold_L ):
print('-------------- No')
image = image3d
slice_image = image[new_slice_z,:, :]
image2dA = place_line_on_img(image2d[0], -pred_z, -pred_z, r=1)
image2dB = place_line_on_img(image2d[0], -new_pred_z, -new_pred_z, r=1)
cv2.imwrite(out_no+str(pred_id)+'_NO_'+image_name+'_SL.jpg', to256(slice_image))
cv2.imwrite(out_no+str(pred_id)+'_NO_'+image_name+'_FR.jpg', to256(image2dA))
cv2.imwrite(out_no+str(pred_id)+'_NO_'+image_name+'_FR2.jpg', to256(image2dB))
output = [image_path,folder,sub_folder,sub_sub_folder,'NO',new_slice_z,size[2],new_prob,slice_thickness, orient_nib]
lst.append(output)
                        #Images where the L3 vertebra was identified but confidence requirements were not met.
else:
print('-------------- Review')
image = image3d
slice_image = image[new_slice_z,:, :]
image2dA = place_line_on_img(image2d[0], pred_z, pred_z, r=1)
image2dB = place_line_on_img(image2d[0], new_pred_z, new_pred_z, r=1)
cv2.imwrite(out_rev+str(pred_id)+'_REVIEW_'+image_name+'_SL_'+str(new_slice_z)+'_PROB_'+str(new_prob)+'.jpg', to256(slice_image))
cv2.imwrite(out_rev+str(pred_id)+'_REVIEW_'+image_name+'_FR_'+str(slice_z)+'_PROB_'+str(prob)+'.jpg', to256(image2dA))
cv2.imwrite(out_rev+str(pred_id)+'_REVIEW_'+image_name+'_FR2_'+str(new_slice_z)+'_PROB_'+str(new_prob)+'.jpg', to256(image2dB))
output = [image_path,folder,sub_folder,sub_sub_folder,'REVIEW',new_slice_z,size[2],new_prob,slice_thickness, orient_nib]
lst.append(output)
#Images that error out (e.g. image orientation is incorrect)
except:
print('-------------- Wrong')
print('-------------- ')
print('-------------- ')
print("Something went wrong - File: "+image_path)
print("Unexpected error"+str(sys.exc_info()[0]))
output = [image_path,folder,sub_folder,sub_sub_folder,'Error','','','Something went wrong:'+str(sys.exc_info()[1]),'', orient_nib]
lst.append(output)
#Outputting the results dataset
df = pd.DataFrame(lst, columns=cols)
if not os.path.exists('/content/gdrive/MyDrive/L3-Clean/Results/Summaries/'):
os.mkdir('/content/gdrive/MyDrive/L3-Clean/Results/Summaries/')
df.to_csv('/content/gdrive/MyDrive/L3-Clean/Results/Summaries/'+path+'_'+test_name+".csv")
print(' ')
print(' ')
print(' ')
print(' -------------- PROCESSING COMPLETE ------------------- ')
| 39.205556 | 159 | 0.540315 | [
"BSD-3-Clause"
] | kaylajanos1/TeamSpark-L3Detection | detection.py | 14,114 | Python |
# 20
num = 1
for i in range(100):
num *= i + 1
print(sum(int(n) for n in str(num)))
| 11.25 | 36 | 0.544444 | [
"MIT"
] | Martin-Gong/euler | python/p020.py | 90 | Python |
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown Copyright 2017-2019 Met Office.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Unit tests for ConvertLocationAndScaleParameters
"""
import unittest
import numpy as np
from scipy import stats
from iris.tests import IrisTest
from improver.ensemble_copula_coupling.ensemble_copula_coupling import (
ConvertLocationAndScaleParameters as Plugin)
class Test__init__(IrisTest):
"""Test the __init__ method."""
def test_valid_distribution(self):
"""Test for a valid distribution."""
plugin = Plugin(distribution="norm")
self.assertEqual(plugin.distribution, stats.norm)
self.assertEqual(plugin.shape_parameters, [])
def test_valid_distribution_with_shape_parameters(self):
"""Test for a valid distribution with shape parameters."""
plugin = Plugin(distribution="truncnorm", shape_parameters=[0, np.inf])
self.assertEqual(plugin.distribution, stats.truncnorm)
self.assertEqual(plugin.shape_parameters, [0, np.inf])
def test_invalid_distribution(self):
"""Test for an invalid distribution."""
msg = "The distribution requested"
with self.assertRaisesRegex(AttributeError, msg):
Plugin(distribution="elephant")
class Test__repr__(IrisTest):
"""Test string representation of plugin."""
def test_basic(self):
"""Test string representation"""
expected_string = ("<ConvertLocationAndScaleParameters: "
"distribution: norm; shape_parameters: []>")
result = str(Plugin())
self.assertEqual(result, expected_string)
class Test__rescale_shape_parameters(IrisTest):
"""Test the _rescale_shape_parameters"""
def setUp(self):
"""Set up values for testing."""
self.location_parameter = np.array([-1, 0, 1])
self.scale_parameter = np.array([1, 1.5, 2])
def test_truncated_at_zero(self):
"""Test scaling shape parameters implying a truncation at zero."""
expected = [np.array([1., 0, -0.5]),
np.array([np.inf, np.inf, np.inf])]
shape_parameters = [0, np.inf]
plugin = Plugin(distribution="truncnorm",
shape_parameters=shape_parameters)
plugin._rescale_shape_parameters(
self.location_parameter, self.scale_parameter)
self.assertArrayAlmostEqual(plugin.shape_parameters, expected)
def test_discrete_shape_parameters(self):
"""Test scaling discrete shape parameters."""
expected = [np.array([-3, -2.666667, -2.5]), np.array([7, 4, 2.5])]
shape_parameters = [-4, 6]
plugin = Plugin(distribution="truncnorm",
shape_parameters=shape_parameters)
plugin._rescale_shape_parameters(
self.location_parameter, self.scale_parameter)
self.assertArrayAlmostEqual(plugin.shape_parameters, expected)
def test_alternative_distribution(self):
"""Test specifying a distribution other than truncated normal. In
this instance, no rescaling is applied."""
shape_parameters = [0, np.inf]
plugin = Plugin(distribution="norm",
shape_parameters=shape_parameters)
plugin._rescale_shape_parameters(
self.location_parameter, self.scale_parameter)
self.assertArrayEqual(plugin.shape_parameters, shape_parameters)
def test_no_shape_parameters_exception(self):
"""Test raising an exception when shape parameters are not specified
for the truncated normal distribution."""
plugin = Plugin(distribution="truncnorm")
msg = "For the truncated normal distribution"
with self.assertRaisesRegex(ValueError, msg):
plugin._rescale_shape_parameters(
self.location_parameter, self.scale_parameter)
if __name__ == '__main__':
unittest.main()
| 41.679389 | 79 | 0.694139 | [
"BSD-3-Clause"
] | LaurenceBeard/improver | improver_tests/ensemble_copula_coupling/ensemble_copula_coupling/test_ConvertLocationAndScaleParameters.py | 5,460 | Python |
"""
Item class for Jaseci
Each item has an id, name, timestamp.
"""
from jaseci.element.element import element
class item(element):
"""Item class for Jaseci"""
def __init__(self, value=None, *args, **kwargs):
self.item_value = value
super().__init__(*args, **kwargs)
@property
def value(self):
return self.item_value
@value.setter
def value(self, val):
self.item_value = val
self.save()
def __str__(self):
if self.value:
return super().__str__() + f":{self.value}"
else:
return super().__str__() + ":None"
| 20.7 | 55 | 0.584541 | [
"MIT"
] | AshishMahendra/jaseci | jaseci_core/jaseci/attr/item.py | 621 | Python |
from __future__ import unicode_literals
from dvc.command.base import CmdBase
class CmdCheckout(CmdBase):
def run(self):
if not self.args.targets:
self.project.checkout(force=self.args.force)
else:
for target in self.args.targets:
self.project.checkout(
target=target, with_deps=self.args.with_deps, force=self.args.force
)
return 0
def add_parser(subparsers, parent_parser):
CHECKOUT_HELP = "Checkout data files from cache."
checkout_parser = subparsers.add_parser(
"checkout",
parents=[parent_parser],
description=CHECKOUT_HELP,
help=CHECKOUT_HELP,
)
checkout_parser.add_argument(
"-d",
"--with-deps",
action="store_true",
default=False,
help="Checkout all dependencies of the specified target.",
)
checkout_parser.add_argument(
"-f",
"--force",
action="store_true",
default=False,
help="Do not prompt when removing working directory files.",
)
checkout_parser.add_argument("targets", nargs="*", help="DVC files.")
checkout_parser.set_defaults(func=CmdCheckout)
| 29.047619 | 87 | 0.62377 | [
"Apache-2.0"
] | yfarjoun/dvc | dvc/command/checkout.py | 1,220 | Python |
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
## Created by: Hang Zhang
## Email: [email protected]
## Copyright (c) 2020
##
## LICENSE file in the root directory of this source tree
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
import os
import time
import argparse
import importlib
import numpy as np
from tqdm import tqdm
import torch
import torch.nn as nn
import torch.distributed as dist
import torch.multiprocessing as mp
import torch.backends.cudnn as cudnn
from torch.nn.parallel import DistributedDataParallel
import autotorch as at
import encoding
from encoding.nn import LabelSmoothing, NLLMultiLabelSmooth
from encoding.utils import (accuracy, AverageMeter, MixUpWrapper, LR_Scheduler, torch_dist_sum)
try:
import apex
from apex import amp
except ModuleNotFoundError:
print('please install amp if using float16 training')
class Options():
def __init__(self):
# data settings
parser = argparse.ArgumentParser(description='Deep Encoding')
parser.add_argument('--dataset', type=str, default='imagenet',
help='training dataset (default: imagenet)')
parser.add_argument('--base-size', type=int, default=None,
help='base image size')
parser.add_argument('--crop-size', type=int, default=224,
help='crop image size')
parser.add_argument('--label-smoothing', type=float, default=0.0,
help='label-smoothing (default eta: 0.0)')
parser.add_argument('--mixup', type=float, default=0.0,
help='mixup (default eta: 0.0)')
parser.add_argument('--auto-policy', type=str, default=None,
help='path to auto augment policy')
parser.add_argument('--data-dir', type=str, default=os.path.expanduser('~/.encoding/data'),
help='data location for training')
# model params
#parser.add_argument('--model', type=str, default='resnet50',
# help='network model type (default: densenet)')
parser.add_argument('--arch', type=str, default='regnet',
help='network type (default: regnet)')
parser.add_argument('--config-file', type=str, required=True,
help='network node config file')
parser.add_argument('--last-gamma', action='store_true', default=False,
help='whether to init gamma of the last BN layer in \
each bottleneck to 0 (default: False)')
# training params
parser.add_argument('--amp', action='store_true',
default=False, help='using amp')
parser.add_argument('--batch-size', type=int, default=128, metavar='N',
help='batch size for training (default: 128)')
parser.add_argument('--test-batch-size', type=int, default=256, metavar='N',
help='batch size for testing (default: 256)')
parser.add_argument('--epochs', type=int, default=120, metavar='N',
                            help='number of epochs to train (default: 120)')
parser.add_argument('--start_epoch', type=int, default=0,
                            metavar='N', help='the epoch number to start (default: 0)')
parser.add_argument('--workers', type=int, default=8,
metavar='N', help='dataloader threads')
# optimizer
parser.add_argument('--lr', type=float, default=0.1, metavar='LR',
help='learning rate (default: 0.1)')
parser.add_argument('--lr-scheduler', type=str, default='cos',
help='learning rate scheduler (default: cos)')
parser.add_argument('--warmup-epochs', type=int, default=0,
help='number of warmup epochs (default: 0)')
parser.add_argument('--momentum', type=float, default=0.9,
metavar='M', help='SGD momentum (default: 0.9)')
parser.add_argument('--wd', type=float, default=1e-4,
metavar ='M', help='SGD weight decay (default: 1e-4)')
parser.add_argument('--no-bn-wd', action='store_true',
default=False, help='no bias decay')
# seed
parser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
# checking point
parser.add_argument('--resume', type=str, default=None,
help='put the path to resuming file if needed')
parser.add_argument('--checkname', type=str, default='default',
help='set the checkpoint name')
# distributed
parser.add_argument('--world-size', default=1, type=int,
help='number of nodes for distributed training')
parser.add_argument('--rank', default=0, type=int,
help='node rank for distributed training')
parser.add_argument('--dist-url', default='tcp://localhost:23456', type=str,
help='url used to set up distributed training')
parser.add_argument('--dist-backend', default='nccl', type=str,
help='distributed backend')
# evaluation option
parser.add_argument('--eval', action='store_true', default= False,
help='evaluating')
parser.add_argument('--export', type=str, default=None,
help='put the path to resuming file if needed')
self.parser = parser
def parse(self):
args = self.parser.parse_args()
return args
def main():
args = Options().parse()
ngpus_per_node = torch.cuda.device_count()
args.world_size = ngpus_per_node * args.world_size
args.lr = args.lr * args.world_size
mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args))
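# Example single-node launch (illustrative; the config file path is an assumption):
#   python train.py --arch regnet --config-file configs/regnet_example.ini \
#       --batch-size 128 --lr 0.1 --epochs 120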
# global variable
best_pred = 0.0
acclist_train = []
acclist_val = []
def main_worker(gpu, ngpus_per_node, args):
args.gpu = gpu
args.rank = args.rank * ngpus_per_node + gpu
# model name for checkpoint
args.model = "{}-{}".format(args.arch, os.path.splitext(os.path.basename(args.config_file))[0])
if args.gpu == 0:
print('model:', args.model)
print('rank: {} / {}'.format(args.rank, args.world_size))
dist.init_process_group(backend=args.dist_backend,
init_method=args.dist_url,
world_size=args.world_size,
rank=args.rank)
torch.cuda.set_device(args.gpu)
# init the args
global best_pred, acclist_train, acclist_val
if args.gpu == 0:
print(args)
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
cudnn.benchmark = True
# init dataloader
transform_train, transform_val = encoding.transforms.get_transform(
args.dataset, args.base_size, args.crop_size)
if args.auto_policy is not None:
print(f'Using auto_policy: {args.auto_policy}')
from augment import Augmentation
auto_policy = Augmentation(at.load(args.auto_policy))
transform_train.transforms.insert(0, auto_policy)
trainset = encoding.datasets.get_dataset(args.dataset, root=args.data_dir,
transform=transform_train, train=True, download=True)
valset = encoding.datasets.get_dataset(args.dataset, root=args.data_dir,
transform=transform_val, train=False, download=True)
train_sampler = torch.utils.data.distributed.DistributedSampler(trainset)
train_loader = torch.utils.data.DataLoader(
trainset, batch_size=args.batch_size, shuffle=False,
num_workers=args.workers, pin_memory=True,
sampler=train_sampler)
val_sampler = torch.utils.data.distributed.DistributedSampler(valset, shuffle=False)
val_loader = torch.utils.data.DataLoader(
valset, batch_size=args.test_batch_size, shuffle=False,
num_workers=args.workers, pin_memory=True,
sampler=val_sampler)
# init the model
arch = importlib.import_module('arch.' + args.arch)
model = arch.config_network(args.config_file)
if args.gpu == 0:
print(model)
if args.mixup > 0:
train_loader = MixUpWrapper(args.mixup, 1000, train_loader, args.gpu)
criterion = NLLMultiLabelSmooth(args.label_smoothing)
elif args.label_smoothing > 0.0:
criterion = LabelSmoothing(args.label_smoothing)
else:
criterion = nn.CrossEntropyLoss()
model.cuda(args.gpu)
criterion.cuda(args.gpu)
# criterion and optimizer
if args.no_bn_wd:
parameters = model.named_parameters()
param_dict = {}
for k, v in parameters:
param_dict[k] = v
bn_params = [v for n, v in param_dict.items() if ('bn' in n or 'bias' in n)]
rest_params = [v for n, v in param_dict.items() if not ('bn' in n or 'bias' in n)]
if args.gpu == 0:
print(" Weight decay NOT applied to BN parameters ")
print(f'len(parameters): {len(list(model.parameters()))} = {len(bn_params)} + {len(rest_params)}')
optimizer = torch.optim.SGD([{'params': bn_params, 'weight_decay': 0 },
{'params': rest_params, 'weight_decay': args.wd}],
lr=args.lr,
momentum=args.momentum,
weight_decay=args.wd)
else:
optimizer = torch.optim.SGD(model.parameters(),
lr=args.lr,
momentum=args.momentum,
weight_decay=args.wd)
if args.amp:
#optimizer = amp_handle.wrap_optimizer(optimizer)
model, optimizer = amp.initialize(model, optimizer, opt_level='O2')
#from apex import amp
DDP = apex.parallel.DistributedDataParallel
model = DDP(model, delay_allreduce=True)
else:
DDP = DistributedDataParallel
model = DDP(model, device_ids=[args.gpu])
# check point
if args.resume is not None:
if os.path.isfile(args.resume):
if args.gpu == 0:
print("=> loading checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume)
args.start_epoch = checkpoint['epoch'] + 1 if args.start_epoch == 0 else args.start_epoch
best_pred = checkpoint['best_pred']
acclist_train = checkpoint['acclist_train']
acclist_val = checkpoint['acclist_val']
model.module.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
if args.amp:
amp.load_state_dict(checkpoint['amp'])
if args.gpu == 0:
print("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume, checkpoint['epoch']))
else:
raise RuntimeError ("=> no resume checkpoint found at '{}'".\
format(args.resume))
scheduler = LR_Scheduler(args.lr_scheduler,
base_lr=args.lr,
num_epochs=args.epochs,
iters_per_epoch=len(train_loader),
warmup_epochs=args.warmup_epochs)
def train(epoch):
train_sampler.set_epoch(epoch)
model.train()
losses = AverageMeter()
top1 = AverageMeter()
global best_pred, acclist_train
tic = time.time()
for batch_idx, (data, target) in enumerate(train_loader):
scheduler(optimizer, batch_idx, epoch, best_pred)
if not args.mixup:
data, target = data.cuda(args.gpu), target.cuda(args.gpu)
optimizer.zero_grad()
output = model(data)
loss = criterion(output, target)
if args.amp:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
optimizer.step()
if not args.mixup:
acc1 = accuracy(output, target, topk=(1,))
top1.update(acc1[0], data.size(0))
losses.update(loss.item(), data.size(0))
if batch_idx % 100 == 0 and args.gpu == 0:
iter_per_sec = 100.0 / (time.time() - tic) if batch_idx != 0 else 1.0 / (time.time() - tic)
tic = time.time()
if args.mixup:
#print('Batch: %d| Loss: %.3f'%(batch_idx, losses.avg))
                    print('Epoch: {}, Iter: {}, Speed: {:.3f} iter/sec, Train loss: {:.3f}'. \
                        format(epoch, batch_idx, iter_per_sec, float(losses.avg)))
else:
#print('Batch: %d| Loss: %.3f | Top1: %.3f'%(batch_idx, losses.avg, top1.avg))
print('Epoch: {}, Iter: {}, Speed: {:.3f} iter/sec, Top1: {:.3f}'. \
format(epoch, batch_idx, iter_per_sec, top1.avg.item()))
acclist_train += [top1.avg]
def validate(epoch):
model.eval()
top1 = AverageMeter()
top5 = AverageMeter()
global best_pred, acclist_train, acclist_val
is_best = False
for batch_idx, (data, target) in enumerate(val_loader):
data, target = data.cuda(args.gpu), target.cuda(args.gpu)
with torch.no_grad():
output = model(data)
acc1, acc5 = accuracy(output, target, topk=(1, 5))
top1.update(acc1[0], data.size(0))
top5.update(acc5[0], data.size(0))
# sum all
sum1, cnt1, sum5, cnt5 = torch_dist_sum(args.gpu, top1.sum, top1.count, top5.sum, top5.count)
if args.eval:
if args.gpu == 0:
top1_acc = sum(sum1) / sum(cnt1)
top5_acc = sum(sum5) / sum(cnt5)
print('Validation: Top1: %.3f | Top5: %.3f'%(top1_acc, top5_acc))
return
if args.gpu == 0:
top1_acc = sum(sum1) / sum(cnt1)
top5_acc = sum(sum5) / sum(cnt5)
print('Validation: Top1: %.3f | Top5: %.3f'%(top1_acc, top5_acc))
# save checkpoint
acclist_val += [top1_acc]
if top1_acc > best_pred:
best_pred = top1_acc
is_best = True
state_dict = {
'epoch': epoch,
'state_dict': model.module.state_dict(),
'optimizer': optimizer.state_dict(),
'best_pred': best_pred,
'acclist_train':acclist_train,
'acclist_val':acclist_val,
}
if args.amp:
state_dict['amp'] = amp.state_dict()
encoding.utils.save_checkpoint(state_dict, args=args, is_best=is_best)
if args.export:
if args.gpu == 0:
torch.save(model.module.state_dict(), args.export + '.pth')
return
if args.eval:
validate(args.start_epoch)
return
for epoch in range(args.start_epoch, args.epochs):
tic = time.time()
train(epoch)
if epoch % 10 == 0:# or epoch == args.epochs-1:
validate(epoch)
elapsed = time.time() - tic
if args.gpu == 0:
print(f'Epoch: {epoch}, Time cost: {elapsed}')
if args.gpu == 0:
encoding.utils.save_checkpoint({
'epoch': args.epochs-1,
'state_dict': model.module.state_dict(),
'optimizer': optimizer.state_dict(),
'best_pred': best_pred,
'acclist_train':acclist_train,
'acclist_val':acclist_val,
}, args=args, is_best=False)
if __name__ == "__main__":
os.environ['PYTHONWARNINGS'] = 'ignore:semaphore_tracker:UserWarning'
main()
| 44.312329 | 110 | 0.563992 | [ "MIT" ] | YanchengWang/RegNet-Search-PyTorch | train.py | 16,174 | Python |
"""
Implements harmonic_mean() function.
"""
from .mean import mean
def harmonic_mean(x):
"""
The `harmonic mean`_ is a kind of average that is calculated as
the reciprocal_ of the arithmetic mean of the reciprocals.
It is appropriate when calculating averages of rates_.
.. _`harmonic mean`: https://en.wikipedia.org/wiki/Harmonic_mean
.. _reciprocal: https://en.wikipedia.org/wiki/Multiplicative_inverse
.. _rates: https://en.wikipedia.org/wiki/Rate_(mathematics)
Equation:
.. math::
H = \\frac{n}{\\frac{1}{x_1}+\\frac{1}{x_2}+\\ldots+\\frac{1}{x_n}} =
\\frac{n}{\\sum\\limits_{i=1}^n \\frac{1}{x_i}}
Args:
x: A list or tuple of numerical objects.
Returns:
A numerical object.
Raises:
TypeError: If the user passes something other than list or tuple.
Examples:
>>> harmonic_mean([1, 2, 4])
1.7142857142857142
>>> harmonic_mean(7)
Traceback (most recent call last):
...
TypeError: harmonic_mean() expects a list or a tuple.
"""
if type(x) not in [list, tuple]:
raise TypeError('harmonic_mean() expects a list or a tuple.')
reciprocals = [1 / float(num) for num in x]
# sum_of_reciprocals = sum(reciprocals[:])
    return 1 / mean(reciprocals)
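# Worked example of the "average of rates" use case from the docstring: driving two
# equal-distance legs at 40 and 60 km/h gives an overall average speed of
# harmonic_mean([40, 60]), which is 48 km/h, not the arithmetic mean of 50.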
| 28.446809 | 81 | 0.617801 | [ "MIT" ] | sheriferson/simple-statistics-py | simplestatistics/statistics/harmonic_mean.py | 1,337 | Python |
# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "LICENSE.txt" file accompanying this file.
# This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, express or implied.
# See the License for the specific language governing permissions and limitations under the License.
# import logging
import json
import logging
import subprocess as sp
import boto3
import pytest
from assertpy import assert_that
from utils import get_root_volume_id
@pytest.mark.regions(["ap-southeast-1"])
@pytest.mark.instances(["c5.xlarge"])
@pytest.mark.oss(["alinux2"])
@pytest.mark.schedulers(["slurm", "awsbatch"])
@pytest.mark.usefixtures("region", "instance")
def test_tag_propagation(pcluster_config_reader, clusters_factory, scheduler, os):
"""
Verify tags from various sources are propagated to the expected resources.
The following resources are checked for tags:
- main CFN stack
- head node
- head node's root EBS volume
- compute node (traditional schedulers)
- compute node's root EBS volume (traditional schedulers)
- shared EBS volume
"""
config_file_tags = {"ConfigFileTag": "ConfigFileTagValue"}
version_tags = {"parallelcluster:version": get_pcluster_version()}
cluster_config = pcluster_config_reader()
cluster = clusters_factory(cluster_config)
cluster_name_tags = {"parallelcluster:cluster-name": cluster.name}
test_cases = [
{
"resource": "Main CloudFormation Stack",
"tag_getter": get_main_stack_tags,
"expected_tags": (version_tags, config_file_tags),
},
{
"resource": "Head Node",
"tag_getter": get_head_node_tags,
"expected_tags": (
cluster_name_tags,
{"Name": "HeadNode", "parallelcluster:node-type": "HeadNode"},
),
},
{
"resource": "Head Node Root Volume",
"tag_getter": get_head_node_root_volume_tags,
"expected_tags": (cluster_name_tags, {"parallelcluster:node-type": "HeadNode"}),
"tag_getter_kwargs": {"cluster": cluster, "os": os},
},
{
"resource": "Compute Node",
"tag_getter": get_compute_node_tags,
"expected_tags": (
cluster_name_tags,
{"Name": "Compute", "parallelcluster:node-type": "Compute"},
config_file_tags,
),
"skip": scheduler == "awsbatch",
},
{
"resource": "Compute Node Root Volume",
"tag_getter": get_compute_node_root_volume_tags,
"expected_tags": (
cluster_name_tags,
{"parallelcluster:node-type": "Compute"},
config_file_tags if scheduler == "slurm" else {},
),
"tag_getter_kwargs": {"cluster": cluster, "os": os},
"skip": scheduler == "awsbatch",
},
{
"resource": "Shared EBS Volume",
"tag_getter": get_shared_volume_tags,
"expected_tags": (version_tags, config_file_tags),
},
]
for test_case in test_cases:
if test_case.get("skip"):
continue
logging.info("Verifying tags were propagated to %s", test_case.get("resource"))
tag_getter = test_case.get("tag_getter")
# Assume tag getters use lone cluster object arg if none explicitly given
tag_getter_args = test_case.get("tag_getter_kwargs", {"cluster": cluster})
observed_tags = tag_getter(**tag_getter_args)
expected_tags = test_case["expected_tags"]
assert_that(observed_tags).contains(*convert_tags_dicts_to_tags_list(expected_tags))
def convert_tags_dicts_to_tags_list(tags_dicts):
"""Convert dicts of the form {key: value} to a list like [{"Key": key, "Value": value}]."""
tags_list = []
for tags_dict in tags_dicts:
tags_list.extend([{"Key": key, "Value": value} for key, value in tags_dict.items()])
return tags_list
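# For example, convert_tags_dicts_to_tags_list([{"Name": "HeadNode"}, {"env": "test"}])
# returns [{"Key": "Name", "Value": "HeadNode"}, {"Key": "env", "Value": "test"}]
# (the tag keys and values here are illustrative).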
def get_cloudformation_tags(region, stack_name):
"""
Return the tags for the CFN stack with the given name
The returned values is a list like the following:
[
{'Key': 'Key2', 'Value': 'Value2'},
{'Key': 'Key1', 'Value': 'Value1'},
]
"""
cfn_client = boto3.client("cloudformation", region_name=region)
response = cfn_client.describe_stacks(StackName=stack_name)
return response["Stacks"][0]["Tags"]
def get_main_stack_tags(cluster):
"""Return the tags for the cluster's main CFN stack."""
return get_cloudformation_tags(cluster.region, cluster.cfn_name)
def get_head_node_instance_id(cluster):
"""Return the given cluster's head node's instance ID."""
return cluster.cfn_resources.get("HeadNode")
def get_ec2_instance_tags(instance_id, region):
"""Return a list of tags associated with the given EC2 instance."""
logging.info("Getting tags for instance %s", instance_id)
return (
boto3.client("ec2", region_name=region)
.describe_instances(InstanceIds=[instance_id])
.get("Reservations")[0]
.get("Instances")[0]
.get("Tags")
)
def get_tags_for_volume(volume_id, region):
"""Return the tags attached to the given EBS volume."""
logging.info("Getting tags for volume %s", volume_id)
return boto3.client("ec2", region_name=region).describe_volumes(VolumeIds=[volume_id]).get("Volumes")[0].get("Tags")
def get_head_node_root_volume_tags(cluster, os):
"""Return the given cluster's head node's root volume's tags."""
head_node_instance_id = get_head_node_instance_id(cluster)
root_volume_id = get_root_volume_id(head_node_instance_id, cluster.region, os)
return get_tags_for_volume(root_volume_id, cluster.region)
def get_head_node_tags(cluster):
"""Return the given cluster's head node's tags."""
head_node_instance_id = get_head_node_instance_id(cluster)
return get_ec2_instance_tags(head_node_instance_id, cluster.region)
def get_compute_node_root_volume_tags(cluster, os):
"""Return the given cluster's compute node's root volume's tags."""
compute_nodes = cluster.get_cluster_instance_ids(node_type="Compute")
assert_that(compute_nodes).is_length(1)
root_volume_id = get_root_volume_id(compute_nodes[0], cluster.region, os)
return get_tags_for_volume(root_volume_id, cluster.region)
def get_compute_node_tags(cluster):
"""Return the given cluster's compute node's tags."""
compute_nodes = cluster.get_cluster_instance_ids(node_type="Compute")
assert_that(compute_nodes).is_length(1)
return get_ec2_instance_tags(compute_nodes[0], cluster.region)
def get_ebs_volume_tags(volume_id, region):
"""Return the tags associated with the given EBS volume."""
return boto3.client("ec2", region_name=region).describe_volumes(VolumeIds=[volume_id]).get("Volumes")[0].get("Tags")
def get_shared_volume_tags(cluster):
"""Return the given cluster's EBS volume's tags."""
shared_volume = cluster.cfn_resources.get("EBS0")
return get_ebs_volume_tags(shared_volume, cluster.region)
def get_pcluster_version():
"""Return the installed version of the pclsuter CLI."""
return json.loads(sp.check_output("pcluster version".split()).decode().strip()).get("version")
| 38.313131 | 120 | 0.676246 | [ "Apache-2.0" ] | eshpc/aws-parallelcluster | tests/integration-tests/tests/tags/test_tag_propagation.py | 7,586 | Python |
from flask import Flask, redirect, url_for
from flask_sqlalchemy import SQLAlchemy
from flask_login import LoginManager
from flask_migrate import Migrate
from flask_bcrypt import Bcrypt
from flask_mail import Mail
app = Flask(__name__)
# Configuration
app.config.from_object('config.DevelopmentConfig')
db = SQLAlchemy(app)
login_manager = LoginManager(app)
migrate = Migrate(app, db)
bcrypt = Bcrypt(app)
mail = Mail(app)
from app.auth.views import auth_blueprint
from app.admin.views import admin_blueprint
from app.user.views import user_blueprint
app.register_blueprint(auth_blueprint, url_prefix='/auth')
app.register_blueprint(admin_blueprint, url_prefix='/admin')
app.register_blueprint(user_blueprint, url_prefix='/user')
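# Minimal run sketch (host/port are illustrative): with this package importable as "app",
#   from app import app
#   app.run(host="127.0.0.1", port=5000)
# "/" redirects to the auth blueprint's login view registered above.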
@app.route('/')
def root():
    return redirect(url_for('auth.login'))
| 28.678571 | 60 | 0.809465 | [ "MIT" ] | hazzillrodriguez/flask-user-management | app/__init__.py | 803 | Python |
import torch
import random
import numpy as np
class InfiniteDataLoader(torch.utils.data.DataLoader):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.dataset_iterator = super().__iter__()
def __iter__(self):
return self
def __next__(self):
try:
batch = next(self.dataset_iterator)
except StopIteration:
self.dataset_iterator = super().__iter__()
batch = next(self.dataset_iterator)
return batch
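# Minimal usage sketch (dataset name and batch size are illustrative): the loader silently
# restarts the underlying iterator instead of raising StopIteration at the end of an epoch.
#   loader = InfiniteDataLoader(my_dataset, batch_size=32, shuffle=True)
#   for _ in range(num_iterations):
#       batch = next(loader)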
def make_deterministic(seed=0):
"""Make results deterministic. If seed == -1, do not make deterministic.
Running your script in a deterministic way might slow it down.
Note that for some packages (eg: sklearn's PCA) this function is not enough.
"""
seed = int(seed)
if seed == -1:
return
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
def setup_logging(output_folder, exist_ok=False, console="debug",
info_filename="info.log", debug_filename="debug.log"):
"""Set up logging files and console output.
Creates one file for INFO logs and one for DEBUG logs.
Args:
output_folder (str): creates the folder where to save the files.
exist_ok (boolean): if False throw a FileExistsError if output_folder already exists
        console (str):
if == "debug" prints on console debug messages and higher
if == "info" prints on console info messages and higher
if == None does not use console (useful when a logger has already been set)
info_filename (str): the name of the info file. if None, don't create info file
debug_filename (str): the name of the debug file. if None, don't create debug file
"""
import os
import sys
import logging
import traceback
if not exist_ok and os.path.exists(output_folder):
raise FileExistsError(f"{output_folder} already exists!")
os.makedirs(output_folder, exist_ok=True)
base_formatter = logging.Formatter('%(asctime)s %(message)s', "%Y-%m-%d %H:%M:%S")
logger = logging.getLogger('')
logger.setLevel(logging.DEBUG)
    if info_filename is not None:
info_file_handler = logging.FileHandler(f'{output_folder}/{info_filename}')
info_file_handler.setLevel(logging.INFO)
info_file_handler.setFormatter(base_formatter)
logger.addHandler(info_file_handler)
    if debug_filename is not None:
debug_file_handler = logging.FileHandler(f'{output_folder}/{debug_filename}')
debug_file_handler.setLevel(logging.DEBUG)
debug_file_handler.setFormatter(base_formatter)
logger.addHandler(debug_file_handler)
    if console is not None:
console_handler = logging.StreamHandler()
if console == "debug": console_handler.setLevel(logging.DEBUG)
if console == "info": console_handler.setLevel(logging.INFO)
console_handler.setFormatter(base_formatter)
logger.addHandler(console_handler)
def my_handler(type_, value, tb):
        logger.info("\n" + "".join(traceback.format_exception(type_, value, tb)))
logging.info("Experiment finished (with some errors)")
sys.excepthook = my_handler
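# Minimal usage sketch (folder name is illustrative):
#   setup_logging("logs/experiment_1", console="info")
#   logging.info("goes to the console, info.log and debug.log")
#   logging.debug("goes to debug.log only")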
| 38.146067 | 92 | 0.671281 | [ "MIT" ] | gmberton/CosPlace | commons.py | 3,395 | Python |
from bedlam import Game
from bedlam import Scene
from bedlam import Sprite
from balls import Ball
# __pragma__('skip')
document = window = Math = Date = console = 0 # Prevent complaints by optional static checker
# __pragma__('noskip')
# __pragma__('noalias', 'clear')
DEBUG = False
class PVector:
    # Object-pool storage shared by Instance()/Free(); initialized lazily on first use.
    pool = None
    pool_max_size = 10
def __init__(self, xx=0, yy=0):
self.x = xx
self.y = yy
def __str__(self):
return "PVector({},{})".format(self.x, self.y)
def reset(self, xx, yy):
self.x = xx
self.y = yy
return self
def copy(self):
return PVector.Instance(self.x, self.y)
def add(self, v):
self.x = self.x + v.x
self.y = self.y + v.y
return self
def sub(self, v):
self.x = self.x - v.x
self.y = self.y - v.y
return self
def mult(self, mag):
self.x = self.x * mag
self.y = self.y * mag
return self
def div(self, mag):
self.x = self.x / mag
self.y = self.y / mag
return self
def normalize(self, mag=1.0):
d = Math.sqrt(self.x * self.x + self.y * self.y)
if d == 0 or mag == 0:
self.x = 0
self.y = 0
else:
self.x = mag * self.x / d
self.y = mag * self.y / d
return self
def limit(self, mag):
d = Math.sqrt(self.x * self.x + self.y * self.y)
if d == 0 or mag == 0:
return
if d > mag:
self.x = mag * self.x / d
self.y = mag * self.y / d
return self
def mag(self):
return Math.sqrt(self.x * self.x + self.y * self.y)
@classmethod
def Instance(cls, xx, yy):
if cls.pool is None:
cls.pool = []
cls.pool_max_size = 10
if len(cls.pool) == 0:
return PVector(xx, yy)
else:
v = cls.pool.pop()
v.x = xx
v.y = yy
return v
@classmethod
def Free(cls, pvector):
        if cls.pool is not None and len(cls.pool) < cls.pool_max_size:
            cls.pool.append(pvector)
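# Usage sketch for the pooling pattern above (values are illustrative): callers borrow
# temporaries via Instance() and hand them back with Free() so fewer short-lived objects
# are allocated per animation frame.
#   v = PVector.Instance(3, 4)
#   v.normalize(2.0)
#   PVector.Free(v)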
class Boid(Sprite):
def __init__(self, game, w=10):
Sprite.__init__(self, game, w, w)
self.color = 'white'
self.x = self.game.canvas.width * Math.random()
self.y = self.game.canvas.height * Math.random()
angle = 2 * Math.PI * Math.random()
self.dx = self.game.speed * Math.cos(angle)
self.dy = self.game.speed * Math.sin(angle)
def is_close(self, sprite, dist):
return self.distance(sprite) + self.width / 2 + sprite.width / 2 <= dist
def distance(self, sprite):
vx = self.x - sprite.x
vy = self.y - sprite.y
self_radius = (self.width + self.height) / 2
sprite_radius = (sprite.width + sprite.height) / 2
dist = Math.sqrt(vx * vx + vy * vy) - (self_radius + sprite_radius)
return dist if dist >= 0 else 0
def draw(self, ctx):
global DEBUG
Sprite.draw(self, ctx)
angle = self._angle()
ctx.save()
ctx.globalCompositeOperation = 'source-over'
if DEBUG:
ctx.strokeStyle = '#808080'
ctx.beginPath()
ctx.arc(self.x, self.y, self.game.cohesion_radius, 0, 2 * Math.PI)
ctx.stroke()
ctx.strokeStyle = '#696969'
ctx.beginPath()
ctx.arc(self.x, self.y, self.game.separation_radius + self.width/2, 0, 2 * Math.PI)
ctx.stroke()
ctx.lineWidth = 2
ctx.strokeStyle = self.color
ctx.fillStyle = self.color
ctx.beginPath()
ctx.translate(self.x, self.y)
ctx.rotate(angle)
ctx.moveTo(-1 * self.width, -0.5 * self.width)
ctx.lineTo(self.width, 0)
ctx.lineTo(-1 * self.width, 0.5 * self.width)
ctx.lineTo(-1 * self.width, -0.5 * self.width)
ctx.translate(-1 * self.originX, -1 * self.originY)
ctx.fill()
ctx.stroke()
ctx.restore()
def _angle(self, a=0.0):
angle = Math.atan2(self.dy, self.dx) + a
while angle > 2 * Math.PI:
angle = angle - 2 * Math.PI
while angle < 0:
angle = angle + 2 * Math.PI
return angle
def _find(self, boid, dist, clazz=None):
return self.game.currentScene.find(boid, dist, clazz)
def update(self, delta_time):
global DEBUG
move = PVector.Instance(self.dx, self.dy)
allignment = self.__calc_allignment().mult(self.game.allignment_mult)
separation = self.__calc_separation().mult(self.game.separation_mult)
cohesion = self.__calc_cohesion().mult(self.game.cohesion_mult)
noise = self.__calc_random_noise().mult(self.game.noise_mult)
if DEBUG:
console.log('time={} : allign={} : avoid={} : noise={} : cohese={}'.format(delta_time, allignment.mag(),
separation.mag(), noise.mag(),
cohesion.mag()))
move.add(allignment)
move.add(separation)
move.add(cohesion)
move.add(noise)
move.limit(self.game.speed)
self.dx = move.x
self.dy = move.y
self.x = self.x + self.dx * delta_time / 1000.0
if self.x < 0:
self.x = self.x + self.game.canvas.width
elif self.x > self.game.canvas.width:
self.x = self.x - self.game.canvas.width
self.y = self.y + self.dy * delta_time / 1000.0
if self.y < 0:
self.y = self.y + self.game.canvas.height
elif self.y > self.game.canvas.height:
self.y = self.y - self.game.canvas.height
PVector.Free(move)
PVector.Free(allignment)
PVector.Free(separation)
PVector.Free(noise)
def __calc_allignment(self):
steer = PVector.Instance(0, 0)
for sprite in self._find(self, self.game.allignment_radius, Boid):
d = self.distance(sprite)
if d == 0:
continue
copy = PVector.Instance(sprite.dx, sprite.dy)
copy.normalize()
copy.div(d)
steer.add(copy)
return steer
def __calc_separation(self):
steer = PVector.Instance(0, 0)
for sprite in self._find(self, self.game.separation_radius, Sprite):
d = self.distance(sprite)
if d == 0:
continue
diff = PVector(self.x - sprite.x, self.y - sprite.y)
diff.normalize()
diff.div(d)
steer.add(diff)
return steer
def __calc_random_noise(self):
return PVector.Instance(Math.random() * 2 - 1, Math.random() * 2 - 1)
def __calc_cohesion(self):
steer = PVector.Instance(0, 0)
count = 0
for sprite in self._find(self, self.game.cohesion_radius, Boid):
steer.x = steer.x + sprite.x
steer.y = steer.y + sprite.y
count = count + 1
if count > 0:
steer.x = steer.x / count
steer.y = steer.y / count
steer.normalize(0.05)
return steer
class BoidsScene(Scene):
def __init__(self, game, name=None, num_boids=8, w=10):
Scene.__init__(self, game, name)
self.color = 'black'
for n in range(num_boids):
self.append(Boid(self.game, w))
for n in range(3):
self.append(Ball(self.game, 30, 10, 'green'))
for n in range(1):
self.append(Ball(self.game, 30, 20, 'red'))
def _clear_screen(self, ctx):
ctx.save()
ctx.globalCompositeOperation = 'copy'
ctx.fillStyle = self.color
ctx.fillRect(0, 0, self.game.canvas.width, self.game.canvas.height)
ctx.restore()
def find(self, boid, dist, clazz=None):
sprite_list = []
for sprite in self:
if clazz is not None and not isinstance(sprite, clazz):
continue
if sprite == boid:
continue
if boid.is_close(sprite, dist):
sprite_list.append(sprite)
return sprite_list
class BoidsGame(Game):
def __init__(self, name='Boids', loop_time=20):
Game.__init__(self, name, loop_time)
sprite_width = 5
global_scale = sprite_width / 10.0
self.speed = 100
self.allignment_radius = 180 * global_scale
self.separation_radius = 25 * global_scale
self.cohesion_radius = self.allignment_radius
self.allignment_mult = 3
self.separation_mult = 30
self.cohesion_mult = 25
self.noise_mult = 5
self.append(BoidsScene(self, 'BOIDS', 32, sprite_width))
@staticmethod
def set_debug(b):
global DEBUG
if b is not None and b == 'true':
DEBUG = True
| 31.44523 | 117 | 0.539049 | [ "BSD-2-Clause" ] | KeithRieck/bedlam | boids.py | 8,899 | Python |
# -*- coding: utf-8 -*-
from pandas import DataFrame
from pandas_ta.utils import get_offset, verify_series
def donchian(high, low, lower_length=None, upper_length=None, offset=None, **kwargs):
"""Indicator: Donchian Channels (DC)"""
# Validate arguments
high = verify_series(high)
low = verify_series(low)
lower_length = int(lower_length) if lower_length and lower_length > 0 else 20
upper_length = int(upper_length) if upper_length and upper_length > 0 else 20
lower_min_periods = int(kwargs["lower_min_periods"]) if "lower_min_periods" in kwargs and kwargs["lower_min_periods"] is not None else lower_length
upper_min_periods = int(kwargs["upper_min_periods"]) if "upper_min_periods" in kwargs and kwargs["upper_min_periods"] is not None else upper_length
offset = get_offset(offset)
# Calculate Result
lower = low.rolling(lower_length, min_periods=lower_min_periods).min()
upper = high.rolling(upper_length, min_periods=upper_min_periods).max()
mid = 0.5 * (lower + upper)
# Handle fills
if "fillna" in kwargs:
lower.fillna(kwargs["fillna"], inplace=True)
mid.fillna(kwargs["fillna"], inplace=True)
upper.fillna(kwargs["fillna"], inplace=True)
if "fill_method" in kwargs:
lower.fillna(method=kwargs["fill_method"], inplace=True)
mid.fillna(method=kwargs["fill_method"], inplace=True)
upper.fillna(method=kwargs["fill_method"], inplace=True)
# Offset
if offset != 0:
lower = lower.shift(offset)
mid = mid.shift(offset)
upper = upper.shift(offset)
# Name and Categorize it
lower.name = f"DCL_{lower_length}_{upper_length}"
mid.name = f"DCM_{lower_length}_{upper_length}"
upper.name = f"DCU_{lower_length}_{upper_length}"
mid.category = upper.category = lower.category = "volatility"
# Prepare DataFrame to return
data = {lower.name: lower, mid.name: mid, upper.name: upper}
dcdf = DataFrame(data)
dcdf.name = f"DC_{lower_length}_{upper_length}"
dcdf.category = mid.category
return dcdf
donchian.__doc__ = \
"""Donchian Channels (DC)
Donchian Channels are used to measure volatility, similar to
Bollinger Bands and Keltner Channels.
Sources:
https://www.tradingview.com/wiki/Donchian_Channels_(DC)
Calculation:
Default Inputs:
lower_length=upper_length=20
LOWER = low.rolling(lower_length).min()
UPPER = high.rolling(upper_length).max()
MID = 0.5 * (LOWER + UPPER)
Args:
high (pd.Series): Series of 'high's
low (pd.Series): Series of 'low's
    lower_length (int): The period for the lower (rolling min of low) channel. Default: 20
    upper_length (int): The period for the upper (rolling max of high) channel. Default: 20
offset (int): How many periods to offset the result. Default: 0
Kwargs:
fillna (value, optional): pd.DataFrame.fillna(value)
fill_method (value, optional): Type of fill method
Returns:
pd.DataFrame: lower, mid, upper columns.
"""
| 35.361446 | 151 | 0.69983 | [ "MIT" ] | MyBourse/pandas-ta | pandas_ta/volatility/donchian.py | 2,935 | Python |
import unittest
from unittest.mock import Mock
# PyATS
from pyats.topology import Device
from genie.metaparser.util.exceptions import SchemaEmptyParserError, \
SchemaMissingKeyError
from genie.libs.parser.asa.show_vpn import ShowVPNLoadBalancing
# ============================================
# unit test for 'show vpn load-balancing'
# =============================================
class TestShowVPNLoadBalancing(unittest.TestCase):
"""
unit test for show vpn load-balancing
"""
device = Device(name='aDevice')
empty_output = {'execute.return_value': ''}
maxDiff = None
golden_parsed_output = {
'cluster_ip': 'cluster1',
'encryption': 'Enabled',
'failover': 'n/a',
'peers': {
1: {
'load_balancing_version': 4,
'model': 'ASA-VASA',
'pri': 5,
'public_ip': '10.246.0.1*',
'role': 'Master',
},
2: {
'load_balancing_version': 4,
'model': 'ASA-VASA',
'pri': 5,
'public_ip': '10.246.0.2',
'role': 'Backup',
},
},
'peers_count': 1,
'role': 'Master',
'status': 'Enabled',
'total_license_load': {
1: {
'anyconnect_premium_essentials': {
'limit': 250,
'load': 0,
'used': 0,
},
'other_vpn': {
'limit': 250,
'load': 1,
'used': 2,
},
'public_ip': '10.246.0.1*',
},
2: {
'anyconnect_premium_essentials': {
'limit': 0,
'load': 0,
'used': 0,
},
'other_vpn': {
'limit': 0,
'load': 0,
'used': 0,
},
'public_ip': '10.246.0.2',
},
},
}
golden_output = {'execute.return_value': '''
vASA-VPN-20#show vpn load-balancing
--------------------------------------------------------------------------
Status Role Failover Encryption Peers Cluster IP
--------------------------------------------------------------------------
Enabled Master n/a Enabled 1 cluster1
Peers:
--------------------------------------------------------------------------
Role Pri Model Load-Balancing Version Public IP
--------------------------------------------------------------------------
Master 5 ASA-VASA 4 10.246.0.1*
Backup 5 ASA-VASA 4 10.246.0.2
Total License Load:
--------------------------------------------------------------------------
AnyConnect Premium/Essentials Other VPN Public IP
----------------------------- ---------------------
Limit Used Load Limit Used Load
--------------------------------------------------------------------------
250 0 0% 250 2 1% 10.246.0.1*
0 0 0% 0 0 0% 10.246.0.2
'''}
def test_empty(self):
self.device = Mock(**self.empty_output)
obj = ShowVPNLoadBalancing(device=self.device)
with self.assertRaises(SchemaEmptyParserError):
parsed_output = obj.parse()
def test_golden(self):
self.device = Mock(**self.golden_output)
route_obj = ShowVPNLoadBalancing(device=self.device)
parsed_output = route_obj.parse()
self.assertEqual(parsed_output, self.golden_parsed_output)
if __name__ == '__main__':
    unittest.main()
| 35.408696 | 82 | 0.370088 | [ "Apache-2.0" ] | IreneZhou0129/genieparser | src/genie/libs/parser/asa/tests/test_show_vpn.py | 4,072 | Python |
from django.shortcuts import render
from django.http import HttpResponse
def feedHome(request):
    return HttpResponse('<p>Welcome To My Social App</p>')
| 26.166667 | 59 | 0.770701 | [ "MIT" ] | njokuifeanyigerald/django-social-media-app | feed/views.py | 157 | Python |
# <editor-fold desc="Basic Imports">
import os
import os.path as p
import requests
from time import time
from argparse import ArgumentParser
import sys
sys.path.append(p.join(p.dirname(__file__), '..'))
sys.path.append(p.join(p.dirname(__file__), '../..'))
# </editor-fold>
# <editor-fold desc="Parse Command Line Args">
prog_file_path = p.join(p.dirname(__file__), 'progress.txt')
relative_base_path = '../../base_indexes/USE_lite_base_IVF16K.index'
base_index_path = p.abspath(p.join(p.dirname(__file__), relative_base_path))
arp = ArgumentParser(description='Vectorize Sentences for Searchable Index.')
arp.add_argument('input_dir', help='Path to raw news dir.')
arp.add_argument('output_dir', help='Path to saved index dir.')
arp.add_argument('-p', '--progress_file', default=prog_file_path,
help='For keeping track of news that has been preprocessed. '
'Default: dig-text-similarity-search/progress.txt')
arp.add_argument('-b', '--base_index_path', default=base_index_path,
help='Path to pre-trained empty faiss index. '
'Default: dig-text-similarity-search/base_indexes/*.index')
arp.add_argument('-l', '--large', action='store_true',
help='Toggle large Universal Sentence Encoder (Transformer NN).')
arp.add_argument('-m', '--m_per_batch', type=int, default=512*128,
help='Sentences per batch.')
arp.add_argument('-n', '--n_per_minibatch', type=int, default=64,
help='Sentences per mini-batch.')
arp.add_argument('-v', '--verbose', action='store_true',
help='Shows progress of batch vectorization.')
arp.add_argument('-t', '--num_threads', default='2',
help='Set CPU thread budget for numpy.')
arp.add_argument('-d', '--no_delete', action='store_false', default=True,
                 help='Keep the per-batch sub.index files after merging on-disk '
                      '(they are deleted by default).')
arp.add_argument('-a', '--add_shard', action='store_true',
help='Adds shard to running similarity server.')
arp.add_argument('-u', '--url', default='http://localhost:5954/faiss',
help='Port handling similarity server.')
arp.add_argument('-T', '--TF_logging', action='store_false', default=True,
help='Increase verbosity of TensorFlow.')
opts = arp.parse_args()
# </editor-fold>
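# Example invocation (paths are illustrative):
#   python prep_shard.py /data/raw_news/ /data/shards/ -v -t 2
# Each call vectorizes one news.jl file, merges the per-batch faiss subindexes on-disk,
# and records the processed file in progress.txt so it is skipped on the next run.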
if opts.num_threads:
print(f'\nRestricting numpy to {opts.num_threads} thread(s)\n')
os.environ['OPENBLAS_NUM_THREADS'] = opts.num_threads
os.environ['NUMEXPR_NUM_THREADS'] = opts.num_threads
os.environ['MKL_NUM_THREADS'] = opts.num_threads
os.environ['OMP_NUM_THREADS'] = opts.num_threads
from dt_sim.data_reader.jl_io_funcs import check_all_docs, get_all_docs
from dt_sim.data_reader.misc_io_funcs import check_unique, clear_dir
from dt_sim.vectorizer.sentence_vectorizer import SentenceVectorizer
from dt_sim.indexer.index_builder import OnDiskIVFBuilder
from dt_sim.processor.corpus_processor import CorpusProcessor
# Suppress TF logging
if opts.TF_logging:
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
# Init
sv = SentenceVectorizer(large=opts.large)
idx_bdr = OnDiskIVFBuilder(path_to_base_index=opts.base_index_path)
cp = CorpusProcessor(vectorizer=sv, index_builder=idx_bdr,
progress_file=opts.progress_file)
# Track progress
prepped_news = cp.track_preprocessing(cp.progress_file, verbose=opts.verbose)
raw_news = cp.get_news_paths(opts.input_dir, verbose=opts.verbose)
candidates = cp.candidate_files(prepped_news, raw_news, verbose=opts.verbose)
file_to_process = candidates[:1] # Preprocesses one news.jl per call
def main(raw_jl, output_dir: str = opts.output_dir,
m_per_batch: int = opts.m_per_batch, n_per_minibatch: int = opts.n_per_minibatch,
no_delete: bool = opts.no_delete, verbose: bool = opts.verbose,
add_shard: bool = opts.add_shard, url: str = opts.url):
subidx_dir, shard_date = cp.init_paths(raw_jl)
if verbose:
print(f'Will process: {raw_jl}\n')
# Check File Content
if verbose:
print(f'\nReading file: {raw_jl}')
jl_stats = check_all_docs(raw_jl, batch_size=m_per_batch)
(doc_count, line_count, junk, n_batches) = jl_stats
if verbose:
print(f'* Found {doc_count} good documents with {line_count} total sentences\n'
f'* Will skip {junk} junk documents\n'
f'* Processing {n_batches} batches\n')
# Preprocess
t_start = time()
doc_batch_gen = get_all_docs(raw_jl, batch_size=m_per_batch)
for i, (batched_sents, batched_ids) in enumerate(doc_batch_gen):
t_0 = time()
if verbose:
print(f' Starting doc batch: {i+1:3d}')
subidx = str(raw_jl.split('/')[-1]).replace('.jl', f'_{i:03d}_sub.index')
subidx_path = p.join(subidx_dir, subidx)
if p.exists(subidx_path):
print(f' File exists: {subidx_path} \n Skipping... ')
cp.index_builder.include_subidx_path(subidx_path)
else:
# Vectorize
emb_batch, id_batch = cp.batch_vectorize(
text_batch=batched_sents, id_batch=batched_ids,
n_minibatch=n_per_minibatch, very_verbose=False
)
t_vect = time()
if verbose:
print(f' * Vectorized in {t_vect - t_0:6.2f}s')
# Make faiss subindex
subidx_path = check_unique(subidx_path)
cp.index_builder.generate_subindex(subidx_path, emb_batch, id_batch)
t_subidx = time()
if verbose:
print(f' * Subindexed in {t_subidx - t_vect:6.2f}s')
# Clear graph
del emb_batch, batched_sents, id_batch
cp.vectorizer.close_session()
t_reset = time()
if verbose:
print(f' * Cleared TF in {t_reset - t_subidx:6.2f}s')
# Restart TF session if necessary
if i < n_batches - 1:
cp.vectorizer.start_session()
if verbose:
print(f' * Started TF in {time() - t_reset:6.2f}s')
if verbose:
mp, sp = divmod(time() - t_start, 60)
print(f' Completed doc batch: {i+1:3d}/{n_batches} '
f' Total time passed: {int(mp):3d}m{sp:0.2f}s\n')
# Merge
# TODO: Title indexes
t_merge = time()
merged_index_path = shard_date + '_all.index'
merged_index_path = p.join(output_dir, merged_index_path)
merged_index_path = check_unique(merged_index_path)
merged_ivfdata_path = shard_date + '_all.ivfdata'
merged_ivfdata_path = p.join(output_dir, merged_ivfdata_path)
merged_ivfdata_path = check_unique(merged_ivfdata_path)
if verbose:
print(f'\n Merging {merged_index_path.split("/")[-1]} on-disk')
assert cp.index_builder.index_path_clear(merged_index_path)
assert cp.index_builder.index_path_clear(merged_ivfdata_path, '.ivfdata')
n_vect = cp.index_builder.merge_IVFs(index_path=merged_index_path,
ivfdata_path=merged_ivfdata_path)
if verbose:
mm, sm = divmod(time() - t_merge, 60)
print(f' Merged subindexes ({n_vect} vectors) in: {int(mm):3d}m{sm:0.2f}s')
# Record progress
cp.record_progress(raw_jl)
# Clear sub.index files after merge
if no_delete:
clear_dir(subidx_dir)
if verbose:
print('\n Cleared sub.index files')
if add_shard:
try:
url = url
payload = {'path': merged_index_path}
r = requests.put(url, params=payload)
print(r.text)
except Exception as e:
print(f'Shard was not added because an exception occurred: {e}')
if __name__ == '__main__':
if len(file_to_process):
jl = file_to_process[0]
main(raw_jl=jl)
else:
print('Nothing to process.')
| 40.428571 | 90 | 0.652196 | [ "MIT" ] | usc-isi-i2/dig-text-similarity-search | py_scripts/preprocessing/prep_shard.py | 7,924 | Python |
import pytest
pmgout = pytest.importorskip("pymatgen.io.vasp.outputs")
Vasprun = pmgout.Vasprun
import os
import numpy as np
from flare.struc import Structure, get_unique_species
from flare.dft_interface.vasp_util import md_trajectory_from_vasprun
from flare.utils.flare_io import md_trajectory_to_file, md_trajectory_from_file
pytestmark = pytest.mark.filterwarnings(
"ignore::UserWarning", "ignore::pymatgen.io.vasp.outputs.UnconvergedVASPWarning"
)
def test_read_write_trajectory():
structures = md_trajectory_from_vasprun("test_files/test_vasprun.xml")
fname = "tst_traj.json"
md_trajectory_to_file(fname, structures)
fstructures = md_trajectory_from_file(fname)
for s, f in zip(structures, fstructures):
assert np.isclose(s.forces, f.forces).all()
assert np.isclose(s.positions, f.positions).all()
os.system("rm tst_traj.json")
| 35.24 | 84 | 0.779796 | [ "MIT" ] | sh-divya/flare | tests/test_flare_io.py | 881 | Python |
from checkov.common.models.enums import CheckCategories, CheckResult
from checkov.kubernetes.checks.resource.base_spec_check import BaseK8Check
strongCiphers = ["TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256","TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256","TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305","TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384","TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305","TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384","TLS_RSA_WITH_AES_256_GCM_SHA384","TLS_RSA_WITH_AES_128_GCM_SHA256"]
class KubeletCryptographicCiphers(BaseK8Check):
def __init__(self):
# CIS-1.6 4.2.13
id = "CKV_K8S_151"
name = "Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers"
categories = [CheckCategories.KUBERNETES]
supported_entities = ['containers']
super().__init__(name=name, id=id, categories=categories, supported_entities=supported_entities)
def get_resource_id(self, conf):
return f'{conf["parent"]} - {conf["name"]}' if conf.get('name') else conf["parent"]
def scan_spec_conf(self, conf):
if "command" in conf:
if "kubelet" in conf["command"]:
for command in conf["command"]:
if command.startswith("--tls-cipher-suites"):
value = command.split("=")[1]
ciphers = value.split(",")
for cipher in ciphers:
if cipher not in strongCiphers:
return CheckResult.FAILED
return CheckResult.PASSED
check = KubeletCryptographicCiphers()
| 49.75 | 329 | 0.672111 | [ "Apache-2.0" ] | Arun-kc/checkov | checkov/kubernetes/checks/resource/k8s/KubeletCryptographicCiphers.py | 1,592 | Python |
import argparse
import os.path
import time
import numpy as np
import tensorflow as tf
from tensorflow.python import debug as tf_debug
from util.default_util import *
from util.param_util import *
from util.model_util import *
from util.eval_util import *
from util.debug_logger import *
from util.train_logger import *
from util.eval_logger import *
from util.summary_writer import *
def add_arguments(parser):
parser.add_argument("--mode", help="mode to run", required=True)
parser.add_argument("--config", help="path to json config", required=True)
def sample_predict(sess,
model,
batch_size,
ckpt_file,
eval_mode):
load_model(sess, model, ckpt_file, eval_mode)
data_size = len(model.input_data)
feed_dict, data_dict = generate_feed_dict(model, data_size, batch_size)
sess.run(model.data_pipeline.initializer, feed_dict=feed_dict)
predict_span = []
while True:
try:
infer_result = model.model.infer(sess, model.word_embedding)
predict_span.extend(infer_result.predict)
except tf.errors.OutOfRangeError:
break
predict_size = len(predict_span)
if data_size != predict_size:
raise ValueError("input data size {0} and output data size {1} is not the same".format(data_size, predict_size))
sample_result = []
for i in range(data_size):
sample_id = data_dict["input_data"][i]["id"]
context = data_dict["input_context"][i]
context_tokens = context.split(" ")
predict_start = int(predict_span[i][0])
predict_end = int(predict_span[i][1])
predict = " ".join(context_tokens[predict_start:predict_end+1])
sample_result.append({
"id": sample_id,
"context": context,
"predict": {
"text": predict,
"start": predict_start,
"end": predict_end
},
"answers": []
})
for answer in data_dict["input_data"][i]["answers"]:
label_start = int(answer["start"])
label_end = int(answer["end"])
label = " ".join(context_tokens[label_start:label_end+1])
sample_result[-1]["answers"].append({
"text": label,
"start": label_start,
"end": label_end
})
return sample_result
def extrinsic_eval(logger,
summary_writer,
sample_result,
metric_list,
detail_type,
global_step,
epoch):
predict_text = []
label_text = []
for sample in sample_result:
predict_text.append(sample["predict"]["text"])
label_text.append([])
for answer in sample["answers"]:
label_text[-1].append(answer["text"])
eval_result_list = []
sample_output = sample_result
for metric in metric_list:
score = evaluate_from_data(predict_text, label_text, metric)
summary_writer.add_value_summary(metric, score, global_step)
eval_result = ExtrinsicEvalLog(metric=metric,
score=score, sample_output=None, sample_size=len(sample_output))
eval_result_list.append(eval_result)
if detail_type == "simplified":
sample_output = { sample["id"]: sample["predict"]["text"] for sample in sample_output }
eval_result_detail = ExtrinsicEvalLog(metric="detail",
score=0.0, sample_output=sample_output, sample_size=len(sample_output))
basic_info = BasicInfoEvalLog(epoch=epoch, global_step=global_step)
logger.update_extrinsic_eval(eval_result_list, basic_info)
logger.update_extrinsic_eval_detail(eval_result_detail, basic_info)
logger.check_extrinsic_eval()
logger.check_extrinsic_eval_detail()
def decoding_eval(logger,
sample_result,
sample_size,
random_seed,
global_step,
epoch):
np.random.seed(random_seed)
    sample_ids = np.random.randint(0, len(sample_result), size=sample_size)
sample_data = [sample_result[sample_id] for sample_id in sample_ids]
eval_result_list = []
for sample in sample_data:
sample_input = sample
sample_output = sample["predict"]["text"]
sample_reference_list = []
for answer in sample["answers"]:
sample_reference = answer["text"]
sample_reference_list.append(sample_reference)
eval_result = DecodingEvalLog(sample_input=sample_input,
sample_output=sample_output, sample_reference=sample_reference_list)
eval_result_list.append(eval_result)
basic_info = BasicInfoEvalLog(epoch=epoch, global_step=global_step)
logger.update_decoding_eval(eval_result_list, basic_info)
logger.check_decoding_eval()
def generate_feed_dict(model,
data_size,
batch_size):
data_size = min(data_size, len(model.input_data))
input_data = model.input_data[:data_size]
input_answer = model.input_answer[:data_size]
input_question = model.input_question[:data_size]
input_question_word = model.input_question_word[:data_size] if model.input_question_word is not None else None
input_question_subword = model.input_question_subword[:data_size] if model.input_question_subword is not None else None
input_question_char = model.input_question_char[:data_size] if model.input_question_char is not None else None
input_context = model.input_context[:data_size]
input_context_word = model.input_context_word[:data_size] if model.input_context_word is not None else None
input_context_subword = model.input_context_subword[:data_size] if model.input_context_subword is not None else None
input_context_char = model.input_context_char[:data_size] if model.input_context_char is not None else None
data_dict = {
"data_size": data_size,
"input_data": input_data,
"input_answer": input_answer,
"input_question": input_question,
"input_question_word": input_question_word,
"input_question_subword": input_question_subword,
"input_question_char": input_question_char,
"input_context": input_context,
"input_context_word": input_context_word,
"input_context_subword": input_context_subword,
"input_context_char": input_context_char
}
feed_dict = {
model.data_pipeline.data_size_placeholder: data_size,
model.data_pipeline.batch_size_placeholder: batch_size
}
if model.data_pipeline.input_answer_placeholder is not None and input_answer is not None:
feed_dict[model.data_pipeline.input_answer_placeholder] = input_answer
if model.data_pipeline.input_question_placeholder is not None and input_question is not None:
feed_dict[model.data_pipeline.input_question_placeholder] = input_question
if model.data_pipeline.input_question_word_placeholder is not None and input_question_word is not None:
feed_dict[model.data_pipeline.input_question_word_placeholder] = input_question_word
if model.data_pipeline.input_question_subword_placeholder is not None and input_question_subword is not None:
feed_dict[model.data_pipeline.input_question_subword_placeholder] = input_question_subword
if model.data_pipeline.input_question_char_placeholder is not None and input_question_char is not None:
feed_dict[model.data_pipeline.input_question_char_placeholder] = input_question_char
if model.data_pipeline.input_context_placeholder is not None and input_context is not None:
feed_dict[model.data_pipeline.input_context_placeholder] = input_context
if model.data_pipeline.input_context_word_placeholder is not None and input_context_word is not None:
feed_dict[model.data_pipeline.input_context_word_placeholder] = input_context_word
if model.data_pipeline.input_context_subword_placeholder is not None and input_context_subword is not None:
feed_dict[model.data_pipeline.input_context_subword_placeholder] = input_context_subword
if model.data_pipeline.input_context_char_placeholder is not None and input_context_char is not None:
feed_dict[model.data_pipeline.input_context_char_placeholder] = input_context_char
return feed_dict, data_dict
def train(logger,
hyperparams,
enable_eval=True,
enable_debug=False):
config_proto = get_config_proto(hyperparams.device_log_device_placement,
hyperparams.device_allow_soft_placement, hyperparams.device_allow_growth,
hyperparams.device_per_process_gpu_memory_fraction)
summary_output_dir = hyperparams.train_summary_output_dir
if not tf.gfile.Exists(summary_output_dir):
tf.gfile.MakeDirs(summary_output_dir)
logger.log_print("##### create train model #####")
train_model = create_train_model(logger, hyperparams)
train_sess = tf.Session(config=config_proto, graph=train_model.graph)
if enable_debug == True:
train_sess = tf_debug.LocalCLIDebugWrapperSession(train_sess)
train_summary_writer = SummaryWriter(train_model.graph, os.path.join(summary_output_dir, "train"))
init_model(train_sess, train_model)
train_logger = TrainLogger(hyperparams.data_log_output_dir)
if enable_eval == True:
logger.log_print("##### create infer model #####")
infer_model = create_infer_model(logger, hyperparams)
infer_sess = tf.Session(config=config_proto, graph=infer_model.graph)
if enable_debug == True:
infer_sess = tf_debug.LocalCLIDebugWrapperSession(infer_sess)
infer_summary_writer = SummaryWriter(infer_model.graph, os.path.join(summary_output_dir, "infer"))
init_model(infer_sess, infer_model)
eval_logger = EvalLogger(hyperparams.data_log_output_dir)
logger.log_print("##### start training #####")
global_step = 0
for epoch in range(hyperparams.train_num_epoch):
feed_dict, data_dict = generate_feed_dict(train_model, len(train_model.input_answer), hyperparams.train_batch_size)
train_sess.run(train_model.data_pipeline.initializer, feed_dict=feed_dict)
step_in_epoch = 0
while True:
try:
start_time = time.time()
train_result = train_model.model.train(train_sess, train_model.word_embedding)
end_time = time.time()
global_step = train_result.global_step
step_in_epoch += 1
train_logger.update(train_result, epoch, step_in_epoch, end_time-start_time)
if step_in_epoch % hyperparams.train_step_per_stat == 0:
train_logger.check()
train_summary_writer.add_summary(train_result.summary, global_step)
if step_in_epoch % hyperparams.train_step_per_ckpt == 0:
train_model.model.save(train_sess, global_step, "debug")
if step_in_epoch % hyperparams.train_step_per_eval == 0 and enable_eval == True:
ckpt_file = infer_model.model.get_latest_ckpt("debug")
sample_result = sample_predict(infer_sess, infer_model, hyperparams.train_eval_batch_size, ckpt_file, "debug")
extrinsic_eval(eval_logger, infer_summary_writer, sample_result,
hyperparams.train_eval_metric, hyperparams.train_eval_detail_type, global_step, epoch)
decoding_eval(eval_logger, sample_result, hyperparams.train_decoding_sample_size,
hyperparams.train_random_seed + global_step, global_step, epoch)
except tf.errors.OutOfRangeError:
train_logger.check()
train_summary_writer.add_summary(train_result.summary, global_step)
train_model.model.save(train_sess, global_step, "epoch")
if enable_eval == True:
ckpt_file = infer_model.model.get_latest_ckpt("epoch")
sample_result = sample_predict(infer_sess, infer_model, hyperparams.train_eval_batch_size, ckpt_file, "epoch")
extrinsic_eval(eval_logger, infer_summary_writer, sample_result,
hyperparams.train_eval_metric, hyperparams.train_eval_detail_type, global_step, epoch)
decoding_eval(eval_logger, sample_result, hyperparams.train_decoding_sample_size,
hyperparams.train_random_seed + global_step, global_step, epoch)
break
train_summary_writer.close_writer()
if enable_eval == True:
infer_summary_writer.close_writer()
logger.log_print("##### finish training #####")
def evaluate(logger,
hyperparams,
enable_debug=False):
config_proto = get_config_proto(hyperparams.device_log_device_placement,
hyperparams.device_allow_soft_placement, hyperparams.device_allow_growth,
hyperparams.device_per_process_gpu_memory_fraction)
summary_output_dir = hyperparams.train_summary_output_dir
if not tf.gfile.Exists(summary_output_dir):
tf.gfile.MakeDirs(summary_output_dir)
logger.log_print("##### create infer model #####")
infer_model = create_infer_model(logger, hyperparams)
infer_sess = tf.Session(config=config_proto, graph=infer_model.graph)
if enable_debug == True:
infer_sess = tf_debug.LocalCLIDebugWrapperSession(infer_sess)
infer_summary_writer = SummaryWriter(infer_model.graph, os.path.join(summary_output_dir, "infer"))
init_model(infer_sess, infer_model)
eval_logger = EvalLogger(hyperparams.data_log_output_dir)
logger.log_print("##### start evaluation #####")
global_step = 0
eval_mode = "debug" if enable_debug == True else "epoch"
ckpt_file_list = infer_model.model.get_ckpt_list(eval_mode)
for i, ckpt_file in enumerate(ckpt_file_list):
sample_result = sample_predict(infer_sess, infer_model, hyperparams.train_eval_batch_size, ckpt_file, eval_mode)
extrinsic_eval(eval_logger, infer_summary_writer, sample_result,
hyperparams.train_eval_metric, hyperparams.train_eval_detail_type, global_step, i)
decoding_eval(eval_logger, sample_result,
hyperparams.train_decoding_sample_size, hyperparams.train_random_seed, global_step, i)
infer_summary_writer.close_writer()
logger.log_print("##### finish evaluation #####")
def main(args):
hyperparams = load_hyperparams(args.config)
logger = DebugLogger(hyperparams.data_log_output_dir)
tf_version = check_tensorflow_version()
logger.log_print("# tensorflow verison is {0}".format(tf_version))
if (args.mode == 'train'):
train(logger, hyperparams, enable_eval=False, enable_debug=False)
elif (args.mode == 'train_eval'):
train(logger, hyperparams, enable_eval=True, enable_debug=False)
elif (args.mode == 'train_debug'):
train(logger, hyperparams, enable_eval=False, enable_debug=True)
elif (args.mode == 'eval'):
evaluate(logger, hyperparams, enable_debug=False)
elif (args.mode == 'eval_debug'):
evaluate(logger, hyperparams, enable_debug=True)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
add_arguments(parser)
args = parser.parse_args()
main(args)
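# Example invocation (the config path is illustrative; see load_hyperparams for the format):
#   python reading_comprehension_run.py --mode train_eval --config config/mrc_config.json
# Supported modes: train, train_eval, train_debug, eval, eval_debug.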
| 45.682216 | 130 | 0.689195 | [ "Apache-2.0" ] | Atul-Anand-Jha/reading_comprehension_tf | reading_comprehension/reading_comprehension_run.py | 15,669 | Python |
from __future__ import print_function, absolute_import, division
import argparse
import os
import zipfile
import tarfile
import numpy as np
import h5py
from glob import glob
from shutil import rmtree
import sys
sys.path.append('../')
from common.h36m_dataset import H36M_NAMES
output_filename_pt = 'data_2d_h36m_sh_pt_mpii'
output_filename_ft = 'data_2d_h36m_sh_ft_h36m'
subjects = ['S1', 'S5', 'S6', 'S7', 'S8', 'S9', 'S11']
cam_map = {
'54138969': 0,
'55011271': 1,
'58860488': 2,
'60457274': 3,
}
metadata = {
'num_joints': 16,
'keypoints_symmetry': [
[3, 4, 5, 13, 14, 15],
[2, 1, 0, 12, 11, 10],
]
}
# Stacked Hourglass produces 16 joints. These are the names.
SH_NAMES = [''] * 16
SH_NAMES[0] = 'RFoot'
SH_NAMES[1] = 'RKnee'
SH_NAMES[2] = 'RHip'
SH_NAMES[3] = 'LHip'
SH_NAMES[4] = 'LKnee'
SH_NAMES[5] = 'LFoot'
SH_NAMES[6] = 'Hip'
SH_NAMES[7] = 'Spine'
SH_NAMES[8] = 'Thorax'
SH_NAMES[9] = 'Head'
SH_NAMES[10] = 'RWrist'
SH_NAMES[11] = 'RElbow'
SH_NAMES[12] = 'RShoulder'
SH_NAMES[13] = 'LShoulder'
SH_NAMES[14] = 'LElbow'
SH_NAMES[15] = 'LWrist'
# Permutation that goes from SH detections to H36M ordering.
SH_TO_GT_PERM = np.array([SH_NAMES.index(h) for h in H36M_NAMES if h != '' and h in SH_NAMES])
assert np.all(SH_TO_GT_PERM == np.array([6, 2, 1, 0, 3, 4, 5, 7, 8, 9, 13, 14, 15, 12, 11, 10]))
metadata['keypoints_symmetry'][0] = [SH_TO_GT_PERM.tolist().index(h) for h in metadata['keypoints_symmetry'][0]]
metadata['keypoints_symmetry'][1] = [SH_TO_GT_PERM.tolist().index(h) for h in metadata['keypoints_symmetry'][1]]
def process_subject(subject, file_list, output):
if subject == 'S11':
assert len(file_list) == 119, "Expected 119 files for subject " + subject + ", got " + str(len(file_list))
else:
assert len(file_list) == 120, "Expected 120 files for subject " + subject + ", got " + str(len(file_list))
for f in file_list:
action, cam = os.path.splitext(os.path.basename(f))[0].replace('_', ' ').split('.')
if subject == 'S11' and action == 'Directions':
continue # Discard corrupted video
if action not in output[subject]:
output[subject][action] = [None, None, None, None]
        with h5py.File(f, 'r') as hf:
# positions = hf['poses'].value
positions = np.array(hf['poses'])
positions = positions[:, SH_TO_GT_PERM, :]
output[subject][action][cam_map[cam]] = positions.astype('float32')
if __name__ == '__main__':
if os.path.basename(os.getcwd()) != 'data':
print('This script must be launched from the "data" directory')
exit(0)
parser = argparse.ArgumentParser(description='Human3.6M dataset downloader/converter')
parser.add_argument('-pt', '--pretrained', default='', type=str, metavar='PATH', help='convert pretrained dataset')
parser.add_argument('-ft', '--fine-tuned', default='', type=str, metavar='PATH', help='convert fine-tuned dataset')
args = parser.parse_args()
if args.pretrained:
print('Converting pretrained dataset from', args.pretrained)
print('Extracting...')
with zipfile.ZipFile(args.pretrained, 'r') as archive:
archive.extractall('sh_pt')
print('Converting...')
output = {}
for subject in subjects:
output[subject] = {}
file_list = glob('sh_pt/h36m/' + subject + '/StackedHourglass/*.h5')
process_subject(subject, file_list, output)
print('Saving...')
np.savez_compressed(output_filename_pt, positions_2d=output, metadata=metadata)
print('Cleaning up...')
rmtree('sh_pt')
print('Done.')
if args.fine_tuned:
print('Converting fine-tuned dataset from', args.fine_tuned)
print('Extracting...')
with tarfile.open(args.fine_tuned, 'r:gz') as archive:
archive.extractall('sh_ft')
print('Converting...')
output = {}
for subject in subjects:
output[subject] = {}
file_list = glob('sh_ft/' + subject + '/StackedHourglassFineTuned240/*.h5')
process_subject(subject, file_list, output)
print('Saving...')
np.savez_compressed(output_filename_ft, positions_2d=output, metadata=metadata)
print('Cleaning up...')
rmtree('sh_ft')
print('Done.')
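# Example invocation from the "data" directory (archive names are illustrative):
#   python prepare_data_2d_h36m_sh.py -pt sh_pretrained_detections.zip -ft sh_finetuned_detections.tar.gz
# Either flag may be given on its own; each produces the corresponding .npz output file.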
| 31.768116 | 119 | 0.62979 | [ "Apache-2.0" ] | fullmoonhalf/SemGCN | data/prepare_data_2d_h36m_sh.py | 4,384 | Python |
# -*- coding: utf-8 -*-
# pylint: disable=C,R,W
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
import sqlparse
from sqlparse.sql import Identifier, IdentifierList
from sqlparse.tokens import Keyword, Name
RESULT_OPERATIONS = {'UNION', 'INTERSECT', 'EXCEPT'}
PRECEDES_TABLE_NAME = {'FROM', 'JOIN', 'DESC', 'DESCRIBE', 'WITH'}
# TODO: some sql_lab logic here.
class SupersetQuery(object):
def __init__(self, sql_statement):
self.sql = sql_statement
self._table_names = set()
self._alias_names = set()
# TODO: multistatement support
logging.info('Parsing with sqlparse statement {}'.format(self.sql))
self._parsed = sqlparse.parse(self.sql)
for statement in self._parsed:
self.__extract_from_token(statement)
self._table_names = self._table_names - self._alias_names
@property
def tables(self):
return self._table_names
def is_select(self):
return self._parsed[0].get_type() == 'SELECT'
def stripped(self):
sql = self.sql
if sql:
            while sql and sql[-1] in (' ', ';', '\n', '\t'):
sql = sql[:-1]
return sql
@staticmethod
def __precedes_table_name(token_value):
for keyword in PRECEDES_TABLE_NAME:
if keyword in token_value:
return True
return False
@staticmethod
def __get_full_name(identifier):
if len(identifier.tokens) > 1 and identifier.tokens[1].value == '.':
return '{}.{}'.format(identifier.tokens[0].value,
identifier.tokens[2].value)
return identifier.get_real_name()
@staticmethod
def __is_result_operation(keyword):
for operation in RESULT_OPERATIONS:
if operation in keyword.upper():
return True
return False
@staticmethod
def __is_identifier(token):
return (
isinstance(token, IdentifierList) or isinstance(token, Identifier))
def __process_identifier(self, identifier):
# exclude subselects
if '(' not in '{}'.format(identifier):
self._table_names.add(SupersetQuery.__get_full_name(identifier))
return
# store aliases
if hasattr(identifier, 'get_alias'):
self._alias_names.add(identifier.get_alias())
if hasattr(identifier, 'tokens'):
# some aliases are not parsed properly
if identifier.tokens[0].ttype == Name:
self._alias_names.add(identifier.tokens[0].value)
self.__extract_from_token(identifier)
def as_create_table(self, table_name, overwrite=False):
"""Reformats the query into the create table as query.
Works only for the single select SQL statements, in all other cases
the sql query is not modified.
:param superset_query: string, sql query that will be executed
:param table_name: string, will contain the results of the
query execution
:param overwrite, boolean, table table_name will be dropped if true
:return: string, create table as query
"""
# TODO(bkyryliuk): enforce that all the columns have names.
# Presto requires it for the CTA operation.
# TODO(bkyryliuk): drop table if allowed, check the namespace and
# the permissions.
# TODO raise if multi-statement
exec_sql = ''
sql = self.stripped()
if overwrite:
exec_sql = 'DROP TABLE IF EXISTS {table_name};\n'
exec_sql += 'CREATE TABLE {table_name} AS \n{sql}'
return exec_sql.format(**locals())
def __extract_from_token(self, token):
if not hasattr(token, 'tokens'):
return
table_name_preceding_token = False
for item in token.tokens:
if item.is_group and not self.__is_identifier(item):
self.__extract_from_token(item)
if item.ttype in Keyword:
if SupersetQuery.__precedes_table_name(item.value.upper()):
table_name_preceding_token = True
continue
if not table_name_preceding_token:
continue
if item.ttype in Keyword:
if SupersetQuery.__is_result_operation(item.value):
table_name_preceding_token = False
continue
# FROM clause is over
break
if isinstance(item, Identifier):
self.__process_identifier(item)
if isinstance(item, IdentifierList):
for token in item.tokens:
if SupersetQuery.__is_identifier(token):
self.__process_identifier(token)
| 34.822695 | 79 | 0.614868 | [
"Apache-2.0"
] | AmberCa/incubator-superset | superset/sql_parse.py | 4,910 | Python |
import dynet as dy
import time
import random
LAYERS = 2
INPUT_DIM = 256 #50 #256
HIDDEN_DIM = 256 # 50 #1024
VOCAB_SIZE = 0
from collections import defaultdict
from itertools import count
import argparse
import sys
import util
class RNNLanguageModel:
def __init__(self, model, LAYERS, INPUT_DIM, HIDDEN_DIM, VOCAB_SIZE, builder=dy.SimpleRNNBuilder):
self.builder = builder(LAYERS, INPUT_DIM, HIDDEN_DIM, model)
self.lookup = model.add_lookup_parameters((VOCAB_SIZE, INPUT_DIM))
self.R = model.add_parameters((VOCAB_SIZE, HIDDEN_DIM))
self.bias = model.add_parameters((VOCAB_SIZE))
def save_to_disk(self, filename):
dy.save(filename, [self.builder, self.lookup, self.R, self.bias])
def load_from_disk(self, filename):
(self.builder, self.lookup, self.R, self.bias) = dy.load(filename, model)
def build_lm_graph(self, sent):
dy.renew_cg()
init_state = self.builder.initial_state()
R = dy.parameter(self.R)
bias = dy.parameter(self.bias)
errs = [] # will hold expressions
es=[]
state = init_state
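# Feed consecutive (current, next) character pairs through the RNN and collect per-step negative log-likelihoods.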
for (cw,nw) in zip(sent,sent[1:]):
# assume word is already a word-id
x_t = dy.lookup(self.lookup, int(cw))
state = state.add_input(x_t)
y_t = state.output()
r_t = bias + (R * y_t)
err = dy.pickneglogsoftmax(r_t, int(nw))
errs.append(err)
nerr = dy.esum(errs)
return nerr
def predict_next_word(self, sentence):
dy.renew_cg()
init_state = self.builder.initial_state()
R = dy.parameter(self.R)
bias = dy.parameter(self.bias)
state = init_state
for cw in sentence:
# assume word is already a word-id
x_t = dy.lookup(self.lookup, int(cw))
state = state.add_input(x_t)
y_t = state.output()
r_t = bias + (R * y_t)
prob = dy.softmax(r_t)
return prob
def sample(self, first=1, nchars=0, stop=-1):
res = [first]
dy.renew_cg()
state = self.builder.initial_state()
R = dy.parameter(self.R)
bias = dy.parameter(self.bias)
cw = first
while True:
x_t = dy.lookup(self.lookup, cw)
state = state.add_input(x_t)
y_t = state.output()
r_t = bias + (R * y_t)
ydist = dy.softmax(r_t)
dist = ydist.vec_value()
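# Sample the next character id from the softmax distribution via inverse-CDF sampling.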
rnd = random.random()
for i,p in enumerate(dist):
rnd -= p
if rnd <= 0: break
res.append(i)
cw = i
if cw == stop: break
if nchars and len(res) > nchars: break
return res
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('corpus', help='Path to the corpus file.')
args = parser.parse_args()
train = util.CharsCorpusReader(args.corpus, begin="<s>")
vocab = util.Vocab.from_corpus(train)
VOCAB_SIZE = vocab.size()
model = dy.Model()
trainer = dy.SimpleSGDTrainer(model, learning_rate=1.0)
#lm = RNNLanguageModel(model, LAYERS, INPUT_DIM, HIDDEN_DIM, VOCAB_SIZE, builder=dy.SimpleRNNBuilder)
lm = RNNLanguageModel(model, LAYERS, INPUT_DIM, HIDDEN_DIM, VOCAB_SIZE, builder=dy.LSTMBuilder)
train = list(train)
chars = loss = 0.0
for ITER in range(100):
random.shuffle(train)
for i,sent in enumerate(train):
_start = time.time()
if i % 50 == 0:
trainer.status()
if chars > 0: print(loss / chars,)
for _ in range(1):
samp = lm.sample(first=vocab.w2i["<s>"],stop=vocab.w2i["\n"])
print("".join([vocab.i2w[c] for c in samp]).strip())
loss = 0.0
chars = 0.0
chars += len(sent)-1
isent = [vocab.w2i[w] for w in sent]
errs = lm.build_lm_graph(isent)
loss += errs.scalar_value()
errs.backward()
trainer.update()
#print "TM:",(time.time() - _start)/len(sent)
print("ITER {}, loss={}".format(ITER, loss))
trainer.status()
lm.save_to_disk("RNNLanguageModel.model")
print("loading the saved model...")
lm.load_from_disk("RNNLanguageModel.model")
samp = lm.sample(first=vocab.w2i["<s>"],stop=vocab.w2i["\n"])
print("".join([vocab.i2w[c] for c in samp]).strip())
| 32.676259 | 105 | 0.568692 | [
"Apache-2.0"
] | kashif/dynet | examples/rnnlm/rnnlm.py | 4,542 | Python |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
from mcts.webapi_tests.fm_radio.fm_radio_test import FMRadioTestCommon
from mcts.webapi_tests.fm_radio.test_fm_radio_basic import TestFMRadioBasic
| 49.571429 | 75 | 0.801153 | [
"MPL-2.0",
"MPL-2.0-no-copyleft-exception"
] | Mozilla-GitHub-Standards/7842899900b1eec401075a453a03b659ec3c617f97543c62d75cb76ebb9be8b4 | mcts/webapi_tests/fm_radio/__init__.py | 347 | Python |
"""Views of problem2 app."""
from django.shortcuts import render
from .forms import FiboForm
def display(request):
"""Function view to display form in the standard manner."""
if request.method == 'POST':
form = FiboForm(request.POST)
if form.is_valid():
fibo = form.save(commit=False)
evensum = fibo.evenFiboSum()
fibo.save()
return render(request, 'problem2/solution2.html',
{'evensum': evensum, 'form': form})
else:
form = FiboForm()
return render(request, 'problem2/solution2.html', {'form': form})
| 29.52381 | 69 | 0.593548 | [
"BSD-3-Clause"
] | byteknacker/eulerapps | problem2/views.py | 620 | Python |
import unittest
from foucluster.plot import song_plot, diff_plot, heatmap_song
import configparser
import os
import json
from scipy.io.wavfile import read
import numpy as np
import pandas as pd
class TestPlot(unittest.TestCase):
@staticmethod
def _get_series(i=0):
"""
:return:
"""
config = configparser.ConfigParser()
config.read('config.ini')
fourier_folder = config['Folder']['Output']
first_file = os.path.join(fourier_folder,
os.listdir(fourier_folder)[i])
with open(first_file, 'r') as b:
j = json.load(b)
name = list(j.keys())[0]
song = j[name]
return song, name
@staticmethod
def _get_song(i=0):
"""
:return:
"""
config = configparser.ConfigParser()
config.read('config.ini')
song_folder = config['Folder']['Temp']
first_song = os.listdir(song_folder)[i]
rate, aud_data = read(os.path.join(song_folder,
first_song))
# Should be mono
if len(aud_data) != len(aud_data.ravel()):
aud_data = np.mean(aud_data, axis=1)
return aud_data, first_song
def test_diff(self):
"""
:return:
"""
config = configparser.ConfigParser()
config.read('config.ini')
image_folder = config['Folder']['Image']
song_1, name_1 = self._get_series(i=0)
song_2, name_2 = self._get_series(i=1)
diff_plot(song_1, song_2,
filename=name_1.split()[2].split('.')[0] + name_2.split()[2].split('.')[0],
folder=image_folder)
def test_song(self):
"""
:return:
"""
config = configparser.ConfigParser()
config.read('config.ini')
image_folder = config['Folder']['Image']
aud_data, name = self._get_song()
song_plot(aud_data,
filename=name.split('.')[0],
folder=image_folder)
def test_heatmap(self):
config = configparser.ConfigParser()
config.read('config.ini')
image_folder = config['Folder']['Image']
distance_folder = config['Folder']['Distance']
df = pd.read_csv(os.path.join(distance_folder, 'positive.csv'),
sep=';',
index_col=[0, 1])
heatmap_song(df,
image_name='heatmap_positive',
image_folder=image_folder)
if __name__ == '__main__':
unittest.main()
| 29.11236 | 93 | 0.547665 | [
"MIT"
] | cperales/Fourier-Clustering-song | test/test_b_plot.py | 2,591 | Python |
# Generated by Django 2.2.4 on 2020-06-25 17:17
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import internationalflavor.countries.models
import internationalflavor.vat_number.models
class Migration(migrations.Migration):
initial = True
dependencies = [
('contenttypes', '0002_remove_content_type_name'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Cart',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True, db_index=True, verbose_name='created')),
('modified', models.DateTimeField(auto_now=True, verbose_name='modified')),
('delivery_name', models.CharField(db_index=True, max_length=30, verbose_name='full name or company name')),
('delivery_street', models.CharField(max_length=200, verbose_name='street and number')),
('delivery_postcode', models.CharField(max_length=30, verbose_name='postcode')),
('delivery_city', models.CharField(max_length=50, verbose_name='city')),
('delivery_country', internationalflavor.countries.models.CountryField(db_index=True, verbose_name='country')),
('billing_name', models.CharField(max_length=100, verbose_name='full name or company name')),
('billing_street', models.CharField(max_length=200, verbose_name='street')),
('billing_postcode', models.CharField(max_length=30, verbose_name='postcode')),
('billing_city', models.CharField(max_length=50, verbose_name='city')),
('billing_country', internationalflavor.countries.models.CountryField(db_index=True, verbose_name='country')),
('reg_id', models.CharField(blank=True, max_length=30, verbose_name='Company Registration No.')),
('tax_id', models.CharField(blank=True, max_length=30, verbose_name='TAX ID')),
('vat_id', internationalflavor.vat_number.models.VATNumberField(blank=True, verbose_name='VAT ID')),
('email', models.EmailField(max_length=254, verbose_name='email')),
('phone', models.CharField(max_length=30, verbose_name='phone')),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'shopping cart',
'verbose_name_plural': 'shopping carts',
},
),
migrations.CreateModel(
name='Item',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('object_id', models.PositiveIntegerField()),
('quantity', models.PositiveSmallIntegerField(verbose_name='quantity')),
('created', models.DateTimeField(auto_now_add=True, db_index=True, verbose_name='created')),
('modified', models.DateTimeField(auto_now=True, verbose_name='modified')),
('cart', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='commerce.Cart')),
('content_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='contenttypes.ContentType')),
],
options={
'verbose_name': 'item',
'verbose_name_plural': 'items',
},
),
]
| 56.261538 | 128 | 0.637134 | [
"Apache-2.0"
] | PragmaticMates/django-commerce | commerce/migrations/0001_initial.py | 3,657 | Python |
# noqa: D400,D205
"""
Optimize
========
Multistart optimization with support for various optimizers.
"""
from .load import (
fill_result_from_history,
optimization_result_from_history,
read_result_from_file,
read_results_from_file,
)
from .optimize import minimize
from .optimizer import (
CmaesOptimizer,
DlibOptimizer,
FidesOptimizer,
IpoptOptimizer,
NLoptOptimizer,
Optimizer,
PyswarmOptimizer,
PyswarmsOptimizer,
ScipyDifferentialEvolutionOptimizer,
ScipyOptimizer,
)
from .options import OptimizeOptions
| 19.551724 | 60 | 0.746032 | [
"BSD-3-Clause"
] | m-philipps/pyPESTO | pypesto/optimize/__init__.py | 567 | Python |
from typing import Literal
from bolinette import data, types
from bolinette.data import mapping
from bolinette.exceptions import InternalError
from bolinette.utils.functions import getattr_, hasattr_, invoke
class Mapper:
def __init__(self):
self._payloads: dict[str, dict[str, mapping.Definition]] = {}
self._responses: dict[str, dict[str, mapping.Definition]] = {}
@staticmethod
def _get_def(
collection: dict[str, dict[str, mapping.Definition]], model_name: str, key: str
) -> "mapping.Definition":
m = collection.get(model_name)
if m is None:
raise InternalError(f"mapping.unknown_model:{model_name}")
d = m.get(key)
if d is None:
raise InternalError(f"mapping.unknown_definition:{model_name}.{key}")
return d
def payload(self, model_name: str, key: str):
return self._get_def(self._payloads, model_name, key)
@property
def payloads(self):
for model_name in self._payloads:
for key in self._payloads[model_name]:
yield model_name, key, self._payloads[model_name][key]
def response(self, model_name: str, key: str):
return self._get_def(self._responses, model_name, key)
@property
def responses(self):
for model_name in self._responses:
for key in self._responses[model_name]:
yield model_name, key, self._responses[model_name][key]
def _extract_defs(
self,
model: "data.Model",
model_cls: type["data.Model"],
collection: Literal["payloads", "responses"],
merge_defs: Literal["ignore", "append", "overwrite"],
):
defs = {}
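# Collect definitions from parent Model classes first so that subclasses can append to or overwrite them.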
for parent in model_cls.__bases__:
if issubclass(parent, data.Model) and parent != data.Model:
for _key, _def in self._extract_defs(
model, parent, collection, merge_defs
).items():
defs[_key] = _def
def_func = getattr(model_cls, collection)
if hasattr_(def_func, "__func__"):
def_func = def_func.__func__
def_gen = def_func(model)
if def_gen is None:
return defs
new_defs = list(def_gen)
for _def in new_defs:
if isinstance(_def, list):
model_key = "default"
payload = _def
else:
model_key, payload = _def
if model_key in defs:
if merge_defs == "append":
for _param in payload:
defs[model_key].append(_param)
elif merge_defs == "overwrite":
defs[model_key] = payload
else:
defs[model_key] = payload
return defs
def register(self, model: "data.Model"):
def create_defs(collection, attr_name: Literal["payloads", "responses"]):
defs = self._extract_defs(
model, type(model), attr_name, model.__blnt__.merge_defs
)
for model_key, payload in defs.items():
definition = mapping.Definition(model.__blnt__.name, model_key)
for field in payload:
definition.fields.append(field)
if definition.model_name not in collection:
collection[definition.model_name] = {}
collection[definition.model_name][definition.model_key] = definition
create_defs(self._payloads, "payloads")
create_defs(self._responses, "responses")
def marshall(
self,
definition,
entity,
*,
skip_none=False,
as_list=False,
use_foreign_key=False,
):
if entity is None:
return None
if as_list:
return [
self.marshall(
definition,
e,
skip_none=skip_none,
as_list=False,
use_foreign_key=use_foreign_key,
)
for e in entity
]
values = {}
for field in definition.fields:
self._marshall_object(values, field, entity, skip_none, use_foreign_key)
return values
def _marshall_object(
self, values, field, entity, skip_none: bool, use_foreign_key: bool
):
if isinstance(field, mapping.Field):
self._marshall_field(values, field, entity, skip_none)
elif isinstance(field, mapping.Reference) and use_foreign_key:
values[field.foreign_key] = getattr_(entity, field.foreign_key, None)
elif isinstance(field, mapping.Definition):
self._marshall_definition(values, field, entity, skip_none, use_foreign_key)
elif isinstance(field, mapping.List):
self._marshall_list(values, field, entity, skip_none, use_foreign_key)
@staticmethod
def _marshall_field(values, field: "mapping.Field", entity, skip_none: bool):
if field.function is not None:
value = field.function(entity)
else:
value = getattr_(entity, field.key, None)
if field.formatting is not None:
value = field.formatting(value)
if not skip_none or value is not None:
values[field.name] = value
def _marshall_definition(
self,
values,
definition: "mapping.Definition",
entity,
skip_none: bool,
use_foreign_key: bool,
):
d = self.response(definition.model_name, definition.model_key)
attr = None
if definition.function and callable(definition.function):
attr = definition.function(entity)
elif hasattr_(entity, definition.name):
attr = getattr_(entity, definition.name, None)
values[definition.name] = self.marshall(
d, attr, skip_none=skip_none, as_list=False, use_foreign_key=use_foreign_key
)
def _marshall_list(
self,
values,
field: "mapping.List",
entity,
skip_none: bool,
use_foreign_key: bool,
):
if field.function and callable(field.function):
e_list = invoke(field.function, entity)
else:
e_list = getattr_(entity, field.name, None)
elem = field.element
if isinstance(elem, types.db.DataType):
values[field.name] = [e for e in e_list]
elif isinstance(elem, mapping.Definition):
d = self.response(elem.model_name, elem.model_key)
values[field.name] = self.marshall(
d,
e_list,
skip_none=skip_none,
as_list=True,
use_foreign_key=use_foreign_key,
)
| 35.968254 | 88 | 0.582966 | [
"MIT"
] | TheCaptainCat/bolinette | bolinette/data/mapping/mapper.py | 6,798 | Python |
#!/usr/bin/env python3
import pandas as pd
# Kestrel analytics default paths (single input variable)
INPUT_DATA_PATH = "/data/input/0.parquet.gz"
OUTPUT_DATA_PATH = "/data/output/0.parquet.gz"
OUTPUT_DISPLAY = "/data/display/ret.html"
def analytics(dataframe):
# analyze data in dataframe
# provide insights or additional knowledge
newattr = ["newval" + str(i) for i in range(dataframe.shape[0])]
dataframe["x_new_attr"] = newattr
display = "<p>Hello World! -- a Kestrel analytics</p>"
# return the updated Kestrel variable
return dataframe, display
if __name__ == "__main__":
dfi = pd.read_parquet(INPUT_DATA_PATH)
dfo, disp = analytics(dfi)
dfo.to_parquet(OUTPUT_DATA_PATH, compression="gzip")
with open(OUTPUT_DISPLAY, "w") as o:
o.write(disp)
| 28.785714 | 68 | 0.703474 | [
"Apache-2.0"
] | IBM/kestrel-analytics | template/analytics.py | 806 | Python |
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Static()
result.template = "object/static/structure/dantooine/shared_dant_boundary_post.iff"
result.attribute_template_id = -1
result.stfName("obj_n","unknown_object")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
| 26.941176 | 84 | 0.731441 | [
"MIT"
] | SWGANHServices/GameServer_Legacy | data/scripts/templates/object/static/structure/dantooine/shared_dant_boundary_post.py | 458 | Python |
# Generated by Django 2.1.2 on 2019-03-19 22:53
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('partners', '0008_merge_20190307_1527'),
]
operations = [
migrations.AddField(
model_name='partner',
name='is_published',
field=models.BooleanField(default=True, verbose_name='Published'),
),
]
| 22.157895 | 78 | 0.619952 | [
"MIT"
] | TEDxNTUA/tedxntua2019 | project/partners/migrations/0009_partner_is_published.py | 421 | Python |
from argparse import ArgumentParser
class ChannelMap():
channel_map = {
'master': {
'tfm': 'netcoreapp5.0',
'branch': 'master'
},
'release/3.1.2xx': {
'tfm': 'netcoreapp3.1',
'branch': 'release/3.1.2xx'
},
'release/3.1.1xx': {
'tfm': 'netcoreapp3.1',
'branch': 'release/3.1.1xx'
},
'3.1': {
'tfm': 'netcoreapp3.1',
'branch': 'release/3.1'
},
'3.0': {
'tfm': 'netcoreapp3.0',
'branch': 'release/3.0'
},
'release/2.1.6xx': {
'tfm': 'netcoreapp2.1',
'branch': 'release/2.1.6xx'
},
'2.1': {
'tfm': 'netcoreapp2.1',
'branch': 'release/2.1'
},
'LTS': {
'tfm': 'net461', # For Full Framework download the LTS for dotnet cli.
'branch': 'LTS'
}
}
@staticmethod
def get_supported_channels() -> list:
'''List of supported channels.'''
return list(ChannelMap.channel_map.keys())
@staticmethod
def get_supported_frameworks() -> list:
'''List of supported frameworks'''
frameworks = [ChannelMap.channel_map[channel]['tfm'] for channel in ChannelMap.channel_map]
return set(frameworks)
@staticmethod
def get_branch(channel: str) -> str:
if channel in ChannelMap.channel_map:
return ChannelMap.channel_map[channel]['branch']
else:
raise Exception('Channel %s is not supported. Supported channels %s' % (channel, ChannelMap.get_supported_channels()))
@staticmethod
def get_target_framework_monikers(channels: list) -> list:
'''
Translates channel names to Target Framework Monikers (TFMs).
'''
monikers = [
ChannelMap.get_target_framework_moniker(channel)
for channel in channels
]
return list(set(monikers))
@staticmethod
def get_target_framework_moniker(channel: str) -> str:
'''
Translate channel name to Target Framework Moniker (TFM)
'''
if channel in ChannelMap.channel_map:
return ChannelMap.channel_map[channel]['tfm']
else:
raise Exception('Channel %s is not supported. Supported channels %s' % (channel, ChannelMap.get_supported_channels()))
@staticmethod
def get_channel_from_target_framework_moniker(target_framework_moniker: str) -> str:
'''Translate Target Framework Moniker (TFM) to channel name'''
for channel in ChannelMap.channel_map:
if ChannelMap.channel_map[channel]['tfm'] == target_framework_moniker:
return channel
raise Exception('Framework %s is not supported. Supported frameworks: %s' % (target_framework_moniker, ChannelMap.get_supported_frameworks()))
| 34.702381 | 150 | 0.575643 | [
"MIT"
] | artelk/performance | scripts/channel_map.py | 2,915 | Python |
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import torch
from torch import nn
from torch.nn.parameter import Parameter
import torch.nn.functional as F
from torchvision import models
import torch.distributions as dist
import torch
from torch.nn import Parameter
class Resnet18(nn.Module):
r''' ResNet-18 encoder network for image input.
Args:
c_dim (int): output dimension of the latent embedding
normalize (bool): whether the input images should be normalized
use_linear (bool): whether a final linear layer should be used
'''
def __init__(self, c_dim, normalize=True, use_linear=True):
super().__init__()
self.normalize = normalize
self.use_linear = use_linear
self.features = models.resnet18(pretrained=True)
self.features.fc = nn.Sequential()
if use_linear:
self.fc = nn.Linear(512, c_dim)
elif c_dim == 512:
self.fc = nn.Sequential()
else:
raise ValueError('c_dim must be 512 if use_linear is False')
def forward(self, x):
if self.normalize:
x = normalize_imagenet(x)
net = self.features(x)
out = self.fc(net)
return out
def normalize_imagenet(x):
''' Normalize input images according to ImageNet standards.
Args:
x (tensor): input images
'''
x = x.clone()
x[:, 0] = (x[:, 0] - 0.485) / 0.229
x[:, 1] = (x[:, 1] - 0.456) / 0.224
x[:, 2] = (x[:, 2] - 0.406) / 0.225
return x
class DecoderCBatchNorm(nn.Module):
''' Decoder with conditional batch normalization (CBN) class.
Args:
dim (int): input dimension
z_dim (int): dimension of latent code z
c_dim (int): dimension of latent conditioned code c
hidden_size (int): hidden size of Decoder network
leaky (bool): whether to use leaky ReLUs
legacy (bool): whether to use the legacy structure
'''
def __init__(self, dim=3, z_dim=128, c_dim=128,
hidden_size=256, leaky=False, legacy=False):
super().__init__()
self.z_dim = z_dim
if not z_dim == 0:
self.fc_z = nn.Linear(z_dim, hidden_size)
self.fc_p = nn.Conv1d(dim, hidden_size, 1)
self.block0 = CResnetBlockConv1d(c_dim, hidden_size, legacy=legacy)
self.block1 = CResnetBlockConv1d(c_dim, hidden_size, legacy=legacy)
self.block2 = CResnetBlockConv1d(c_dim, hidden_size, legacy=legacy)
self.block3 = CResnetBlockConv1d(c_dim, hidden_size, legacy=legacy)
self.block4 = CResnetBlockConv1d(c_dim, hidden_size, legacy=legacy)
if not legacy:
self.bn = CBatchNorm1d(c_dim, hidden_size)
else:
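# The legacy branch expects a CBatchNorm1d_legacy implementation, which is not defined in this file.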
self.bn = CBatchNorm1d_legacy(c_dim, hidden_size)
self.fc_out = nn.Conv1d(hidden_size, 1, 1)
if not leaky:
self.actvn = F.relu
else:
self.actvn = lambda x: F.leaky_relu(x, 0.2)
def forward(self, p, z, c, **kwargs):
p = p.transpose(1, 2)
batch_size, D, T = p.size()
net = self.fc_p(p)
if self.z_dim != 0:
net_z = self.fc_z(z).unsqueeze(2)
net = net + net_z
net = self.block0(net, c)
net = self.block1(net, c)
net = self.block2(net, c)
net = self.block3(net, c)
net = self.block4(net, c)
out = self.fc_out(self.actvn(self.bn(net, c)))
out = out.squeeze(1)
return out
def get_prior_z(device):
''' Returns prior distribution for latent code z.
Args:
cfg (dict): imported yaml config
device (device): pytorch device
'''
z_dim = 0
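# With z_dim fixed to 0, the prior is a zero-dimensional Normal and the latent code is effectively unused.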
p0_z = dist.Normal(
torch.zeros(z_dim, device = device),
torch.ones(z_dim, device = device)
)
return p0_z
class CBatchNorm1d(nn.Module):
''' Conditional batch normalization layer class.
Args:
c_dim (int): dimension of latent conditioned code c
f_dim (int): feature dimension
norm_method (str): normalization method
'''
def __init__(self, c_dim, f_dim, norm_method='batch_norm'):
super().__init__()
self.c_dim = c_dim
self.f_dim = f_dim
self.norm_method = norm_method
# Submodules
self.conv_gamma = nn.Conv1d(c_dim, f_dim, 1)
self.conv_beta = nn.Conv1d(c_dim, f_dim, 1)
if norm_method == 'batch_norm':
self.bn = nn.BatchNorm1d(f_dim, affine=False)
elif norm_method == 'instance_norm':
self.bn = nn.InstanceNorm1d(f_dim, affine=False)
elif norm_method == 'group_norm':
self.bn = nn.GroupNorm1d(f_dim, affine=False)
else:
raise ValueError('Invalid normalization method!')
self.reset_parameters()
def reset_parameters(self):
nn.init.zeros_(self.conv_gamma.weight)
nn.init.zeros_(self.conv_beta.weight)
nn.init.ones_(self.conv_gamma.bias)
nn.init.zeros_(self.conv_beta.bias)
def forward(self, x, c):
assert(x.size(0) == c.size(0))
assert(c.size(1) == self.c_dim)
# c is assumed to be of size batch_size x c_dim x T
if len(c.size()) == 2:
c = c.unsqueeze(2)
# Affine mapping
gamma = self.conv_gamma(c)
beta = self.conv_beta(c)
# Batchnorm
net = self.bn(x)
out = gamma * net + beta
return out
class CResnetBlockConv1d(nn.Module):
''' Conditional batch normalization-based Resnet block class.
Args:
c_dim (int): dimension of latend conditioned code c
size_in (int): input dimension
size_out (int): output dimension
size_h (int): hidden dimension
norm_method (str): normalization method
legacy (bool): whether to use legacy blocks
'''
def __init__(self, c_dim, size_in, size_h=None, size_out=None,
norm_method='batch_norm', legacy=False):
super().__init__()
# Attributes
if size_h is None:
size_h = size_in
if size_out is None:
size_out = size_in
self.size_in = size_in
self.size_h = size_h
self.size_out = size_out
# Submodules
if not legacy:
self.bn_0 = CBatchNorm1d(
c_dim, size_in, norm_method=norm_method)
self.bn_1 = CBatchNorm1d(
c_dim, size_h, norm_method=norm_method)
else:
self.bn_0 = CBatchNorm1d_legacy(
c_dim, size_in, norm_method=norm_method)
self.bn_1 = CBatchNorm1d_legacy(
c_dim, size_h, norm_method=norm_method)
self.fc_0 = nn.Conv1d(size_in, size_h, 1)
self.fc_1 = nn.Conv1d(size_h, size_out, 1)
self.actvn = nn.ReLU()
if size_in == size_out:
self.shortcut = None
else:
self.shortcut = nn.Conv1d(size_in, size_out, 1, bias=False)
# Initialization
nn.init.zeros_(self.fc_1.weight)
def forward(self, x, c):
net = self.fc_0(self.actvn(self.bn_0(x, c)))
dx = self.fc_1(self.actvn(self.bn_1(net, c)))
if self.shortcut is not None:
x_s = self.shortcut(x)
else:
x_s = x
return x_s + dx
class OccupancyNetwork(nn.Module):
''' Occupancy Network class.
Args:
decoder (nn.Module): decoder network
encoder (nn.Module): encoder network
p0_z (dist): prior distribution for latent code z
device (device): torch device
'''
def __init__(self, device):
super().__init__()
self.device = device
self.decoder = DecoderCBatchNorm(dim=3, z_dim=0, c_dim=256,
hidden_size=256).to(self.device)
self.encoder = Resnet18(256, normalize=True, use_linear=True).to(self.device)
self.p0_z = get_prior_z(self.device)
def forward(self, p, inputs, sample=True, **kwargs):
''' Performs a forward pass through the network.
Args:
p (tensor): sampled points
inputs (tensor): conditioning input
sample (bool): whether to sample for z
'''
batch_size = p.size(0)
c = self.encode_inputs(inputs)
z = self.get_z_from_prior((batch_size,), sample=sample)
p_r = self.decode(p, z, c, **kwargs)
return p_r
def compute_elbo(self, p, occ, inputs, **kwargs):
''' Computes the expectation lower bound.
Args:
p (tensor): sampled points
occ (tensor): occupancy values for p
inputs (tensor): conditioning input
'''
c = self.encode_inputs(inputs)
q_z = self.infer_z(p, occ, c, **kwargs)
z = q_z.rsample()
p_r = self.decode(p, z, c, **kwargs)
rec_error = -p_r.log_prob(occ).sum(dim=-1)
kl = dist.kl_divergence(q_z, self.p0_z).sum(dim=-1)
elbo = -rec_error - kl
return elbo, rec_error, kl
def encode_inputs(self, inputs):
''' Encodes the input.
Args:
input (tensor): the input
'''
c = self.encoder(inputs)
return c
def decode(self, p, z, c, **kwargs):
''' Returns occupancy probabilities for the sampled points.
Args:
p (tensor): points
z (tensor): latent code z
c (tensor): latent conditioned code c
'''
logits = self.decoder(p, z, c, **kwargs)
p_r = dist.Bernoulli(logits=logits)
return p_r
def infer_z(self, p, occ, c, **kwargs):
''' Infers z.
Args:
p (tensor): points tensor
occ (tensor): occupancy values for occ
c (tensor): latent conditioned code c
'''
batch_size = p.size(0)
mean_z = torch.empty(batch_size, 0).to(self.device)
logstd_z = torch.empty(batch_size, 0).to(self.device)
q_z = dist.Normal(mean_z, torch.exp(logstd_z))
return q_z
def get_z_from_prior(self, size=torch.Size([]), sample=True):
''' Returns z from prior distribution.
Args:
size (Size): size of z
sample (bool): whether to sample
'''
if sample:
z = self.p0_z.sample(size).to(self.device)
else:
z = self.p0_z.mean.to(self.device)
z = z.expand(*size, *z.size())
return z
| 31.402857 | 85 | 0.59203 | [
"ECL-2.0",
"Apache-2.0"
] | AOE-khkhan/kaolin | examples/ImageRecon/OccNet/architectures.py | 10,991 | Python |
from .CosineSimilarityLoss import *
from .SoftmaxLoss import *
from .MultipleNegativesRankingLoss import *
from .TripletLoss import *
from .MSELoss import *
from .ContrastiveLoss import *
from .OnlineContrastiveLoss import *
from .MegaBatchMarginLoss import *
from .DenoisingAutoEncoderLoss import *
# Triplet losses
from .BatchHardTripletLoss import *
from .BatchHardSoftMarginTripletLoss import *
from .BatchSemiHardTripletLoss import *
from .BatchAllTripletLoss import *
| 31.6 | 45 | 0.827004 | [
"Apache-2.0"
] | WHU-Peter/sentence-transformers | sentence_transformers/losses/__init__.py | 474 | Python |
# -*- coding: utf-8 -*-
# @Time : 2020/8/8 4:22 PM
# @Author : 司云中
# @File : routing.py
# @Software: Pycharm
from django.urls import path, re_path
websocket_urlpatterns = [
# The official docs suggest path() may have a bug here; re_path supports both regex and path()-style route matching rules
re_path(r'concern_notice',), # store-follow notifications: push new products to followers when the shop owner lists them
re_path(r'buy_notice',), # push purchase details to the user after they buy a product
]
| 23.266667 | 61 | 0.681948 | [
"Apache-2.0"
] | syz247179876/Chsc-Shop | propel_app/routing.py | 499 | Python |
"""
WARNING: This file is about to undergo major refactoring by @pydanny per
Issue #99.
"""
from importlib import import_module
from django.conf import settings
from django.conf.urls import url
from django.core.exceptions import ImproperlyConfigured
from . import apiviews
from . import types
from . import utils
from . import views
class Admin2(object):
"""
The base Admin2 object.
It keeps a registry of all registered Models and collects the urls of their
related ModelAdmin2 instances.
It also provides an index view that serves as an entry point to the
admin site.
"""
index_view = views.IndexView
login_view = views.LoginView
app_index_view = views.AppIndexView
api_index_view = apiviews.IndexAPIView
def __init__(self, name='admin2'):
self.registry = {}
self.apps = {}
self.app_verbose_names = {}
self.name = name
def register(self, model, model_admin=None, **kwargs):
"""
Registers the given model with the given admin class. Once a model is
registered in self.registry, we also add it to app registries in
self.apps.
If no model_admin is passed, it will use ModelAdmin2. If keyword
arguments are given they will be passed to the admin class on
instantiation.
If a model is already registered, this will raise ImproperlyConfigured.
"""
if model in self.registry:
raise ImproperlyConfigured(
'%s is already registered in django-admin2' % model)
if not model_admin:
model_admin = types.ModelAdmin2
self.registry[model] = model_admin(model, admin=self, **kwargs)
# Add the model to the apps registry
app_label = utils.model_options(model).app_label
if app_label in self.apps.keys():
self.apps[app_label][model] = self.registry[model]
else:
self.apps[app_label] = {model: self.registry[model]}
def deregister(self, model):
"""
Deregisters the given model and removes it from the apps registry as well.
If the model is not already registered, this will raise
ImproperlyConfigured.
"""
try:
del self.registry[model]
except KeyError:
raise ImproperlyConfigured(
'%s was never registered in django-admin2' % model)
# Remove the model from the apps registry
# Get the app label
app_label = utils.model_options(model).app_label
# Delete the model from it's app registry
del self.apps[app_label][model]
# if no more models in an app's registry
# then delete the app from the apps.
if not self.apps[app_label]:
del self.apps[app_label]
def register_app_verbose_name(self, app_label, app_verbose_name):
"""
Registers the given app label with the given app verbose name.
If a app_label is already registered, this will raise
ImproperlyConfigured.
"""
if app_label in self.app_verbose_names:
raise ImproperlyConfigured(
'%s is already registered in django-admin2' % app_label)
self.app_verbose_names[app_label] = app_verbose_name
def deregister_app_verbose_name(self, app_label):
"""
Deregisters the given app label. Remove the app label from the
self.app_verbose_names as well.
If the app label is not already registered, this will raise
ImproperlyConfigured.
"""
try:
del self.app_verbose_names[app_label]
except KeyError:
raise ImproperlyConfigured(
'%s app label was never registered in django-admin2' % app_label)
def autodiscover(self):
"""
Autodiscovers all admin2.py modules for apps in INSTALLED_APPS by
trying to import them.
"""
for app_name in [x for x in settings.INSTALLED_APPS]:
try:
import_module("%s.admin2" % app_name)
except ImportError as e:
if str(e).startswith("No module named") and 'admin2' in str(e):
continue
raise e
def get_admin_by_name(self, name):
"""
Returns the admin instance that was registered with the passed in
name.
"""
for object_admin in self.registry.values():
if object_admin.name == name:
return object_admin
raise ValueError(
u'No object admin found with name {}'.format(repr(name)))
def get_index_kwargs(self):
return {
'registry': self.registry,
'app_verbose_names': self.app_verbose_names,
'apps': self.apps,
'login_view': self.login_view,
}
def get_app_index_kwargs(self):
return {
'registry': self.registry,
'app_verbose_names': self.app_verbose_names,
'apps': self.apps,
}
def get_api_index_kwargs(self):
return {
'registry': self.registry,
'app_verbose_names': self.app_verbose_names,
'apps': self.apps,
}
def get_urls(self):
urlpatterns = [
url(regex=r'^$',
view=self.index_view.as_view(**self.get_index_kwargs()),
name='dashboard'
),
url(regex=r'^auth/user/(?P<pk>\d+)/update/password/$',
view=views.PasswordChangeView.as_view(),
name='password_change'
),
url(regex='^password_change_done/$',
view=views.PasswordChangeDoneView.as_view(),
name='password_change_done'
),
url(regex='^logout/$',
view=views.LogoutView.as_view(),
name='logout'
),
url(regex=r'^(?P<app_label>\w+)/$',
view=self.app_index_view.as_view(
**self.get_app_index_kwargs()),
name='app_index'
),
url(regex=r'^api/v0/$',
view=self.api_index_view.as_view(
**self.get_api_index_kwargs()),
name='api_index'
),
]
for model, model_admin in self.registry.items():
model_options = utils.model_options(model)
urlpatterns += [
url('^{}/{}/'.format(
model_options.app_label,
model_options.object_name.lower()),
model_admin.urls),
url('^api/v0/{}/{}/'.format(
model_options.app_label,
model_options.object_name.lower()),
model_admin.api_urls),
]
return urlpatterns
@property
def urls(self):
# We set the application and instance namespace here
return self.get_urls(), self.name, self.name
| 33.927536 | 81 | 0.581803 | [
"BSD-3-Clause"
] | PowerOlive/django-admin2 | djadmin2/core.py | 7,023 | Python |
#!/usr/bin/python2
MIN = -10000
MAX = 10000
| 7.666667 | 18 | 0.630435 | [
"MIT"
] | AI-comp/Orientation2015Problems | a+b/tests/constants.py | 46 | Python |
# -*- coding: utf-8 -*-
# Copyright (c) 2021, stripe and Contributors
# See license.txt
from __future__ import unicode_literals
# import frappe
import unittest
class TestStripeSetting(unittest.TestCase):
pass
| 19.272727 | 45 | 0.764151 | [
"MIT"
] | Hitesh1595/stripe | stripe/stripe/doctype/stripe_setting/test_stripe_setting.py | 212 | Python |
"""
Save module for the console.
"""
import json
from typing import List, Optional
from spotdl.utils.search import parse_query
from spotdl.utils.m3u import create_m3u_file
def save(
query: List[str],
save_path: str,
downloader,
m3u_file: Optional[str] = None,
) -> None:
"""
Save metadata from spotify to the disk.
### Arguments
- query: list of strings to search for.
- save_path: Path to the file to save the metadata to.
- threads: Number of threads to use.
### Notes
- This function is multi-threaded.
"""
# Parse the query
songs = parse_query(query, downloader.threads)
# Convert the songs to JSON
save_data = [song.json for song in songs]
# Save the songs to a file
with open(save_path, "w", encoding="utf-8") as save_file:
json.dump(save_data, save_file, indent=4, ensure_ascii=False)
if m3u_file:
create_m3u_file(
m3u_file, songs, downloader.output, downloader.output_format, False
)
downloader.progress_handler.log(
f"Saved {len(save_data)} song{'s' if len(save_data) > 1 else ''} to {save_path}"
)
| 23.510204 | 88 | 0.653646 | [
"MIT"
] | phcreery/spotdl-v4 | spotdl/console/save.py | 1,152 | Python |
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from contrib.media_router_benchmarks import media_router_cpu_memory_metric
from telemetry.page import legacy_page_test
class MediaRouterCPUMemoryTest(legacy_page_test.LegacyPageTest):
"""Performs a measurement of Media Route CPU/memory usage."""
def __init__(self):
super(MediaRouterCPUMemoryTest, self).__init__()
self._metric = media_router_cpu_memory_metric.MediaRouterCPUMemoryMetric()
def ValidateAndMeasurePage(self, page, tab, results):
self._metric.AddResults(tab, results)
| 37.055556 | 78 | 0.803598 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | DamieFC/chromium | tools/perf/contrib/media_router_benchmarks/media_router_measurements.py | 667 | Python |
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
from op_test import OpTest
from test_elementwise_add_op import TestElementwiseAddOp
class TestElementwiseMulOp(TestElementwiseAddOp):
def init(self):
self.op_type = 'elementwise_mul'
if __name__ == '__main__':
unittest.main()
| 31.857143 | 74 | 0.765695 | [
"ECL-2.0",
"Apache-2.0"
] | Channingss/paddle2onnx | tests/test_elementwise_mul_op.py | 892 | Python |
# -*- coding: utf-8 -*-
# https://github.com/cvzi/foodemoji
import sys
import os
import timeit
try:
import foodemoji
except ImportError:
include = os.path.relpath(os.path.join(os.path.dirname(__file__), ".."))
sys.path.insert(0, include)
import foodemoji
print("Imported foodemoji from %s" % os.path.abspath(os.path.join(include, "foodemoji")))
PY2 = sys.version_info.major == 2
text = """Erbsencremesuppe
Mousse Tiramisu, Wackelpudding Kirsch (vegan)
Milch Auswahl an frischen Salaten, Gemüse-, Fisch-, Geflügel-, Schweine- und Rindfleisch- und vegetarischen und veganen Gerichten
Tagessuppe,
Fish + Chips,
Remouladensauce,
Salat der Saison
Tagessuppe,
2 Polentaschnitten mit Spinatfüllung,
Tomatensauce,
Reis,
Salat der Saison
Karotten-Ingwersuppe
Rote Grütze (vegan), Zweierlei Mousse au Chocolat
Milch
Tagessuppe,
Schweinegulasch,
Champignonsauce,
Salzkartoffeln,
Salat der Saison
Tagessuppe,
5 Cannelloni mit Ricotta-Spinat-Füllung,
Tomatensauce,
Reibekäse,
Salat der Saison
Tomatencremesuppe
Milchreis mit Kirschen, Rote Grütze (vegan)
Tagessuppe,
Feuerwurst,
Portion Senf,
Pommes frites,
Salat der Saison
Tagessuppe,
2 Kartoffelknödel,
Rahmgemüse,
Salat der Saison
Kohlrabicremesuppe Creme Brulee, Kokosmilch mit Ananas (vegan) Schlemmerbuffet je 100g Reichhaltige Auswahl an frischen Salaten, Gemüse-, Fisch-, Geflügel-, Schweine- und Rindfleisch- und vegetarischen und veganen Gerichten Erbseneintopf, Bockwurst, Kartoffeln, Brötchen, Salat der Saison, Schokopudding Tagessuppe, Asiatische Gemüseknusperschnitte, Wasabi Currysauce, Reis, Salat der Saison, Gebrannte Grießsuppe Kokosmilch mit Ananas (vegan), Mousse au Chocolat Milch (ML) Schlemmerbuffet je 100g Reichhaltige Auswahl an frischen Salaten, Gemüse-, Fisch-, Geflügel-, Schweine- und Rindfleisch- und vegetarischen und veganen Gerichten D: Tagessuppe, Schweinegeschnetzeltes, Pilzrahmsauce, Reis, Salat der Saison Tagessuppe, Knöpflepfanne "Allgäu", Käsesauce, Salat der Saison.
Brokkolicremesuppe
Sojajoghurt mit Früchten (vegan), Tiramisu
Milch (ML)
Tagessuppe,
paniertes Alaska-Seelachsfilet,
Dillmayonnaise,
Petersilienkartoffeln,
Salat der Saison
Tagessuppe,
veganes Geschnetzeltes „Züricher Art",
Reis,
Salat der Saison
"""
text_short = """Erbsencremesuppe
Mousse Tiramisu, Wackelpudding Kirsch (vegan)
Milch Auswahl an frischen Salaten, Gemüse-, Fisch-, Geflügel-, Schweine- und Rindfleisch- und vegetarischen und veganen Gerichten
Kohlrabicremesuppe Creme Brulee, Kokosmilch mit Ananas (vegan) Schlemmerbuffet je 100g Reichhaltige Auswahl an frischen Salaten, Gemüse-, Fisch-, Geflügel-, Schweine- und Rindfleisch- und vegetarischen und veganen Gerichten Erbseneintopf, Bockwurst, Kartoffeln, Brötchen, Salat der Saison, Schokopudding Tagessuppe, Asiatische Gemüseknusperschnitte, Wasabi Currysauce, Reis, Salat der Saison, Gebrannte Grießsuppe Kokosmilch mit Ananas (vegan), Mousse au Chocolat Milch (ML) Schlemmerbuffet je 100g Reichhaltige Auswahl an frischen Salaten, Gemüse-, Fisch-, Geflügel-, Schweine- und Rindfleisch- und vegetarischen und veganen Gerichten D: Tagessuppe, Schweinegeschnetzeltes, Pilzrahmsauce, Reis, Salat der Saison Tagessuppe, Knöpflepfanne "Allgäu", Käsesauce, Salat der Saison.
Salat der Saison
"""
text_one_line = "Milch Auswahl an frischen Salaten, Gemüse-, Fisch-, Geflügel-, Schweine- und Rindfleisch- und vegetarischen und veganen Gerichten"
book = ""
def _setup():
global book
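# Download Goethe's "Italienische Reise" from GITenberg once and cache it locally as a large benchmark text.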
filename = 'italienische-reise.txt'
url = 'https://github.com/GITenberg/Italienische-Reise-Band-1_2404/raw/master/2404-8.txt'
if not os.path.isfile(filename):
if PY2:
import urllib2
furl = urllib2.urlopen(url)
book = furl.read().decode('cp1252' ,errors='ignore')
furl.close()
else:
import urllib.request
with urllib.request.urlopen(url) as furl:
book = furl.read().decode('utf-8' ,errors='ignore')
with open(filename, 'wb') as fout:
fout.write(book.encode('utf-8'))
else:
with open(filename, 'rb') as fin:
book = fin.read().decode('utf-8')
def test_long_text_100():
x = foodemoji.decorate(text)
return x[0] == text[0]
def test_long_text_linebyline_100():
x = foodemoji.decorate(text, line_by_line=True)
return x[0] == text[0]
def test_short_text_300():
x = foodemoji.decorate(text_short)
return x[0] == text_short[0]
def test_short_text_linebyline_300():
x = foodemoji.decorate(text_short, line_by_line=True)
return x[0] == text_short[0]
def test_one_line_1000():
x = foodemoji.decorate(text_one_line)
return x[0] == text_one_line[0]
def test_one_line_linebyline_1000():
x = foodemoji.decorate(text_one_line, line_by_line=True)
return x[0] == text_one_line[0]
def test_book_2():
x = foodemoji.decorate(book)
return x[0] == book[0]
def test_book_linebyline_2():
x = foodemoji.decorate(book, line_by_line=True)
return x[0] == book[0]
_setup()
if __name__ == '__main__':
for fname in sorted(list(globals().keys())):
if fname.startswith('test_'):
if fname.split('_')[-1].isdigit():
N = int(fname.split('_')[-1])
else:
N = 100
print("% 6dx\t\t%s():" % (N, fname))
t = timeit.timeit('speed.%s()' % fname, setup='import speed', number=N)
print("{:25.20f}".format(t))
| 36.952381 | 780 | 0.726804 | [
"MIT"
] | cvzi/foodemoji | tests/speed.py | 5,469 | Python |
import argparse
import scipy
from scipy import ndimage
import numpy as np
import sys
import re
from packaging import version
import torch
from torch.autograd import Variable
import torchvision.models as models
import torch.nn.functional as F
from torch.utils import data, model_zoo
from model.deeplab import Res_Deeplab
from model.deeplab_multi import DeeplabMulti
from model.deeplab_vgg import DeeplabVGG
from dataset.dark_zurich_dataset import DarkZurichDataSet
import os
from PIL import Image
from utils.tool import fliplr
import matplotlib.pyplot as plt
import torch.nn as nn
import yaml
import imageio as iio
torch.backends.cudnn.benchmark=True
IMG_MEAN = np.array((104.00698793,116.66876762,122.67891434), dtype=np.float32)
DATA_DIRECTORY = './data/Cityscapes/data'
DATA_LIST_PATH = './dataset/cityscapes_list/train.txt'
SAVE_PATH = './data/Dark_zurich/data/pseudo_ohl-1/test'
if not os.path.isdir('./data/Dark_zurich/data/pseudo_ohl-1/'):
os.makedirs('./data/Dark_zurich/data/pseudo_ohl-1/')
os.makedirs(SAVE_PATH)
IGNORE_LABEL = 255
NUM_CLASSES = 19
RESTORE_FROM = 'http://vllab.ucmerced.edu/ytsai/CVPR18/GTA2Cityscapes_multi-ed35151c.pth'
RESTORE_FROM_VGG = 'http://vllab.ucmerced.edu/ytsai/CVPR18/GTA2Cityscapes_vgg-ac4ac9f6.pth'
RESTORE_FROM_ORC = 'http://vllab1.ucmerced.edu/~whung/adaptSeg/cityscapes_oracle-b7b9934.pth'
SET = 'train' # We generate pseudo label for training set
INPUT_SIZE = '800,512'
MODEL = 'DeeplabMulti'
palette = [128, 64, 128, 244, 35, 232, 70, 70, 70, 102, 102, 156, 190, 153, 153, 153, 153, 153, 250, 170, 30,
220, 220, 0, 107, 142, 35, 152, 251, 152, 70, 130, 180, 220, 20, 60, 255, 0, 0, 0, 0, 142, 0, 0, 70,
0, 60, 100, 0, 80, 100, 0, 0, 230, 119, 11, 32]
zero_pad = 256 * 3 - len(palette)
for i in range(zero_pad):
palette.append(0)
def colorize_mask(mask):
# mask: numpy array of the mask
new_mask = Image.fromarray(mask.astype(np.uint8)).convert('P')
new_mask.putpalette(palette)
return new_mask
def get_arguments():
"""Parse all the arguments provided from the CLI.
Returns:
A list of parsed arguments.
"""
parser = argparse.ArgumentParser(description="DeepLab-ResNet Network")
parser.add_argument("--model", type=str, default=MODEL,
help="Model Choice (DeeplabMulti/DeeplabVGG/Oracle).")
parser.add_argument("--data-dir", type=str, default=DATA_DIRECTORY,
help="Path to the directory containing the Cityscapes dataset.")
parser.add_argument("--data-list", type=str, default=DATA_LIST_PATH,
help="Path to the file listing the images in the dataset.")
parser.add_argument("--ignore-label", type=int, default=IGNORE_LABEL,
help="The index of the label to ignore during the training.")
parser.add_argument("--num-classes", type=int, default=NUM_CLASSES,
help="Number of classes to predict (including background).")
parser.add_argument("--restore-from", type=str, default=RESTORE_FROM,
help="Where restore model parameters from.")
parser.add_argument("--gpu", type=int, default=0,
help="choose gpu device.")
parser.add_argument("--batchsize", type=int, default=4,
help="choose gpu device.")
parser.add_argument("--set", type=str, default=SET,
help="choose evaluation set.")
parser.add_argument("--save", type=str, default=SAVE_PATH,
help="Path to save result.")
parser.add_argument("--input-size", type=str, default=INPUT_SIZE,
help="Comma-separated string with height and width of source images.")
return parser.parse_args()
def save_heatmap(output_name):
output, name = output_name
fig = plt.figure()
plt.axis('off')
heatmap = plt.imshow(output, cmap='viridis')
fig.colorbar(heatmap)
fig.savefig('%s_heatmap.png' % (name.split('.jpg')[0]))
return
def main():
"""Create the model and start the evaluation process."""
args = get_arguments()
w, h = map(int, args.input_size.split(','))
config_path = os.path.join(os.path.dirname(args.restore_from),'opts.yaml')
with open(config_path, 'r') as stream:
config = yaml.load(stream)
args.model = config['model']
print('ModelType:%s'%args.model)
print('NormType:%s'%config['norm_style'])
gpu0 = args.gpu
batchsize = args.batchsize
model_name = os.path.basename( os.path.dirname(args.restore_from) )
#args.save += model_name
if not os.path.exists(args.save):
os.makedirs(args.save)
confidence_path = os.path.join(args.save, 'submit/confidence')
label_path = os.path.join(args.save, 'submit/labelTrainIds')
label_invalid_path = os.path.join(args.save, 'submit/labelTrainIds_invalid')
for path in [confidence_path, label_path, label_invalid_path]:
if not os.path.exists(path):
os.makedirs(path)
if args.model == 'DeepLab':
model = DeeplabMulti(num_classes=args.num_classes, use_se = config['use_se'], train_bn = False, norm_style = config['norm_style'])
elif args.model == 'Oracle':
model = Res_Deeplab(num_classes=args.num_classes)
if args.restore_from == RESTORE_FROM:
args.restore_from = RESTORE_FROM_ORC
elif args.model == 'DeeplabVGG':
model = DeeplabVGG(num_classes=args.num_classes)
if args.restore_from == RESTORE_FROM:
args.restore_from = RESTORE_FROM_VGG
if args.restore_from[:4] == 'http' :
saved_state_dict = model_zoo.load_url(args.restore_from)
else:
saved_state_dict = torch.load(args.restore_from)
try:
model.load_state_dict(saved_state_dict)
except:
model = torch.nn.DataParallel(model)
model.load_state_dict(saved_state_dict)
model.eval()
model.cuda(gpu0)
testloader = data.DataLoader(DarkZurichDataSet(args.data_dir, args.data_list, crop_size=(h, w), resize_size=(w, h), mean=IMG_MEAN, scale=False, mirror=False, set=args.set),
batch_size=batchsize, shuffle=False, pin_memory=True, num_workers=4)
scale = 1.25
testloader2 = data.DataLoader(DarkZurichDataSet(args.data_dir, args.data_list, crop_size=(round(h*scale), round(w*scale) ), resize_size=( round(w*scale), round(h*scale)), mean=IMG_MEAN, scale=False, mirror=False, set=args.set),
batch_size=batchsize, shuffle=False, pin_memory=True, num_workers=4)
if version.parse(torch.__version__) >= version.parse('0.4.0'):
interp = nn.Upsample(size=(1080, 1920), mode='bilinear', align_corners=True)
else:
interp = nn.Upsample(size=(1080, 1920), mode='bilinear')
sm = torch.nn.Softmax(dim = 1)
log_sm = torch.nn.LogSoftmax(dim = 1)
kl_distance = nn.KLDivLoss( reduction = 'none')
prior = np.load('./utils/prior_all.npy').transpose((2,0,1))[np.newaxis, :, :, :]
prior = torch.from_numpy(prior)
for index, img_data in enumerate(zip(testloader, testloader2) ):
batch, batch2 = img_data
image, _, name = batch
image2, _, name2 = batch2
inputs = image.cuda()
inputs2 = image2.cuda()
print('\r>>>>Extracting feature...%04d/%04d'%(index*batchsize, args.batchsize*len(testloader)), end='')
if args.model == 'DeepLab':
with torch.no_grad():
output1, output2 = model(inputs)
output_batch = interp(sm(0.5* output1 + output2))
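# Per-pixel uncertainty map: KL divergence between the two classifier heads, summed over classes.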
heatmap_batch = torch.sum(kl_distance(log_sm(output1), sm(output2)), dim=1)
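# Test-time augmentation: accumulate predictions over the horizontally flipped image and a 1.25x-scaled input.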
output1, output2 = model(fliplr(inputs))
output1, output2 = fliplr(output1), fliplr(output2)
output_batch += interp(sm(0.5 * output1 + output2))
del output1, output2, inputs
output1, output2 = model(inputs2)
output_batch += interp(sm(0.5* output1 + output2))
output1, output2 = model(fliplr(inputs2))
output1, output2 = fliplr(output1), fliplr(output2)
output_batch += interp(sm(0.5 * output1 + output2))
del output1, output2, inputs2
ratio = 0.95
output_batch = output_batch.cpu() / 4
# output_batch = output_batch *(ratio + (1 - ratio) * prior)
output_batch = output_batch.data.numpy()
heatmap_batch = heatmap_batch.cpu().data.numpy()
elif args.model == 'DeeplabVGG' or args.model == 'Oracle':
output_batch = model(Variable(image).cuda())
output_batch = interp(output_batch).cpu().data.numpy()
output_batch = output_batch.transpose(0,2,3,1)
score_batch = np.max(output_batch, axis=3)
output_batch = np.asarray(np.argmax(output_batch, axis=3), dtype=np.uint8)
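# Confidence threshold below which pixels are marked as invalid (label 255) in the submitted pseudo labels.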
threshold = 0.3274
for i in range(output_batch.shape[0]):
output_single = output_batch[i,:,:]
output_col = colorize_mask(output_single)
output = Image.fromarray(output_single)
name_tmp = name[i].split('/')[-1]
dir_name = name[i].split('/')[-2]
save_path = args.save + '/' + dir_name
if not os.path.isdir(save_path):
os.mkdir(save_path)
output.save('%s/%s' % (save_path, name_tmp))
print('%s/%s' % (save_path, name_tmp))
output_col.save('%s/%s_color.png' % (save_path, name_tmp.split('.')[0]))
# heatmap_tmp = heatmap_batch[i,:,:]/np.max(heatmap_batch[i,:,:])
# fig = plt.figure()
# plt.axis('off')
# heatmap = plt.imshow(heatmap_tmp, cmap='viridis')
# fig.colorbar(heatmap)
# fig.savefig('%s/%s_heatmap.png' % (save_path, name_tmp.split('.')[0]))
if args.set == 'test' or args.set == 'val':
# label
output.save('%s/%s' % (label_path, name_tmp))
# label invalid
output_single[score_batch[i, :, :] < threshold] = 255
output = Image.fromarray(output_single)
output.save('%s/%s' % (label_invalid_path, name_tmp))
# conficence
confidence = score_batch[i, :, :] * 65535
confidence = np.asarray(confidence, dtype=np.uint16)
print(confidence.min(), confidence.max())
iio.imwrite('%s/%s' % (confidence_path, name_tmp), confidence)
return args.save
if __name__ == '__main__':
with torch.no_grad():
save_path = main()
#os.system('python compute_iou.py ./data/Cityscapes/data/gtFine/train %s'%save_path)
| 42.207843 | 231 | 0.632816 | [
"MIT"
] | qimw/UACDA | generate_plabel_dark_zurich.py | 10,763 | Python |
"""
WSGI config for colaboradados_django project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'colaboradados_django.settings')
application = get_wsgi_application()
| 23.222222 | 80 | 0.796651 | [
"MIT"
] | dennys-bd/colaboradados_django | colaboradados_django/wsgi.py | 418 | Python |