max_stars_repo_path (stringlengths 4-245) | max_stars_repo_name (stringlengths 7-115) | max_stars_count (int64 101-368k) | id (stringlengths 2-8) | content (stringlengths 6-1.03M) |
---|---|---|---|---|
tests/test_field_defs.py | pombredanne/django-mutant | 152 | 11118540 | from __future__ import unicode_literals
import warnings
from django.apps.registry import Apps
from django.core.exceptions import ValidationError
from django.test import SimpleTestCase
from mutant.contrib.numeric.models import IntegerFieldDefinition
from mutant.contrib.text.models import CharFieldDefinition
from mutant.models.field import (
FieldDefinition, FieldDefinitionChoice, NOT_PROVIDED,
)
from .utils import BaseModelDefinitionTestCase
class FieldDefinitionInheritanceTest(BaseModelDefinitionTestCase):
def test_proxy_inheritance(self):
obj = CharFieldDefinition.objects.create(
name='caca',
max_length=25,
model_def=self.model_def,
)
save_obj = self.model_def.fielddefinitions.select_subclasses().get()
self.assertEqual(obj, save_obj)
Model = self.model_def.model_class()
Model.objects.create(caca="NO WAY")
class FieldDefinitionDeclarationTest(SimpleTestCase):
def test_delete_override(self):
"""
Make sure a warning is raised when declaring a `FieldDefinition`
        subclass that overrides the `delete` method.
"""
test_apps = Apps()
with warnings.catch_warnings(record=True) as catched_warnings:
class CustomFieldDefinition(FieldDefinition):
class Meta:
apps = test_apps
app_label = 'mutant'
def delete(self, *args, **kwargs):
pass
msg = 'Avoid overriding the `delete` method on `FieldDefinition` subclass `CustomFieldDefinition`'
self.assertTrue(any(msg in warning.message.args[0] for warning in catched_warnings))
def module_level_pickable_default():
module_level_pickable_default.incr += 1
return module_level_pickable_default.incr
module_level_pickable_default.incr = 0
class FieldDefaultTest(BaseModelDefinitionTestCase):
def test_clean(self):
field = IntegerFieldDefinition(name='field', model_def=self.model_def)
# Field cleaning should work when a default value isn't provided
field.clean()
with self.assertRaises(ValidationError):
field.default = 'invalid'
field.clean()
field.default = module_level_pickable_default
field.clean()
field.save()
Model = self.model_def.model_class()
self.assertEqual(Model.objects.create().field,
module_level_pickable_default.incr)
field.default = NOT_PROVIDED()
field.save()
with self.assertRaises(ValidationError):
obj = Model()
obj.field
obj.full_clean()
def test_create_with_default(self):
Model = self.model_def.model_class()
Model.objects.create()
IntegerFieldDefinition.objects.create_with_default(1337, name='field',
model_def=self.model_def)
before = Model.objects.get()
self.assertEqual(before.field, 1337)
self.assertFalse(Model().field)
class FieldDefinitionChoiceTest(BaseModelDefinitionTestCase):
def test_simple_choices(self):
field_def = CharFieldDefinition.objects.create(name='gender',
max_length=1,
model_def=self.model_def)
male_choice = FieldDefinitionChoice(field_def=field_def,
value='Male', label='Male')
# Value is longer than the max_length
self.assertRaises(ValidationError, male_choice.clean)
# A length of 1 should work
male_choice.value = 'M'
male_choice.full_clean()
male_choice.save()
# Cleaning should raise validation error when passed invalid choice
Model = self.model_def.model_class()
obj = Model(gender='T')
self.assertRaises(ValidationError, obj.full_clean)
# Create another allowed choice
female_choice = FieldDefinitionChoice(field_def=field_def,
value='F', label='Female')
female_choice.value = 'F'
female_choice.full_clean()
female_choice.save()
# It should now be possible to create valid objects with this choice
obj = Model(gender='F')
obj.full_clean()
# Make sure choices are correctly set
choices = Model._meta.get_field('gender').get_choices(include_blank=False)
self.assertEqual(choices, [('M', 'Male'), ('F', 'Female')])
def test_grouped_choices(self):
field_def = CharFieldDefinition.objects.create(name='media',
max_length=5,
model_def=self.model_def)
# Create Audio choices
FieldDefinitionChoice.objects.create(field_def=field_def, group='Audio',
value='vinyl', label='Vinyl')
FieldDefinitionChoice.objects.create(field_def=field_def, group='Audio',
value='cd', label='CD')
# Create Video choices
FieldDefinitionChoice.objects.create(field_def=field_def, group='Video',
value='vhs', label='VHS Tape')
FieldDefinitionChoice.objects.create(field_def=field_def, group='Video',
value='dvd', label='DVD')
# Create Unknown choices
FieldDefinitionChoice.objects.create(field_def=field_def,
value='unknown', label='Unknown')
# Make sure choices are correctly created
Model = self.model_def.model_class()
choices = Model._meta.get_field('media').get_choices(include_blank=False)
expected_choices = [
('Audio', (
('vinyl', 'Vinyl'),
('cd', 'CD'),
)),
('Video', (
('vhs', 'VHS Tape'),
('dvd', 'DVD'),
)),
('unknown', 'Unknown')
]
self.assertEqual(choices, expected_choices)
class FieldDefinitionManagerTest(BaseModelDefinitionTestCase):
def test_natural_key(self):
fd = CharFieldDefinition.objects.create(name='name', max_length=5,
model_def=self.model_def)
natural_key = fd.natural_key()
self.assertEqual(
FieldDefinition.objects.get_by_natural_key(*natural_key), fd
)
|
cami/scripts/objects.py | hugmyndakassi/hvmi | 677 | 11118543 |
#
# Copyright (c) 2020 Bitdefender
# SPDX-License-Identifier: Apache-2.0
#
import struct
import yaml
from intro_defines import defines, section_hints
class CamiYAMLObject(yaml.YAMLObject):
"""
    Every object created from a .yaml file should extend this class.
    Please do not override the __setstate__ method!!! Instead, implement post_create.
"""
def __setstate__(self, state):
self.__dict__.update(state)
self.post_create(state)
_save_object(self)
# overwritable
def post_create(self, state):
pass
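# A minimal sketch of the extension pattern described in the docstring above;
# the `OsVersion` class, its yaml_tag and its fields are hypothetical examples,
# not part of this file.
#
# class OsVersion(CamiYAMLObject):
#     yaml_tag = "!os_version"
#
#     def post_create(self, state):
#         # called after __setstate__ has copied the YAML mapping into __dict__
#         self.display_name = "%s %s" % (state.get("name"), state.get("version"))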
__objects = {}
def _save_object(obj):
tp = type(obj)
if tp in __objects:
if obj not in __objects[tp]:
__objects[tp].append(obj)
else:
__objects[tp] = [obj]
def get_all_objects(tp):
try:
return __objects[tp]
except KeyError:
return []
class CamiParsableObject:
def serialize(self, start):
raise NotImplementedError
class FilePointerException(Exception):
pass
class CamiObject(CamiParsableObject):
_file_pointer = None
def set_file_pointer(self, file_pointer):
if self._file_pointer is not None:
raise FilePointerException("File pointer already set!")
self._file_pointer = file_pointer
def get_file_pointer(self):
if self._file_pointer is None:
raise FilePointerException("File pointer not set!")
return self._file_pointer
class FileHeader(CamiObject):
"""
struct _CAMI_HEADER
{
DWORD Magic;
struct
{
DWORD Minor;
DWORD Major;
} Version;
DWORD BuildNumber;
DWORD FileSize;
DWORD NumberOfSections;
DWORD PointerToSectionsHeaders;
}
"""
struct_layout = "<IIIIIII"
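    # The "<IIIIIII" format string mirrors the _CAMI_HEADER struct above: seven
    # little-endian unsigned 32-bit DWORDs (magic, version minor, version major,
    # build number, file size, section count, pointer to section headers), so
    # struct.calcsize(struct_layout) is 28 bytes.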
def __init__(self, buildnumber, version):
self.magic = defines["CAMI_MAGIC_WORD"]
self.buildnumber = buildnumber
self.version = version
self.filesize = 0
def set_sections(self, sections):
self.sections = sections
def get_binary_size(self):
return struct.calcsize(self.struct_layout)
def get_binary(self):
return struct.pack(
self.struct_layout,
self.magic,
self.version[1],
self.version[0],
self.buildnumber,
self.filesize,
self.sections.get_entry_count(),
self.sections.get_file_pointer(),
)
def serialize(self, start):
self.set_file_pointer(start)
data = self.sections.serialize(start + self.get_binary_size())
self.filesize = self.get_binary_size() + len(data)
return self.get_binary() + data
class SectionsTable(CamiObject):
"""
struct _CAMI_SECTION_HEADER
{
DWORD Hint;
DWORD EntryCount;
DWORD _Reserved;
DWORD DescriptorTable;
}
"""
entry_layout = "<IIII"
def create_entry(self, hint, entry_count, data_table):
""" Generate a sections table entry
Args:
hint: The section hint. Must be a combination of values from intro_defines.section_hints dict.
entry_count: How many entries are in the CamiDataTable
data_table: CamiDataTable with entries describing section data
Returns:
bytes: args packed in a binary form.
"""
return struct.pack(self.entry_layout, hint, entry_count, 0, data_table)
def __init__(self):
self._sections = []
def add_section(self, section):
if section in self._sections:
raise Exception("Section is already in section_table")
self._sections.append(section)
def get_entry_count(self):
return len(self._sections)
def get_binary_size(self):
return struct.calcsize(self.entry_layout) * self.get_entry_count()
def get_binary(self):
rtr = bytes()
for section in self._sections:
rtr += self.create_entry(section.section_hint, section.get_entry_count(), section.get_file_pointer())
return rtr
def serialize(self, start):
self.set_file_pointer(start)
start += self.get_binary_size()
data = bytes()
for section in self._sections:
data += section.serialize(start + len(data))
return self.get_binary() + data
class CamiAtom(CamiParsableObject):
""" This is an abstract class which describes a CamiDataTable entry"""
def get_descriptor(self):
raise NotImplementedError
class CamiDataTable(CamiObject):
def __init__(self, entries=[]):
if not issubclass(self.entry_type, CamiAtom):
raise Exception("CamiDataTable entry must be a CamiAtom")
self._entries = []
self.set_entries(entries)
def process_list(self):
"""
This is an abstract method which is called before serializing the list.
"""
pass
def _check_type(self, obj):
if not issubclass(type(obj), self.entry_type):
raise Exception("Invalid object type. Expected %s (or a subclass), got %s." % (self.entry_type, type(obj)))
def set_entries(self, entries):
for entry in entries:
self._check_type(entry)
self._entries = []
self._entries.extend(entries)
def get_entries(self):
return self._entries
def add_entry(self, entry):
self._check_type(entry)
self._entries.append(entry)
def get_entry_count(self):
return len(self._entries)
def get_binary_size(self):
return len(self._entries) * struct.calcsize(self.entry_type.descriptor_layout)
def get_binary(self):
rtr = bytes()
for entry in self._entries:
rtr += entry.get_descriptor()
return rtr
def __eq__(self, other):
return type(self) == type(other) and self._entries == other._entries
def __repr__(self):
r = self.__class__.__name__ + "(0x%lx)" % id(self) + " : "
if not self._entries:
r += "Empty"
for entry in self._entries:
r += "<" + repr(entry) + "> "
return r
def serialize(self, start):
try:
self.set_file_pointer(start)
except FilePointerException:
return bytes()
self.process_list()
start += self.get_binary_size()
raw = bytes()
for entry in self._entries:
raw += entry.serialize(start + len(raw))
return self.get_binary() + raw
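# A minimal sketch of how the classes above are meant to fit together when
# producing a CAMI binary; `my_section` stands in for a concrete CamiDataTable
# subclass (with a section_hint and entries) defined elsewhere, and the output
# file name is only an example.
#
# sections = SectionsTable()
# sections.add_section(my_section)
# header = FileHeader(buildnumber=1, version=(1, 0))
# header.set_sections(sections)
# blob = header.serialize(0)   # header + section headers + serialized section data
# with open("cami.bin", "wb") as f:
#     f.write(blob)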
|
tools/mo/openvino/tools/mo/front/caffe/elu.py | ryanloney/openvino-1 | 1,127 | 11118558 |
# Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
from openvino.tools.mo.ops.activation_ops import Elu
from openvino.tools.mo.front.caffe.collect_attributes import collect_attributes
from openvino.tools.mo.front.extractor import FrontExtractorOp
class ELUFrontExtractor(FrontExtractorOp):
op = 'ELU'
enabled = True
@classmethod
def extract(cls, node):
param = node.pb.elu_param
attrs = collect_attributes(param)
Elu.update_node_stat(node, attrs)
return cls.enabled
|
realtime_voice_conversion/stream/decode_stream.py | karanokuri/realtime-yukarin | 296 | 11118609 |
import numpy
from yukarin.acoustic_feature import AcousticFeature
from ..segment.feature_segment import FeatureSegmentMethod
from ..segment.wave_segment import WaveSegmentMethod
from ..stream.base_stream import BaseStream
from ..yukarin_wrapper.vocoder import Vocoder
class DecodeStream(BaseStream[AcousticFeature, numpy.ndarray]):
def __init__(
self,
vocoder: Vocoder,
):
super().__init__(
in_segment_method=FeatureSegmentMethod(
sampling_rate=1000 // vocoder.acoustic_param.frame_period,
wave_sampling_rate=vocoder.out_sampling_rate,
order=vocoder.acoustic_param.order,
),
out_segment_method=WaveSegmentMethod(
sampling_rate=vocoder.out_sampling_rate,
),
)
self.vocoder = vocoder
def process(self, start_time: float, time_length: float, extra_time: float) -> numpy.ndarray:
out_feature = self.fetch(
start_time=start_time,
time_length=time_length,
extra_time=extra_time,
)
wave = self.vocoder.decode(
acoustic_feature=out_feature,
).wave
wave[numpy.isnan(wave)] = 0
return wave
|
python/maths/Ugly Number/nth_ugly_number.py | SounakMandal/AlgoBook | 191 | 11118630 | def ugly_num(num):
while num != 1:
if num % 2 == 0:
num//=2
elif num % 3 == 0:
num//=3
elif num % 5 == 0:
num//=5
else:
return num
return num
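# ugly_num repeatedly strips the factors 2, 3 and 5; the result is 1 exactly
# when the input is an ugly number, otherwise the leftover part that is not
# divisible by 2, 3 or 5 is returned. For example:
#   ugly_num(12) -> 1  (12 = 2*2*3, so 12 is ugly)
#   ugly_num(14) -> 7  (7 is not divisible by 2, 3 or 5, so 14 is not ugly)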
N = int(input("Enter the Nth Ugly Number to be displayed"))
i=0
num=1
while N != i:
if (ugly_num(num) == 1):
i+=1
m=num
num+=1
print(m)
|
vilya/views/api/projects.py | mubashshirjamal/code | 1,582 | 11118679 | # -*- coding: utf-8 -*-
import json
from quixote.errors import TraversalError
from vilya.models.project import CodeDoubanProject
from vilya.models.team import Team
from vilya.views.api.utils import jsonize
_q_exports = []
def _q_index(request):
sortby = request.get_form_var('sortby')
if sortby in CodeDoubanProject.PROJECTS_SORT_BYS:
project_list = CodeDoubanProject.get_projects(sortby=sortby)
else:
project_list = CodeDoubanProject.get_projects()
team_uid = request.get_form_var('by_dept', '')
team = Team.get_by_uid(team_uid)
if team:
project_ids = team.project_ids
project_list = (CodeDoubanProject.gets(project_ids)
if project_ids else [])
without_commits = request.get_form_var('without_commits') or False
data = {}
data['projects'] = [project.get_info(
without_commits=without_commits) for project in project_list]
return json.dumps(data)
def _q_lookup(request, name):
return ProjectUI(request, name)
class ProjectUI:
_q_exports = []
def __init__(self, request, name):
self.name = name
self._project = CodeDoubanProject.get_by_name(name)
def __call__(self, request):
return self._index(request)
def _q_index(self, request):
return self._index(request)
@jsonize
def _index(self, request):
if not self._project:
raise TraversalError()
dic = self._project.as_dict()
dic['readme'] = self._project.readme
return dic
def _q_lookup(self, request, name):
self.name = self.name + "/" + name
project = CodeDoubanProject.get_by_name(self.name)
if not project:
raise TraversalError()
return json.dumps(project.as_dict())
|
cupyx/_gufunc.py | prkhrsrvstv1/cupy | 6,180 | 11118683 |
import cupy
class GeneralizedUFunc(cupy._core._gufuncs._GUFunc):
__doc__ = cupy._core._gufuncs._GUFunc.__doc__
|
dictionary/utils/admin.py | ankitgc1/django-sozluk-master | 248 | 11118704 |
from functools import wraps
from django.contrib.admin.models import CHANGE, LogEntry
from django.contrib.contenttypes.models import ContentType
from django.shortcuts import redirect, reverse
# Admin site specific utilities
def log_admin(msg, authorizer, model_type, model_object, flag=CHANGE):
LogEntry.objects.log_action(
user_id=authorizer.id,
content_type_id=ContentType.objects.get_for_model(model_type).pk,
object_id=model_object.id,
object_repr=str(model_object),
change_message=msg,
action_flag=flag,
)
def logentry_instance(msg, authorizer, model_type, model_object, flag=CHANGE):
return LogEntry(
user_id=authorizer.pk,
content_type=ContentType.objects.get_for_model(model_type),
object_id=model_object.pk,
object_repr=str(model_object),
change_message=msg,
action_flag=flag,
)
def logentry_bulk_create(*logentry_instances):
LogEntry.objects.bulk_create(*logentry_instances)
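# A hypothetical usage sketch for the helpers above (the model and message are
# examples only, not part of this module):
#
# entries = [logentry_instance("Archived", request.user, Entry, e) for e in queryset]
# logentry_bulk_create(entries)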
class IntermediateActionHandler:
def __init__(self, queryset, url_name):
self.queryset = queryset
self.url_name = url_name
def get_source_list(self):
return "-".join(map(str, self.queryset.values_list("id", flat=True)))
@property
def redirect_url(self):
return redirect(reverse(self.url_name) + f"?source_list={self.get_source_list()}")
def intermediate(action):
"""
Decorator for admin actions with intermediate pages (IntermediateActionView).
The decorated action should return the name of the url.
"""
@wraps(action)
def decorator(model_admin, request, queryset):
view_name = action(model_admin, request, queryset)
handler = IntermediateActionHandler(queryset, view_name)
return handler.redirect_url
return decorator
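# A minimal usage sketch for the decorator above; the admin class, the action
# name and the "admin:entry-move" URL name are hypothetical examples.
#
# class EntryAdmin(admin.ModelAdmin):
#     actions = ("move_entries",)
#
#     @intermediate
#     def move_entries(self, request, queryset):
#         # The decorator redirects to this URL with
#         # ?source_list=<id1>-<id2>-... appended by IntermediateActionHandler.
#         return "admin:entry-move"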
|
LeetCode/python3/216.py | ZintrulCre/LeetCode_Archiver | 279 | 11118777 |
class Solution:
def combinationSum3(self, k: int, n: int) -> List[List[int]]:
ret = []
self.Backtrack(k, n, [], ret, 1)
return ret
def Backtrack(self, k, sum, combination, ret, m):
if k == 0:
if sum == 0:
ret.append(combination)
return combination
for i in range(m, 10):
if sum - i < 0:
break
combination.append(i)
combination = self.Backtrack(k - 1, sum - i, combination, ret, i + 1)
combination = combination[:-1]
        return combination
|
skfda/preprocessing/__init__.py | jiduque/scikit-fda | 147 | 11118784 |
from . import registration
from . import smoothing
from . import dim_reduction
|
tinderbotz/helpers/constants_helper.py | cr4zy8/TinderBotz | 202 | 11118790 | import enum
# Using the enum class, create enumerations
class Socials(enum.Enum):
SNAPCHAT = "snapchat"
INSTAGRAM = "instagram"
PHONENUMBER = "phone"
FACEBOOK = "facebook"
class Sexuality(enum.Enum):
MEN = "Men"
WOMEN = "Women"
EVERYONE = "Everyone"
class Language(enum.Enum):
ENGLISH = "English"
AFRIKAANS = "Afrikaans"
ARABIC = "Arabic"
BULGARIAN = "Bulgarian"
BOSNIAN = "Bosnian"
CROATIAN = "Croatian"
CZECH = "Czech"
DANISH = "Danish"
DUTCH = "Dutch"
ESTONIAN = "Estonian"
FINNISH = "Finnish"
FRENCH = "French"
GEORGIAN = "Georgian"
GERMAN = "German"
GREEK = "Greek"
HINDI = "Hindi"
HUNGARIAN = "Hungarian"
INDONESIAN = "Indonesian"
ITALIAN = "Italian"
JAPANESE = "Japanese"
KOREAN = "Korean"
LATVIAN = "Latvian"
LITHUANIAN = "Lithuanian"
MACEDONIAN = "Macedonian"
MALAY = "Malay"
POLISH = "Polish"
PORTUGUESE = "Portuguese"
ROMANIAN = "Romanian"
RUSSIAN = "Russian"
SERBIAN = "Serbian"
SPANISH = "Spanish"
SLOVAK = "Slovak"
SLOVENIAN = "Slovenian"
SWEDISH = "Swedish"
TAMIL = "Tamil"
THAI = "Thai"
TURKISH = "Turkish"
UKRAINIAN = "Ukrainian"
VIETNAMESE = "Vietnamese"
class Printouts(enum.Enum):
BANNER = '''
_____ _ _ _ _
|_ _(_)_ __ __| | ___ _ __| |__ ___ | |_ ____
| | | | '_ \ / _` |/ _ \ '__| '_ \ / _ \| __|_ /
| | | | | | | (_| | __/ | | |_) | (_) | |_ / /
|_| |_|_| |_|\__,_|\___|_| |_.__/ \___/ \__/___|
----------------------------------------------------'''
EXPLANATION = '''
Hi guys,
This code is opensource and available on GitHub.
repository: https://github.com/frederikme/TinderBotz
If you find the code useful, it would mean a lot if you can star the repository to show your appreciation.
If you're interested in learning how to write these bots yourself,
I will be making tutorials about python selenium automation soon.
youtube_channel: https://www.youtube.com/channel/UC1i3N9R9XYxt5Imi-auLPuA
tutorials that will be coming:
1. Scraping news on websites -> For absolute beginners, as an intro to selenium
2. A simplified Tinderbot -> For beginners
3. Writing an automated chess bot to play on Chess.com using stockfish (currently a private repository). -> Advanced
Have a nice day,
Frederikme
'''
|
tests/test_utils/test_alias_multinomial.py | mitming/mmselfsup | 355 | 11118846 |
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmselfsup.utils import AliasMethod
def test_alias_multinomial():
example_in = torch.Tensor([1, 2, 3, 4])
example_alias_method = AliasMethod(example_in)
assert (example_alias_method.prob.numpy() <= 1).all()
assert len(example_in) == len(example_alias_method.alias)
    # test assertion if N is smaller than or equal to 0
with pytest.raises(AssertionError):
example_alias_method.draw(-1)
with pytest.raises(AssertionError):
example_alias_method.draw(0)
example_res = example_alias_method.draw(5)
assert len(example_res) == 5
|
examples/human_based/run_test_TLO.py | JokerHB/mealpy | 162 | 11118853 | #!/usr/bin/env python
# ------------------------------------------------------------------------------------------------------%
# Created by "<NAME>" at 14:11, 07/06/2020 %
# %
# Email: <EMAIL> %
# Homepage: https://www.researchgate.net/profile/Thieu_Nguyen6 %
# Github: https://github.com/thieu1995 %
#-------------------------------------------------------------------------------------------------------%
from opfunu.cec_basic.cec2014_nobias import *
from mealpy.human_based.TLO import BaseTLO, OriginalTLO, ITLO
# Setting parameters
obj_func = F5
verbose = True
epoch = 100
pop_size = 50
# A - Different way to provide lower bound and upper bound. Here are some examples:
## 1. When you have different lower bound and upper bound for each parameters
lb1 = [-3, -5, 1]
ub1 = [5, 10, 100]
# md1 = BaseTLO(obj_func, lb1, ub1, verbose, epoch, pop_size)
# best_pos1, best_fit1, list_loss1 = md1.train()
# print(md1.solution[1])
#
# ## 2. When you have same lower bound and upper bound for each parameters, then you can use:
# ## + int or float: then you need to specify your problem size (number of dimensions)
# problemSize = 10
# lb2 = -5
# ub2 = 10
# md2 = BaseTLO(obj_func, lb2, ub2, verbose, epoch, pop_size, problem_size=problemSize) # Remember the keyword "problem_size"
# best_pos1, best_fit1, list_loss1 = md2.train()
# print(md2.solution[1])
#
# ## + array: 2 ways
# lb3 = [-5]
# ub3 = [10]
# md3 = BaseTLO(obj_func, lb3, ub3, verbose, epoch, pop_size, problem_size=problemSize) # Remember the keyword "problem_size"
# best_pos1, best_fit1, list_loss1 = md3.train()
# print(md3.solution[1])
#
# lb4 = [-5] * problemSize
# ub4 = [10] * problemSize
# md4 = BaseTLO(obj_func, lb4, ub4, verbose, epoch, pop_size) # No need the keyword "problem_size"
# best_pos1, best_fit1, list_loss1 = md4.train()
# print(md4.solution[1])
# # B - Test with algorithm has batch size idea
#
#
# # C - Test with different variants of this algorithm
#
# md1 = OriginalTLO(obj_func, lb1, ub1, verbose, epoch, pop_size)
# best_pos1, best_fit1, list_loss1 = md1.train()
# print(md1.solution[0])
# print(md1.solution[1])
# print(md1.loss_train)
md1 = ITLO(obj_func, lb1, ub1, verbose, epoch, pop_size)
best_pos1, best_fit1, list_loss1 = md1.train()
print(md1.solution[0])
print(md1.solution[1])
print(md1.loss_train)
|
Trakttv.bundle/Contents/Libraries/Shared/trakt_sync/differ/handlers/list.py | disrupted/Trakttv.bundle | 1,346 | 11118876 |
from trakt_sync.differ.handlers.core.base import Handler
class List(Handler):
name = 'list'
def on_added(self, current):
yield self.add(current)
def on_removed(self, base):
yield self.remove(base)
def on_common(self, base, current):
if base.index == current.index:
return
yield self.change((base, current))
def properties(self, item):
if type(item) is tuple:
return {
'index': self.properties_change(item, 'index')
}
return {
'index': item.index
}
|
polly/utils/pyscop/jscop2iscc.py | mkinsner/llvm | 2,338 | 11118883 |
#!/usr/bin/env python
import argparse, isl, os
import json
def printDomain(scop):
domain = isl.USet('{}')
for statement in scop['statements']:
domain = domain.union(isl.USet(statement['domain']))
print "D :=",
print str(domain) + ";"
def printAccesses(scop):
read = isl.UMap('{}')
for statement in scop['statements']:
for access in statement['accesses']:
if access['kind'] == 'read':
read = read.union(isl.UMap(access['relation']))
print "R :=",
print str(read) + ";"
write = isl.UMap('{}')
for statement in scop['statements']:
for access in statement['accesses']:
if access['kind'] == 'write':
write = write.union(isl.UMap(access['relation']))
print "W :=",
print str(write) + ";"
def printSchedule(scop):
schedule = isl.UMap('{}')
for statement in scop['statements']:
schedule = schedule.union(isl.UMap(statement['schedule']))
print "S :=",
print str(schedule) + ";"
def __main__():
description = 'Translate JSCoP into iscc input'
parser = argparse.ArgumentParser(description)
parser.add_argument('inputFile', metavar='N', type=file,
help='The JSCoP file')
args = parser.parse_args()
inputFile = args.inputFile
scop = json.load(inputFile)
printDomain(scop)
printAccesses(scop)
printSchedule(scop)
print 'R := R * D;'
print 'W := W * D;'
print 'Dep := (last W before R under S)[0];'
print 'schedule D respecting Dep minimizing Dep;'
__main__()
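# The emitted iscc program restricts the access relations to the iteration
# domain (R := R * D; W := W * D;), computes flow dependences as the last
# write before each read under the original schedule S, and finally asks iscc
# for a new schedule of D that respects and minimizes those dependences.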
|
tests/acceptance/test_shell.py | michaszcz/procrastinate | 215 | 11118915 | import asyncio
import pytest
pytestmark = pytest.mark.asyncio
@pytest.fixture
async def shell(process_env):
PIPE = asyncio.subprocess.PIPE
proc = await asyncio.create_subprocess_exec(
"procrastinate",
"shell",
env=process_env(),
stdin=PIPE,
stdout=PIPE,
stderr=None,
)
yield proc
proc.kill()
await proc.wait()
@pytest.fixture
def write(shell):
async def _(s: str):
shell.stdin.write((s + "\n").encode())
await shell.stdin.drain()
return _
@pytest.fixture
def read(shell):
async def _():
# Read lines 1 by 1, concat them into a string that we return.
        # We are willing to wait an indefinite amount of time at the beginning,
        # but once we have started reading bytes, not reading anything for a
        # short while (0.1 s) means the output is complete, so we stop.
lines = []
prefix = "procrastinate>"
while True:
try:
line = await asyncio.wait_for(shell.stdout.readline(), 0.1)
except asyncio.TimeoutError:
if lines:
break
continue
line = line.decode()
if line.startswith(prefix):
line = line[len(prefix) :]
line = line.strip()
if line:
lines.append(line)
return lines
return _
async def test_shell(read, write, defer):
assert await read() == [
"Welcome to the procrastinate shell. Type help or ? to list commands."
]
defer("sum_task", ["--lock=a"], a=5, b=7)
defer("sum_task", ["--lock=lock"], a=3, b=8)
defer("sum_task", ["--queue=other", "--lock=lock"], a=1, b=2)
defer("increment_task", ["--lock=b"], a=5)
await write("cancel 2")
assert await read() == ["#2 ns:tests.acceptance.app.sum_task on default - [failed]"]
await write("cancel 3")
assert await read() == ["#3 ns:tests.acceptance.app.sum_task on other - [failed]"]
await write("cancel 4")
assert await read() == [
"#4 tests.acceptance.app.increment_task on default - [failed]"
]
await write("list_jobs")
assert await read() == [
"#1 ns:tests.acceptance.app.sum_task on default - [todo]",
"#2 ns:tests.acceptance.app.sum_task on default - [failed]",
"#3 ns:tests.acceptance.app.sum_task on other - [failed]",
"#4 tests.acceptance.app.increment_task on default - [failed]",
]
await write("list_jobs queue=other details")
assert await read() == [
"#3 ns:tests.acceptance.app.sum_task on other - [failed] (attempts=0, scheduled_at=None, args={'a': 1, 'b': 2}, lock=lock)",
]
await write("list_queues")
assert await read() == [
"default: 3 jobs (todo: 1, doing: 0, succeeded: 0, failed: 2)",
"other: 1 jobs (todo: 0, doing: 0, succeeded: 0, failed: 1)",
]
await write("list_tasks")
assert await read() == [
"ns:tests.acceptance.app.sum_task: 3 jobs (todo: 1, doing: 0, succeeded: 0, failed: 2)",
"tests.acceptance.app.increment_task: 1 jobs (todo: 0, doing: 0, succeeded: 0, failed: 1)",
]
await write("list_locks")
assert await read() == [
"a: 1 jobs (todo: 1, doing: 0, succeeded: 0, failed: 0)",
"b: 1 jobs (todo: 0, doing: 0, succeeded: 0, failed: 1)",
"lock: 2 jobs (todo: 0, doing: 0, succeeded: 0, failed: 2)",
]
|
Tests/image_tests/renderpasses/graphs/OptixDenoiser.py | gonsolo/Falcor | 1,615 | 11118959 |
from falcor import *
def render_graph_OptixDenoiser():
g = RenderGraph("OptixDenoiser")
loadRenderPassLibrary("AccumulatePass.dll")
loadRenderPassLibrary("GBuffer.dll")
loadRenderPassLibrary("OptixDenoiser.dll")
loadRenderPassLibrary("PathTracer.dll")
loadRenderPassLibrary("ToneMapper.dll")
VBufferRT = createPass("VBufferRT")
g.addPass(VBufferRT, "VBufferRT")
AccumulatePass = createPass("AccumulatePass")
g.addPass(AccumulatePass, "AccumulatePass")
ToneMappingPass = createPass("ToneMapper")
g.addPass(ToneMappingPass, "ToneMappingPass")
PathTracer = createPass("PathTracer")
g.addPass(PathTracer, "PathTracer")
OptixDenoiser = createPass("OptixDenoiser")
g.addPass(OptixDenoiser, "OptixDenoiser")
g.addEdge("VBufferRT.vbuffer", "PathTracer.vbuffer")
g.addEdge("PathTracer.color", "AccumulatePass.input")
g.addEdge("AccumulatePass.output", "ToneMappingPass.src")
g.addEdge("ToneMappingPass.dst", "OptixDenoiser.color")
g.addEdge("PathTracer.albedo", "OptixDenoiser.albedo")
g.addEdge("PathTracer.normal", "OptixDenoiser.normal")
g.addEdge("VBufferRT.mvec", "OptixDenoiser.mvec")
# Color outputs
g.markOutput("OptixDenoiser.output")
g.markOutput("PathTracer.color")
# OptixDenoiser inputs
g.markOutput("ToneMappingPass.dst")
g.markOutput("PathTracer.albedo")
g.markOutput("PathTracer.normal")
g.markOutput("VBufferRT.mvec")
return g
OptixDenoiser = render_graph_OptixDenoiser()
try: m.addGraph(OptixDenoiser)
except NameError: None
|
tfne/algorithms/codeepneat/_codeepneat_speciation_mod.py | githealthy18/Tensorflow-Neuroevolution | 121 | 11118990 | import statistics
import logging
class CoDeepNEATSpeciationMOD:
def _speciate_modules_basic(self, mod_spec_parents, new_module_ids):
""""""
### Removal of Parental But Not Elite Modules ###
        # Remove modules that served as parents and were kept in the module container but are not elites and
        # therefore no longer belong to any species
for spec_id, spec_parents in mod_spec_parents.items():
spec_elites = self.pop.mod_species[spec_id]
for mod_id in spec_parents:
if mod_id not in spec_elites:
del self.pop.modules[mod_id]
### Species Assignment ###
        # Basic speciation assigns each new module to the species matching its module type, as for each module
        # type there is only one species. Preprocess the species by creating a type-to-id association.
species_type_to_id = dict()
for spec_id, spec_mod_ids in self.pop.mod_species.items():
species_type = self.pop.modules[spec_mod_ids[0]].get_module_type()
species_type_to_id[species_type] = spec_id
for mod_id in new_module_ids:
module_type = self.pop.modules[mod_id].get_module_type()
according_mod_spec_id = species_type_to_id[module_type]
self.pop.mod_species[according_mod_spec_id].append(mod_id)
def _speciate_modules_param_distance_fixed(self, mod_spec_parents, new_module_ids):
""""""
### Removal of Parental But Not Elite Modules ###
        # Remove modules that served as parents and were kept in the module container but are not elites and
        # therefore no longer belong to any species
for spec_id, spec_parents in mod_spec_parents.items():
spec_elites = self.pop.mod_species[spec_id]
for mod_id in spec_parents:
if mod_id not in spec_elites:
del self.pop.modules[mod_id]
### Species Assignment ###
# Traverse all new module ids, determine their type and compare their parameter distance with other species of
# that type. If the distance to one species of the same type is below the config specified 'mod_spec_distance'
# then assign the new module to that species. If not, create a new species. Create a preprocessed dict that
# lists all species of one type as only species with the same type are relevant for comparison.
species_type_to_id = dict()
for spec_id, spec_mod_repr_id in self.pop.mod_species_repr.items():
species_type = self.pop.modules[spec_mod_repr_id].get_module_type()
if species_type in species_type_to_id:
species_type_to_id[species_type].append(spec_id)
else:
species_type_to_id[species_type] = [spec_id]
min_spec_size = self.mod_spec_mod_elitism + self.mod_spec_min_offspring + 1
for mod_id in new_module_ids:
module_type = self.pop.modules[mod_id].get_module_type()
# Calculate the distance of the module to each species representative and associate each species with its
# distance in the module_spec_distances dict
module_spec_distances = dict()
for spec_mod_type, spec_ids in species_type_to_id.items():
if module_type != spec_mod_type:
continue
for spec_id in spec_ids:
spec_mod_repr = self.pop.modules[self.pop.mod_species_repr[spec_id]]
module_spec_distances[spec_id] = spec_mod_repr.get_distance(self.pop.modules[mod_id])
# Determine species whose representative has the minimum distance to the new module. If that minimum
# distance is lower than the config set module species distance, assign the new module to that species.
# If the minimum distance is higher than the module species distance, create a new species with the new
# module as the representative, assuming the population size allows for it.
min_distance_spec = min(module_spec_distances, key=module_spec_distances.get)
if module_spec_distances[min_distance_spec] <= self.mod_spec_distance:
self.pop.mod_species[min_distance_spec].append(mod_id)
elif module_spec_distances[min_distance_spec] > self.mod_spec_distance \
and min_spec_size * len(self.pop.mod_species) >= self.mod_pop_size:
                logging.warning(f"Warning: New Module (#{mod_id}) has sufficient distance to other species "
                                f"representatives but has been assigned to species {min_distance_spec} as the "
                                f"population size does not allow for more species.")
self.pop.mod_species[min_distance_spec].append(mod_id)
else:
# Create a new species with the new module as the representative
self.pop.mod_species_counter += 1
self.pop.mod_species[self.pop.mod_species_counter] = [mod_id]
self.pop.mod_species_repr[self.pop.mod_species_counter] = mod_id
species_type_to_id[module_type].append(self.pop.mod_species_counter)
def _speciate_modules_param_distance_dynamic(self, mod_spec_parents, new_module_ids):
""""""
# Perform param-distance-fixed speciation as identical to dynamic variant and subsequently adjust distance
self._speciate_modules_param_distance_fixed(mod_spec_parents, new_module_ids)
### Dynamic Adjustment of Species Distance ###
# If the species count is too low, decrease the species distance by 5 percent. If the species count is too
# high, determine the distances of each species representative to all other species representatives and choose
        # the distance that would set the species count right. Average those optimal distances over all species
        # representatives to get the new species distance.
if len(self.pop.mod_species) < self.mod_spec_species_count:
self.mod_spec_distance = self.mod_spec_distance * 0.95
elif len(self.pop.mod_species) > self.mod_spec_species_count:
optimal_spec_distance_per_species = list()
for spec_id, spec_mod_repr_id in self.pop.mod_species_repr.items():
mod_repr = self.pop.modules[spec_mod_repr_id]
# Determine distance of species repr to all other species repr
other_spec_mod_repr_ids = [mod_id for mod_id in self.pop.mod_species_repr.values()
if mod_id != spec_mod_repr_id]
sorted_distances_to_other_specs = sorted([mod_repr.get_distance(self.pop.modules[other_mod_id])
for other_mod_id in other_spec_mod_repr_ids])
# Set optimal distance of current species repr such that it conforms to 'mod_spec_species_count' by
# choosing the distance that would result in only the desired species count for the current
# representative
optimal_spec_distance = sorted_distances_to_other_specs[self.mod_spec_species_count - 1]
optimal_spec_distance_per_species.append(optimal_spec_distance)
# Average out all optimal distances for each species repr to get the new distance
self.mod_spec_distance = statistics.mean(optimal_spec_distance_per_species)
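        # Illustrative numbers for the adjustment above: with a target of
        # mod_spec_species_count = 3 and sorted distances [0.2, 0.5, 0.9, 1.4]
        # from one representative to all others, index 3 - 1 = 2 selects 0.9 as
        # that representative's optimal distance; the new threshold is the mean
        # of these per-representative optima.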
|
tests/test_utils.py | Pijuli/django-jazzmin | 972 | 11119030 | from unittest.mock import patch, MagicMock, Mock
import pytest
from django.db.models.functions import Upper
from django.urls import reverse
from jazzmin.utils import (
order_with_respect_to,
get_admin_url,
get_custom_url,
get_model_meta,
get_app_admin_urls,
get_view_permissions,
)
from .test_app.library.factories import BookFactory, UserFactory
from .test_app.library.books.models import Book
def test_order_with_respect_to():
"""
When we ask for ordering, we get it as expected
"""
def apps(*args):
return [{"app_label": x} for x in args]
original_list = apps("b", "c", "a")
assert order_with_respect_to(original_list, ["c", "b"], getter=lambda x: x["app_label"]) == apps("c", "b", "a")
assert order_with_respect_to(original_list, ["nothing"], getter=lambda x: x["app_label"]) == original_list
assert order_with_respect_to(original_list, ["a"], getter=lambda x: x["app_label"])[0]["app_label"] == "a"
assert order_with_respect_to([1, 2, 3], [3, 2, 1]) == [3, 2, 1]
assert order_with_respect_to([1, 2, 3], [3]) == [3, 1, 2]
assert order_with_respect_to(["morty", "pickle", "rick"], ["pickle", "morty"]) == [
"pickle",
"morty",
"rick",
]
@pytest.mark.django_db
def test_get_admin_url(admin_user):
"""
We can get admin urls for Model classes, instances, or app.model strings
"""
book = BookFactory()
assert get_admin_url(book) == reverse("admin:books_book_change", args=(book.pk,))
assert get_admin_url(Book) == reverse("admin:books_book_changelist")
assert get_admin_url(Book, q="test") == reverse("admin:books_book_changelist") + "?q=test"
assert get_admin_url("books.Book") == reverse("admin:books_book_changelist")
with patch("jazzmin.utils.reverse") as mock_reverse:
get_admin_url("Books.Book")
mock_reverse.assert_called_once_with("admin:Books_book_changelist", current_app="admin")
assert get_admin_url("cheese:bad_pattern") == "#"
assert get_admin_url("fake_app.fake_model") == "#"
assert get_admin_url(1) == "#"
def test_get_custom_url():
"""
    We handle URLs that can be reversed, those that can't, and external links
"""
assert get_custom_url("http://somedomain.com") == "http://somedomain.com"
assert get_custom_url("/relative/path") == "/relative/path"
assert get_custom_url("admin:books_book_changelist") == "/en/admin/books/book/"
@pytest.mark.django_db
def test_get_model_meta(admin_user):
"""
We can fetch model meta
"""
assert get_model_meta("auth.user") == admin_user._meta
assert get_model_meta("books.book") == Book._meta
assert get_model_meta("nothing") is None
assert get_model_meta("nothing.nothing") is None
@pytest.mark.django_db
def test_get_app_admin_urls():
"""
We can get all the admin urls for an app
"""
assert get_app_admin_urls("books") == [
{"url": "/en/admin/books/genre/", "model": "books.genre", "name": "Genres"},
{"url": "/en/admin/books/book/", "model": "books.book", "name": "Books"},
{"url": "/en/admin/books/author/", "model": "books.author", "name": "Authors"},
]
assert get_app_admin_urls("nothing") == []
@pytest.mark.django_db
def test_get_model_permissions():
"""
We can create the correct model permissions from user permissions
"""
user = UserFactory(permissions=("books.view_book", "books.view_author"))
assert get_view_permissions(user) == {"books.book", "books.author"}
# test for camel cased app names
user = MagicMock()
user.get_all_permissions = Mock(return_value={"BookShelf.view_author", "BookShelf.view_book"})
assert get_view_permissions(user) == {"BookShelf.book", "BookShelf.author"}
@pytest.mark.django_db
def test_get_model_permissions_lowercased():
"""
When our permissions are upper cased (we had an app with an upper case letter) we still get user perms in lower case
"""
user = UserFactory(permissions=("books.view_book", "books.view_author"))
user.user_permissions.update(codename=Upper("codename"))
assert get_view_permissions(user) == {"books.book", "books.author"}
|
modules/nltk_contrib/scripttranscriber/Utils/kunyomi.py | h4ck3rm1k3/NLP-project | 123 | 11119054 | """
Native Japanese pronunciations for characters
"""
__author__ = """
<EMAIL> (<NAME>)
<EMAIL> (<NAME>)
"""
DUMMY_PHONE_ = 'DUM'
KUNYOMI_ = {}
def LoadKunyomiWbTable(table):
if KUNYOMI_: return ## already loaded
p = open(table)
lines = p.readlines()
p.close()
for line in lines:
line = line.split()
KUNYOMI_[line[0]] = ' '.join(line[1:])
def KanjiToWorldBet(string):
output = []
some_success = False
for c in unicode(string, 'utf8'):
c = c.encode('utf-8')
try:
output.append(KUNYOMI_[c])
some_success = True
except KeyError:
output.append(DUMMY_PHONE_)
return ' '.join(output), some_success
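# A minimal usage sketch under the table layout assumed by LoadKunyomiWbTable
# (each line is "<character> <worldbet phones...>"); the file name, the entry
# and its transcription are hypothetical examples.
#
# LoadKunyomiWbTable('kunyomi_wb_table.txt')
# phones, ok = KanjiToWorldBet('山')   # e.g. ('j a m a', True) if the entry exists
# # a character missing from the table maps to DUMMY_PHONE_ (and, if nothing
# # matched at all, ok stays False)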
|
demos/kitchen_sink/libs/baseclass/bottom_app_bar.py | abang90/testapp | 1,111 | 11119081 | <gh_stars>1000+
from kivy.uix.screenmanager import Screen
class KitchenSinkBottomAppBar(Screen):
def callback_for_bottom_app_bar(self, app, text, value):
if value and app.data_screens["Bottom App Bar"]["object"]:
toolbar = self.ids.bottom_toolbar
if text == "Off":
toolbar.remove_notch()
elif text == "On":
toolbar.set_notch()
elif text == "Attached - End":
toolbar.mode = "end"
elif text == "Attached - Center":
toolbar.mode = "center"
elif text == "Free - End":
toolbar.mode = "free-end"
elif text == "Free - Center":
toolbar.mode = "free-center"
|
bin/process_AR.py | seralf/tate_collection | 330 | 11119106 | '''
For the AR experiment.
Uses level2list.json as the index and the artwork JSONs as the content,
associates AR artworks with the index, and outputs a JSON of acno values
for each object in level2list, but only if that index contains the artwork.
'''
import json
from os import walk
level2list = json.loads(open('../processed/level2list.json').read().decode('utf-8'))
level2listfinal = []
# make space for the artwork no.
# each row has one artlist associated
def init():
for row in level2list:
row['artlist'] = []
def open_art_files(targetpath):
for dirname, dirnames, filenames in walk(targetpath):
for filename in filenames:
filepath = '/'.join([dirname,filename])
fileopen = open(filepath).read().decode('utf-8')
jsonopen = json.loads(fileopen)
acno = jsonopen['acno']
title = jsonopen['title']
thumbnail =jsonopen['thumbnailUrl']
url = jsonopen['url']
contributorCount = jsonopen['contributorCount']
if contributorCount is 1:
artist = jsonopen['contributors'][0]['fc']
else:
artistlist = []
for c in jsonopen['contributors']:
                    artistlist.append(c['fc'])
artist = ' ,'.join(artistlist)
subjectCount = jsonopen['subjectCount']
if subjectCount is not 0 and thumbnail is not None:
subjects0 = jsonopen['subjects']['children']
for child0 in subjects0:
subjects1 = child0['children']
for child1 in subjects1:
subjects2 = child1['children']
for child2 in subjects2:
item = {}
item['id'] = acno
item['title'] = title
item['artist'] = artist
item['url'] = url
# open txt files with base64 encode for a 40x40 thumbnail
item['thumb'] = open_matching_base64_file(acno)
matchdict = next((item for item in level2list if item['id'] == child2['id']), None)
# add the acno to the corresponding jsonopen
matchdict['artlist'].append(item)
def open_matching_base64_file(matchingid):
try:
txtopen = open('./images0/ar40/' + matchingid + '.tile.txt','rb')
txtcontent = txtopen.read()
return txtcontent
except:
return 'none'
def finish():
for row in level2list:
if len(row['artlist']):
level2listfinal.append(row)
def write_file(data, filename):
jsondata = json.dumps(data,sort_keys = True,separators = (',',':'))
output = open('../processed/' + filename,'w')
output.writelines(jsondata)
output.close
init()
open_art_files('../artworks/ar')
finish()
write_file(level2listfinal,'level2list_AR.json')
|
tradingbot/components/broker/__init__.py | stungkit/TradingBot | 218 | 11119113 |
from .abstract_interfaces import ( # NOQA # isort:skip
AbstractInterface,
AccountBalances,
StocksInterface,
AccountInterface,
)
from .av_interface import AVInterface, AVInterval # NOQA # isort:skip
from .ig_interface import IGInterface, IG_API_URL # NOQA # isort:skip
from .yf_interface import YFinanceInterface, YFInterval # NOQA # isort:skip
from .factories import BrokerFactory, InterfaceNames # NOQA # isort:skip
from .broker import Broker # NOQA # isort:skip
|
unittest_reinvent/running_modes/reinforcement_tests/test_reinforce_tanimoto_similarity.py | lilleswing/Reinvent-1 | 183 | 11119144 |
import os
import shutil
import unittest
from reinvent_models.lib_invent.enums.generative_model_regime import GenerativeModelRegimeEnum
from reinvent_models.model_factory.configurations.model_configuration import ModelConfiguration
from reinvent_models.model_factory.generative_model import GenerativeModel
from reinvent_scoring.scoring.component_parameters import ComponentParameters
from reinvent_scoring.scoring.diversity_filters.reinvent_core.diversity_filter import DiversityFilter
from reinvent_scoring.scoring.diversity_filters.reinvent_core.diversity_filter_parameters import \
DiversityFilterParameters
from reinvent_scoring.scoring.enums.diversity_filter_enum import DiversityFilterEnum
from reinvent_scoring.scoring.enums.scoring_function_component_enum import ScoringFunctionComponentNameEnum
from reinvent_scoring.scoring.enums.scoring_function_enum import ScoringFunctionNameEnum
from reinvent_scoring.scoring.scoring_function_factory import ScoringFunctionFactory
from reinvent_scoring.scoring.scoring_function_parameters import ScoringFunctionParameters
from running_modes.configurations import ReinforcementLoggerConfiguration
from running_modes.configurations.general_configuration_envelope import GeneralConfigurationEnvelope
from running_modes.configurations.reinforcement_learning.inception_configuration import InceptionConfiguration
from running_modes.configurations.reinforcement_learning.reinforcement_learning_configuration import \
ReinforcementLearningConfiguration
from running_modes.enums.logging_mode_enum import LoggingModeEnum
from running_modes.enums.model_type_enum import ModelTypeEnum
from running_modes.enums.running_mode_enum import RunningModeEnum
from running_modes.reinforcement_learning import CoreReinforcementRunner, Inception
from running_modes.reinforcement_learning.logging import ReinforcementLogger
from running_modes.utils import set_default_device_cuda
from unittest_reinvent.fixtures.paths import MAIN_TEST_PATH, PRIOR_PATH
from unittest_reinvent.fixtures.test_data import PROPANE, ASPIRIN
class TestReinforceTanimotoSimilarity(unittest.TestCase):
def setUp(self):
set_default_device_cuda()
lm_enum = LoggingModeEnum()
run_mode_enum = RunningModeEnum()
sf_enum = ScoringFunctionNameEnum()
sf_component_enum = ScoringFunctionComponentNameEnum()
filter_enum = DiversityFilterEnum()
model_regime = GenerativeModelRegimeEnum()
model_type_enum = ModelTypeEnum()
self.workfolder = MAIN_TEST_PATH
smiles = [PROPANE, ASPIRIN]
ts_parameters = vars(ComponentParameters(name="tanimoto similarity", weight=1,
specific_parameters={"smiles": smiles},
component_type=sf_component_enum.TANIMOTO_SIMILARITY))
sf_parameters = ScoringFunctionParameters(name=sf_enum.CUSTOM_SUM, parameters=[ts_parameters])
scoring_function = ScoringFunctionFactory(sf_parameters)
scaffold_parameters = DiversityFilterParameters(filter_enum.IDENTICAL_MURCKO_SCAFFOLD, 0.05, 25, 0.4)
prior_config = ModelConfiguration(model_type_enum.DEFAULT, model_regime.INFERENCE, PRIOR_PATH)
actor_config = ModelConfiguration(model_type_enum.DEFAULT, model_regime.TRAINING, PRIOR_PATH)
prior = GenerativeModel(prior_config)
actor = GenerativeModel(actor_config)
inception_config = InceptionConfiguration(smiles, 100, 10)
inception = Inception(inception_config, scoring_function, prior)
log_config = ReinforcementLoggerConfiguration(recipient=lm_enum.LOCAL,
logging_path=f"{self.workfolder}/log", result_folder=self.workfolder,
logging_frequency=0, job_name="unit_test_job")
configuration = GeneralConfigurationEnvelope(parameters={}, logging={},
run_type=run_mode_enum.REINFORCEMENT_LEARNING, version="2.0")
logger = ReinforcementLogger(configuration, log_config)
diversity_filter = self._setup_scaffold_filter(scaffold_parameters)
config = ReinforcementLearningConfiguration(prior=PRIOR_PATH, agent=PRIOR_PATH, n_steps=3)
self.runner = CoreReinforcementRunner(prior, actor, config, scoring_function, diversity_filter, inception, logger)
@staticmethod
def _setup_scaffold_filter(diversity_filter_parameters):
diversity_filter = DiversityFilter(diversity_filter_parameters)
return diversity_filter
def tearDown(self):
if os.path.isdir(self.workfolder):
shutil.rmtree(self.workfolder)
def test_reinforcement_with_similarity_run_1(self):
self.runner.run()
self.assertEqual(os.path.isdir(f"{self.workfolder}/log"), True)
|
DiffAugment-biggan-imagenet/compare_gan/tpu/tpu_summaries.py | Rian-T/data-efficient-gans | 1,902 | 11119164 |
# coding=utf-8
# Copyright 2018 Google LLC & <NAME>.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provide a helper class for using summaries on TPU via a host call.
TPUEstimator does not support writing TF summaries out of the box and TPUs can't
perform operations that write files to disk. To monitor tensor values during
training you can copy the tensors back to the CPU of the host machine via
a host call function. This small library provides a convenient API to do this.
Example:
from compare_gan.tpu import tpu_summaries
def model_fn(features, labels, params, mode):
    summary = tpu_summaries.TpuSummaries(my_model_dir)
summary.scalar("my_scalar_summary", tensor1)
summary.scalar("my_counter", tensor2, reduce_fn=tf.math.reduce_sum)
return TPUEstimatorSpec(
host_call=summary.get_host_call(),
...)
Warning: The host call function will run every step. Writing large tensors to
summaries can slow down your training. High ranking outfeed operations in your
XProf profile can be an indication for this.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from absl import logging
import tensorflow as tf
summary = tf.contrib.summary # TensorFlow Summary API v2.
TpuSummaryEntry = collections.namedtuple(
"TpuSummaryEntry", "summary_fn name tensor reduce_fn")
class TpuSummaries(object):
"""Class to simplify TF summaries on TPU.
  An instance of the class provides simple methods for writing summaries in a
similar way to tf.summary. The difference is that each summary entry must
provide a reduction function that is used to reduce the summary values from
all the TPU cores.
"""
def __init__(self, log_dir, save_summary_steps=250):
self._log_dir = log_dir
self._entries = []
# While False no summary entries will be added. On TPU we unroll the graph
# and don't want to add multiple summaries per step.
self.record = True
self._save_summary_steps = save_summary_steps
  def image(self, name, tensor, reduce_fn):
    """Add a summary for images. Tensor must be a 4-D tensor."""
if not self.record:
return
self._entries.append(
TpuSummaryEntry(summary.image, name, tensor, reduce_fn))
def scalar(self, name, tensor, reduce_fn=tf.math.reduce_mean):
"""Add a summary for a scalar tensor."""
if not self.record:
return
tensor = tf.convert_to_tensor(tensor)
if tensor.shape.ndims == 0:
tensor = tf.expand_dims(tensor, 0)
self._entries.append(
TpuSummaryEntry(summary.scalar, name, tensor, reduce_fn))
def get_host_call(self):
"""Returns the tuple (host_call_fn, host_call_args) for TPUEstimatorSpec."""
# All host_call_args must be tensors with batch dimension.
# All tensors are streamed to the host machine (mind the band width).
global_step = tf.train.get_or_create_global_step()
host_call_args = [tf.expand_dims(global_step, 0)]
host_call_args.extend([e.tensor for e in self._entries])
logging.info("host_call_args: %s", host_call_args)
return (self._host_call_fn, host_call_args)
def _host_call_fn(self, step, *args):
"""Function that will run on the host machine."""
# Host call receives values from all tensor cores (concatenate on the
# batch dimension). Step is the same for all cores.
step = step[0]
logging.info("host_call_fn: args=%s", args)
with summary.create_file_writer(self._log_dir).as_default():
with summary.record_summaries_every_n_global_steps(
self._save_summary_steps, step):
for i, e in enumerate(self._entries):
value = e.reduce_fn(args[i])
e.summary_fn(e.name, value, step=step)
return summary.all_summary_ops()
|
docs/source/conf.py | SMILELab-FL/FedLab | 171 | 11119166 | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
current_dir = os.path.dirname(__file__)
target_dir = os.path.abspath(os.path.join(current_dir, "../.."))
sys.path.insert(0, target_dir)
# print(target_dir)
# -- Project information -----------------------------------------------------
project = 'FedLab'
copyright = '2021, SMILE Lab'
author = 'SMILE Lab'
# The full version, including alpha/beta/rc tags
import fedlab
release = fedlab.__version__
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'autoapi.extension', # this one is really important
'sphinx.ext.viewcode',
'sphinx.ext.githubpages',
'sphinx.ext.mathjax',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinxcontrib.napoleon',
'sphinx.ext.autosectionlabel', # allows referring sections its title, affects `ref`
'sphinx_design',
'sphinxcontrib.bibtex',
]
# for 'sphinxcontrib.bibtex' extension
bibtex_bibfiles = ['refs.bib']
bibtex_default_style = 'unsrt'
autodoc_mock_imports = ["numpy", "torch", "torchvision", "pandas"]
autoclass_content = 'both'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# configuration for 'autoapi.extension'
autoapi_type = 'python'
autoapi_dirs = ['../../fedlab']
autoapi_template_dir = '_autoapi_templates'
add_module_names = False # makes Sphinx render package.module.Class as Class
# Add more mapping for 'sphinx.ext.intersphinx'
intersphinx_mapping = {'python': ('https://docs.python.org/3', None),
'PyTorch': ('http://pytorch.org/docs/master/', None),
'numpy': ('https://numpy.org/doc/stable/', None),
'pandas': ('https://pandas.pydata.org/pandas-docs/dev/', None)}
# autosectionlabel throws warnings if section names are duplicated.
# The following tells autosectionlabel to not throw a warning for
# duplicated section names that are in different documents.
autosectionlabel_prefix_document = True
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# Config for 'sphinx.ext.todo'
todo_include_todos = True
# multi-language docs
language = 'en'
locale_dirs = ['../locales/'] # path is example but recommended.
gettext_compact = False # optional.
gettext_uuid = True # optional.
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'alabaster'
html_theme = "furo"
html_favicon = "../imgs/favicon.png"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['../_static']
html_theme_options = {
# "announcement": """
# <a style=\"text-decoration: none; color: white;\"
# href=\"https://github.com/sponsors/urllib3\">
# <img src=\"/en/latest/_static/favicon.png\"/> Support urllib3 on GitHub Sponsors
# </a>
# """,
"sidebar_hide_name": True,
"light_logo": "FedLab-logo.svg",
"dark_logo": "FedLab-logo.svg",
}
# html_logo = "FedLab-logo.svg"
|
tests/guinea-pigs/unittest/subtest_skip.py | Tirzono/teamcity-messages | 105 | 11119201 | import sys
from teamcity.unittestpy import TeamcityTestRunner
if sys.version_info < (3, 4):
from unittest2 import main, TestCase
else:
from unittest import main, TestCase
class TestXXX(TestCase):
def testSubtestSkip(self):
for i in range(0, 3):
with self.subTest(i=i):
if i == 2:
self.skipTest("skip reason")
main(testRunner=TeamcityTestRunner)
|
Stackless/unittests/test_thread.py | masamitsu-murase/stackless | 854 | 11119205 | from __future__ import absolute_import
# import common
import unittest
import stackless
import sys
import time
import struct
import _teststackless
from _stackless import _test_nostacklesscall as apply_not_stackless
from support import test_main # @UnusedImport
from support import StacklessTestCase, AsTaskletTestCase, testcase_leaks_references
try:
import threading
try:
import thread
except ImportError:
import _thread as thread
withThreads = True
except:
withThreads = False
class threading(object):
Thread = object
class SkipMixin(object):
def skipUnlessSoftswitching(self):
if not stackless.enable_softswitch(None):
self.skipTest("test requires softswitching")
def GetRemoteTasklets(callables):
"""Get a non-scheduled tasklet on a remote thread"""
c = stackless.channel()
def tfunc():
# thread func. Create a tasklet, remove it, and send it to the master.
# then wait for the tasklet to finish.
try:
c2 = stackless.channel()
tasklets = []
for callable in callables:
def helper(callable):
try:
callable()
except:
c2.send_throw(*sys.exc_info())
else:
c2.send(None)
t = stackless.tasklet(helper)(callable)
t.remove()
tasklets.append(t)
c.send(tasklets)
except:
c.send_throw(*sys.exc_info())
stackless.__reduce__()
for callable in callables:
c2.receive()
stackless.run() # drain the scheduler
thread = threading.Thread(target=tfunc)
thread.start()
d = c.receive(), thread
return d
class LingeringThread(threading.Thread):
""" A thread that lingers on after executing its main function"""
def __init__(self, *args, **kwargs):
self.real_target = kwargs["target"]
kwargs["target"] = self.thread_func
super(LingeringThread, self).__init__(*args, **kwargs)
self.shutdown = threading.Event()
def thread_func(self, *args, **kwargs):
result = self.real_target(*args, **kwargs)
self.linger()
return result
def linger(self):
# wait until join is called
self.shutdown.wait()
def join(self):
self.shutdown.set()
super(LingeringThread, self).join()
time.sleep(0.01) # give the thread a chance to clean up
def __enter__(self):
pass
def __exit__(self, ex, val, tb):
self.join()
class SchedulingThread(LingeringThread):
""" A thread that runs a scheduling loop after executing its main function"""
def linger(self):
while not self.shutdown.is_set():
stackless.run()
time.sleep(0.001)
def GetRemoteTasklet(callable, args):
"""Get a non-scheduled tasklet on a remote thread"""
tasklets, thread = GetRemoteTasklets([lambda:callable(*args)])
return tasklets[0], thread
@unittest.skipUnless(withThreads, "requires thread support")
class TestRemoteSchedule(AsTaskletTestCase):
def setUp(self):
super(TestRemoteSchedule, self).setUp()
self.events = []
def testFoo(self):
def foo():
pass
t, thread = GetRemoteTasklet(foo, ())
try:
t.run()
finally:
thread.join(2)
def testRun(self):
def foo():
self.events.append(0)
t, thread = GetRemoteTasklet(foo, ())
try:
t.run()
finally:
thread.join(2)
self.assertEqual(self.events, list(range(len(self.events))))
def testInsert(self):
def foo():
self.events.append(0)
t, thread = GetRemoteTasklet(foo, ())
try:
t.insert()
finally:
thread.join(2)
self.assertEqual(self.events, list(range(len(self.events))))
def testRunOrder(self):
def a():
self.events.append(0)
def b():
self.events.append(1)
def c():
self.events.append(2)
(t1, t2, t3), thread = GetRemoteTasklets((a, b, c))
try:
with stackless.atomic():
t2.insert()
t3.insert()
t1.run() # t1 should run first
finally:
thread.join(2)
self.assertEqual(self.events, list(range(3)))
@unittest.skipUnless(withThreads, "requires thread support")
class TestRebindCrash(SkipMixin, StacklessTestCase):
"""A crash from <NAME>, occurring when transferring tasklet to a thread"""
def create_remote_tasklet(self, nontrivial=False, job=None):
result = []
e1 = threading.Event()
e2 = threading.Event()
def remove():
stackless.schedule_remove(retval=None)
def taskletfunc():
result.append(stackless.getcurrent())
if nontrivial:
_teststackless.test_cstate(remove)
else:
remove()
if job:
job()
def threadfunc():
t = stackless.tasklet(taskletfunc)()
t.run()
e1.set()
while not e2.is_set():
stackless.run()
time.sleep(0.001)
e2.wait() # wait until we can die
t = threading.Thread(target=threadfunc)
t.start()
e1.wait()
# callable to end the thread
def end():
e2.set()
t.join()
return end, result[0]
def to_current_thread(self, task):
"""
Get a tasklet for the current thread.
If the tasklet already belongs to the current thread, this
method returns the tasklet unmodified.
Otherwise, this method tries to
unbind the tasklet and returns a newly created tasklet. If
unbinding fails, the method raises :exc:`RuntimeError`.
"""
self.skipUnlessSoftswitching()
if task.thread_id == thread.get_ident():
return task
reducedTask = task.__reduce__()
# raise RuntimeError, if task is alive but not paused
task.bind(None)
if False: # Stackless will crash if set to False
frameList = reducedTask[2][3]
for i in range(len(frameList)):
frame = frameList[i]
if isinstance(frame, stackless.cframe):
reducedFrame = frame.__reduce__()
newFrame = reducedFrame[0](*reducedFrame[1])
newFrame.__setstate__(reducedFrame[2])
frameList[i] = newFrame
# rebind the task
task = reducedTask[0](*reducedTask[1])
for i in range(len(reducedTask[2][3])):
if not isinstance(reducedTask[2][3][i], stackless.cframe):
reducedTask[2][3][i] = reducedTask[2][3][i].frame
task.__setstate__(reducedTask[2])
return task
def test_crash(self):
self.skipUnlessSoftswitching()
end, task = self.create_remote_tasklet()
try:
task = self.to_current_thread(task)
task.run()
finally:
end()
def test_no_rebind(self):
result = []
e = threading.Event()
def job():
result.append(thread.get_ident())
e.set()
end, task = self.create_remote_tasklet(job=job)
try:
task.run()
e.wait()
self.assertNotEqual(result[0], thread.get_ident())
finally:
end()
def test_rebind(self):
self.skipUnlessSoftswitching()
result = []
def job():
result.append(thread.get_ident())
end, task = self.create_remote_tasklet(job=job)
try:
task.bind_thread()
task.run()
self.assertEqual(result[0], thread.get_ident())
finally:
end()
def test_rebind_nontrivial(self):
end, task = self.create_remote_tasklet(nontrivial=True)
try:
self.assertRaisesRegex(RuntimeError, "C state", task.bind_thread)
finally:
end()
@unittest.skipUnless(withThreads, "requires thread support")
class RemoteTaskletTests(SkipMixin, StacklessTestCase):
ThreadClass = LingeringThread
def setUp(self):
super(RemoteTaskletTests, self).setUp()
self.taskletExecuted = False
self.event = threading.Event()
self.channel = stackless.channel()
def create_tasklet(self, action, *args, **kw):
self.tasklet = stackless.tasklet(action)(*args, **kw)
self.event.set()
def tasklet_action(self):
self.taskletExecuted = True
def create_thread_task(self, action=None):
if not action:
action = self.tasklet_action
theThread = self.ThreadClass(target=self.create_tasklet,
args=(action,))
theThread.start()
self.event.wait()
self.event.clear()
t = self.tasklet
return theThread, t
class TestRemove(RemoteTaskletTests):
def test_remove_balance(self):
""" Test that remove from the runqueue of a remote thread does not affect the
bookkeeping of the current thread.
"""
before = stackless.getruncount()
thread, task = self.create_thread_task()
try:
after = stackless.getruncount()
self.assertEqual(before, after)
task.remove()
after = stackless.getruncount()
# only the runnable count on the remote thread
# should change
self.assertEqual(before, after)
finally:
thread.join()
def test_insert_balance(self):
""" Test that insert into the runqueue of a remote thread does not affect the
bookkeeping of the current thread.
"""
thread, task = self.create_thread_task()
try:
task.remove()
before = stackless.getruncount()
task.insert()
after = stackless.getruncount()
# only the runnable count on the remote thread
# should change
self.assertEqual(before, after)
finally:
thread.join()
class DeadThreadTest(RemoteTaskletTests):
def test_tasklet_from_dead_thread(self):
theThread, t = self.create_thread_task()
self.assertTrue(t.alive)
theThread.join()
time.sleep(0.01) # give the thread a short time to clean up
# now the tasklet should have been killed.
self.assertFalse(t.alive)
def test_removed_tasklet_from_dead_thread(self):
theThread, t = self.create_thread_task()
self.assertTrue(t.scheduled)
t.remove()
self.assertFalse(t.scheduled)
theThread.join()
time.sleep(0.01) # give the thread a short time to clean up
# now the tasklet should have been killed.
self.assertFalse(t.alive)
def test_rebound_tasklet_from_dead_thread(self):
theThread, t = self.create_thread_task()
t.remove()
t.bind_thread()
theThread.join()
# now the tasklet should be alive
self.assertTrue(t.alive)
t.run()
self.assertTrue(self.taskletExecuted)
self.assertFalse(t.alive)
def test_bind_runnable(self):
theThread, t = self.create_thread_task()
self.assertRaisesRegex(RuntimeError, "runnable", t.bind_thread)
theThread.join()
def test_death(self):
"""test tasklets from dead threads"""
theThread, t = self.create_thread_task()
with theThread:
self.assertNotEqual(t.thread_id, -1)
self.assertEqual(t.thread_id, -1)
def test_rebind_from_dead(self):
"""test that rebinding a fresh tasklet from a dead thread works"""
theThread, t = self.create_thread_task()
with theThread:
self.assertNotEqual(t.thread_id, -1)
self.assertEqual(t.thread_id, -1)
t.bind_thread()
self.assertEqual(t.thread_id, stackless.getcurrent().thread_id)
@testcase_leaks_references("test catches TaskletExit and refuses to die in its own thread")
def test_rebind_from_dead_fail_cstate(self):
# A test for https://github.com/stackless-dev/stackless/issues/92
loop = True
def task():
while loop:
try:
stackless.main.switch()
except TaskletExit:
pass
def other_thread_main():
tlet.bind_thread()
tlet.run()
tlet = stackless.tasklet().bind(apply_not_stackless, (task,))
t = threading.Thread(target=other_thread_main, name="other thread")
t.start()
t.join()
time.sleep(0.1) # other_thread needs some time to be destroyed
self.assertEqual(tlet.thread_id, -1)
self.assertFalse(tlet.alive)
self.assertFalse(tlet.restorable)
self.assertGreater(tlet.nesting_level, 0)
loop = False
try:
self.assertRaisesRegex(RuntimeError, "tasklet has C state on its stack", tlet.bind_thread)
except AssertionError:
tlet.kill() # causes an assertion error in debug builds of 2.7.9-slp
raise
# the tasklet has no thread
self.assertEqual(tlet.thread_id, -1)
self.tasklet_is_uncollectable(tlet)
def test_methods_on_dead(self):
"""test that tasklet methods on a dead tasklet behave well"""
class MyException(Exception):
pass
theThread, t = self.create_thread_task()
with theThread:
self.assertNotEqual(t.thread_id, -1)
self.assertEqual(t.thread_id, -1)
self.assertFalse(t.alive)
self.assertFalse(t.paused)
self.assertFalse(t.blocked)
self.assertFalse(t.scheduled)
self.assertTrue(t.restorable)
self.assertFalse(t.atomic)
self.assertFalse(t.block_trap)
self.assertFalse(t.ignore_nesting)
self.assertIsNone(t.next)
self.assertIsNone(t.prev)
# must not raise an exception
t.trace_function
t.profile_function
self.assertEqual(t.thread_id, -1)
t.bind(None)
self.assertEqual(t.thread_id, -1)
t.remove()
self.assertEqual(t.thread_id, -1)
t.bind(lambda: None)
self.assertEqual(t.thread_id, -1)
self.assertRaises(RuntimeError, t.setup)
self.assertEqual(t.thread_id, -1)
self.assertRaises(RuntimeError, t.bind, lambda: None, ())
self.assertEqual(t.thread_id, -1)
self.assertRaises(RuntimeError, t.insert)
self.assertEqual(t.thread_id, -1)
self.assertRaises(RuntimeError, t.run)
self.assertEqual(t.thread_id, -1)
self.assertRaises(RuntimeError, t.switch)
self.assertEqual(t.thread_id, -1)
self.assertRaises(RuntimeError, t.raise_exception, MyException, 'test')
self.assertEqual(t.thread_id, -1)
self.assertRaises(RuntimeError, t.throw, MyException)
self.assertEqual(t.thread_id, -1)
t.__reduce__()
self.assertEqual(t.thread_id, -1)
t.set_atomic(t.set_atomic(True))
self.assertEqual(t.thread_id, -1)
t.set_ignore_nesting(t.set_ignore_nesting(1))
self.assertEqual(t.thread_id, -1)
t.bind(None)
self.assertEqual(t.thread_id, -1)
class BindThreadTest(RemoteTaskletTests):
"""More unittests for tasklet.bind_thread"""
def testForeignThread_scheduled(self):
theThread, t = self.create_thread_task()
try:
self.assertEqual(t.thread_id, theThread.ident)
self.assertTrue(t.alive)
self.assertFalse(t.paused)
t.remove()
self.assertTrue(t.paused)
t.bind_thread()
self.assertTrue(t.alive)
self.assertTrue(t.paused)
self.assertNotEqual(t.thread_id, theThread.ident)
self.assertEqual(t.thread_id, thread.get_ident())
t.insert()
self.assertFalse(t.paused)
stackless.run()
self.assertTrue(self.taskletExecuted)
self.assertFalse(t.alive)
finally:
theThread.join()
def test_bind_to_current_tid(self):
current_id = stackless.getcurrent().thread_id
self.assertEqual(current_id, thread.get_ident())
theThread, t = self.create_thread_task()
t.remove()
with theThread:
self.assertEqual(t.thread_id, theThread.ident)
t.bind_thread(current_id)
self.assertEqual(t.thread_id, current_id)
t.run()
self.assertTrue(self.taskletExecuted)
self.assertFalse(t.alive)
def test_bind_to_bogus_tid(self):
current_id = stackless.getcurrent().thread_id
self.assertEqual(current_id, thread.get_ident())
theThread, t = self.create_thread_task()
t.remove()
with theThread:
self.assertEqual(t.thread_id, theThread.ident)
self.assertRaises(OverflowError, t.bind_thread, -2)
# try the max long value, it is very likely not a valid id
self.assertRaises(ValueError, t.bind_thread,
((1 << (struct.calcsize('@L')*8-1))-1))
t.bind_thread(current_id)
self.assertEqual(t.thread_id, current_id)
t.run()
self.assertTrue(self.taskletExecuted)
self.assertFalse(t.alive)
class SchedulingBindThreadTests(RemoteTaskletTests):
ThreadClass = SchedulingThread
def tasklet_action(self):
self.channel.receive()
self.taskletExecuted = True
self.channel.send(None)
def test_bind_to_other_tid(self):
self.skipUnlessSoftswitching()
current_id = stackless.getcurrent().thread_id
self.assertEqual(current_id, thread.get_ident())
theThread, t = self.create_thread_task()
with theThread:
otherThread, t2 = self.create_thread_task()
with otherThread:
self.assertEqual(t.thread_id, theThread.ident)
t.bind_thread(otherThread.ident)
self.assertEqual(t.thread_id, otherThread.ident)
self.channel.send(None)
self.channel.receive()
self.assertTrue(self.taskletExecuted)
self.assertFalse(t.alive)
def tasklet_runnable_action(self):
"""A tasklet that keeps itself runnable"""
while not self.channel.balance:
stackless.schedule()
time.sleep(0.001)
self.channel.receive()
def test_rebind_runnable(self):
theThread, t = self.create_thread_task(self.tasklet_runnable_action)
with theThread:
self.assertRaisesRegex(RuntimeError, 'runnable', t.bind_thread)
self.channel.send(None)
class SwitchTest(RemoteTaskletTests):
ThreadClass = SchedulingThread
def tasklet_action(self):
stackless.schedule_remove() # pause it
self.taskletExecuted = True
def test_switch(self):
"""Test that inter-thread switching fails"""
theThread, t = self.create_thread_task()
with theThread:
time.sleep(0.01)
self.assertTrue(t.paused)
self.assertRaisesRegex(RuntimeError, "different thread", t.switch)
class SetupFromDifferentThreadTest(RemoteTaskletTests):
# Test case for issue #60 https://github.com/stackless-dev/stackless/issues/60
def create_tasklet(self, action, *args, **kw):
self.tasklet = stackless.tasklet(action)
self.event.set()
def test_setup_from_other_thread(self):
theThread, t = self.create_thread_task()
t.setup()
theThread.join()
@unittest.skipUnless(withThreads, "requires thread support")
class TestThreadLocalStorage(StacklessTestCase):
class ObjectWithDestructor(object):
def __init__(self, event):
self.event = event
def __del__(self):
self.event.set()
def test_destructor_at_end_of_thread(self):
# Test case for issue #121 https://github.com/stackless-dev/stackless/issues/121
# Run a destructor during clean up of thread local storage
# Until issue #121 got fixed, this caused a reference leak
tls = threading.local()
deleted = threading.Event()
def other_thread():
tls.owd = self.ObjectWithDestructor(deleted)
self.assertFalse(deleted.is_set())
t = threading.Thread(target=other_thread, name="other thread")
t.start()
t.join()
time.sleep(0.1) # give the thread time to clean up
self.assertTrue(deleted.is_set())
if __name__ == '__main__':
if not sys.argv[1:]:
sys.argv.append('-v')
unittest.main()
|
python/dgl/_ffi/object_generic.py | ketyi/dgl | 9,516 | 11119212 | """Common implementation of Object generic related logic"""
# pylint: disable=unused-import
from __future__ import absolute_import
from numbers import Number, Integral
from .. import _api_internal
from .base import string_types
# Object base class
_CLASS_OBJECT_BASE = None
def _set_class_object_base(cls):
global _CLASS_OBJECT_BASE
_CLASS_OBJECT_BASE = cls
class ObjectGeneric(object):
"""Base class for all classes that can be converted to object."""
def asobject(self):
"""Convert value to object"""
raise NotImplementedError()
def convert_to_object(value):
"""Convert a python value to corresponding object type.
Parameters
----------
value : str
The value to be inspected.
Returns
-------
object : Object
The corresponding object value.
"""
if isinstance(value, _CLASS_OBJECT_BASE):
return value
if isinstance(value, (list, tuple)):
value = [convert_to_object(x) for x in value]
return _api_internal._List(*value)
if isinstance(value, dict):
vlist = []
for item in value.items():
if (not isinstance(item[0], _CLASS_OBJECT_BASE) and
not isinstance(item[0], string_types)):
raise ValueError("key of map must already been a container type")
vlist.append(item[0])
vlist.append(convert_to_object(item[1]))
return _api_internal._Map(*vlist)
if isinstance(value, ObjectGeneric):
return value.asobject()
return _api_internal._Value(value)
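# Illustrative usage sketch (not part of the original module; the results simply
# follow the branches of convert_to_object above):
#   convert_to_object([1, 2, 3])        # -> _api_internal._List of wrapped values
#   convert_to_object({"lr": 0.01})     # -> _api_internal._Map keyed by str/Object
#   convert_to_object(3.14)             # -> _api_internal._Value(3.14)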
|
Stock/Common/Ui/Basic/Other/DyStockIndustryCompareWindow.py | Leonardo-YXH/DevilYuan | 135 | 11119224 | from PyQt5.QtWidgets import QTabWidget
class DyStockIndustryCompareWindow(QTabWidget):
""" 股票行业比较窗口 """
def __init__(self, eventEngine, tableCls, targetCode, targetName, baseDate):
"""
@tableCls: DyStockTableWidget class, passed in this way to avoid recursive imports
@targetCode, @targetName: the stock against which the industry comparison is made
"""
super().__init__()
self._eventEngine = eventEngine
self._tableCls = tableCls
self._targetCode = targetCode
self._targetName = targetName
self._baseDate = baseDate
def addCategorys(self, dfs):
for category, df in dfs.items():
header = list(df.columns)
data = df.values.tolist()
widget = self._tableCls(self._eventEngine, name=category, baseDate=self._baseDate)
widget.appendStocks(data, header, autoForegroundColName=header[-1])
widget.markByData('名称', self._targetName)
self.addTab(widget, category)
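# Illustrative usage sketch (the stock code, name and date below are placeholder
# values, and dfs is assumed to map industry names to pandas DataFrames):
#   window = DyStockIndustryCompareWindow(eventEngine, DyStockTableWidget,
#                                         '600036', 'China Merchants Bank', '2018-01-05')
#   window.addCategorys(dfs)
#   window.show()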
|
observations/r/larynx.py | hajime9652/observations | 199 | 11119282 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import numpy as np
import os
import sys
from observations.util import maybe_download_and_extract
def larynx(path):
"""data from Section 1.8
The `larynx` data frame has 90 rows and 5 columns.
This data frame contains the following columns:
stage
Stage of disease (1=stage 1, 2=stage2, 3=stage 3, 4=stage 4)
time
Time to death or on-study time, months
age
Age at diagnosis of larynx cancer
diagyr
Year of diagnosis of larynx cancer
delta
Death indicator (0=alive, 1=dead)
<NAME> Moeschberger (1997) *Survival Analysis Techniques for Censored
and truncated data*, Springer. Kardaun Stat. Nederlandica 37 (1983),
103-126.
Args:
path: str.
Path to directory which either stores file or otherwise file will
be downloaded and extracted there.
Filename is `larynx.csv`.
Returns:
Tuple of np.ndarray `x_train` with 90 rows and 5 columns and
dictionary `metadata` of column headers (feature names).
"""
import pandas as pd
path = os.path.expanduser(path)
filename = 'larynx.csv'
if not os.path.exists(os.path.join(path, filename)):
url = 'http://dustintran.com/data/r/KMsurv/larynx.csv'
maybe_download_and_extract(path, url,
save_file_name='larynx.csv',
resume=False)
data = pd.read_csv(os.path.join(path, filename), index_col=0,
parse_dates=True)
x_train = data.values
metadata = {'columns': data.columns}
return x_train, metadata
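# Example call (illustrative; the cache directory is an arbitrary choice):
#   x_train, metadata = larynx('~/observations_data')
#   x_train.shape              # (90, 5), per the docstring above
#   list(metadata['columns'])  # ['stage', 'time', 'age', 'diagyr', 'delta']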
|
torchbenchmark/models/fastNLP/reproduction/text_classification/train_bert.py | Chillee/benchmark | 2,693 | 11119323 | import sys
sys.path.append('../../')
from reproduction.text_classification.data.IMDBLoader import IMDBLoader
from fastNLP.embeddings import BertEmbedding
from reproduction.text_classification.model.lstm import BiLSTMSentiment
from fastNLP import Trainer
from fastNLP import CrossEntropyLoss, AccuracyMetric
from fastNLP import cache_results
from fastNLP import Tester
# Cache the returned result so that the next run automatically skips the preprocessing
@cache_results('imdb.pkl')
def get_data():
data_bundle = IMDBLoader().process('imdb/')
return data_bundle
data_bundle = get_data()
print(data_bundle)
# Drop samples longer than 512; since English words are split into word pieces, keep some margin when truncating (400 here)
data_bundle.datasets['train'].drop(lambda x:len(x['words'])>400)
data_bundle.datasets['dev'].drop(lambda x:len(x['words'])>400)
data_bundle.datasets['test'].drop(lambda x:len(x['words'])>400)
bert_embed = BertEmbedding(data_bundle.vocabs['words'], requires_grad=False,
model_dir_or_name="en-base-uncased")
model = BiLSTMSentiment(bert_embed, len(data_bundle.vocabs['target']))
Trainer(data_bundle.datasets['train'], model, optimizer=None, loss=CrossEntropyLoss(), device=0,
batch_size=10, dev_data=data_bundle.datasets['dev'], metrics=AccuracyMetric()).train()
# Evaluate the model on the test set
Tester(data_bundle.datasets['test'], model, batch_size=32, metrics=AccuracyMetric()).test()
|
unittests/NAPI/js-native-api/test_symbol/binding.gyp | ScriptBox99/microsoft-hermes-windows | 1,666 | 11119339 | {
"targets": [
{
"target_name": "test_symbol",
"sources": [
"../entry_point.c",
"test_symbol.c"
]
}
]
}
|
Note-5 DQN与HS300指数择时/D3QN_Scale/params.py | summerRainn/DeepLearningNotes | 345 | 11119383 | ACTION_SIZE = 3
MAX_GRAD_NORM = 10
INPUT_SHAPE = [None, 50, 58, 5]
MEMORY_SIZE = 2048
BATCH_SIZE = 128
GAMMA = 0.9
TARGET_STEP_SIZE = 512
TRAIN_STEP_SIZE = 32
# ENTROPY_BETA = 0.1
# POLICY_BETA = 1
# VALUE_BETA = 1
# ACTOR_NORM_BETA = 1e-3
# CRITIC_NORM_BETA = 0.1
|
office__word__doc_docx/table__hyperlink_cell.py | DazEB2/SimplePyScripts | 117 | 11119413 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
# pip install python-docx
import docx
from hyperlink import add_hyperlink
headers = ('NAME', 'DESCRIPTION')
rows = [
('php', 'PHP — скриптовый язык программирования общего назначения, активно применяемый для разработки веб-приложений. Используйте эту метку, если у Вас возникли вопросы по применению данного языка или о самом языке.'),
('javascript', 'JavaScript (не путать с Java) — динамический, интерпретируемый язык со слабой типизацией, обычно используемый для написания скриптов на стороне клиента. Эта метка предназначена для вопросов, связанных с ECMAScript, его различными диалектами и реализациями (за исключением ActionScript). Если нет меток, относящихся к фреймворкам, предполагается, что код в ответах также не должен требовать сторонних библиотек.'),
('java', 'Java (не путать с JavaScript) — строго типизированный объектно-ориентированный язык программирования. Приложения Java обычно транслируются в специальный байт-код, поэтому они могут работать на любой компьютерной архитектуре ,с помощью виртуальной Java-машины (JVM). Используйте эту метку для вопросов, относящихся к языку Java или инструментам из платформы Java.'),
('android', 'Android — это операционная система от Google, основанная на ядре Linux, для цифровых устройств: телефонов, планшетов, автомобилей, телевизоров, часов и очков Google Glass. Пожалуйста, используйте специфические для Android метки, например [android-intent], а не просто [intent].'),
('c#', 'C# (произносится «си шарп») — мультипарадигменный язык программирования, флагманский язык фреймворка .NET. В основе его лежит объектно-ориентированный подход, но он поддерживает элементы функционального программирования, взаимодействие с COM, нативным кодом и скриптовыми языками. Язык и платформа .NET обладают огромной стандартной библиотекой, а также многочисленными фреймворками.'),
('html', 'Стандартный язык разметки гипертекста, повсеместно применяемый в интернете. Последняя действующая версия спецификации HTML5.1. Как правило, используется совместно с шаблонизатором, серверными и клиентскими скриптами, каскадными таблицами стилей для динамической генерации удобочитаемых веб страниц из информации, хранящейся в БД сервера. '),
('c++', 'C++ — язык программирования общего назначения, синтаксис которого основан на языке C.'),
('jquery', 'Популярная JavaScript-библиотека. Метка применяетcя к вопросам использования JQuery и ее дополнений. Должна быть дополнена меткой [javascript].'),
('css', 'CSS, или каскадные таблицы стилей, — формальный язык описания внешнего вида документа, язык стилей, определяющий отображение HTML-документов.'),
('mysql', 'Популярная реляционная СУБД, полностью поддерживающая стандартный SQL. Используйте метку только для вопросов, специфических для MySQL, иначе используйте [sql].')
]
document = docx.Document()
table = document.add_table(rows=1, cols=len(headers), style='Table Grid')
heading_cells = table.rows[0].cells
for i, value in enumerate(headers):
# heading_cells[i].text = value
# Bold column
heading_cells[i].paragraphs[0].add_run(value).bold = True
for row in rows:
cells = table.add_row().cells
name, description = row
p = cells[0].paragraphs[0]
add_hyperlink(p, "https://ru.stackoverflow.com/tags/{}/info".format(name), name)
cells[1].text = description
# Save
file_name_doc = 'word.docx'
document.save(file_name_doc)
# Open file
import os
os.startfile(file_name_doc)
|
__other__/points-of-hundreds/main.py | whitmans-max/python-examples | 140 | 11119440 | def convert(text):
parts = []
while text:
parts.insert(0, text[-3:])
text = text[:-3]
return '.'.join(parts)
print(convert(str(123)))
print(convert(str(1234)))
print(convert(str(12345)))
print(convert(str(123456)))
print(convert(str(1234567)))
'''
123
1.234
12.345
123.456
1.234.567
'''
|
code/BootEA.py | kongmoumou/BootEA | 131 | 11119500 | import sys
import time
from train_funcs import get_model, generate_related_mat, train_tris_k_epo, train_alignment_1epo
from train_bp import bootstrapping, likelihood
from model import P
import utils as ut
def train(folder):
ori_triples1, ori_triples2, triples1, triples2, model = get_model(folder)
hits1 = None
labeled_align = set()
ents1, ents2 = None, None
# related_mat = generate_related_mat(folder, triples1, triples2, model.ref_ent1, model.ref_ent2)
related_mat = None
if P.epsilon > 0:
trunc_ent_num = int(len(ori_triples1.ent_list) * (1 - P.epsilon))
assert trunc_ent_num > 0
print("trunc ent num:", trunc_ent_num)
else:
trunc_ent_num = 0
assert not trunc_ent_num > 0
if "15" in folder:
for t in range(1, 50 + 1):
print("iteration ", t)
train_tris_k_epo(model, triples1, triples2, 5, trunc_ent_num, None, None)
train_alignment_1epo(model, triples1, triples2, ents1, ents2, 1)
train_tris_k_epo(model, triples1, triples2, 5, trunc_ent_num, None, None)
labeled_align, ents1, ents2 = bootstrapping(model, related_mat, labeled_align)
train_alignment_1epo(model, triples1, triples2, ents1, ents2, 1)
hits1 = model.test(selected_pairs=labeled_align)
likelihood(model, labeled_align)
model.test(selected_pairs=labeled_align)
ut.pair2file(folder + "results_BootEA_trunc" + str(P.epsilon), hits1)
else:
for t in range(1, 50 + 1):
print("iteration ", t)
train_tris_k_epo(model, triples1, triples2, 5, trunc_ent_num, None, None, is_test=False)
train_alignment_1epo(model, triples1, triples2, ents1, ents2, 1)
train_tris_k_epo(model, triples1, triples2, 5, trunc_ent_num, None, None, is_test=False)
labeled_align, ents1, ents2 = bootstrapping(model, related_mat, labeled_align)
train_alignment_1epo(model, triples1, triples2, ents1, ents2, 1)
if t % 5 == 0 or t == 49:
hits1 = model.test(selected_pairs=labeled_align)
ut.pair2file(folder + "results_BootEA_trunc" + str(P.epsilon), hits1)
model.save(folder, "BootEA_trunc" + str(P.epsilon))
if __name__ == '__main__':
t = time.time()
if len(sys.argv) == 2:
folder = sys.argv[1]
else:
folder = '../dataset/DBP15K/zh_en/mtranse/0_3/'
# folder = '../dataset/DWY100K/dbp_wd/mapping/0_3/'
train(folder)
print("total time = {:.3f} s".format(time.time() - t))
|
cdlib/datasets/__init__.py | TnTo/cdlib | 248 | 11119532 | from .remote import *
|
observations/r/cuckoohosts.py | hajime9652/observations | 199 | 11119537 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import numpy as np
import os
import sys
from observations.util import maybe_download_and_extract
def cuckoohosts(path):
"""Comparison of cuckoo eggs with host eggs
These data compare mean length, mean breadth, and egg color, between
cuckoos and their hosts.
A data frame with 10 observations on the following 12 variables.
clength
mean length of cuckoo eggs in given host's nest
cl.sd
standard deviation of cuckoo egg lengths
cbreadth
mean breadth of cuckoo eggs in given host's nest
cb.sd
standard deviation of cuckoo egg breadths
cnum
number of cuckoo eggs
hlength
length of host eggs
hl.sd
standard deviation of host egg lengths
hbreadth
breadth of host eggs
hb.sd
standard deviation of host egg breadths
hnum
number of host eggs
match
number of eggs where color matched
nomatch
number where color did not match
<NAME>., 1902. The egg of *cuculus canorus*. an inquiry into the
dimensions of the cuckoo's egg and the relation of the variations to the
size of the eggs of the foster-parent, with notes on coloration, &c.
*Biometrika*, 1:164–176.
Args:
path: str.
Path to directory which either stores file or otherwise file will
be downloaded and extracted there.
Filename is `cuckoohosts.csv`.
Returns:
Tuple of np.ndarray `x_train` with 10 rows and 12 columns and
dictionary `metadata` of column headers (feature names).
"""
import pandas as pd
path = os.path.expanduser(path)
filename = 'cuckoohosts.csv'
if not os.path.exists(os.path.join(path, filename)):
url = 'http://dustintran.com/data/r/DAAG/cuckoohosts.csv'
maybe_download_and_extract(path, url,
save_file_name='cuckoohosts.csv',
resume=False)
data = pd.read_csv(os.path.join(path, filename), index_col=0,
parse_dates=True)
x_train = data.values
metadata = {'columns': data.columns}
return x_train, metadata
|
tests/SampleApps/python/stackoverflow-flask/app/main.py | samruddhikhandale/Oryx | 403 | 11119568 | from flask import Flask
app = Flask(__name__)
@app.route('/')
def index():
return app.send_static_file('index.html')
@app.route('/api/data')
def get_data():
return app.send_static_file('data.json')
if __name__ == '__main__':
app.run()
|
AdvancedElectricLongboard/OpenOCD/share/openocd/contrib/rpc_examples/ocd_rpc_example.py | AdvancedElectricLongboard/LongboardSTM32FW | 708 | 11119656 | #!/usr/bin/env python3
"""
OpenOCD RPC example, covered by GNU GPLv3 or later
Copyright (C) 2014 <NAME> (<EMAIL>)
Example output:
./ocd_rpc_example.py
echo says hi!
target state: halted
target halted due to debug-request, current mode: Thread
xPSR: 0x01000000 pc: 0x00000188 msp: 0x10000fd8
variable @ 0x10000000: 0x01c9c380
variable @ 0x10000000: 0xdeadc0de
memory (before): ['0xdeadc0de', '0x00000011', '0xaaaaaaaa', '0x00000023',
'0x00000042', '0x0000ffff']
memory (after): ['0x00000001', '0x00000000', '0xaaaaaaaa', '0x00000023',
'0x00000042', '0x0000ffff']
"""
import socket
import itertools
def strToHex(data):
return map(strToHex, data) if isinstance(data, list) else int(data, 16)
def hexify(data):
return "<None>" if data is None else ("0x%08x" % data)
def compareData(a, b):
for i, j, num in zip(a, b, itertools.count(0)):
if i != j:
print("difference at %d: %s != %s" % (num, hexify(i), hexify(j)))
class OpenOcd:
COMMAND_TOKEN = '\x1a'
def __init__(self, verbose=False):
self.verbose = verbose
self.tclRpcIp = "127.0.0.1"
self.tclRpcPort = 6666
self.bufferSize = 4096
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
def __enter__(self):
self.sock.connect((self.tclRpcIp, self.tclRpcPort))
return self
def __exit__(self, type, value, traceback):
try:
self.send("exit")
finally:
self.sock.close()
def send(self, cmd):
"""Send a command string to TCL RPC. Return the result that was read."""
data = (cmd + OpenOcd.COMMAND_TOKEN).encode("utf-8")
if self.verbose:
print("<- ", data)
self.sock.send(data)
return self._recv()
def _recv(self):
"""Read from the stream until the token (\x1a) was received."""
data = bytes()
while True:
chunk = self.sock.recv(self.bufferSize)
data += chunk
if bytes(OpenOcd.COMMAND_TOKEN, encoding="utf-8") in chunk:
break
if self.verbose:
print("-> ", data)
data = data.decode("utf-8").strip()
data = data[:-1] # strip trailing \x1a
return data
def readVariable(self, address):
raw = self.send("ocd_mdw 0x%x" % address).split(": ")
return None if (len(raw) < 2) else strToHex(raw[1])
def readMemory(self, wordLen, address, n):
self.send("array unset output") # better to clear the array before
self.send("mem2array output %d 0x%x %d" % (wordLen, address, n))
output = self.send("ocd_echo $output").split(" ")
return [int(output[2*i+1]) for i in range(len(output)//2)]
def writeVariable(self, address, value):
assert value is not None
self.send("mww 0x%x 0x%x" % (address, value))
def writeMemory(self, wordLen, address, n, data):
array = " ".join(["%d 0x%x" % (a, b) for a, b in enumerate(data)])
self.send("array unset 1986ве1т") # better to clear the array before
self.send("array set 1986ве1т { %s }" % array)
self.send("array2mem 1986ве1т 0x%x %s %d" % (wordLen, address, n))
if __name__ == "__main__":
def show(*args):
print(*args, end="\n\n")
with OpenOcd() as ocd:
ocd.send("reset")
show(ocd.send("ocd_echo \"echo says hi!\"")[:-1])
show(ocd.send("capture \"ocd_halt\"")[:-1])
# Read the first few words at the RAM region (put starting address of RAM
# region into 'addr')
addr = 0x10000000
value = ocd.readVariable(addr)
show("variable @ %s: %s" % (hexify(addr), hexify(value)))
ocd.writeVariable(addr, 0xdeadc0de)
show("variable @ %s: %s" % (hexify(addr), hexify(ocd.readVariable(addr))))
data = [1, 0, 0xaaaaaaaa, 0x23, 0x42, 0xffff]
wordlen = 32
n = len(data)
read = ocd.readMemory(wordlen, addr, n)
show("memory (before):", list(map(hexify, read)))
ocd.writeMemory(wordlen, addr, n, data)
read = ocd.readMemory(wordlen, addr, n)
show("memory (after):", list(map(hexify, read)))
compareData(read, data)
ocd.send("resume")
|
rotkehlchen/tests/api/test_adex.py | rotkehlchenio/rotkehlchen | 137 | 11119681 | import random
import warnings as test_warnings
from contextlib import ExitStack
from http import HTTPStatus
import pytest
import requests
from rotkehlchen.accounting.structures.balance import Balance
from rotkehlchen.chain.ethereum.modules.adex.types import Bond, ChannelWithdraw, Unbond
from rotkehlchen.chain.ethereum.types import string_to_ethereum_address
from rotkehlchen.constants.assets import A_ADX
from rotkehlchen.fval import FVal
from rotkehlchen.tests.utils.api import (
api_url_for,
assert_error_response,
assert_ok_async_response,
assert_proper_response_with_result,
assert_simple_ok_response,
wait_for_async_task,
)
from rotkehlchen.tests.utils.rotkehlchen import setup_balances
ADEX_TEST_ADDR = string_to_ethereum_address('0x8Fe178db26ebA2eEdb22575265bf10A63c395a3d')
@pytest.mark.parametrize('ethereum_accounts', [[ADEX_TEST_ADDR]])
@pytest.mark.parametrize('ethereum_modules', [['uniswap']])
def test_get_balances_module_not_activated(
rotkehlchen_api_server,
ethereum_accounts, # pylint: disable=unused-argument
):
response = requests.get(
api_url_for(rotkehlchen_api_server, 'adexbalancesresource'),
)
assert_error_response(
response=response,
contained_in_msg='adex module is not activated',
status_code=HTTPStatus.CONFLICT,
)
@pytest.mark.parametrize('ethereum_accounts', [[ADEX_TEST_ADDR]])
@pytest.mark.parametrize('ethereum_modules', [['adex']])
@pytest.mark.parametrize('start_with_valid_premium', [True])
def test_get_balances_premium(
rotkehlchen_api_server,
ethereum_accounts, # pylint: disable=unused-argument
):
"""Test get balances for premium users works as expected"""
async_query = random.choice([False, True])
rotki = rotkehlchen_api_server.rest_api.rotkehlchen
setup = setup_balances(
rotki,
ethereum_accounts=ethereum_accounts,
btc_accounts=None,
# original_queries=['adex_staking'],
extra_flags=['mocked_adex_staking_balance'],
)
with ExitStack() as stack:
# patch ethereum/etherscan to not autodetect tokens
setup.enter_ethereum_patches(stack)
response = requests.get(api_url_for(
rotkehlchen_api_server, 'adexbalancesresource'),
json={'async_query': async_query},
)
if async_query:
task_id = assert_ok_async_response(response)
outcome = wait_for_async_task(rotkehlchen_api_server, task_id)
assert outcome['message'] == ''
result = outcome['result']
else:
result = assert_proper_response_with_result(response)
if len(result) != 1:
test_warnings.warn(
UserWarning(f'Test account {ADEX_TEST_ADDR} has no balances'),
)
return
assert FVal(result[ADEX_TEST_ADDR][0]['adx_balance']['amount']) == FVal('113547.9817118382760270384899') # noqa: E501
assert result[ADEX_TEST_ADDR][0]['adx_balance']['usd_value'] is not None
@pytest.mark.skip('Needs to be fixed by Victor after the changes to the subgraph')
@pytest.mark.parametrize('ethereum_accounts', [[ADEX_TEST_ADDR]])
@pytest.mark.parametrize('ethereum_modules', [['adex']])
@pytest.mark.parametrize('start_with_valid_premium', [True])
@pytest.mark.parametrize('default_mock_price_value', [FVal(2)])
def test_get_events(rotkehlchen_api_server, ethereum_accounts): # pylint: disable=unused-argument
async_query = random.choice([False, True])
rotki = rotkehlchen_api_server.rest_api.rotkehlchen
setup = setup_balances(
rotki,
ethereum_accounts=ethereum_accounts,
btc_accounts=None,
original_queries=['zerion', 'logs', 'blocknobytime'],
)
with ExitStack() as stack:
# patch ethereum/etherscan to not autodetect tokens
setup.enter_ethereum_patches(stack)
response = requests.get(api_url_for(
rotkehlchen_api_server, 'adexhistoryresource'),
json={'async_query': async_query, 'to_timestamp': 1611747322},
)
if async_query:
task_id = assert_ok_async_response(response)
outcome = wait_for_async_task(rotkehlchen_api_server, task_id)
assert outcome['message'] == ''
result = outcome['result']
else:
result = assert_proper_response_with_result(response)
identity_address = '0x2a6c38D16BFdc7b4a20f1F982c058F07BDCe9204'
tom_pool_id = '0x2ce0c96383fb229d9776f33846e983a956a7d95844fac57b180ed0071d93bb28'
bond_id = '0x540cab9883923c01e657d5da4ca5674b6e4626b4a148224635495502d674c7c5'
channel_id = '0x30d87bab0ef1e7f8b4c3b894ca2beed41bbd54c481f31e5791c1e855c9dbf4ba'
result = result[ADEX_TEST_ADDR]
expected_events = [Bond(
tx_hash='0x9989f47c6c0a761f98f910ac24e2438d858be96c12124a13be4bb4b3150c55ea',
address=ADEX_TEST_ADDR,
identity_address=identity_address,
timestamp=1604366004,
bond_id=bond_id,
pool_id=tom_pool_id,
value=Balance(FVal(100000), FVal(200000)),
nonce=0,
slashed_at=0,
), ChannelWithdraw(
tx_hash='0xa9ee91af823c0173fc5ada908ff9fe3f4d7c84a2c9da795f0889b3f4ace75b13',
address=ADEX_TEST_ADDR,
identity_address=identity_address,
timestamp=1607453764,
channel_id=channel_id,
pool_id=tom_pool_id,
value=Balance(FVal('5056.894263641728544592'), FVal('10113.788527283457089184')),
token=A_ADX,
log_index=316,
), Unbond(
tx_hash='0xa9ee91af823c0173fc5ada908ff9fe3f4d7c84a2c9da795f0889b3f4ace75b13',
address=ADEX_TEST_ADDR,
identity_address=identity_address,
timestamp=1607453764,
bond_id=bond_id,
pool_id=tom_pool_id,
value=Balance(FVal(100000), FVal(200000)),
)]
assert len(result['events']) == 8
assert result['events'][:len(expected_events)] == [x.serialize() for x in expected_events]
assert 'staking_details' in result
# Make sure events end up in the DB
assert len(rotki.data.db.get_adex_events()) != 0
# test adex data purging from the db works
response = requests.delete(api_url_for(
rotkehlchen_api_server,
'namedethereummoduledataresource',
module_name='adex',
))
assert_simple_ok_response(response)
assert len(rotki.data.db.get_adex_events()) == 0
|
tests/test_cli.py | termim/geocoder | 1,506 | 11119684 | #!/usr/bin/env python
# coding: utf8
import subprocess
location = 'Ottawa, Ontario'
def test_cli_google():
assert not subprocess.call(['geocode', location, '--provider', 'google'])
def test_cli_osm():
assert not subprocess.call(['geocode', location, '--provider', 'osm'])
|
challenge_1/python/igniteflow/src/challenge_1.py | rchicoli/2017-challenges | 271 | 11119688 | #!/usr/bin/env python
"""
#Reverse a String
##Premise
- For this coding challenge, your task is to reverse a string for any given
string input.
Example: Given s = "hello", return "olleh".
- Try to make the solution as short and simple as possible in your respective
language of choice.
"""
def reverse_string(string):
return ''.join([string[idx - 1] for idx in range(len(string), 0, -1)])
if __name__ == '__main__':
str_to_reverse = 'easy as abc'
print 'Before: ', str_to_reverse
print 'After: ', reverse_string(str_to_reverse)
|
txdav/caldav/datastore/test/test_util.py | backwardn/ccs-calendarserver | 462 | 11119706 | ##
# Copyright (c) 2010-2017 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
"""
Tests for txdav.caldav.datastore.util.
"""
import textwrap
from twisted.trial.unittest import TestCase as BaseTestCase
from txweb2.http_headers import MimeType
from twisted.internet.defer import inlineCallbacks
from twistedcaldav.ical import Component
from twistedcaldav.test.util import TestCase
from txdav.common.datastore.test.util import populateCalendarsFrom, CommonCommonTests
from txdav.caldav.datastore.util import dropboxIDFromCalendarObject, \
StorageTransportBase, migrateHome
from txdav.common.icommondatastore import HomeChildNameAlreadyExistsError
class DropboxIDTests(TestCase):
"""
Test dropbox ID extraction from calendar data.
"""
class FakeCalendarResource(object):
"""
Fake object resource to work with tests.
"""
def __init__(self, data):
self.ical = Component.fromString(data)
def component(self):
return self.ical
def uid(self):
return self.ical.resourceUID()
@inlineCallbacks
def test_noAttachOrXdash(self):
resource = DropboxIDTests.FakeCalendarResource("""BEGIN:VCALENDAR
VERSION:2.0
BEGIN:VEVENT
UID:12345-67890
DTSTART:20071114T000000Z
ATTENDEE:mailto:<EMAIL>
ATTENDEE:mailto:<EMAIL>
END:VEVENT
END:VCALENDAR
""")
self.assertEquals(
(yield dropboxIDFromCalendarObject(resource)),
"12345-67890.dropbox"
)
@inlineCallbacks
def test_okXdash(self):
resource = DropboxIDTests.FakeCalendarResource("""BEGIN:VCALENDAR
VERSION:2.0
BEGIN:VEVENT
UID:12345-67890
DTSTART:20071114T000000Z
ATTENDEE:mailto:<EMAIL>
ATTENDEE:mailto:<EMAIL>
X-APPLE-DROPBOX:http://example.com/calendars/__uids__/1234/dropbox/12345-67890X.dropbox
END:VEVENT
END:VCALENDAR
""")
self.assertEquals(
(yield dropboxIDFromCalendarObject(resource)),
"12345-67890X.dropbox"
)
@inlineCallbacks
def test_emptyXdash(self):
resource = DropboxIDTests.FakeCalendarResource("""BEGIN:VCALENDAR
VERSION:2.0
BEGIN:VEVENT
UID:12345-67890
DTSTART:20071114T000000Z
ATTENDEE:mailto:<EMAIL>
ATTENDEE:mailto:<EMAIL>
X-APPLE-DROPBOX:
END:VEVENT
END:VCALENDAR
""")
self.assertEquals((yield dropboxIDFromCalendarObject(resource)), "12345-67890.dropbox")
@inlineCallbacks
def test_okAttach(self):
resource = DropboxIDTests.FakeCalendarResource("""BEGIN:VCALENDAR
VERSION:2.0
BEGIN:VEVENT
UID:12345-67890
DTSTART:20071114T000000Z
ATTENDEE:mailto:<EMAIL>
ATTENDEE:mailto:<EMAIL>
ATTACH;VALUE=URI:http://example.com/calendars/__uids__/1234/dropbox/12345-67890Y.dropbox/text.txt
END:VEVENT
END:VCALENDAR
""")
self.assertEquals(
(yield dropboxIDFromCalendarObject(resource)),
"12345-67890Y.dropbox"
)
@inlineCallbacks
def test_badAttach(self):
resource = DropboxIDTests.FakeCalendarResource("""BEGIN:VCALENDAR
VERSION:2.0
BEGIN:VEVENT
UID:12345-67890
DTSTART:20071114T000000Z
ATTENDEE:mailto:<EMAIL>
ATTENDEE:mailto:<EMAIL>
ATTACH;VALUE=URI:tag:bogus
END:VEVENT
END:VCALENDAR
""")
self.assertEquals(
(yield dropboxIDFromCalendarObject(resource)),
"12345-67890.dropbox"
)
@inlineCallbacks
def test_inlineAttach(self):
resource = DropboxIDTests.FakeCalendarResource("""BEGIN:VCALENDAR
VERSION:2.0
BEGIN:VEVENT
UID:12345-67890
DTSTART:20071114T000000Z
ATTENDEE:mailto:<EMAIL>
ATTENDEE:mailto:<EMAIL>
ATTACH:bmFzZTY0
END:VEVENT
END:VCALENDAR
""")
self.assertEquals(
(yield dropboxIDFromCalendarObject(resource)),
"12345-67890.dropbox"
)
@inlineCallbacks
def test_multipleAttach(self):
resource = DropboxIDTests.FakeCalendarResource("""BEGIN:VCALENDAR
VERSION:2.0
BEGIN:VEVENT
UID:12345-67890
DTSTART:20071114T000000Z
ATTENDEE:mailto:<EMAIL>
ATTENDEE:mailto:<EMAIL>@example.com
ATTACH;VALUE=URI:tag:bogus
ATTACH:bmFzZTY0
ATTACH;VALUE=URI:http://example.com/calendars/__uids__/1234/dropbox/12345-67890Z.dropbox/text.txt
END:VEVENT
END:VCALENDAR
""")
self.assertEquals(
(yield dropboxIDFromCalendarObject(resource)),
"12345-67890Z.dropbox"
)
@inlineCallbacks
def test_okAttachRecurring(self):
resource = DropboxIDTests.FakeCalendarResource("""BEGIN:VCALENDAR
VERSION:2.0
BEGIN:VEVENT
UID:12345-67890
DTSTART:20071114T000000Z
ATTENDEE:mailto:<EMAIL>
ATTENDEE:mailto:<EMAIL>
RRULE:FREQ=YEARLY
END:VEVENT
BEGIN:VEVENT
UID:12345-67890
RECURRENCE-ID:20081114T000000Z
DTSTART:20071114T000000Z
ATTENDEE:mailto:<EMAIL>
ATTENDEE:mailto:<EMAIL>
ATTACH;VALUE=URI:http://example.com/calendars/__uids__/1234/dropbox/12345-67890Y.dropbox/text.txt
END:VEVENT
END:VCALENDAR
""")
self.assertEquals(
(yield dropboxIDFromCalendarObject(resource)),
"12345-67890Y.dropbox"
)
@inlineCallbacks
def test_okAttachAlarm(self):
resource = DropboxIDTests.FakeCalendarResource("""BEGIN:VCALENDAR
VERSION:2.0
BEGIN:VEVENT
UID:12345-67890
DTSTART:20071114T000000Z
ATTENDEE:mailto:<EMAIL>
ATTENDEE:mailto:<EMAIL>
BEGIN:VALARM
ACTION:AUDIO
ATTACH;VALUE=URI:Ping
TRIGGER:-PT15M
X-WR-ALARMUID:5548D654-8FDA-49DB-8983-8FCAD1F322B1
END:VALARM
END:VEVENT
END:VCALENDAR
""")
self.assertEquals(
(yield dropboxIDFromCalendarObject(resource)),
"12345-67890.dropbox"
)
@inlineCallbacks
def test_UIDbadPath(self):
test_UIDs = (
("12345/67890", "12345-67890"),
("http://12345,67890", "12345,67890"),
("https://12345,67890", "12345,67890"),
("12345:67890", "1234567890"),
("12345.67890", "1234567890"),
("12345/6:7.890", "12345-67890"),
)
for uid, result in test_UIDs:
resource = DropboxIDTests.FakeCalendarResource("""BEGIN:VCALENDAR
VERSION:2.0
BEGIN:VEVENT
UID:%s
DTSTART:20071114T000000Z
ATTENDEE:mailto:<EMAIL>
ATTENDEE:mailto:<EMAIL>
END:VEVENT
END:VCALENDAR
""" % (uid,))
self.assertEquals(
(yield dropboxIDFromCalendarObject(resource)),
"%s.dropbox" % (result,),
)
class StorageTransportTests(TestCase):
def test_MissingContentType(self):
test_files = (
("plain.txt", MimeType.fromString("text/plain"),),
("word.doc", MimeType.fromString("application/msword"),),
("markup.html", MimeType.fromString("text/html"),),
("octet", MimeType.fromString("application/octet-stream"),),
("bogus.bog", MimeType.fromString("application/octet-stream"),),
)
class FakeAttachment(object):
def __init__(self, name):
self._name = name
def name(self):
return self._name
for filename, result in test_files:
item = StorageTransportBase(FakeAttachment(filename), None, None)
self.assertEquals(item._contentType, result)
self.assertEquals(item._dispositionName, None)
item = StorageTransportBase(FakeAttachment(filename), result, filename)
self.assertEquals(item._contentType, result)
self.assertEquals(item._dispositionName, filename)
class HomeMigrationTests(CommonCommonTests, BaseTestCase):
"""
Tests for L{migrateHome}.
"""
@inlineCallbacks
def setUp(self):
yield super(HomeMigrationTests, self).setUp()
yield self.buildStoreAndDirectory(
extraUids=(
u"conflict1",
u"conflict2",
u"empty_home",
u"non_empty_home",
)
)
@inlineCallbacks
def test_migrateEmptyHome(self):
"""
Migrating an empty home into an existing home should destroy all the
existing home's calendars.
"""
yield populateCalendarsFrom({
"empty_home": {
# Some of the upgrade logic will ensure that sufficient default
# calendars exist for basic usage, so this home is actually only
# *mostly* empty; the important thing is that the default
# calendar is removed.
"other-default-calendar": {}
},
"non_empty_home": {
"calendar": {},
"inbox": {},
# XXX: implementation is configuration-sensitive regarding the
# 'tasks' calendar and it shouldn't be.
"tasks": {},
"polls": {},
}
}, self.storeUnderTest())
txn = self.transactionUnderTest()
emptyHome = yield txn.calendarHomeWithUID("empty_home")
self.assertIdentical((yield emptyHome.calendarWithName("calendar")), None)
nonEmpty = yield txn.calendarHomeWithUID("non_empty_home")
yield migrateHome(emptyHome, nonEmpty)
yield self.commit()
txn = self.transactionUnderTest()
emptyHome = yield txn.calendarHomeWithUID("empty_home")
nonEmpty = yield txn.calendarHomeWithUID("non_empty_home")
self.assertIdentical((yield nonEmpty.calendarWithName("calendar")), None)
self.assertNotIdentical((yield nonEmpty.calendarWithName("inbox")), None)
self.assertNotIdentical((yield nonEmpty.calendarWithName("other-default-calendar")), None)
@staticmethod
def sampleEvent(uid, summary=None):
"""
Create the iCalendar text for a sample event that has no organizer nor
any attendees.
"""
if summary is None:
summary = "event " + uid
return textwrap.dedent(
"""\
BEGIN:VCALENDAR
VERSION:2.0
CALSCALE:GREGORIAN
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:{uid}
DTSTART;VALUE=DATE:20060201
DURATION:P1D
CREATED:20060101T210000Z
DTSTAMP:20051222T210146Z
LAST-MODIFIED:20051222T210203Z
SEQUENCE:1
SUMMARY:{summary}
TRANSP:TRANSPARENT
END:VEVENT
END:VCALENDAR
""".replace("\n", "\r\n").format(uid=uid, summary=summary)
), {}
@inlineCallbacks
def createConflicted(self, c1=None, c2=None):
"""
Create two calendar homes with calendars with the same names within
them. Parameters are both a mapping of calendar object names to
2-tuples of (iCalendar data, metadata).
@param c1: the calendar data for conflict1/conflicted/*
@param c2: the calendar data for conflict2/conflicted/*
"""
if c1 is None:
c1 = {"1.ics": self.sampleEvent("uid1")}
if c2 is None:
c2 = {"2.ics": self.sampleEvent("uid2")}
defaults = {"calendar": {}, "inbox": {}, "tasks": {}, "polls": {}}
def conflicted(caldata):
d = defaults.copy()
d.update(conflicted=caldata)
return d
yield populateCalendarsFrom({
"conflict1": conflicted(c1),
"conflict2": conflicted(c2),
}, self.storeUnderTest())
@inlineCallbacks
def test_migrateConflict(self):
"""
Migrating a home with conflicting (non-default) calendars will cause an
error.
"""
yield self.createConflicted()
txn = self.transactionUnderTest()
conflict1 = yield txn.calendarHomeWithUID("conflict1")
conflict2 = yield txn.calendarHomeWithUID("conflict2")
try:
yield migrateHome(conflict1, conflict2)
except HomeChildNameAlreadyExistsError:
pass
else:
self.fail("No exception raised.")
@inlineCallbacks
def test_migrateMergeCalendars(self):
"""
Migrating a home with a conflicting (non-default) calendar in merge
mode will cause the properties on the conflicting calendar to be
overridden by the new calendar of the same name, and calendar objects
to be copied over.
"""
yield self.createConflicted()
from txdav.base.propertystore.base import PropertyName
from txdav.xml import element as davxml
class StubConflictingElement(davxml.WebDAVTextElement):
namespace = "http://example.com/ns/stub-conflict"
name = "conflict"
beforeProp = StubConflictingElement.fromString("before")
afterProp = StubConflictingElement.fromString("after")
conflictPropName = PropertyName.fromElement(beforeProp)
txn = self.transactionUnderTest()
conflict1 = yield txn.calendarHomeWithUID("conflict1")
conflict2 = yield txn.calendarHomeWithUID("conflict2")
cal1 = yield conflict1.calendarWithName("conflicted")
cal2 = yield conflict2.calendarWithName("conflicted")
p1 = cal1.properties()
p2 = cal2.properties()
p1[conflictPropName] = afterProp
p2[conflictPropName] = beforeProp
yield migrateHome(conflict1, conflict2, merge=True)
self.assertEquals(p2[conflictPropName].children[0].data, "after")
obj1 = yield cal2.calendarObjectWithName("1.ics")
obj2 = yield cal2.calendarObjectWithName("2.ics")
# just a really cursory check to make sure they're really there.
self.assertEquals(obj1.uid(), "uid1")
self.assertEquals(obj2.uid(), "uid2")
@inlineCallbacks
def test_migrateMergeConflictingObjects(self):
"""
When merging two homes together, calendar objects may conflict in the
following ways:
First, an object may have the same name and the same UID as an object
in the target calendar. We assume the target object is always newer
than the source object, so this type of conflict will leave the source
object unmodified. This type of conflict is expected, and may happen
as a result of an implicitly scheduled event where the principal owning
the merged calendars is an attendee of the conflicting object, and
received a re-invitation.
Second, an object may have a different name, but the same UID as an
object in the target calendar. While this type of conflict is not
expected -- most clients will choose names for objects that correspond
to the iCalendar UIDs of their main component -- it is treated the same
way as the first conflict.
Third, an object may have the same UID as an object on a different
calendar in the target home. This may also happen if a scheduled event
was previously on a different (most likely non-default) calendar.
Technically this is actually valid, and it is possible to have the same
object in multiple calendars as long as the object is not scheduled;
however, that type of conflict is extremely unlikely as the client
would have to generate the same event twice.
Basically, in all expected cases, conflicts will only occur because an
update to a scheduled event was sent out and the target home accepted
it. Therefore, conflicts are always resolved in favor of ignoring the
source data and trusting that the target data is more reliable.
"""
# Note: these tests are all performed with un-scheduled data because it
# is simpler. Although the expected conflicts will involve scheduled
# data the behavior will be exactly the same.
yield self.createConflicted(
{
"same-name": self.sampleEvent("same-name", "source"),
"other-name": self.sampleEvent("other-uid", "source other"),
"other-calendar": self.sampleEvent("oc", "source calendar"),
"no-conflict": self.sampleEvent("no-conflict", "okay"),
},
{
"same-name": self.sampleEvent("same-name", "target"),
"different-name": self.sampleEvent("other-uid", "tgt other"),
},
)
txn = self.transactionUnderTest()
c2 = yield txn.calendarHomeWithUID("conflict2")
otherCal = yield c2.createCalendarWithName("othercal")
yield otherCal.createCalendarObjectWithName(
"some-name", Component.fromString(
self.sampleEvent("oc", "target calendar")[0]
)
)
yield self.commit()
txn = self.transactionUnderTest()
c1 = yield txn.calendarHomeWithUID("conflict1")
c2 = yield txn.calendarHomeWithUID("conflict2")
yield migrateHome(c1, c2, merge=True)
yield self.commit()
txn = self.transactionUnderTest()
c2 = yield txn.calendarHomeWithUID("conflict2")
targetCal = yield c2.calendarWithName("conflicted")
yield self.checkSummary("same-name", "target", targetCal)
yield self.checkSummary("different-name", "tgt other", targetCal)
yield self.checkSummary("other-calendar", None, targetCal)
yield self.checkSummary("other-name", None, targetCal)
yield self.checkSummary("no-conflict", "okay", targetCal)
yield self.checkSummary("oc", "target calendar", otherCal)
@inlineCallbacks
def checkSummary(self, name, summary, cal):
"""
Verify that the summary of the calendar object for the given name in
the given calendar matches.
"""
obj = yield cal.calendarObjectWithName(name)
if summary is None:
self.assertIdentical(obj, None,
name + " existed but shouldn't have")
else:
txt = ((yield obj.component()).mainComponent()
.getProperty("SUMMARY").value())
self.assertEquals(txt, summary)
@inlineCallbacks
def test_migrateMergeDontDeleteDefault(self):
"""
If we're doing a merge migration, it's quite possible that the user has
scheduled events onto their default calendar already. In fact the
whole point of a merge migration is to preserve data that might have
been created there. So, let's make sure that we I{don't} delete any
data from the default calendars in the case that we're merging.
"""
yield populateCalendarsFrom({
"empty_home": {
# see test_migrateEmptyHome above.
"other-default-calendar": {}
},
"non_empty_home": {
"calendar": {
"some-name": self.sampleEvent("some-uid", "some summary"),
}, "inbox": {}, "tasks": {}
}
}, self.storeUnderTest())
txn = self.transactionUnderTest()
emptyHome = yield txn.calendarHomeWithUID("empty_home")
self.assertIdentical((yield emptyHome.calendarWithName("calendar")),
None)
nonEmpty = yield txn.calendarHomeWithUID("non_empty_home")
yield migrateHome(emptyHome, nonEmpty, merge=True)
yield self.commit()
txn = self.transactionUnderTest()
emptyHome = yield txn.calendarHomeWithUID("empty_home")
nonEmpty = yield txn.calendarHomeWithUID("non_empty_home")
self.assertNotIdentical(
(yield nonEmpty.calendarWithName("inbox")), None
)
defaultCal = (yield nonEmpty.calendarWithName("calendar"))
self.assertNotIdentical(
(yield defaultCal.calendarObjectWithName("some-name")), None
)
|
tacker/sol_refactored/common/coordinate.py | h1r0mu/tacker | 116 | 11119752 |
# Copyright (C) 2021 Nippon Telegraph and Telephone Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import inspect
from oslo_log import log as logging
from tacker.common import coordination
from tacker.sol_refactored.common import exceptions as sol_ex
LOG = logging.getLogger(__name__)
# NOTE: This is used to prevent operations on the same vnf instance
# from being processed at the same time. It applies both between
# threads of a process and between different processes (e.g. tacker-server
# and tacker-conductor) on the same host.
# Note that very short race-condition windows are not considered.
def lock_vnf_instance(inst_arg, delay=False):
    # NOTE: tacker-server issues an RPC call to tacker-conductor
    # (just) before the lock is released. 'delay' allows tacker-conductor
    # to wait if it receives the RPC call before tacker-server
    # releases the lock.
def operation_lock(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
coord = coordination.COORDINATOR
# ensure coordination start
# NOTE: it is noop if already started.
coord.start()
sig = inspect.signature(func)
call_args = sig.bind(*args, **kwargs).arguments
inst_id = inst_arg.format(**call_args)
lock = coord.get_lock(inst_id)
blocking = False if not delay else 10
# NOTE: 'with lock' is not used since it can't handle
# lock failed exception well.
if not lock.acquire(blocking=blocking):
LOG.debug("Locking vnfInstance %s failed.", inst_id)
raise sol_ex.OtherOperationInProgress(inst_id=inst_id)
try:
LOG.debug("vnfInstance %s locked.", inst_id)
return func(*args, **kwargs)
finally:
lock.release()
return wrapper
return operation_lock
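# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). The lock key is
# built by formatting `inst_arg` with the decorated function's bound
# arguments, so the format string must reference one of its parameter names.
# The function and parameter names below are hypothetical:
#
#     @lock_vnf_instance('{inst_id}', delay=True)
#     def terminate(context, inst_id, terminate_req):
#         ...  # only one operation per vnf instance runs at a time
# ---------------------------------------------------------------------------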
|
test/files/column_operators_binops.py | stefano-maggiolo/sqlalchemy2-stubs | 106 | 11119812 |
from sqlalchemy import ARRAY
from sqlalchemy import Boolean
from sqlalchemy import Column
from sqlalchemy import Integer
from sqlalchemy import String
from sqlalchemy.orm import registry
from sqlalchemy.sql import ColumnElement
mapper_registry: registry = registry()
@mapper_registry.mapped
class A:
__tablename__ = "a"
id = Column(Integer, primary_key=True)
string = Column(String, primary_key=True)
arr = Column(ARRAY(Integer), primary_key=True)
lt1: "ColumnElement[Boolean]" = A.id > A.id
lt2: "ColumnElement[Boolean]" = A.id > 1
lt3: "ColumnElement[Boolean]" = 1 < A.id
le1: "ColumnElement[Boolean]" = A.id >= A.id
le2: "ColumnElement[Boolean]" = A.id >= 1
le3: "ColumnElement[Boolean]" = 1 <= A.id
eq1: "ColumnElement[Boolean]" = A.id == A.id
eq2: "ColumnElement[Boolean]" = A.id == 1
eq3: "ColumnElement[Boolean]" = 1 == A.id
ne1: "ColumnElement[Boolean]" = A.id != A.id
ne2: "ColumnElement[Boolean]" = A.id != 1
ne3: "ColumnElement[Boolean]" = 1 != A.id
gt1: "ColumnElement[Boolean]" = A.id < A.id
gt2: "ColumnElement[Boolean]" = A.id < 1
gt3: "ColumnElement[Boolean]" = 1 > A.id
ge1: "ColumnElement[Boolean]" = A.id <= A.id
ge2: "ColumnElement[Boolean]" = A.id <= 1
ge3: "ColumnElement[Boolean]" = 1 >= A.id
# contains
# TODO "in" doesn't seem to pick up the typing of __contains__?
# but also seems to be related to Array, as it works with Integer.
# error: Access to generic instance variables via class is ambiguous
# error: Incompatible types in assignment (expression has type "bool", variable has type "ColumnElement[Boolean]") # noqa
# contains1: "ColumnElement[Boolean]" = A.id in A.arr
lshift1: "ColumnElement[Boolean]" = A.id << A.id
lshift2: "ColumnElement[Boolean]" = A.id << 1
rshift1: "ColumnElement[Boolean]" = A.id >> A.id
rshift2: "ColumnElement[Boolean]" = A.id >> 1
concat1: "ColumnElement[String]" = A.string.concat(A.string)
concat2: "ColumnElement[String]" = A.string.concat(1)
concat3: "ColumnElement[String]" = A.string.concat("a")
like1: "ColumnElement[Boolean]" = A.string.like("test")
like2: "ColumnElement[Boolean]" = A.string.like("test", escape="/")
ilike1: "ColumnElement[Boolean]" = A.string.ilike("test")
ilike2: "ColumnElement[Boolean]" = A.string.ilike("test", escape="/")
in_: "ColumnElement[Boolean]" = A.id.in_([1, 2])
not_in: "ColumnElement[Boolean]" = A.id.not_in([1, 2])
not_like1: "ColumnElement[Boolean]" = A.string.not_like("test")
not_like2: "ColumnElement[Boolean]" = A.string.not_like("test", escape="/")
not_ilike1: "ColumnElement[Boolean]" = A.string.not_ilike("test")
not_ilike2: "ColumnElement[Boolean]" = A.string.not_ilike("test", escape="/")
is_: "ColumnElement[Boolean]" = A.string.is_("test")
is_not: "ColumnElement[Boolean]" = A.string.is_not("test")
startswith: "ColumnElement[Boolean]" = A.string.startswith("test")
endswith: "ColumnElement[Boolean]" = A.string.endswith("test")
contains: "ColumnElement[Boolean]" = A.string.contains("test")
match: "ColumnElement[Boolean]" = A.string.match("test")
regexp_match: "ColumnElement[Boolean]" = A.string.regexp_match("test")
regexp_replace: "ColumnElement[String]" = A.string.regexp_replace(
"pattern", "replacement"
)
between: "ColumnElement[Boolean]" = A.string.between("a", "b")
# TODO Not sure why we can safely assign this to a String?
# add1: "ColumnElement[String]" = A.id + A.id
add1: "ColumnElement[Integer]" = A.id + A.id
add2: "ColumnElement[Integer]" = A.id + 1
add3: "ColumnElement[Integer]" = 1 + A.id
sub1: "ColumnElement[Integer]" = A.id - A.id
sub2: "ColumnElement[Integer]" = A.id - 1
sub3: "ColumnElement[Integer]" = 1 - A.id
mul1: "ColumnElement[Integer]" = A.id * A.id
mul2: "ColumnElement[Integer]" = A.id * 1
mul3: "ColumnElement[Integer]" = 1 * A.id
div1: "ColumnElement[Integer]" = A.id / A.id
div2: "ColumnElement[Integer]" = A.id / 1
div3: "ColumnElement[Integer]" = 1 / A.id
mod1: "ColumnElement[Integer]" = A.id % A.id
mod2: "ColumnElement[Integer]" = A.id % 1
mod3: "ColumnElement[Integer]" = 1 % A.id
|
utils/__init__.py | ChrisSun06/Context-Aware-Consistency | 771 | 11119814 |
from .logger import Logger |
descarteslabs/vectors/feature.py | carderne/descarteslabs-python | 167 | 11119829 |
import copy
from shapely.geometry import shape
from descarteslabs.common.dotdict import DotDict
class Feature(object):
"""
An object matching the format of a GeoJSON Feature with geometry and properties.
Attributes
----------
geometry : shapely.geometry.Polygon or dict
If the Shapely package is installed, it will be a shapely shape,
otherwise a dict of a simple GeoJSON geometry type.
The geometry must be one of these GeoJSON types:
* Point
* MultiPoint
* Polygon
* MultiPolygon
* LineString
* MultiLineString
* GeometryCollection
properties : DotDict
Fields describing the geometry.
Values can be strings up to 256 characters, numeric types, or ``None``.
"""
geojson_type = "Feature"
def __init__(self, geometry, properties, id=None):
"""
Example
-------
>>> polygon = {
... 'type': 'Polygon',
... 'coordinates': [[[-95, 42], [-93, 42], [-93, 40], [-95, 41], [-95, 42]]]}
>>> properties = {"temperature": 70.13, "size": "large"}
>>> Feature(geometry=polygon, properties=properties) # doctest: +SKIP
Feature({
'geometry': {
'coordinates': (((-95.0, 42.0), (-93.0, 42.0), (-93.0, 40.0), (-95.0, 41.0), (-95.0, 42.0)),),
'type': 'Polygon'
},
'id': None,
'properties': {
'size': 'large',
'temperature': 70.13
}
})
"""
if geometry is None:
raise ValueError("geometry should not be None")
self.geometry = shape(geometry)
self.properties = DotDict(properties)
self.id = id
@classmethod
def _create_from_jsonapi(cls, response):
geometry = response.attributes.get("geometry")
properties = response.attributes.get("properties")
self = cls(geometry=geometry, properties=properties, id=response.id)
return self
@property
def geojson(self):
"""
Returns the ``Feature`` as a GeoJSON dict.
Returns
-------
dict
GeoJSON Feature as a dict with the following keys:
.. highlight:: none
::
geometry: GeoJSON geometry object. A dict with the following
keys:
coordinates: Coordinates of the GeoJSON object. A list,
list(list) or list(list(list)) depending on
the type of the geometry object.
type: GeoJSON object type.
properties: A dict with feature properties.
type: "Feature"
Example
-------
>>> polygon = {
... 'type': 'Polygon',
... 'coordinates': [[[-95, 42], [-93, 42], [-93, 40], [-95, 41], [-95, 42]]]}
>>> properties = {"temperature": 70.13, "size": "large", "tags": None}
>>> feature = Feature(geometry=polygon, properties=properties)
>>> feature.geojson # doctest: +SKIP
{'geometry': {'coordinates': (((-95.0, 42.0),
(-93.0, 42.0),
(-93.0, 40.0),
(-95.0, 41.0),
(-95.0, 42.0)),),
'type': 'Polygon'},
'id': None,
'properties': {
'size': 'large',
'temperature': 70.13
},
'type': 'Feature'
}
"""
properties = copy.deepcopy(self.properties)
geometry = self.geometry.__geo_interface__
return dict(
properties=properties, geometry=geometry, id=self.id, type=self.geojson_type
)
@property
def __geo_interface__(self):
return self.geojson["geometry"]
def _repr_json_(self):
return DotDict(self.geojson)
def __repr__(self):
return "Feature({})".format(repr(DotDict(self.geojson)))
|
galileo/framework/tf/python/layers/feature_combiner.py | YaoPu2021/galileo | 115 | 11119842 | # Copyright 2020 JD.com, Inc. Galileo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import tensorflow as tf
from tensorflow.keras.layers import Layer, Dense
from galileo.framework.tf.python.layers.feature_encoder import (
DenseFeatureEncoder,
SparseFeatureEncoder,
)
from galileo.platform.export import export
@export('galileo.tf')
class FeatureCombiner(Layer):
r'''
FeatureCombiner combine dense and sparse feature
args:
dense_feature_dims: int or list[int], dense feature dims
sparse_feature_maxs: int or list[int], the max value in sparse
feature set, used to set embedding size
sparse_feature_embedding_dims: int or list[int], for sub layer embedding
hidden_dim: must be specified when add feature
        feature_combiner: str in ['concat', 'add'], combine strategy,
            i.e. how to combine features when multiple features are used
            concat: concatenate all dense and sparse features and
                optionally encode to hidden_dim when hidden_dim is given
            add: encode dense and sparse features to hidden_dim and add them
    '''
def __init__(self,
dense_feature_dims=None,
sparse_feature_maxs=None,
sparse_feature_embedding_dims=None,
hidden_dim: int = None,
feature_combiner: str = 'concat',
**kwargs):
super().__init__(**kwargs)
if dense_feature_dims is None and sparse_feature_maxs is None:
raise ValueError('one of dense or sparse feature '
'must be specified')
if feature_combiner not in ['add', 'concat']:
raise ValueError('feature_combiner is either "add" or "concat".')
if feature_combiner == 'add' and hidden_dim is None:
raise ValueError('hidden_dim must be specified when add feature.')
dense_feature_dims = dense_feature_dims or []
sparse_feature_maxs = sparse_feature_maxs or []
sparse_feature_embedding_dims = sparse_feature_embedding_dims or []
self.feature_combiner = feature_combiner
self.hidden_dim = hidden_dim
if isinstance(dense_feature_dims, int):
dense_feature_dims = [dense_feature_dims]
if feature_combiner == 'add':
# add combiner use a same hidden_dim
sparse_feature_embedding_dims = hidden_dim
if isinstance(sparse_feature_maxs, int):
sparse_feature_maxs = [sparse_feature_maxs]
if isinstance(sparse_feature_embedding_dims, int):
sparse_feature_embedding_dims = [sparse_feature_embedding_dims
] * len(sparse_feature_maxs)
assert len(sparse_feature_maxs) == len(sparse_feature_embedding_dims)
self.dense_feature_encoder = None
if dense_feature_dims and feature_combiner == 'add':
self.dense_feature_encoder = DenseFeatureEncoder(
dense_feature_dims, hidden_dim)
self.sparse_feature_encoder = None
if sparse_feature_maxs and sparse_feature_embedding_dims:
self.sparse_feature_encoder = SparseFeatureEncoder(
sparse_feature_maxs, sparse_feature_embedding_dims)
self.fc = None
if feature_combiner == 'concat' and hidden_dim:
self.fc = Dense(hidden_dim, use_bias=False)
self.dense_feature_dims = dense_feature_dims
self.sparse_feature_maxs = sparse_feature_maxs
self.sparse_feature_embedding_dims = sparse_feature_embedding_dims
def call(self, inputs):
r'''
\param inputs list/dict
\return tensor
'''
if isinstance(inputs, (list, tuple)):
dense_feature, sparse_feature = inputs[:2]
elif isinstance(inputs, dict):
dense_feature = inputs.get('dense')
sparse_feature = inputs.get('sparse')
else:
dense_feature, sparse_feature = inputs, None
features = []
if dense_feature is not None:
if isinstance(dense_feature, (list, tuple)):
dense_feature = tf.concat(dense_feature, axis=-1)
if self.dense_feature_encoder:
dense_feature = self.dense_feature_encoder(dense_feature)
features.append(dense_feature)
if sparse_feature is not None and self.sparse_feature_encoder:
sparse_embeddings = self.sparse_feature_encoder(sparse_feature)
features.extend(sparse_embeddings)
if self.feature_combiner == 'add':
feature = tf.add_n(features)
else:
feature = tf.concat(features, axis=-1)
if self.fc is not None:
feature = self.fc(feature)
return feature
def get_config(self):
config = super().get_config()
config.update(
dict(
dense_feature_dims=self.dense_feature_dims,
sparse_feature_maxs=self.sparse_feature_maxs,
sparse_feature_embedding_dims=self.
sparse_feature_embedding_dims,
hidden_dim=self.hidden_dim,
feature_combiner=self.feature_combiner,
))
return config
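# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module); the dims,
# vocabulary sizes and input layout below are made-up values:
#
#     combiner = FeatureCombiner(dense_feature_dims=[8, 16],
#                                sparse_feature_maxs=[100, 50],
#                                hidden_dim=64,
#                                feature_combiner='add')
#     # 'add' encodes the dense features and each sparse embedding to
#     # hidden_dim (64) and sums them into a single [batch, 64] tensor.
#     out = combiner({'dense': dense_tensor, 'sparse': sparse_id_tensors})
# ---------------------------------------------------------------------------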
|
access_undenied_aws/__init__.py | JonHolman/access-undenied-aws | 109 | 11119844 |
import logging
logger = logging.getLogger("access-undenied-aws")
|
mechanics/swig/tests/test_siconos_mechanisms.py | ljktest/siconos | 137 | 11119881 | #!/usr/bin/env python
def test_slider_crank():
"""Run siconos_mechanisms for bodydef and local options of slider crank
"""
import siconos.tests.siconos_mechanisms
|
src/DynamixelSDK/python/tests/protocol2_0/indirect_address.py | ERP1234/5DOF_Robot_arm | 361 | 11119942 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
################################################################################
# Copyright 2017 ROBOTIS CO., LTD.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
#*******************************************************************************
#*********************** Indirect Address Example ***********************
# Required Environment to run this example :
# - Protocol 2.0 supported DYNAMIXEL(X, P, PRO/PRO(A), MX 2.0 series)
# - DYNAMIXEL Starter Set (U2D2, U2D2 PHB, 12V SMPS)
# How to use the example :
#  - Select the DYNAMIXEL in use with the MY_DXL variable in the example code.
# - Build and Run from proper architecture subdirectory.
# - For ARM based SBCs such as Raspberry Pi, use linux_sbc subdirectory to build and run.
# - https://emanual.robotis.com/docs/en/software/dynamixel/dynamixel_sdk/overview/
# Author: <NAME> (Leon)
# Maintainer : Zerom, <NAME>
# *******************************************************************************
import os
if os.name == 'nt':
import msvcrt
def getch():
return msvcrt.getch().decode()
else:
import sys, tty, termios
fd = sys.stdin.fileno()
old_settings = termios.tcgetattr(fd)
def getch():
try:
tty.setraw(sys.stdin.fileno())
ch = sys.stdin.read(1)
finally:
termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
return ch
from dynamixel_sdk import * # Uses Dynamixel SDK library
#********* DYNAMIXEL Model definition *********
#***** (Use only one definition at a time) *****
MY_DXL = 'X_SERIES' # X330 (5.0 V recommended), X430, X540, 2X430
# MY_DXL = 'MX_SERIES' # MX series with 2.0 firmware update.
# MY_DXL = 'PRO_SERIES' # H54, H42, M54, M42, L54, L42
# MY_DXL = 'PRO_A_SERIES' # PRO series with (A) firmware update.
# MY_DXL = 'P_SERIES' # PH54, PH42, PM54
# MY_DXL = 'XL320' # [WARNING] Operating Voltage : 7.4V
# Control table address
if MY_DXL == 'X_SERIES' or MY_DXL == 'MX_SERIES':
ADDR_TORQUE_ENABLE = 64
ADDR_LED_RED = 65
LEN_LED_RED = 1 # Data Byte Length
ADDR_GOAL_POSITION = 116
LEN_GOAL_POSITION = 4 # Data Byte Length
ADDR_MOVING = 122
LEN_MOVING = 1 # Data Byte Length
ADDR_PRESENT_POSITION = 132
LEN_PRESENT_POSITION = 4 # Data Byte Length
ADDR_INDIRECTADDRESS_FOR_WRITE = 168
LEN_INDIRECTDATA_FOR_WRITE = 5 # Sum of Data of Length. i.e) LED (1 byte) + Goal Position data (4 bytes)
ADDR_INDIRECTADDRESS_FOR_READ = 178
LEN_INDIRECTDATA_FOR_READ = 5 # Sum of Data of Length. i.e) Moving (1 byte) + Present Position data (4 bytes)
ADDR_INDIRECTDATA_FOR_WRITE = 224
ADDR_INDIRECTDATA_FOR_READ = 229
DXL_MINIMUM_POSITION_VALUE = 0 # Refer to the Minimum Position Limit of product eManual
DXL_MAXIMUM_POSITION_VALUE = 4095 # Refer to the Maximum Position Limit of product eManual
BAUDRATE = 57600
elif MY_DXL == 'PRO_SERIES':
ADDR_INDIRECTADDRESS_FOR_WRITE = 49
ADDR_INDIRECTADDRESS_FOR_READ = 59
ADDR_TORQUE_ENABLE = 562 # Control table address is different in DYNAMIXEL model
ADDR_LED_RED = 563 # R.G.B Address: 563 (red), 564 (green), 565 (blue)
LEN_LED_RED = 1 # Data Byte Length
ADDR_GOAL_POSITION = 596
LEN_GOAL_POSITION = 4 # Data Byte Length
ADDR_MOVING = 610
LEN_MOVING = 1 # Data Byte Length
ADDR_PRESENT_POSITION = 611
LEN_PRESENT_POSITION = 4 # Data Byte Length
ADDR_INDIRECTDATA_FOR_WRITE = 634
LEN_INDIRECTDATA_FOR_WRITE = 5 # Sum of Data of Length. i.e) LED (1 byte) + Goal Position data (4 bytes)
ADDR_INDIRECTDATA_FOR_READ = 639
LEN_INDIRECTDATA_FOR_READ = 5 # Sum of Data of Length. i.e) Moving (1 byte) + Present Position data (4 bytes)
DXL_MINIMUM_POSITION_VALUE = -150000 # Refer to the Minimum Position Limit of product eManual
DXL_MAXIMUM_POSITION_VALUE = 150000 # Refer to the Maximum Position Limit of product eManual
BAUDRATE = 57600
elif MY_DXL == 'P_SERIES' or MY_DXL == 'PRO_A_SERIES':
ADDR_INDIRECTADDRESS_FOR_WRITE = 168
ADDR_INDIRECTADDRESS_FOR_READ = 178
ADDR_TORQUE_ENABLE = 512 # Control table address is different in DYNAMIXEL model
    ADDR_LED_RED = 513 # R.G.B Address: 513 (red), 514 (green), 515 (blue)
LEN_LED_RED = 1 # Data Byte Length
ADDR_GOAL_POSITION = 564
LEN_GOAL_POSITION = 4 # Data Byte Length
ADDR_MOVING = 570
LEN_MOVING = 1 # Data Byte Length
ADDR_PRESENT_POSITION = 580
LEN_PRESENT_POSITION = 4 # Data Byte Length
ADDR_INDIRECTDATA_FOR_WRITE = 634
LEN_INDIRECTDATA_FOR_WRITE = 5 # Sum of Data of Length. i.e) LED (1 byte) + Goal Position data (4 bytes)
ADDR_INDIRECTDATA_FOR_READ = 639
LEN_INDIRECTDATA_FOR_READ = 5 # Sum of Data of Length. i.e) Moving (1 byte) + Present Position data (4 bytes)
DXL_MINIMUM_POSITION_VALUE = -150000 # Refer to the Minimum Position Limit of product eManual
DXL_MAXIMUM_POSITION_VALUE = 150000 # Refer to the Maximum Position Limit of product eManual
BAUDRATE = 57600
# DYNAMIXEL Protocol Version (1.0 / 2.0)
# https://emanual.robotis.com/docs/en/dxl/protocol2/
PROTOCOL_VERSION = 2.0
# Factory default ID of all DYNAMIXEL is 1
DXL_ID = 1
# Use the actual port assigned to the U2D2.
# ex) Windows: "COM*", Linux: "/dev/ttyUSB*", Mac: "/dev/tty.usbserial-*"
DEVICENAME = '/dev/ttyUSB0'
TORQUE_ENABLE = 1 # Value for enabling the torque
TORQUE_DISABLE = 0 # Value for disabling the torque
DXL_MINIMUM_LED_VALUE = 0 # Dynamixel LED will light between this value
DXL_MAXIMUM_LED_VALUE = 1 # and this value
DXL_MOVING_STATUS_THRESHOLD = 20 # Dynamixel moving status threshold
index = 0
dxl_goal_position = [DXL_MINIMUM_POSITION_VALUE, DXL_MAXIMUM_POSITION_VALUE] # Goal position
dxl_led_value = [DXL_MINIMUM_LED_VALUE, DXL_MAXIMUM_LED_VALUE] # Dynamixel LED value
# Initialize PortHandler instance
# Set the port path
# Get methods and members of PortHandlerLinux or PortHandlerWindows
portHandler = PortHandler(DEVICENAME)
# Initialize PacketHandler instance
# Set the protocol version
# Get methods and members of Protocol1PacketHandler or Protocol2PacketHandler
packetHandler = PacketHandler(PROTOCOL_VERSION)
# Initialize GroupSyncWrite instance
groupSyncWrite = GroupSyncWrite(portHandler, packetHandler, ADDR_INDIRECTDATA_FOR_WRITE, LEN_INDIRECTDATA_FOR_WRITE)
# Initialize GroupSyncRead instace for Present Position
groupSyncRead = GroupSyncRead(portHandler, packetHandler, ADDR_INDIRECTDATA_FOR_READ, LEN_INDIRECTDATA_FOR_READ)
# Open port
if portHandler.openPort():
print("Succeeded to open the port")
else:
print("Failed to open the port")
print("Press any key to terminate...")
getch()
quit()
# Set port baudrate
if portHandler.setBaudRate(BAUDRATE):
print("Succeeded to change the baudrate")
else:
print("Failed to change the baudrate")
print("Press any key to terminate...")
getch()
quit()
# Disable Dynamixel Torque:
# Indirect addresses are not accessible while torque is enabled
dxl_comm_result, dxl_error = packetHandler.write1ByteTxRx(portHandler, DXL_ID, ADDR_TORQUE_ENABLE, TORQUE_DISABLE)
if dxl_comm_result != COMM_SUCCESS:
print("%s" % packetHandler.getTxRxResult(dxl_comm_result))
elif dxl_error != 0:
print("%s" % packetHandler.getRxPacketError(dxl_error))
else:
print("[ID:%03d] Dynamixel has been successfully connected" % DXL_ID)
# INDIRECTDATA parameter storages replace LED, goal position, present position and moving status storages
dxl_comm_result, dxl_error = packetHandler.write2ByteTxRx(portHandler, DXL_ID, ADDR_INDIRECTADDRESS_FOR_WRITE + 0, ADDR_GOAL_POSITION + 0)
if dxl_comm_result != COMM_SUCCESS:
print("%s" % packetHandler.getTxRxResult(dxl_comm_result))
elif dxl_error != 0:
print("%s" % packetHandler.getRxPacketError(dxl_error))
dxl_comm_result, dxl_error = packetHandler.write2ByteTxRx(portHandler, DXL_ID, ADDR_INDIRECTADDRESS_FOR_WRITE + 2, ADDR_GOAL_POSITION + 1)
if dxl_comm_result != COMM_SUCCESS:
print("%s" % packetHandler.getTxRxResult(dxl_comm_result))
elif dxl_error != 0:
print("%s" % packetHandler.getRxPacketError(dxl_error))
dxl_comm_result, dxl_error = packetHandler.write2ByteTxRx(portHandler, DXL_ID, ADDR_INDIRECTADDRESS_FOR_WRITE + 4, ADDR_GOAL_POSITION + 2)
if dxl_comm_result != COMM_SUCCESS:
print("%s" % packetHandler.getTxRxResult(dxl_comm_result))
elif dxl_error != 0:
print("%s" % packetHandler.getRxPacketError(dxl_error))
dxl_comm_result, dxl_error = packetHandler.write2ByteTxRx(portHandler, DXL_ID, ADDR_INDIRECTADDRESS_FOR_WRITE + 6, ADDR_GOAL_POSITION + 3)
if dxl_comm_result != COMM_SUCCESS:
print("%s" % packetHandler.getTxRxResult(dxl_comm_result))
elif dxl_error != 0:
print("%s" % packetHandler.getRxPacketError(dxl_error))
dxl_comm_result, dxl_error = packetHandler.write2ByteTxRx(portHandler, DXL_ID, ADDR_INDIRECTADDRESS_FOR_WRITE + 8, ADDR_LED_RED)
if dxl_comm_result != COMM_SUCCESS:
print("%s" % packetHandler.getTxRxResult(dxl_comm_result))
elif dxl_error != 0:
print("%s" % packetHandler.getRxPacketError(dxl_error))
dxl_comm_result, dxl_error = packetHandler.write2ByteTxRx(portHandler, DXL_ID, ADDR_INDIRECTADDRESS_FOR_READ + 0, ADDR_PRESENT_POSITION + 0)
if dxl_comm_result != COMM_SUCCESS:
print("%s" % packetHandler.getTxRxResult(dxl_comm_result))
elif dxl_error != 0:
print("%s" % packetHandler.getRxPacketError(dxl_error))
dxl_comm_result, dxl_error = packetHandler.write2ByteTxRx(portHandler, DXL_ID, ADDR_INDIRECTADDRESS_FOR_READ + 2, ADDR_PRESENT_POSITION + 1)
if dxl_comm_result != COMM_SUCCESS:
print("%s" % packetHandler.getTxRxResult(dxl_comm_result))
elif dxl_error != 0:
print("%s" % packetHandler.getRxPacketError(dxl_error))
dxl_comm_result, dxl_error = packetHandler.write2ByteTxRx(portHandler, DXL_ID, ADDR_INDIRECTADDRESS_FOR_READ + 4, ADDR_PRESENT_POSITION + 2)
if dxl_comm_result != COMM_SUCCESS:
print("%s" % packetHandler.getTxRxResult(dxl_comm_result))
elif dxl_error != 0:
print("%s" % packetHandler.getRxPacketError(dxl_error))
dxl_comm_result, dxl_error = packetHandler.write2ByteTxRx(portHandler, DXL_ID, ADDR_INDIRECTADDRESS_FOR_READ + 6, ADDR_PRESENT_POSITION + 3)
if dxl_comm_result != COMM_SUCCESS:
print("%s" % packetHandler.getTxRxResult(dxl_comm_result))
elif dxl_error != 0:
print("%s" % packetHandler.getRxPacketError(dxl_error))
dxl_comm_result, dxl_error = packetHandler.write2ByteTxRx(portHandler, DXL_ID, ADDR_INDIRECTADDRESS_FOR_READ + 8, ADDR_MOVING)
if dxl_comm_result != COMM_SUCCESS:
print("%s" % packetHandler.getTxRxResult(dxl_comm_result))
elif dxl_error != 0:
print("%s" % packetHandler.getRxPacketError(dxl_error))
# Enable Dynamixel Torque
dxl_comm_result, dxl_error = packetHandler.write1ByteTxRx(portHandler, DXL_ID, ADDR_TORQUE_ENABLE, TORQUE_ENABLE)
if dxl_comm_result != COMM_SUCCESS:
print("%s" % packetHandler.getTxRxResult(dxl_comm_result))
elif dxl_error != 0:
print("%s" % packetHandler.getRxPacketError(dxl_error))
# Add parameter storage for multiple values
dxl_addparam_result = groupSyncRead.addParam(DXL_ID)
if dxl_addparam_result != True:
print("[ID:%03d] groupSyncRead addparam failed" % DXL_ID)
quit()
while 1:
print("Press any key to continue! (or press ESC to quit!)")
if getch() == chr(0x1b):
break
# Allocate goal position value into byte array
param_indirect_data_for_write = [DXL_LOBYTE(DXL_LOWORD(dxl_goal_position[index])), DXL_HIBYTE(DXL_LOWORD(dxl_goal_position[index])), DXL_LOBYTE(DXL_HIWORD(dxl_goal_position[index])), DXL_HIBYTE(DXL_HIWORD(dxl_goal_position[index]))]
param_indirect_data_for_write.append(dxl_led_value[index])
# Add values to the Syncwrite parameter storage
dxl_addparam_result = groupSyncWrite.addParam(DXL_ID, param_indirect_data_for_write)
if dxl_addparam_result != True:
print("[ID:%03d]groupSyncWrite addparam failed" % DXL_ID)
quit()
# Syncwrite all
dxl_comm_result = groupSyncWrite.txPacket()
if dxl_comm_result != COMM_SUCCESS:
print("%s" % packetHandler.getTxRxResult(dxl_comm_result))
# Clear syncwrite parameter storage
groupSyncWrite.clearParam()
while 1:
# Syncread present position from indirectdata2
dxl_comm_result = groupSyncRead.txRxPacket()
if dxl_comm_result != COMM_SUCCESS:
print("%s" % packetHandler.getTxRxResult(dxl_comm_result))
# Check if groupsyncread data of Dynamixel present position value is available
dxl_getdata_result = groupSyncRead.isAvailable(DXL_ID, ADDR_INDIRECTDATA_FOR_READ, LEN_PRESENT_POSITION)
if dxl_getdata_result != True:
print("[ID:%03d] groupSyncRead getdata failed" % DXL_ID)
quit()
# Check if groupsyncread data of Dynamixel moving status is available
dxl_getdata_result = groupSyncRead.isAvailable(DXL_ID, ADDR_INDIRECTDATA_FOR_READ + LEN_PRESENT_POSITION, LEN_MOVING)
if dxl_getdata_result != True:
print("[ID:%03d] groupSyncRead getdata failed" % DXL_ID)
quit()
# Get Dynamixel present position value
dxl_present_position = groupSyncRead.getData(DXL_ID, ADDR_INDIRECTDATA_FOR_READ, LEN_PRESENT_POSITION)
# Get Dynamixel moving status value
dxl_moving = groupSyncRead.getData(DXL_ID, ADDR_INDIRECTDATA_FOR_READ + LEN_PRESENT_POSITION, LEN_MOVING)
print("[ID:%03d] GoalPos:%d PresPos:%d IsMoving:%d" % (DXL_ID, dxl_goal_position[index], dxl_present_position, dxl_moving))
if not (abs(dxl_goal_position[index] - dxl_present_position) > DXL_MOVING_STATUS_THRESHOLD):
break
# Change goal position
if index == 0:
index = 1
else:
index = 0
# Clear syncread parameter storage
groupSyncRead.clearParam()
# Disable Dynamixel Torque
dxl_comm_result, dxl_error = packetHandler.write1ByteTxRx(portHandler, DXL_ID, ADDR_TORQUE_ENABLE, TORQUE_DISABLE)
if dxl_comm_result != COMM_SUCCESS:
print("%s" % packetHandler.getTxRxResult(dxl_comm_result))
elif dxl_error != 0:
print("%s" % packetHandler.getRxPacketError(dxl_error))
# Close port
portHandler.closePort()
|
Company Specific Interview Questions/Google/Solutions/Python/google_find_odds.py | strangestroad/interview-techdev-guide | 320 | 11119961 |
t = int(input())                          # number of test cases
while t != 0:
    n = int(input())                      # array length (read but not used below)
    l = list(map(int, input().split()))
    l.sort()
    s = sorted(set(l))                    # distinct values in ascending order
    for i in range(0, len(s)):
        # print every value that occurs an odd number of times
        if l.count(s[i]) % 2 != 0:
            print(s[i], end=" ")
    print()
    t = t - 1
|
test/units/test_oci_app_catalog_listing_resource_version_facts.py | slmjy/oci-ansible-modules | 106 | 11119986 |
# Copyright (c) 2019, Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
import pytest
from nose.plugins.skip import SkipTest
from ansible.module_utils import six
from ansible.module_utils.oracle import oci_utils
from ansible.modules.cloud.oracle import oci_app_catalog_listing_resource_version_facts
try:
import oci
from oci.util import to_dict
from oci.core.models import AppCatalogListingResourceVersion
from oci.exceptions import ServiceError
except ImportError:
raise SkipTest(
"test_oci_app_catalog_listing_resource_version_facts.py requires `oci` module"
)
class FakeModule(object):
def __init__(self, **kwargs):
self.params = kwargs
def fail_json(self, *args, **kwargs):
self.exit_args = args
self.exit_kwargs = kwargs
raise Exception(kwargs["msg"])
def exit_json(self, *args, **kwargs):
self.exit_args = args
self.exit_kwargs = kwargs
@pytest.fixture()
def compute_client(mocker):
mock_compute_client = mocker.patch("oci.core.compute_client.ComputeClient")
return mock_compute_client.return_value
@pytest.fixture()
def list_all_resources_patch(mocker):
return mocker.patch.object(oci_utils, "list_all_resources")
@pytest.fixture()
def call_with_backoff_patch(mocker):
return mocker.patch.object(oci_utils, "call_with_backoff")
def get_app_catalog_listing_resource_version(**kwargs):
app_catalog_subscription = AppCatalogListingResourceVersion(
listing_id="ocid1.appcataloglisting.oc1..xxxxxEXAMPLExxxxx",
listing_resource_version="1.0",
)
for attr, val in six.iteritems(kwargs):
setattr(app_catalog_subscription, attr, val)
return app_catalog_subscription
def get_app_catalog_listing_resource_versions():
return [
get_app_catalog_listing_resource_version(
listing_id="ocid1.appcataloglisting.oc1..xxxxxEXAMPLExxxxx",
listing_resource_version="1.0",
),
get_app_catalog_listing_resource_version(
listing_id="ocid1.appcataloglisting.oc1..xxxxxEXAMPLExxxxx",
listing_resource_version="2.0",
),
]
def get_module(**kwargs):
params = {
"listing_id": "ocid1.appcataloglisting.oc1..xxxxxEXAMPLExxxxx",
"resource_version": "1.0",
}
params.update(kwargs)
module = FakeModule(**params)
return module
def get_response(status=200, headers=None, data=None, request=None):
if not headers:
headers = dict()
return oci.Response(status, headers, data, request)
def test_get_app_catalog_listing_resource_version_raises_service_error(
compute_client, call_with_backoff_patch
):
call_with_backoff_patch.side_effect = ServiceError(
500, "InternalServerError", dict(), "Internal Server Error"
)
with pytest.raises(ServiceError) as exc_info:
oci_app_catalog_listing_resource_version_facts.get_app_catalog_listing_resource_version(
compute_client, get_module()
)
se = exc_info.value
assert se.status == 500
assert se.code == "InternalServerError"
assert se.message == "Internal Server Error"
def test_get_app_catalog_listing_resource_version(
compute_client, call_with_backoff_patch
):
app_catalog_listing_resource_version = get_app_catalog_listing_resource_version()
call_with_backoff_patch.return_value = get_response(
data=app_catalog_listing_resource_version
)
module = get_module(resource_version="1.0")
result = oci_app_catalog_listing_resource_version_facts.get_app_catalog_listing_resource_version(
compute_client, module
)
assert len(result) == 1
call_with_backoff_patch.assert_called_once()
call_with_backoff_patch.assert_called_with(
compute_client.get_app_catalog_listing_resource_version,
listing_id=module.params["listing_id"],
resource_version=module.params["resource_version"],
)
assert result[0]["listing_id"] == app_catalog_listing_resource_version.listing_id
assert (
result[0]["listing_resource_version"]
== app_catalog_listing_resource_version.listing_resource_version
)
def test_list_app_catalog_listing_resource_versions_raises_service_error(
compute_client, list_all_resources_patch
):
list_all_resources_patch.side_effect = ServiceError(
500, "InternalServerError", dict(), "Internal Server Error"
)
with pytest.raises(ServiceError) as exc_info:
oci_app_catalog_listing_resource_version_facts.list_app_catalog_listing_resource_versions(
compute_client, get_module()
)
se = exc_info.value
assert se.status == 500
assert se.code == "InternalServerError"
assert se.message == "Internal Server Error"
def test_list_app_catalog_listing_resource_versions_when_no_listing_resource_versions_exist(
compute_client, list_all_resources_patch
):
module = get_module()
list_all_resources_patch.return_value = []
result = oci_app_catalog_listing_resource_version_facts.list_app_catalog_listing_resource_versions(
compute_client, module
)
list_all_resources_patch.assert_called_once()
list_all_resources_patch.assert_called_with(
compute_client.list_app_catalog_listing_resource_versions,
listing_id=module.params["listing_id"],
)
assert len(result) == 0
def test_list_app_catalog_listing_resource_versions_when_listing_resource_versions_exist(
compute_client, list_all_resources_patch, call_with_backoff_patch
):
module = get_module()
app_catalog_listing_resource_versions = get_app_catalog_listing_resource_versions()
list_all_resources_patch.return_value = app_catalog_listing_resource_versions
call_with_backoff_patch.side_effect = [
get_response(data=app_catalog_listing_resource_version)
for app_catalog_listing_resource_version in app_catalog_listing_resource_versions
]
result = oci_app_catalog_listing_resource_version_facts.list_app_catalog_listing_resource_versions(
compute_client, module
)
list_all_resources_patch.assert_called_once()
list_all_resources_patch.assert_called_with(
compute_client.list_app_catalog_listing_resource_versions,
listing_id=module.params["listing_id"],
)
assert len(result) == 2
assert list_all_resources_patch.call_count == 1
assert call_with_backoff_patch.call_count == 2
|
python/torch_mlir/dialects/torch/importer/jit_ir/torchscript_annotations.py | sogartar/torch-mlir | 213 | 11119999 | # Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
# Also available under a BSD-style license. See LICENSE.
from typing import List, Optional, Tuple
import torch
import torch_mlir
from torch_mlir.dialects.torch.importer.jit_ir import ClassAnnotator
# Decorators
# Currently, these decorators are very low-level and map 1:1 with
# methods on `ClassAnnotator`. Eventually, we expect there to
# be a more elaborate Python layer which allows all the different annotations
# to be expressed conveniently and gives clearer error reports when
# the annotations aren't acceptable.
# This module is kept separate from torch_mlir_e2e_test.torchscript.annotations so that
# the latter can be used from code without C++ dependencies, which would otherwise
# prevent us from using the test framework across environments.
# Utilities for extracting decorated information into ClassAnnotator.
def _recursively_extract_annotations(
module: torch.nn.Module, scripted: torch.jit.ScriptModule,
class_annotator: ClassAnnotator):
assert module.__class__.__name__ == scripted.original_name or (
isinstance(module, torch.jit.RecursiveScriptModule) and module is
scripted), "script module does not come from specified module"
# Extract information on methods.
for method_name, scripted_method in scripted.__dict__.items():
if not isinstance(scripted_method, torch.ScriptMethod):
continue
method = getattr(module, method_name)
if hasattr(method, '_torch_mlir_export'):
class_annotator.exportPath(scripted._c._type(), [method_name])
if hasattr(method, '_torch_mlir_arg_annotations'):
class_annotator.annotateArgs(
scripted._c._type(), [method_name],
method._torch_mlir_arg_annotations)
# Recurse.
for name, child in module.named_children():
scripted_child = getattr(scripted, name)
_recursively_extract_annotations(child, scripted_child,
class_annotator)
def extract_annotations(program: torch.nn.Module,
scripted: torch.jit.ScriptModule,
class_annotator: ClassAnnotator):
"""Populate the ClassAnnotator with annotations extracted from `program`."""
class_annotator.exportNone(scripted._c._type())
_recursively_extract_annotations(program, scripted, class_annotator)
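# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module); `MyModule` is a
# hypothetical torch.nn.Module whose methods carry decorators that set the
# `_torch_mlir_export` / `_torch_mlir_arg_annotations` attributes checked above:
#
#     program = MyModule()
#     scripted = torch.jit.script(program)
#     annotator = ClassAnnotator()
#     extract_annotations(program, scripted, annotator)
#     # `annotator` is now populated and can be handed to the jit_ir importer.
# ---------------------------------------------------------------------------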
|
tests/test_lookup.py | Asana/carbonate | 167 | 11120048 |
import unittest
from mock import Mock
from carbonate.cluster import Cluster
from carbonate.lookup import lookup
class LookupTest(unittest.TestCase):
def setUp(self):
self.config = Mock()
def test_lookup(self):
self.config.replication_factor = Mock(return_value=2)
self.config.destinations = Mock(
return_value=['192.168.9.13:2124:0', '192.168.9.15:2124:0',
'192.168.6.20:2124:0', '192.168.6.19:2124:0',
'192.168.6.16:2124:0']
)
self.cluster = Cluster(self.config)
assert lookup('metric.one', self.cluster) == \
['192.168.6.16:2124:0', '192.168.6.19:2124:0']
|
CAM_pytorch/data/MyDataSet.py | agnes-yang/PytorchNetHub | 274 | 11120060 |
#!/usr/bin/python
# -*- coding:utf-8 -*-
# power by Mr.Li
import os
from torch.utils import data
from torchvision import transforms as T
import cv2
import random
from utils.config import opt
class MyDataSet(data.Dataset):
'''
    Main goal: collect the paths of all images and split the data into training, validation and test sets
'''
def __init__(self, root, transforms=None, train=True, test=False):
        self.test = test # mode flag
        self.train = train
        self.root = root # dataset root path
        # read all images under the folder
if root!='':
pos_root=os.path.join(root, 'pos')
neg_root = os.path.join(root, 'neg')
pos_imgs = [os.path.join(pos_root, img) for img in os.listdir(pos_root)]
neg_imgs = [os.path.join(neg_root, img) for img in os.listdir(neg_root)]
imgs = pos_imgs + neg_imgs
            # shuffle the dataset
            random.shuffle(imgs)
        else:
            print('Dataset is empty???')
            imgs = []
        imgs_num = len(imgs)
        # split the dataset
if train:
self.imgs = imgs[:int(0.8 * imgs_num)]
else:
self.imgs = imgs[int(0.8 * imgs_num):]
        # transform the images (if no transform is specified, apply the default one)
        if transforms is None:
            normalize = T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
            if self.test or not train: # test and validation sets
                self.transforms = T.Compose([
                    T.ToTensor(),
                    normalize
                ])
            else: # training set
self.transforms = T.Compose([
T.ToTensor(),
normalize
])
def __getitem__(self, index):
'''
        Return the data of one image at a time
        '''
        # full path of the image
        img_path = self.imgs[index]
        # read the image
        img = cv2.imread(img_path)
        img = self.BGR2RGB(img) # the pretrained models shipped with pytorch expect RGB input
        img = cv2.resize(img, (64, 128))
        # transform the image
        img = self.transforms(img)
        # ground-truth label
        if 'neg' in img_path:
            label=0 # no person
        else:
            label=1 # person present
return img,label
def __len__(self):
return len(self.imgs)
def BGR2RGB(self, img):
return cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
def get_test_img(self):
        # read the image
        img_origin = cv2.imread(opt.test_img)
        img = self.BGR2RGB(img_origin) # the pretrained models shipped with pytorch expect RGB input
        img = cv2.resize(img, (64, 128))
        # transform the image
img = self.transforms(img)
return img_origin,img
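# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module); the dataset
# root path and batch size are assumptions:
#
#     train_set = MyDataSet(root='./datasets/person', train=True)
#     train_loader = data.DataLoader(train_set, batch_size=32, shuffle=True)
#     img, label = train_set[0]   # 3x128x64 float tensor, label 0 or 1
# ---------------------------------------------------------------------------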
|
Validation/CaloTowers/python/calotowersValidationSequence_cff.py | ckamtsikis/cmssw | 852 | 11120069 |
import FWCore.ParameterSet.Config as cms
from Validation.CaloTowers.CaloTowersParam_cfi import *
import Validation.CaloTowers.CaloTowersParam_cfi
AllCaloTowersValidation = Validation.CaloTowers.CaloTowersParam_cfi.calotowersAnalyzer.clone()
calotowersValidationSequence = cms.Sequence(AllCaloTowersValidation)
|
feedhq/feeds/management/commands/favicons.py | feedhq/feedhq | 361 | 11120072 | from . import SentryCommand
from ...models import enqueue_favicon, UniqueFeed
class Command(SentryCommand):
"""Fetches favicon updates and saves them if there are any"""
def add_arguments(self, parser):
parser.add_argument('--all', action='store_true', dest='all',
default=False,
help='Force update of all existing favicons')
def handle_sentry(self, *args, **kwargs):
urls = UniqueFeed.objects.filter(muted=False).values_list(
'url', flat=True).distinct()
for url in urls:
enqueue_favicon(url, force_update=kwargs['all'])
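# Illustrative invocation sketch (assumes the standard Django ``manage.py``
# entry point):
#
#     python manage.py favicons          # enqueue favicon checks for unmuted feeds
#     python manage.py favicons --all    # force an update of every existing favicon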
|
strategies/ichimokuStrat1.py | webclinic017/backtrader-pyqt-ui | 105 | 11120074 |
###############################################################################
#
# Copyright (C) 2021 - Skinok
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
import sys
import backtrader as bt
import metaStrategy as mt
# Create a subclass of Strategy to define the indicators and logic
class ichimokuStrat1(mt.MetaStrategy):
params = (
('atrperiod', 14), # ATR Period (standard)
('atrdist_x', 1.5), # ATR distance for stop price
('atrdist_y', 1.35), # ATR distance for take profit price
('tenkan', 9),
('kijun', 26),
('senkou', 52),
('senkou_lead', 26), # forward push
('chikou', 26), # backwards push
)
def notify_order(self, order):
if order.status == order.Completed:
#print("Order completed")
pass
if not order.alive():
self.order = None # indicate no order is pending
def __init__(self, *argv):
# used to modify parameters
super().__init__(argv[0])
# Ichi indicator
self.ichi = bt.indicators.Ichimoku(self.datas[0],
tenkan=self.params.tenkan,
kijun=self.params.kijun,
senkou=self.params.senkou,
senkou_lead=self.params.senkou_lead,
chikou=self.params.chikou)
# Cross of tenkan and kijun -
#1.0 if the 1st data crosses the 2nd data upwards - long
#-1.0 if the 1st data crosses the 2nd data downwards - short
self.tkcross = bt.indicators.CrossOver(self.ichi.tenkan_sen, self.ichi.kijun_sen)
# To set the stop price
self.atr = bt.indicators.ATR(self.data, period=self.p.atrperiod)
# Long Short ichimoku logic
self.long = bt.And( (self.data.close[0] > self.ichi.senkou_span_a(0)),
(self.data.close[0] > self.ichi.senkou_span_b(0)),
(self.tkcross == 1))
self.short = bt.And((self.data.close[0] < self.ichi.senkou_span_a(0)),
(self.data.close[0] < self.ichi.senkou_span_b(0)),
(self.tkcross == -1))
def start(self):
print(" Starting IchimokuStart1 strategy")
self.order = None # sentinel to avoid operrations on pending order
def next(self):
if self.order:
return # pending order execution
if not self.position: # not in the market
if self.short:
self.order = self.sell()
ldist = self.atr[0] * self.p.atrdist_x
self.lstop = self.data.close[0] + ldist
pdist = self.atr[0] * self.p.atrdist_y
self.take_profit = self.data.close[0] - pdist
if self.long:
self.order = self.buy()
ldist = self.atr[0] * self.p.atrdist_x
self.lstop = self.data.close[0] - ldist
pdist = self.atr[0] * self.p.atrdist_y
self.take_profit = self.data.close[0] + pdist
else: # in the market
pclose = self.data.close[0]
pstop = self.lstop # seems to be the bug
if ((pstop<pclose<self.take_profit)|(pstop>pclose>self.take_profit)):
self.close() # Close position
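# ---------------------------------------------------------------------------
# Worked example of the ATR-based exit levels computed in next() (made-up
# numbers): with atr[0] = 2.0, atrdist_x = 1.5 and atrdist_y = 1.35, a long
# entry at close = 100 places the stop at 100 - 2.0 * 1.5 = 97.0 and the take
# profit at 100 + 2.0 * 1.35 = 102.7; a short entry mirrors this (stop 103.0,
# take profit 97.3).
# ---------------------------------------------------------------------------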
|
f5/bigip/tm/cm/device.py | nghia-tran/f5-common-python | 272 | 11120077 | # coding=utf-8
#
# Copyright 2016 F5 Networks Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""BIG-IP® cluster device submodule
REST URI
``http://localhost/mgmt/tm/cm/device/``
GUI Path
``Device Management --> Devices``
REST Kind
``tm:cm:device:*``
"""
from f5.bigip.mixins import CommandExecutionMixin
from f5.bigip.resource import Collection
from f5.bigip.resource import Resource
class Devices(Collection, CommandExecutionMixin):
"""BIG-IP® cluster devices collection.
"""
def __init__(self, cm):
super(Devices, self).__init__(cm)
self._meta_data['allowed_lazy_attributes'] = [Device]
self._meta_data['attribute_registry'] =\
{'tm:cm:device:devicestate': Device}
self._meta_data['allowed_commands'].append('mv')
class Device(Resource):
"""BIG-IP® cluster device object.
"""
def __init__(self, device_s):
super(Device, self).__init__(device_s)
self._meta_data['required_json_kind'] = 'tm:cm:device:devicestate'
self._meta_data['required_creation_parameters'].update(('partition',))
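# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module); host and
# credentials are placeholders, and the collection accessor follows the usual
# f5-common-python pattern:
#
#     from f5.bigip import ManagementRoot
#     mgmt = ManagementRoot('192.0.2.10', 'admin', 'secret')
#     for device in mgmt.tm.cm.devices.get_collection():
#         print(device.name)
# ---------------------------------------------------------------------------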
|
unsupervisedRR/nnutils/pcreg_trainer.py | Sebastian-Jung/unsupervisedRR | 105 | 11120079 |
import torch
from ..models.model_util import nn_gather
from ..utils.losses import get_rgb_loss
from ..utils.metrics import evaluate_3d_correspondances, evaluate_pose_Rt
from .trainer import BasicTrainer
def batchify(x):
return x.flatten(start_dim=1).mean(dim=1)
class PCReg_Trainer(BasicTrainer):
def __init__(self, cfg):
super(PCReg_Trainer, self).__init__(cfg)
# Input setup
self.use_gt_vp = cfg.MODEL.use_gt_vp
assert cfg.DATASET.num_views == cfg.MODEL.num_views
self.num_views = cfg.MODEL.num_views
# set loss weights
train_cfg = cfg.TRAIN
self.render_loss_weight = train_cfg.rgb_render_loss_weight
self.decode_loss_weight = train_cfg.rgb_decode_loss_weight
self.corres_weight = train_cfg.correspondance_loss_weight
self.depth_weight = train_cfg.depth_loss_weight
def calculate_norm_dict(self):
max_norm = 1e10
norm_dict = {}
modules = ["encode", "decode"]
def grad_fn(name, module):
try:
p = module.parameters()
_grad = torch.nn.utils.clip_grad_norm_(p, max_norm)
norm_dict[name] = _grad.item()
except RuntimeError:
pass
_model = self.model
grad_fn("full_model", self.model)
for m in modules:
if hasattr(_model, m):
grad_fn(m, getattr(_model, m))
return norm_dict
def forward_batch(self, batch):
B, _, H, W = batch["rgb_0"].shape
gt_rgb = [batch[f"rgb_{i}"].to(self.device) for i in range(self.num_views)]
gt_dep = [batch[f"depth_{i}"].to(self.device) for i in range(self.num_views)]
gt_vps = [batch[f"Rt_{i}"].to(self.device) for i in range(self.num_views)]
K = batch["K"].to(self.device)
output = self.model(gt_rgb, K, gt_dep, vps=gt_vps if self.use_gt_vp else None)
loss, metrics = [], {}
# calculate losses
vis_loss = []
geo_loss = []
for i in range(self.num_views):
cover_i = output[f"cover_{i}"]
depth_i = output[f"ras_depth_{i}"]
rgb_gt_i = gt_rgb[i]
rgb_pr0_i = output[f"rgb_decode_{i}"]
rgb_pr1_i = output[f"rgb_render_{i}"]
# Appearance losses
w0, w1 = self.decode_loss_weight, self.render_loss_weight
vr0_loss_i, vr0_vis_i = get_rgb_loss(rgb_pr0_i, rgb_gt_i, cover_i)
vr1_loss_i, vr1_vis_i = get_rgb_loss(rgb_pr1_i, rgb_gt_i, cover_i)
vr_vis_i = w0 * vr0_vis_i + w1 * vr1_vis_i
vr_loss_i = w0 * vr0_loss_i + w1 * vr1_loss_i
# depth loss - simple L1 loss
depth_dif = (depth_i - gt_dep[i]).abs()
depth_dif = depth_dif * (gt_dep[i] > 0).float()
dc_loss_i = (cover_i * depth_dif).mean(dim=(1, 2, 3))
# aggregate losses
vis_loss.append(vr_loss_i)
geo_loss.append(dc_loss_i)
# Update some outputs
output[f"rgb-l1_{i}"] = vr_vis_i.detach().cpu()
# Add losses to metrics
metrics[f"loss-rgb-decode_{i}"] = vr0_loss_i.detach().cpu()
metrics[f"loss-rgb-render_{i}"] = vr1_loss_i.detach().cpu()
metrics[f"loss-depth_{i}"] = dc_loss_i.detach().cpu()
# Evaluate pose
if f"vp_{i}" in output:
p_metrics = evaluate_pose_Rt(output[f"vp_{i}"], gt_vps[i], scaled=False)
for _k in p_metrics:
metrics[f"{_k}_{i}"] = p_metrics[_k].detach().cpu()
            # Evaluate correspondences
if f"corres_0{i}" in output:
c_id0, c_id1, c_weight, _ = output[f"corres_0{i}"]
input_pcs = self.model.generate_pointclouds(K, gt_dep)
c_xyz_0 = nn_gather(input_pcs[0], c_id0)
c_xyz_i = nn_gather(input_pcs[1], c_id1)
vp_i = output[f"vp_{i}"]
cor_eval, cor_pix = evaluate_3d_correspondances(
c_xyz_0, c_xyz_i, K, vp_i, (H, W)
)
output[f"corres_0{i}_pixels"] = (cor_pix[0], cor_pix[1], c_weight)
for key in cor_eval:
metrics[f"{key}_{i}"] = cor_eval[key]
# ==== Loss Aggregation ====
        vs_loss = sum(vis_loss) # weighting already accounted for above
dc_loss = sum(geo_loss) * self.depth_weight
cr_loss = output["corr_loss"] * self.corres_weight
loss = vs_loss + dc_loss + cr_loss
# sum losses
metrics["losses_appearance"] = vs_loss
metrics["losses_geometric"] = dc_loss
metrics["losses_correspondance"] = cr_loss
metrics["losses_weight-sum"] = loss.detach().cpu()
loss = loss.mean()
return loss, metrics, output
|
alembic/versions/00026_34f427187628_add_rss_bits.py | awesome-archive/ReadableWebProxy | 193 | 11120082 | """Add rss bits!
Revision ID: 34f427187628
Revises: <PASSWORD>
Create Date: 2017-02-27 02:45:31.776790
"""
# revision identifiers, used by Alembic.
revision = '34f427187628'
down_revision = 'b88c4e0<PASSWORD>'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy_utils.types import TSVectorType
from sqlalchemy_searchable import make_searchable
import sqlalchemy_utils
# Patch in knowledge of the citext type, so it reflects properly.
from sqlalchemy.dialects.postgresql.base import ischema_names
import citext
import queue
import datetime
from sqlalchemy.dialects.postgresql import ENUM
from sqlalchemy.dialects.postgresql import JSON
from sqlalchemy.dialects.postgresql import TSVECTOR
ischema_names['citext'] = citext.CIText
from sqlalchemy.dialects import postgresql
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('rss_parser_feed_name_lut_version',
sa.Column('id', sa.BigInteger(), autoincrement=False, nullable=False),
sa.Column('feed_netloc', sa.Text(), autoincrement=False, nullable=True),
sa.Column('feed_name', sa.Text(), autoincrement=False, nullable=True),
sa.Column('transaction_id', sa.BigInteger(), autoincrement=False, nullable=False),
sa.Column('end_transaction_id', sa.BigInteger(), nullable=True),
sa.Column('operation_type', sa.SmallInteger(), nullable=False),
sa.PrimaryKeyConstraint('id', 'transaction_id')
)
op.create_index(op.f('ix_rss_parser_feed_name_lut_version_end_transaction_id'), 'rss_parser_feed_name_lut_version', ['end_transaction_id'], unique=False)
op.create_index(op.f('ix_rss_parser_feed_name_lut_version_feed_name'), 'rss_parser_feed_name_lut_version', ['feed_name'], unique=False)
op.create_index(op.f('ix_rss_parser_feed_name_lut_version_feed_netloc'), 'rss_parser_feed_name_lut_version', ['feed_netloc'], unique=False)
op.create_index(op.f('ix_rss_parser_feed_name_lut_version_id'), 'rss_parser_feed_name_lut_version', ['id'], unique=False)
op.create_index(op.f('ix_rss_parser_feed_name_lut_version_operation_type'), 'rss_parser_feed_name_lut_version', ['operation_type'], unique=False)
op.create_index(op.f('ix_rss_parser_feed_name_lut_version_transaction_id'), 'rss_parser_feed_name_lut_version', ['transaction_id'], unique=False)
op.create_table('rss_parser_funcs',
sa.Column('id', sa.BigInteger(), nullable=False),
sa.Column('version', sa.Integer(), nullable=True),
sa.Column('feed_name', sa.Text(), nullable=False),
sa.Column('enabled', sa.Boolean(), nullable=True),
sa.Column('func', sa.Text(), nullable=False),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_rss_parser_funcs_feed_name'), 'rss_parser_funcs', ['feed_name'], unique=True)
op.create_index(op.f('ix_rss_parser_funcs_id'), 'rss_parser_funcs', ['id'], unique=False)
op.create_table('rss_parser_funcs_version',
sa.Column('id', sa.BigInteger(), autoincrement=False, nullable=False),
sa.Column('version', sa.Integer(), autoincrement=False, nullable=True),
sa.Column('feed_name', sa.Text(), autoincrement=False, nullable=True),
sa.Column('enabled', sa.Boolean(), autoincrement=False, nullable=True),
sa.Column('func', sa.Text(), autoincrement=False, nullable=True),
sa.Column('transaction_id', sa.BigInteger(), autoincrement=False, nullable=False),
sa.Column('end_transaction_id', sa.BigInteger(), nullable=True),
sa.Column('operation_type', sa.SmallInteger(), nullable=False),
sa.PrimaryKeyConstraint('id', 'transaction_id')
)
op.create_index(op.f('ix_rss_parser_funcs_version_end_transaction_id'), 'rss_parser_funcs_version', ['end_transaction_id'], unique=False)
op.create_index(op.f('ix_rss_parser_funcs_version_feed_name'), 'rss_parser_funcs_version', ['feed_name'], unique=False)
op.create_index(op.f('ix_rss_parser_funcs_version_id'), 'rss_parser_funcs_version', ['id'], unique=False)
op.create_index(op.f('ix_rss_parser_funcs_version_operation_type'), 'rss_parser_funcs_version', ['operation_type'], unique=False)
op.create_index(op.f('ix_rss_parser_funcs_version_transaction_id'), 'rss_parser_funcs_version', ['transaction_id'], unique=False)
op.create_table('rss_parser_feed_name_lut',
sa.Column('id', sa.BigInteger(), nullable=False),
sa.Column('feed_netloc', sa.Text(), nullable=False),
sa.Column('feed_name', sa.Text(), nullable=False),
sa.ForeignKeyConstraint(['feed_name'], ['rss_parser_funcs.feed_name'], ),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('feed_netloc', 'feed_name')
)
op.create_index(op.f('ix_rss_parser_feed_name_lut_feed_name'), 'rss_parser_feed_name_lut', ['feed_name'], unique=False)
op.create_index(op.f('ix_rss_parser_feed_name_lut_feed_netloc'), 'rss_parser_feed_name_lut', ['feed_netloc'], unique=False)
op.create_index(op.f('ix_rss_parser_feed_name_lut_id'), 'rss_parser_feed_name_lut', ['id'], unique=False)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_rss_parser_feed_name_lut_id'), table_name='rss_parser_feed_name_lut')
op.drop_index(op.f('ix_rss_parser_feed_name_lut_feed_netloc'), table_name='rss_parser_feed_name_lut')
op.drop_index(op.f('ix_rss_parser_feed_name_lut_feed_name'), table_name='rss_parser_feed_name_lut')
op.drop_table('rss_parser_feed_name_lut')
op.drop_index(op.f('ix_rss_parser_funcs_version_transaction_id'), table_name='rss_parser_funcs_version')
op.drop_index(op.f('ix_rss_parser_funcs_version_operation_type'), table_name='rss_parser_funcs_version')
op.drop_index(op.f('ix_rss_parser_funcs_version_id'), table_name='rss_parser_funcs_version')
op.drop_index(op.f('ix_rss_parser_funcs_version_feed_name'), table_name='rss_parser_funcs_version')
op.drop_index(op.f('ix_rss_parser_funcs_version_end_transaction_id'), table_name='rss_parser_funcs_version')
op.drop_table('rss_parser_funcs_version')
op.drop_index(op.f('ix_rss_parser_funcs_id'), table_name='rss_parser_funcs')
op.drop_index(op.f('ix_rss_parser_funcs_feed_name'), table_name='rss_parser_funcs')
op.drop_table('rss_parser_funcs')
op.drop_index(op.f('ix_rss_parser_feed_name_lut_version_transaction_id'), table_name='rss_parser_feed_name_lut_version')
op.drop_index(op.f('ix_rss_parser_feed_name_lut_version_operation_type'), table_name='rss_parser_feed_name_lut_version')
op.drop_index(op.f('ix_rss_parser_feed_name_lut_version_id'), table_name='rss_parser_feed_name_lut_version')
op.drop_index(op.f('ix_rss_parser_feed_name_lut_version_feed_netloc'), table_name='rss_parser_feed_name_lut_version')
op.drop_index(op.f('ix_rss_parser_feed_name_lut_version_feed_name'), table_name='rss_parser_feed_name_lut_version')
op.drop_index(op.f('ix_rss_parser_feed_name_lut_version_end_transaction_id'), table_name='rss_parser_feed_name_lut_version')
op.drop_table('rss_parser_feed_name_lut_version')
### end Alembic commands ###
|
external/model-preparation-algorithm/mpa_tasks/apis/detection/config.py | opencv/openvino_training_extensions | 775 | 11120110 | <gh_stars>100-1000
# Copyright (C) 2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
from attr import attrs
from ote_sdk.configuration.elements import (add_parameter_group,
# ParameterGroup,
# configurable_boolean,
# configurable_float,
# configurable_integer,
selectable,
string_attribute)
from mpa_tasks.apis import BaseConfig, LearningRateSchedule
@attrs
class DetectionConfig(BaseConfig):
header = string_attribute("Configuration for an object detection task of MPA")
description = header
@attrs
class __LearningParameters(BaseConfig.BaseLearningParameters):
header = string_attribute('Learning Parameters')
description = header
learning_rate_schedule = selectable(
default_value=LearningRateSchedule.COSINE,
header='Learning rate schedule',
description='Specify learning rate scheduling for the MMDetection task. '
'When training for a small number of epochs (N < 10), the fixed '
'schedule is recommended. For training for 10 < N < 25 epochs, '
'step-wise or exponential annealing might give better results. '
'Finally, for training on large datasets for at least 20 '
'epochs, cyclic annealing could result in the best model.',
editable=True, visible_in_ui=True)
@attrs
class __Postprocessing(BaseConfig.BasePostprocessing):
header = string_attribute("Postprocessing")
description = header
@attrs
class __NNCFOptimization(BaseConfig.BaseNNCFOptimization):
header = string_attribute("Optimization by NNCF")
description = header
@attrs
class __POTParameter(BaseConfig.BasePOTParameter):
header = string_attribute("POT Parameters")
description = header
@attrs
class __AlgoBackend(BaseConfig.BaseAlgoBackendParameters):
header = string_attribute('Parameters for the MPA algo-backend')
description = header
learning_parameters = add_parameter_group(__LearningParameters)
postprocessing = add_parameter_group(__Postprocessing)
nncf_optimization = add_parameter_group(__NNCFOptimization)
pot_parameters = add_parameter_group(__POTParameter)
algo_backend = add_parameter_group(__AlgoBackend)
|
archivebox/__init__.py | sarvex/ArchiveBox | 6,340 | 11120124 | __package__ = 'archivebox'
|
pyleri/prio.py | robbm1/pyleri | 106 | 11120136 | '''Prio class.
:copyright: 2021, <NAME> <<EMAIL>>
'''
from .elements import NamedElement
from .rule import Rule
from .exceptions import MaxRecursionError
class _Prio(NamedElement):
MAX_RECURSION = 50
__slots__ = ('_elements', '_name')
def __init__(self, *elements):
self._elements = self._validate_elements(elements)
def _get_node_result(self, root, tree, rule, s, node):
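        # Results are memoized per start position in rule._tested so that this
        # (potentially left-recursive) prio element terminates; rule._depth and
        # MAX_RECURSION cap how deep the re-entry can go.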
if rule._depth == _Prio.MAX_RECURSION:
raise MaxRecursionError(
'Max recursion depth of {} is reached'
.format(_Prio.MAX_RECURSION))
rule._depth += 1
if node.start not in rule._tested:
rule._tested[node.start] = False, node.start
for elem in self._elements:
children = []
is_valid, pos = root._walk(elem, node.start, children, rule, True)
if is_valid and \
pos > rule._tested[node.start][1]:
node.children = rule._tree[node.start] = children
rule._tested[node.start] = is_valid, pos
rule._depth -= 1
if rule._tested[node.start][0]:
root._append_tree(tree, node, rule._tested[node.start][1])
return rule._tested[node.start]
def _run_export_js(self, js_indent, indent, classes, cname):
return self._export_js_elements(js_indent, indent, classes, cname)
def _run_export_py(self, py_indent, indent, classes):
return self._export_py_elements(py_indent, indent, classes)
def _run_export_c(self, c_indent, indent, enums):
return self._export_c_elements(c_indent, indent, enums)
def _run_export_go(self, go_indent, indent, enums):
return self._export_go_elements(go_indent, indent, enums)
def _run_export_java(self, java_indent, indent, enums, classes):
return self._export_java_elements(java_indent, indent, enums, classes)
_Prio.__name__ = 'Prio'
def Prio(*elements):
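    """Wrap the prio element in a Rule, which keeps the per-parse recursion
    state (_depth, _tested, _tree) used by _get_node_result."""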
return Rule(_Prio(*elements))
|
bcs-ui/backend/bcs_web/audit_log/audit/auditors.py | laodiu/bk-bcs | 599 | 11120140 | <reponame>laodiu/bk-bcs<filename>bcs-ui/backend/bcs_web/audit_log/audit/auditors.py
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2021 <NAME>, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from dataclasses import asdict
from ..constants import ActivityStatus, ActivityType, ResourceType
from ..models import UserActivityLog
from .context import AuditContext
class Auditor:
"""提供操作审计日志记录功能"""
def __init__(self, audit_ctx: AuditContext):
self.audit_ctx = audit_ctx
def log_raw(self):
UserActivityLog.objects.create(**asdict(self.audit_ctx))
def log_succeed(self):
self._log(ActivityStatus.Succeed)
def log_failed(self, err_msg: str = ''):
self._log(ActivityStatus.Failed, err_msg)
def _log(self, activity_status: str, err_msg: str = ''):
self._complete_description(activity_status, err_msg)
self.audit_ctx.activity_status = activity_status
UserActivityLog.objects.create(**asdict(self.audit_ctx))
def _complete_description(self, activity_status: str, err_msg: str):
audit_ctx = self.audit_ctx
if not audit_ctx.description:
activity_type = ActivityType.get_choice_label(audit_ctx.activity_type)
resource_type = ResourceType.get_choice_label(audit_ctx.resource_type)
description_prefix = f'{activity_type} {resource_type}' # noqa
if audit_ctx.resource:
description_prefix = f'{description_prefix} {audit_ctx.resource}'
else:
description_prefix = audit_ctx.description
audit_ctx.description = f'{description_prefix} {ActivityStatus.get_choice_label(activity_status)}'
if err_msg:
audit_ctx.description += f': {err_msg}'
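# A minimal usage sketch (the caller code below is illustrative only and not
# part of this module; AuditContext fields come from .context):
#
#   audit_ctx = AuditContext(...)
#   auditor = Auditor(audit_ctx)
#   try:
#       do_something()
#       auditor.log_succeed()
#   except Exception as exc:
#       auditor.log_failed(err_msg=str(exc))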
class HelmAuditor(Auditor):
def __init__(self, audit_ctx: AuditContext):
super().__init__(audit_ctx)
self.audit_ctx.resource_type = ResourceType.HelmApp
|
modules/exploitation/xxe-serve.py | decidedlygray/ptf | 4,391 | 11120157 | #!/usr/bin/python
AUTHOR="<NAME> (Su1ph3r)"
DESCRIPTION="This module will install/update XXE Serve (XXE Out of Band Server)"
INSTALL_TYPE="GIT"
REPOSITORY_LOCATION="https://github.com/joernchen/xxeserve.git"
INSTALL_LOCATION="xxe-serve"
DEBIAN=""
ARCHLINUX=""
BYPASS_UPDATE="NO"
AFTER_COMMANDS="cd {INSTALL_LOCATION}, chmod +x xxeserve.rb"
LAUNCHER="xxe-serve"
|
tests/query_test/test_decimal_fuzz.py | Keendata/impala | 1,523 | 11120181 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# Generates random decimal numbers and verifies that mathematical
# operations return correct results under decimal_v2.
import decimal
import math
import pytest
import random
from tests.beeswax.impala_beeswax import ImpalaBeeswaxException
from tests.common.impala_test_suite import ImpalaTestSuite
from tests.common.test_dimensions import create_single_exec_option_dimension
from tests.common.test_vector import ImpalaTestDimension, ImpalaTestMatrix
class TestDecimalFuzz(ImpalaTestSuite):
# Impala's max precision for decimals is 38, so we should have the same in the tests
decimal.getcontext().prec = 38
@classmethod
def get_workload(cls):
return 'functional-query'
@classmethod
def add_test_dimensions(cls):
cls.ImpalaTestMatrix = ImpalaTestMatrix()
cls.ImpalaTestMatrix.add_dimension(create_single_exec_option_dimension())
cls.iterations = 10000
def weighted_choice(self, options):
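    """Randomly pick a key from the ``options`` dict with probability
    proportional to its weight (the dict value). Falls back to the last
    positively weighted key to guard against floating point rounding."""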
total_weight = sum(options.itervalues())
numeric_choice = random.uniform(0, total_weight)
last_choice = None
for choice, weight in options.iteritems():
if numeric_choice <= weight:
return choice
numeric_choice -= weight
if weight > 0:
last_choice = choice
return last_choice
def get_decimal(self):
'''Returns a 3-tuple with string values of (value, precision, scale). The function
does not always return completely random values, we try to bias it to select
more interesting values.'''
def random_precision():
return random.randint(1, 38)
def extreme_precision():
return 38
precision_weights = {}
precision_weights[random_precision] = 0.8
precision_weights[extreme_precision] = 0.2
precision = self.weighted_choice(precision_weights)()
def random_scale(precision):
return random.randint(0, precision)
def extreme_scale(precision):
return random.choice([0, precision])
scale_weights = {}
scale_weights[random_scale] = 0.9
scale_weights[extreme_scale] = 0.1
scale = self.weighted_choice(scale_weights)(precision)
def random_value(precision):
'''Generates a completely random value.'''
def num_digits_random(precision):
return random.randint(1, precision)
def num_digits_all(precision):
return precision
# Determine how many digits the value is going to have.
num_digits_weights = {}
num_digits_weights[num_digits_random] = 0.8
num_digits_weights[num_digits_all] = 0.2
num_digits = self.weighted_choice(num_digits_weights)(precision)
no_zero = '123456789'
with_zero = '0123456789'
result = random.choice(no_zero)
for _ in range(num_digits - 1):
result += random.choice(with_zero)
return result
def special_case_binary_value(precision):
'''Generates a value that looks like 11111... or 10000... in binary number
system.'''
def exponent_random(precision):
return random.randint(0, int(precision * math.log(10, 2)))
def exponent_max(precision):
return int(precision * math.log(10, 2))
exponent_weights = {}
exponent_weights[exponent_random] = 0.8
exponent_weights[exponent_max] = 0.2
exponent = self.weighted_choice(exponent_weights)(precision)
value = 2 ** exponent
if random.random() < 0.5:
value -= 1
return '{0}'.format(value)
def special_case_decimal_value(precision):
'''Generates a value that looks like 99999... or 10000... in decimal number
system.'''
def num_digits_random(precision):
return random.randint(1, precision)
def num_digits_max(precision):
return precision
num_digits_weights = {}
      num_digits_weights[num_digits_random] = 0.8
num_digits_weights[num_digits_max] = 0.2
num_digits = self.weighted_choice(num_digits_weights)(precision)
value = 10 ** num_digits
if num_digits == precision or random.random() < 0.5:
value -= 1
return '{0}'.format(value)
value_weights = {}
value_weights[random_value] = 0.6
value_weights[special_case_binary_value] = 0.2
value_weights[special_case_decimal_value] = 0.2
value = self.weighted_choice(value_weights)(precision)
# Randomly determine the placement of the decimal mark.
# The smallest index where the decimal mark can be placed in the number string.
min_dot_location = max(len(value) - scale, 0)
# The largest index where the decimal mark can be placed in the number string.
max_dot_location = min(precision - scale, len(value))
dot_location = random.randint(min_dot_location, max_dot_location)
if dot_location == 0:
value = '0.' + value
elif dot_location == len(value):
pass
else:
value = value[:dot_location] + '.' + value[dot_location:]
if random.random() < 0.5:
# Negate the number.
value = '-' + value
return (value, precision, scale)
def result_equals(self, expected, actual):
'''Verify that the expected result is equal to the actual result. We verify equality
by rounding the expected result to different numbers of places and verifying that the
actual result is matched in at least one of the cases.'''
if actual == expected:
return True
if actual is None:
# Overflow
if abs(expected) > decimal.Decimal("9" * 32):
# If the expected result is larger than 10^32 - 1, it's not unreasonable for
# there to be an overflow in Impala because the minimum scale is 6 and
# 38 (max precision) - 6 = 32.
return True
return False
for num_digits_after_dot in xrange(39):
# Reduce the number of digits after the dot in the expected_result to different
# amounts. If it matches the actual result in at least one of the cases, we
# consider the actual result to be acceptable.
truncated_expected = expected.quantize(
decimal.Decimal("1e-{0}".format(num_digits_after_dot)),
rounding=decimal.ROUND_HALF_UP)
if actual == truncated_expected:
return True
return False
def execute_one_decimal_op(self):
'''Executes a single query and compares the result to a result that we computed in
Python.'''
op = random.choice(['+', '-', '*', '/', '%'])
value1, precision1, scale1 = self.get_decimal()
value2, precision2, scale2 = self.get_decimal()
query = ('select cast({value1} as decimal({precision1},{scale1})) {op} '
'cast({value2} as decimal({precision2},{scale2}))').format(op=op,
value1=value1, precision1=precision1, scale1=scale1,
value2=value2, precision2=precision2, scale2=scale2)
try:
result = self.execute_scalar(query, query_options={'decimal_v2': 'true'})
except ImpalaBeeswaxException as e:
result = None
if result is not None:
result = decimal.Decimal(result)
with decimal.localcontext() as ctx:
# Set the decimal context to a large precision initially, so that the
# mathematical operations are performed at a high precision.
ctx.prec = 80
try:
if op == '+':
expected_result = decimal.Decimal(value1) + decimal.Decimal(value2)
elif op == '-':
expected_result = decimal.Decimal(value1) - decimal.Decimal(value2)
elif op == '*':
expected_result = decimal.Decimal(value1) * decimal.Decimal(value2)
elif op == '/':
expected_result = decimal.Decimal(value1) / decimal.Decimal(value2)
elif op == '%':
expected_result = decimal.Decimal(value1) % decimal.Decimal(value2)
else:
assert False
except decimal.InvalidOperation as e:
expected_result = None
except decimal.DivisionByZero as e:
expected_result = None
assert self.result_equals(expected_result, result)
def test_decimal_ops(self, vector):
for _ in xrange(self.iterations):
self.execute_one_decimal_op()
def width_bucket(self, val, min_range, max_range, num_buckets):
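    """Pure-Python reference implementation of the SQL width_bucket() semantics,
    used to verify Impala's result: returns 0 below the range, num_buckets + 1
    above it, the bucket index otherwise, or None if the range is empty."""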
# Multiplying the values by 10**40 guarantees that the numbers can be converted
# to int without losing information.
val_int = int(decimal.Decimal(val) * 10**40)
min_range_int = int(decimal.Decimal(min_range) * 10**40)
max_range_int = int(decimal.Decimal(max_range) * 10**40)
if min_range_int >= max_range_int:
return None
if val_int < min_range_int:
return 0
if val_int > max_range_int:
return num_buckets + 1
range_size = max_range_int - min_range_int
dist_from_min = val_int - min_range_int
return (num_buckets * dist_from_min) / range_size + 1
def execute_one_width_bucket(self):
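    """Runs a single randomly generated width_bucket() query against Impala and
    checks the result against the Python reference implementation above."""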
val, val_prec, val_scale = self.get_decimal()
min_range, min_range_prec, min_range_scale = self.get_decimal()
max_range, max_range_prec, max_range_scale = self.get_decimal()
num_buckets = random.randint(1, 2147483647)
query = ('select width_bucket('
'cast({val} as decimal({val_prec},{val_scale})), '
'cast({min_range} as decimal({min_range_prec},{min_range_scale})), '
'cast({max_range} as decimal({max_range_prec},{max_range_scale})), '
'{num_buckets})')
query = query.format(val=val, val_prec=val_prec, val_scale=val_scale,
min_range=min_range, min_range_prec=min_range_prec,
min_range_scale=min_range_scale,
max_range=max_range, max_range_prec=max_range_prec,
max_range_scale=max_range_scale,
num_buckets=num_buckets)
expected_result = self.width_bucket(val, min_range, max_range, num_buckets)
if not expected_result:
return
try:
result = self.execute_scalar(query, query_options={'decimal_v2': 'true'})
assert int(result) == expected_result
except ImpalaBeeswaxException as e:
if "You need to wrap the arguments in a CAST" not in str(e):
# Sometimes the decimal inputs are incompatible with each other, so it's ok
# to ignore this error.
raise e
def test_width_bucket(self, vector):
for _ in xrange(self.iterations):
self.execute_one_width_bucket()
|
imagetagger/imagetagger/tools/migrations/0003_auto_20171222_0302.py | jbargu/imagetagger | 212 | 11120187 | <gh_stars>100-1000
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-12-22 02:02
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('tools', '0002_tool_public'),
]
operations = [
migrations.AlterField(
model_name='tool',
name='filename',
field=models.CharField(blank=True, max_length=255, null=True),
),
]
|
beacon_chain/state/chain.py | muta6150/beacon_chain | 217 | 11120193 | <gh_stars>100-1000
from typing import (
List,
TYPE_CHECKING,
)
if TYPE_CHECKING:
from .block import Block # noqa: F401
class Chain():
# Note, this is not an object defined in the v2.1 spec
# this is a helper object to mask complexity in tracking
# blocks
def __init__(self, head: 'Block'=None, blocks: List['Block']=[]) -> None:
self.head = head
self.blocks = blocks
self.chain = [] # type: List['Block']
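        # Rebuild the canonical chain by walking parent_hash links from the
        # head back to the oldest block we know about.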
# temp helper
all_blocks_by_hash = {
block.hash: block
for block in self.blocks
}
if self.head:
tmp = self.head
self.chain.append(tmp)
while all_blocks_by_hash.get(tmp.parent_hash, None):
tmp = all_blocks_by_hash[tmp.parent_hash]
self.chain.append(tmp)
self.block_by_hash = {
block.hash: block
for block in self.chain
}
self.block_by_slot_number = {
block.slot_number: block
for block in self.chain
}
def __contains__(self, block: 'Block') -> bool:
return bool(self.get_block_by_hash(block.hash))
def get_block_by_slot_number(self, slot_number: int) -> 'Block':
return self.block_by_slot_number.get(slot_number, None)
def get_block_by_hash(self, block_hash: bytes) -> 'Block':
return self.block_by_hash.get(block_hash, None)
|
examples/preprocessing/plot_transformers.py | jmrichardson/pyts | 1,217 | 11120214 | <filename>examples/preprocessing/plot_transformers.py
"""
============
Transformers
============
Some algorithms make assumptions about the distribution of the data.
Therefore it can be useful to transform time series so that they
approximately follow a given distribution.
Two transformers are made available:
* :class:`pyts.preprocessing.PowerTransformer`
* :class:`pyts.preprocessing.QuantileTransformer`.
This example illustrates the transformations performed by both algorithms.
"""
# Author: <NAME> <<EMAIL>>
# License: BSD-3-Clause
import matplotlib.pyplot as plt
from pyts.datasets import load_gunpoint
from pyts.preprocessing import PowerTransformer, QuantileTransformer
X, _, _, _ = load_gunpoint(return_X_y=True)
n_timestamps = X.shape[1]
# Transform the data with different transformation algorithms
X_power = PowerTransformer().transform(X)
X_quantile = QuantileTransformer(n_quantiles=n_timestamps).transform(X)
# Show the results for the first time series
plt.figure(figsize=(6, 4))
plt.plot(X[0], '--', label='Original')
plt.plot(X_power[0], '--', label='PowerTransformer')
plt.plot(X_quantile[0], '--', label='QuantileTransformer')
plt.legend(loc='best', fontsize=8)
plt.title('Transforming time series', fontsize=16)
plt.tight_layout()
plt.show()
|
biostar/recipes/management/commands/cleanup.py | Oribyne/biostar-central-fork | 477 | 11120217 | <gh_stars>100-1000
import logging, os, csv
import shutil
from django.conf import settings
from django.core.management.base import BaseCommand
from biostar.recipes.models import Data, Job, Analysis, Project
logger = logging.getLogger('engine')
__CURR_DIR = os.path.dirname(os.path.realpath(__file__))
class Command(BaseCommand):
    help = 'Removes objects flagged as deleted (projects, recipes, jobs, data) and their files'
def add_arguments(self, parser):
pass
def handle(self, *args, **options):
data = Data.objects.filter(deleted=True)
jobs = Job.objects.filter(deleted=True)
recipes = Analysis.objects.filter(deleted=True)
projects = Project.objects.filter(deleted=True)
root_dir = os.path.abspath(settings.MEDIA_ROOT)
def rmdirs(objs):
for obj in objs:
obj_path = os.path.abspath(obj.get_data_dir())
# Only delete job files in the media root
if obj_path.startswith(root_dir):
shutil.rmtree(obj_path)
logger.info(f"{obj_path} deleted")
objs.delete()
# Delete files associated with objects
rmdirs(objs=jobs)
rmdirs(objs=data)
recipes.delete()
projects.delete()
|
tests/test_eval.py | ming-hai/spleeter | 19,827 | 11120224 | #!/usr/bin/env python
# coding: utf8
""" Unit testing for Separator class. """
__email__ = '<EMAIL>'
__author__ = '<NAME>'
__license__ = 'MIT License'
from os import makedirs
from os.path import join
from tempfile import TemporaryDirectory
import pytest
import numpy as np
from spleeter.__main__ import evaluate
from spleeter.audio.adapter import AudioAdapter
BACKENDS = ['tensorflow', 'librosa']
TEST_CONFIGURATIONS = {el: el for el in BACKENDS}
res_4stems = {
'vocals': {
'SDR': 3.25e-05,
'SAR': -11.153575,
'SIR': -1.3849,
'ISR': 2.75e-05
},
'drums': {
'SDR': -0.079505,
'SAR': -15.7073575,
'SIR': -4.972755,
'ISR': 0.0013575
},
'bass': {
'SDR': 2.5e-06,
'SAR': -10.3520575,
'SIR': -4.272325,
'ISR': 2.5e-06
},
'other': {
'SDR': -1.359175,
'SAR': -14.7076775,
'SIR': -4.761505,
'ISR': -0.01528
}
}
def generate_fake_eval_dataset(path):
"""
generate fake evaluation dataset
"""
aa = AudioAdapter.default()
n_songs = 2
fs = 44100
duration = 3
n_channels = 2
rng = np.random.RandomState(seed=0)
for song in range(n_songs):
song_path = join(path, 'test', f'song{song}')
makedirs(song_path, exist_ok=True)
for instr in ['mixture', 'vocals', 'bass', 'drums', 'other']:
filename = join(song_path, f'{instr}.wav')
data = rng.rand(duration*fs, n_channels)-0.5
aa.save(filename, data, fs)
@pytest.mark.parametrize('backend', TEST_CONFIGURATIONS)
def test_evaluate(backend):
with TemporaryDirectory() as dataset:
with TemporaryDirectory() as evaluation:
generate_fake_eval_dataset(dataset)
metrics = evaluate(
adapter='spleeter.audio.ffmpeg.FFMPEGProcessAudioAdapter',
output_path=evaluation,
stft_backend=backend,
params_filename='spleeter:4stems',
mus_dir=dataset,
mwf=False,
verbose=False)
for instrument, metric in metrics.items():
for m, value in metric.items():
assert np.allclose(
np.median(value),
res_4stems[instrument][m],
atol=1e-3)
|
rpython/jit/backend/arm/test/test_slist.py | nanjekyejoannah/pypy | 381 | 11120251 | <reponame>nanjekyejoannah/pypy<filename>rpython/jit/backend/arm/test/test_slist.py<gh_stars>100-1000
import py
from rpython.jit.metainterp.test import test_slist
from rpython.jit.backend.arm.test.support import JitARMMixin
class TestSList(JitARMMixin, test_slist.ListTests):
# for the individual tests see
# ====> ../../../metainterp/test/test_slist.py
def test_list_of_voids(self):
py.test.skip("list of voids unsupported by ll2ctypes")
|
examples/cp/visu/rcpsp_multi_mode_json.py | yukarinoki/docplex-examples | 302 | 11120264 | <reponame>yukarinoki/docplex-examples
# --------------------------------------------------------------------------
# Source file provided under Apache License, Version 2.0, January 2004,
# http://www.apache.org/licenses/
# (c) Copyright IBM Corp. 2015, 2016
# --------------------------------------------------------------------------
"""
This example is the same as the one implemented in rcpsp_multi_mode.py, except that
the input data files are in JSON format, which is simpler to read and modify.
The MMRCPSP (Multi-Mode Resource-Constrained Project Scheduling Problem) is a
generalization of the Resource-Constrained Project Scheduling problem
(see rcpsp.py).
In the MMRCPSP, each activity can be performed in one out of several modes.
Each mode of an activity represents an alternative way of combining different levels
of resource requirements with a related duration.
Renewable and non-renewable resources are distinguished.
While renewable resources have a limited instantaneous availability such as
manpower and machines, non renewable resources are limited for the entire project,
allowing to model, e.g., a budget for the project.
The objective is to find a mode and a start time for each activity such that the
schedule is makespan minimal and feasible with regard to the precedence
and resource constraints.
Please refer to documentation for appropriate setup of solving configuration.
"""
from docplex.cp.model import *
import os
import json
#-----------------------------------------------------------------------------
# Initialize the problem data
#-----------------------------------------------------------------------------
# Load input data from json file
filename = os.path.dirname(os.path.abspath(__file__)) + '/data/rcpspmm_default.json'
with open(filename, 'r') as f:
jstr = f.read()
JSON_DATA = json.loads(jstr)
#-----------------------------------------------------------------------------
# Prepare the data for modeling
#-----------------------------------------------------------------------------
# Get renewable capacities
CAPACITIES_RENEWABLE = JSON_DATA['capacityRenewable']
NB_RENEWABLE = len(CAPACITIES_RENEWABLE)
# Get non-renewable capacities
CAPACITIES_NON_RENEWABLE = JSON_DATA['capacityNonRenewable']
NB_NON_RENEWABLE = len(CAPACITIES_NON_RENEWABLE)
# Get list of tasks
TASKS = JSON_DATA['tasks']
NB_TASKS = len(TASKS)
# Create a unique id for each mode (to retrieve results)
MODES = []
for t in TASKS:
for i, m in enumerate(t['modes']):
m['id'] = 'T{}-M{}'.format(t['id'], i + 1)
MODES.append(m)
#-----------------------------------------------------------------------------
# Build the model
#-----------------------------------------------------------------------------
# Create model
mdl = CpoModel()
# Create one interval variable per task
tasks = {t['id']: interval_var(name='T{}'.format(t['id'])) for t in TASKS}
# Add precedence constraints
mdl.add(end_before_start(tasks[t['id']], tasks[s]) for t in TASKS for s in t['successors'])
# Create one optional interval variable per task mode
modes = { m['id']: interval_var(name=m['id'], optional=True, size=m['duration']) for t in TASKS for m in t['modes'] }
# Add alternative constraints for tasks
mdl.add(alternative(tasks[t['id']], [ modes[m['id']] for m in t['modes'] ]) for t in TASKS)
# Initialize cumul functions for renewable and non renewable resources
renewables = [ sum(pulse(modes[m['id']], m['demandRenewable'][j]) for m in MODES if m['demandRenewable'][j] > 0)
for j in range(NB_RENEWABLE)]
non_renewables = [ sum(m['demandNonRenewable'][j]*presence_of(modes[m['id']]) for m in MODES if m['demandNonRenewable'][j] > 0 )
for j in range(NB_NON_RENEWABLE)]
# Constrain renewable resources capacity
mdl.add(renewables[j] <= CAPACITIES_RENEWABLE[j] for j in range(NB_RENEWABLE))
# Constrain non-renewable resources capacity
mdl.add(non_renewables[j] <= CAPACITIES_NON_RENEWABLE[j] for j in range(NB_NON_RENEWABLE))
# Minimize overall schedule end date
mdl.add(minimize(max([end_of(t) for t in tasks.values()])))
#-----------------------------------------------------------------------------
# Solve the model and display the result
#-----------------------------------------------------------------------------
# Solve model
print('Solving model...')
res = mdl.solve(FailLimit=30000, TimeLimit=10)
print('Solution: ')
res.print_solution()
import docplex.cp.utils_visu as visu
if res and visu.is_visu_enabled():
load = [CpoStepFunction() for j in range(NB_RENEWABLE)]
for m in MODES:
itv = res.get_var_solution(modes[m['id']])
if itv.is_present():
for j in range(NB_RENEWABLE):
dem = m['demandRenewable'][j]
if dem > 0:
load[j].add_value(itv.get_start(), itv.get_end(), dem)
visu.timeline('Solution for RCPSPMM ' + filename)
visu.panel('Tasks')
for t in TASKS:
tid = t['id']
visu.interval(res.get_var_solution(tasks[tid]), tid, str(tid))
for j in range(NB_RENEWABLE):
visu.panel('R' + str(j + 1))
visu.function(segments=[(INTERVAL_MIN, INTERVAL_MAX, CAPACITIES_RENEWABLE[j])], style='area', color='lightgrey')
visu.function(segments=load[j], style='area', color=j)
visu.show()
|
L1TriggerConfig/RPCTriggerConfig/python/RPCHwConfigOffline_cfi.py | ckamtsikis/cmssw | 852 | 11120267 | <filename>L1TriggerConfig/RPCTriggerConfig/python/RPCHwConfigOffline_cfi.py<gh_stars>100-1000
import FWCore.ParameterSet.Config as cms
from CondCore.DBCommon.CondDBSetup_cfi import *
L1RPCHwConfigOffline = cms.ESSource("PoolDBESSource",
CondDBSetup,
toGet = cms.VPSet(cms.PSet(
record = cms.string('L1RPCHwConfigRcd'),
tag = cms.string('L1RPCHwConfig_v1')
)),
connect = cms.string('oracle://cms_orcoff_prod/CMS_COND_31X_RPC')
)
|
php-src/sdk/Python/PhalApiClient/python3.x/__init__.py | agui2200/roadRunnerXphalApi | 1,564 | 11120271 | #-*- coding:utf-8 -*-
#gaoyiping (<EMAIL>) 2017-02-18 |
networkx/algorithms/tests/test_asteroidal.py | jebogaert/networkx | 10,024 | 11120301 | <filename>networkx/algorithms/tests/test_asteroidal.py<gh_stars>1000+
import networkx as nx
def test_is_at_free():
is_at_free = nx.asteroidal.is_at_free
cycle = nx.cycle_graph(6)
assert not is_at_free(cycle)
path = nx.path_graph(6)
assert is_at_free(path)
small_graph = nx.complete_graph(2)
assert is_at_free(small_graph)
petersen = nx.petersen_graph()
assert not is_at_free(petersen)
clique = nx.complete_graph(6)
assert is_at_free(clique)
line_clique = nx.line_graph(clique)
assert not is_at_free(line_clique)
|
add_trailing_comma/_plugins/calls.py | asottile/add-trailing-comma | 238 | 11120304 | <gh_stars>100-1000
import ast
import functools
from typing import Iterable
from typing import List
from typing import Set
from typing import Tuple
from tokenize_rt import Offset
from tokenize_rt import Token
from add_trailing_comma._ast_helpers import ast_to_offset
from add_trailing_comma._data import register
from add_trailing_comma._data import State
from add_trailing_comma._data import TokenFunc
from add_trailing_comma._token_helpers import find_call
from add_trailing_comma._token_helpers import fix_brace
def _fix_call(
i: int,
tokens: List[Token],
*,
add_comma: bool,
arg_offsets: Set[Offset],
) -> None:
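    # find_call scans forward from token ``i`` for the brace of this call;
    # fix_brace then adds or removes the trailing comma as needed.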
return fix_brace(
tokens,
find_call(arg_offsets, i, tokens),
add_comma=add_comma,
remove_comma=True,
)
@register(ast.Call)
def visit_Call(
state: State,
node: ast.Call,
) -> Iterable[Tuple[Offset, TokenFunc]]:
argnodes = [*node.args, *node.keywords]
arg_offsets = set()
has_starargs = False
for argnode in argnodes:
if isinstance(argnode, ast.Starred):
has_starargs = True
if isinstance(argnode, ast.keyword) and argnode.arg is None:
has_starargs = True
offset = ast_to_offset(argnode)
# multiline strings have invalid position, ignore them
if offset.utf8_byte_offset != -1: # pragma: no branch (cpy bug)
arg_offsets.add(offset)
# If the sole argument is a generator, don't add a trailing comma as
# this breaks lib2to3 based tools
only_a_generator = (
len(argnodes) == 1 and isinstance(argnodes[0], ast.GeneratorExp)
)
if arg_offsets and not only_a_generator and not state.in_fstring:
func = functools.partial(
_fix_call,
add_comma=not has_starargs or state.min_version >= (3, 5),
arg_offsets=arg_offsets,
)
yield ast_to_offset(node), func
|
esmvaltool/install/__init__.py | cffbots/ESMValTool | 148 | 11120349 | <reponame>cffbots/ESMValTool
"""Install Julia and R dependencies."""
import subprocess
import sys
from pathlib import Path
class Install:
"""Install extra dependencies.
Diagnostics written in Julia or R need extra dependencies. Use this
command to install them.
Note that Julia or R must be pre-installed before running this command.
"""
@staticmethod
def _run(cmd, script):
root = Path(__file__).parent
try:
subprocess.check_output(
[cmd, str(root / script)],
stderr=subprocess.STDOUT,
universal_newlines=True,
)
except subprocess.CalledProcessError as exc:
print(exc.stdout)
print("installation failed")
sys.exit(1)
else:
print("Installation successful")
def Julia(self): # noqa: N802
"""Install dependencies needed to run Julia diagnostics."""
print("installing Julia packages, please wait...")
script = Path("Julia") / "setup.jl"
self._run("julia", script)
def R(self): # noqa: N802
"""Install dependencies needed to run R diagnostics."""
print("installing R packages, please wait...")
print("Compiling may take up to 15 minutes or more.")
script = Path("R") / "setup.R"
self._run("Rscript", script)
|
src/scenic/simulators/webots/utils.py | ArenBabikian/Scenic | 141 | 11120364 | """Various utilities for working with Webots scenarios."""
import math
import numpy as np
from scenic.core.geometry import normalizeAngle
def webotsToScenicPosition(pos):
"""Convert a Webots position to a Scenic position.
Drops the Webots Y coordinate.
"""
x, y, z = pos
return (x, -z)
def scenicToWebotsPosition(pos, y=0):
"""Convert a Scenic position to a Webots position."""
x, z = pos
return [x, y, -z]
def webotsToScenicRotation(rot, tolerance2D=None):
"""Convert a Webots rotation vector to a Scenic heading.
Assumes the object lies in the Webots X-Z plane, with a rotation axis
close to the Y axis. If ``tolerance2D`` is given, returns ``None`` if the
orientation of the object is not sufficiently close to being 2D.
"""
axis = np.array(rot[:3])
angle = rot[3]
if tolerance2D is not None and np.linalg.norm(axis - (0, 1, 0)) > tolerance2D:
return None
return normalizeAngle(angle + math.pi)
def scenicToWebotsRotation(heading):
"""Convert a Scenic heading to a Webots rotation vector."""
return [0, 1, 0, heading - math.pi]
|
examples/gym/mountain_car_continuous_env.py | jhardy0/deer | 373 | 11120372 | <filename>examples/gym/mountain_car_continuous_env.py<gh_stars>100-1000
""" Mountain car environment with continuous action space.
Author: <NAME>
"""
import numpy as np
import copy
import math
from deer.base_classes import Environment
import gym
class MyEnv(Environment):
def __init__(self, rng):
""" Initialize environment.
Parameters
-----------
rng : numpy random number generator
"""
self.env = gym.make('MountainCarContinuous-v0')
self.rng=rng
self._last_observation = self.reset()
self.is_terminal=False
self._input_dim = [(1,), (1,)]
def act(self, action):
""" Simulate one time step in the environment and returns the reward for the time step
Parameters
-----------
action : list of floats (in this case one float, because there is one action)
Returns
-------
reward : float
"""
reward=0
for _ in range(10): # Increase the duration of one time step by a factor 10
self._last_observation, r, self.is_terminal, info = self.env.step([action[0]])
reward+=r
if(self.is_terminal==True):
break
if (self.mode==0): # Show the policy only at test time
try:
self.env.render()
except:
pass
return reward/100. #Scale the reward so that it's 1 at maximum
def reset(self, mode=0):
""" Reset environment for a new episode.
Parameters
-----------
Mode : int
-1 corresponds to training and 0 to test
"""
self.mode=mode
self._last_observation = self.env.reset()
self.is_terminal=False
return self._last_observation
def inTerminalState(self):
""" This returns whether the environment reached a terminal state after the last transition
(i.e. whether the last transition that occurred was terminal).
Returns
-------
self.is_terminal : bool
"""
return self.is_terminal
def inputDimensions(self):
return self._input_dim
def nActions(self):
""" Provides the bounds on the action space
Returns
-------
bounds on the action space
"""
return [[self.env.action_space.low[0],self.env.action_space.high[0]]]
def observe(self):
return copy.deepcopy(self._last_observation)
def main():
# This function can be used for debug purposes
rng = np.random.RandomState(123456)
myenv=MyEnv(rng)
print(myenv.env.action_space)
print(myenv.env.action_space.low)
print(myenv.env.action_space.high)
print(myenv.env.observation_space)
print (myenv.observe())
myenv.act([0])
print (myenv.observe())
myenv.act([1])
print (myenv.observe())
if __name__ == "__main__":
main()
|
examples/example_edifice.py | fding/pyedifice | 151 | 11120378 | <reponame>fding/pyedifice
import edifice
from edifice import View, Label, TextInput
class App(edifice.Component):
def __init__(self):
super(App, self).__init__()
self.text = ""
def render(self):
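        # Edifice re-runs render() whenever set_state updates self.text via the
        # TextInput's on_change callback.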
return View(layout="column")(
Label("Hello world: " + self.text),
TextInput(self.text, on_change=lambda text: self.set_state(text=text)),
View(layout="row")(
Label("Bonjour")
)
)
if __name__ == "__main__":
edifice.App(App()).start()
|
src/graphql/execution/collect_fields.py | closeio/graphql-core | 590 | 11120380 | from typing import Any, Dict, List, Set, Union, cast
from ..language import (
FieldNode,
FragmentDefinitionNode,
FragmentSpreadNode,
InlineFragmentNode,
SelectionSetNode,
)
from ..type import (
GraphQLAbstractType,
GraphQLIncludeDirective,
GraphQLObjectType,
GraphQLSchema,
GraphQLSkipDirective,
is_abstract_type,
)
from ..utilities.type_from_ast import type_from_ast
from .values import get_directive_values
__all__ = ["collect_fields"]
def collect_fields(
schema: GraphQLSchema,
fragments: Dict[str, FragmentDefinitionNode],
variable_values: Dict[str, Any],
runtime_type: GraphQLObjectType,
selection_set: SelectionSetNode,
fields: Dict[str, List[FieldNode]],
visited_fragment_names: Set[str],
) -> Dict[str, List[FieldNode]]:
"""Collect fields.
Given a selection_set, adds all of the fields in that selection to the passed in
map of fields, and returns it at the end.
collect_fields requires the "runtime type" of an object. For a field which
returns an Interface or Union type, the "runtime type" will be the actual
Object type returned by that field.
For internal use only.
"""
for selection in selection_set.selections:
if isinstance(selection, FieldNode):
if not should_include_node(variable_values, selection):
continue
name = get_field_entry_key(selection)
fields.setdefault(name, []).append(selection)
elif isinstance(selection, InlineFragmentNode):
if not should_include_node(
variable_values, selection
) or not does_fragment_condition_match(schema, selection, runtime_type):
continue
collect_fields(
schema,
fragments,
variable_values,
runtime_type,
selection.selection_set,
fields,
visited_fragment_names,
)
elif isinstance(selection, FragmentSpreadNode): # pragma: no cover else
frag_name = selection.name.value
if frag_name in visited_fragment_names or not should_include_node(
variable_values, selection
):
continue
visited_fragment_names.add(frag_name)
fragment = fragments.get(frag_name)
if not fragment or not does_fragment_condition_match(
schema, fragment, runtime_type
):
continue
collect_fields(
schema,
fragments,
variable_values,
runtime_type,
fragment.selection_set,
fields,
visited_fragment_names,
)
return fields
def should_include_node(
variable_values: Dict[str, Any],
node: Union[FragmentSpreadNode, FieldNode, InlineFragmentNode],
) -> bool:
"""Check if node should be included
Determines if a field should be included based on the @include and @skip
directives, where @skip has higher precedence than @include.
"""
skip = get_directive_values(GraphQLSkipDirective, node, variable_values)
if skip and skip["if"]:
return False
include = get_directive_values(GraphQLIncludeDirective, node, variable_values)
if include and not include["if"]:
return False
return True
def does_fragment_condition_match(
schema: GraphQLSchema,
fragment: Union[FragmentDefinitionNode, InlineFragmentNode],
type_: GraphQLObjectType,
) -> bool:
"""Determine if a fragment is applicable to the given type."""
type_condition_node = fragment.type_condition
if not type_condition_node:
return True
conditional_type = type_from_ast(schema, type_condition_node)
if conditional_type is type_:
return True
if is_abstract_type(conditional_type):
return schema.is_sub_type(cast(GraphQLAbstractType, conditional_type), type_)
return False
def get_field_entry_key(node: FieldNode) -> str:
"""Implements the logic to compute the key of a given field's entry"""
return node.alias.value if node.alias else node.name.value
|
tests/performance/grpc_latency.py | BrightTux/model_server | 305 | 11120384 | #!/usr/bin/env python3
#
# Copyright (c) 2018-2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import grpc
import datetime
import argparse
import numpy as np
from tensorflow import make_tensor_proto, make_ndarray
from tensorflow_serving.apis import predict_pb2
from tensorflow_serving.apis import prediction_service_pb2_grpc
parser = argparse.ArgumentParser(
description='Sends requests via TFS gRPC API using images in numpy format.'
' It measures performance statistics.')
parser.add_argument('--images_numpy_path',
required=True,
help='image in numpy format')
parser.add_argument('--labels_numpy_path',
required=False,
help='labels in numpy format')
parser.add_argument('--grpc_address',
required=False,
default='localhost',
help='Specify url to grpc service. default:localhost')
parser.add_argument('--grpc_port',
required=False,
default=9178,
help='Specify port to grpc service. default: 9178')
parser.add_argument('--input_name',
required=False,
default='input',
help='Specify input tensor name. default: input')
parser.add_argument('--output_name',
required=False,
default='prob',
help='Specify output tensor name. default: prob')
parser.add_argument('--iterations',
default=0,
help='Number of requests iterations, '
'as default use number of images in numpy memmap. '
'default: 0 (consume all frames)',
type=int)
parser.add_argument('--batchsize',
default=1,
help='Number of images in a single request. default: 1',
type=int)
parser.add_argument('--model_name',
default='resnet',
help='Define model name in payload. default: resnet')
parser.add_argument('--model_version',
default=1,
help='Model version number. default: 1',
type=int)
parser.add_argument('--report_every',
default=0,
help='Report performance every X iterations',
type=int)
parser.add_argument('--precision',
default=np.float32,
help='input precision',
type=np.dtype)
parser.add_argument('--id',
default='--',
help='Helps identifying client')
args = parser.parse_args()
accurracy_measuring_mode = args.labels_numpy_path is not None
channel = grpc.insecure_channel("{}:{}".format(
args.grpc_address,
args.grpc_port))
stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)
processing_times = np.zeros((0), int)
imgs = np.load(args.images_numpy_path, mmap_mode='r', allow_pickle=False)
imgs = imgs - np.min(imgs) # Normalization 0-255
imgs = imgs / np.ptp(imgs) * 255 # Normalization 0-255
imgs = imgs.astype(args.precision)
if accurracy_measuring_mode:
labels = np.load(args.labels_numpy_path, mmap_mode='r', allow_pickle=False)
matches_count = 0
total_count = 0
# If input numpy file has too few frames according to the
# value of iterations and the batch size,
# it will be duplicated to match requested number of frames.
while args.batchsize >= imgs.shape[0]:
imgs = np.append(imgs, imgs, axis=0)
if accurracy_measuring_mode:
labels = np.append(labels, labels, axis=0)
if args.iterations < 0:
print("Argument '--iterations' can't be lower than 0")
print("Exitting")
sys.exit(1)
elif args.iterations == 0:
iterations = int(imgs.shape[0] // args.batchsize)
else:
iterations = args.iterations
iteration = 0
print("[{:2}] Starting iterations".format(args.id))
while iteration <= iterations:
for x in range(0, imgs.shape[0] - args.batchsize + 1, args.batchsize):
iteration += 1
if iteration > iterations:
break
# Preparing image data
img = imgs[x:(x + args.batchsize)]
if accurracy_measuring_mode:
expected_label = labels[x:(x + args.batchsize)][0]
# Creating request object
request = predict_pb2.PredictRequest()
request.model_spec.name = args.model_name
request.model_spec.version.value = args.model_version
# Populating request with data
request.inputs[args.input_name].CopyFrom(
make_tensor_proto(img, shape=(img.shape)))
# Measuring gRPC request time
start_time = datetime.datetime.now()
result = stub.Predict(request, 10.0)
end_time = datetime.datetime.now()
# Aggregating processing time statistics
duration = (end_time - start_time).total_seconds() * 1000
processing_times = np.append(processing_times, np.array([duration]))
        # If we want to check accuracy
if accurracy_measuring_mode:
output = np.array(make_ndarray(result.outputs[args.output_name]))
if args.model_name == "dummy":
if (img + 1 == output ).all():
matches_count += 1
total_count += 1
else:
actual_label = np.argmax(output[0])
if (expected_label == actual_label) :
matches_count += 1
total_count += 1
if args.report_every > 0 and iteration < iterations and iteration % args.report_every == 0:
print(f'[{args.id:2}] Iteration {iteration:5}/{iterations:5}; '
f'Current latency: {round(duration, 2):.2f}ms; '
f'Average latency: {round(np.average(processing_times), 2):.2f}ms')
# Latency and accuracy
if accurracy_measuring_mode:
accuracy = 100 * matches_count / total_count
print(f"[{args.id:2}] "
f"Iterations: {iterations:5}; "
f"Final average latency: {round(np.average(processing_times), 2):.2f}ms; "
f"Classification accuracy: {accuracy}%")
if accuracy < 100.0:
        print('Accuracy is lower than 100')
exit(1)
# Latency only
else:
print(f"[{args.id:2}] "
f"Iterations: {iterations:5}; "
f"Final average latency: {round(np.average(processing_times), 2):.2f}ms")
|
alf/environments/suite_unittest_test.py | www2171668/alf | 175 | 11120406 | # Copyright (c) 2019 Horizon Robotics. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from absl import logging
from absl.testing import parameterized
import numpy as np
import torch
import alf
from alf.data_structures import TimeStep, StepType
from alf.environments.suite_unittest import ActionType
from alf.environments.suite_unittest import ValueUnittestEnv
from alf.environments.suite_unittest import PolicyUnittestEnv
from alf.environments.suite_unittest import RNNPolicyUnittestEnv
class SuiteUnittestEnvTest(parameterized.TestCase, alf.test.TestCase):
@parameterized.parameters(ActionType.Discrete, ActionType.Continuous)
def test_value_unittest_env(self, action_type):
batch_size = 1
steps_per_episode = 13
env = ValueUnittestEnv(
batch_size, steps_per_episode, action_type=action_type)
time_step = env.reset()
for _ in range(10):
for s in range(steps_per_episode):
if s == 0:
step_type = StepType.FIRST
discount = 1.0
elif s == steps_per_episode - 1:
step_type = StepType.LAST
discount = 0.0
else:
step_type = StepType.MID
discount = 1.0
self.assertEqual(time_step.step_type,
torch.full([batch_size], step_type))
self.assertEqual(time_step.reward, torch.ones(batch_size))
self.assertEqual(time_step.discount,
torch.full([batch_size], discount))
action = torch.randint(0, 2, (batch_size, 1))
time_step = env.step(action)
@parameterized.parameters(ActionType.Discrete, ActionType.Continuous)
def test_policy_unittest_env(self, action_type):
batch_size = 100
steps_per_episode = 13
env = PolicyUnittestEnv(
batch_size, steps_per_episode, action_type=action_type)
time_step = env.reset()
for _ in range(10):
for s in range(steps_per_episode):
if s == 0:
step_type = StepType.FIRST
discount = 1.0
elif s == steps_per_episode - 1:
step_type = StepType.LAST
discount = 0.0
else:
step_type = StepType.MID
discount = 1.0
if s == 0:
reward = torch.zeros(batch_size)
else:
reward = (action == prev_observation.to(torch.int64)).to(
torch.float32)
reward = reward.reshape(batch_size)
self.assertEqual(time_step.step_type,
torch.full([batch_size], step_type))
self.assertEqual(time_step.reward, reward)
self.assertEqual(time_step.discount,
torch.full([batch_size], discount))
action = torch.randint(0, 2, (batch_size, 1))
prev_observation = time_step.observation
time_step = env.step(action)
def test_rnn_policy_unittest_env(self):
batch_size = 100
steps_per_episode = 5
gap = 3
env = RNNPolicyUnittestEnv(batch_size, steps_per_episode, gap)
time_step = env.reset()
for _ in range(10):
for s in range(steps_per_episode):
if s == 0:
observation0 = time_step.observation
if s == 0:
step_type = StepType.FIRST
discount = 1.0
elif s == steps_per_episode - 1:
step_type = StepType.LAST
discount = 0.0
else:
step_type = StepType.MID
discount = 1.0
if s <= gap:
reward = torch.zeros(batch_size)
else:
reward = (2 * action - 1 == observation0.to(
torch.int64)).to(torch.float32)
reward = reward.reshape(batch_size)
self.assertEqual(time_step.step_type,
torch.full([batch_size], step_type))
self.assertEqual(time_step.reward, reward)
self.assertEqual(time_step.discount,
torch.full([batch_size], discount))
action = torch.randint(0, 2, (batch_size, 1))
time_step = env.step(action)
if __name__ == '__main__':
alf.test.main()
|
backtrack/Yu/17.py | lidongdongbuaa/leetcode | 1,232 | 11120453 | <reponame>lidongdongbuaa/leetcode
class Solution(object):
def letterCombinations(self, string):
# input : "23"
# output: ["ad","ae","af","bd","be","bf","cd","ce","cf"]
#Edge
if not string:
return []
#Global Variable
self.dict = {
"2":"abc",
"3":"def",
"4":"ghi",
"5":"jkl",
"6":"mno",
"7":"pqrs",
"8":"tuv",
"9":"wxyz"
}
self.res = []
# Helper function
def dfs(string, index, temp):
if len(temp) == len(string):
self.res.append("".join(x for x in temp))
return
for char in self.dict[string[index]]:
temp.append(char)
dfs(string, index+1, temp)
temp.pop()
# Function Call
dfs(string, 0, [])
return self.res
|
feedhq/feeds/management/commands/__init__.py | feedhq/feedhq | 361 | 11120479 | <filename>feedhq/feeds/management/commands/__init__.py
import os
from django.conf import settings
from django.core.management.base import BaseCommand
from raven import Client
class SentryCommand(BaseCommand):
def handle(self, *args, **kwargs):
try:
self.handle_sentry(*args, **kwargs)
except Exception:
if settings.DEBUG or 'SENTRY_DSN' not in os.environ:
raise
client = Client()
client.captureException()
|
google_refexp_py_lib/refexp_eval.py | sidr97/Google_Refexp_toolbox | 164 | 11120504 | """Python class for the evaluation of Google Refexp dataset.
This script contains two python classes:
1. GoogleRefexpEvalComprehension
- Use precision@k score to evaluate comprehension task performance
  - Can also evaluate the generation task in an end-to-end way
2. GoogleRefexpEvalGeneration
  - Use Amazon Mechanical Turk (AMT) to compare generated refexps with GT
    with the following steps (steps a, c, and f are covered by the class):
a. Generate csv files for AMT
b. Generate images and masked images
    c. Upload these images to a server (e.g. Amazon S3) so that the images are
publicly accessible
    d. Create an AMT project with the interface at
./cache_evaluation/AMT_interface/AMT_template_generated_vs_GT.html
e. Upload csv files and start AMT job
f. Download annotated json file and calculate the score
TO CHECK:
GoogleRefexp.getAnnIds(): get COCO object ids
GoogleRefexp.getRefexpIds(): get referring expression ids
GoogleRefexp.getRefexpAnns(): get google refexp annotations for a list of annotation_id
  GoogleRefexp.getGtBoxes(): currently assumed to be a dictionary keyed by id, with a bbox list as value
TODO:
  Comprehension:
  - A script that can visualize predicted bboxes whose IoU satisfies a constraint
"""
import json
import os
import copy
import random
import sys
import numpy
import csv
from scipy import misc
from PIL import Image, ImageDraw
import matplotlib.pyplot as plt
from refexp import Refexp # Need to check - change
import common_utils as cu
class RefexpEvalComprehension(object):
def __init__(self, refexp_dataset_path, coco_data_path):
"""Constructor for GoogleRefexpEvalComprehension class for evaluation.
Args:
refexp_dataset_path: path for the Google Refexp dataset file
coco_data_path: path for the original coco dataset file (e.g. 'instances_train2014.json')
"""
# handle refexp dataset file
assert refexp_dataset_path, "Refexp dataset file missing!"
self.refexp_dataset_path = refexp_dataset_path
print 'Loading Google Refexp dataset file for the comprehension task.'
self.refexp_dataset = Refexp(refexp_dataset_path, coco_data_path) # Need to check - change
self.gt_ann_ids_set = frozenset(self.refexp_dataset.getAnnIds()) # Need to check - change
self.gt_refexp_ids_set = frozenset(self.refexp_dataset.getRefexpIds()) # Need to check - change
# reset evaluation state
self.reset_eval_state()
def reset_eval_state(self):
"""Reset evaluation state."""
self.pred_results_path = None
self.pred_results = None
self.flag_already_eval = False
def evaluate(self, pred_results_path,
thresh_iou=0.5,
thresh_k=1,
flag_ignore_non_existed_object=False,
flag_ignore_non_existed_gt_refexp=False,
flag_missing_objects_verbose=False,
flag_missing_refexps_verbose=False):
"""Evaluate the predicted results for the comprehension task.
Args:
pred_results_path: path for the predicted results with the format
described in ./cache_evaluation/format_comprehension_eval.md
thresh_iou: threshold of the IoU ratio of the evaluation
thresh_k: precision@k
      flag_ignore_non_existed_object: if set to True, the evaluation process
        continues with a warning when it encounters objects that do not exist
        in self.refexp_dataset. Otherwise it stops.
      flag_ignore_non_existed_gt_refexp: if set to True, the evaluation process
        continues when it encounters GT referring expressions that do not
        exist. Otherwise it stops.
      flag_missing_objects_verbose: if set to True, lists the ids of all the
        missing objects in self.refexp_dataset
      flag_missing_refexps_verbose: if set to True, lists the ids of all the
        missing referring expressions in self.refexp_dataset
Returns:
A two element tuple. The first element is precision@k. The second
element is the predicted results (a dictionary) with an added field
'best_iou' of the best iou for the top k bounding boxes.
"""
# Load predicted results
self.reset_eval_state()
print 'Loading predicted result file for the comprehension task.'
with open(pred_results_path) as fin:
self.pred_results = json.load(fin)
# evaluation
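    # Track which annotations / refexps actually appear in the predictions so
    # we can warn about the ones missing from the ground truth sets below.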
pred_ann_ids_set = set()
pred_refexp_ids_set = set()
score = 0.0
num_valid_pred = 0
for pred_elem in self.pred_results:
# validate the predicted results
assert 'annotation_id' in pred_elem, 'Object annotation id missing!'
assert 'predicted_bounding_boxes' in pred_elem, \
'list of predicted bounding boxes missing!'
ann_id = pred_elem['annotation_id']
gt_bbox = self._get_GT_bbox_with_annotation_id(ann_id) # Need to check - change
if gt_bbox is None:
if flag_ignore_non_existed_object:
print ('Ignore COCO annotation id %d which does not exist in '
'Refexp dataset file for evaluation' % ann_id)
pred_elem['best_iou'] = 0.0
continue
else:
print ('COCO annotation id %d does not exist in Refexp '
'dataset file for evaluation!' % ann_id)
raise
if ('refexp_id' in pred_elem) and not(pred_elem['refexp_id'] in self.gt_refexp_ids_set):
if flag_ignore_non_existed_gt_refexp:
print ('Ignore refexp id %d which does not exist in '
'Refexp dataset file for evaluation' % pred_elem['refexp_id'])
pred_elem['best_iou'] = 0.0
continue
else:
print ('refexp id %d does not exist in Refexp '
'dataset file for evaluation!' % pred_elem['refexp_id'])
raise
pred_ann_ids_set.add(ann_id)
if 'refexp_id' in pred_elem:
pred_refexp_ids_set.add(pred_elem['refexp_id'])
num_valid_pred += 1
# check whether it is a correct prediction
pred_bboxes = pred_elem['predicted_bounding_boxes']
best_iou = 0.0
for k in xrange(min(thresh_k, len(pred_bboxes))):
iou = cu.iou_bboxes(pred_bboxes[k], gt_bbox)
best_iou = max(best_iou, iou)
if best_iou >= thresh_iou:
score += 1.0
pred_elem['best_iou'] = best_iou
score /= num_valid_pred
# warning for missing objects and refexps
gt_ann_ids_left_set = self.gt_ann_ids_set - pred_ann_ids_set
gt_refexp_ids_left_set = self.gt_refexp_ids_set - pred_refexp_ids_set
if gt_ann_ids_left_set:
print ('Missing %d objects in the refexp dataset file in the predicted '
'file' % len(gt_ann_ids_left_set))
if flag_missing_objects_verbose:
print ('The missing object annotation ids are:')
print gt_ann_ids_left_set # TODO pretty print format
if gt_refexp_ids_left_set:
print ('Missing %d refexps in the refexp dataset file in the predicted '
'file' % len(gt_refexp_ids_left_set))
if flag_missing_refexps_verbose:
print ('The missing refexp ids are:')
print gt_refexp_ids_left_set # TODO pretty print format
# summarize the results
print 'The average prec@%d score is %.3f' % (thresh_k, score)
return (score, self.pred_results)
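# Illustrative usage sketch (comments only, not part of the original toolbox).
# The enclosing class name and the file paths below are assumptions made for
# this example:
#
#   evaluator = RefexpEvalComprehension('google_refexp_val.json',
#                                       'external/coco/annotations/instances_train2014.json')
#   prec_at_1, results = evaluator.evaluate('my_comprehension_results.json',
#                                           thresh_iou=0.5, thresh_k=1)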
def _get_GT_bbox_with_annotation_id(self, ann_id):
if ann_id not in self.gt_ann_ids_set:
return None
anns = self.refexp_dataset.loadAnns(ids = [ann_id])
if len(anns) == 0:
return None
assert len(anns) == 1
return anns[0]['bbox']
def visualize_top_predicted_bbox(self, pred_sample, coco_image_dir):
"""Visualize the top predicted bounding box."""
assert 'annotation_id' in pred_sample, 'Object annotation id missing!'
assert 'predicted_bounding_boxes' in pred_sample, \
'list of predicted bounding boxes missing!'
if not pred_sample['predicted_bounding_boxes']:
print 'Empty predicted bounding boxes.'
return
bbox_pred_top = pred_sample['predicted_bounding_boxes'][0]
ann_id = pred_sample['annotation_id']
ann = self.refexp_dataset.loadAnns(ids=[ann_id])[0]
image_id = ann['image_id']
img_coco = self.refexp_dataset.loadImgs(ids=[image_id])[0]
iou = cu.iou_bboxes(bbox_pred_top, ann['bbox'])
if 'refexp' in pred_sample or 'refexp_id' in pred_sample:
print 'The Referring expression input to the model is:'
if 'refexp' in pred_sample:
print ' ' + pred_sample['refexp']
else:
refexp_tmp = self.refexp_dataset.loadRefexps(ids=pred_sample['refexp_id'])[0]
print ' ' + refexp_tmp['raw']
I = misc.imread(os.path.join(coco_image_dir, (img_coco['file_name'])))
ax = plt.imshow(I)
ax = plt.axis('off')
ax = plt.title('IoU: %.3f, green bbox: GT, red bbox: predicted' % iou)
cu.draw_bbox(plt.gca(), ann['bbox'], edge_color='green')
cu.draw_bbox(plt.gca(), bbox_pred_top, edge_color='red')
class RefexpEvalGeneration(object):
def __init__(self, refexp_dataset_path, coco_data_path):
"""Constructor for GoogleRefexpEvalGeneration class for evaluation.
Args:
refexp_dataset_path: path for the Google Refexp dataset file
"""
# handle refexp dataset file
assert refexp_dataset_path, "Refexp dataset file missing!"
self.refexp_dataset_path = refexp_dataset_path
print 'Loading Google Refexp dataset file for the generation task.'
self.refexp_dataset = Refexp(refexp_dataset_path, coco_data_path) # Need to check - change
self.gt_ann_ids_set = frozenset(self.refexp_dataset.getAnnIds()) # Need to check - change
def generate_AMT_csv_and_images(self, pred_results_path,
public_image_url_prefix,
AMT_csv_path,
num_refexp_group=5,
flag_generate_images=True,
coco_image_dir=None,
local_image_dir=None):
"""Generate a csv file and images for AMT evaluation.
Args:
pred_results_path: path for the predicted results with the format
described in ./cache_evaluation/format_generation_eval.md
public_image_url_prefix: image url prefix for the publicly accessible
images. AMTurkers should be able to access images with this url prefix
(see details in README.md, AMT section)
AMT_csv_path: path for the generated csv file.
num_refexp_group: the number of referring expressions that we plan to
group as one HIT for AMT. default=5 (highly recommended, otherwise
need to change AMT_interface)
flag_generate_images: if set true, will generate images for AMT
coco_image_dir: directory that coco images can be found, e.g.
./external/coco/images/train2014
local_image_dir: directory to save the images locally.
"""
# Load predicted results
print 'Loading predicted result file for the generation task.'
with open(pred_results_path) as fin:
self.pred_results = json.load(fin)
assert len(self.pred_results) % num_refexp_group == 0, ('The number of '
'generated sentences should be a multiple of the number of refexps in the '
'AMT group (i.e. %d)' % num_refexp_group)
# Generate csv file for AMT
pred_ann_ids = self._generate_AMT_csv_file(
AMT_csv_path, public_image_url_prefix,
num_refexp_group=num_refexp_group)
# Generate images for AMT if necessary
if flag_generate_images:
assert coco_image_dir, 'Missing the directory of original coco image'
assert local_image_dir, 'Missing the local directory for storing images'
self._generate_images_for_AMT(pred_ann_ids,
coco_image_dir=coco_image_dir, local_image_dir=local_image_dir)
def parse_AMT_results(self, csv_download_path, num_refexp_group=5):
"""Parse the AMT results from the downloaded csv file.
Args:
csv_download_path: the path of the downloaded csv result file from AMT.
num_refexp_group: the number of the refexp grouped in a HIT.
Return:
A tuple with two numbers. They represent the ratio of the generated
referring expressions are considered to be better and similar
respectively.
"""
num_better = 0
num_similar = 0
num_row = 0
with open(csv_download_path) as fin:
reader = csv.DictReader(fin)
for row in reader:
for ind in xrange(num_refexp_group):
key = 'Answer.choice_%d' % ind
if row[key] == 'GEN':
num_better += 1
elif row[key] == 'similar':
num_similar += 1
num_row += 1
ratio_better = num_better / float(num_row * num_refexp_group)
ratio_similar = num_similar / float(num_row * num_refexp_group)
print ('%.4f of the generated referring expressions are considered to be '
'better than humans (groundtruth)' % ratio_better)
print ('%.4f of the generated referring expressions are considered to be '
'similar to humans (groundtruth)' % ratio_similar)
return (ratio_better, ratio_similar)
def _generate_AMT_csv_file(self, AMT_csv_path, public_image_url_prefix,
num_refexp_group=5):
"""Private function to generate csv file for AMT."""
print 'Start to generate csv file to upload to AMT'
fieldnames_template = ['image_url_o_%d', 'image_url_mask_%d',
'descrip_type_%d_0', 'descrip_type_%d_1',
'descrip_%d_0', 'descrip_%d_1']
pred_ann_ids = []
ind_cur = 0
with open(AMT_csv_path, 'w') as fout:
while ind_cur < len(self.pred_results):
dct_row = {}
fields_all = []
for ind_group in xrange(num_refexp_group):
# check pred_result format
pred_elem = self.pred_results[ind_cur]
assert 'annotation_id' in pred_elem, 'Object annotation id missing!'
assert 'generated_refexp' in pred_elem, 'Generated refexp missing!'
pred_ann_id = pred_elem['annotation_id']
# load GT data
assert pred_ann_id in self.gt_ann_ids_set, ('Cannot find object with '
'annotation id %d' % pred_ann_id)
gt_data = self.refexp_dataset.loadAnns(ids = [pred_ann_id])[0] # Need to check - change
gt_refexps = self.refexp_dataset.loadRefexps(ids = gt_data['refexp_ids']) # Need to check - change
pred_ann_ids.append(pred_ann_id)
# add fieldnames
for field_template in fieldnames_template:
fields_all.append(field_template % ind_group)
# add image urls
img_name = 'coco_%d.jpg' % gt_data['image_id']
img_mask_name = 'coco_%d_ann_%d_masked.jpg' % (gt_data['image_id'], pred_ann_id)
dct_row['image_url_o_%d' % ind_group] = public_image_url_prefix + img_name
dct_row['image_url_mask_%d' % ind_group] = public_image_url_prefix + img_mask_name
# get refexp and type, shuffle them (refexp, type)
descrip_gen = (pred_elem['generated_refexp'], 'GEN')
descrip_gt = (' '.join(gt_refexps[0]['tokens']), 'GT') # Need to check - change
list_descrip = [descrip_gen, descrip_gt]
random.shuffle(list_descrip)
for ind in xrange(2):
dct_row['descrip_%d_%d' % (ind_group, ind)] = list_descrip[ind][0]
dct_row['descrip_type_%d_%d' % (ind_group, ind)] = list_descrip[ind][1]
ind_cur += 1
# write row to csv files
assert len(dct_row) == len(fields_all)
if ind_cur == num_refexp_group:
writer = csv.DictWriter(fout, fieldnames=fields_all)
writer.writeheader()
writer.writerow(dct_row)
print 'Finished generating the csv file'
return pred_ann_ids
def _generate_images_for_AMT(self, pred_ann_ids,
coco_image_dir=None, local_image_dir=None):
"""Private function to generated images to upload to AMT."""
assert coco_image_dir and local_image_dir
assert os.path.isdir(coco_image_dir)
if not os.path.isdir(local_image_dir):
print 'Input local image directory does not exist, creating it'
os.makedirs(local_image_dir)
print 'Start to generate images for AMT in local hard disk'
image_ids_saved = set()
for (ind, pred_ann_id) in enumerate(pred_ann_ids):
gt_data = self.refexp_dataset.loadAnns(ids = [pred_ann_id])[0] # Need to check - change
img = self._read_image(coco_image_dir, gt_data)
mask = self._load_mask(gt_data)
masked_img = cu.apply_mask_to_image(img, mask)
masked_img_path = os.path.join(local_image_dir, ('coco_%d_ann_%d'
'_masked.jpg' % (gt_data['image_id'], pred_ann_id)))
misc.imsave(masked_img_path, masked_img)
if gt_data['image_id'] not in image_ids_saved:
image_ids_saved.add(gt_data['image_id'])
img_path = os.path.join(local_image_dir, 'coco_%d.jpg' % gt_data['image_id'])
misc.imsave(img_path, img)
print ('Images generated in local hard disk, please make sure to make them '
'publicly available online.')
def _read_image(self, coco_image_dir, gt_data):
"""Private function to read an original coco image."""
img_coco = self.refexp_dataset.loadImgs(ids=gt_data['image_id'])[0]
return misc.imread(os.path.join(coco_image_dir, img_coco['file_name']))
def _load_mask(self, gt_data):
"""Private function to load the mask of a coco object."""
img_coco = self.refexp_dataset.loadImgs(ids=gt_data['image_id'])[0]
mask = Image.new('L', (img_coco['width'], img_coco['height']), 0)
for seg in gt_data['segmentation']:
ImageDraw.Draw(mask).polygon(seg, outline='white', fill='white')
return numpy.asarray(mask)
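# Illustrative end-to-end sketch for the generation task (comments only; the
# file names and the URL prefix are placeholders, not part of the original
# toolbox):
#
#   gen_eval = RefexpEvalGeneration('google_refexp_val.json',
#                                   'external/coco/annotations/instances_train2014.json')
#   gen_eval.generate_AMT_csv_and_images('my_generation_results.json',
#                                        'https://example.com/refexp_amt/',
#                                        'amt_upload.csv',
#                                        coco_image_dir='external/coco/images/train2014',
#                                        local_image_dir='amt_images')
#   # ... run the AMT HITs, download the result csv, then:
#   ratio_better, ratio_similar = gen_eval.parse_AMT_results('amt_results.csv')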
|
jaclearn/nlp/tree/node.py | dapatil211/Jacinle | 114 | 11120518 | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
# File : node.py
# Author : <NAME>
# Email : <EMAIL>
# Date : 07/04/2018
#
# This file is part of Jacinle.
# Distributed under terms of the MIT license.
"""
The definition for tree Nodes.
"""
from copy import deepcopy
__all__ = ['Node']
class Node(object):
def __init__(self, vtype, etype=None):
self.vtype = vtype
self.etype = etype
self.children = []
self.father = None
self.sibling_ind = -1
@property
def nr_children(self):
return len(self.children)
@property
def size(self):
return 1 + sum(c.size for c in self.children)
@property
def nr_leaves(self):
if self.is_leaf:
return 1
return sum(c.nr_leaves for c in self.children)
@property
def is_leaf(self):
return len(self.children) == 0
@property
def lson(self):
assert len(self.children) == 2
return self.children[0]
@property
def rson(self):
assert len(self.children) == 2
return self.children[1]
@property
def depth(self):
"""
Depth is defined as the number of nodes on the longest path from the root to a leaf.
(Thus a single node has depth 1.)
"""
if self.is_leaf:
return 1
return max([c.depth for c in self.children]) + 1
@property
def breadth(self):
"""
Breadth is defined as the maximum number of children of any node in the tree.
"""
if self.is_leaf:
return 1
return max(max([c.breadth for c in self.children]), len(self.children))
def clone(self):
return deepcopy(self)
def insert_child(self, pos, node):
node.father = self
self.children.insert(pos, node)
self._refresh_sibling_inds()
return node
def remove_child(self, node):
assert self.children[node.sibling_ind] == node
self.children.remove(node)
self._refresh_sibling_inds()
rv = node.father, node.sibling_ind
node.father = None
node.sibling_ind = -1
return rv
def append_child(self, node):
node.father = self
node.sibling_ind = len(self.children)
self.children.append(node)
return node
def attach(self, father, sibling_ind=-1):
"""
Attach to a new father.
"""
if sibling_ind == -1:
return father.append_child(self)
return father.insert_child(sibling_ind, self)
def detach(self):
"""
Detach from the father.
"""
return self.father.remove_child(self)
def _refresh_sibling_inds(self):
for i, c in enumerate(self.children):
c.sibling_ind = i
def __str_node__(self):
if self.etype is not None:
return 'VType: {} EType: {}'.format(self.vtype, self.etype)
return 'VType: {}'.format(self.vtype)
def __str__(self):
results = [self.__str_node__()]
for c in self.children:
lines = str(c).split('\n')
results.extend([' ' + l for l in lines])
return '\n'.join(results)
def __repr__(self):
return str(self)
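# Minimal usage sketch (added for illustration; the labels below are made up
# and not part of the original module).
if __name__ == '__main__':
    root = Node('S')
    np_node = root.append_child(Node('NP'))
    root.append_child(Node('VP'))
    np_node.append_child(Node('DT'))
    np_node.append_child(Node('NN'))
    print(root)            # indented dump via __str__
    print(root.size)       # 5: all nodes in the tree
    print(root.nr_leaves)  # 3: DT, NN and VP
    print(root.depth)      # 3: root -> NP -> DT
    print(root.breadth)    # 2: no node has more than two children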
|
data_gen/singing/binarize.py | dtx525942103/DiffSinger | 288 | 11120532 | import os
import random
from copy import deepcopy
import pandas as pd
import logging
from tqdm import tqdm
import json
import glob
import re
from resemblyzer import VoiceEncoder
import traceback
import numpy as np
import pretty_midi
import librosa
from scipy.interpolate import interp1d
import torch
from textgrid import TextGrid
from utils.hparams import hparams
from data_gen.tts.data_gen_utils import build_phone_encoder, get_pitch
from utils.pitch_utils import f0_to_coarse
from data_gen.tts.base_binarizer import BaseBinarizer, BinarizationError
from data_gen.tts.binarizer_zh import ZhBinarizer
from data_gen.tts.txt_processors.zh_g2pM import ALL_YUNMU
from vocoders.base_vocoder import VOCODERS
class SingingBinarizer(BaseBinarizer):
def __init__(self, processed_data_dir=None):
if processed_data_dir is None:
processed_data_dir = hparams['processed_data_dir']
self.processed_data_dirs = processed_data_dir.split(",")
self.binarization_args = hparams['binarization_args']
self.pre_align_args = hparams['pre_align_args']
self.item2txt = {}
self.item2ph = {}
self.item2wavfn = {}
self.item2f0fn = {}
self.item2tgfn = {}
self.item2spk = {}
def split_train_test_set(self, item_names):
item_names = deepcopy(item_names)
test_item_names = [x for x in item_names if any([ts in x for ts in hparams['test_prefixes']])]
train_item_names = [x for x in item_names if x not in set(test_item_names)]
logging.info("train {}".format(len(train_item_names)))
logging.info("test {}".format(len(test_item_names)))
return train_item_names, test_item_names
def load_meta_data(self):
for ds_id, processed_data_dir in enumerate(self.processed_data_dirs):
wav_suffix = '_wf0.wav'
txt_suffix = '.txt'
ph_suffix = '_ph.txt'
tg_suffix = '.TextGrid'
all_wav_pieces = glob.glob(f'{processed_data_dir}/*/*{wav_suffix}')
for piece_path in all_wav_pieces:
item_name = raw_item_name = piece_path[len(processed_data_dir)+1:].replace('/', '-')[:-len(wav_suffix)]
if len(self.processed_data_dirs) > 1:
item_name = f'ds{ds_id}_{item_name}'
self.item2txt[item_name] = open(f'{piece_path.replace(wav_suffix, txt_suffix)}').readline()
self.item2ph[item_name] = open(f'{piece_path.replace(wav_suffix, ph_suffix)}').readline()
self.item2wavfn[item_name] = piece_path
self.item2spk[item_name] = re.split('-|#', piece_path.split('/')[-2])[0]
if len(self.processed_data_dirs) > 1:
self.item2spk[item_name] = f"ds{ds_id}_{self.item2spk[item_name]}"
self.item2tgfn[item_name] = piece_path.replace(wav_suffix, tg_suffix)
print('spkers: ', set(self.item2spk.values()))
self.item_names = sorted(list(self.item2txt.keys()))
if self.binarization_args['shuffle']:
random.seed(1234)
random.shuffle(self.item_names)
self._train_item_names, self._test_item_names = self.split_train_test_set(self.item_names)
@property
def train_item_names(self):
return self._train_item_names
@property
def valid_item_names(self):
return self._test_item_names
@property
def test_item_names(self):
return self._test_item_names
def process(self):
self.load_meta_data()
os.makedirs(hparams['binary_data_dir'], exist_ok=True)
self.spk_map = self.build_spk_map()
print("| spk_map: ", self.spk_map)
spk_map_fn = f"{hparams['binary_data_dir']}/spk_map.json"
json.dump(self.spk_map, open(spk_map_fn, 'w'))
self.phone_encoder = self._phone_encoder()
self.process_data('valid')
self.process_data('test')
self.process_data('train')
def _phone_encoder(self):
ph_set_fn = f"{hparams['binary_data_dir']}/phone_set.json"
ph_set = []
if hparams['reset_phone_dict'] or not os.path.exists(ph_set_fn):
for ph_sent in self.item2ph.values():
ph_set += ph_sent.split(' ')
ph_set = sorted(set(ph_set))
json.dump(ph_set, open(ph_set_fn, 'w'))
print("| Build phone set: ", ph_set)
else:
ph_set = json.load(open(ph_set_fn, 'r'))
print("| Load phone set: ", ph_set)
return build_phone_encoder(hparams['binary_data_dir'])
# @staticmethod
# def get_pitch(wav_fn, spec, res):
# wav_suffix = '_wf0.wav'
# f0_suffix = '_f0.npy'
# f0fn = wav_fn.replace(wav_suffix, f0_suffix)
# pitch_info = np.load(f0fn)
# f0 = [x[1] for x in pitch_info]
# spec_x_coor = np.arange(0, 1, 1 / len(spec))[:len(spec)]
# f0_x_coor = np.arange(0, 1, 1 / len(f0))[:len(f0)]
# f0 = interp1d(f0_x_coor, f0, 'nearest', fill_value='extrapolate')(spec_x_coor)[:len(spec)]
# # f0_x_coor = np.arange(0, 1, 1 / len(f0))
# # f0_x_coor[-1] = 1
# # f0 = interp1d(f0_x_coor, f0, 'nearest')(spec_x_coor)[:len(spec)]
# if sum(f0) == 0:
# raise BinarizationError("Empty f0")
# assert len(f0) == len(spec), (len(f0), len(spec))
# pitch_coarse = f0_to_coarse(f0)
#
# # vis f0
# # import matplotlib.pyplot as plt
# # from textgrid import TextGrid
# # tg_fn = wav_fn.replace(wav_suffix, '.TextGrid')
# # fig = plt.figure(figsize=(12, 6))
# # plt.pcolor(spec.T, vmin=-5, vmax=0)
# # ax = plt.gca()
# # ax2 = ax.twinx()
# # ax2.plot(f0, color='red')
# # ax2.set_ylim(0, 800)
# # itvs = TextGrid.fromFile(tg_fn)[0]
# # for itv in itvs:
# # x = itv.maxTime * hparams['audio_sample_rate'] / hparams['hop_size']
# # plt.vlines(x=x, ymin=0, ymax=80, color='black')
# # plt.text(x=x, y=20, s=itv.mark, color='black')
# # plt.savefig('tmp/20211229_singing_plots_test.png')
#
# res['f0'] = f0
# res['pitch'] = pitch_coarse
@classmethod
def process_item(cls, item_name, ph, txt, tg_fn, wav_fn, spk_id, encoder, binarization_args):
if hparams['vocoder'] in VOCODERS:
wav, mel = VOCODERS[hparams['vocoder']].wav2spec(wav_fn)
else:
wav, mel = VOCODERS[hparams['vocoder'].split('.')[-1]].wav2spec(wav_fn)
res = {
'item_name': item_name, 'txt': txt, 'ph': ph, 'mel': mel, 'wav': wav, 'wav_fn': wav_fn,
'sec': len(wav) / hparams['audio_sample_rate'], 'len': mel.shape[0], 'spk_id': spk_id
}
try:
if binarization_args['with_f0']:
# cls.get_pitch(wav_fn, mel, res)
cls.get_pitch(wav, mel, res)
if binarization_args['with_txt']:
try:
# print(ph)
phone_encoded = res['phone'] = encoder.encode(ph)
except:
traceback.print_exc()
raise BinarizationError(f"Empty phoneme")
if binarization_args['with_align']:
cls.get_align(tg_fn, ph, mel, phone_encoded, res)
except BinarizationError as e:
print(f"| Skip item ({e}). item_name: {item_name}, wav_fn: {wav_fn}")
return None
return res
class MidiSingingBinarizer(SingingBinarizer):
item2midi = {}
item2midi_dur = {}
item2is_slur = {}
item2ph_durs = {}
item2wdb = {}
def load_meta_data(self):
for ds_id, processed_data_dir in enumerate(self.processed_data_dirs):
meta_midi = json.load(open(os.path.join(processed_data_dir, 'meta.json'))) # [list of dict]
for song_item in meta_midi:
item_name = raw_item_name = song_item['item_name']
if len(self.processed_data_dirs) > 1:
item_name = f'ds{ds_id}_{item_name}'
self.item2wavfn[item_name] = song_item['wav_fn']
self.item2txt[item_name] = song_item['txt']
self.item2ph[item_name] = ' '.join(song_item['phs'])
self.item2wdb[item_name] = [1 if x in ALL_YUNMU + ['AP', 'SP', '<SIL>'] else 0 for x in song_item['phs']]
self.item2ph_durs[item_name] = song_item['ph_dur']
self.item2midi[item_name] = song_item['notes']
self.item2midi_dur[item_name] = song_item['notes_dur']
self.item2is_slur[item_name] = song_item['is_slur']
self.item2spk[item_name] = 'pop-cs'
if len(self.processed_data_dirs) > 1:
self.item2spk[item_name] = f"ds{ds_id}_{self.item2spk[item_name]}"
print('spkers: ', set(self.item2spk.values()))
self.item_names = sorted(list(self.item2txt.keys()))
if self.binarization_args['shuffle']:
random.seed(1234)
random.shuffle(self.item_names)
self._train_item_names, self._test_item_names = self.split_train_test_set(self.item_names)
@staticmethod
def get_pitch(wav_fn, wav, spec, ph, res):
wav_suffix = '.wav'
# midi_suffix = '.mid'
wav_dir = 'wavs'
f0_dir = 'f0'
item_name = '/'.join(os.path.splitext(wav_fn)[0].split('/')[-2:]).replace('_wf0', '')
res['pitch_midi'] = np.asarray(MidiSingingBinarizer.item2midi[item_name])
res['midi_dur'] = np.asarray(MidiSingingBinarizer.item2midi_dur[item_name])
res['is_slur'] = np.asarray(MidiSingingBinarizer.item2is_slur[item_name])
res['word_boundary'] = np.asarray(MidiSingingBinarizer.item2wdb[item_name])
assert res['pitch_midi'].shape == res['midi_dur'].shape == res['is_slur'].shape, (
res['pitch_midi'].shape, res['midi_dur'].shape, res['is_slur'].shape)
# gt f0.
gt_f0, gt_pitch_coarse = get_pitch(wav, spec, hparams)
if sum(gt_f0) == 0:
raise BinarizationError("Empty **gt** f0")
res['f0'] = gt_f0
res['pitch'] = gt_pitch_coarse
@staticmethod
def get_align(ph_durs, mel, phone_encoded, res, hop_size=hparams['hop_size'], audio_sample_rate=hparams['audio_sample_rate']):
mel2ph = np.zeros([mel.shape[0]], int)
startTime = 0
for i_ph in range(len(ph_durs)):
start_frame = int(startTime * audio_sample_rate / hop_size + 0.5)
end_frame = int((startTime + ph_durs[i_ph]) * audio_sample_rate / hop_size + 0.5)
mel2ph[start_frame:end_frame] = i_ph + 1
startTime = startTime + ph_durs[i_ph]
# print('ph durs: ', ph_durs)
# print('mel2ph: ', mel2ph, len(mel2ph))
res['mel2ph'] = mel2ph
# res['dur'] = None
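# Worked example of the alignment arithmetic above (the sample rate and hop
# size are illustrative assumptions, not values from any particular config):
# with audio_sample_rate=24000 and hop_size=128, a phoneme of duration 0.30 s
# starting at t=0 covers frames int(0 * 24000 / 128 + 0.5) = 0 up to
# int(0.30 * 24000 / 128 + 0.5) = 56, so mel2ph[0:56] is set to that
# phoneme's 1-based index.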
@classmethod
def process_item(cls, item_name, ph, txt, tg_fn, wav_fn, spk_id, encoder, binarization_args):
if hparams['vocoder'] in VOCODERS:
wav, mel = VOCODERS[hparams['vocoder']].wav2spec(wav_fn)
else:
wav, mel = VOCODERS[hparams['vocoder'].split('.')[-1]].wav2spec(wav_fn)
res = {
'item_name': item_name, 'txt': txt, 'ph': ph, 'mel': mel, 'wav': wav, 'wav_fn': wav_fn,
'sec': len(wav) / hparams['audio_sample_rate'], 'len': mel.shape[0], 'spk_id': spk_id
}
try:
if binarization_args['with_f0']:
cls.get_pitch(wav_fn, wav, mel, ph, res)
if binarization_args['with_txt']:
try:
phone_encoded = res['phone'] = encoder.encode(ph)
except:
traceback.print_exc()
raise BinarizationError(f"Empty phoneme")
if binarization_args['with_align']:
cls.get_align(MidiSingingBinarizer.item2ph_durs[item_name], mel, phone_encoded, res)
except BinarizationError as e:
print(f"| Skip item ({e}). item_name: {item_name}, wav_fn: {wav_fn}")
return None
return res
class ZhSingingBinarizer(ZhBinarizer, SingingBinarizer):
pass
class OpencpopBinarizer(MidiSingingBinarizer):
item2midi = {}
item2midi_dur = {}
item2is_slur = {}
item2ph_durs = {}
item2wdb = {}
def split_train_test_set(self, item_names):
item_names = deepcopy(item_names)
test_item_names = [x for x in item_names if any([x.startswith(ts) for ts in hparams['test_prefixes']])]
train_item_names = [x for x in item_names if x not in set(test_item_names)]
logging.info("train {}".format(len(train_item_names)))
logging.info("test {}".format(len(test_item_names)))
return train_item_names, test_item_names
def load_meta_data(self):
raw_data_dir = hparams['raw_data_dir']
# meta_midi = json.load(open(os.path.join(raw_data_dir, 'meta.json'))) # [list of dict]
utterance_labels = open(os.path.join(raw_data_dir, 'transcriptions.txt')).readlines()
for utterance_label in utterance_labels:
song_info = utterance_label.split('|')
item_name = raw_item_name = song_info[0]
self.item2wavfn[item_name] = f'{raw_data_dir}/wavs/{item_name}.wav'
self.item2txt[item_name] = song_info[1]
self.item2ph[item_name] = song_info[2]
# self.item2wdb[item_name] = list(np.nonzero([1 if x in ALL_YUNMU + ['AP', 'SP'] else 0 for x in song_info[2].split()])[0])
self.item2wdb[item_name] = [1 if x in ALL_YUNMU + ['AP', 'SP'] else 0 for x in song_info[2].split()]
self.item2ph_durs[item_name] = [float(x) for x in song_info[5].split(" ")]
self.item2midi[item_name] = [librosa.note_to_midi(x.split("/")[0]) if x != 'rest' else 0
for x in song_info[3].split(" ")]
self.item2midi_dur[item_name] = [float(x) for x in song_info[4].split(" ")]
self.item2is_slur[item_name] = [int(x) for x in song_info[6].split(" ")]
self.item2spk[item_name] = 'opencpop'
print('spkers: ', set(self.item2spk.values()))
self.item_names = sorted(list(self.item2txt.keys()))
if self.binarization_args['shuffle']:
random.seed(1234)
random.shuffle(self.item_names)
self._train_item_names, self._test_item_names = self.split_train_test_set(self.item_names)
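# For reference, each line of transcriptions.txt is expected to carry, in
# '|'-separated order: item_name | txt | phonemes | notes | notes_dur |
# ph_dur | is_slur. An illustrative (made-up) line:
#   item0001|some lyric|n i h ao|C4 C4 D4 D4|0.5 0.5 0.4 0.4|0.25 0.25 0.2 0.2|0 0 0 0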
@staticmethod
def get_pitch(wav_fn, wav, spec, ph, res):
wav_suffix = '.wav'
# midi_suffix = '.mid'
wav_dir = 'wavs'
f0_dir = 'text_f0_align'
item_name = os.path.splitext(os.path.basename(wav_fn))[0]
res['pitch_midi'] = np.asarray(OpencpopBinarizer.item2midi[item_name])
res['midi_dur'] = np.asarray(OpencpopBinarizer.item2midi_dur[item_name])
res['is_slur'] = np.asarray(OpencpopBinarizer.item2is_slur[item_name])
res['word_boundary'] = np.asarray(OpencpopBinarizer.item2wdb[item_name])
assert res['pitch_midi'].shape == res['midi_dur'].shape == res['is_slur'].shape, (res['pitch_midi'].shape, res['midi_dur'].shape, res['is_slur'].shape)
# gt f0.
# f0 = None
# f0_suffix = '_f0.npy'
# f0fn = wav_fn.replace(wav_suffix, f0_suffix).replace(wav_dir, f0_dir)
# pitch_info = np.load(f0fn)
# f0 = [x[1] for x in pitch_info]
# spec_x_coor = np.arange(0, 1, 1 / len(spec))[:len(spec)]
#
# f0_x_coor = np.arange(0, 1, 1 / len(f0))[:len(f0)]
# f0 = interp1d(f0_x_coor, f0, 'nearest', fill_value='extrapolate')(spec_x_coor)[:len(spec)]
# if sum(f0) == 0:
# raise BinarizationError("Empty **gt** f0")
#
# pitch_coarse = f0_to_coarse(f0)
# res['f0'] = f0
# res['pitch'] = pitch_coarse
# gt f0.
gt_f0, gt_pitch_coarse = get_pitch(wav, spec, hparams)
if sum(gt_f0) == 0:
raise BinarizationError("Empty **gt** f0")
res['f0'] = gt_f0
res['pitch'] = gt_pitch_coarse
@classmethod
def process_item(cls, item_name, ph, txt, tg_fn, wav_fn, spk_id, encoder, binarization_args):
if hparams['vocoder'] in VOCODERS:
wav, mel = VOCODERS[hparams['vocoder']].wav2spec(wav_fn)
else:
wav, mel = VOCODERS[hparams['vocoder'].split('.')[-1]].wav2spec(wav_fn)
res = {
'item_name': item_name, 'txt': txt, 'ph': ph, 'mel': mel, 'wav': wav, 'wav_fn': wav_fn,
'sec': len(wav) / hparams['audio_sample_rate'], 'len': mel.shape[0], 'spk_id': spk_id
}
try:
if binarization_args['with_f0']:
cls.get_pitch(wav_fn, wav, mel, ph, res)
if binarization_args['with_txt']:
try:
phone_encoded = res['phone'] = encoder.encode(ph)
except:
traceback.print_exc()
raise BinarizationError(f"Empty phoneme")
if binarization_args['with_align']:
cls.get_align(OpencpopBinarizer.item2ph_durs[item_name], mel, phone_encoded, res)
except BinarizationError as e:
print(f"| Skip item ({e}). item_name: {item_name}, wav_fn: {wav_fn}")
return None
return res
if __name__ == "__main__":
SingingBinarizer().process()
|
test-framework/test-suites/unit/tests/command/stack/commands/list/firmware/test_command_stack_commands_list_firmware_plugin_basic.py | kmcm0/stacki | 123 | 11120544 | from unittest.mock import create_autospec, ANY
import pytest
from stack.commands import DatabaseConnection
from stack.commands.list.firmware import Command
from stack.commands.list.firmware.plugin_basic import Plugin
class TestListFirmwareBasicPlugin:
"""A test case for the list firmware imp basic plugin."""
@pytest.fixture
def basic_plugin(self):
"""A fixture that returns the plugin instance for use in tests.
This sets up the required mocks needed to construct the plugin class.
"""
mock_command = create_autospec(
spec = Command,
instance = True,
)
mock_command.db = create_autospec(
spec = DatabaseConnection,
spec_set = True,
instance = True,
)
return Plugin(command = mock_command)
def test_provides(self, basic_plugin):
"""Ensure that provides returns 'basic'."""
assert basic_plugin.provides() == "basic"
def test_run(self, basic_plugin):
"""Test that run queries the DB as expected."""
basic_plugin.owner.db.select.return_value = [
["foo", "bar", "baz", "fizz", "buzz", "bam!"],
["this", "is", "a", "test", "return", "value"],
]
expected_results = {
"keys": ["make", "model", "version", "source", "hash", "hash_alg"],
"values": [(row[0], row[1:]) for row in basic_plugin.owner.db.select.return_value],
}
assert expected_results == basic_plugin.run(args = True)
basic_plugin.owner.db.select.assert_called_once_with(ANY)
def test_run_expanded_false(self, basic_plugin):
"""Test that run queries the DB as expected when expanded is False."""
basic_plugin.owner.db.select.return_value = [
["foo", "bar", "baz"],
["fizz", "buzz", "bam!"],
]
expected_results = {
"keys": ["make", "model", "version"],
"values": [(row[0], row[1:]) for row in basic_plugin.owner.db.select.return_value],
}
assert expected_results == basic_plugin.run(args = False)
basic_plugin.owner.db.select.assert_called_once_with(ANY)
|
backend/src/data/github/graphql/user/contribs/models.py | rutvikpadhiyar000/github-trends | 157 | 11120567 | from datetime import date, datetime
from typing import List, Optional
from pydantic import BaseModel, Field
class RawCalendarDay(BaseModel):
date: date
weekday: int
count: int = Field(alias="contributionCount")
class RawCalendarWeek(BaseModel):
contribution_days: List[RawCalendarDay] = Field(alias="contributionDays")
class RawCalendar(BaseModel):
total_contributions: int = Field(alias="totalContributions")
weeks: List[RawCalendarWeek]
class RawEventsRepoName(BaseModel):
name: str = Field(alias="nameWithOwner")
class RawEventsCount(BaseModel):
count: int = Field(alias="totalCount")
class RawEventsCommit(BaseModel):
count: int = Field(alias="commitCount")
occurred_at: datetime = Field(alias="occurredAt")
class RawEventsEvent(BaseModel):
occurred_at: datetime = Field(alias="occurredAt")
class RawEventsPageInfo(BaseModel):
has_next_page: bool = Field(alias="hasNextPage")
end_cursor: Optional[str] = Field(alias="endCursor")
class Config:
allow_none = True
class RawEventsCommits(BaseModel):
nodes: List[RawEventsCommit]
page_info: RawEventsPageInfo = Field(alias="pageInfo")
class RawEventsContribs(BaseModel):
nodes: List[RawEventsEvent]
page_info: RawEventsPageInfo = Field(alias="pageInfo")
class RawEventsRepoCommits(BaseModel):
repo: RawEventsRepoName = Field(alias="repository")
count: RawEventsCount = Field(alias="totalCount")
contribs: RawEventsCommits = Field(alias="contributions")
class RawEventsRepo(BaseModel):
repo: RawEventsRepoName = Field(alias="repository")
count: RawEventsCount = Field(alias="totalCount")
contribs: RawEventsContribs = Field(alias="contributions")
class RawEventsRepoEvent(BaseModel):
repo: RawEventsRepoName = Field(alias="repository")
occurred_at: datetime = Field(alias="occurredAt")
class RawEventsRepoContribs(BaseModel):
count: int = Field(alias="totalCount")
nodes: List[RawEventsRepoEvent]
class RawEvents(BaseModel):
commit_contribs_by_repo: List[RawEventsRepoCommits] = Field(
alias="commitContributionsByRepository"
)
issue_contribs_by_repo: List[RawEventsRepo] = Field(
alias="issueContributionsByRepository"
)
pr_contribs_by_repo: List[RawEventsRepo] = Field(
alias="pullRequestContributionsByRepository"
)
review_contribs_by_repo: List[RawEventsRepo] = Field(
alias="pullRequestReviewContributionsByRepository"
)
repo_contribs: RawEventsRepoContribs = Field(alias="repositoryContributions")
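# Illustrative parsing sketch (not part of the original module); it assumes the
# pydantic v1 `parse_obj` API and uses made-up calendar data keyed by the
# GraphQL aliases declared above.
if __name__ == "__main__":
    raw_calendar = {
        "totalContributions": 3,
        "weeks": [
            {
                "contributionDays": [
                    {"date": "2021-01-01", "weekday": 5, "contributionCount": 1},
                    {"date": "2021-01-02", "weekday": 6, "contributionCount": 2},
                ]
            }
        ],
    }
    calendar = RawCalendar.parse_obj(raw_calendar)
    print(calendar.total_contributions)  # 3
    print(calendar.weeks[0].contribution_days[0].count)  # 1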
|
pyjs/lib/errno.py | takipsizad/pyjs | 739 | 11120577 | EPERM = 1
ENOENT = 2
ESRCH = 3
EINTR = 4
EIO = 5
ENXIO = 6
E2BIG = 7
ENOEXEC = 8
EBADF = 9
ECHILD = 10
EAGAIN = 11
ENOMEM = 12
EACCES = 13
EFAULT = 14
ENOTBLK = 15
EBUSY = 16
EEXIST = 17
EXDEV = 18
ENODEV = 19
ENOTDIR = 20
EISDIR = 21
EINVAL = 22
ENFILE = 23
EMFILE = 24
ENOTTY = 25
ETXTBSY = 26
EFBIG = 27
ENOSPC = 28
ESPIPE = 29
EROFS = 30
EMLINK = 31
EPIPE = 32
EDOM = 33
ERANGE = 34
EDEADLOCK = 35
ENAMETOOLONG = 36
ENOLCK = 37
ENOSYS = 38
ENOTEMPTY = 39
ELOOP = 40
ENOMSG = 42
EIDRM = 43
ECHRNG = 44
EL2NSYNC = 45
EL3HLT = 46
EL3RST = 47
ELNRNG = 48
EUNATCH = 49
ENOCSI = 50
EL2HLT = 51
EBADE = 52
EBADR = 53
EXFULL = 54
ENOANO = 55
EBADRQC = 56
EBADSLT = 57
EBFONT = 59
ENOSTR = 60
ENODATA = 61
ETIME = 62
ENOSR = 63
ENONET = 64
ENOPKG = 65
EREMOTE = 66
ENOLINK = 67
EADV = 68
ESRMNT = 69
ECOMM = 70
EPROTO = 71
EMULTIHOP = 72
EDOTDOT = 73
EBADMSG = 74
EOVERFLOW = 75
ENOTUNIQ = 76
EBADFD = 77
EREMCHG = 78
ELIBACC = 79
ELIBBAD = 80
ELIBSCN = 81
ELIBMAX = 82
ELIBEXEC = 83
EILSEQ = 84
ERESTART = 85
ESTRPIPE = 86
EUSERS = 87
ENOTSOCK = 88
EDESTADDRREQ = 89
EMSGSIZE = 90
EPROTOTYPE = 91
ENOPROTOOPT = 92
EPROTONOSUPPORT = 93
ESOCKTNOSUPPORT = 94
EOPNOTSUPP = 95
EPFNOSUPPORT = 96
EAFNOSUPPORT = 97
EADDRINUSE = 98
EADDRNOTAVAIL = 99
ENETDOWN = 100
ENETUNREACH = 101
ENETRESET = 102
ECONNABORTED = 103
ECONNRESET = 104
ENOBUFS = 105
EISCONN = 106
ENOTCONN = 107
ESHUTDOWN = 108
ETOOMANYREFS = 109
ETIMEDOUT = 110
ECONNREFUSED = 111
EHOSTDOWN = 112
EHOSTUNREACH = 113
EALREADY = 114
EINPROGRESS = 115
ESTALE = 116
EUCLEAN = 117
ENOTNAM = 118
ENAVAIL = 119
EISNAM = 120
EREMOTEIO = 121
EDQUOT = 122
errorcode = {}
for _name, _value in list(globals().items()):
if type(_value) is type(0):
errorcode[_value] = _name
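# For illustration, after the loop above the reverse mapping gives, e.g.,
# errorcode[2] == 'ENOENT' and errorcode[13] == 'EACCES'.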
|
nomad/api/acl.py | i4s-pserrano/python-nomad | 109 | 11120589 | <reponame>i4s-pserrano/python-nomad<gh_stars>100-1000
import nomad.api.exceptions
from nomad.api.base import Requester
class Acl(Requester):
"""
This endpoint manages ACL policies and tokens.
https://www.nomadproject.io/api/acl-tokens.html
"""
ENDPOINT = "acl"
def __init__(self, **kwargs):
super(Acl, self).__init__(**kwargs)
def __str__(self):
return "{0}".format(self.__dict__)
def __repr__(self):
return "{0}".format(self.__dict__)
def __getattr__(self, item):
raise AttributeError
def generate_bootstrap(self):
""" Activate bootstrap token.
https://www.nomadproject.io/api/acl-tokens.html
returns: dict
raises:
- nomad.api.exceptions.BaseNomadException
- nomad.api.exceptions.URLNotFoundNomadException
"""
return self.request("bootstrap", method="post").json()
def get_tokens(self):
""" Get a list of tokens.
https://www.nomadproject.io/api/acl-tokens.html
returns: list
raises:
- nomad.api.exceptions.BaseNomadException
- nomad.api.exceptions.URLNotFoundNomadException
"""
return self.request("tokens", method="get").json()
def get_token(self, id):
""" Retrieve specific token.
https://www.nomadproject.io/api/acl-tokens.html
returns: dict
raises:
- nomad.api.exceptions.BaseNomadException
- nomad.api.exceptions.URLNotFoundNomadException
"""
return self.request("token", id, method="get").json()
def get_self_token(self):
""" Retrieve self token used for auth.
https://www.nomadproject.io/api/acl-tokens.html
returns: dict
raises:
- nomad.api.exceptions.BaseNomadException
- nomad.api.exceptions.URLNotFoundNomadException
"""
return self.request("token", "self", method="get").json()
def create_token(self, token):
""" Create token.
https://www.nomadproject.io/api/acl-tokens.html
arguments:
token
returns: dict
raises:
- nomad.api.exceptions.BaseNomadException
- nomad.api.exceptions.URLNotFoundNomadException
"""
return self.request("token", json=token, method="post").json()
def delete_token(self, id):
""" Delete specific token.
https://www.nomadproject.io/api/acl-tokens.html
returns: Boolean
raises:
- nomad.api.exceptions.BaseNomadException
- nomad.api.exceptions.URLNotFoundNomadException
"""
return self.request("token", id, method="delete").ok
def update_token(self, id, token):
""" Update token.
https://www.nomadproject.io/api/acl-tokens.html
arguments:
- AccessorID
- token
returns: dict
raises:
- nomad.api.exceptions.BaseNomadException
- nomad.api.exceptions.URLNotFoundNomadException
"""
return self.request("token", id, json=token, method="post").json()
def get_policies(self):
""" Get a list of policies.
https://www.nomadproject.io/api/acl-policies.html
returns: list
raises:
- nomad.api.exceptions.BaseNomadException
- nomad.api.exceptions.URLNotFoundNomadException
"""
return self.request("policies", method="get").json()
def create_policy(self, id, policy):
""" Create policy.
https://www.nomadproject.io/api/acl-policies.html
arguments:
- policy
returns: request.Response
raises:
- nomad.api.exceptions.BaseNomadException
- nomad.api.exceptions.URLNotFoundNomadException
"""
return self.request("policy", id, json=policy, method="post")
def get_policy(self, id):
""" Get a spacific.
https://www.nomadproject.io/api/acl-policies.html
returns: dict
raises:
- nomad.api.exceptions.BaseNomadException
- nomad.api.exceptions.URLNotFoundNomadException
"""
return self.request("policy", id, method="get").json()
def update_policy(self, id, policy):
""" Create policy.
https://www.nomadproject.io/api/acl-policies.html
arguments:
- name
- policy
returns: request.Response
raises:
- nomad.api.exceptions.BaseNomadException
- nomad.api.exceptions.URLNotFoundNomadException
"""
return self.request("policy", id, json=policy, method="post")
def delete_policy(self, id):
""" Delete specific policy.
https://www.nomadproject.io/api/acl-policies.html
arguments:
- id
returns: Boolean
raises:
- nomad.api.exceptions.BaseNomadException
- nomad.api.exceptions.URLNotFoundNomadException
"""
return self.request("policy", id, method="delete").ok
|
applications/SwimmingDEMApplication/python_scripts/cellular_flow/altair_cube_mesher.py | lkusch/Kratos | 778 | 11120667 | <filename>applications/SwimmingDEMApplication/python_scripts/cellular_flow/altair_cube_mesher.py
import cube_mesher
BaseClass = cube_mesher.box_data
class altair_box_data(BaseClass):
def __init__(self, xmin, ymin, zmin, xmax, ymax, zmax, nx, ny, nz):
BaseClass.__init__(self, xmin, ymin, zmin, xmax, ymax, zmax, nx, ny, nz)
def cube_vertices(self, ix, iy, iz):
""" Identify 8 contiguous nodes forming a cube.
Note: Even and odd levels are rotated 90 degrees along Z
to ensure that they are conformant.
"""
# node0 = (n+1)*(n+1)*k+(n+1)*j+i+1
# node1 = (n+1)*(n+1)*(k+1)+(n+1)*j+i+1
# node2 = (n+1)*(n+1)*(k+1)+(n+1)*j+i+2
# node3 = (n+1)*(n+1)*k+(n+1)*j+i+2
# node4 = (n+1)*(n+1)*k+(n+1)*(j+1)+i+1
# node5 = (n+1)*(n+1)*(k+1)+(n+1)*(j+1)+i+1
# node6 = (n+1)*(n+1)*(k+1)+(n+1)*(j+1)+i+2
# node7 = (n+1)*(n+1)*k+(n+1)*(j+1)+i+2
n0 = self.get_id(ix, iy, iz)
n1 = self.get_id(ix, iy, iz + 1)
n2 = self.get_id(ix + 1, iy, iz + 1)
n3 = self.get_id(ix + 1, iy, iz)
n4 = self.get_id(ix, iy + 1, iz)
n5 = self.get_id(ix, iy + 1, iz + 1)
n6 = self.get_id(ix + 1, iy + 1, iz + 1)
n7 = self.get_id(ix + 1, iy + 1, iz)
return n0, n1, n2, n3, n4, n5, n6, n7
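# Worked example (assuming get_id in the base class follows the commented
# formula above, i.e. id = (n+1)^2 * iz + (n+1) * iy + ix + 1 with n
# divisions per side): for n = 2 the cell (ix, iy, iz) = (0, 0, 0) yields
# (n0, ..., n7) = (1, 10, 11, 2, 4, 13, 14, 5).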
def generate_elements(mdpa, box, elemtype="FractionalStep3D", prop_id=0):
index = 1
nx = box.nx()
ny = box.ny()
nz = box.nz()
print("Generating tetrahedral {0} elements.".format(elemtype))
mdpa.write("Begin Elements {0}\n".format(elemtype))
for iz in range(nz):
for iy in range(ny):
for ix in range(nx):
n0, n1, n2, n3, n4, n5, n6, n7 = box.cube_vertices(ix, iy, iz)
# fill the cube with 6 tetrahedra
mdpa.write("{0:d} {1:d} {2:d} {3:d} {4:d} {5:d}\n".format(index, prop_id, n3, n2, n6, n0))
mdpa.write("{0:d} {1:d} {2:d} {3:d} {4:d} {5:d}\n".format(index + 1, prop_id, n0, n3, n7, n6))
mdpa.write("{0:d} {1:d} {2:d} {3:d} {4:d} {5:d}\n".format(index + 2, prop_id, n1, n5, n6, n0))
mdpa.write("{0:d} {1:d} {2:d} {3:d} {4:d} {5:d}\n".format(index + 3, prop_id, n0, n4, n5, n6))
mdpa.write("{0:d} {1:d} {2:d} {3:d} {4:d} {5:d}\n".format(index + 4, prop_id, n0, n1, n2, n6))
mdpa.write("{0:d} {1:d} {2:d} {3:d} {4:d} {5:d}\n".format(index + 5, prop_id, n4, n7, n6, n0))
index += 6
mdpa.write("End Elements\n\n") |
bolt/construct.py | thunder-project/bolt | 178 | 11120680 | class ConstructBase(object):
@classmethod
def dispatch(cls, method, *args, **kwargs):
if method in cls.__dict__:
return cls.__dict__[method].__func__(*args, **kwargs)
else:
raise NotImplementedError("Method %s not implemented on %s" % (method, cls.__name__))
@staticmethod
def _argcheck(*args, **kwargs):
return False
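# Illustrative sketch of the dispatch pattern (the subclass and method names
# below are made up for the example):
#
#   class ConstructLocal(ConstructBase):
#       @staticmethod
#       def ones(shape):
#           return [1] * shape
#
#   ConstructLocal.dispatch('ones', 5)    # -> [1, 1, 1, 1, 1]
#   ConstructLocal.dispatch('zeros', 5)   # -> raises NotImplementedError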
|
srsly/tests/util.py | Hirni-Meshram2/srsly | 255 | 11120681 | import tempfile
from pathlib import Path
from contextlib import contextmanager
import shutil
@contextmanager
def make_tempdir(files={}, mode="w"):
temp_dir_str = tempfile.mkdtemp()
temp_dir = Path(temp_dir_str)
for name, content in files.items():
path = temp_dir / name
with path.open(mode) as file_:
file_.write(content)
yield temp_dir
shutil.rmtree(temp_dir_str)
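# Small usage sketch (added for illustration; file name and contents are
# placeholders).
if __name__ == "__main__":
    with make_tempdir({"data.json": '{"a": 1}'}) as tmp_dir:
        assert (tmp_dir / "data.json").exists()
    print("temporary files were cleaned up on exit")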
|