Dataset schema (one row per source file):

| column | dtype | range / classes |
|---|---|---|
| hexsha | string | lengths 40–40 |
| size | int64 | 3–1.03M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | lengths 3–972 |
| max_stars_repo_name | string | lengths 6–130 |
| max_stars_repo_head_hexsha | string | lengths 40–78 |
| max_stars_repo_licenses | list | lengths 1–10 |
| max_stars_count | int64 | 1–191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | lengths 24–24, nullable |
| max_stars_repo_stars_event_max_datetime | string | lengths 24–24, nullable |
| max_issues_repo_path | string | lengths 3–972 |
| max_issues_repo_name | string | lengths 6–130 |
| max_issues_repo_head_hexsha | string | lengths 40–78 |
| max_issues_repo_licenses | list | lengths 1–10 |
| max_issues_count | int64 | 1–116k, nullable |
| max_issues_repo_issues_event_min_datetime | string | lengths 24–24, nullable |
| max_issues_repo_issues_event_max_datetime | string | lengths 24–24, nullable |
| max_forks_repo_path | string | lengths 3–972 |
| max_forks_repo_name | string | lengths 6–130 |
| max_forks_repo_head_hexsha | string | lengths 40–78 |
| max_forks_repo_licenses | list | lengths 1–10 |
| max_forks_count | int64 | 1–105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | lengths 24–24, nullable |
| max_forks_repo_forks_event_max_datetime | string | lengths 24–24, nullable |
| content | string | lengths 3–1.03M |
| avg_line_length | float64 | 1.13–941k |
| max_line_length | int64 | 2–941k |
| alphanum_fraction | float64 | 0–1 |

hexsha: 7398e5561c13bdbde44990f510ac2fe3a8dfe0c5 | size: 5,175 | ext: py | lang: Python
max_stars: src/rbx2/rbx2_tasks/nodes/random_patrol_smach.py | fujy/ROS-Project @ b5e3b43c5eb5d3c1d984648f0f61710eea3a1bb8 | licenses: ["MIT"] | count: 9 | 2017-08-22T13:07:13.000Z to 2021-07-13T10:02:29.000Z
max_issues: src/rbx2/rbx2_tasks/nodes/random_patrol_smach.py | vcdanda/ROS-Turtlebot2 @ f24356486f55933d52e25a81715f1571aace0224 | licenses: ["MIT"] | count: 1 | 2017-12-14T06:46:58.000Z to 2017-12-14T06:46:58.000Z
max_forks: src/rbx2/rbx2_tasks/nodes/random_patrol_smach.py | vcdanda/ROS-Turtlebot2 @ f24356486f55933d52e25a81715f1571aace0224 | licenses: ["MIT"] | count: 5 | 2018-02-07T14:09:28.000Z to 2021-01-08T20:41:52.000Z
#!/usr/bin/env python
""" random_patrol_smach.py - Version 1.0 2013-04-12
Control a robot to patrol four waypoints chosen at random
Created for the Pi Robot Project: http://www.pirobot.org
Copyright (c) 2014 Patrick Goebel. All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details at:
http://www.gnu.org/licenses/gpl.html
"""
import rospy
from smach import State, StateMachine
from smach_ros import SimpleActionState, IntrospectionServer
import actionlib  # provides SimpleActionClient used below
from actionlib import GoalStatus
from geometry_msgs.msg import Twist
from rbx2_tasks.task_setup import *
from random import randrange
from move_base_msgs.msg import MoveBaseAction, MoveBaseGoal, MoveBaseActionFeedback
class PickWaypoint(State):
def __init__(self):
State.__init__(self, outcomes=['succeeded'], input_keys=['waypoints'], output_keys=['waypoint_out'])
def execute(self, userdata):
waypoint_out = randrange(len(userdata.waypoints))
userdata.waypoint_out = waypoint_out
rospy.loginfo("Going to waypoint " + str(waypoint_out))
return 'succeeded'
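# Note on data flow (added comment, not in the original source): PickWaypoint and
# Nav2Waypoint never call each other directly. PickWaypoint writes the chosen index
# to the 'waypoint_out' userdata key, the state machine below remaps it to
# 'patrol_waypoint', and Nav2Waypoint reads it back through its 'waypoint_in' key.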
class Nav2Waypoint(State):
def __init__(self):
State.__init__(self, outcomes=['succeeded','aborted','preempted'],
input_keys=['waypoints', 'waypoint_in'])
# Subscribe to the move_base action server
self.move_base = actionlib.SimpleActionClient("move_base", MoveBaseAction)
# Wait up to 60 seconds for the action server to become available
self.move_base.wait_for_server(rospy.Duration(60))
rospy.loginfo("Connected to move_base action server")
self.goal = MoveBaseGoal()
self.goal.target_pose.header.frame_id = 'map'
def execute(self, userdata):
self.goal.target_pose.pose = userdata.waypoints[userdata.waypoint_in]
# Send the goal pose to the MoveBaseAction server
self.move_base.send_goal(self.goal)
if self.preempt_requested():
self.service_preempt()
return 'preempted'
# Allow 1 minute to get there
finished_within_time = self.move_base.wait_for_result(rospy.Duration(60))
# If we don't get there in time, abort the goal
if not finished_within_time:
self.move_base.cancel_goal()
rospy.loginfo("Timed out achieving goal")
return 'aborted'
else:
# We made it!
state = self.move_base.get_state()
if state == GoalStatus.SUCCEEDED:
rospy.loginfo("Goal succeeded!")
return 'succeeded'
class RandomPatrol():
def __init__(self):
rospy.init_node('random_patrol', anonymous=False)
# Set the shutdown function (stop the robot)
rospy.on_shutdown(self.shutdown)
# Initialize a number of parameters and variables
setup_task_environment(self)
# Initialize the patrol state machine
self.sm_patrol = StateMachine(outcomes=['succeeded','aborted','preempted'])
# Set the userdata.waypoints variable to the pre-defined waypoints
self.sm_patrol.userdata.waypoints = self.waypoints
# Add the states to the state machine with the appropriate transitions
with self.sm_patrol:
StateMachine.add('PICK_WAYPOINT', PickWaypoint(),
transitions={'succeeded':'NAV_WAYPOINT'},
remapping={'waypoint_out':'patrol_waypoint'})
StateMachine.add('NAV_WAYPOINT', Nav2Waypoint(),
transitions={'succeeded':'PICK_WAYPOINT',
'aborted':'PICK_WAYPOINT',
'preempted':'PICK_WAYPOINT'},
remapping={'waypoint_in':'patrol_waypoint'})
# Create and start the SMACH introspection server
intro_server = IntrospectionServer('patrol', self.sm_patrol, '/SM_ROOT')
intro_server.start()
# Execute the state machine
sm_outcome = self.sm_patrol.execute()
rospy.loginfo('State Machine Outcome: ' + str(sm_outcome))
intro_server.stop()
def shutdown(self):
rospy.loginfo("Stopping the robot...")
self.sm_patrol.request_preempt()
self.cmd_vel_pub.publish(Twist())
rospy.sleep(1)
if __name__ == '__main__':
try:
RandomPatrol()
except rospy.ROSInterruptException:
rospy.loginfo("SMACH test finished.")
avg_line_length: 36.964286 | max_line_length: 108 | alphanum_fraction: 0.627826

hexsha: eb1d6d7b51e344b93fdcff4c7a4911e62c00639a | size: 7,721 | ext: py | lang: Python
max_stars: code/models/backbone/ResNet_Dilated.py | xueruoyao/FCN-pytorch @ a5019da3943f47fa4f7baed3640cdbfeae2d677e | licenses: ["MIT"] | count: 1 | 2021-12-20T07:20:25.000Z to 2021-12-20T07:20:25.000Z
max_issues: code/models/backbone/ResNet_Dilated.py | xueruoyao/FCN-pytorch @ a5019da3943f47fa4f7baed3640cdbfeae2d677e | licenses: ["MIT"] | count: null
max_forks: code/models/backbone/ResNet_Dilated.py | xueruoyao/FCN-pytorch @ a5019da3943f47fa4f7baed3640cdbfeae2d677e | licenses: ["MIT"] | count: null
from __future__ import absolute_import, print_function
from collections import OrderedDict
import torch
import torch.nn as nn
import torch.nn.functional as F
import os
# try:
# from encoding.nn import SyncBatchNorm
# _BATCH_NORM = SyncBatchNorm
# except:
# _BATCH_NORM = nn.BatchNorm2d
_BOTTLENECK_EXPANSION = 4
_BATCH_NORM = nn.BatchNorm2d
class _ConvBnReLU(nn.Sequential):
"""
Cascade of 2D convolution, batch norm, and ReLU.
"""
BATCH_NORM = _BATCH_NORM
def __init__(
self, in_ch, out_ch, kernel_size, stride, padding, dilation, relu=True
):
super(_ConvBnReLU, self).__init__()
self.add_module(
"conv",
nn.Conv2d(
in_ch, out_ch, kernel_size, stride, padding, dilation, bias=False
),
)
self.add_module("bn", _BATCH_NORM(out_ch, eps=1e-5, momentum=0.999))
if relu:
self.add_module("relu", nn.ReLU())
class _Bottleneck(nn.Module):
"""
Bottleneck block of MSRA ResNet.
"""
def __init__(self, in_ch, out_ch, stride, dilation, downsample):
super(_Bottleneck, self).__init__()
mid_ch = out_ch // _BOTTLENECK_EXPANSION
self.conv1 = nn.Conv2d(in_ch, mid_ch, 1, stride, 0, 1, bias=False)
self.bn1 = nn.BatchNorm2d(mid_ch, eps=1e-5, momentum=0.999)
self.relu1 = nn.ReLU()
self.conv2 = nn.Conv2d(mid_ch, mid_ch, 3, 1, dilation, dilation, bias=False)
self.bn2 = nn.BatchNorm2d(mid_ch, eps=1e-5, momentum=0.999)
self.relu2 = nn.ReLU()
self.conv3 = nn.Conv2d(mid_ch, out_ch, 1, 1, 0, 1, bias=False)
self.bn3 = nn.BatchNorm2d(out_ch, eps=1e-5, momentum=0.999)
self.relu3 = nn.ReLU()
if downsample:
self.downsample = nn.Sequential()
self.downsample.add_module("0", nn.Conv2d(in_ch, out_ch, 1, stride, 0, 1, bias=False))
self.downsample.add_module("1", nn.BatchNorm2d(out_ch, eps=1e-5, momentum=0.999))
self.downsample.add_module("2", nn.ReLU())
else:
self.downsample = lambda x: x
# self.reduce = _ConvBnReLU(in_ch, mid_ch, 1, stride, 0, 1, True)
# self.conv3x3 = _ConvBnReLU(mid_ch, mid_ch, 3, 1, dilation, dilation, True)
# self.increase = _ConvBnReLU(mid_ch, out_ch, 1, 1, 0, 1, False)
# self.shortcut = (
# _ConvBnReLU(in_ch, out_ch, 1, stride, 0, 1, False)
# if downsample
# else lambda x: x # identity
# )
def forward(self, x):
h = self.conv1(x)
h = self.bn1(h)
h = self.relu1(h)
h = self.conv2(h)
h = self.bn2(h)
h = self.relu2(h)
h = self.conv3(h)
h = self.bn3(h)
h = self.relu3(h)
h += self.downsample(x)
return F.relu(h)
class _ResLayer(nn.Sequential):
"""
Residual layer with multi grids
"""
def __init__(self, n_layers, in_ch, out_ch, stride, dilation, multi_grids=None):
super(_ResLayer, self).__init__()
if multi_grids is None:
multi_grids = [1 for _ in range(n_layers)]
else:
assert n_layers == len(multi_grids)
# Downsampling is only in the first block
for i in range(n_layers):
self.add_module(
"{}".format(i),
# "block{}".format(i + 1),
_Bottleneck(
in_ch=(in_ch if i == 0 else out_ch),
out_ch=out_ch,
stride=(stride if i == 0 else 1),
dilation=dilation * multi_grids[i],
downsample=(True if i == 0 else False),
),
)
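# Note (added comment): with multi_grids=[1, 2, 4] supplied to the final layer, its
# three bottleneck blocks receive dilations dilation*1, dilation*2 and dilation*4;
# this is the multi-grid scheme used by DeepLabv3-style dilated ResNets. Layers
# built without multi_grids fall back to a uniform grid of 1s.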
class _Stem(nn.Sequential):
"""
The 1st conv layer.
Note that the max pooling is different from both MSRA and FAIR ResNet.
"""
def __init__(self, in_ch, out_ch):
super(_Stem, self).__init__()
self.add_module("conv1", _ConvBnReLU(in_ch, out_ch, 7, 2, 3, 1))
self.add_module("pool", nn.MaxPool2d(3, 2, 1, ceil_mode=True))
class ResNet_Dilated(nn.Module):
def __init__(self, in_ch, multi_grids=[1, 2, 4], output_stride=16, backbone='resnet101'):
super(ResNet_Dilated, self).__init__()
# Stride and dilation
if output_stride == 8:
s = [1, 2, 1, 1]
d = [1, 1, 2, 4]
elif output_stride == 16:
s = [1, 2, 2, 1]
d = [1, 1, 1, 2]
else:
raise ValueError("wrong value for output_stride, which should be 8 or 16")
ch = [64 * 2 ** p for p in range(6)]
self.conv1 = nn.Conv2d(in_ch, ch[0], 7, 2, 3, bias=False)
self.bn1 = nn.BatchNorm2d(ch[0], eps=1e-5, momentum=0.999)
self.relu1 = nn.ReLU()
self.maxpool1 = nn.MaxPool2d(3, 2, 1)
if backbone == 'resnet50':
n_blocks=[3, 4, 6, 3]
self.layer1 = _ResLayer(n_blocks[0], ch[0], ch[2], s[0], d[0])
self.layer2 = _ResLayer(n_blocks[1], ch[2], ch[3], s[1], d[1])
self.layer3 = _ResLayer(n_blocks[2], ch[3], ch[4], s[2], d[2])
self.layer4 = _ResLayer(n_blocks[3], ch[4], ch[5], s[3], d[3], multi_grids)
self.filters = [256, 512, 1024, 2048]
elif backbone == 'resnet101':
n_blocks=[3, 4, 23, 3]
self.layer1 = _ResLayer(n_blocks[0], ch[0], ch[2], s[0], d[0])
self.layer2 = _ResLayer(n_blocks[1], ch[2], ch[3], s[1], d[1])
self.layer3 = _ResLayer(n_blocks[2], ch[3], ch[4], s[2], d[2])
self.layer4 = _ResLayer(n_blocks[3], ch[4], ch[5], s[3], d[3], multi_grids)
self.filters = [256, 512, 1024, 2048]
else:
raise ValueError("Invalid indicator for backbone, which should be selected from {'resnet50', 'resnet101'}")
def init_weights(self, pretrained=''):
for _, m in self.named_modules():
if isinstance(m, nn.Conv2d):
nn.init.normal_(m.weight, std=0.001)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
if os.path.isfile(pretrained):
pretrained_dict = torch.load(pretrained, map_location={'cuda': 'cpu'})
model_dict = self.state_dict()
pretrained_dict.pop('fc.weight')
pretrained_dict.pop('fc.bias')
pretrained_dict = {k: v for k, v in pretrained_dict.items()
if k in model_dict.keys()}
model_dict.update(pretrained_dict)
self.load_state_dict(model_dict)
elif pretrained:
raise RuntimeError('No such file {}'.format(pretrained))
def get_filters(self):
return self.filters
def forward(self, x):
h = self.conv1(x)
h = self.bn1(h)
h = self.relu1(h)
h = self.maxpool1(h)
outs = []
h = self.layer1(h)
outs.append(h)
h = self.layer2(h)
outs.append(h)
h = self.layer3(h)
outs.append(h)
h = self.layer4(h)
outs.append(h)
return outs
if __name__ == "__main__":
input = torch.autograd.Variable(torch.randn(1, 3, 512, 512))
net = ResNet_Dilated(3)
net.init_weights('/home/lufangxiao/GDANet/models/backbone/pretrained/resnet101-5d3b4d8f.pth')
# pretrained_dict = torch.load('/home/lufangxiao/GDANet/models/backbone/pretrained/resnet101-5d3b4d8f.pth')
# for k, v in net.state_dict().items():
# for k, v in pretrained_dict.items():
# print(k)
print(net(input)[0].size())
print(net(input)[1].size())
print(net(input)[2].size())
print(net(input)[3].size())
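# Illustrative check (not part of the original file): after the stem (stride 4), the
# strides s chosen above give feature strides 4/8/16/16 for layer1..layer4 when
# output_stride=16, and 4/8/8/8 when output_stride=8, with the removed stride traded
# for extra dilation. A quick spatial-size check, assuming a 512x512 input:
#
#     net8 = ResNet_Dilated(3, output_stride=8)
#     sizes = [o.shape[-1] for o in net8(torch.randn(1, 3, 512, 512))]
#     print(sizes)   # expected: [128, 64, 64, 64]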
avg_line_length: 35.417431 | max_line_length: 119 | alphanum_fraction: 0.563787

hexsha: 578c96719f4f9d1b222679692854da507ca2dace | size: 1,278 | ext: py | lang: Python
max_stars: toontown/classicchars/DistributedSuperGoofy.py | TheFamiliarScoot/open-toontown @ 678313033174ea7d08e5c2823bd7b473701ff547 | licenses: ["BSD-3-Clause"] | count: 99 | 2019-11-02T22:25:00.000Z to 2022-02-03T03:48:00.000Z
max_issues: toontown/classicchars/DistributedSuperGoofy.py | TheFamiliarScoot/open-toontown @ 678313033174ea7d08e5c2823bd7b473701ff547 | licenses: ["BSD-3-Clause"] | count: 42 | 2019-11-03T05:31:08.000Z to 2022-03-16T22:50:32.000Z
max_forks: toontown/classicchars/DistributedSuperGoofy.py | TheFamiliarScoot/open-toontown @ 678313033174ea7d08e5c2823bd7b473701ff547 | licenses: ["BSD-3-Clause"] | count: 57 | 2019-11-03T07:47:37.000Z to 2022-03-22T00:41:49.000Z
from pandac.PandaModules import *
from . import DistributedCCharBase
from direct.directnotify import DirectNotifyGlobal
from direct.fsm import ClassicFSM, State
from direct.fsm import State
from toontown.classicchars import DistributedGoofySpeedway
from . import CharStateDatas
from toontown.toonbase import ToontownGlobals
from toontown.toonbase import TTLocalizer
from . import DistributedCCharBase
class DistributedSuperGoofy(DistributedGoofySpeedway.DistributedGoofySpeedway):
notify = DirectNotifyGlobal.directNotify.newCategory('DistributedSuperGoofy')
def __init__(self, cr):
try:
self.DistributedGoofySpeedway_initialized
except:
self.DistributedGoofySpeedway_initialized = 1
DistributedCCharBase.DistributedCCharBase.__init__(self, cr, TTLocalizer.SuperGoofy, 'sg')
self.fsm = ClassicFSM.ClassicFSM(self.getName(), [State.State('Off', self.enterOff, self.exitOff, ['Neutral']), State.State('Neutral', self.enterNeutral, self.exitNeutral, ['Walk']), State.State('Walk', self.enterWalk, self.exitWalk, ['Neutral'])], 'Off', 'Off')
self.fsm.enterInitialState()
self.nametag.setName(TTLocalizer.Goofy)
def walkSpeed(self):
return ToontownGlobals.SuperGoofySpeed
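# Structure note (added comment): the ClassicFSM built in __init__ has three states
# whose legal transitions are Off -> Neutral, Neutral -> Walk and Walk -> Neutral,
# with 'Off' as the initial state; the nametag is set to TTLocalizer.Goofy even
# though the character itself is constructed with TTLocalizer.SuperGoofy.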
avg_line_length: 47.333333 | max_line_length: 274 | alphanum_fraction: 0.758216

hexsha: e28408584caa42c156cf02d360f13a7a7a8588f1 | size: 1,642 | ext: py | lang: Python
max_stars: tests/integration/test_settings.py | izi-core/izi-core @ 21176be2d41f0cf54ca954f294209c585f643dba | licenses: ["BSD-3-Clause"] | count: null
max_issues: tests/integration/test_settings.py | izi-core/izi-core @ 21176be2d41f0cf54ca954f294209c585f643dba | licenses: ["BSD-3-Clause"] | count: null
max_forks: tests/integration/test_settings.py | izi-core/izi-core @ 21176be2d41f0cf54ca954f294209c585f643dba | licenses: ["BSD-3-Clause"] | count: null
from django.test import TestCase
from django.template import loader, TemplateDoesNotExist
import izi
class TestIZICoreAppsList(TestCase):
def test_includes_izi_itself(self):
core_apps = izi.IZI_CORE_APPS
self.assertTrue('izi' in core_apps)
def test_can_be_retrieved_through_fn(self):
core_apps = izi.get_core_apps()
self.assertTrue('izi' in core_apps)
def test_can_be_retrieved_with_overrides(self):
apps = izi.get_core_apps(overrides=['apps.shipping'])
self.assertTrue('apps.shipping' in apps)
self.assertTrue('izi.apps.shipping' not in apps)
def test_raises_exception_for_string_arg(self):
with self.assertRaises(ValueError):
izi.get_core_apps('forks.catalogue')
class TestIZITemplateSettings(TestCase):
"""
IZI's IZI_MAIN_TEMPLATE_DIR setting
"""
def test_allows_a_template_to_be_accessed_via_two_paths(self):
paths = ['base.html', 'izi/base.html']
for path in paths:
try:
loader.get_template(path)
except TemplateDoesNotExist:
self.fail("Template %s should exist" % path)
def test_allows_a_template_to_be_customized(self):
path = 'base.html'
template = loader.get_template(path)
rendered_template = template.render({})
self.assertIn('IZI Test Shop', rendered_template)
def test_default_izi_templates_are_accessible(self):
path = 'izi/base.html'
template = loader.get_template(path)
rendered_template = template.render({})
self.assertNotIn('IZI Test Shop', rendered_template)
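# Illustrative sketch (not izi's actual implementation): the behaviour asserted in
# TestIZICoreAppsList.test_can_be_retrieved_with_overrides amounts to replacing a
# core 'izi.apps.<name>' entry with a fork that ends in the same 'apps.<name>' path.
# The helper name '_sketch_get_core_apps' is hypothetical.
def _sketch_get_core_apps(core_apps, overrides=()):
    result = list(core_apps)
    for override in overrides:
        suffix = override.split('apps.', 1)[-1]   # e.g. 'shipping'
        result = [override if app.endswith('apps.' + suffix) else app
                  for app in result]
    return result
# _sketch_get_core_apps(['izi', 'izi.apps.shipping'], ['apps.shipping'])
# -> ['izi', 'apps.shipping']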
avg_line_length: 32.84 | max_line_length: 66 | alphanum_fraction: 0.68514

hexsha: 50b4e20184dc272167160d0754533c31bc405b5a | size: 1,618 | ext: py | lang: Python
max_stars: tests/test_docs.py | anosillus/cookiecutter-poetry @ 11b2e238e0c2c61d0a7cc6e199787b1bd7e2f413 | licenses: ["MIT"] | count: 6 | 2021-01-07T15:39:49.000Z to 2022-03-25T10:06:45.000Z
max_issues: tests/test_docs.py | anosillus/cookiecutter-poetry @ 11b2e238e0c2c61d0a7cc6e199787b1bd7e2f413 | licenses: ["MIT"] | count: 16 | 2020-02-24T11:42:21.000Z to 2021-08-31T14:22:21.000Z
max_forks: tests/test_docs.py | anosillus/cookiecutter-poetry @ 11b2e238e0c2c61d0a7cc6e199787b1bd7e2f413 | licenses: ["MIT"] | count: 14 | 2020-05-17T15:59:01.000Z to 2022-03-12T03:19:17.000Z
import json
import re
from pathlib import Path
from typing import List, Tuple
import pytest
# @pytest.mark.datafiles("cookiecutter.json", relative_to=ROOT_DIR)
from tests.utils import PROJECT_ROOT_DIR
def get_prompts_from_cookiecutter_json() -> List[str]:
with open(str((PROJECT_ROOT_DIR / "cookiecutter.json"))) as f:
keys = list(json.load(f).keys())
return keys
def get_prompts_from_doc() -> List[str]:
prompt_regex = re.compile(r"\n`[`a-z_]+`")
with open(str(PROJECT_ROOT_DIR / "docs" / "prompts.rst")) as f:
raw_prompts_from_doc = prompt_regex.findall("\n".join(f.readlines()))
return [p.replace("`", "").replace("\n", "") for p in raw_prompts_from_doc]
def get_indexes(*prompts: List[str]) -> List[int]:
length = max([len(p) for p in prompts])
return [i for i in range(length)]
def fill_prompts_list(prompts: List[str], length: int) -> None:
if len(prompts) < length:
prompts.extend(["None"] * (length - len(prompts)))
def get_prompts() -> List[Tuple[int, str, str]]:
from_json = get_prompts_from_cookiecutter_json()
from_doc = get_prompts_from_doc()
indexes = get_indexes(from_json, from_doc)
fill_prompts_list(from_json, len(indexes))
fill_prompts_list(from_doc, len(indexes))
return list(zip(indexes, from_json, from_doc))
@pytest.mark.parametrize("index,from_json,from_doc", get_prompts())
def test_cookiecutter_prompts_are_documented(
index: int, from_json: str, from_doc: str
) -> None:
assert (
from_json == from_doc
), f"Prompt {index} should be '{from_json}' but is '{from_doc}'"
avg_line_length: 30.528302 | max_line_length: 83 | alphanum_fraction: 0.689122

hexsha: f3dbb6bc040c7246d1b91b5b095ec17facf1fb22 | size: 10,103 | ext: py | lang: Python
max_stars: data/external/repositories/132822/kaggle-driver-telematics-master/weights.py | Keesiu/meta-kaggle @ 87de739aba2399fd31072ee81b391f9b7a63f540 | licenses: ["MIT"] | count: null
max_issues: data/external/repositories/132822/kaggle-driver-telematics-master/weights.py | Keesiu/meta-kaggle @ 87de739aba2399fd31072ee81b391f9b7a63f540 | licenses: ["MIT"] | count: null
max_forks: data/external/repositories/132822/kaggle-driver-telematics-master/weights.py | Keesiu/meta-kaggle @ 87de739aba2399fd31072ee81b391f9b7a63f540 | licenses: ["MIT"] | count: 1 | 2019-12-04T08:23:33.000Z to 2019-12-04T08:23:33.000Z
import model_def
import model_run
### - not run on train
#-- - ran on train, small influence
#== - ran on train, not run on test because slow
MODELS = [
#(52, model_run.get_data_accel, model_def.Model_ABC1, 1),
#(53, model_run.get_data_accel, model_def.Model_ABC2, 1),
#(1, model_run.get_data_accel, model_def.Model_ETC, 1),
(2, model_run.get_data_accel, model_def.Model_GBC, 10),
(3, model_run.get_data_accel, model_def.Model_LR, 10),
(63, model_run.get_data_accel, model_def.Model_LR2, 10),
#--(4, model_run.get_data_accel, model_def.Model_RFC, 10),
(5, model_run.get_data_accel, model_def.Model_SVC, 10), #maybe add
# (59, model_run.get_data_accel, model_def.Model_GBC, 100),
(103, model_run.get_data_basic_big, model_def.Model_GBC, 10),
(101, model_run.get_data_basic_big, model_def.Model_GBC2, 10),
(104, model_run.get_data_basic_big, model_def.Model_GBC3, 10),
#(57, model_run.get_data_basic, model_def.Model_ABC1, 1),
#(6, model_run.get_data_basic, model_def.Model_ETC, 1),
(7, model_run.get_data_basic, model_def.Model_GBC, 10),
(8, model_run.get_data_basic, model_def.Model_LR, 10),
(64, model_run.get_data_basic, model_def.Model_LR2, 10),
#(9, model_run.get_data_basic, model_def.Model_RFC, 1),
#(10, model_run.get_data_basic, model_def.Model_SVC, 1), # useless # RuntimeWarning: overflow encountered in exp
#(11, model_run.get_data_basic_accel, model_def.Model_ETC, 1),
(12, model_run.get_data_basic_accel, model_def.Model_GBC, 10),
#--(13, model_run.get_data_basic_accel, model_def.Model_LR, 10), # maybe add # Warning: overflow encountered in exp
#(14, model_run.get_data_basic_accel, model_def.Model_RFC, 1),
#(15, model_run.get_data_basic_accel, model_def.Model_SVC, 1),
(91, model_run.get_data_basic_accel_v2, model_def.Model_GBC, 10),
#(61, model_run.get_data_accel_v2, model_def.Model_GBC, 10),
(72, model_run.get_data_accel_v2_svd, model_def.Model_GBC, 10),
#(58, model_run.get_data_basic_v2, model_def.Model_ABC1, 1),
(16, model_run.get_data_basic_v2, model_def.Model_GBC, 10),
#(17, model_run.get_data_basic_v2, model_def.Model_LR, 1),
#(65, model_run.get_data_basic_v2, model_def.Model_LR3, 10),
#(18, model_run.get_data_basic_v2, model_def.Model_RFC, 1),
(87, model_run.get_data_basic_v3, model_def.Model_GBC, 10),
#--? (88, model_run.get_data_basic_v3, model_def.Model_LR, 10),
(89, model_run.get_data_basic_v4, model_def.Model_GBC, 10),
(102, model_run.get_data_basic_v4, model_def.Model_GBC2, 10),
(105, model_run.get_data_basic_v4, model_def.Model_GBC3, 10),
#--? (90, model_run.get_data_basic_v4, model_def.Model_LR, 10),
(99, model_run.get_data_basic_v5, model_def.Model_GBC, 10),
(107, model_run.get_data_dist_acc, model_def.Model_LR, 4),
(108, model_run.get_data_dist_acc, model_def.Model_LR2, 4),
(109, model_run.get_data_dist_acc, model_def.Model_SVC, 4),
(76, model_run.get_data_fft, model_def.Model_ETC, 10),
(77, model_run.get_data_fft_v2, model_def.Model_LR3, 10),
(78, model_run.get_data_fft_v2, model_def.Model_SVC2, 10),
#(19, model_run.get_data_g_forces_v1, model_def.Model_LR, 1), # slow, maybe add
(94, model_run.get_data_g_forces_v1, model_def.Model_LR2, 1), # slow, maybe add
(95, model_run.get_data_g_forces_v1, model_def.Model_SVC, 1), # slow, maybe add
## (20, model_run.get_data_g_forces_v1, model_def.Model_SVC, 1), maybe add
# (21, model_run.get_data_g_forces_v2, model_def.Model_SVC, 4),
#(22, model_run.get_data_g_forces_v3, model_def.Model_RFC, 1),
(62, model_run.get_data_g_forces_v4, model_def.Model_SVC, 4),
(93, model_run.get_data_g_forces_v5, model_def.Model_LR, 10),
(96, model_run.get_data_g_forces_v6, model_def.Model_LR2, 1),
(97, model_run.get_data_g_forces_v6, model_def.Model_SVC, 1),
(98, model_run.get_data_g_forces_v7, model_def.Model_LR2, 4),
#(48, model_run.get_data_heading, model_def.Model_ETC, 1), # 15 minutes per user, maybe add
#(48, model_run.get_data_heading, model_def.Model_GBC, 1), # 8 minutes per user, not run on train
(49, model_run.get_data_heading, model_def.Model_LR, 10),
# (50, model_run.get_data_heading, model_def.Model_RFC, 1),
(51, model_run.get_data_heading, model_def.Model_SVC, 10),
# (54, model_run.get_data_heading, model_def.Model_SVC2, 1), # slow, not add?
(79, model_run.get_data_heading_stops, model_def.Model_LR, 10),
#-- (85, model_run.get_data_heading_stops_v2, model_def.Model_LR, 10),
(55, model_run.get_data_heading_v2, model_def.Model_LR, 10),
(56, model_run.get_data_heading_v2, model_def.Model_SVC, 10),
(86, model_run.get_data_heading_v3, model_def.Model_LR, 10),
# ?? (74, model_run.get_data_movements_accel, model_def.Model_LR, 4),
# ?? (75, model_run.get_data_movements_accel, model_def.Model_LR2, 4),
(60, model_run.get_data_movements_accel, model_def.Model_SVC, 4),
#(73, model_run.get_data_movements_accel_svd, model_def.Model_GBC, 4),
(92, model_run.get_data_movements_accel_v2, model_def.Model_SVC, 4),
(66, model_run.get_data_movements_v1, model_def.Model_LR, 10),
(69, model_run.get_data_movements_v1, model_def.Model_LR2, 10),
#(23, model_run.get_data_movements_v1, model_def.Model_RFC, 1),
(24, model_run.get_data_movements_v1, model_def.Model_SVC, 10),
#(25, model_run.get_data_movements_v1_tf, model_def.Model_SVC, 1),
#(26, model_run.get_data_movements_v2, model_def.Model_LR, 1), # folds=10, not add
(67, model_run.get_data_movements_v2, model_def.Model_LR2, 10),
(27, model_run.get_data_movements_v2, model_def.Model_SVC, 10), # slow
#(28, model_run.get_data_movements_v2, model_def.Model_SVC2, 1), # folds=10, maybe add
#(29, model_run.get_data_movements_v2_tf, model_def.Model_SVC, 1),
(71, model_run.get_data_movements_v2_svd, model_def.Model_GBC, 10),
#(30, model_run.get_data_movements_v3, model_def.Model_LR, 1), # folds=10, not add
(68, model_run.get_data_movements_v3, model_def.Model_LR2, 10),
#--(31, model_run.get_data_movements_v3, model_def.Model_SVC, 10), # maybe add # folds=10
#(32, model_run.get_data_movements_v3, model_def.Model_SVC2, 1), # maybe add
#(33, model_run.get_data_movements_v3_tf, model_def.Model_LR, 1),
#(34, model_run.get_data_movements_v3_tf, model_def.Model_SVC, 1),
#-- (81, model_run.get_data_movements_v4, model_def.Model_LR2, 10),
#-- (82, model_run.get_data_movements_v5, model_def.Model_LR2, 10),
(83, model_run.get_data_movements_v6, model_def.Model_LR2, 10),
(84, model_run.get_data_movements_v7, model_def.Model_LR2, 10),
(100, model_run.get_data_movements_v8, model_def.Model_LR2, 10),
#(35, model_run.get_data_segment_angles, model_def.Model_GBC, 1),
(36, model_run.get_data_segment_angles, model_def.Model_LR, 4),
#(37, model_run.get_data_segment_angles, model_def.Model_RFC, 1),
#(38, model_run.get_data_segment_angles, model_def.Model_SVC, 1),
#-- (80, model_run.get_data_segment_angles_v2, model_def.Model_LR, 4),
#(40, model_run.get_data_segment_lengths, model_def.Model_GBC, 1), # folds=8, maybe add
(41, model_run.get_data_segment_lengths, model_def.Model_LR, 4),
#(42, model_run.get_data_segment_lengths, model_def.Model_RFC, 1),
#(43, model_run.get_data_segment_lengths, model_def.Model_SVC, 1),
#(44, model_run.get_data_segment_times, model_def.Model_GBC, 1), # folds=8, maybe add
(45, model_run.get_data_segment_times, model_def.Model_LR, 4),
#(46, model_run.get_data_segment_times, model_def.Model_RFC, 1),
#(47, model_run.get_data_segment_times, model_def.Model_SVC, 1),
(70, model_run.get_data_segment_v2, model_def.Model_LR, 10),
]
# computed 107 on the test set
STACK = [
(2, model_run.get_data_accel, model_def.Model_GBC, 10), # 1 min per driver
(3, model_run.get_data_accel, model_def.Model_LR, 10), # 40 sec per driver
(63, model_run.get_data_accel, model_def.Model_LR2, 10), # 10 sec per driver
(5, model_run.get_data_accel, model_def.Model_SVC, 10), # 40 sec per driver #maybe add
(12, model_run.get_data_basic_accel, model_def.Model_GBC, 10), # 1.5 min per driver
(91, model_run.get_data_basic_accel_v2, model_def.Model_GBC, 10), # 1.8 min per driver
(104, model_run.get_data_basic_big, model_def.Model_GBC3, 10),
(111, model_run.get_data_basic_big_v2, model_def.Model_GBC3, 10), # 40 sec per driver
(16, model_run.get_data_basic_v2, model_def.Model_GBC, 10), # 40 sec per driver
(87, model_run.get_data_basic_v3, model_def.Model_GBC, 10), # 1 min per driver
(89, model_run.get_data_basic_v4, model_def.Model_GBC, 10), # 1.15 min per driver
(107, model_run.get_data_dist_acc, model_def.Model_LR, 4), # 10 folds
(96, model_run.get_data_g_forces_v6, model_def.Model_LR2, 1), # 2.5 min per driver
(51, model_run.get_data_heading, model_def.Model_SVC, 10), # 3.5 min per driver
(79, model_run.get_data_heading_stops, model_def.Model_LR, 10), # 3.5 min per driver
(55, model_run.get_data_heading_v2, model_def.Model_LR, 10), # 3.5 min per driver
(60, model_run.get_data_movements_accel, model_def.Model_SVC, 4), # 3.5 min per driver
(67, model_run.get_data_movements_v2, model_def.Model_LR2, 10), # 4 min per driver for 8 folds
(83, model_run.get_data_movements_v6, model_def.Model_LR2, 10),# 1 min per driver
(84, model_run.get_data_movements_v7, model_def.Model_LR2, 10), # 1.3 min per driver
(36, model_run.get_data_segment_angles, model_def.Model_LR, 4), # 1 sec per driver
(41, model_run.get_data_segment_lengths, model_def.Model_LR, 4), # 3 sec per driver
(70, model_run.get_data_segment_v2, model_def.Model_LR, 10), # 2 sec per driver
]
WEIGHTS = {2: 0.024819412691444782, 3: 0.045810432596201198, 5: 9.8381587787062545e-05, 12: 0.085751071851815894, 16: 0.036259536220486756, 36: 0.042808218136559099, 41: 0.029705081295213608, 51: 0.0453111571766193, 55: 0.067791960096966822, 60: 0.12838024895888966, 63: 0.035268117984193298, 67: 0.076977632203712093, 70: 0.01909734053123226, 79: 0.022575077886886973, 83: 0.11404680902846333, 84: 0.11611119742557031, 87: 0.049168388058686022, 89: 0.047098612253309559, 91: 0.068358155065302337, 96: 0.077934935592705035, 104: 0.039462792122427037, 111: 0.171981317575196}
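# Illustrative sketch of how the weights are presumably applied (the blending code is
# not in this file): each model id in STACK maps to a weight in WEIGHTS, and an
# ensemble score would be the weight-normalised sum of the per-model probabilities.
# The function name 'blend_predictions' is hypothetical.
def blend_predictions(per_model_probs, weights=None):
    """per_model_probs: {model_id: probability} for a single driver/trip pair."""
    weights = WEIGHTS if weights is None else weights
    used = {mid: p for mid, p in per_model_probs.items() if mid in weights}
    total = sum(weights[mid] for mid in used)
    return sum(weights[mid] * p for mid, p in used.items()) / total if total else 0.0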
avg_line_length: 56.441341 | max_line_length: 574 | alphanum_fraction: 0.761754

hexsha: ba4e0837688460e1c031c099addaa3858768bc9f | size: 5,349 | ext: py | lang: Python
max_stars: nmpc/scripts/state_machine.py | MikeK4y/px4_control @ 3b91e2ab9a5c711f63ced680789629f67450fe5f | licenses: ["MIT"] | count: null
max_issues: nmpc/scripts/state_machine.py | MikeK4y/px4_control @ 3b91e2ab9a5c711f63ced680789629f67450fe5f | licenses: ["MIT"] | count: null
max_forks: nmpc/scripts/state_machine.py | MikeK4y/px4_control @ 3b91e2ab9a5c711f63ced680789629f67450fe5f | licenses: ["MIT"] | count: null
#!/usr/bin/python3
import rospy as rp
import numpy as np
import quaternion
import threading
from px4_control_msgs.msg import DroneStateMarker, Setpoint, Trajectory
class StateMachineNode():
"""
Simple state machine that checks the state and when the target is found it
sends a setpoint relative to it. It also tracks the target's position and
if it changes too much it updates the setpoint
"""
def __init__(self, rate):
rp.init_node('state_machine_node')
self.rate = rate
# Check that NMPC is running by checking that a service is available
rp.loginfo('Checking that controller is up')
rp.wait_for_service('/enable_controller')
rp.loginfo('NMPC is up')
# Setpoint
self.H_marker_setpoint = np.array([[1.0, 0.0, 0.0, -4.2],
[0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 1.0, 0.2],
[0.0, 0.0, 0.0, 1.0]])
self.marker_setpoint_sent = False
self.marker_position = None
self.marker_orientation = None
self.traj = Trajectory()
# Subscribers
self.state_sub = rp.Subscriber(
'/drone_state', DroneStateMarker, self.stateCallback, queue_size=1)
# Publishers
self.trajectory_pub = rp.Publisher(
'/drone_trajectory', Trajectory, queue_size=1, latch=True)
self.traj_logs_pub = rp.Publisher(
'/drone_trajectory_log', Trajectory, queue_size=1)
t = threading.Thread(target=self.trajPublisher)
t.start()
rp.spin()
def stateCallback(self, msg):
if msg.marker_found.data:
if not self.marker_setpoint_sent:
# Get marker world position
self.marker_position = np.array([msg.marker_pose.position.x,
msg.marker_pose.position.y,
msg.marker_pose.position.z])
marker_att = np.quaternion(msg.marker_pose.orientation.w,
msg.marker_pose.orientation.x,
msg.marker_pose.orientation.y,
msg.marker_pose.orientation.z).normalized()
H_world_marker = np.identity(4)
H_world_marker[0, 3] = msg.marker_pose.position.x
H_world_marker[1, 3] = msg.marker_pose.position.y
H_world_marker[2, 3] = msg.marker_pose.position.z
H_world_marker[0:3, 0:3] = quaternion.as_rotation_matrix(
marker_att)
self.marker_orientation = np.arctan2(
H_world_marker[1, 0], H_world_marker[0, 0])
# Transform setpoint to world frame
H_world_setpoint = np.matmul(
H_world_marker, self.H_marker_setpoint)
# Prepare setpoint message and publish it
setpoint_msg = Setpoint()
setpoint_msg.position.x = H_world_setpoint[0, 3]
setpoint_msg.position.y = H_world_setpoint[1, 3]
setpoint_msg.position.z = H_world_setpoint[2, 3]
setpoint_msg.velocity.x = 0.0
setpoint_msg.velocity.y = 0.0
setpoint_msg.velocity.z = 0.0
setpoint_msg.orientation.x = 0.0
setpoint_msg.orientation.y = 0.0
setpoint_msg.orientation.z = np.arctan2(
H_world_setpoint[1, 0], H_world_setpoint[0, 0])
self.traj.header.stamp = rp.Time.now()
self.traj.trajectory.append(setpoint_msg)
self.trajectory_pub.publish(self.traj)
rp.loginfo('Setpoint sent')
self.marker_setpoint_sent = True
else:
marker_current_pos = np.array([msg.marker_pose.position.x,
msg.marker_pose.position.y,
msg.marker_pose.position.z])
marker_att = np.quaternion(msg.marker_pose.orientation.w,
msg.marker_pose.orientation.x,
msg.marker_pose.orientation.y,
msg.marker_pose.orientation.z).normalized()
R = quaternion.as_rotation_matrix(marker_att)
marker_current_orientation = np.arctan2(R[1, 0], R[0, 0])
d_o = marker_current_orientation - self.marker_orientation
d_p = marker_current_pos - self.marker_position
if np.dot(d_p, d_p) > 0.02 or abs(d_o) > 0.075:
rp.logwarn(
'The marker\'s position changed too much. Sending new setpoint')
self.traj.trajectory.clear()
self.marker_setpoint_sent = False
def trajPublisher(self,):
r = rp.Rate(self.rate)
while not rp.is_shutdown():
if self.marker_setpoint_sent:
self.traj.header.stamp = rp.Time.now()
self.traj_logs_pub.publish(self.traj)
r.sleep()
if __name__ == '__main__':
StateMachineNode(2)
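# Geometry note (illustrative, not part of the node): the setpoint is expressed in
# the marker frame and mapped into the world frame by composing homogeneous
# transforms, H_world_setpoint = H_world_marker @ H_marker_setpoint, after which the
# yaw is recovered from the rotation block as arctan2(R[1, 0], R[0, 0]). For example,
# a marker at (2, 1, 0) with a 90 degree yaw sends the marker-frame offset
# (-4.2, 0, 0.2) to roughly (2, -3.2, 0.2) in the world frame:
#
#     import numpy as np
#     H_wm = np.array([[0., -1., 0., 2.],
#                      [1.,  0., 0., 1.],
#                      [0.,  0., 1., 0.],
#                      [0.,  0., 0., 1.]])
#     H_ms = np.array([[1., 0., 0., -4.2],
#                      [0., 1., 0.,  0.0],
#                      [0., 0., 1.,  0.2],
#                      [0., 0., 0.,  1.0]])
#     H_ws = H_wm @ H_ms
#     H_ws[:3, 3]                          # -> array([ 2. , -3.2,  0.2])
#     np.arctan2(H_ws[1, 0], H_ws[0, 0])   # -> 1.5708 (pi/2)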
avg_line_length: 40.522727 | max_line_length: 88 | alphanum_fraction: 0.539727

hexsha: 5fca119c14e835095ad3954646a6f2572d361595 | size: 46,755 | ext: py | lang: Python
max_stars: pandas/tseries/period.py | clarkfitzg/pandas @ a71ede374a019ea40321d8c1cfd13258b45ff58d | licenses: ["PSF-2.0", "Apache-2.0", "BSD-2-Clause", "BSD-3-Clause"] | count: 1 | 2019-05-21T21:07:03.000Z to 2019-05-21T21:07:03.000Z
max_issues: pandas/tseries/period.py | josericardo/pandas @ fe9aa125c19ce2b22a0c4aabedd68b24df6cb98e | licenses: ["PSF-2.0", "Apache-2.0", "BSD-2-Clause", "BSD-3-Clause"] | count: null
max_forks: pandas/tseries/period.py | josericardo/pandas @ fe9aa125c19ce2b22a0c4aabedd68b24df6cb98e | licenses: ["PSF-2.0", "Apache-2.0", "BSD-2-Clause", "BSD-3-Clause"] | count: null
# pylint: disable=E1101,E1103,W0232
import operator
from datetime import datetime, date
import numpy as np
from pandas.core.base import PandasObject
from pandas.tseries.frequencies import (get_freq_code as _gfc,
_month_numbers, FreqGroup)
from pandas.tseries.index import DatetimeIndex, Int64Index, Index
from pandas.tseries.tools import parse_time_string
import pandas.tseries.frequencies as _freq_mod
import pandas.core.common as com
from pandas.core.common import (isnull, _INT64_DTYPE, _maybe_box,
_values_from_object)
from pandas import compat
from pandas.lib import Timestamp
import pandas.lib as lib
import pandas.tslib as tslib
import pandas.algos as _algos
from pandas.compat import map, zip, u
#---------------
# Period logic
def _period_field_accessor(name, alias):
def f(self):
base, mult = _gfc(self.freq)
return tslib.get_period_field(alias, self.ordinal, base)
f.__name__ = name
return property(f)
def _field_accessor(name, alias):
def f(self):
base, mult = _gfc(self.freq)
return tslib.get_period_field_arr(alias, self.values, base)
f.__name__ = name
return property(f)
class Period(PandasObject):
"""
Represents a period of time
Parameters
----------
value : Period or compat.string_types, default None
The time period represented (e.g., '4Q2005')
freq : str, default None
e.g., 'B' for businessday. Must be a singular rule-code (e.g. 5T is not
allowed).
year : int, default None
month : int, default 1
quarter : int, default None
day : int, default 1
hour : int, default 0
minute : int, default 0
second : int, default 0
"""
__slots__ = ['freq', 'ordinal']
_comparables = ['name','freqstr']
def __init__(self, value=None, freq=None, ordinal=None,
year=None, month=1, quarter=None, day=1,
hour=0, minute=0, second=0):
# freq points to a tuple (base, mult); base is one of the defined
# periods such as A, Q, etc. Every five minutes would be, e.g.,
# ('T', 5) but may be passed in as a string like '5T'
self.freq = None
# ordinal is the period offset from the gregorian proleptic epoch
self.ordinal = None
if ordinal is not None and value is not None:
raise ValueError(("Only value or ordinal but not both should be "
"given but not both"))
elif ordinal is not None:
if not com.is_integer(ordinal):
raise ValueError("Ordinal must be an integer")
if freq is None:
raise ValueError('Must supply freq for ordinal value')
self.ordinal = ordinal
elif value is None:
if freq is None:
raise ValueError("If value is None, freq cannot be None")
self.ordinal = _ordinal_from_fields(year, month, quarter, day,
hour, minute, second, freq)
elif isinstance(value, Period):
other = value
if freq is None or _gfc(freq) == _gfc(other.freq):
self.ordinal = other.ordinal
freq = other.freq
else:
converted = other.asfreq(freq)
self.ordinal = converted.ordinal
elif isinstance(value, compat.string_types) or com.is_integer(value):
if com.is_integer(value):
value = str(value)
dt, freq = _get_date_and_freq(value, freq)
elif isinstance(value, datetime):
dt = value
if freq is None:
raise ValueError('Must supply freq for datetime value')
elif isinstance(value, date):
dt = datetime(year=value.year, month=value.month, day=value.day)
if freq is None:
raise ValueError('Must supply freq for datetime value')
else:
msg = "Value must be Period, string, integer, or datetime"
raise ValueError(msg)
base, mult = _gfc(freq)
if mult != 1:
# TODO: Better error message - this is slightly confusing
raise ValueError('Only mult == 1 supported')
if self.ordinal is None:
self.ordinal = tslib.period_ordinal(dt.year, dt.month, dt.day,
dt.hour, dt.minute, dt.second, dt.microsecond, 0,
base)
self.freq = _freq_mod._get_freq_str(base)
def __eq__(self, other):
if isinstance(other, Period):
if other.freq != self.freq:
raise ValueError("Cannot compare non-conforming periods")
return (self.ordinal == other.ordinal
and _gfc(self.freq) == _gfc(other.freq))
else:
raise TypeError(other)
return False
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash((self.ordinal, self.freq))
def __add__(self, other):
if com.is_integer(other):
return Period(ordinal=self.ordinal + other, freq=self.freq)
else: # pragma: no cover
raise TypeError(other)
def __sub__(self, other):
if com.is_integer(other):
return Period(ordinal=self.ordinal - other, freq=self.freq)
if isinstance(other, Period):
if other.freq != self.freq:
raise ValueError("Cannot do arithmetic with "
"non-conforming periods")
return self.ordinal - other.ordinal
else: # pragma: no cover
raise TypeError(other)
def _comp_method(func, name):
def f(self, other):
if isinstance(other, Period):
if other.freq != self.freq:
raise ValueError("Cannot compare non-conforming periods")
return func(self.ordinal, other.ordinal)
else:
raise TypeError(other)
f.__name__ = name
return f
__lt__ = _comp_method(operator.lt, '__lt__')
__le__ = _comp_method(operator.le, '__le__')
__gt__ = _comp_method(operator.gt, '__gt__')
__ge__ = _comp_method(operator.ge, '__ge__')
def asfreq(self, freq, how='E'):
"""
Convert Period to desired frequency, either at the start or end of the
interval
Parameters
----------
freq : string
how : {'E', 'S', 'end', 'start'}, default 'end'
Start or end of the timespan
Returns
-------
resampled : Period
"""
how = _validate_end_alias(how)
base1, mult1 = _gfc(self.freq)
base2, mult2 = _gfc(freq)
if mult2 != 1:
raise ValueError('Only mult == 1 supported')
end = how == 'E'
new_ordinal = tslib.period_asfreq(self.ordinal, base1, base2, end)
return Period(ordinal=new_ordinal, freq=base2)
@property
def start_time(self):
return self.to_timestamp(how='S')
@property
def end_time(self):
ordinal = (self + 1).start_time.value - 1
return Timestamp(ordinal)
def to_timestamp(self, freq=None, how='start', tz=None):
"""
Return the Timestamp representation of the Period at the target
frequency at the specified end (how) of the Period
Parameters
----------
freq : string or DateOffset, default is 'D' if self.freq is week or
longer and 'S' otherwise
Target frequency
how: str, default 'S' (start)
'S', 'E'. Can be aliased as case insensitive
'Start', 'Finish', 'Begin', 'End'
Returns
-------
Timestamp
"""
how = _validate_end_alias(how)
if freq is None:
base, mult = _gfc(self.freq)
freq = _freq_mod.get_to_timestamp_base(base)
base, mult = _gfc(freq)
val = self.asfreq(freq, how)
dt64 = tslib.period_ordinal_to_dt64(val.ordinal, base)
return Timestamp(dt64, tz=tz)
year = _period_field_accessor('year', 0)
month = _period_field_accessor('month', 3)
day = _period_field_accessor('day', 4)
hour = _period_field_accessor('hour', 5)
minute = _period_field_accessor('minute', 6)
second = _period_field_accessor('second', 7)
weekofyear = _period_field_accessor('week', 8)
week = weekofyear
dayofweek = _period_field_accessor('dayofweek', 10)
weekday = dayofweek
dayofyear = _period_field_accessor('dayofyear', 9)
quarter = _period_field_accessor('quarter', 2)
qyear = _period_field_accessor('qyear', 1)
@classmethod
def now(cls, freq=None):
return Period(datetime.now(), freq=freq)
def __repr__(self):
base, mult = _gfc(self.freq)
formatted = tslib.period_format(self.ordinal, base)
freqstr = _freq_mod._reverse_period_code_map[base]
if not compat.PY3:
encoding = com.get_option("display.encoding")
formatted = formatted.encode(encoding)
return "Period('%s', '%s')" % (formatted, freqstr)
def __unicode__(self):
"""
Return a string representation for a particular DataFrame
Invoked by unicode(df) in py2 only. Yields a Unicode String in both
py2/py3.
"""
base, mult = _gfc(self.freq)
formatted = tslib.period_format(self.ordinal, base)
value = ("%s" % formatted)
return value
def strftime(self, fmt):
"""
Returns the string representation of the :class:`Period`, depending
on the selected :keyword:`format`. :keyword:`format` must be a string
containing one or several directives. The method recognizes the same
directives as the :func:`time.strftime` function of the standard Python
distribution, as well as the specific additional directives ``%f``,
``%F``, ``%q``. (formatting & docs originally from scikits.timeseries)
+-----------+--------------------------------+-------+
| Directive | Meaning | Notes |
+===========+================================+=======+
| ``%a`` | Locale's abbreviated weekday | |
| | name. | |
+-----------+--------------------------------+-------+
| ``%A`` | Locale's full weekday name. | |
+-----------+--------------------------------+-------+
| ``%b`` | Locale's abbreviated month | |
| | name. | |
+-----------+--------------------------------+-------+
| ``%B`` | Locale's full month name. | |
+-----------+--------------------------------+-------+
| ``%c`` | Locale's appropriate date and | |
| | time representation. | |
+-----------+--------------------------------+-------+
| ``%d`` | Day of the month as a decimal | |
| | number [01,31]. | |
+-----------+--------------------------------+-------+
| ``%f`` | 'Fiscal' year without a | \(1) |
| | century as a decimal number | |
| | [00,99] | |
+-----------+--------------------------------+-------+
| ``%F`` | 'Fiscal' year with a century | \(2) |
| | as a decimal number | |
+-----------+--------------------------------+-------+
| ``%H`` | Hour (24-hour clock) as a | |
| | decimal number [00,23]. | |
+-----------+--------------------------------+-------+
| ``%I`` | Hour (12-hour clock) as a | |
| | decimal number [01,12]. | |
+-----------+--------------------------------+-------+
| ``%j`` | Day of the year as a decimal | |
| | number [001,366]. | |
+-----------+--------------------------------+-------+
| ``%m`` | Month as a decimal number | |
| | [01,12]. | |
+-----------+--------------------------------+-------+
| ``%M`` | Minute as a decimal number | |
| | [00,59]. | |
+-----------+--------------------------------+-------+
| ``%p`` | Locale's equivalent of either | \(3) |
| | AM or PM. | |
+-----------+--------------------------------+-------+
| ``%q`` | Quarter as a decimal number | |
| | [01,04] | |
+-----------+--------------------------------+-------+
| ``%S`` | Second as a decimal number | \(4) |
| | [00,61]. | |
+-----------+--------------------------------+-------+
| ``%U`` | Week number of the year | \(5) |
| | (Sunday as the first day of | |
| | the week) as a decimal number | |
| | [00,53]. All days in a new | |
| | year preceding the first | |
| | Sunday are considered to be in | |
| | week 0. | |
+-----------+--------------------------------+-------+
| ``%w`` | Weekday as a decimal number | |
| | [0(Sunday),6]. | |
+-----------+--------------------------------+-------+
| ``%W`` | Week number of the year | \(5) |
| | (Monday as the first day of | |
| | the week) as a decimal number | |
| | [00,53]. All days in a new | |
| | year preceding the first | |
| | Monday are considered to be in | |
| | week 0. | |
+-----------+--------------------------------+-------+
| ``%x`` | Locale's appropriate date | |
| | representation. | |
+-----------+--------------------------------+-------+
| ``%X`` | Locale's appropriate time | |
| | representation. | |
+-----------+--------------------------------+-------+
| ``%y`` | Year without century as a | |
| | decimal number [00,99]. | |
+-----------+--------------------------------+-------+
| ``%Y`` | Year with century as a decimal | |
| | number. | |
+-----------+--------------------------------+-------+
| ``%Z`` | Time zone name (no characters | |
| | if no time zone exists). | |
+-----------+--------------------------------+-------+
| ``%%`` | A literal ``'%'`` character. | |
+-----------+--------------------------------+-------+
.. note::
(1)
The ``%f`` directive is the same as ``%y`` if the frequency is
not quarterly.
Otherwise, it corresponds to the 'fiscal' year, as defined by
the :attr:`qyear` attribute.
(2)
The ``%F`` directive is the same as ``%Y`` if the frequency is
not quarterly.
Otherwise, it corresponds to the 'fiscal' year, as defined by
the :attr:`qyear` attribute.
(3)
The ``%p`` directive only affects the output hour field
if the ``%I`` directive is used to parse the hour.
(4)
The range really is ``0`` to ``61``; this accounts for leap
seconds and the (very rare) double leap seconds.
(5)
The ``%U`` and ``%W`` directives are only used in calculations
when the day of the week and the year are specified.
.. rubric:: Examples
>>> a = Period(freq='Q@JUL', year=2006, quarter=1)
>>> a.strftime('%F-Q%q')
'2006-Q1'
>>> # Output the last month in the quarter of this date
>>> a.strftime('%b-%Y')
'Oct-2005'
>>>
>>> a = Period(freq='D', year=2001, month=1, day=1)
>>> a.strftime('%d-%b-%Y')
'01-Jan-2001'
>>> a.strftime('%b. %d, %Y was a %A')
'Jan. 01, 2001 was a Monday'
"""
base, mult = _gfc(self.freq)
return tslib.period_format(self.ordinal, base, fmt)
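# Example usage (illustrative, based on this module's own docstrings and methods):
#
#     p = Period('2005Q4', freq='Q')     # parse a quarterly period
#     p.asfreq('M', how='E')             # last month of the quarter -> Period('2005-12', 'M')
#     p.to_timestamp(how='S')            # start of the span -> Timestamp('2005-10-01 00:00:00')
#     (p + 1).ordinal - p.ordinal        # ordinals are consecutive integers -> 1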
def _get_date_and_freq(value, freq):
value = value.upper()
dt, _, reso = parse_time_string(value, freq)
if freq is None:
if reso == 'year':
freq = 'A'
elif reso == 'quarter':
freq = 'Q'
elif reso == 'month':
freq = 'M'
elif reso == 'day':
freq = 'D'
elif reso == 'hour':
freq = 'H'
elif reso == 'minute':
freq = 'T'
elif reso == 'second':
freq = 'S'
elif reso == 'microsecond':
if dt.microsecond % 1000 == 0:
freq = 'L'
else:
freq = 'U'
else:
raise ValueError("Invalid frequency or could not infer: %s" % reso)
return dt, freq
def _get_ordinals(data, freq):
f = lambda x: Period(x, freq=freq).ordinal
if isinstance(data[0], Period):
return tslib.extract_ordinals(data, freq)
else:
return lib.map_infer(data, f)
def dt64arr_to_periodarr(data, freq, tz):
if data.dtype != np.dtype('M8[ns]'):
raise ValueError('Wrong dtype: %s' % data.dtype)
base, mult = _gfc(freq)
return tslib.dt64arr_to_periodarr(data.view('i8'), base, tz)
# --- Period index sketch
def _period_index_cmp(opname):
"""
Wrap comparison operations to convert datetime-like to datetime64
"""
def wrapper(self, other):
if isinstance(other, Period):
func = getattr(self.values, opname)
if other.freq != self.freq:
raise AssertionError("Frequencies must be equal")
result = func(other.ordinal)
elif isinstance(other, PeriodIndex):
if other.freq != self.freq:
raise AssertionError("Frequencies must be equal")
return getattr(self.values, opname)(other.values)
else:
other = Period(other, freq=self.freq)
func = getattr(self.values, opname)
result = func(other.ordinal)
return result
return wrapper
class PeriodIndex(Int64Index):
"""
Immutable ndarray holding ordinal values indicating regular periods in
time such as particular years, quarters, months, etc. A value of 1 is the
period containing the Gregorian proleptic datetime Jan 1, 0001 00:00:00.
This ordinal representation is from the scikits.timeseries project.
For instance,
# construct period for day 1/1/1 and get the first second
i = Period(year=1,month=1,day=1,freq='D').asfreq('S', 'S')
i.ordinal
===> 1
Index keys are boxed to Period objects which carries the metadata (eg,
frequency information).
Parameters
----------
data : array-like (1-dimensional), optional
Optional period-like data to construct index with
dtype : NumPy dtype (default: i8)
copy : bool
Make a copy of input ndarray
freq : string or period object, optional
One of pandas period strings or corresponding objects
start : starting value, period-like, optional
If data is None, used as the start point in generating regular
period data.
periods : int, optional, > 0
Number of periods to generate, if generating index. Takes precedence
over end argument
end : end value, period-like, optional
If periods is none, generated index will extend to first conforming
period on or just past end argument
year : int or array, default None
month : int or array, default None
quarter : int or array, default None
day : int or array, default None
hour : int or array, default None
minute : int or array, default None
second : int or array, default None
tz : object, default None
Timezone for converting datetime64 data to Periods
Examples
--------
>>> idx = PeriodIndex(year=year_arr, quarter=q_arr)
>>> idx2 = PeriodIndex(start='2000', end='2010', freq='A')
"""
_box_scalars = True
_allow_period_index_ops = True
__eq__ = _period_index_cmp('__eq__')
__ne__ = _period_index_cmp('__ne__')
__lt__ = _period_index_cmp('__lt__')
__gt__ = _period_index_cmp('__gt__')
__le__ = _period_index_cmp('__le__')
__ge__ = _period_index_cmp('__ge__')
def __new__(cls, data=None, ordinal=None, freq=None, start=None, end=None,
periods=None, copy=False, name=None, year=None, month=None,
quarter=None, day=None, hour=None, minute=None, second=None,
tz=None):
freq = _freq_mod.get_standard_freq(freq)
if periods is not None:
if com.is_float(periods):
periods = int(periods)
elif not com.is_integer(periods):
raise ValueError('Periods must be a number, got %s' %
str(periods))
if data is None:
if ordinal is not None:
data = np.asarray(ordinal, dtype=np.int64)
else:
fields = [year, month, quarter, day, hour, minute, second]
data, freq = cls._generate_range(start, end, periods,
freq, fields)
else:
ordinal, freq = cls._from_arraylike(data, freq, tz)
data = np.array(ordinal, dtype=np.int64, copy=False)
subarr = data.view(cls)
subarr.name = name
subarr.freq = freq
return subarr
@classmethod
def _generate_range(cls, start, end, periods, freq, fields):
field_count = com._count_not_none(*fields)
if com._count_not_none(start, end) > 0:
if field_count > 0:
raise ValueError('Can either instantiate from fields '
'or endpoints, but not both')
subarr, freq = _get_ordinal_range(start, end, periods, freq)
elif field_count > 0:
y, mth, q, d, h, minute, s = fields
subarr, freq = _range_from_fields(year=y, month=mth, quarter=q,
day=d, hour=h, minute=minute,
second=s, freq=freq)
else:
raise ValueError('Not enough parameters to construct '
'Period range')
return subarr, freq
@classmethod
def _from_arraylike(cls, data, freq, tz):
if not isinstance(data, np.ndarray):
if np.isscalar(data) or isinstance(data, Period):
raise ValueError('PeriodIndex() must be called with a '
'collection of some kind, %s was passed'
% repr(data))
# other iterable of some kind
if not isinstance(data, (list, tuple)):
data = list(data)
try:
data = com._ensure_int64(data)
if freq is None:
raise ValueError('freq not specified')
data = np.array([Period(x, freq=freq).ordinal for x in data],
dtype=np.int64)
except (TypeError, ValueError):
data = com._ensure_object(data)
if freq is None and len(data) > 0:
freq = getattr(data[0], 'freq', None)
if freq is None:
raise ValueError('freq not specified and cannot be '
'inferred from first element')
data = _get_ordinals(data, freq)
else:
if isinstance(data, PeriodIndex):
if freq is None or freq == data.freq:
freq = data.freq
data = data.values
else:
base1, _ = _gfc(data.freq)
base2, _ = _gfc(freq)
data = tslib.period_asfreq_arr(data.values, base1,
base2, 1)
else:
if freq is None and len(data) > 0:
freq = getattr(data[0], 'freq', None)
if freq is None:
raise ValueError('freq not specified and cannot be '
'inferred from first element')
if data.dtype != np.int64:
if np.issubdtype(data.dtype, np.datetime64):
data = dt64arr_to_periodarr(data, freq, tz)
else:
try:
data = com._ensure_int64(data)
except (TypeError, ValueError):
data = com._ensure_object(data)
data = _get_ordinals(data, freq)
return data, freq
def __contains__(self, key):
if not isinstance(key, Period) or key.freq != self.freq:
if isinstance(key, compat.string_types):
try:
self.get_loc(key)
return True
except Exception:
return False
return False
return key.ordinal in self._engine
def _box_values(self, values):
f = lambda x: Period(ordinal=x, freq=self.freq)
return lib.map_infer(values, f)
def asof_locs(self, where, mask):
"""
where : array of timestamps
mask : array of booleans where data is not NA
"""
where_idx = where
if isinstance(where_idx, DatetimeIndex):
where_idx = PeriodIndex(where_idx.values, freq=self.freq)
locs = self.values[mask].searchsorted(where_idx.values, side='right')
locs = np.where(locs > 0, locs - 1, 0)
result = np.arange(len(self))[mask].take(locs)
first = mask.argmax()
result[(locs == 0) & (where_idx.values < self.values[first])] = -1
return result
@property
def asobject(self):
return Index(self._box_values(self.values), dtype=object)
def _array_values(self):
return self.asobject
def astype(self, dtype):
dtype = np.dtype(dtype)
if dtype == np.object_:
return Index(np.array(list(self), dtype), dtype)
elif dtype == _INT64_DTYPE:
return Index(self.values, dtype)
raise ValueError('Cannot cast PeriodIndex to dtype %s' % dtype)
def __iter__(self):
for val in self.values:
yield Period(ordinal=val, freq=self.freq)
@property
def is_all_dates(self):
return True
@property
def is_full(self):
"""
Returns True if there are any missing periods from start to end
"""
if len(self) == 0:
return True
if not self.is_monotonic:
raise ValueError('Index is not monotonic')
values = self.values
return ((values[1:] - values[:-1]) < 2).all()
def factorize(self):
"""
Specialized factorize that boxes uniques
"""
from pandas.core.algorithms import factorize
labels, uniques = factorize(self.values)
uniques = PeriodIndex(ordinal=uniques, freq=self.freq)
return labels, uniques
@property
def freqstr(self):
return self.freq
def asfreq(self, freq=None, how='E'):
how = _validate_end_alias(how)
freq = _freq_mod.get_standard_freq(freq)
base1, mult1 = _gfc(self.freq)
base2, mult2 = _gfc(freq)
if mult2 != 1:
raise ValueError('Only mult == 1 supported')
end = how == 'E'
new_data = tslib.period_asfreq_arr(self.values, base1, base2, end)
result = new_data.view(PeriodIndex)
result.name = self.name
result.freq = freq
return result
def to_datetime(self, dayfirst=False):
return self.to_timestamp()
_year = _field_accessor('year', 0)
_month = _field_accessor('month', 3)
_day = _field_accessor('day', 4)
_hour = _field_accessor('hour', 5)
_minute = _field_accessor('minute', 6)
_second = _field_accessor('second', 7)
_weekofyear = _field_accessor('week', 8)
_week = _weekofyear
_dayofweek = _field_accessor('dayofweek', 10)
_weekday = _dayofweek
_dayofyear = day_of_year = _field_accessor('dayofyear', 9)
_quarter = _field_accessor('quarter', 2)
_qyear = _field_accessor('qyear', 1)
# Try to run function on index first, and then on elements of index
# Especially important for group-by functionality
def map(self, f):
try:
result = f(self)
if not isinstance(result, np.ndarray):
raise TypeError
return result
except Exception:
return _algos.arrmap_object(self.asobject, f)
def _get_object_array(self):
freq = self.freq
boxfunc = lambda x: Period(ordinal=x, freq=freq)
boxer = np.frompyfunc(boxfunc, 1, 1)
return boxer(self.values)
def _mpl_repr(self):
# how to represent ourselves to matplotlib
return self._get_object_array()
def equals(self, other):
"""
Determines if two Index objects contain the same elements.
"""
if self.is_(other):
return True
return np.array_equal(self.asi8, other.asi8)
def tolist(self):
"""
Return a list of Period objects
"""
return self._get_object_array().tolist()
def to_timestamp(self, freq=None, how='start'):
"""
Cast to DatetimeIndex
Parameters
----------
freq : string or DateOffset, default 'D' for week or longer, 'S'
otherwise
Target frequency
how : {'s', 'e', 'start', 'end'}
Returns
-------
DatetimeIndex
"""
how = _validate_end_alias(how)
if freq is None:
base, mult = _gfc(self.freq)
freq = _freq_mod.get_to_timestamp_base(base)
base, mult = _gfc(freq)
new_data = self.asfreq(freq, how)
new_data = tslib.periodarr_to_dt64arr(new_data.values, base)
return DatetimeIndex(new_data, freq='infer', name=self.name)
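    # Usage sketch (hypothetical values): monthly periods map to the first or
    # last timestamp of each month depending on `how`.
    #   >>> prng = period_range('2000-01', periods=3, freq='M')
    #   >>> prng.to_timestamp()           # 2000-01-01, 2000-02-01, 2000-03-01
    #   >>> prng.to_timestamp(how='end')  # 2000-01-31, 2000-02-29, 2000-03-31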
def shift(self, n):
"""
        Specialized shift which produces a PeriodIndex
Parameters
----------
n : int
Periods to shift by
Returns
-------
shifted : PeriodIndex
"""
if n == 0:
return self
return PeriodIndex(data=self.values + n, freq=self.freq)
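    # Usage sketch (hypothetical values): every period moves forward (or
    # backward, for negative n) by n increments of the index frequency.
    #   >>> period_range('2000Q1', periods=4, freq='Q').shift(2)
    #   # -> 2000Q3, 2000Q4, 2001Q1, 2001Q2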
def __add__(self, other):
return PeriodIndex(ordinal=self.values + other, freq=self.freq)
def __sub__(self, other):
return PeriodIndex(ordinal=self.values - other, freq=self.freq)
@property
def inferred_type(self):
# b/c data is represented as ints make sure we can't have ambiguous
# indexing
return 'period'
def get_value(self, series, key):
"""
Fast lookup of value from 1-dimensional ndarray. Only use this if you
know what you're doing
"""
s = _values_from_object(series)
try:
return _maybe_box(self, super(PeriodIndex, self).get_value(s, key), series, key)
except (KeyError, IndexError):
try:
asdt, parsed, reso = parse_time_string(key, self.freq)
grp = _freq_mod._infer_period_group(reso)
freqn = _freq_mod._period_group(self.freq)
vals = self.values
# if our data is higher resolution than requested key, slice
if grp < freqn:
iv = Period(asdt, freq=(grp, 1))
ord1 = iv.asfreq(self.freq, how='S').ordinal
ord2 = iv.asfreq(self.freq, how='E').ordinal
if ord2 < vals[0] or ord1 > vals[-1]:
raise KeyError(key)
pos = np.searchsorted(self.values, [ord1, ord2])
key = slice(pos[0], pos[1] + 1)
return series[key]
else:
key = Period(asdt, freq=self.freq).ordinal
return _maybe_box(self, self._engine.get_value(s, key), series, key)
except TypeError:
pass
except KeyError:
pass
key = Period(key, self.freq).ordinal
return _maybe_box(self, self._engine.get_value(s, key), series, key)
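    # Partial string lookup sketch (hypothetical values; `Series` below means
    # pandas.Series): with a monthly index, a coarser key such as a bare year
    # expands to the span of matching periods, while an exact month resolves
    # to a single element.
    #   >>> s = Series(range(12), index=period_range('2000-01', periods=12, freq='M'))
    #   >>> s['2000-03']   # scalar for March 2000
    #   >>> s['2000']      # all twelve months of 2000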
def get_loc(self, key):
"""
Get integer location for requested label
Returns
-------
loc : int
"""
try:
return self._engine.get_loc(key)
except KeyError:
try:
asdt, parsed, reso = parse_time_string(key, self.freq)
key = asdt
except TypeError:
pass
key = Period(key, self.freq)
try:
return self._engine.get_loc(key.ordinal)
except KeyError:
raise KeyError(key)
def slice_locs(self, start=None, end=None):
"""
Index.slice_locs, customized to handle partial ISO-8601 string slicing
"""
if isinstance(start, compat.string_types) or isinstance(end, compat.string_types):
try:
if start:
start_loc = self._get_string_slice(start).start
else:
start_loc = 0
if end:
end_loc = self._get_string_slice(end).stop
else:
end_loc = len(self)
return start_loc, end_loc
except KeyError:
pass
if isinstance(start, datetime) and isinstance(end, datetime):
ordinals = self.values
t1 = Period(start, freq=self.freq)
t2 = Period(end, freq=self.freq)
left = ordinals.searchsorted(t1.ordinal, side='left')
right = ordinals.searchsorted(t2.ordinal, side='right')
return left, right
return Int64Index.slice_locs(self, start, end)
def _get_string_slice(self, key):
if not self.is_monotonic:
raise ValueError('Partial indexing only valid for '
'ordered time series')
asdt, parsed, reso = parse_time_string(key, self.freq)
key = asdt
if reso == 'year':
t1 = Period(year=parsed.year, freq='A')
elif reso == 'month':
t1 = Period(year=parsed.year, month=parsed.month, freq='M')
elif reso == 'quarter':
q = (parsed.month - 1) // 3 + 1
t1 = Period(year=parsed.year, quarter=q, freq='Q-DEC')
else:
raise KeyError(key)
ordinals = self.values
t2 = t1.asfreq(self.freq, how='end')
t1 = t1.asfreq(self.freq, how='start')
left = ordinals.searchsorted(t1.ordinal, side='left')
right = ordinals.searchsorted(t2.ordinal, side='right')
return slice(left, right)
def join(self, other, how='left', level=None, return_indexers=False):
"""
See Index.join
"""
self._assert_can_do_setop(other)
result = Int64Index.join(self, other, how=how, level=level,
return_indexers=return_indexers)
if return_indexers:
result, lidx, ridx = result
return self._apply_meta(result), lidx, ridx
return self._apply_meta(result)
def _assert_can_do_setop(self, other):
if not isinstance(other, PeriodIndex):
raise ValueError('can only call with other PeriodIndex-ed objects')
if self.freq != other.freq:
raise ValueError('Only like-indexed PeriodIndexes compatible '
'for join (for now)')
def _wrap_union_result(self, other, result):
name = self.name if self.name == other.name else None
result = self._apply_meta(result)
result.name = name
return result
def _apply_meta(self, rawarr):
if not isinstance(rawarr, PeriodIndex):
rawarr = rawarr.view(PeriodIndex)
rawarr.freq = self.freq
return rawarr
def __getitem__(self, key):
"""Override numpy.ndarray's __getitem__ method to work as desired"""
arr_idx = self.view(np.ndarray)
if np.isscalar(key):
val = arr_idx[key]
return Period(ordinal=val, freq=self.freq)
else:
if com._is_bool_indexer(key):
key = np.asarray(key)
result = arr_idx[key]
if result.ndim > 1:
# MPL kludge
# values = np.asarray(list(values), dtype=object)
# return values.reshape(result.shape)
return PeriodIndex(result, name=self.name, freq=self.freq)
return PeriodIndex(result, name=self.name, freq=self.freq)
def _format_with_header(self, header, **kwargs):
return header + self._format_native_types(**kwargs)
def _format_native_types(self, na_rep=u('NaT'), **kwargs):
values = np.array(list(self), dtype=object)
mask = isnull(self.values)
values[mask] = na_rep
        imask = ~mask
values[imask] = np.array([u('%s') % dt for dt in values[imask]])
return values.tolist()
def __array_finalize__(self, obj):
if not self.ndim: # pragma: no cover
return self.item()
self.freq = getattr(obj, 'freq', None)
self.name = getattr(obj, 'name', None)
self._reset_identity()
def __repr__(self):
output = com.pprint_thing(self.__class__) + '\n'
output += 'freq: %s\n' % self.freq
n = len(self)
if n == 1:
output += '[%s]\n' % (self[0])
elif n == 2:
output += '[%s, %s]\n' % (self[0], self[-1])
elif n:
output += '[%s, ..., %s]\n' % (self[0], self[-1])
output += 'length: %d' % n
return output
def __unicode__(self):
output = self.__class__.__name__
output += u('(')
prefix = '' if compat.PY3 else 'u'
mapper = "{0}'{{0}}'".format(prefix)
output += '[{0}]'.format(', '.join(map(mapper.format, self)))
output += ", freq='{0}'".format(self.freq)
output += ')'
return output
def __bytes__(self):
encoding = com.get_option('display.encoding')
return self.__unicode__().encode(encoding, 'replace')
def __str__(self):
if compat.PY3:
return self.__unicode__()
return self.__bytes__()
def take(self, indices, axis=None):
"""
Analogous to ndarray.take
"""
indices = com._ensure_platform_int(indices)
taken = self.values.take(indices, axis=axis)
taken = taken.view(PeriodIndex)
taken.freq = self.freq
taken.name = self.name
return taken
def append(self, other):
"""
Append a collection of Index options together
Parameters
----------
other : Index or list/tuple of indices
Returns
-------
appended : Index
"""
name = self.name
to_concat = [self]
if isinstance(other, (list, tuple)):
to_concat = to_concat + list(other)
else:
to_concat.append(other)
for obj in to_concat:
if isinstance(obj, Index) and obj.name != name:
name = None
break
to_concat = self._ensure_compat_concat(to_concat)
if isinstance(to_concat[0], PeriodIndex):
if len(set([x.freq for x in to_concat])) > 1:
# box
to_concat = [x.asobject for x in to_concat]
else:
cat_values = np.concatenate([x.values for x in to_concat])
return PeriodIndex(cat_values, freq=self.freq, name=name)
to_concat = [x.values if isinstance(x, Index) else x
for x in to_concat]
return Index(com._concat_compat(to_concat), name=name)
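    # Usage sketch (hypothetical values): appending like-frequency indexes
    # stays a PeriodIndex; mixing frequencies falls back to an object Index.
    #   >>> a = period_range('2000-01', periods=3, freq='M')
    #   >>> b = period_range('2000-04', periods=3, freq='M')
    #   >>> a.append(b)               # PeriodIndex of length 6, freq 'M'
    #   >>> a.append(b.asfreq('D'))   # object-dtype Index of boxed Periods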
def __reduce__(self):
"""Necessary for making this object picklable"""
object_state = list(np.ndarray.__reduce__(self))
subclass_state = (self.name, self.freq)
object_state[2] = (object_state[2], subclass_state)
return tuple(object_state)
def __setstate__(self, state):
"""Necessary for making this object picklable"""
if len(state) == 2:
nd_state, own_state = state
np.ndarray.__setstate__(self, nd_state)
self.name = own_state[0]
try: # backcompat
self.freq = own_state[1]
except:
pass
else: # pragma: no cover
np.ndarray.__setstate__(self, state)
def _get_ordinal_range(start, end, periods, freq):
if com._count_not_none(start, end, periods) < 2:
raise ValueError('Must specify 2 of start, end, periods')
if start is not None:
start = Period(start, freq)
if end is not None:
end = Period(end, freq)
is_start_per = isinstance(start, Period)
is_end_per = isinstance(end, Period)
if is_start_per and is_end_per and (start.freq != end.freq):
raise ValueError('Start and end must have same freq')
if freq is None:
if is_start_per:
freq = start.freq
elif is_end_per:
freq = end.freq
else: # pragma: no cover
raise ValueError('Could not infer freq from start/end')
if periods is not None:
if start is None:
data = np.arange(end.ordinal - periods + 1,
end.ordinal + 1,
dtype=np.int64)
else:
data = np.arange(start.ordinal, start.ordinal + periods,
dtype=np.int64)
else:
data = np.arange(start.ordinal, end.ordinal + 1, dtype=np.int64)
return data, freq
def _range_from_fields(year=None, month=None, quarter=None, day=None,
hour=None, minute=None, second=None, freq=None):
if hour is None:
hour = 0
if minute is None:
minute = 0
if second is None:
second = 0
if day is None:
day = 1
ordinals = []
if quarter is not None:
if freq is None:
freq = 'Q'
base = FreqGroup.FR_QTR
else:
base, mult = _gfc(freq)
if mult != 1:
raise ValueError('Only mult == 1 supported')
if base != FreqGroup.FR_QTR:
raise AssertionError("base must equal FR_QTR")
year, quarter = _make_field_arrays(year, quarter)
for y, q in zip(year, quarter):
y, m = _quarter_to_myear(y, q, freq)
val = tslib.period_ordinal(y, m, 1, 1, 1, 1, 0, 0, base)
ordinals.append(val)
else:
base, mult = _gfc(freq)
if mult != 1:
raise ValueError('Only mult == 1 supported')
arrays = _make_field_arrays(year, month, day, hour, minute, second)
for y, mth, d, h, mn, s in zip(*arrays):
ordinals.append(tslib.period_ordinal(y, mth, d, h, mn, s, 0, 0, base))
return np.array(ordinals, dtype=np.int64), freq
def _make_field_arrays(*fields):
length = None
for x in fields:
if isinstance(x, (list, np.ndarray)):
if length is not None and len(x) != length:
raise ValueError('Mismatched Period array lengths')
elif length is None:
length = len(x)
arrays = [np.asarray(x) if isinstance(x, (np.ndarray, list))
else np.repeat(x, length) for x in fields]
return arrays
def _ordinal_from_fields(year, month, quarter, day, hour, minute,
second, freq):
base, mult = _gfc(freq)
if mult != 1:
raise ValueError('Only mult == 1 supported')
if quarter is not None:
year, month = _quarter_to_myear(year, quarter, freq)
return tslib.period_ordinal(year, month, day, hour, minute, second, 0, 0, base)
def _quarter_to_myear(year, quarter, freq):
if quarter is not None:
if quarter <= 0 or quarter > 4:
raise ValueError('Quarter must be 1 <= q <= 4')
mnum = _month_numbers[_freq_mod._get_rule_month(freq)] + 1
month = (mnum + (quarter - 1) * 3) % 12 + 1
if month > mnum:
year -= 1
return year, month
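# Worked sketch: for calendar quarters ('Q-DEC') the rule month is December
# (mnum = 12), so quarter 1 maps to month 1 of the same year. For a
# January-anchored fiscal year ('Q-JAN', mnum = 1), quarter 1 maps to month 2
# and, since 2 > 1, the year is decremented:
# _quarter_to_myear(2011, 1, 'Q-JAN') -> (2010, 2).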
def _validate_end_alias(how):
how_dict = {'S': 'S', 'E': 'E',
'START': 'S', 'FINISH': 'E',
'BEGIN': 'S', 'END': 'E'}
how = how_dict.get(str(how).upper())
if how not in set(['S', 'E']):
raise ValueError('How must be one of S or E')
return how
def pnow(freq=None):
return Period(datetime.now(), freq=freq)
def period_range(start=None, end=None, periods=None, freq='D', name=None):
"""
    Return a fixed frequency PeriodIndex, with day (calendar) as the default
frequency
Parameters
----------
    start : starting value, period-like, optional
    end : ending value, period-like, optional
periods : int, default None
Number of periods in the index
freq : str/DateOffset, default 'D'
Frequency alias
name : str, default None
Name for the resulting PeriodIndex
Returns
-------
prng : PeriodIndex
"""
return PeriodIndex(start=start, end=end, periods=periods,
freq=freq, name=name)
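# Usage sketch (hypothetical values); at least two of start/end/periods must
# be supplied:
#   >>> period_range(start='2017-01', end='2017-06', freq='M')    # 6 monthly periods
#   >>> period_range(start='2017-01', periods=6, freq='M', name='months')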
| 34.943946 | 97 | 0.516758 |
3277682b66abd00dd437f466d372169960c2e9e1
| 2,761 |
py
|
Python
|
Lib/site-packages/dill/tests/test_diff.py
|
edupyter/EDUPYTER38
|
396183cea72987506f1ef647c0272a2577c56218
|
[
"bzip2-1.0.6"
] | null | null | null |
Lib/site-packages/dill/tests/test_diff.py
|
edupyter/EDUPYTER38
|
396183cea72987506f1ef647c0272a2577c56218
|
[
"bzip2-1.0.6"
] | null | null | null |
Lib/site-packages/dill/tests/test_diff.py
|
edupyter/EDUPYTER38
|
396183cea72987506f1ef647c0272a2577c56218
|
[
"bzip2-1.0.6"
] | null | null | null |
#!/usr/bin/env python
#
# Author: Mike McKerns (mmckerns @caltech and @uqfoundation)
# Copyright (c) 2008-2016 California Institute of Technology.
# Copyright (c) 2016-2022 The Uncertainty Quantification Foundation.
# License: 3-clause BSD. The full license text is available at:
# - https://github.com/uqfoundation/dill/blob/master/LICENSE
from dill import __diff as diff
import sys
IS_PYPY = not hasattr(sys, 'getrefcount')
class A:
pass
def test_diff():
a = A()
b = A()
c = A()
a.a = b
b.a = c
diff.memorise(a)
assert not diff.has_changed(a)
c.a = 1
assert diff.has_changed(a)
diff.memorise(c, force=True)
assert not diff.has_changed(a)
c.a = 2
assert diff.has_changed(a)
changed = diff.whats_changed(a)
assert list(changed[0].keys()) == ["a"]
assert not changed[1]
a2 = []
b2 = [a2]
c2 = [b2]
diff.memorise(c2)
assert not diff.has_changed(c2)
a2.append(1)
assert diff.has_changed(c2)
changed = diff.whats_changed(c2)
assert changed[0] == {}
assert changed[1]
a3 = {}
b3 = {1: a3}
c3 = {1: b3}
diff.memorise(c3)
assert not diff.has_changed(c3)
a3[1] = 1
assert diff.has_changed(c3)
changed = diff.whats_changed(c3)
assert changed[0] == {}
assert changed[1]
if not IS_PYPY:
try:
import abc
# make sure the "_abc_invaldation_counter" doesn't make test fail
diff.memorise(abc.ABCMeta, force=True)
assert not diff.has_changed(abc)
abc.ABCMeta.zzz = 1
assert diff.has_changed(abc)
changed = diff.whats_changed(abc)
assert list(changed[0].keys()) == ["ABCMeta"]
assert not changed[1]
except ImportError:
pass
'''
import Queue
diff.memorise(Queue, force=True)
assert not diff.has_changed(Queue)
Queue.Queue.zzz = 1
assert diff.has_changed(Queue)
changed = diff.whats_changed(Queue)
assert list(changed[0].keys()) == ["Queue"]
assert not changed[1]
import math
diff.memorise(math, force=True)
assert not diff.has_changed(math)
math.zzz = 1
assert diff.has_changed(math)
changed = diff.whats_changed(math)
assert list(changed[0].keys()) == ["zzz"]
assert not changed[1]
'''
a = A()
b = A()
c = A()
a.a = b
b.a = c
diff.memorise(a)
assert not diff.has_changed(a)
c.a = 1
assert diff.has_changed(a)
diff.memorise(c, force=True)
assert not diff.has_changed(a)
del c.a
assert diff.has_changed(a)
changed = diff.whats_changed(a)
assert list(changed[0].keys()) == ["a"]
assert not changed[1]
if __name__ == '__main__':
test_diff()
| 24.873874 | 77 | 0.609924 |
40c0aa47d12c7de182bb29f7e797cc85b90f8834
| 3,350 |
py
|
Python
|
examples/test_rossler.py
|
Tarheel-Formal-Methods/kaa-optimize
|
35fe7b580df3b5efe7de9314b821c257f68d74bf
|
[
"MIT"
] | null | null | null |
examples/test_rossler.py
|
Tarheel-Formal-Methods/kaa-optimize
|
35fe7b580df3b5efe7de9314b821c257f68d74bf
|
[
"MIT"
] | 2 |
2020-12-11T17:34:46.000Z
|
2020-12-11T21:43:13.000Z
|
examples/test_rossler.py
|
Tarheel-Formal-Methods/kaa-optimize
|
35fe7b580df3b5efe7de9314b821c257f68d74bf
|
[
"MIT"
] | 1 |
2020-12-11T17:31:16.000Z
|
2020-12-11T17:31:16.000Z
|
from kaa.reach import ReachSet
from kaa.plotutil import Plot
from models.rossler import Rossler, Rossler_UnitBox
from kaa.trajectory import Traj
from kaa.experi_init import *
from kaa.timer import Timer
def test_sapo_Rossler():
model = Rossler()
num_steps = 150
    experi_input = dict(model=model, # Encompass strat initializations?
strat=None,
label="SapoRossler",
num_steps=num_steps)
harosc = ProjectionPlotExperiment(experi_input)
harosc.execute(0,1,2)
def test_sapo_vol_Rossler():
use_supp = True
use_pregen = False
num_trajs = 5000
num_steps = 150
model = Rossler(delta=0.5)
    experi_input = dict(model=model, # Encompass strat initializations?
strat=None,
label="SapoRossler",
supp_mode = use_supp,
pregen_mode = use_pregen,
num_trajs=num_trajs,
num_steps=num_steps-1,
max_steps=num_steps)
harosc = VolumeExperiment(experi_input)
harosc.execute(1)
def test_init_reach_vol_vs_ran_Rossler():
num_steps = 150
use_supp = True
use_pregen = False
num_trajs = 5000
pca_window_size = 10
lin_window_size = 10
inputs = []
for inc in range(5):
inc /= 100
box = ((0,0.1 + inc), (4.8 - inc,5), (0,0.1))
unit_model = Rossler_UnitBox(init_box=box)
model = Rossler(init_box=box)
pca_strat = SlidingPCAStrat(unit_model, lifespan=pca_window_size)
lin_strat = SlidingLinStrat(unit_model, lifespan=lin_window_size)
experi_input_one = dict(model=unit_model,
strat=MultiStrategy(pca_strat, lin_strat),
label=f"Rossler SlidingPCA Step {pca_window_size} and SlidingLin Step {lin_window_size}",
supp_mode = use_supp,
pregen_mode = use_pregen,
num_trajs=num_trajs,
num_steps=num_steps)
inputs.append(experi_input_one)
if use_supp:
file_identifier = "(SUPP)"
elif use_pregen:
file_identifier = f"(PREGEN: {num_trajs})"
else:
file_identifier = "(RAND)"
experi = InitReachVSRandomPlotExperiment(*inputs, num_ran_temps=pca_window_size+lin_window_size, num_trials=10)
experi.execute()
def test_sliding_strat_comb_Rossler():
model = Rossler_UnitBox()
test_sliding_strat_comb(model, 100, 4000, use_supp=True, use_pregen=False)
Timer.generate_stats()
def test_skewed_sliding_strat_comb_Rossler():
unit_model = Rossler_UnitBox()
model = Rossler()
test_skewed_sliding_strat_comb(unit_model, 100, 4000, num_temps=5, incre=1, use_supp=True, use_pregen=False, use_sapo=model)
Timer.generate_stats()
def test_sliding_pca_Rossler():
model = Rossler_UnitBox()
test_sliding_pca(model, 20, 150, -1, use_supp=True, use_pregen=False)
Timer.generate_stats()
def test_sliding_lin_Rossler():
model = Rossler_UnitBox()
test_sliding_lin(model, 20, 150, -1, use_supp=True, use_pregen=False)
Timer.generate_stats()
def gen_save_dirs_Rossler():
model = Rossler_UnitBox()
gen_save_dirs(model, 150)
| 31.018519 | 128 | 0.632537 |
fe042ed38e4c5060ab51fc067de2f04a765c24ea
| 11,833 |
py
|
Python
|
quidel_covidtest/delphi_quidel_covidtest/pull.py
|
qx-teo/covidcast-indicators
|
6eabe62748a206b5e6d65f9e11c65ef1c76cdb0a
|
[
"MIT"
] | null | null | null |
quidel_covidtest/delphi_quidel_covidtest/pull.py
|
qx-teo/covidcast-indicators
|
6eabe62748a206b5e6d65f9e11c65ef1c76cdb0a
|
[
"MIT"
] | 5 |
2021-08-18T17:33:13.000Z
|
2021-08-19T15:09:22.000Z
|
quidel_covidtest/delphi_quidel_covidtest/pull.py
|
qx-teo/covidcast-indicators
|
6eabe62748a206b5e6d65f9e11c65ef1c76cdb0a
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Collect and process Quidel export files."""
from os.path import join
import os
from datetime import datetime, timedelta
import boto3
import pandas as pd
import numpy as np
def get_from_s3(start_date, end_date, bucket):
"""
Get raw data from aws s3 bucket.
Args:
start_date: datetime.datetime
pull data from file tagged with date on/after the start date
end_date: datetime.datetime
pull data from file tagged with date on/before the end date
bucket: s3.Bucket
the aws s3 bucket that stores quidel data
output:
df: pd.DataFrame
time_flag: datetime.datetime
"""
time_flag = None
selected_columns = ['SofiaSerNum', 'TestDate', 'Facility', 'City',
'State', 'Zip', 'PatientAge', 'Result1',
'Result2', 'OverallResult', 'StorageDate',
'fname']
df = pd.DataFrame(columns=selected_columns)
s3_files = {}
for obj in bucket.objects.all():
if "-sars" in obj.key:
date_string = obj.key.split("/")[1]
try:
yy = int(date_string.split("_")[0])
mm = int(date_string.split("_")[1])
dd = int(date_string.split("_")[2])
received_date = datetime(yy, mm, dd)
except ValueError:
continue
if received_date not in s3_files.keys():
s3_files[received_date] = [obj.key]
else:
s3_files[received_date].append(obj.key)
n_days = (end_date - start_date).days + 1
for search_date in [start_date + timedelta(days=x) for x in range(n_days)]:
if search_date in s3_files.keys():
# Avoid appending duplicate datasets
print("Pulling data received on %s"%search_date.date())
# Fetch data received on the same day
for fn in s3_files[search_date]:
if fn in set(df["fname"].values):
continue
obj = bucket.Object(key=fn)
newdf = pd.read_csv(obj.get()["Body"],
parse_dates=["StorageDate", "TestDate"],
low_memory=False)
newdf["fname"] = fn
df = df.append(newdf[selected_columns])
time_flag = search_date
return df, time_flag
def fix_zipcode(df):
"""Fix zipcode that is 9 digit instead of 5 digit."""
zipcode5 = []
fixnum = 0
for zipcode in df['Zip'].values:
if isinstance(zipcode, str) and '-' in zipcode:
zipcode5.append(int(zipcode.split('-')[0]))
fixnum += 1
else:
zipcode = int(float(zipcode))
zipcode5.append(zipcode)
df['zip'] = zipcode5
# print('Fixing %.2f %% of the data' % (fixnum * 100 / len(zipcode5)))
return df
def fix_date(df):
"""
Remove invalid dates and select correct test date to use.
    Quidel Covid Tests are labeled with Test Date and Storage Date. In principle,
the TestDate should reflect when the test was performed and the StorageDate
when the test was logged in the MyVirena cloud storage device. We expect
that the test date should precede the storage date by several days. However,
in the actual data the test date can be far earlier than the storage date
and the test date can also occur after the storage date.
- For most of the cases, use test date as the timestamp
- Remove tests with a storage date which is earlier than the test date
    - If the storage date is more than 90 days later than the test date, the
      storage date will be adopted instead
"""
df.insert(2, "timestamp", df["TestDate"])
mask = df["TestDate"] <= df["StorageDate"]
print("Removing %.2f%% of unusual data" % ((len(df) - np.sum(mask)) * 100 / len(df)))
df = df[mask]
mask = df["StorageDate"] - df["TestDate"] > pd.Timedelta(days=90)
print("Fixing %.2f%% of outdated data" % (np.sum(mask) * 100 / len(df)))
df["timestamp"].values[mask] = df["StorageDate"].values[mask]
return df
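# Worked sketch of the rules above (dates are illustrative):
#   TestDate 2020-07-01, StorageDate 2020-07-03 -> kept, timestamp = TestDate
#   TestDate 2020-07-05, StorageDate 2020-07-03 -> dropped (storage precedes test)
#   TestDate 2020-03-01, StorageDate 2020-07-03 -> kept, timestamp = StorageDate
#                                                  (gap exceeds 90 days)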
def preprocess_new_data(start_date, end_date, params, test_mode):
"""
Pull and pre-process Quidel Covid Test data.
Drop unnecessary columns. Temporarily consider the positive rate
sensor only which is related to number of total tests and number
of positive tests.
Args:
start_date: datetime.datetime
pull data from file tagged with date on/after start date
end_date: datetime.datetime
pull data from file tagged with date on/before the end date
params: dict
read from params.json
test_mode: bool
pull raw data from s3 or not
output:
df: pd.DataFrame
        time_flag: datetime.date
the actual pull end date on which we successfully pull the data
"""
if test_mode:
test_data_dir = "./test_data/test_data.csv"
df, time_flag = pd.read_csv(
test_data_dir,
parse_dates=["StorageDate", "TestDate"]
), datetime(2020, 8, 17)
else:
# connect aws s3 bucket
aws_access_key_id = params["aws_credentials"]["aws_access_key_id"]
aws_secret_access_key = params["aws_credentials"]["aws_secret_access_key"]
bucket_name = params["bucket_name"]
s3 = boto3.resource('s3', aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key)
bucket = s3.Bucket(bucket_name)
# Get new data from s3
df, time_flag = get_from_s3(start_date, end_date, bucket)
# No new data can be pulled
if time_flag is None:
return df, time_flag
    # Fix some of the zipcodes that are 9 digits instead of 5 digits
df = fix_zipcode(df)
    # Create a timestamp column based on StorageDate and TestDate
df = fix_date(df)
# Compute overallPositive
overall_pos = df[df["OverallResult"] == "positive"].groupby(
by=["timestamp", "zip"],
as_index=False)['OverallResult'].count()
overall_pos["positiveTest"] = overall_pos["OverallResult"]
overall_pos.drop(labels="OverallResult", axis="columns", inplace=True)
# Compute overallTotal
overall_total = df.groupby(
by=["timestamp", "zip"],
as_index=False)['OverallResult'].count()
overall_total["totalTest"] = overall_total["OverallResult"]
overall_total.drop(labels="OverallResult", axis="columns", inplace=True)
# Compute numUniqueDevices
numUniqueDevices = df.groupby(
by=["timestamp", "zip"],
as_index=False)["SofiaSerNum"].agg({"SofiaSerNum": "nunique"}).rename(
columns={"SofiaSerNum": "numUniqueDevices"}
)
df_merged = overall_total.merge(
numUniqueDevices, on=["timestamp", "zip"], how="left"
).merge(
overall_pos, on=["timestamp", "zip"], how="left"
).fillna(0).drop_duplicates()
return df_merged, time_flag
def check_intermediate_file(cache_dir, pull_start_date):
"""Check whether there is a cache file containing historical data already."""
for filename in os.listdir(cache_dir):
if ".csv" in filename:
pull_start_date = datetime.strptime(filename.split("_")[2].split(".")[0],
'%Y%m%d') + timedelta(days=1)
previous_df = pd.read_csv(os.path.join(cache_dir, filename),
sep=",", parse_dates=["timestamp"])
return previous_df, pull_start_date
return None, pull_start_date
def pull_quidel_covidtest(params):
"""Pull the quidel covid test data.
Conditionally merge new data with historical data from ./cache.
Parameters:
params: dict
including all the information read from params.json
end_from_today_minus: int
report data until - X days
export_day_range: int
number of dates to report
Returns:
DataFrame:
            A data frame containing the pre-processed data with columns:
timestamp, numUniqueDevices, positiveTest, totalTest
datetime.datetime
the first date of the report
datetime.datetime
the last date of the report
"""
cache_dir = params["input_cache_dir"]
test_mode = params["test_mode"]
    # pull only new data that has not been ingested
previous_df, pull_start_date = check_intermediate_file(
cache_dir,
datetime.strptime(params["pull_start_date"], '%Y-%m-%d'))
if params["pull_end_date"] == "":
pull_end_date = datetime.today()
else:
pull_end_date = datetime.strptime(params["pull_end_date"], '%Y-%m-%d')
# Pull data from the file at 5 digit zipcode level
# Use _end_date to check the most recent date that we received data
df, _end_date = preprocess_new_data(
pull_start_date, pull_end_date, params, test_mode)
# Utilize previously stored data
if previous_df is not None:
df = previous_df.append(df).groupby(["timestamp", "zip"]).sum().reset_index()
return df, _end_date
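# Params sketch (keys inferred from the docstring and the code above; the
# values are illustrative):
#   params = {
#       "input_cache_dir": "./cache",
#       "pull_start_date": "2020-05-26",
#       "pull_end_date": "",          # empty string means "up to today"
#       "test_mode": True,            # read ./test_data instead of S3
#       "aws_credentials": {...},     # only needed when test_mode is False
#       "bucket_name": "...",
#   }
#   df, last_date = pull_quidel_covidtest(params)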
def check_export_end_date(input_export_end_date, _end_date,
end_from_today_minus):
"""
Update the export_end_date according to the data received.
By default, set the export end date to be the last pulling date - 5 days
(end_from_today_minus = 5).
Otherwise, use the required date if it is earlier than the default one.
Parameter:
input_export_end_date: str
read from params
_end_date: datetime.datetime
updated according the data received
end_from_today_minus: int
report data until - X days
Returns:
datetime.datetime
export data from which date
"""
export_end_date = _end_date - timedelta(days=end_from_today_minus)
if input_export_end_date != "":
input_export_end_date = datetime.strptime(input_export_end_date, '%Y-%m-%d')
if input_export_end_date < export_end_date:
return input_export_end_date
return export_end_date
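# Worked sketch (dates are illustrative): with _end_date = 2020-08-17 and
# end_from_today_minus = 5, the default export end date is 2020-08-12. A
# params value of "2020-08-10" overrides it (earlier), while "2020-08-15"
# is ignored in favor of 2020-08-12.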
def check_export_start_date(export_start_date, export_end_date,
export_day_range):
"""
    Ensure that the start date, end date, and day range are mutually consistent.
Parameters:
export_start_date: str
Read from params
export_end_date: datetime.datetime
Calculated according to the data received
export_day_range: int
Number of days to report
Returns:
datetime.datetime
export data until which date
"""
if export_start_date == "":
export_start_date = datetime(2020, 5, 26)
else:
export_start_date = datetime.strptime(export_start_date, '%Y-%m-%d')
# Only export data from -45 days to -5 days
if (export_end_date - export_start_date).days > export_day_range:
export_start_date = export_end_date - timedelta(days=export_day_range)
if export_start_date < datetime(2020, 5, 26):
return datetime(2020, 5, 26)
return export_start_date
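# Worked sketch (dates are illustrative): with export_end_date = 2020-08-12
# and export_day_range = 40, a requested start of 2020-06-01 is pushed forward
# to 2020-07-03 (end minus 40 days); any start earlier than 2020-05-26 is
# clamped to 2020-05-26.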
def update_cache_file(df, _end_date, cache_dir):
"""
Update cache file. Remove the old one, export the new one.
Parameter:
df: pd.DataFrame
Pre-process file at ZipCode level
_end_date:
The most recent date when the raw data is received
cache_dir:
./cache where the cache file is stored
"""
for fn in os.listdir(cache_dir):
if ".csv" in fn:
os.remove(join(cache_dir, fn))
df.to_csv(join(cache_dir, "pulled_until_%s.csv") % _end_date.strftime("%Y%m%d"), index=False)
| 36.862928 | 97 | 0.62444 |
bb0bf7b1e87fce84babcda860dc288ec08f6f188
| 731 |
py
|
Python
|
proxy/http/exception/__init__.py
|
zanachka/proxy.py
|
ab5c155213115d1664ce429ec155184d16ca9be6
|
[
"BSD-3-Clause"
] | null | null | null |
proxy/http/exception/__init__.py
|
zanachka/proxy.py
|
ab5c155213115d1664ce429ec155184d16ca9be6
|
[
"BSD-3-Clause"
] | null | null | null |
proxy/http/exception/__init__.py
|
zanachka/proxy.py
|
ab5c155213115d1664ce429ec155184d16ca9be6
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
proxy.py
~~~~~~~~
⚡⚡⚡ Fast, Lightweight, Pluggable, TLS interception capable proxy server focused on
Network monitoring, controls & Application development, testing, debugging.
:copyright: (c) 2013-present by Abhinav Singh and contributors.
:license: BSD, see LICENSE for more details.
.. spelling::
http
Submodules
"""
from .base import HttpProtocolException
from .http_request_rejected import HttpRequestRejected
from .proxy_auth_failed import ProxyAuthenticationFailed
from .proxy_conn_failed import ProxyConnectionFailed
__all__ = [
'HttpProtocolException',
'HttpRequestRejected',
'ProxyAuthenticationFailed',
'ProxyConnectionFailed',
]
| 27.074074 | 86 | 0.725034 |
0180f2960398af8e182f2162cc9988bb4e192dbb
| 2,811 |
py
|
Python
|
Examples/Message Types/DirectMESSAGE/main_send_file.py
|
davidhozic/Discord-Shilling-Bot
|
9649344804bf19cdd1a608cecae34939e19af841
|
[
"MIT"
] | null | null | null |
Examples/Message Types/DirectMESSAGE/main_send_file.py
|
davidhozic/Discord-Shilling-Bot
|
9649344804bf19cdd1a608cecae34939e19af841
|
[
"MIT"
] | 1 |
2022-02-13T21:35:57.000Z
|
2022-02-13T21:36:11.000Z
|
Examples/Message Types/DirectMESSAGE/main_send_file.py
|
davidhozic/Discord-Shilling-Framework
|
9649344804bf19cdd1a608cecae34939e19af841
|
[
"MIT"
] | null | null | null |
import framework, secret
from framework import discord
############################################################################################
# GUILD MESSAGES DEFINITION #
############################################################################################
# File object representing file that will be sent
l_file = framework.FILE("./Examples/main_send_file.py")
guilds = [
framework.USER(
        user_id=123456789,                                        # ID of the user to send direct messages to
messages_to_send=[ # List MESSAGE objects
framework.DirectMESSAGE(
                start_period=None,                          # If None, messages will be sent on a fixed period (end period)
end_period=15, # If start_period is None, it dictates the fixed sending period,
# If start period is defined, it dictates the maximum limit of randomized period
                data=l_file,                                # Data you want to send to the function (Can be of types : str, embed, file, list of types to the left
# or function that returns any of above types(or returns None if you don't have any data to send yet),
# where if you pass a function you need to use the framework.FUNCTION decorator on top of it ).
mode="send", # "send" will send a new message every time, "edit" will edit the previous message, "clear-send" will delete
# the previous message and then send a new one
start_now=True # Start sending now (True) or wait until period
),
],
generate_log=True ## Generate file log of sent messages (and failed attempts) for this user
)
]
############################################################################################
if __name__ == "__main__":
framework.run( token=secret.C_TOKEN, # MANDATORY
intents=discord.Intents.default(), # OPTIONAL (see https://docs.pycord.dev/en/master/intents.html)
server_list=guilds, # MANDATORY
is_user=False, # OPTIONAL
user_callback=None, # OPTIONAL
server_log_output="History", # OPTIONAL
debug=True) # OPTIONAL
| 65.372093 | 168 | 0.421558 |
549224fa616b73372bc462cbf4c5129b39b39bd5
| 474 |
py
|
Python
|
res/paid-origin/Lecture3/DoYourThing.py
|
mori-c/cs106a
|
b7764d9dec1d3abb6968ded265a0c4c4d96920c5
|
[
"MIT"
] | 1 |
2020-04-13T18:38:26.000Z
|
2020-04-13T18:38:26.000Z
|
res/paid-origin/Lecture3/DoYourThing.py
|
mori-c/cs106a
|
b7764d9dec1d3abb6968ded265a0c4c4d96920c5
|
[
"MIT"
] | null | null | null |
res/paid-origin/Lecture3/DoYourThing.py
|
mori-c/cs106a
|
b7764d9dec1d3abb6968ded265a0c4c4d96920c5
|
[
"MIT"
] | null | null | null |
from karel.stanfordkarel import *
def main():
move()
while beepers_present():
pick_beeper()
move()
    put_beeper()
    put_beeper()
turn_around()
move()
turn_around()
move()
while beepers_present():
pick_beeper()
turn_around()
move()
turn_around()
        put_beeper()
move()
turn_around()
move()
turn_around()
turn_around()
move()
turn_around()
| 16.928571 | 33 | 0.514768 |
e2f85a25ccef97e544c1b7f41fc37be2520cf6b7
| 5,230 |
py
|
Python
|
test/unit_tests.py
|
djsilva99/heatrapy
|
91902e8fd9e216638855856ed7c0757bbade11c2
|
[
"MIT"
] | 31 |
2019-03-21T15:41:18.000Z
|
2022-03-21T15:41:49.000Z
|
test/unit_tests.py
|
danieljosesilva/heatrapy
|
5d1297d67ea237b2c6537133ace8c1f4a6091518
|
[
"MIT"
] | 9 |
2020-09-01T08:50:52.000Z
|
2022-03-12T00:55:53.000Z
|
test/unit_tests.py
|
djsilva99/heatrapy
|
91902e8fd9e216638855856ed7c0757bbade11c2
|
[
"MIT"
] | 7 |
2020-03-10T19:34:32.000Z
|
2022-03-28T01:12:59.000Z
|
"""unit_tests.
Contains the unit tests for the heatrapy modules
"""
import unittest
from .. import heatrapy as htp
class SingleObjects1D(unittest.TestCase):
"""Test all single_objects components."""
def test_implicit_general(self):
"""Test singleObject with the implicit_general solver."""
solution = 245
example = htp.SingleObject1D(300, boundaries=(0, 200), draw=[])
example.compute(30, 5, solver='implicit_general')
self.assertEqual(int(example.object.temperature[5][0]), solution)
def test_explicit_general(self):
"""Test singleObject with the explicit_general solver."""
solution = 245
example = htp.SingleObject1D(300, boundaries=(0, 200), draw=[])
example.compute(30, 5, solver='explicit_general')
self.assertEqual(int(example.object.temperature[5][0]), solution)
def test_explicit_k(self):
"""Test singleObject with the explicit_k(k) solver."""
solution = 270
example = htp.SingleObject1D(
300, boundaries=(0, 200), materials=('Cu', 'Gd_mag'),
borders=(1, 11, 22), materials_order=(0, 1), draw=[]
)
example.compute(2000, 5, solver='explicit_k(x)')
self.assertEqual(int(example.object.temperature[5][0]), solution)
def test_implicit_k(self):
"""Test singleObject with the implicit_k(k) solver."""
solution = 270
example = htp.SingleObject1D(
300, boundaries=(0, 200), materials=('Cu', 'Gd_mag'),
borders=(1, 11, 22), materials_order=(0, 1), draw=[]
)
example.compute(2000, 5, solver='implicit_k(x)')
self.assertEqual(int(example.object.temperature[5][0]), solution)
class SystemObjects1D(unittest.TestCase):
"""Test all system_objects components."""
def test_implicit_general(self):
"""Test systemObjects with the implicit_general solver."""
solution = 246
example = htp.SystemObjects1D()
example.objects[0].temperature = [[200., 200.] for i in range(12)]
example.contact_add(((0, 3), (1, 3), 1e6))
example.compute(200, 5, solver='implicit_general')
self.assertEqual(int(example.objects[0].temperature[5][0]), solution)
def test_explicit_general(self):
"""Test systemObjects with the explicit_general solver."""
solution = 246
example = htp.SystemObjects1D()
example.objects[0].temperature = [[200., 200.] for i in range(12)]
example.contact_add(((0, 3), (1, 3), 1e6))
example.compute(200, 5, solver='explicit_general')
self.assertEqual(int(example.objects[0].temperature[5][0]), solution)
def test_implicit_k(self):
"""Test systemObjects with the implicit_k(x) solver."""
solution = 246
example = htp.SystemObjects1D()
example.objects[0].temperature = [[200., 200.] for i in range(12)]
example.contact_add(((0, 3), (1, 3), 1e6))
example.compute(200, 5, solver='implicit_k(x)')
self.assertEqual(int(example.objects[0].temperature[5][0]), solution)
def test_explicit_k(self):
"""Test systemObjects with the explicit_k(x) solver."""
solution = 246
example = htp.SystemObjects1D()
example.objects[0].temperature = [[200., 200.] for i in range(12)]
example.contact_add(((0, 3), (1, 3), 1e6))
example.compute(200, 5, solver='explicit_k(x)')
self.assertEqual(int(example.objects[0].temperature[5][0]), solution)
class SingleObjects2D(unittest.TestCase):
"""Test all single_objects components."""
def test_explicit_general(self):
"""Test singleObject with the implicit_general solver."""
solution = 245
example = htp.SingleObject2D(300, boundaries=(200, 0, 0, 0), draw=[])
example.compute(30, 5, solver='explicit_general')
self.assertEqual(int(example.object.temperature[5][5][0]), solution)
def test_explicit_k(self):
"""Test singleObject with the implicit_general solver."""
solution = 245
example = htp.SingleObject2D(300, boundaries=(200, 0, 0, 0), draw=[])
example.compute(30, 5, solver='explicit_k(x)')
self.assertEqual(int(example.object.temperature[5][5][0]), solution)
class SystemObjects2D(unittest.TestCase):
"""Test all system_objects components."""
def test_explicit_general(self):
"""Test systemObjects with the implicit_general solver."""
solution = 275
example = htp.SystemObjects2D(boundaries=((0, 0, 0, 0), (250, 0, 0, 0)))
example.contact_add(((0, (4, 4)), (1, (7, 7)), 100000))
example.compute(10, 5, solver='explicit_general')
self.assertEqual(int(example.objects[1].temperature[3][3][0]), solution)
def test_explicit_k(self):
"""Test systemObjects with the implicit_general solver."""
solution = 275
example = htp.SystemObjects2D(boundaries=((0, 0, 0, 0), (250, 0, 0, 0)))
example.contact_add(((0, (4, 4)), (1, (7, 7)), 100000))
example.compute(10, 5, solver='explicit_k(x)')
self.assertEqual(int(example.objects[1].temperature[3][3][0]), solution)
if __name__ == '__main__':
unittest.main()
| 40.859375 | 80 | 0.635946 |
fb6988e06c56e0bfbeb295df47a0d57e73bab159
| 986 |
py
|
Python
|
eclcli/compute/v2/hypervisor_stats.py
|
hanasuke/eclcli
|
a72191799986a02596d0d467253fd9f5ee03c5c8
|
[
"Apache-2.0"
] | 32 |
2016-08-31T04:12:40.000Z
|
2020-12-11T04:49:57.000Z
|
eclcli/compute/v2/hypervisor_stats.py
|
hanasuke/eclcli
|
a72191799986a02596d0d467253fd9f5ee03c5c8
|
[
"Apache-2.0"
] | 27 |
2016-09-06T07:50:36.000Z
|
2021-09-14T09:46:03.000Z
|
eclcli/compute/v2/hypervisor_stats.py
|
hanasuke/eclcli
|
a72191799986a02596d0d467253fd9f5ee03c5c8
|
[
"Apache-2.0"
] | 24 |
2016-09-02T01:09:09.000Z
|
2021-01-19T09:14:16.000Z
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Hypervisor Stats action implementations"""
import six
from eclcli.common import command
class ShowHypervisorStats(command.ShowOne):
"""Display hypervisor stats details"""
def take_action(self, parsed_args):
compute_client = self.app.client_manager.compute
hypervisor_stats = compute_client.hypervisors.statistics().to_dict()
return zip(*sorted(six.iteritems(hypervisor_stats)))
| 32.866667 | 77 | 0.739351 |
4be27f649bc478347f17eb5baf3b44e34405cc43
| 1,702 |
py
|
Python
|
deep-learning-for-image-processing-master/pytorch_classification/Test8_densenet/predict.py
|
zpwithme/zzzzpppp
|
0f5df647f1e9d6cb8c01b3fc7df25ee543714af3
|
[
"MIT"
] | 1 |
2021-07-10T14:16:48.000Z
|
2021-07-10T14:16:48.000Z
|
deep-learning-for-image-processing-master/pytorch_classification/Test8_densenet/predict.py
|
zpwithme/zzzzpppp
|
0f5df647f1e9d6cb8c01b3fc7df25ee543714af3
|
[
"MIT"
] | null | null | null |
deep-learning-for-image-processing-master/pytorch_classification/Test8_densenet/predict.py
|
zpwithme/zzzzpppp
|
0f5df647f1e9d6cb8c01b3fc7df25ee543714af3
|
[
"MIT"
] | 1 |
2021-07-10T14:16:51.000Z
|
2021-07-10T14:16:51.000Z
|
import os
import json
import torch
from PIL import Image
from torchvision import transforms
import matplotlib.pyplot as plt
from model import densenet121
def main():
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
data_transform = transforms.Compose(
[transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])
# load image
img_path = "../tulip.jpg"
    assert os.path.exists(img_path), "file: '{}' does not exist.".format(img_path)
img = Image.open(img_path)
plt.imshow(img)
# [N, C, H, W]
img = data_transform(img)
# expand batch dimension
img = torch.unsqueeze(img, dim=0)
# read class_indict
json_path = './class_indices.json'
    assert os.path.exists(json_path), "file: '{}' does not exist.".format(json_path)
    with open(json_path, "r") as json_file:
        class_indict = json.load(json_file)
# create model
model = densenet121(num_classes=5).to(device)
# load model weights
model_weight_path = "./weights/model-3.pth"
model.load_state_dict(torch.load(model_weight_path, map_location=device))
model.eval()
with torch.no_grad():
# predict class
output = torch.squeeze(model(img.to(device))).cpu()
predict = torch.softmax(output, dim=0)
predict_cla = torch.argmax(predict).numpy()
print_res = "class: {} prob: {:.3}".format(class_indict[str(predict_cla)],
predict[predict_cla].numpy())
plt.title(print_res)
print(print_res)
plt.show()
if __name__ == '__main__':
main()
| 28.847458 | 84 | 0.639248 |
e19fe016cc942d75044a04f4e954aaa10202b0d0
| 22,288 |
py
|
Python
|
python/dask_cudf/dask_cudf/tests/test_core.py
|
sperlingxx/cudf
|
c681211df6253e1ceee9203658108980e7e93e3c
|
[
"Apache-2.0"
] | 1 |
2021-12-17T19:28:00.000Z
|
2021-12-17T19:28:00.000Z
|
python/dask_cudf/dask_cudf/tests/test_core.py
|
sperlingxx/cudf
|
c681211df6253e1ceee9203658108980e7e93e3c
|
[
"Apache-2.0"
] | 1 |
2021-03-10T20:28:23.000Z
|
2021-03-25T15:58:47.000Z
|
python/dask_cudf/dask_cudf/tests/test_core.py
|
sperlingxx/cudf
|
c681211df6253e1ceee9203658108980e7e93e3c
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2021, NVIDIA CORPORATION.
import random
import cupy as cp
import numpy as np
import pandas as pd
import pytest
import dask
from dask import dataframe as dd
from dask.dataframe.core import make_meta, meta_nonempty
from dask.utils import M
import cudf
import dask_cudf as dgd
def test_from_cudf():
np.random.seed(0)
df = pd.DataFrame(
{
"x": np.random.randint(0, 5, size=10000),
"y": np.random.normal(size=10000),
}
)
gdf = cudf.DataFrame.from_pandas(df)
    # Test a simple roundtrip to/from dask
ingested = dd.from_pandas(gdf, npartitions=2)
dd.assert_eq(ingested, df)
# Test conversion to dask.dataframe
ddf = ingested.to_dask_dataframe()
dd.assert_eq(ddf, df)
def test_from_cudf_multiindex_raises():
df = cudf.DataFrame({"x": list("abc"), "y": [1, 2, 3], "z": [1, 2, 3]})
with pytest.raises(NotImplementedError):
# dask_cudf does not support MultiIndex yet
dgd.from_cudf(df.set_index(["x", "y"]))
def test_from_cudf_with_generic_idx():
cdf = cudf.DataFrame(
{
"a": list(range(20)),
"b": list(reversed(range(20))),
"c": list(range(20)),
}
)
ddf = dgd.from_cudf(cdf, npartitions=2)
assert isinstance(ddf.index.compute(), cudf.core.index.GenericIndex)
dd.assert_eq(ddf.loc[1:2, ["a"]], cdf.loc[1:2, ["a"]])
def _fragmented_gdf(df, nsplit):
n = len(df)
# Split dataframe in *nsplit*
subdivsize = n // nsplit
starts = [i * subdivsize for i in range(nsplit)]
ends = starts[1:] + [None]
frags = [df[s:e] for s, e in zip(starts, ends)]
return frags
def test_query():
np.random.seed(0)
df = pd.DataFrame(
{"x": np.random.randint(0, 5, size=10), "y": np.random.normal(size=10)}
)
gdf = cudf.DataFrame.from_pandas(df)
expr = "x > 2"
dd.assert_eq(gdf.query(expr), df.query(expr))
queried = dd.from_pandas(gdf, npartitions=2).query(expr)
got = queried
expect = gdf.query(expr)
dd.assert_eq(got, expect)
def test_query_local_dict():
np.random.seed(0)
df = pd.DataFrame(
{"x": np.random.randint(0, 5, size=10), "y": np.random.normal(size=10)}
)
gdf = cudf.DataFrame.from_pandas(df)
ddf = dgd.from_cudf(gdf, npartitions=2)
val = 2
gdf_queried = gdf.query("x > @val")
ddf_queried = ddf.query("x > @val", local_dict={"val": val})
dd.assert_eq(gdf_queried, ddf_queried)
def test_head():
np.random.seed(0)
df = pd.DataFrame(
{
"x": np.random.randint(0, 5, size=100),
"y": np.random.normal(size=100),
}
)
gdf = cudf.DataFrame.from_pandas(df)
dgf = dd.from_pandas(gdf, npartitions=2)
dd.assert_eq(dgf.head(), df.head())
def test_from_dask_dataframe():
np.random.seed(0)
df = pd.DataFrame(
{"x": np.random.randint(0, 5, size=20), "y": np.random.normal(size=20)}
)
ddf = dd.from_pandas(df, npartitions=2)
dgdf = ddf.map_partitions(cudf.from_pandas)
got = dgdf.compute().to_pandas()
expect = df
dd.assert_eq(got, expect)
@pytest.mark.parametrize("nelem", [10, 200, 1333])
@pytest.mark.parametrize("divisions", [None, "quantile"])
def test_set_index(nelem, divisions):
with dask.config.set(scheduler="single-threaded"):
np.random.seed(0)
# Use unique index range as the sort may not be stable-ordering
x = np.arange(nelem)
np.random.shuffle(x)
df = pd.DataFrame(
{"x": x, "y": np.random.randint(0, nelem, size=nelem)}
)
ddf = dd.from_pandas(df, npartitions=2)
dgdf = ddf.map_partitions(cudf.from_pandas)
expect = ddf.set_index("x")
got = dgdf.set_index("x", divisions=divisions)
dd.assert_eq(expect, got, check_index=False, check_divisions=False)
@pytest.mark.parametrize("by", ["a", "b"])
@pytest.mark.parametrize("nelem", [10, 500])
@pytest.mark.parametrize("nparts", [1, 10])
def test_set_index_quantile(nelem, nparts, by):
df = cudf.DataFrame()
df["a"] = np.ascontiguousarray(np.arange(nelem)[::-1])
df["b"] = np.random.choice(cudf.datasets.names, size=nelem)
ddf = dd.from_pandas(df, npartitions=nparts)
got = ddf.set_index(by, divisions="quantile")
expect = df.sort_values(by=by).set_index(by)
dd.assert_eq(got, expect)
def assert_frame_equal_by_index_group(expect, got):
assert sorted(expect.columns) == sorted(got.columns)
assert sorted(set(got.index)) == sorted(set(expect.index))
    # Note the set_index sort is not stable.
unique_values = sorted(set(got.index))
for iv in unique_values:
sr_expect = expect.loc[[iv]]
sr_got = got.loc[[iv]]
for k in expect.columns:
# Sort each column before we compare them
sorted_expect = sr_expect.sort_values(k)[k]
sorted_got = sr_got.sort_values(k)[k]
np.testing.assert_array_equal(sorted_expect, sorted_got)
@pytest.mark.parametrize("nelem", [10, 200, 1333])
def test_set_index_2(nelem):
with dask.config.set(scheduler="single-threaded"):
np.random.seed(0)
df = pd.DataFrame(
{
"x": 100 + np.random.randint(0, nelem // 2, size=nelem),
"y": np.random.normal(size=nelem),
}
)
expect = df.set_index("x").sort_index()
dgf = dd.from_pandas(cudf.DataFrame.from_pandas(df), npartitions=4)
res = dgf.set_index("x") # sort by default
got = res.compute().to_pandas()
assert_frame_equal_by_index_group(expect, got)
@pytest.mark.xfail(reason="dask's index name '__dask_cudf.index' is correct")
def test_set_index_w_series():
with dask.config.set(scheduler="single-threaded"):
nelem = 20
np.random.seed(0)
df = pd.DataFrame(
{
"x": 100 + np.random.randint(0, nelem // 2, size=nelem),
"y": np.random.normal(size=nelem),
}
)
expect = df.set_index(df.x).sort_index()
dgf = dd.from_pandas(cudf.DataFrame.from_pandas(df), npartitions=4)
res = dgf.set_index(dgf.x) # sort by default
got = res.compute().to_pandas()
dd.assert_eq(expect, got)
def test_set_index_sorted():
with dask.config.set(scheduler="single-threaded"):
df1 = pd.DataFrame({"val": [4, 3, 2, 1, 0], "id": [0, 1, 3, 5, 7]})
ddf1 = dd.from_pandas(df1, npartitions=2)
gdf1 = cudf.from_pandas(df1)
gddf1 = dgd.from_cudf(gdf1, npartitions=2)
expect = ddf1.set_index("id", sorted=True)
got = gddf1.set_index("id", sorted=True)
dd.assert_eq(expect, got)
with pytest.raises(ValueError):
# Cannot set `sorted=True` for non-sorted column
gddf1.set_index("val", sorted=True)
@pytest.mark.parametrize("nelem", [10, 200, 1333])
@pytest.mark.parametrize("index", [None, "myindex"])
def test_rearrange_by_divisions(nelem, index):
with dask.config.set(scheduler="single-threaded"):
np.random.seed(0)
df = pd.DataFrame(
{
"x": np.random.randint(0, 20, size=nelem),
"y": np.random.normal(size=nelem),
"z": np.random.choice(["dog", "cat", "bird"], nelem),
}
)
df["z"] = df["z"].astype("category")
ddf1 = dd.from_pandas(df, npartitions=4)
gdf1 = dgd.from_cudf(cudf.DataFrame.from_pandas(df), npartitions=4)
ddf1.index.name = index
gdf1.index.name = index
divisions = (0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20)
expect = dd.shuffle.rearrange_by_divisions(
ddf1, "x", divisions=divisions, shuffle="tasks"
)
result = dd.shuffle.rearrange_by_divisions(
gdf1, "x", divisions=divisions, shuffle="tasks"
)
dd.assert_eq(expect, result)
def test_assign():
np.random.seed(0)
df = pd.DataFrame(
{"x": np.random.randint(0, 5, size=20), "y": np.random.normal(size=20)}
)
dgf = dd.from_pandas(cudf.DataFrame.from_pandas(df), npartitions=2)
pdcol = pd.Series(np.arange(20) + 1000)
newcol = dd.from_pandas(cudf.Series(pdcol), npartitions=dgf.npartitions)
got = dgf.assign(z=newcol)
dd.assert_eq(got.loc[:, ["x", "y"]], df)
np.testing.assert_array_equal(got["z"].compute().to_array(), pdcol)
@pytest.mark.parametrize("data_type", ["int8", "int16", "int32", "int64"])
def test_setitem_scalar_integer(data_type):
np.random.seed(0)
scalar = np.random.randint(0, 100, dtype=data_type)
df = pd.DataFrame(
{"x": np.random.randint(0, 5, size=20), "y": np.random.normal(size=20)}
)
dgf = dd.from_pandas(cudf.DataFrame.from_pandas(df), npartitions=2)
df["z"] = scalar
dgf["z"] = scalar
got = dgf.compute().to_pandas()
np.testing.assert_array_equal(got["z"], df["z"])
@pytest.mark.parametrize("data_type", ["float32", "float64"])
def test_setitem_scalar_float(data_type):
np.random.seed(0)
scalar = np.random.randn(1).astype(data_type)[0]
df = pd.DataFrame(
{"x": np.random.randint(0, 5, size=20), "y": np.random.normal(size=20)}
)
dgf = dd.from_pandas(cudf.DataFrame.from_pandas(df), npartitions=2)
df["z"] = scalar
dgf["z"] = scalar
got = dgf.compute().to_pandas()
np.testing.assert_array_equal(got["z"], df["z"])
def test_setitem_scalar_datetime():
np.random.seed(0)
scalar = np.int64(np.random.randint(0, 100)).astype("datetime64[ms]")
df = pd.DataFrame(
{"x": np.random.randint(0, 5, size=20), "y": np.random.normal(size=20)}
)
dgf = dd.from_pandas(cudf.DataFrame.from_pandas(df), npartitions=2)
df["z"] = scalar
dgf["z"] = scalar
got = dgf.compute().to_pandas()
np.testing.assert_array_equal(got["z"], df["z"])
@pytest.mark.parametrize(
"func",
[
lambda: pd._testing.makeDataFrame().reset_index(),
pd._testing.makeDataFrame,
pd._testing.makeMixedDataFrame,
pd._testing.makeObjectSeries,
pd._testing.makeTimeSeries,
],
)
def test_repr(func):
pdf = func()
try:
gdf = cudf.from_pandas(pdf)
except Exception:
raise pytest.xfail()
# gddf = dd.from_pandas(gdf, npartitions=3, sort=False) # TODO
gddf = dd.from_pandas(gdf, npartitions=3, sort=False)
assert repr(gddf)
if hasattr(pdf, "_repr_html_"):
assert gddf._repr_html_()
@pytest.mark.skip(reason="datetime indexes not fully supported in cudf")
@pytest.mark.parametrize("start", ["1d", "5d", "1w", "12h"])
@pytest.mark.parametrize("stop", ["1d", "3d", "8h"])
def test_repartition_timeseries(start, stop):
# This test is currently absurdly slow. It should not be unskipped without
# slimming it down.
pdf = dask.datasets.timeseries(
"2000-01-01",
"2000-01-31",
freq="1s",
partition_freq=start,
dtypes={"x": int, "y": float},
)
gdf = pdf.map_partitions(cudf.DataFrame.from_pandas)
a = pdf.repartition(freq=stop)
b = gdf.repartition(freq=stop)
assert a.divisions == b.divisions
dd.utils.assert_eq(a, b)
@pytest.mark.parametrize("start", [1, 2, 5])
@pytest.mark.parametrize("stop", [1, 3, 7])
def test_repartition_simple_divisions(start, stop):
pdf = pd.DataFrame({"x": range(100)})
pdf = dd.from_pandas(pdf, npartitions=start)
gdf = pdf.map_partitions(cudf.DataFrame.from_pandas)
a = pdf.repartition(npartitions=stop)
b = gdf.repartition(npartitions=stop)
assert a.divisions == b.divisions
dd.assert_eq(a, b)
@pytest.mark.parametrize("npartitions", [2, 17, 20])
def test_repartition_hash_staged(npartitions):
by = ["b"]
datarange = 35
size = 100
gdf = cudf.DataFrame(
{
"a": np.arange(size, dtype="int64"),
"b": np.random.randint(datarange, size=size),
}
)
# WARNING: Specific npartitions-max_branch combination
# was specifically chosen to cover changes in #4676
npartitions_initial = 17
ddf = dgd.from_cudf(gdf, npartitions=npartitions_initial)
ddf_new = ddf.shuffle(
on=by, ignore_index=True, npartitions=npartitions, max_branch=4
)
# Make sure we are getting a dask_cudf dataframe
assert type(ddf_new) == type(ddf)
# Check that the length was preserved
assert len(ddf_new) == len(ddf)
# Check that the partitions have unique keys,
# and that the key values are preserved
expect_unique = gdf[by].drop_duplicates().sort_values(by)
got_unique = cudf.concat(
[
part[by].compute().drop_duplicates()
for part in ddf_new[by].partitions
],
ignore_index=True,
).sort_values(by)
dd.assert_eq(got_unique, expect_unique, check_index=False)
@pytest.mark.parametrize("by", [["b"], ["c"], ["d"], ["b", "c"]])
@pytest.mark.parametrize("npartitions", [3, 4, 5])
@pytest.mark.parametrize("max_branch", [3, 32])
def test_repartition_hash(by, npartitions, max_branch):
npartitions_i = 4
datarange = 26
size = 100
gdf = cudf.DataFrame(
{
"a": np.arange(0, stop=size, dtype="int64"),
"b": np.random.randint(datarange, size=size),
"c": np.random.choice(list("abcdefgh"), size=size),
"d": np.random.choice(np.arange(26), size=size),
}
)
gdf.d = gdf.d.astype("datetime64[ms]")
ddf = dgd.from_cudf(gdf, npartitions=npartitions_i)
ddf_new = ddf.shuffle(
on=by,
ignore_index=True,
npartitions=npartitions,
max_branch=max_branch,
)
# Check that the length was preserved
assert len(ddf_new) == len(ddf)
# Check that the partitions have unique keys,
# and that the key values are preserved
expect_unique = gdf[by].drop_duplicates().sort_values(by)
got_unique = cudf.concat(
[
part[by].compute().drop_duplicates()
for part in ddf_new[by].partitions
],
ignore_index=True,
).sort_values(by)
dd.assert_eq(got_unique, expect_unique, check_index=False)
@pytest.fixture
def pdf():
return pd.DataFrame(
{"x": [1, 2, 3, 4, 5, 6], "y": [11.0, 12.0, 13.0, 14.0, 15.0, 16.0]}
)
@pytest.fixture
def gdf(pdf):
return cudf.from_pandas(pdf)
@pytest.fixture
def ddf(pdf):
return dd.from_pandas(pdf, npartitions=3)
@pytest.fixture
def gddf(gdf):
return dd.from_pandas(gdf, npartitions=3)
@pytest.mark.parametrize(
"func",
[
lambda df: df + 1,
lambda df: df.index,
lambda df: df.x.sum(),
lambda df: df.x.astype(float),
lambda df: df.assign(z=df.x.astype("int")),
],
)
def test_unary_ops(func, gdf, gddf):
p = func(gdf)
g = func(gddf)
# Fixed in https://github.com/dask/dask/pull/4657
if isinstance(p, cudf.Index):
from packaging import version
if version.parse(dask.__version__) < version.parse("1.1.6"):
pytest.skip(
"dask.dataframe assert_eq index check hardcoded to "
"pandas prior to 1.1.6 release"
)
dd.assert_eq(p, g, check_names=False)
@pytest.mark.parametrize("series", [True, False])
def test_concat(gdf, gddf, series):
if series:
gdf = gdf.x
gddf = gddf.x
a = (
cudf.concat([gdf, gdf + 1, gdf + 2])
.sort_values()
.reset_index(drop=True)
)
b = (
dd.concat([gddf, gddf + 1, gddf + 2], interleave_partitions=True)
.compute()
.sort_values()
.reset_index(drop=True)
)
else:
a = (
cudf.concat([gdf, gdf + 1, gdf + 2])
.sort_values("x")
.reset_index(drop=True)
)
b = (
dd.concat([gddf, gddf + 1, gddf + 2], interleave_partitions=True)
.compute()
.sort_values("x")
.reset_index(drop=True)
)
dd.assert_eq(a, b)
def test_boolean_index(gdf, gddf):
gdf2 = gdf[gdf.x > 2]
gddf2 = gddf[gddf.x > 2]
dd.assert_eq(gdf2, gddf2)
def test_drop(gdf, gddf):
gdf2 = gdf.drop(columns="x")
gddf2 = gddf.drop(columns="x").compute()
dd.assert_eq(gdf2, gddf2)
@pytest.mark.parametrize("deep", [True, False])
@pytest.mark.parametrize("index", [True, False])
def test_memory_usage(gdf, gddf, index, deep):
dd.assert_eq(
gdf.memory_usage(deep=deep, index=index),
gddf.memory_usage(deep=deep, index=index),
)
@pytest.mark.parametrize("index", [True, False])
def test_hash_object_dispatch(index):
obj = cudf.DataFrame(
{"x": ["a", "b", "c"], "y": [1, 2, 3], "z": [1, 1, 0]}, index=[2, 4, 6]
)
# DataFrame
result = dd.utils.hash_object_dispatch(obj, index=index)
expected = dgd.backends.hash_object_cudf(obj, index=index)
assert isinstance(result, cudf.Series)
dd.assert_eq(result, expected)
# Series
result = dd.utils.hash_object_dispatch(obj["x"], index=index)
expected = dgd.backends.hash_object_cudf(obj["x"], index=index)
assert isinstance(result, cudf.Series)
dd.assert_eq(result, expected)
# DataFrame with MultiIndex
obj_multi = obj.set_index(["x", "z"], drop=True)
result = dd.utils.hash_object_dispatch(obj_multi, index=index)
expected = dgd.backends.hash_object_cudf(obj_multi, index=index)
assert isinstance(result, cudf.Series)
dd.assert_eq(result, expected)
@pytest.mark.parametrize(
"index",
[
"int8",
"int32",
"int64",
"float64",
"strings",
"cats",
"time_s",
"time_ms",
"time_ns",
["int32", "int64"],
["int8", "float64", "strings"],
["cats", "int8", "float64"],
["time_ms", "cats"],
],
)
def test_make_meta_backends(index):
dtypes = ["int8", "int32", "int64", "float64"]
df = cudf.DataFrame(
{dt: np.arange(start=0, stop=3, dtype=dt) for dt in dtypes}
)
df["strings"] = ["cat", "dog", "fish"]
df["cats"] = df["strings"].astype("category")
df["time_s"] = np.array(
["2018-10-07", "2018-10-08", "2018-10-09"], dtype="datetime64[s]"
)
df["time_ms"] = df["time_s"].astype("datetime64[ms]")
df["time_ns"] = df["time_s"].astype("datetime64[ns]")
df = df.set_index(index)
# Check "empty" metadata types
chk_meta = make_meta(df)
dd.assert_eq(chk_meta.dtypes, df.dtypes)
# Check "non-empty" metadata types
chk_meta_nonempty = meta_nonempty(df)
dd.assert_eq(chk_meta.dtypes, chk_meta_nonempty.dtypes)
# Check dask code path if not MultiIndex
if not isinstance(df.index, cudf.MultiIndex):
ddf = dgd.from_cudf(df, npartitions=1)
# Check "empty" metadata types
dd.assert_eq(ddf._meta.dtypes, df.dtypes)
# Check "non-empty" metadata types
dd.assert_eq(ddf._meta.dtypes, ddf._meta_nonempty.dtypes)
@pytest.mark.parametrize(
"data",
[
pd.Series([], dtype="float64"),
pd.DataFrame({"abc": [], "xyz": []}),
pd.Series([1, 2, 10, 11]),
pd.DataFrame({"abc": [1, 2, 10, 11], "xyz": [100, 12, 120, 1]}),
],
)
def test_dataframe_series_replace(data):
pdf = data.copy()
gdf = cudf.from_pandas(pdf)
ddf = dgd.from_cudf(gdf, npartitions=5)
dd.assert_eq(ddf.replace(1, 2), pdf.replace(1, 2))
def test_dataframe_assign_col():
df = cudf.DataFrame(list(range(100)))
pdf = pd.DataFrame(list(range(100)))
ddf = dgd.from_cudf(df, npartitions=4)
ddf["fold"] = 0
ddf["fold"] = ddf["fold"].map_partitions(
lambda cudf_df: cp.random.randint(0, 4, len(cudf_df))
)
pddf = dd.from_pandas(pdf, npartitions=4)
pddf["fold"] = 0
pddf["fold"] = pddf["fold"].map_partitions(
lambda p_df: np.random.randint(0, 4, len(p_df))
)
dd.assert_eq(ddf[0], pddf[0])
dd.assert_eq(len(ddf["fold"]), len(pddf["fold"]))
def test_dataframe_set_index():
random.seed(0)
df = cudf.datasets.randomdata(26, dtypes={"a": float, "b": int})
df["str"] = list("abcdefghijklmnopqrstuvwxyz")
pdf = df.to_pandas()
ddf = dgd.from_cudf(df, npartitions=4)
ddf = ddf.set_index("str")
pddf = dd.from_pandas(pdf, npartitions=4)
pddf = pddf.set_index("str")
from cudf.tests.utils import assert_eq
assert_eq(ddf.compute(), pddf.compute())
def test_series_describe():
random.seed(0)
sr = cudf.datasets.randomdata(20)["x"]
psr = sr.to_pandas()
dsr = dgd.from_cudf(sr, npartitions=4)
pdsr = dd.from_pandas(psr, npartitions=4)
dd.assert_eq(
dsr.describe(), pdsr.describe(), check_less_precise=3,
)
def test_dataframe_describe():
random.seed(0)
df = cudf.datasets.randomdata(20)
pdf = df.to_pandas()
ddf = dgd.from_cudf(df, npartitions=4)
pddf = dd.from_pandas(pdf, npartitions=4)
dd.assert_eq(
ddf.describe(), pddf.describe(), check_exact=False, atol=0.0001
)
def test_zero_std_describe():
num = 84886781
df = cudf.DataFrame(
{
"x": np.full((20,), num, dtype=np.float64),
"y": np.full((20,), num, dtype=np.float64),
}
)
pdf = df.to_pandas()
ddf = dgd.from_cudf(df, npartitions=4)
pddf = dd.from_pandas(pdf, npartitions=4)
dd.assert_eq(ddf.describe(), pddf.describe(), check_less_precise=3)
def test_large_numbers_var():
num = 8488678001
df = cudf.DataFrame(
{
"x": np.arange(num, num + 1000, dtype=np.float64),
"y": np.arange(num, num + 1000, dtype=np.float64),
}
)
pdf = df.to_pandas()
ddf = dgd.from_cudf(df, npartitions=4)
pddf = dd.from_pandas(pdf, npartitions=4)
dd.assert_eq(ddf.var(), pddf.var(), check_less_precise=3)
def test_index_map_partitions():
# https://github.com/rapidsai/cudf/issues/6738
ddf = dd.from_pandas(pd.DataFrame({"a": range(10)}), npartitions=2)
mins_pd = ddf.index.map_partitions(M.min, meta=ddf.index).compute()
gddf = dgd.from_cudf(cudf.DataFrame({"a": range(10)}), npartitions=2)
mins_gd = gddf.index.map_partitions(M.min, meta=gddf.index).compute()
dd.assert_eq(mins_pd, mins_gd)
| 28.574359 | 79 | 0.611181 |
af677f2bdf1ae299ac4d230ae4febad9e54e8c40
| 1,280 |
py
|
Python
|
setup.py
|
Guillemdb/flake8-per-file-ignores
|
ab0d3d44764ea834211ca2806bef8694c80cab87
|
[
"MIT"
] | null | null | null |
setup.py
|
Guillemdb/flake8-per-file-ignores
|
ab0d3d44764ea834211ca2806bef8694c80cab87
|
[
"MIT"
] | null | null | null |
setup.py
|
Guillemdb/flake8-per-file-ignores
|
ab0d3d44764ea834211ca2806bef8694c80cab87
|
[
"MIT"
] | null | null | null |
import os
from setuptools import setup
with open(os.path.join(os.path.dirname(__file__), 'README.md')) as file:
long_description = file.read()
setup(
name='flake8-per-file-ignores',
version='0.8.1',
url='https://github.com/snoack/flake8-per-file-ignores',
description='Ignore individual error codes per file with flake8',
long_description=long_description,
long_description_content_type='text/markdown',
author='Sebastian Noack',
author_email='[email protected]',
py_modules=['flake8_per_file_ignores'],
install_requires=[
'flake8>=3,<4',
'pathmatch'
],
entry_points={
'flake8.extension': [
'per-file-ignores = flake8_per_file_ignores:PerFileIgnores',
],
},
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Software Development :: Quality Assurance',
]
)
| 32.820513 | 72 | 0.635938 |
40ebf7a61213eb6b6e82d64ba98fb1130fc2087a
| 13,670 |
py
|
Python
|
Tests/ML/util.py
|
johngulliver/InnerEye-DeepLearning
|
5b7d57120928483837ae0ef021f35932efac775d
|
[
"MIT"
] | null | null | null |
Tests/ML/util.py
|
johngulliver/InnerEye-DeepLearning
|
5b7d57120928483837ae0ef021f35932efac775d
|
[
"MIT"
] | null | null | null |
Tests/ML/util.py
|
johngulliver/InnerEye-DeepLearning
|
5b7d57120928483837ae0ef021f35932efac775d
|
[
"MIT"
] | null | null | null |
# ------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
# ------------------------------------------------------------------------------------------
import logging
from pathlib import Path
from typing import Any, List, Optional, Tuple, Union
import numpy as np
import pandas as pd
import pytest
import torch
from PIL import Image
from azureml.core import Workspace
from InnerEye.Azure.azure_config import AzureConfig
from InnerEye.Common import fixed_paths
from InnerEye.Common.fixed_paths_for_tests import full_ml_test_data_path
from InnerEye.Common.output_directories import OutputFolderForTests
from InnerEye.Common.type_annotations import PathOrString, TupleInt3
from InnerEye.ML.dataset.full_image_dataset import PatientDatasetSource
from InnerEye.ML.dataset.sample import PatientMetadata, Sample
from InnerEye.ML.deep_learning_config import DeepLearningConfig
from InnerEye.ML.lightning_base import InnerEyeContainer
from InnerEye.ML.lightning_container import LightningContainer
from InnerEye.ML.lightning_loggers import StoringLogger
from InnerEye.ML.model_training import model_train
from InnerEye.ML.photometric_normalization import PhotometricNormalization
from InnerEye.ML.run_ml import MLRunner
from InnerEye.ML.runner import Runner
from InnerEye.ML.utils import io_util
from InnerEye.ML.utils.checkpoint_handling import CheckpointHandler
from InnerEye.ML.utils.config_loader import ModelConfigLoader
from InnerEye.ML.utils.io_util import ImageHeader, ImageWithHeader
from InnerEye.ML.utils.ml_util import is_gpu_available
TEST_CHANNEL_IDS = ["channel1", "channel2"]
TEST_MASK_ID = "mask"
TEST_GT_ID = "region"
machine_has_gpu = is_gpu_available()
no_gpu_available = not machine_has_gpu
def create_dataset_csv_file(csv_string: str, dst: Path) -> Path:
"""Creates a dataset.csv in the destination path from the csv_string provided"""
(dst / "dataset.csv").write_text(csv_string)
return Path(dst)
def content_mismatch(actual: Any, expected: Any) -> str:
"""Returns error message for content mismatch."""
return "Content mismatch. \nActual:\n {}\nExpected:\n {}".format(actual, expected)
def get_nifti_shape(full_path: PathOrString) -> TupleInt3:
"""Returns the size of the image in the given Nifti file, as an (X, Y, Z) tuple."""
image_with_header = io_util.load_nifti_image(full_path)
return get_image_shape(image_with_header)
def get_image_shape(image_with_header: ImageWithHeader) -> TupleInt3:
return image_with_header.image.shape[0], image_with_header.image.shape[1], image_with_header.image.shape[2]
def load_train_and_test_data_channels(patient_ids: List[int],
normalization_fn: PhotometricNormalization) -> List[Sample]:
if np.any(np.asarray(patient_ids) <= 0):
raise ValueError("data_items must be >= 0")
file_name = lambda k, y: full_ml_test_data_path("train_and_test_data") / f"id{k}_{y}.nii.gz"
get_sample = lambda z: io_util.load_images_from_dataset_source(dataset_source=PatientDatasetSource(
metadata=PatientMetadata(patient_id=z),
image_channels=[file_name(z, c) for c in TEST_CHANNEL_IDS],
mask_channel=file_name(z, TEST_MASK_ID),
ground_truth_channels=[file_name(z, TEST_GT_ID)],
allow_incomplete_labels=False
))
samples = []
for x in patient_ids:
sample = get_sample(x)
sample = Sample(image=normalization_fn.transform(sample.image, sample.mask),
mask=sample.mask,
labels=sample.labels,
metadata=sample.metadata)
samples.append(sample)
return samples
def assert_file_contains_string(full_file: Union[str, Path], expected: Any = None) -> None:
"""
Checks if the given file contains an expected string
:param full_file: The path to the file.
:param expected: The expected contents of the file, as a string.
"""
logging.info("Checking file {}".format(full_file))
file_path = full_file if isinstance(full_file, Path) else Path(full_file)
assert_file_exists(file_path)
if expected is not None:
assert expected.strip() in file_path.read_text()
def assert_text_files_match(full_file: Path, expected_file: Path) -> None:
"""
    Checks line by line (ignoring leading and trailing spaces) whether the two given files contain exactly the same strings
:param full_file: The path to the file.
:param expected_file: The expected file.
"""
with full_file.open() as f1, expected_file.open() as f2:
for line1, line2 in zip(f1, f2):
_assert_line(line1, line2)
def _assert_line(actual: str, expected: str) -> None:
actual = actual.strip()
expected = expected.strip()
assert actual == expected, content_mismatch(actual, expected)
def assert_file_exists(file_path: Path) -> None:
"""
Checks if the given file exists.
"""
assert file_path.exists(), f"File does not exist: {file_path}"
def assert_nifti_content(full_file: PathOrString,
expected_shape: TupleInt3,
expected_header: ImageHeader,
expected_values: List[int],
expected_type: type) -> None:
"""
Checks if the given nifti file contains the expected unique values, and has the expected type and shape.
:param full_file: The path to the file.
:param expected_shape: The expected shape of the image in the nifti file.
:param expected_header: the expected image header
:param expected_values: The expected unique values in the image array.
:param expected_type: The expected type of the stored values.
"""
if isinstance(full_file, str):
full_file = Path(full_file)
assert_file_exists(full_file)
image_with_header = io_util.load_nifti_image(full_file, None)
assert image_with_header.image.shape == expected_shape, content_mismatch(image_with_header.image.shape,
expected_shape)
assert image_with_header.image.dtype == np.dtype(expected_type), content_mismatch(image_with_header.image.dtype,
expected_type)
image = np.unique(image_with_header.image).tolist()
assert image == expected_values, content_mismatch(image, expected_values)
assert image_with_header.header == expected_header
def assert_tensors_equal(t1: torch.Tensor, t2: Union[torch.Tensor, List], abs: float = 1e-6) -> None:
"""
    Checks if the shapes of the given tensors are equal, and the values are approximately equal, with a given
absolute tolerance.
"""
if isinstance(t2, list):
t2 = torch.tensor(t2)
assert t1.shape == t2.shape, "Shapes must match"
# Alternative is to use torch.allclose here, but that method also checks that datatypes match. This makes
# writing the test cases more cumbersome.
v1 = t1.flatten().tolist()
v2 = t2.flatten().tolist()
assert v1 == pytest.approx(v2, abs=abs), f"Tensor elements don't match with tolerance {abs}: {v1} != {v2}"
def assert_binary_files_match(actual_file: Path, expected_file: Path) -> None:
"""
    Checks if two files contain exactly the same bytes. If PNG files mismatch, additional diagnostics are printed.
"""
# Uncomment this line to batch-update all result files that use this assert function
# expected_file.write_bytes(actual_file.read_bytes())
assert_file_exists(actual_file)
assert_file_exists(expected_file)
actual = actual_file.read_bytes()
expected = expected_file.read_bytes()
if actual == expected:
return
if actual_file.suffix == ".png" and expected_file.suffix == ".png":
actual_image = Image.open(actual_file)
expected_image = Image.open(expected_file)
actual_size = actual_image.size
expected_size = expected_image.size
assert actual_size == expected_size, f"Image sizes don't match: actual {actual_size}, expected {expected_size}"
assert np.allclose(np.array(actual_image), np.array(expected_image)), "Image pixel data does not match."
assert False, f"File contents does not match: len(actual)={len(actual)}, len(expected)={len(expected)}"
def csv_column_contains_value(
csv_file_path: Path,
column_name: str,
value: Any,
contains_only_value: bool = True) -> bool:
"""
Checks that the column in the csv file contains the given value (and perhaps only contains that value)
:param csv_file_path: The path to the CSV
:param column_name: The name of the column in which we look for the value
:param value: The value to look for
:param contains_only_value: Check that this is the only value in the column (default True)
:returns: Boolean, whether the CSV column contains the value (and perhaps only the value)
"""
result = True
    if not csv_file_path.exists():
raise ValueError(f"The CSV at {csv_file_path} does not exist.")
df = pd.read_csv(csv_file_path)
if column_name not in df.columns:
ValueError(f"The column {column_name} is not in the CSV at {csv_file_path}, which has columns {df.columns}.")
if value:
result = result and value in df[column_name].unique()
else:
result = result and df[column_name].isnull().any()
if contains_only_value:
if value:
result = result and df[column_name].nunique(dropna=True) == 1
else:
result = result and df[column_name].nunique(dropna=True) == 0
return result
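# Illustrative call of csv_column_contains_value (the CSV path and column name are
# hypothetical, not part of this module):
#   csv_column_contains_value(Path("outputs/metrics.csv"), "split", value=0, contains_only_value=False)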
DummyPatientMetadata = PatientMetadata(patient_id='42')
def get_model_loader(namespace: Optional[str] = None) -> ModelConfigLoader:
"""
Returns a ModelConfigLoader for segmentation models, with the given non-default namespace (if not None)
to search under.
"""
return ModelConfigLoader(model_configs_namespace=namespace)
def get_default_azure_config() -> AzureConfig:
"""
Gets the Azure-related configuration options, using the default settings file settings.yaml.
"""
return AzureConfig.from_yaml(yaml_file_path=fixed_paths.SETTINGS_YAML_FILE,
project_root=fixed_paths.repository_root_directory())
def get_default_checkpoint_handler(model_config: DeepLearningConfig, project_root: Path) -> CheckpointHandler:
"""
Gets a checkpoint handler, using the given model config and the default azure configuration.
"""
azure_config = get_default_azure_config()
lightning_container = InnerEyeContainer(model_config)
return CheckpointHandler(azure_config=azure_config,
container=lightning_container,
project_root=project_root)
def get_default_workspace() -> Workspace:
"""
Gets the project's default AzureML workspace.
:return:
"""
return get_default_azure_config().get_workspace()
def model_train_unittest(config: Optional[DeepLearningConfig],
dirs: OutputFolderForTests,
checkpoint_handler: Optional[CheckpointHandler] = None,
lightning_container: Optional[LightningContainer] = None) -> \
Tuple[StoringLogger, CheckpointHandler]:
"""
A shortcut for running model training in the unit test suite. It runs training for the given config, with the
default checkpoint handler initialized to point to the test output folder specified in dirs.
:param config: The configuration of the model to train.
:param dirs: The test fixture that provides an output folder for the test.
    :param lightning_container: An optional LightningContainer object that will be passed through to the training routine.
:param checkpoint_handler: The checkpoint handler that should be used for training. If not provided, it will be
created via get_default_checkpoint_handler.
:return: Tuple[StoringLogger, CheckpointHandler]
"""
runner = MLRunner(model_config=config, container=lightning_container)
    # Setup will set random seeds before model creation, and set the model in the container.
    # Later we use the container initialized this way.
# For all tests running in AzureML, we need to skip the downloading of datasets that would otherwise happen,
# because all unit test configs come with their own local dataset already.
runner.setup()
if checkpoint_handler is None:
azure_config = get_default_azure_config()
checkpoint_handler = CheckpointHandler(azure_config=azure_config,
container=runner.container,
project_root=dirs.root_dir)
_, storing_logger = model_train(checkpoint_path=checkpoint_handler.get_recovery_or_checkpoint_path_train(),
container=runner.container)
checkpoint_handler.additional_training_done()
return storing_logger, checkpoint_handler # type: ignore
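# Sketch of a typical call from a test (the config object and the output-folder
# fixture are assumed to be provided by the calling test):
#   storing_logger, checkpoint_handler = model_train_unittest(config, dirs=test_output_dirs)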
def default_runner() -> Runner:
"""
Create an InnerEye Runner object with the default settings, pointing to the repository root and
default settings files.
"""
return Runner(project_root=fixed_paths.repository_root_directory(),
yaml_config_file=fixed_paths.SETTINGS_YAML_FILE)
model_loader_including_tests = get_model_loader(namespace="Tests.ML.configs")
| 44.967105 | 120 | 0.701317 |
aa1dbca2c2a5c1bff89f21d8af60b7b9c32f6908
| 452 |
py
|
Python
|
data/scripts/templates/object/static/item/shared_wp_mle_lance_staff_wood_s02.py
|
obi-two/GameServer
|
7d37024e2291a97d49522610cd8f1dbe5666afc2
|
[
"MIT"
] | 20 |
2015-02-23T15:11:56.000Z
|
2022-03-18T20:56:48.000Z
|
data/scripts/templates/object/static/item/shared_wp_mle_lance_staff_wood_s02.py
|
apathyboy/swganh
|
665128efe9154611dec4cb5efc61d246dd095984
|
[
"MIT"
] | null | null | null |
data/scripts/templates/object/static/item/shared_wp_mle_lance_staff_wood_s02.py
|
apathyboy/swganh
|
665128efe9154611dec4cb5efc61d246dd095984
|
[
"MIT"
] | 20 |
2015-04-04T16:35:59.000Z
|
2022-03-24T14:54:37.000Z
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Static()
result.template = "object/static/item/shared_wp_mle_lance_staff_wood_s02.iff"
result.attribute_template_id = -1
result.stfName("obj_n","unknown_object")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
| 26.588235 | 78 | 0.730088 |
56708965d6fec056a956b940dd9b227f3d8fbfd4
| 8,820 |
py
|
Python
|
dist_train/workers/baseline.py
|
victorcampos7/edl
|
ffdf23d4e102ca7d69a1408bafa267b0c7d8bfa0
|
[
"MIT"
] | 30 |
2020-02-16T15:52:59.000Z
|
2022-03-22T10:54:54.000Z
|
dist_train/workers/baseline.py
|
imatge-upc/edl
|
ffdf23d4e102ca7d69a1408bafa267b0c7d8bfa0
|
[
"MIT"
] | null | null | null |
dist_train/workers/baseline.py
|
imatge-upc/edl
|
ffdf23d4e102ca7d69a1408bafa267b0c7d8bfa0
|
[
"MIT"
] | 7 |
2020-02-16T15:53:05.000Z
|
2022-01-18T03:41:03.000Z
|
# Copyright (c) 2019, salesforce.com, inc.
# All rights reserved.
# SPDX-License-Identifier: MIT
# For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/MIT
import os
import json
import time
import torch
import numpy as np
from dist_train.utils.shared_optim import SharedAdam as Adam
from dist_train.workers.base import EpisodicOffPolicyManager, OffPolicyManager, OnPolicyManager, PPOManager
class EpisodicOffPolicy(EpisodicOffPolicyManager):
def rollout_wrapper(self, c_ep_counter):
st = time.time()
self.agent_model.play_episode()
# Add episode for training.
self.replay_buffer.add_episode(self.agent_model.transitions_for_buffer(training=True))
dur = time.time() - st
# Calculate losses to allow dense logging
episode_stats = self.agent_model.episode_summary()
self._log_rollout(c_ep_counter, dur, episode_stats)
def _log_rollout(self, c_ep_counter, dur, episode_stats):
# Increment the steps counters, place the log in the epoch buffer, and give a quick rollout print
c_ep_counter += 1
self.time_keeper['n_rounds'] += 1
n_steps = int(self.agent_model.train_steps.data.item()) + int(c_ep_counter.item())
timestamp = ''.join('{:017.4f}'.format(time.time()).split('.'))
log = {'{:d}.{}'.format(n_steps, timestamp): [str(sl) for sl in episode_stats]}
self.epoch_buffer.append(log)
dense_save = False # (int(self.time_keeper['n_rounds']) % self.settings.ep_save) == 0 and self.rank == 0
log_str = '{:10d} - {} {:6d} Dur = {:6.2f}, Steps = {:3d} {} {}'.format(
n_steps,
'*' if dense_save else ' ',
int(self.time_keeper['n_rounds']),
dur,
int(self.agent_model.n_steps),
'!!!' if int(self.agent_model.was_success) else ' ',
'*' if dense_save else ' '
)
self.logger.info(log_str)
def eval_wrapper(self):
stats = []
episodes = {}
for evi in range(self.config.get('eval_iters', 10)):
self.agent_model.play_episode(do_eval=self.config.get('greedy_eval', True))
ep_stats = [float(x) for x in self.agent_model.episode_summary()]
stats.append(ep_stats)
dump_ep = []
for t in self.agent_model.curr_ep:
dump_t = {k: np.array(v.detach()).tolist() for k, v in t.items()}
dump_ep.append(dump_t)
episodes[evi] = dump_ep
return stats, episodes
class OffPolicy(OffPolicyManager):
def env_transitions_wrapper(self, c_step_counter, num_transitions):
# Collect transitions and update counter
self.agent_model.collect_transitions(num_transitions, skip_im_rew=True)
c_step_counter += num_transitions
# Add episode for training
self.replay_buffer.add_episode(self.agent_model.transitions_for_buffer(training=True))
def eval_wrapper(self):
stats = []
episodes = {}
for evi in range(self.config.get('eval_iters', 10)):
self.agent_model.play_episode(do_eval=self.config.get('greedy_eval', True))
ep_stats = [float(x) for x in self.agent_model.episode_summary()]
stats.append(ep_stats)
dump_ep = []
for t in self.agent_model.curr_ep:
dump_t = {k: np.array(v.cpu().detach()).tolist() for k, v in t.items()}
dump_ep.append(dump_t)
episodes[evi] = dump_ep
return stats, episodes
class HierarchicalEpisodicOffPolicy(EpisodicOffPolicy):
def __init__(self, rank, config, settings):
super().__init__(rank, config, settings)
self.optim_lo_path = os.path.join(self.exp_dir, 'optim_lo.pth.tar')
self.optim_lo = Adam(self.agent_model._lo_parameters, lr=config['learning_rate'])
if os.path.isfile(self.optim_lo_path):
self.optim_lo.load_state_dict(torch.load(self.optim_lo_path))
def checkpoint(self):
super().checkpoint()
torch.save(self.optim_lo, self.optim_lo_path)
def rollout_wrapper(self, c_ep_counter):
st = time.time()
self.agent_model.play_episode(optim_lo=self.optim_lo)
self.agent_model.relabel_episode()
# Add episode for training.
self.replay_buffer.add_episode(self.agent_model.transitions_for_buffer(training=True))
dur = time.time() - st
# Calculate losses to allow dense logging
episode_stats = self.agent_model.episode_summary()
self._log_rollout(c_ep_counter, dur, episode_stats)
class OnPolicy(OnPolicyManager):
def rollout_wrapper(self, c_ep_counter):
st = time.time()
self.agent_model.eval()
self.agent_model.play_episode()
self.agent_model.train()
loss = self.condense_loss(self.agent_model())
dur = time.time() - st
# Calculate losses to allow dense logging
episode_stats = self.agent_model.episode_summary()
self._log_rollout(c_ep_counter, dur, episode_stats)
return loss
def _log_rollout(self, c_ep_counter, dur, episode_stats):
c_ep_counter += 1
n_steps = int(self.agent_model.train_steps.data.item()) + int(c_ep_counter.item())
timestamp = ''.join('{:017.4f}'.format(time.time()).split('.'))
dense_save = False # (int(self.time_keeper['n_rounds']) % self.settings.ep_save) == 0 and self.rank == 0
# The burden to save falls to us
if dense_save:
dstr = '{:010d}.{}'.format(n_steps, timestamp)
config_path = self.settings.config_path
exp_name = config_path.split('/')[-1][:-5]
exp_dir = os.path.join(self.settings.log_dir, exp_name)
c_path = os.path.join(exp_dir, dstr + '.json')
dump_ep = []
for t in self.agent_model.curr_ep:
dump_t = {k: np.array(v.detach()).tolist() for k, v in t.items()}
dump_ep.append(dump_t)
with open(c_path, 'wt') as f:
json.dump(dump_ep, f)
self.time_keeper['ep_save'] = int(self.time_keeper['n_rounds'])
# Increment the steps counters and log the results.
self.time_keeper['n_rounds'] += 1
hist_name = 'hist_{}.json'.format(self.rank)
with open(os.path.join(self.exp_dir, hist_name), 'a') as save_file:
log = {'{:d}.{}'.format(n_steps, timestamp): [str(sl) for sl in episode_stats]}
save_file.write(json.dumps(log))
save_file.close()
log_str = '{:10d} - {} {:6d} Dur = {:6.2f}, Steps = {:3d} {} {}'.format(
n_steps,
'*' if dense_save else ' ',
int(self.time_keeper['n_rounds']),
dur,
int(self.agent_model.n_steps),
'!!!' if int(self.agent_model.was_success) else ' ',
'*' if dense_save else ' '
)
self.logger.info(log_str)
def eval_wrapper(self):
stats = []
episodes = {}
for evi in range(self.config.get('eval_iters', 10)):
self.agent_model.play_episode(do_eval=bool(self.config.get('greedy_eval', True)))
ep_stats = [float(x) for x in self.agent_model.episode_summary()]
stats.append(ep_stats)
dump_ep = []
for t in self.agent_model.curr_ep:
dump_t = {k: np.array(v.detach()).tolist() for k, v in t.items()}
dump_ep.append(dump_t)
episodes[evi] = dump_ep
return stats, episodes
class PPO(PPOManager, OnPolicy):
def rollout_wrapper(self, c_ep_counter):
st = time.time()
self.agent_model.reach_horizon()
dur = time.time() - st
# Calculate losses to allow dense logging
episode_stats = self.agent_model.episode_summary()
self._log_rollout(c_ep_counter, dur, episode_stats)
class HierarchicalPPO(PPO):
def __init__(self, rank, config, settings):
super().__init__(rank, config, settings)
self.optim_lo_path = os.path.join(self.exp_dir, 'optim_lo.pth.tar')
self.optim_lo = Adam(self.agent_model._lo_parameters, lr=config['learning_rate'])
if os.path.isfile(self.optim_lo_path):
self.optim_lo.load_state_dict(torch.load(self.optim_lo_path))
def checkpoint(self):
super().checkpoint()
torch.save(self.optim_lo, self.optim_lo_path)
def rollout_wrapper(self, c_ep_counter):
st = time.time()
self.agent_model.reach_horizon(optim_lo=self.optim_lo)
dur = time.time() - st
# Calculate losses to allow dense logging
episode_stats = self.agent_model.episode_summary()
self._log_rollout(c_ep_counter, dur, episode_stats)
| 37.372881 | 113 | 0.625057 |
62465d23caea9a55d617b43228f68bc11060176f
| 5,668 |
py
|
Python
|
models/rank/fnn/fm_model.py
|
michaelwang123/PaddleRec
|
4feb0a7f962e918bdfa4f7289a9ddfd08d459824
|
[
"Apache-2.0"
] | 3 |
2020-09-22T02:46:30.000Z
|
2021-06-17T06:43:37.000Z
|
models/rank/fnn/fm_model.py
|
hzj1558718/PaddleRec
|
927e363e42ab55dd2961b0fbbb23c2578d289105
|
[
"Apache-2.0"
] | null | null | null |
models/rank/fnn/fm_model.py
|
hzj1558718/PaddleRec
|
927e363e42ab55dd2961b0fbbb23c2578d289105
|
[
"Apache-2.0"
] | 1 |
2021-01-11T06:31:36.000Z
|
2021-01-11T06:31:36.000Z
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from collections import OrderedDict
import paddle.fluid as fluid
from paddlerec.core.utils import envs
from paddlerec.core.model import ModelBase
class Model(ModelBase):
def __init__(self, config):
ModelBase.__init__(self, config)
def _init_hyper_parameters(self):
self.is_distributed = True if envs.get_fleet_mode().upper(
) == "PSLIB" else False
self.sparse_feature_number = envs.get_global_env(
"hyper_parameters.sparse_feature_number", None)
self.sparse_feature_dim = envs.get_global_env(
"hyper_parameters.sparse_feature_dim", None)
self.reg = envs.get_global_env("hyper_parameters.reg", 1e-4)
self.num_field = envs.get_global_env("hyper_parameters.num_field",
None)
def net(self, inputs, is_infer=False):
raw_feat_idx = self._sparse_data_var[1] # (batch_size * num_field) * 1
raw_feat_value = self._dense_data_var[0] # batch_size * num_field
self.label = self._sparse_data_var[0] # batch_size * 1
init_value_ = 0.1
feat_idx = raw_feat_idx
feat_value = fluid.layers.reshape(
raw_feat_value,
[-1, self.num_field, 1]) # batch_size * num_field * 1
# ------------------------- first order term --------------------------
first_weights_re = fluid.embedding(
input=feat_idx,
is_sparse=True,
is_distributed=self.is_distributed,
dtype='float32',
size=[self.sparse_feature_number + 1, 1],
padding_idx=0,
param_attr=fluid.ParamAttr(
initializer=fluid.initializer.TruncatedNormalInitializer(
loc=0.0, scale=init_value_),
regularizer=fluid.regularizer.L1DecayRegularizer(self.reg))
) # (batch_size * num_field) * 1 * 1(embedding_size)
first_weights = fluid.layers.reshape(
first_weights_re,
shape=[-1, self.num_field, 1]) # batch_size * num_field * 1
y_first_order = fluid.layers.reduce_sum((first_weights * feat_value),
1) # batch_size * 1
b_linear = fluid.layers.create_parameter(
shape=[1],
dtype='float32',
default_initializer=fluid.initializer.ConstantInitializer(
value=0)) # 1
# ------------------------- second order term --------------------------
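        # The pairwise FM interaction term sum_{i<j} <v_i, v_j> x_i x_j is evaluated via the
        # standard identity 0.5 * sum_f [ (sum_i v_{i,f} x_i)^2 - sum_i (v_{i,f} x_i)^2 ],
        # computed below as the "sum_square" and "square_sum" parts.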
feat_embeddings_re = fluid.embedding(
input=feat_idx,
is_sparse=True,
is_distributed=self.is_distributed,
dtype='float32',
size=[self.sparse_feature_number + 1, self.sparse_feature_dim],
padding_idx=0,
param_attr=fluid.ParamAttr(
initializer=fluid.initializer.TruncatedNormalInitializer(
loc=0.0,
scale=init_value_ /
math.sqrt(float(self.sparse_feature_dim))))
) # (batch_size * num_field) * 1 * embedding_size
feat_embeddings = fluid.layers.reshape(
feat_embeddings_re,
shape=[-1, self.num_field, self.sparse_feature_dim
]) # batch_size * num_field * embedding_size
# batch_size * num_field * embedding_size
feat_embeddings = feat_embeddings * feat_value
# sum_square part
summed_features_emb = fluid.layers.reduce_sum(
feat_embeddings, 1) # batch_size * embedding_size
summed_features_emb_square = fluid.layers.square(
summed_features_emb) # batch_size * embedding_size
# square_sum part
squared_features_emb = fluid.layers.square(
feat_embeddings) # batch_size * num_field * embedding_size
squared_sum_features_emb = fluid.layers.reduce_sum(
squared_features_emb, 1) # batch_size * embedding_size
y_FM = 0.5 * fluid.layers.reduce_sum(
summed_features_emb_square - squared_sum_features_emb,
dim=1,
keep_dim=True) # batch_size * 1
# ------------------------- Predict --------------------------
self.predict = fluid.layers.sigmoid(b_linear + y_first_order + y_FM)
cost = fluid.layers.log_loss(
input=self.predict, label=fluid.layers.cast(self.label,
"float32")) # log_loss
avg_cost = fluid.layers.reduce_sum(cost)
self._cost = avg_cost
predict_2d = fluid.layers.concat([1 - self.predict, self.predict], 1)
label_int = fluid.layers.cast(self.label, 'int64')
auc_var, batch_auc_var, _ = fluid.layers.auc(input=predict_2d,
label=label_int,
slide_steps=0)
self._metrics["AUC"] = auc_var
self._metrics["BATCH_AUC"] = batch_auc_var
if is_infer:
self._infer_results["AUC"] = auc_var
| 42.298507 | 80 | 0.59933 |
44e845bf3c7c38511147b2005a66084db8612c19
| 3,327 |
py
|
Python
|
crossing_tree/processes/hermite.py
|
ivannz/crossing_paper2017
|
a33c826b966d0238b96156ec19f462d2f9ed7906
|
[
"MIT"
] | 1 |
2019-05-25T21:37:23.000Z
|
2019-05-25T21:37:23.000Z
|
crossing_tree/processes/hermite.py
|
ivannz/crossing_paper2017
|
a33c826b966d0238b96156ec19f462d2f9ed7906
|
[
"MIT"
] | null | null | null |
crossing_tree/processes/hermite.py
|
ivannz/crossing_paper2017
|
a33c826b966d0238b96156ec19f462d2f9ed7906
|
[
"MIT"
] | null | null | null |
# -*- coding: UTF-8 -*-
"""A module with the Hermite process generator.
"""
import numpy as np
from numpy.polynomial.hermite_e import hermeval
from sklearn.base import BaseEstimator as BaseGenerator
from .gaussian import FractionalGaussianNoise
class HermiteProcess(BaseGenerator):
r"""A derived class to produce sample paths of a Hermite process of order
`degree` with specified Hurst exponent (fractional integration parameter).
    For the best performance `N * n_downsample` should be a power of two. The
    Hurst exponent for this process is
:math:`H = 1 + degree * (H_{\text{fgn}} - 1)`.
Returns a process sampled on :math:`0.0=t_0<t_1<\ldots<t_N=1.0` with equal
spacing given by :math:`N^{-1}`.
Details
-------
    When the downsampling parameter (`n_downsample`) tends to infinity, the
    process converges in distribution to the Rosenblatt process, or in general
    to a Hermite process. This stems from the `non-central limit theorem`, i.e.
    .. math::
        Z^k(t) = \frac{1}{n^\alpha}
            \sum_{j=1}^{\lfloor kt\rfloor} H(\xi_j)
            \,,
    converges to :math:`Z_\frac{\alpha}{2}(t)` -- a Hermite process. Thus
    increasing `n_downsample` gives a better approximation; in theory it should
    tend to infinity, which is a serious practical drawback.
    cf. [Abry, Pipiras; 2005]
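    Example
    -------
    A minimal usage sketch (illustrative only; the parameter values are arbitrary)::
        gen = HermiteProcess(N=1024, degree=2, n_downsample=16, hurst=0.7)
        gen.start()
        t, z = gen.draw()   # t: N + 1 equally spaced points on [0, 1]; z[0] == 0
        gen.finish()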
"""
def __init__(self, N, degree=2, n_downsample=16, hurst=0.5,
random_state=None, n_threads=1):
self.random_state = random_state
self.n_threads = n_threads
self.N = N
self.hurst = hurst
self.degree = degree
self.n_downsample = n_downsample
def start(self):
"""Initialize the generator.
"""
if hasattr(self, "initialized_") and self.initialized_:
return
self.fgn_ = FractionalGaussianNoise(
N=self.n_downsample * self.N + 1,
hurst=1 - (1.0 - self.hurst) / self.degree,
sigma=1.0, random_state=self.random_state,
n_threads=self.n_threads)
self.fgn_.start()
# Define the order of the Hermite polynomial
        self.hermite_coef_ = np.zeros(self.degree + 1, dtype=float)
self.hermite_coef_[self.degree] = 1.
self.initialized_ = True
def finish(self):
"""Deinitialize the generator.
"""
if hasattr(self, "initialized_") and self.initialized_:
self.initialized_ = False
self.fgn_.finish()
self.fgn_ = None
def draw(self):
"""Evaluate a hermite polynomial at the values of a fractional Gaussian
Noise with the specified hurst index. Then apply the renorm-group
transformation.
"""
if not(hasattr(self, "initialized_") and self.initialized_):
raise RuntimeError("The generator has not been initialized properly. "
"Please call `.start()` before calling `.draw()`.")
increments = hermeval(self.fgn_.draw(), self.hermite_coef_)
if self.n_downsample > 1:
values_ = increments.cumsum()[self.n_downsample-1::self.n_downsample]
else:
values_ = increments[:-1].cumsum()
values_ /= (self.fgn_.N - 1) ** self.fgn_.hurst
return np.linspace(0, 1, num=self.N + 1), np.r_[0, values_]
| 35.393617 | 82 | 0.626991 |
c9d9429064d533cd3918c0ac7c474c07b4d52340
| 892 |
py
|
Python
|
etc/bin/sort_json_file.py
|
petrosagg/google-apis-rs
|
5c071ba03c2c060ce0ff0e40fe12dd7cc9ba6aa3
|
[
"MIT-feh",
"Apache-2.0",
"MIT"
] | 663 |
2015-03-13T17:48:25.000Z
|
2022-03-28T17:57:24.000Z
|
etc/bin/sort_json_file.py
|
petrosagg/google-apis-rs
|
5c071ba03c2c060ce0ff0e40fe12dd7cc9ba6aa3
|
[
"MIT-feh",
"Apache-2.0",
"MIT"
] | 303 |
2015-03-12T06:35:25.000Z
|
2022-03-27T23:36:15.000Z
|
etc/bin/sort_json_file.py
|
petrosagg/google-apis-rs
|
5c071ba03c2c060ce0ff0e40fe12dd7cc9ba6aa3
|
[
"MIT-feh",
"Apache-2.0",
"MIT"
] | 106 |
2015-03-15T11:41:24.000Z
|
2022-03-08T09:12:48.000Z
|
#!/usr/bin/env python
# Sort a JSON file and rewrite it in a fixed format.
# This reduces git diffs when a file has only changed by whitespace.
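# Example invocation (illustrative path):
#   ./sort_json_file.py path/to/data.json --skip-missing-file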
import argparse
import json
import os
def main(json_file_path, skip_missing_file):
if not os.path.isfile(json_file_path) and skip_missing_file:
return
with open(json_file_path, 'r') as fh:
loaded_json = json.load(fh)
with open(json_file_path, 'w') as fh:
json.dump(loaded_json, fh, indent=4, sort_keys=True)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Sort and format JSON file.')
parser.add_argument('json_file', metavar='FILE', type=str, help='JSON file to sort in place.')
parser.add_argument('--skip-missing-file', default=False, action='store_true', help='Do not fail on missing file.')
args = parser.parse_args()
main(args.json_file, args.skip_missing_file)
| 29.733333 | 119 | 0.714126 |
27c52b09f147502954ae110aaade91b25ccbf4b8
| 2,380 |
py
|
Python
|
beer/cli/subcommands/hmm/train.py
|
RobinAlgayres/beer
|
15ad0dad5a49f98e658e948724e05df347ffe3b8
|
[
"MIT"
] | null | null | null |
beer/cli/subcommands/hmm/train.py
|
RobinAlgayres/beer
|
15ad0dad5a49f98e658e948724e05df347ffe3b8
|
[
"MIT"
] | null | null | null |
beer/cli/subcommands/hmm/train.py
|
RobinAlgayres/beer
|
15ad0dad5a49f98e658e948724e05df347ffe3b8
|
[
"MIT"
] | null | null | null |
'train an HMM-based model'
import argparse
import pickle
import sys
import beer
def setup(parser):
parser.add_argument('-b', '--batch-size', type=int, default=-1,
help='batch size in number of utterance ' \
'(-1 means all the utterances as one batch)')
parser.add_argument('-e', '--epochs', type=int, default=1,
help='number of epochs')
parser.add_argument('-l', '--lrate', type=float, default=1.,
help='learning rate')
parser.add_argument('model', help='hmm based model')
parser.add_argument('dataset', help='training data set')
parser.add_argument('out', help='phone loop model')
def main(args, logger):
logger.debug('load the model')
with open(args.model, 'rb') as f:
model = pickle.load(f)
logger.debug('load the dataset')
with open(args.dataset, 'rb') as f:
dataset = pickle.load(f)
logger.debug('create the optimizer')
optim = beer.BayesianModelOptimizer(model.mean_field_factorization(),
lrate=args.lrate)
for epoch in range(1, args.epochs + 1):
elbo = beer.evidence_lower_bound(datasize=dataset.size)
optim.init_step()
for i, utt in enumerate(dataset.utterances(), start=1):
logger.debug(f'processing utterance: {utt.id}')
elbo += beer.evidence_lower_bound(model, utt.features,
datasize=dataset.size)
# Update the model after N utterances.
if i % args.batch_size == 0:
elbo.backward()
optim.step()
logger.info(f'{"epoch=" + str(epoch):<20} ' \
f'{"batch=" + str(i // args.batch_size) + "/" + str(int(len(dataset) / args.batch_size)):<20} ' \
f'{"ELBO=" + str(round(float(elbo) / (args.batch_size * dataset.size), 3)):<20}')
elbo = beer.evidence_lower_bound(datasize=dataset.size)
optim.init_step()
logger.debug('save the model on disk...')
with open(args.out, 'wb') as f:
pickle.dump(model, f)
logger.info(f'finished training after {args.epochs} epochs. ' \
f'KL(q || p) = {float(model.kl_div_posterior_prior()): .3f}')
if __name__ == "__main__":
main()
| 36.615385 | 125 | 0.560924 |
505b630f4b580015d8080ff830262c1e2e2bb743
| 4,941 |
py
|
Python
|
test/master/test_scheduler.py
|
BenjaminHamon/BuildService
|
2ca12f9ae74e9cbf732229849f6cd6d13f40151a
|
[
"MIT"
] | 2 |
2021-01-28T15:56:50.000Z
|
2021-03-02T06:27:09.000Z
|
test/master/test_scheduler.py
|
BenjaminHamon/BuildService
|
2ca12f9ae74e9cbf732229849f6cd6d13f40151a
|
[
"MIT"
] | null | null | null |
test/master/test_scheduler.py
|
BenjaminHamon/BuildService
|
2ca12f9ae74e9cbf732229849f6cd6d13f40151a
|
[
"MIT"
] | null | null | null |
# pylint: disable = protected-access
""" Unit tests for JobScheduler """
import pytest
from bhamon_orchestra_master.job_scheduler import JobScheduler
from bhamon_orchestra_master.supervisor import Supervisor
from bhamon_orchestra_master.worker import Worker
from bhamon_orchestra_model.database.memory_database_client import MemoryDatabaseClient
from bhamon_orchestra_model.run_provider import RunProvider
from ..fakes.fake_date_time_provider import FakeDateTimeProvider
def test_abort_run_pending():
""" Test aborting a pending run """
database_client_instance = MemoryDatabaseClient()
date_time_provider_instance = FakeDateTimeProvider()
run_provider_instance = RunProvider(None, date_time_provider_instance)
supervisor_instance = Supervisor(None, None, None, None, None, None)
job_scheduler_instance = JobScheduler(lambda: database_client_instance, None, run_provider_instance, None, supervisor_instance, None, date_time_provider_instance)
job = { "project": "examples", "identifier": "empty" }
run = run_provider_instance.create(database_client_instance, job["project"], job["identifier"], {}, None)
assert run["status"] == "pending"
assert len(supervisor_instance._active_workers) == 0
with pytest.raises(ValueError):
job_scheduler_instance.abort_run(run)
assert run["status"] == "pending"
def test_abort_run_running_connected():
""" Test aborting an in progress run on a connected worker """
database_client_instance = MemoryDatabaseClient()
date_time_provider_instance = FakeDateTimeProvider()
run_provider_instance = RunProvider(None, date_time_provider_instance)
worker_instance = Worker("worker_test", None, lambda: database_client_instance, run_provider_instance, None)
supervisor_instance = Supervisor(None, None, None, None, None, None)
job_scheduler_instance = JobScheduler(lambda: database_client_instance, None, run_provider_instance, None, supervisor_instance, None, date_time_provider_instance)
supervisor_instance._active_workers[worker_instance.identifier] = worker_instance
job = { "project": "examples", "identifier": "empty" }
run = run_provider_instance.create(database_client_instance, job["project"], job["identifier"], {}, None)
worker_instance.assign_run(job, run)
run_provider_instance.update_status(database_client_instance, run, status = "running")
assert run["status"] == "running"
assert run["worker"] == worker_instance.identifier
assert len(supervisor_instance._active_workers) == 1
assert len(worker_instance.executors) == 1
operation_result = job_scheduler_instance.abort_run(run)
assert operation_result is True
assert run["status"] == "running"
assert worker_instance.executors[0]["should_abort"] is True
def test_abort_run_running_disconnected():
""" Test aborting an in progress run on a disconnected worker """
database_client_instance = MemoryDatabaseClient()
date_time_provider_instance = FakeDateTimeProvider()
run_provider_instance = RunProvider(None, date_time_provider_instance)
worker_instance = Worker("worker_test", None, lambda: database_client_instance, run_provider_instance, None)
supervisor_instance = Supervisor(None, None, None, None, None, None)
job_scheduler_instance = JobScheduler(lambda: database_client_instance, None, run_provider_instance, None, supervisor_instance, None, date_time_provider_instance)
job = { "project": "examples", "identifier": "empty" }
run = run_provider_instance.create(database_client_instance, job["project"], job["identifier"], {}, None)
worker_instance.assign_run(job, run)
run_provider_instance.update_status(database_client_instance, run, status = "running")
assert run["status"] == "running"
assert run["worker"] == worker_instance.identifier
assert len(supervisor_instance._active_workers) == 0
assert len(worker_instance.executors) == 1
operation_result = job_scheduler_instance.abort_run(run)
assert operation_result is False
assert run["status"] == "running"
assert worker_instance.executors[0]["should_abort"] is False
def test_abort_run_completed():
""" Test aborting a completed run """
database_client_instance = MemoryDatabaseClient()
date_time_provider_instance = FakeDateTimeProvider()
run_provider_instance = RunProvider(None, date_time_provider_instance)
supervisor_instance = Supervisor(None, None, None, None, None, None)
job_scheduler_instance = JobScheduler(lambda: database_client_instance, None, run_provider_instance, None, supervisor_instance, None, date_time_provider_instance)
job = { "project": "examples", "identifier": "empty" }
run = run_provider_instance.create(database_client_instance, job["project"], job["identifier"], {}, None)
run_provider_instance.update_status(database_client_instance, run, status = "succeeded")
assert run["status"] == "succeeded"
assert len(supervisor_instance._active_workers) == 0
with pytest.raises(ValueError):
job_scheduler_instance.abort_run(run)
assert run["status"] == "succeeded"
| 43.725664 | 163 | 0.801255 |
c3da31ca2e9813355c9dbb139f8e0353c7c3ab47
| 325 |
py
|
Python
|
expjobs/status.py
|
omangin/joblib
|
b1ce3b5e16d93bff42c65007b623d5091716771f
|
[
"BSD-3-Clause"
] | 1 |
2020-01-19T04:01:38.000Z
|
2020-01-19T04:01:38.000Z
|
expjobs/status.py
|
omangin/joblib
|
b1ce3b5e16d93bff42c65007b623d5091716771f
|
[
"BSD-3-Clause"
] | null | null | null |
expjobs/status.py
|
omangin/joblib
|
b1ce3b5e16d93bff42c65007b623d5091716771f
|
[
"BSD-3-Clause"
] | null | null | null |
class Status(tuple):
def __new__(_cls, i, n):
return tuple.__new__(_cls, (i, n))
def __repr__(self):
return str(self[1])
NOT_READY = Status(0, 'not-ready')
READY = Status(1, 'ready')
QUEUED = Status(2, 'queued')
RUNNING = Status(3, 'running')
FAILED = Status(4, 'failed')
DONE = Status(5, 'done')
| 20.3125 | 42 | 0.615385 |
bc64192e65d689a1abb353039fa512f10296c929
| 1,235 |
py
|
Python
|
models/training_2L.py
|
ElisaNguyen/tsa-explanations
|
0e9db51817c58fa5a09d25d798e20d67cb575a01
|
[
"MIT"
] | null | null | null |
models/training_2L.py
|
ElisaNguyen/tsa-explanations
|
0e9db51817c58fa5a09d25d798e20d67cb575a01
|
[
"MIT"
] | null | null | null |
models/training_2L.py
|
ElisaNguyen/tsa-explanations
|
0e9db51817c58fa5a09d25d798e20d67cb575a01
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import os
import random
import sys
import numpy as np
import pandas as pd
import torch
random.seed(123)
torch.manual_seed(123)
np.random.seed(123)
dtype = torch.float
# Check whether a GPU is available
if torch.cuda.is_available():
device = torch.device("cuda")
else:
device = torch.device("cpu")
from CoreSNN import *
"""### Import data"""
dataset = load_obj('../data/dataset900.pkl')
X_train = dataset['X_train']
y_train = dataset['y_train']
X_val = dataset['X_val']
y_val = dataset['y_val']
X_test = dataset['X_test']
y_test = dataset['y_test']
"""### Setup of the spiking network model"""
hyperparams = load_obj('best_params_2L.pkl')
print(hyperparams)
hyperparams['nb_hiddens'] = [hyperparams['nb_hidden']]
nb_inputs = 14
nb_outputs = 11
nb_layers = 2
max_time = 900
TwoLayerSNN = SNN(hyperparams=hyperparams,
nb_inputs=nb_inputs,
nb_outputs=nb_outputs,
nb_layers=nb_layers,
max_time=max_time)
"""## Training the network"""
model_save_path = '../models/training/results_2L/'
loss_hist = TwoLayerSNN.train(X_train, y_train, path=model_save_path)
save_obj(loss_hist, model_save_path+"loss_hist_2L.pkl")
| 20.583333 | 69 | 0.68583 |
3d9bb51750c64c4487da7e33fb6f79442c7c78ce
| 187 |
py
|
Python
|
bin/twigs/quasi-polytwigs-123-hexagon-ring-4.py
|
tiwo/puzzler
|
7ad3d9a792f0635f7ec59ffa85fb46b54fd77a7e
|
[
"Intel"
] | null | null | null |
bin/twigs/quasi-polytwigs-123-hexagon-ring-4.py
|
tiwo/puzzler
|
7ad3d9a792f0635f7ec59ffa85fb46b54fd77a7e
|
[
"Intel"
] | null | null | null |
bin/twigs/quasi-polytwigs-123-hexagon-ring-4.py
|
tiwo/puzzler
|
7ad3d9a792f0635f7ec59ffa85fb46b54fd77a7e
|
[
"Intel"
] | 1 |
2022-01-02T16:54:14.000Z
|
2022-01-02T16:54:14.000Z
|
#!/usr/bin/env python
# $Id$
"""many solutions"""
import puzzler
from puzzler.puzzles.quasipolytwigs123 import QuasiPolytwigs123HexagonRing4
puzzler.run(QuasiPolytwigs123HexagonRing4)
| 18.7 | 75 | 0.812834 |
5b1b823f5e8c7ee6ded35c8b275592bc539bbe56
| 3,755 |
py
|
Python
|
flexget/plugins/output/sms_ru.py
|
Crupuk/Flexget
|
0ede246fd4b90e3cd75120ba13746187e11968d2
|
[
"MIT"
] | null | null | null |
flexget/plugins/output/sms_ru.py
|
Crupuk/Flexget
|
0ede246fd4b90e3cd75120ba13746187e11968d2
|
[
"MIT"
] | null | null | null |
flexget/plugins/output/sms_ru.py
|
Crupuk/Flexget
|
0ede246fd4b90e3cd75120ba13746187e11968d2
|
[
"MIT"
] | null | null | null |
from __future__ import unicode_literals, division, absolute_import
import logging
import hashlib
from flexget.plugin import register_plugin, priority
from flexget.utils.template import RenderError
__version__ = 0.1
log = logging.getLogger("sms_ru")
client_headers = {"User-Agent": "FlexGet sms_ru plugin/%s" % str(__version__)}
sms_send_url = "http://sms.ru/sms/send"
sms_token_url = "http://sms.ru/auth/get_token"
class OutputSMSru(object):
"""
    Sends an SMS notification through the sms.ru HTTP API (sms/send).
    The phone number is the login assigned to the sms.ru account.
Example:
sms_ru:
phonenumber: <PHONE_NUMBER> (accepted format example: "79997776655")
password: <PASSWORD>
[message: <MESSAGE_TEXT>] (default: "accepted {{title}}" -- accepts Jinja)
    Configuration parameters are also supported from entries (e.g. through set).
"""
def validator(self):
from flexget import validator
config = validator.factory("dict")
config.accept("text", key="phonenumber", required=True)
config.accept("text", key="password", required=True)
config.accept("text", key="message", required=False)
return config
def prepare_config(self, config):
if isinstance(config, bool):
config = {"enabled": config}
# Set the defaults
config.setdefault("message", "accepted {{title}}")
return config
# Run last to make sure other outputs are successful before sending notification
@priority(0)
def on_task_output(self, task, config):
# Get the parameters
config = self.prepare_config(config)
phonenumber = config["phonenumber"]
password = config["password"]
# Backend provides temporary token
token_response = task.requests.get(sms_token_url, headers=client_headers, raise_status=False)
if token_response.status_code == 200:
log.debug("Got auth token")
# Auth method without api_id based on hash of password combined with token
sha512 = hashlib.sha512(password + token_response.text).hexdigest()
else:
log.error("Error getting auth token")
# Loop through the accepted entries
for entry in task.accepted:
# Set message from entry
message = config["message"]
# Attempt to render the message field
try:
message = entry.render(message)
            except RenderError as e:
log.debug("Problem rendering 'message': %s" % e)
message = "accepted %s" % entry["title"]
# Check for test mode
if task.manager.options.test:
log.info("Test mode. Processing for %s" % phonenumber)
log.info("Message: %s" % message)
# Build request params
send_params = {'login': phonenumber,
'sha512': sha512,
'token': token_response.text,
'to': phonenumber,
'text': message}
if task.manager.options.test:
send_params.update({'test': 1})
# Make the request
response = task.requests.get(sms_send_url, params=send_params, headers=client_headers, raise_status=False)
            # Get result code from the sms.ru backend, returned in the body
result_text = response.text
# Check if it succeeded
if response.text.find("100") == 0:
log.debug("SMS notification for %s sent" % phonenumber)
else:
log.error("SMS was not sent. Server response was %s" % response.text)
register_plugin(OutputSMSru, "sms_ru", api_ver=2)
| 35.093458 | 118 | 0.61225 |
018fc28a3653062277bdae9d566e5ef6950aeb5a
| 1,966 |
py
|
Python
|
src/sima/post/blueprints/addconstantfilter.py
|
SINTEF/simapy
|
650b8c2f15503dad98e2bfc0d0788509593822c7
|
[
"MIT"
] | null | null | null |
src/sima/post/blueprints/addconstantfilter.py
|
SINTEF/simapy
|
650b8c2f15503dad98e2bfc0d0788509593822c7
|
[
"MIT"
] | null | null | null |
src/sima/post/blueprints/addconstantfilter.py
|
SINTEF/simapy
|
650b8c2f15503dad98e2bfc0d0788509593822c7
|
[
"MIT"
] | null | null | null |
#
# Generated with AddConstantFilterBlueprint
from dmt.blueprint import Blueprint
from dmt.dimension import Dimension
from dmt.attribute import Attribute
from dmt.enum_attribute import EnumAttribute
from dmt.blueprint_attribute import BlueprintAttribute
from .operationnode import OperationNodeBlueprint
class AddConstantFilterBlueprint(OperationNodeBlueprint):
""""""
def __init__(self, name="AddConstantFilter", package_path="sima/post", description=""):
super().__init__(name,package_path,description)
self.attributes.append(Attribute("name","string","",default=""))
self.attributes.append(Attribute("description","string","",default=""))
self.attributes.append(Attribute("_id","string","",default=""))
self.attributes.append(BlueprintAttribute("scriptableValues","sima/sima/ScriptableValue","",True,Dimension("*")))
self.attributes.append(Attribute("x","integer","",default=0))
self.attributes.append(Attribute("y","integer","",default=0))
self.attributes.append(Attribute("h","integer","",default=0))
self.attributes.append(Attribute("w","integer","",default=0))
self.attributes.append(BlueprintAttribute("controlSignalInputSlots","sima/post/ControlSignalInputSlot","",True,Dimension("*")))
self.attributes.append(BlueprintAttribute("filterInputSlots","sima/post/InputSlot","",True,Dimension("*")))
self.attributes.append(BlueprintAttribute("filterOutputSlots","sima/post/OutputSlot","",True,Dimension("*")))
self.attributes.append(Attribute("renameOutput","boolean","",default=True))
self.attributes.append(Attribute("constant","number","Constant value to subtract",default=0.0))
self.attributes.append(EnumAttribute("axis","sima/post/SignalAxis","Select the value from the x-axis or the y-axis"))
self.attributes.append(Attribute("shiftAxis","boolean","Shift the x or y axis so that the first index is 0",default=False))
| 67.793103 | 135 | 0.729908 |
45b3ea8a4457f0d00365d362cbaf1592bf73cdff
| 296 |
py
|
Python
|
payment/templatetags/payment_extras.py
|
cleobatista/django-paypal-simple
|
9bb1f87b3c3d66636437c1ec71a14d87ccacdef6
|
[
"BSD-2-Clause"
] | null | null | null |
payment/templatetags/payment_extras.py
|
cleobatista/django-paypal-simple
|
9bb1f87b3c3d66636437c1ec71a14d87ccacdef6
|
[
"BSD-2-Clause"
] | null | null | null |
payment/templatetags/payment_extras.py
|
cleobatista/django-paypal-simple
|
9bb1f87b3c3d66636437c1ec71a14d87ccacdef6
|
[
"BSD-2-Clause"
] | null | null | null |
from django import template
register = template.Library()
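# Illustrative template usage (the context variable names are hypothetical):
#   {{ order.total|subtract:order.discount }}
#   {{ product.price|comma_to_dot }}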
@register.filter(name='subtract')
def subtract(value, arg):
if arg:
return value - arg
else:
return value
@register.filter(name='comma_to_dot')
def comma_to_dot(value):
return str(value).replace(',', '.')
| 17.411765 | 39 | 0.665541 |
78b4828e2b29a5f8e28452b62c51cb15376a25ca
| 5,047 |
py
|
Python
|
demo.py
|
McManning/DynamicProperties
|
36cc81249eed0c5d11c75d079f6b2b081edfa70c
|
[
"MIT"
] | 1 |
2021-09-02T16:22:25.000Z
|
2021-09-02T16:22:25.000Z
|
demo.py
|
McManning/DynamicProperties
|
36cc81249eed0c5d11c75d079f6b2b081edfa70c
|
[
"MIT"
] | null | null | null |
demo.py
|
McManning/DynamicProperties
|
36cc81249eed0c5d11c75d079f6b2b081edfa70c
|
[
"MIT"
] | 1 |
2021-05-29T10:14:15.000Z
|
2021-05-29T10:14:15.000Z
|
import bpy
from bpy.props import PointerProperty
# NOTE: this demo assumes DynamicProperties is importable from the add-on that
# provides it; the exact import path depends on how the add-on is packaged.
class DEMO_OT_change_fizz(bpy.types.Operator):
"""Example of replacing properties in a dynamic property group at runtime"""
bl_idname = 'demo_toggle.change_fizz'
bl_label = 'Change Fizz Properties'
def invoke(self, context, event):
fizz = DynamicProperties.find(bpy.types.Light, 'fizz')
if not fizz:
raise Exception('`fizz` is not registered')
# Clear out all old properties.
# Alternatively, you can do a .remove('prop_name') per property
fizz.clear()
# Existing ones we keep (current values will be maintained)
fizz.add_float('my_float', name='Foo Float', description='Test float')
fizz.add_rgb('diffuse', name='Diffuse', description='Diffuse color')
# New ones to add
fizz.add_float('float2', name='Newer Float', description='Test float 2')
fizz.add_bool('bool2', name='Newer Bool', description='Test Boolean 2')
fizz.add_str('str2', name='Newer String', description='Test String 2')
# Register changes with Blender.
fizz.register()
return {'FINISHED'}
class DEMO_PT_Toggle_DynamicPropertyGroups(bpy.types.Panel):
bl_label = 'Toggle Dynamic Property Group'
bl_space_type = 'PROPERTIES'
bl_region_type = 'WINDOW'
bl_context = 'data'
@classmethod
def poll(cls, context):
return context.light
def draw(self, context):
layout = self.layout
layout.use_property_split = True
layout.use_property_decorate = False
col = layout.column()
col.prop(context.light.demo_toggle, 'active_group')
# # If there's no active group yet, set one automatically
# if len(context.light.demo_toggle.active_group) < 1:
def update_active_group(self, context):
"""Switch enabled status between two dynamic property groups based on the selected active_group"""
active = context.light.demo_toggle.active_group
if active == 'fizz':
context.light.buzz.enabled = False
context.light.fizz.enabled = True
else:
context.light.buzz.enabled = True
context.light.fizz.enabled = False
class DemoToggleSettings(bpy.types.PropertyGroup):
active_group: bpy.props.EnumProperty(
name='Active Group',
# You would dynamically populate this based on... whatever
# (shaders available, etc)
items=[
('fizz', 'Fizz', 'Use the fizz property group'),
('buzz', 'Buzz', 'Use the buzz property group'),
],
update=update_active_group,
default='fizz'
)
@classmethod
def register(cls):
bpy.types.Light.demo_toggle = PointerProperty(
name='Toggle Dynamic Property Group',
type=cls
)
@classmethod
def unregister(cls):
del bpy.types.Light.demo_toggle
# To nest the dynamic properties under the toggle demo's panel
parent_id = 'DEMO_PT_Toggle_DynamicPropertyGroups'
# Dynamic property group - setup and configure
fizz = DynamicProperties(bpy.types.Light, 'fizz', 'Fizz Settings', panel_parent_id=parent_id)
fizz.add_float('my_float', name='Foo Float', description='Test float')
fizz.add_bool('my_bool', name='Foo Bool', description='Test Boolean')
fizz.add_str('my_str', name='Foo String', description='Some string description')
fizz.add_vec2('my_v2', name='Vec2 Test', description='Test Vec2')
fizz.add_vec3('my_v3', name='Vec3 Test', description='Test Vec3')
fizz.add_rgb('diffuse', name='Diffuse', description='Diffuse color')
fizz.add_rgba('rgba', name='RGBA Color')
fizz.add_enum('my_enum', name='My Enum', description='Some Enum',
items=[('foo', 'Foo', 'Foo Description'), ('bar', 'Bar', 'Bar Description')]
)
fizz.add_header('my_header', 'More Complicated Examples')
fizz.add_file('my_file', name='Filename', description='Some file to load')
fizz.add_file('my_dir', name='Directory', description='Some directory to load')
fizz.add_header('my_header_2', 'Another Group Of Stuff')
fizz.add_image('diffuse_tex', name='Diffuse Texture', description='Image file for diffuse texture')
# Another set - disabled by default
buzz = DynamicProperties(bpy.types.Light, 'buzz', 'Buzz Settings', enabled=False, panel_parent_id=parent_id)
buzz.add_float('my_float', name='Bar Float', description='Test float')
buzz.add_str('my_str', name='Bar String', description='Some string description')
def register():
# Demo stuff
bpy.utils.register_class(DEMO_PT_Toggle_DynamicPropertyGroups)
bpy.utils.register_class(DEMO_OT_change_fizz)
bpy.utils.register_class(DemoToggleSettings)
# Can happen at plugin register() or any time after
fizz.register()
buzz.register()
def unregister():
# Demo stuff
bpy.utils.unregister_class(DemoToggleSettings)
bpy.utils.unregister_class(DEMO_OT_change_fizz)
bpy.utils.unregister_class(DEMO_PT_Toggle_DynamicPropertyGroups)
# Can happen at plugin unregister() or any time before
fizz.unregister()
buzz.unregister()
if __name__ == "__main__":
register()
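# A minimal sketch of reading the registered group afterwards from Blender's Python
# console (assumes the active object is a light and the `fizz` group is enabled):
#
#     light = bpy.context.object.data
#     if light.fizz.enabled:
#         print(light.fizz.my_float, light.fizz.my_str)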
| 35.293706 | 108 | 0.690707 |
222bc7a247255b38116bfbb6e88d247cf3060896
| 350 |
py
|
Python
|
fipeapi/__version__.py
|
deibsoncarvalho/tabela-fipe-api
|
2890162e4436611326f0b878f647f344a8d52626
|
[
"Apache-2.0"
] | null | null | null |
fipeapi/__version__.py
|
deibsoncarvalho/tabela-fipe-api
|
2890162e4436611326f0b878f647f344a8d52626
|
[
"Apache-2.0"
] | null | null | null |
fipeapi/__version__.py
|
deibsoncarvalho/tabela-fipe-api
|
2890162e4436611326f0b878f647f344a8d52626
|
[
"Apache-2.0"
] | null | null | null |
__title__ = 'fipeapi'
__description__ = 'Unofficial Python API for REST requests to look up vehicle prices (FIPE table).'
__url__ = 'https://github.com/deibsoncarvalho/tabela-fipe-api'
__version__ = '0.1.0'
__author__ = 'Deibson Carvalho'
__author_email__ = '[email protected]'
__license__ = 'Apache 2.0'
__copyright__ = 'Copyright 2020 Deibson Carvalho'
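# These dunder strings are typically read back by the package's setup script; a
# minimal sketch of that pattern (the file path below is an assumption):
#
#     about = {}
#     with open("fipeapi/__version__.py") as fh:
#         exec(fh.read(), about)
#     # setup(name=about["__title__"], version=about["__version__"], ...)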
| 38.888889 | 89 | 0.78 |
fcff5585bc12a75f274bd29236648d5b201a2f2d
| 31,369 |
py
|
Python
|
python/paddle/tensor/linalg.py
|
mamingjie-China/Paddle
|
91d2f1e3e6e51142a74a43d0673a8feff056c39b
|
[
"Apache-2.0"
] | null | null | null |
python/paddle/tensor/linalg.py
|
mamingjie-China/Paddle
|
91d2f1e3e6e51142a74a43d0673a8feff056c39b
|
[
"Apache-2.0"
] | null | null | null |
python/paddle/tensor/linalg.py
|
mamingjie-China/Paddle
|
91d2f1e3e6e51142a74a43d0673a8feff056c39b
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from paddle.common_ops_import import *
from ..fluid.layer_helper import LayerHelper
from ..fluid.data_feeder import check_variable_and_dtype, check_type
from ..fluid.framework import in_dygraph_mode, _varbase_creator
from ..fluid.layers import transpose #DEFINE_ALIAS
__all__ = [
'matmul',
'dot',
# 'einsum',
'norm',
'transpose',
'dist',
't',
'cross',
'cholesky',
# 'tensordot',
'bmm',
'histogram'
]
def matmul(x, y, transpose_x=False, transpose_y=False, alpha=1.0, name=None):
"""
:alias_main: paddle.matmul
:alias: paddle.matmul,paddle.tensor.matmul,paddle.tensor.linalg.matmul
Applies matrix multiplication to two tensors.
    Currently, the input tensors can have any rank, but when the rank of either
    input is bigger than 3, the two inputs must have the same rank.
The actual behavior depends on the shapes of :math:`x`, :math:`y` and the
flag values of :attr:`transpose_x`, :attr:`transpose_y`. Specifically:
- If a transpose flag is specified, the last two dimensions of the tensor
are transposed. If the tensor is rank-1 of shape :math:`[D]`, then for
:math:`x` it is treated as :math:`[1, D]` in nontransposed form and as
:math:`[D, 1]` in transposed form, whereas for :math:`y` it is the
opposite: It is treated as :math:`[D, 1]` in nontransposed form and as
:math:`[1, D]` in transposed form.
    - After transpose, the two tensors are 2-D or n-D, and the matrix multiplication
      proceeds in the following way.
- If both are 2-D, they are multiplied like conventional matrices.
- If either is n-D, it is treated as a stack of matrices residing in the
last two dimensions and a batched matrix multiply supporting broadcast
applies on the two tensors.
Also note that if the raw tensor :math:`x` or :math:`y` is rank-1 and
nontransposed, the prepended or appended dimension :math:`1` will be
removed after matrix multiplication.
Args:
x (Variable): The input variable which is a Tensor or LoDTensor.
y (Variable): The input variable which is a Tensor or LoDTensor.
transpose_x (bool): Whether to transpose :math:`x` before multiplication.
transpose_y (bool): Whether to transpose :math:`y` before multiplication.
alpha (float): The scale of output. Default 1.0.
name(str|None): A name for this layer(optional). If set None, the layer
will be named automatically.
Returns:
Variable: The product Tensor (or LoDTensor) variable.
Examples:
.. code-block:: python
# Examples to clarify shapes of the inputs and output
# x: [B, ..., M, K], y: [B, ..., K, N]
# paddle.matmul(x, y) # out: [B, ..., M, N]
# x: [B, M, K], y: [B, K, N]
# paddle.matmul(x, y) # out: [B, M, N]
# x: [B, M, K], y: [K, N]
# paddle.matmul(x, y) # out: [B, M, N]
# x: [M, K], y: [K, N]
# paddle.matmul(x, y) # out: [M, N]
# x: [B, M, K], y: [K]
# paddle.matmul(x, y) # out: [B, M]
# x: [K], y: [K]
# paddle.matmul(x, y) # out: [1]
# x: [M], y: [N]
# paddle.matmul(x, y, True, True) # out: [M, N]
import paddle
import paddle.fluid as fluid
x = fluid.data(name='x', shape=[2, 3], dtype='float32')
y = fluid.data(name='y', shape=[3, 2], dtype='float32')
out = paddle.matmul(x, y, True, True)
"""
attrs = {
'transpose_X': transpose_x,
'transpose_Y': transpose_y,
'alpha': float(alpha),
}
if in_dygraph_mode():
out = _varbase_creator(dtype=x.dtype)
core.ops.matmul(x, y, out, 'transpose_X', transpose_x, 'transpose_Y',
transpose_y, 'alpha', float(alpha))
return out
def __check_input(x, y):
var_names = {'x': x, 'y': y}
for name, val in var_names.items():
check_variable_and_dtype(
val, name, ['float16', 'float32', 'float64'], 'matmul')
x_shape = list(x.shape)
y_shape = list(y.shape)
if len(x_shape) == 1:
x_shape = [1] + x_shape
if len(y_shape) == 1:
y_shape = y_shape + [1]
# check the inner 2 dimensions
if transpose_x:
x_shape[-2], x_shape[-1] = x_shape[-1], x_shape[-2]
if transpose_y:
y_shape[-2], y_shape[-1] = y_shape[-1], y_shape[-2]
if x_shape[-1] != y_shape[-2]:
assert (x_shape[-1] == -1) or (y_shape[-2] == -1), \
"After performing an optional transpose, Input X's width should be " \
"equal to Y's width for multiplication " \
"prerequisites. But received X's shape: %s, Y's shape: %s\n" % \
(x_shape, y_shape)
if len(y_shape) > 2 and len(x_shape) > 2:
for i, dim_x in enumerate(x_shape[:-2]):
# don't check neg shape
if dim_x < 0 or y_shape[i] < 0:
continue
if dim_x != y_shape[i]:
raise ValueError(
"When the matrix is larger than 2 dimensions, the higher "
"dimensional values of the two matrices need to be equal. "
"But received x_shape[%d] != y_shape[%d]. X's shape: %s, "
"Y's shape: %s.\n" % (i, i, x_shape, y_shape))
__check_input(x, y)
helper = LayerHelper('matmul', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type='matmul',
inputs={'X': x,
'Y': y},
outputs={'Out': out},
attrs=attrs)
return out
def norm(input, p='fro', axis=None, keepdim=False, out=None, name=None):
"""
:alias_main: paddle.norm
:alias: paddle.norm,paddle.tensor.norm,paddle.tensor.linalg.norm
Returns the matrix norm (Frobenius) or vector norm (the 1-norm, the Euclidean
or 2-norm, and in general the p-norm for p > 0) of a given tensor.
Args:
input (Variable): The input tensor could be N-D tensor, and the input data
type could be float32 or float64.
p (float|string, optional): Order of the norm. Supported values are `fro`, `1`, `2`,
and any positive real number yielding the corresponding p-norm.
axis (int|list, optional): The axis on which to apply norm operation. If axis is int
or list with only one element, the vector norm is computed over the axis.
If axis is a list with two elements, the matrix norm is computed over the axis.
If `axis < 0`, the dimension to norm operation is rank(input) + axis.
keepdim (bool, optional): Whether to reserve the reduced dimension in the
output Tensor. The result tensor will have fewer dimension
than the :attr:`input` unless :attr:`keepdim` is true, default
value is False.
out (Variable, optional): The output tensor, default value is None. It's data type
must be the same as the input Tensor.
name (str, optional): The default value is None. Normally there is no need for
user to set this property. For more information, please refer to :ref:`api_guide_Name`.
Returns:
Variable: Tensor, results of norm operation on the specified axis of input tensor,
it's data type is the same as input's Tensor.
Raises:
TypeError, if out data type is different with the input data type.
ValueError, If `p` or `axis` is invalid.
Examples:
.. code-block:: python
import paddle
import paddle.fluid as fluid
x = fluid.data(name='x', shape=[2, 3, 5], dtype='float64')
# compute frobenius norm along last two dimensions.
out_fro = paddle.norm(x, p='fro', axis=[1,2])
# compute 2-order vector norm along last dimension.
out_pnorm = paddle.norm(x, p=2, axis=-1)
"""
def frobenius_norm(input, dim=None, keepdim=False, out=None, name=None):
"""
The frobenius norm OP is to calculate the frobenius norm of certain two dimensions of Tensor `input`.
Args:
input (Variable): Tensor, data type float32, float64.
dim (list, optional): None for last two dimensions.
keepdim (bool, optional): Whether keep the dimensions as the `input`, Default False.
out (Variable, optional): The tensor variable storing the output.
"""
if dim is not None and not (isinstance(dim, list) and len(dim) == 2):
raise ValueError(
"The dim of frobenius norm op should be None or two elements list!"
)
attrs = {
            'dim': dim if dim is not None else [-2, -1],
'keep_dim': keepdim,
'reduce_all': False
}
if len(attrs['dim']) == len(input.shape):
attrs['reduce_all'] = True
check_variable_and_dtype(input, 'input', ['float32', 'float64'],
'frobenius_norm')
helper = LayerHelper('frobenius_norm', **locals())
if out is None:
out = helper.create_variable_for_type_inference(
dtype=helper.input_dtype())
else:
check_type(out, 'out', (Variable), 'frobenius_norm')
check_dtype(
out.dtype, out.name,
convert_dtype(input.dtype), 'frobenius_norm',
'(The out data type in frobenius_norm must be the same with input data type.)'
)
helper.append_op(
type='frobenius_norm',
inputs={'X': input},
outputs={'Out': out},
attrs=attrs)
return out
def vector_norm(input,
porder=None,
axis=None,
keepdim=False,
out=None,
name=None):
"""
Calculate the p-order vector norm for certain dimension of Tensor `input`.
Args:
input (Variable): Tensor, data type float32, float64.
porder (float, optional): None for porder=2.0.
axis (int, optional): None for last dimension.
keepdim (bool, optional): Whether keep the dimensions as the `input`, Default False.
out (Variable, optional): The tensor variable storing the output.
"""
if porder is not None:
check_type(porder, 'porder', (float, int), 'p_norm')
if axis is not None:
check_type(axis, 'axis', (int), 'p_norm')
attrs = {
'axis': axis if axis is not None else -1,
'porder': float(porder) if porder is not None else 2.0,
'keepdim': keepdim,
'epsilon': 1e-12,
}
check_variable_and_dtype(input, 'input', ['float32', 'float64'],
'p_norm')
helper = LayerHelper('p_norm', **locals())
if out is None:
out = helper.create_variable_for_type_inference(
dtype=helper.input_dtype())
else:
check_type(out, 'out', (Variable), 'p_norm')
check_dtype(
out.dtype, out.name,
convert_dtype(input.dtype), 'p_norm',
'(The out data type in p_norm must be the same with input data type.)'
)
helper.append_op(
type='p_norm',
inputs={'X': input},
outputs={'Out': out},
attrs=attrs)
return out
if axis is None and p is not None:
if isinstance(p, str):
if p == "fro":
return frobenius_norm(
input, dim=axis, keepdim=keepdim, out=out, name=name)
else:
raise ValueError(
"only valid string values are 'fro', found {}".format(p))
elif isinstance(p, (int, float)):
return vector_norm(
input, porder=p, axis=axis, keepdim=keepdim, out=out, name=name)
else:
raise ValueError("only valid p type is string or float, found {}".
format(type(p)))
if isinstance(axis, list) and len(axis) == 1:
axis = axis[0]
#calculate vector norm, where axis is int or list with only one integer
if isinstance(axis, int):
if isinstance(p, (int, float)):
return vector_norm(
input, axis=axis, porder=p, keepdim=keepdim, out=out, name=name)
else:
            raise ValueError(
                "unsupported p for p-order vector norm. Expected float, found {}".
                format(p))
#calculate matrix norm, where axis is list with two integers
elif isinstance(axis, list) and len(axis) == 2:
if p == "fro":
return frobenius_norm(
input, dim=axis, keepdim=keepdim, out=out, name=name)
else:
            raise ValueError(
                "unsupported p for matrix norm, expected 'fro', found {}".format(p))
else:
        raise ValueError(
            "expected axis type int or list (length of list <= 2), found {}".
            format(axis))
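# For intuition, the docstring examples of `norm` roughly correspond to these NumPy
# computations (illustrative sketch only; paddle additionally handles dtype checks
# and static/dygraph modes):
#
#     import numpy as np
#     a = np.random.rand(2, 3, 5)
#     np.sqrt((a ** 2).sum(axis=(1, 2)))        # ~ paddle.norm(x, p='fro', axis=[1, 2])
#     ((np.abs(a) ** 2).sum(axis=-1)) ** 0.5    # ~ paddle.norm(x, p=2, axis=-1)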
def dist(x, y, p=2):
"""
:alias_main: paddle.dist
:alias: paddle.dist,paddle.tensor.dist,paddle.tensor.linalg.dist
    This OP returns the p-norm of (x - y). It is not a norm in a strict sense, only a measure
of distance. The shapes of x and y must be broadcastable. The definition is as follows, for
details, please refer to the `numpy's broadcasting <https://docs.scipy.org/doc/numpy/user/basics.broadcasting.html>`_:
- Each input has at least one dimension.
- Match the two input dimensions from back to front, the dimension sizes must either be equal, one of them is 1, or one of them does not exist.
Where, z = x - y, the shapes of x and y are broadcastable, then the shape of z can be
obtained as follows:
1. If the number of dimensions of x and y are not equal, prepend 1 to the dimensions of the
tensor with fewer dimensions.
For example, The shape of x is [8, 1, 6, 1], the shape of y is [7, 1, 5], prepend 1 to the
dimension of y.
x (4-D Tensor): 8 x 1 x 6 x 1
y (4-D Tensor): 1 x 7 x 1 x 5
2. Determine the size of each dimension of the output z: choose the maximum value from the
two input dimensions.
z (4-D Tensor): 8 x 7 x 6 x 5
If the number of dimensions of the two inputs are the same, the size of the output can be
directly determined in step 2. When p takes different values, the norm formula is as follows:
When p = 0, defining $0^0=0$, the zero-norm of z is simply the number of non-zero elements of z.
.. math::
||z||_{0}=\lim_{p \\rightarrow 0}\sum_{i=1}^{m}|z_i|^{p}
When p = inf, the inf-norm of z is the maximum element of z.
.. math::
||z||_\infty=\max_i |z_i|
When p = -inf, the negative-inf-norm of z is the minimum element of z.
.. math::
||z||_{-\infty}=\min_i |z_i|
Otherwise, the p-norm of z follows the formula,
.. math::
||z||_{p}=(\sum_{i=1}^{m}|z_i|^p)^{\\frac{1}{p}}
Args:
x (Variable): 1-D to 6-D Tensor, its data type is float32 or float64.
y (Variable): 1-D to 6-D Tensor, its data type is float32 or float64.
p (float, optional): The norm to be computed, its data type is float32 or float64. Default: 2.
Returns:
Variable: Tensor that is the p-norm of (x - y).
Examples:
.. code-block:: python
import paddle
import paddle.fluid as fluid
import numpy as np
with fluid.dygraph.guard():
x = fluid.dygraph.to_variable(np.array([[3, 3],[3, 3]]).astype(np.float32))
y = fluid.dygraph.to_variable(np.array([[3, 3],[3, 1]]).astype(np.float32))
out = paddle.dist(x, y, 0)
print(out.numpy()) # out = [1.]
out = paddle.dist(x, y, 2)
print(out.numpy()) # out = [2.]
out = paddle.dist(x, y, float("inf"))
print(out.numpy()) # out = [2.]
out = paddle.dist(x, y, float("-inf"))
print(out.numpy()) # out = [0.]
"""
check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'dist')
check_variable_and_dtype(y, 'dtype', ['float32', 'float64'], 'dist')
check_type(p, 'p', (float, int), 'dist')
helper = LayerHelper("dist", **locals())
out = helper.create_variable_for_type_inference(x.dtype)
inputs = {"X": [x], "Y": [y]}
outputs = {'Out': [out]}
attrs = {"p": float(p)}
helper.append_op(
type='dist', inputs=inputs, outputs={'Out': out}, attrs=attrs)
return out
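# The docstring example of `dist` can be reproduced with NumPy for intuition
# (illustrative sketch only):
#
#     import numpy as np
#     x = np.array([[3., 3.], [3., 3.]]); y = np.array([[3., 3.], [3., 1.]])
#     z = x - y                            # broadcasting applies for unequal shapes
#     np.count_nonzero(z)                  # p = 0    -> 1
#     np.sqrt((z ** 2).sum())              # p = 2    -> 2.0
#     np.abs(z).max(), np.abs(z).min()     # p = +inf / -inf -> 2.0, 0.0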
def dot(x, y, name=None):
"""
:alias_main: paddle.dot
:alias: paddle.dot,paddle.tensor.dot,paddle.tensor.linalg.dot
This operator calculates inner product for vectors.
.. note::
Only support 1-d Tensor(vector).
Parameters:
x(Variable): 1-D ``Tensor`` or ``LoDTensor``. Its datatype should be ``float32``, ``float64``, ``int32``, ``int64``
        y(Variable): 1-D ``Tensor`` or ``LoDTensor``. Its datatype should be ``float32``, ``float64``, ``int32``, ``int64``
name(str, optional): Name of the output. Default is None. It's used to print debug info for developers. Details: :ref:`api_guide_Name`
Returns:
Variable: the calculated result Tensor/LoDTensor.
Examples:
.. code-block:: python
import paddle
import paddle.fluid as fluid
import numpy as np
with fluid.dygraph.guard():
x = fluid.dygraph.to_variable(np.random.uniform(0.1, 1, [10]).astype(np.float32))
y = fluid.dygraph.to_variable(np.random.uniform(1, 3, [10]).astype(np.float32))
z = paddle.dot(x, y)
print(z.numpy())
"""
op_type = 'dot'
# skip var type check in dygraph mode to improve efficiency
if in_dygraph_mode():
op = getattr(core.ops, op_type)
return op(x, y)
assert x is not None, 'x cannot be None in {}'.format(op_type)
assert y is not None, 'y cannot be None in {}'.format(op_type)
check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'],
op_type)
check_variable_and_dtype(y, 'y', ['float32', 'float64', 'int32', 'int64'],
op_type)
helper = LayerHelper(op_type, **locals())
if name is None:
out = helper.create_variable_for_type_inference(dtype=x.dtype)
else:
out = helper.create_variable(
name=name, dtype=x.dtype, persistable=False)
helper.append_op(
type="dot", inputs={'X': x,
'Y': y}, attrs={}, outputs={"Out": out})
return out
def t(input, name=None):
"""
:alias_main: paddle.t
:alias: paddle.t,paddle.tensor.t,paddle.tensor.linalg.t
Transpose <=2-D tensor.
    0-D and 1-D tensors are returned as-is, while for a 2-D tensor this is equivalent
    to the fluid.layers.transpose function with perm set to [1, 0].
Args:
input (Variable): The input Tensor. It is a N-D (N<=2) Tensor of data types float16, float32, float64, int32.
name(str, optional): The default value is None. Normally there is no need for
user to set this property. For more information, please refer to :ref:`api_guide_Name`
Returns:
Variable: A transposed n-D Tensor, with data type being float16, float32, float64, int32, int64.
For Example:
.. code-block:: text
# Example 1 (0-D tensor)
x = tensor([0.79])
paddle.t(x) = tensor([0.79])
# Example 2 (1-D tensor)
x = tensor([0.79, 0.84, 0.32])
paddle.t(x) = tensor([0.79, 0.84, 0.32])
# Example 3 (2-D tensor)
x = tensor([0.79, 0.84, 0.32],
[0.64, 0.14, 0.57])
paddle.t(x) = tensor([0.79, 0.64],
[0.84, 0.14],
[0.32, 0.57])
Examples:
.. code-block:: python
import paddle
import paddle.fluid as fluid
x = fluid.data(name='x', shape=[2, 3],
dtype='float32')
x_transposed = paddle.t(x)
        print(x_transposed.shape)
#(3L, 2L)
"""
if len(input.shape) > 2:
        raise ValueError(
            "Input(input) only supports N-D (N<=2) tensors, but received "
"length of Input(input) is %s. Perhaps you can use paddle."
"tensor.transpose() instead." % len(input.shape))
if in_dygraph_mode():
if len(input.shape) == 1:
return input
# 2-D tensor
perm = [1, 0]
out, _ = core.ops.transpose2(input, 'axis', perm)
return out
check_variable_and_dtype(
input, 'input', ['float16', 'float32', 'float64', 'int32', 'int64'],
'transpose')
helper = LayerHelper('t', **locals())
out = helper.create_variable_for_type_inference(input.dtype)
input_shape = helper.create_variable_for_type_inference(input.dtype)
if len(input.shape) == 1:
out = input
else:
helper.append_op(
type='transpose2',
inputs={'X': [input]},
outputs={'Out': [out],
'XShape': [input_shape]},
attrs={'axis': [1, 0]})
return out
def cross(x, y, axis=None, name=None):
"""
:alias_main: paddle.cross
:alias: paddle.cross,paddle.tensor.cross,paddle.tensor.linalg.cross
Computes the cross product between two tensors along an axis.
Inputs must have the same shape, and the length of their axes should be equal to 3.
If `axis` is not given, it defaults to the first axis found with the length 3.
Args:
x (Variable): The first input tensor variable.
y (Variable): The second input tensor variable.
axis (int, optional): The axis along which to compute the cross product. It defaults to the first axis found with the length 3.
name (str, optional): The default value is None. Normally there is no need for
user to set this property. For more information, please refer to :ref:`api_guide_Name`
Returns:
Variable: A Tensor with same data type as `x`.
Examples:
.. code-block:: python
import paddle
from paddle.imperative import to_variable
import numpy as np
paddle.enable_imperative()
data_x = np.array([[1.0, 1.0, 1.0],
[2.0, 2.0, 2.0],
[3.0, 3.0, 3.0]])
data_y = np.array([[1.0, 1.0, 1.0],
[1.0, 1.0, 1.0],
[1.0, 1.0, 1.0]])
x = to_variable(data_x)
y = to_variable(data_y)
z1 = paddle.cross(x, y)
print(z1.numpy())
# [[-1. -1. -1.]
# [ 2. 2. 2.]
# [-1. -1. -1.]]
z2 = paddle.cross(x, y, axis=1)
print(z2.numpy())
# [[0. 0. 0.]
# [0. 0. 0.]
# [0. 0. 0.]]
"""
if in_dygraph_mode():
if axis is not None:
return core.ops.cross(x, y, 'dim', axis)
else:
return core.ops.cross(x, y)
helper = LayerHelper("cross", **locals())
out = helper.create_variable_for_type_inference(x.dtype)
attrs = dict()
attrs['dim'] = axis
helper.append_op(
type='cross',
inputs={'X': x,
'Y': y},
outputs={'Out': out},
attrs=attrs)
return out
def cholesky(x, upper=False):
"""
:alias_main: paddle.cholesky
:alias: paddle.cholesky,paddle.tensor.cholesky,paddle.tensor.linalg.cholesky
Computes the Cholesky decomposition of one symmetric positive-definite
    matrix or batches of symmetric positive-definite matrices.
If `upper` is `True`, the decomposition has the form :math:`A = U^{T}U` ,
and the returned matrix :math:`U` is upper-triangular. Otherwise, the
decomposition has the form :math:`A = LL^{T}` , and the returned matrix
:math:`L` is lower-triangular.
Args:
x (Variable): The input tensor. Its shape should be `[*, M, M]`,
where * is zero or more batch dimensions, and matrices on the
inner-most 2 dimensions all should be symmetric positive-definite.
Its data type should be float32 or float64.
upper (bool): The flag indicating whether to return upper or lower
triangular matrices. Default: False.
Returns:
Variable: A Tensor with same shape and data type as `x`. It represents \
triangular matrices generated by Cholesky decomposition.
Examples:
.. code-block:: python
import paddle
import paddle.fluid as fluid
import numpy as np
with fluid.dygraph.guard():
a = np.random.rand(3, 3)
a_t = np.transpose(a, [1, 0])
x = np.matmul(a, a_t) + 1e-03
x = fluid.dygraph.to_variable(x)
out = paddle.cholesky(x, upper=False)
print(out.numpy())
# [[1.190523 0. 0. ]
# [0.9906703 0.27676893 0. ]
# [1.25450498 0.05600871 0.06400121]]
"""
check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'cholesky')
check_type(upper, 'upper', bool, 'cholesky')
helper = LayerHelper('cholesky', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type='cholesky',
inputs={'X': [x]},
outputs={'Out': out},
attrs={'upper': upper})
return out
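# A quick NumPy sanity check of the decomposition property described above
# (illustrative sketch; the added identity term just ensures positive definiteness):
#
#     import numpy as np
#     a = np.random.rand(3, 3)
#     x = a @ a.T + 1e-3 * np.eye(3)
#     L = np.linalg.cholesky(x)            # lower triangular
#     np.allclose(L @ L.T, x)              # -> True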
def bmm(x, y, name=None):
"""
:alias_main: paddle.bmm
:alias: paddle.bmm,paddle.tensor.bmm,paddle.tensor.linalg.bmm
Applies batched matrix multiplication to two tensors.
    Both input tensors must be three-dimensional and share the same batch size.
if x is a (b, m, k) tensor, y is a (b, k, n) tensor, the output will be a (b, m, n) tensor.
Args:
x (Variable): The input variable which is a Tensor or LoDTensor.
y (Variable): The input variable which is a Tensor or LoDTensor.
name(str|None): A name for this layer(optional). If set None, the layer
will be named automatically.
Returns:
Variable: The product Tensor (or LoDTensor) variable.
Examples:
        import paddle
        import numpy as np
# In imperative mode:
# size input1: (2, 2, 3) and input2: (2, 3, 2)
input1 = np.array([[[1.0, 1.0, 1.0],[2.0, 2.0, 2.0]],[[3.0, 3.0, 3.0],[4.0, 4.0, 4.0]]])
input2 = np.array([[[1.0, 1.0],[2.0, 2.0],[3.0, 3.0]],[[4.0, 4.0],[5.0, 5.0],[6.0, 6.0]]])
paddle.enable_imperative()
x = paddle.imperative.to_variable(input1)
y = paddle.imperative.to_variable(input2)
out = paddle.bmm(x, y)
#output size: (2, 2, 2)
#output value:
#[[[6.0, 6.0],[12.0, 12.0]],[[45.0, 45.0],[60.0, 60.0]]]
out_np = out.numpy()
"""
x_shape = x.shape
y_shape = y.shape
if not len(x_shape) == len(y_shape) == 3:
        raise ValueError(
            "x and y should be 3-dimensional. But received x's dimension: {}, y's dimension: {}".
format(x_shape, y_shape))
if x_shape[2] != y_shape[1]:
        raise ValueError(
            "x's width must be equal to y's height. But received x's shape: {}, y's shape: {}".
format(x_shape, y_shape))
helper = LayerHelper('bmm', **locals())
if in_dygraph_mode():
return core.ops.bmm(x, y)
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(type='bmm', inputs={'X': x, 'Y': y}, outputs={'Out': out})
return out
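# Shape intuition for `bmm` (sketch): NumPy's matmul follows the same batched rule,
# so (b, m, k) x (b, k, n) -> (b, m, n):
#
#     import numpy as np
#     np.matmul(np.ones((2, 2, 3)), np.ones((2, 3, 2))).shape   # -> (2, 2, 2)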
def histogram(input, bins=100, min=0, max=0):
"""
Computes the histogram of a tensor. The elements are sorted into equal width bins between min and max.
If min and max are both zero, the minimum and maximum values of the data are used.
Args:
input (Variable): A Tensor(or LoDTensor) with shape :math:`[N_1, N_2,..., N_k]` . The data type of the input Tensor
should be float32, float64, int32, int64.
bins (int): number of histogram bins
min (int): lower end of the range (inclusive)
max (int): upper end of the range (inclusive)
Returns:
Variable: Tensor or LoDTensor calculated by histogram layer. The data type is int64.
Code Example 1:
.. code-block:: python
import paddle
import numpy as np
startup_program = paddle.Program()
train_program = paddle.Program()
with paddle.program_guard(train_program, startup_program):
inputs = paddle.data(name='input', dtype='int32', shape=[2,3])
output = paddle.histogram(inputs, bins=5, min=1, max=5)
place = paddle.CPUPlace()
exe = paddle.Executor(place)
exe.run(startup_program)
img = np.array([[2, 4, 2], [2, 5, 4]]).astype(np.int32)
res = exe.run(train_program,
feed={'input': img},
fetch_list=[output])
print(np.array(res[0])) # [0,3,0,2,1]
Code Example 2:
.. code-block:: python
import paddle
import numpy as np
with paddle.imperative.guard(paddle.CPUPlace()):
        inputs_np = np.array([1, 2, 1]).astype(np.float64)
inputs = paddle.imperative.to_variable(inputs_np)
result = paddle.histogram(inputs, bins=4, min=0, max=3)
print(result) # [0, 2, 1, 0]
"""
if in_dygraph_mode():
return core.ops.histogram(input, "bins", bins, "min", min, "max", max)
helper = LayerHelper('histogram', **locals())
check_variable_and_dtype(
input, 'X', ['int32', 'int64', 'float32', 'float64'], 'histogram')
out = helper.create_variable_for_type_inference(VarDesc.VarType.INT64)
helper.append_op(
type='histogram',
inputs={'X': input},
outputs={'Out': out},
attrs={'bins': bins,
'min': min,
'max': max})
return out
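# The first docstring example of `histogram` matches this NumPy call (sketch only;
# paddle returns int64 counts):
#
#     import numpy as np
#     img = np.array([[2, 4, 2], [2, 5, 4]])
#     np.histogram(img, bins=5, range=(1, 5))[0]   # -> [0, 3, 0, 2, 1]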
| 38.069175 | 147 | 0.566929 |
ccb7c984d9f0691dcd554cd5d6587617d317ec77
| 333 |
py
|
Python
|
PantherC3D/Environment/Value.py
|
pablorocad/S2OLC2-C
|
a452e4de83f315802ad8237d44c477ebf04881f0
|
[
"MIT"
] | null | null | null |
PantherC3D/Environment/Value.py
|
pablorocad/S2OLC2-C
|
a452e4de83f315802ad8237d44c477ebf04881f0
|
[
"MIT"
] | null | null | null |
PantherC3D/Environment/Value.py
|
pablorocad/S2OLC2-C
|
a452e4de83f315802ad8237d44c477ebf04881f0
|
[
"MIT"
] | 1 |
2021-09-11T14:43:07.000Z
|
2021-09-11T14:43:07.000Z
|
from Enum.typeExpression import typeExpression
class Value:
def __init__(self,value:str, isTemp:bool, type: typeExpression) -> None:
self.value = value
self.isTemp = isTemp
self.type = type
self.trueLabel = ""
self.falseLabel = ""
def getValue(self) -> str:
return self.value
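# A minimal usage sketch (the enum member below is an assumption about how
# typeExpression is defined):
#
#     temp = Value("t1", True, typeExpression.INT)
#     temp.trueLabel = "L1"
#     print(temp.getValue())   # -> "t1"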
| 27.75 | 76 | 0.627628 |
83a78cb80891df123fee3b9d705e77fac914e95b
| 498 |
py
|
Python
|
analis de algoritmos/tarea1/graficadora.py
|
luisjimenez6245/escom
|
a1ae1f988d02f88844f5d29fba75e7cee04998db
|
[
"MIT"
] | null | null | null |
analis de algoritmos/tarea1/graficadora.py
|
luisjimenez6245/escom
|
a1ae1f988d02f88844f5d29fba75e7cee04998db
|
[
"MIT"
] | null | null | null |
analis de algoritmos/tarea1/graficadora.py
|
luisjimenez6245/escom
|
a1ae1f988d02f88844f5d29fba75e7cee04998db
|
[
"MIT"
] | 1 |
2020-03-03T04:16:42.000Z
|
2020-03-03T04:16:42.000Z
|
import numpy as np
import math
arr = [-1, 0, 1, 2, 3, 5, 15, 20, 100, 409, 500, 593,
1000, 1471, 1500, 2801, 3000, 5000, 10000, 20000]
result = []
def log2(num):
    # Return log2(num); fall back to 0 when the result is not a finite number
    # (np.log2(0) -> -inf, np.log2 of a negative number -> nan).
    try:
        r = np.log2(num)
        if not np.isfinite(r):
            return 0
        return r
    except Exception:
        return 0
# Evaluate the operation-count formula T(n) = 7*(n-1) + 2*n + 3 for every input
# size in arr; the formula is kept as a string so it can be swapped easily.
for n in arr:
    form = "7*(n-1)+2*n+3"
    eval("result.append(math.floor("+form+"))")
for item in result:
print(item)
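# An equivalent, eval-free way to compute the same values (sketch kept separate so
# the original string-based approach above stays unchanged):
#
#     def cost(n):
#         return math.floor(7 * (n - 1) + 2 * n + 3)
#     assert [cost(n) for n in arr] == result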
| 17.785714 | 65 | 0.504016 |
a6e98b5348407f916579808e4efb1196e8e1210b
| 2,634 |
py
|
Python
|
aliyun-python-sdk-emr/aliyunsdkemr/request/v20160408/CreateDataSourceRequest.py
|
bricklayer-Liu/aliyun-openapi-python-sdk
|
20da2554de22679fc7c5462c483663e4d79512aa
|
[
"Apache-2.0"
] | 1 |
2021-03-08T02:59:17.000Z
|
2021-03-08T02:59:17.000Z
|
aliyun-python-sdk-emr/aliyunsdkemr/request/v20160408/CreateDataSourceRequest.py
|
bricklayer-Liu/aliyun-openapi-python-sdk
|
20da2554de22679fc7c5462c483663e4d79512aa
|
[
"Apache-2.0"
] | null | null | null |
aliyun-python-sdk-emr/aliyunsdkemr/request/v20160408/CreateDataSourceRequest.py
|
bricklayer-Liu/aliyun-openapi-python-sdk
|
20da2554de22679fc7c5462c483663e4d79512aa
|
[
"Apache-2.0"
] | null | null | null |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkemr.endpoint import endpoint_data
class CreateDataSourceRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Emr', '2016-04-08', 'CreateDataSource')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ResourceOwnerId(self):
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self,ResourceOwnerId):
self.add_query_param('ResourceOwnerId',ResourceOwnerId)
def get_NavParentId(self):
return self.get_query_params().get('NavParentId')
def set_NavParentId(self,NavParentId):
self.add_query_param('NavParentId',NavParentId)
def get_Description(self):
return self.get_query_params().get('Description')
def set_Description(self,Description):
self.add_query_param('Description',Description)
def get_Conf(self):
return self.get_query_params().get('Conf')
def set_Conf(self,Conf):
self.add_query_param('Conf',Conf)
def get_ClusterId(self):
return self.get_query_params().get('ClusterId')
def set_ClusterId(self,ClusterId):
self.add_query_param('ClusterId',ClusterId)
def get_ResourceGroupId(self):
return self.get_query_params().get('ResourceGroupId')
def set_ResourceGroupId(self,ResourceGroupId):
self.add_query_param('ResourceGroupId',ResourceGroupId)
def get_Name(self):
return self.get_query_params().get('Name')
def set_Name(self,Name):
self.add_query_param('Name',Name)
def get_SourceType(self):
return self.get_query_params().get('SourceType')
def set_SourceType(self,SourceType):
self.add_query_param('SourceType',SourceType)
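# A minimal usage sketch (region, credentials and field values below are
# placeholders; the client call follows the usual aliyunsdkcore pattern):
#
#     from aliyunsdkcore.client import AcsClient
#     client = AcsClient("<access-key-id>", "<access-key-secret>", "cn-hangzhou")
#     request = CreateDataSourceRequest()
#     request.set_Name("my-source")
#     request.set_SourceType("...")
#     response = client.do_action_with_exception(request)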
| 32.925 | 74 | 0.762718 |
c75fc38b56dd9bad85e413a5b7e742490c0acc07
| 3,288 |
py
|
Python
|
mysite/settings.py
|
UAACC/pro
|
c5424574427ac3521cb70b0d62b841fa128f5166
|
[
"MIT"
] | null | null | null |
mysite/settings.py
|
UAACC/pro
|
c5424574427ac3521cb70b0d62b841fa128f5166
|
[
"MIT"
] | null | null | null |
mysite/settings.py
|
UAACC/pro
|
c5424574427ac3521cb70b0d62b841fa128f5166
|
[
"MIT"
] | null | null | null |
"""
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 3.1.7.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'pmkbeepo#g1__ys#^ce^%6h_=j6g)ky=5)#=h660z$h97l&n4t'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'corsheaders',
'api'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
    # CorsMiddleware must sit before CommonMiddleware for the installed
    # 'corsheaders' app to actually add CORS headers to responses.
    'corsheaders.middleware.CorsMiddleware',
    'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
REST_FRAMEWORK = {
    'DEFAULT_PERMISSION_CLASSES': [
        'rest_framework.permissions.AllowAny',
    ]
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
# AUTH_USER_MODEL = 'users.User'
| 25.099237 | 91 | 0.693127 |
1737dd5e07de7d7d700a2e5d8d0f149f779b08a8
| 2,356 |
py
|
Python
|
api/anubis/rpc/pipeline.py
|
ShubhamGG/Anubis
|
2c538ef258a1edf5463596a33bc66caa2ef7e35b
|
[
"MIT"
] | null | null | null |
api/anubis/rpc/pipeline.py
|
ShubhamGG/Anubis
|
2c538ef258a1edf5463596a33bc66caa2ef7e35b
|
[
"MIT"
] | null | null | null |
api/anubis/rpc/pipeline.py
|
ShubhamGG/Anubis
|
2c538ef258a1edf5463596a33bc66caa2ef7e35b
|
[
"MIT"
] | null | null | null |
from kubernetes import config, client
from anubis.models import db, Config, Submission
from anubis.utils.data import with_context
from anubis.utils.k8s.pipeline import create_pipeline_job_obj, reap_pipeline_jobs
from anubis.utils.logging import logger
from anubis.utils.config import get_config_int
@with_context
def create_submission_pipeline(submission_id: str):
"""
This function should launch the appropriate testing container
for the assignment, passing along the function arguments.
:param submission_id: submission.id of to test
"""
from anubis.utils.rpc import enqueue_autograde_pipeline
from anubis.lms.submissions import init_submission
# Log the creation event
logger.info(
"Creating submission pipeline job {}".format(submission_id),
extra={
"submission_id": submission_id,
},
)
# Calculate the maximum number of jobs allowed in the cluster
max_jobs = get_config_int('PIPELINE_MAX_JOBS', default=10)
# Initialize kube client
config.load_incluster_config()
# Cleanup finished jobs
active_jobs = reap_pipeline_jobs()
if active_jobs > max_jobs:
logger.info(
"TOO many jobs - re-enqueue {}".format(submission_id),
extra={"submission_id": submission_id},
)
enqueue_autograde_pipeline(submission_id)
exit(0)
# Get the database entry for the submission
submission = Submission.query.filter(
Submission.id == submission_id
).first()
# Make sure that the submission exists
if submission is None:
logger.error(
"Unable to find submission rpc.test_repo",
extra={
"submission_id": submission_id,
},
)
return
# If the build field is not present, then
# we need to initialize the submission.
if submission.build is None:
init_submission(submission, commit=True)
submission.processed = False
submission.state = 'Initializing Pipeline'
db.session.commit()
# Create k8s job object
job = create_pipeline_job_obj(submission)
# Log the pipeline job creation
logger.debug("creating pipeline job: " + job.to_str())
# Send to kube api
batch_v1 = client.BatchV1Api()
batch_v1.create_namespaced_job(body=job, namespace="anubis")
| 29.822785 | 81 | 0.688455 |
28ff71a99e3b17dd6ca18301cde485a9b0d4c495
| 8,262 |
py
|
Python
|
angr/procedures/glibc/__libc_start_main.py
|
aeflores/angr
|
ac85a3f168375ed0ee20551b1b716c1bff4ac02b
|
[
"BSD-2-Clause"
] | 1 |
2020-11-18T16:39:11.000Z
|
2020-11-18T16:39:11.000Z
|
angr/procedures/glibc/__libc_start_main.py
|
aeflores/angr
|
ac85a3f168375ed0ee20551b1b716c1bff4ac02b
|
[
"BSD-2-Clause"
] | null | null | null |
angr/procedures/glibc/__libc_start_main.py
|
aeflores/angr
|
ac85a3f168375ed0ee20551b1b716c1bff4ac02b
|
[
"BSD-2-Clause"
] | 1 |
2020-11-18T16:39:13.000Z
|
2020-11-18T16:39:13.000Z
|
import logging
import pyvex
import angr
l = logging.getLogger(name=__name__)
######################################
# __libc_start_main
######################################
class __libc_start_main(angr.SimProcedure):
#pylint:disable=arguments-differ,unused-argument,attribute-defined-outside-init
ADDS_EXITS = True
NO_RET = True
local_vars = ('main', 'argc', 'argv', 'init', 'fini')
def _initialize_b_loc_table(self):
"""
Initialize ptable for ctype
See __ctype_b_loc.c in libc implementation
"""
malloc = angr.SIM_PROCEDURES['libc']['malloc']
table = self.inline_call(malloc, 768).ret_expr
table_ptr = self.inline_call(malloc, self.state.arch.bytes).ret_expr
for pos, c in enumerate(self.state.libc.LOCALE_ARRAY):
# Each entry is 2 bytes
self.state.memory.store(table + (pos*2),
self.state.solver.BVV(c, 16),
inspect=False,
disable_actions=True,
)
# Offset for negative chars
# 256 because 2 bytes each, -128 * 2
table += 256
self.state.memory.store(table_ptr,
table,
size=self.state.arch.bytes,
endness=self.state.arch.memory_endness,
inspect=False,
disable_actions=True,
)
self.state.libc.ctype_b_loc_table_ptr = table_ptr
def _initialize_tolower_loc_table(self):
"""
Initialize ptable for ctype
See __ctype_tolower_loc.c in libc implementation
"""
malloc = angr.SIM_PROCEDURES['libc']['malloc']
# 384 entries, 4 bytes each
table = self.inline_call(malloc, 384*4).ret_expr
table_ptr = self.inline_call(malloc, self.state.arch.bytes).ret_expr
for pos, c in enumerate(self.state.libc.TOLOWER_LOC_ARRAY):
self.state.memory.store(table + (pos * 4),
self.state.solver.BVV(c, 32),
endness=self.state.arch.memory_endness,
inspect=False,
disable_actions=True,
)
# Offset for negative chars: -128 index (4 bytes per index)
table += (128 * 4)
self.state.memory.store(table_ptr,
table,
size=self.state.arch.bytes,
endness=self.state.arch.memory_endness,
inspect=False,
disable_actions=True,
)
self.state.libc.ctype_tolower_loc_table_ptr = table_ptr
def _initialize_toupper_loc_table(self):
"""
Initialize ptable for ctype
See __ctype_toupper_loc.c in libc implementation
"""
malloc = angr.SIM_PROCEDURES['libc']['malloc']
# 384 entries, 4 bytes each
table = self.inline_call(malloc, 384*4).ret_expr
table_ptr = self.inline_call(malloc, self.state.arch.bytes).ret_expr
for pos, c in enumerate(self.state.libc.TOUPPER_LOC_ARRAY):
self.state.memory.store(table + (pos * 4),
self.state.solver.BVV(c, 32),
endness=self.state.arch.memory_endness,
inspect=False,
disable_actions=True,
)
# Offset for negative chars: -128 index (4 bytes per index)
table += (128 * 4)
self.state.memory.store(table_ptr,
table,
size=self.state.arch.bytes,
endness=self.state.arch.memory_endness,
inspect=False,
disable_actions=True,
)
self.state.libc.ctype_toupper_loc_table_ptr = table_ptr
def _initialize_ctype_table(self):
self._initialize_b_loc_table()
self._initialize_tolower_loc_table()
self._initialize_toupper_loc_table()
def _initialize_errno(self):
malloc = angr.SIM_PROCEDURES['libc']['malloc']
errno_loc = self.inline_call(malloc, self.state.arch.bytes).ret_expr
self.state.libc.errno_location = errno_loc
self.state.memory.store(errno_loc, self.state.solver.BVV(0, self.state.arch.bits))
@property
def envp(self):
return self.argv + (self.argc+1)*self.state.arch.bytes
def run(self, main, argc, argv, init, fini):
# TODO: handle symbolic and static modes
# TODO: add argument types
self._initialize_ctype_table()
self._initialize_errno()
self.main, self.argc, self.argv, self.init, self.fini = self._extract_args(self.state, main, argc, argv, init,
fini)
# TODO: __cxa_atexit calls for various at-exit needs
self.call(self.init, (self.argc, self.argv, self.envp), 'after_init')
def after_init(self, main, argc, argv, init, fini, exit_addr=0):
self.call(self.main, (self.argc, self.argv, self.envp), 'after_main')
def after_main(self, main, argc, argv, init, fini, exit_addr=0):
self.exit(0)
def static_exits(self, blocks):
# Execute those blocks with a blank state, and then dump the arguments
blank_state = angr.SimState(project=self.project, mode="fastpath", memory_backer=self.project.loader.memory)
# set up the stack pointer
blank_state.regs.sp = 0x7fffffff
# Execute each block
state = blank_state
for b in blocks:
# state.regs.ip = next(iter(stmt for stmt in b.statements if isinstance(stmt, pyvex.IRStmt.IMark))).addr
irsb = self.project.engines.default_engine.process(state, b,
force_addr=next(iter(stmt for stmt in b.statements if isinstance(stmt, pyvex.IRStmt.IMark))).addr)
if irsb.successors:
state = irsb.successors[0]
else:
break
cc = angr.DEFAULT_CC[self.arch.name](self.arch)
args = [ cc.arg(state, _) for _ in range(5) ]
main, _, _, init, fini = self._extract_args(blank_state, *args)
all_exits = [
{'address': init, 'jumpkind': 'Ijk_Call', 'namehint': 'init'},
{'address': main, 'jumpkind': 'Ijk_Call', 'namehint': 'main'},
{'address': fini, 'jumpkind': 'Ijk_Call', 'namehint': 'fini'},
]
return all_exits
@staticmethod
def _extract_args(state, main, argc, argv, init, fini):
"""
Extract arguments and set them to
:param angr.sim_state.SimState state: The program state.
:param main: An argument to __libc_start_main.
:param argc: An argument to __libc_start_main.
:param argv: An argument to __libc_start_main.
:param init: An argument to __libc_start_main.
:param fini: An argument to __libc_start_main.
:return: A tuple of five elements: (main, argc, argv, init, fini)
:rtype: tuple
"""
main_ = main
argc_ = argc
argv_ = argv
init_ = init
fini_ = fini
if state.arch.name == "PPC32":
# for some dumb reason, PPC passes arguments to libc_start_main in some completely absurd way
argv_ = argc_
argc_ = main_
main_ = state.mem[state.regs.r8 + 4:].int.resolved
init_ = state.mem[state.regs.r8 + 8:].int.resolved
fini_ = state.mem[state.regs.r8 + 12:].int.resolved
elif state.arch.name == "PPC64":
main_ = state.mem[state.regs.r8 + 8:].long.resolved
init_ = state.mem[state.regs.r8 + 16:].long.resolved
fini_ = state.mem[state.regs.r8 + 24:].long.resolved
return main_, argc_, argv_, init_, fini_
| 39.156398 | 118 | 0.547446 |
9a520a06d8da36ae8fbb2c9dd31e346551416954
| 4,478 |
py
|
Python
|
analysis/plot-recognition-data.py
|
Dom1L/molassembler
|
dafc656b1aa846b65b1fd1e06f3740ceedcf22db
|
[
"BSD-3-Clause"
] | 17 |
2020-11-27T14:59:34.000Z
|
2022-03-28T10:31:25.000Z
|
analysis/plot-recognition-data.py
|
Dom1L/molassembler
|
dafc656b1aa846b65b1fd1e06f3740ceedcf22db
|
[
"BSD-3-Clause"
] | null | null | null |
analysis/plot-recognition-data.py
|
Dom1L/molassembler
|
dafc656b1aa846b65b1fd1e06f3740ceedcf22db
|
[
"BSD-3-Clause"
] | 6 |
2020-12-09T09:21:53.000Z
|
2021-08-22T15:42:21.000Z
|
__copyright__ = """This code is licensed under the 3-clause BSD license.
Copyright ETH Zurich, Laboratory of Physical Chemistry, Reiher Group.
See LICENSE.txt for details.
"""
from recognition_data import symmetrySizes, n_distortion_values, max_distortion, shapes, repeats, results, recognizers
import matplotlib.pyplot as plt
import numpy as np
def make_table(recognizer_idx, shape_idx):
print(recognizers[recognizer_idx])
shape_size = symmetrySizes[shape_idx]
shape_idxs_of_same_size = [c for c, e in enumerate(
symmetrySizes) if e == shape_size]
shape_rec_results = results[(recognizer_idx, shape_idx)]
for distortion_idx in range(n_distortion_values):
result_start = distortion_idx * repeats
result_end = (distortion_idx + 1) * repeats
result_section = shape_rec_results[result_start:result_end]
distortion = distortion_idx * \
max_distortion / (n_distortion_values - 1)
summed = {shapes[i]: result_section.count(i)
for i in shape_idxs_of_same_size}
line = ", ".join(["{}: {}".format(k, v) for k, v in summed.items()])
print("{} : {}".format(distortion, line))
def stack_plot(recognizer_idx, shape_idx, axes):
shape_size = symmetrySizes[shape_idx]
shape_idxs_of_same_size = [c for c, e in enumerate(
symmetrySizes) if e == shape_size]
shape_rec_results = results[(recognizer_idx, shape_idx)]
sums_per_shape = {k: [] for k in shape_idxs_of_same_size}
for distortion_idx in range(n_distortion_values):
result_start = distortion_idx * repeats
result_end = (distortion_idx + 1) * repeats
result_section = shape_rec_results[result_start:result_end]
for i in shape_idxs_of_same_size:
sums_per_shape[i].append(result_section.count(i))
cumulative_sums = [0 for i in range(n_distortion_values)]
assert len(cumulative_sums) == len(sums_per_shape[shape_idx])
xs = np.arange(n_distortion_values)
xs_data = np.arange(0.0, 1.1, 0.1)
# halve number of rows by subsetting everything
for k, v in sums_per_shape.items():
sums_per_shape[k] = v[:6]
cumulative_sums = cumulative_sums[:6]
xs = xs[:6]
xs_data = xs_data[:6]
def autolabel(ax, rects, shape):
for rect in rects:
width = rect.get_width()
if width < 5:
continue
annotate_kwargs = {
"xy": (
rect.get_x() + rect.get_width() / 2,
rect.get_y() + rect.get_height() / 2
),
"ha": "center",
"va": "center",
"color": "white"
}
if rect.get_bbox().y0 < 0:
ax.annotate(shape, weight="bold", **annotate_kwargs)
else:
ax.annotate("{}".format(width), **annotate_kwargs)
for i in shape_idxs_of_same_size:
rects = axes.barh(y=xs, width=sums_per_shape[i], height=0.8,
left=cumulative_sums)
autolabel(axes, rects, shapes[i])
cumulative_sums = [a + b for a, b
in zip(cumulative_sums, sums_per_shape[i])]
axes.set_yticks(xs)
axes.set_yticklabels(["{:.1f}".format(x) for x in xs_data])
if __name__ == "__main__":
shape_size = 4
recognizer_labels = ["Ang. & geom. ind.",
"CShM", "Biased CShM", "CShM prob. distr."]
selected_recognizers = [0, 1, 2, 3]
shape_idxs_of_same_size = [c for c, e in enumerate(symmetrySizes)
if e == shape_size]
fig, axs = plt.subplots(len(selected_recognizers),
len(shape_idxs_of_same_size),
tight_layout=True, sharey="row")
for row_idx, recognizer in enumerate(selected_recognizers):
subplots_row = axs[row_idx]
subplots_row[0].set_ylabel(recognizer_labels[recognizer])
for col_idx, shape in enumerate(shape_idxs_of_same_size):
stack_plot(recognizer, shape, subplots_row[col_idx])
# Axis hiding
# Hide y axis for all columns but the first
for row in axs:
for plot in row[1:]:
plot.get_yaxis().set_visible(False)
# Hide x axis and margins everywhere
for row in axs:
for plot in row:
plot.get_xaxis().set_visible(False)
plot.margins(0)
plt.subplots_adjust(hspace=0.1, wspace=0.1)
plt.show()
| 37.949153 | 118 | 0.619026 |
21c472d90c252a787119cd18c16cea440e5be5df
| 125 |
py
|
Python
|
lagom/version.py
|
zuoxingdong/lagom
|
3b6710804dbc79c6dffb369ac87c68f4055ab6cd
|
[
"MIT"
] | 383 |
2018-07-11T17:43:10.000Z
|
2022-01-24T08:46:23.000Z
|
lagom/version.py
|
LorinChen/lagom
|
273bb7f5babb1f250f6dba0b5f62c6614f301719
|
[
"MIT"
] | 90 |
2018-07-11T23:51:45.000Z
|
2021-12-16T08:56:42.000Z
|
lagom/version.py
|
LorinChen/lagom
|
273bb7f5babb1f250f6dba0b5f62c6614f301719
|
[
"MIT"
] | 32 |
2018-07-12T18:21:03.000Z
|
2021-09-15T05:47:48.000Z
|
# Useful for setup.py and when lagom is not installed yet
# Versioning format: Major.Minor.Maintenance
__version__ = '0.0.3'
| 31.25 | 57 | 0.768 |
4a027bd5444be733fdaa5d06b10f3ac5224beeaa
| 52,859 |
py
|
Python
|
pde/grids/base.py
|
tefavidal/py-pde
|
427be3f2f4b096775f46111cd5a5d05af50e94bc
|
[
"MIT"
] | null | null | null |
pde/grids/base.py
|
tefavidal/py-pde
|
427be3f2f4b096775f46111cd5a5d05af50e94bc
|
[
"MIT"
] | null | null | null |
pde/grids/base.py
|
tefavidal/py-pde
|
427be3f2f4b096775f46111cd5a5d05af50e94bc
|
[
"MIT"
] | null | null | null |
"""
Bases classes
.. codeauthor:: David Zwicker <[email protected]>
"""
from __future__ import annotations
import functools
import inspect
import itertools
import json
import logging
import warnings
from abc import ABCMeta, abstractmethod, abstractproperty
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
Generator,
Iterator,
List,
NamedTuple,
Sequence,
Set,
Tuple,
Union,
)
import numba as nb
import numpy as np
from numba.extending import is_jitted, register_jitable
from ..tools.cache import cached_method, cached_property
from ..tools.docstrings import fill_in_docstring
from ..tools.misc import Number, classproperty
from ..tools.numba import jit, jit_allocate_out
from ..tools.typing import CellVolume, FloatNumerical, NumberOrArray, OperatorType
if TYPE_CHECKING:
from .boundaries.axes import Boundaries, BoundariesData # @UnusedImport
PI_4 = 4 * np.pi
PI_43 = 4 / 3 * np.pi
class OperatorInfo(NamedTuple):
"""stores information about an operator"""
factory: Callable[..., OperatorType]
rank_in: int
rank_out: int
def _check_shape(shape) -> Tuple[int, ...]:
"""checks the consistency of shape tuples"""
if not hasattr(shape, "__iter__"):
shape = [shape] # support single numbers
if len(shape) == 0:
raise ValueError("Require at least one dimension")
# convert the shape to a tuple of integers
result = []
for dim in shape:
if dim == int(dim) and dim >= 1:
result.append(int(dim))
else:
raise ValueError(f"{repr(dim)} is not a valid number of support points")
return tuple(result)
def discretize_interval(
x_min: float, x_max: float, num: int
) -> Tuple[np.ndarray, float]:
r""" construct a list of equidistantly placed intervals
The discretization is defined as
.. math::
x_i &= x_\mathrm{min} + \left(i + \frac12\right) \Delta x
\quad \text{for} \quad i = 0, \ldots, N - 1
\\
\Delta x &= \frac{x_\mathrm{max} - x_\mathrm{min}}{N}
where :math:`N` is the number of intervals given by `num`.
Args:
x_min (float): Minimal value of the axis
x_max (float): Maximal value of the axis
num (int): Number of intervals
Returns:
tuple: (midpoints, dx): the midpoints of the intervals and the used
discretization `dx`.
"""
dx = (x_max - x_min) / num
return (np.arange(num) + 0.5) * dx + x_min, dx
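# A small illustration of the formula above (sketch): discretizing [0, 1) into four
# intervals yields the cell midpoints and the spacing:
#
#     discretize_interval(0.0, 1.0, 4)   # -> (array([0.125, 0.375, 0.625, 0.875]), 0.25)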
class DomainError(ValueError):
"""exception indicating that point lies outside domain"""
pass
class DimensionError(ValueError):
"""exception indicating that dimensions were inconsistent"""
pass
class PeriodicityError(RuntimeError):
"""exception indicating that the grid periodicity is inconsistent"""
pass
class GridBase(metaclass=ABCMeta):
"""Base class for all grids defining common methods and interfaces"""
_subclasses: Dict[str, "GridBase"] = {} # all classes inheriting from this
_operators: Dict[str, OperatorInfo] = {} # all operators defined for the grid
# properties that are defined in subclasses
dim: int # int: The spatial dimension in which the grid is embedded
axes: List[str] # list: Name of all axes that are described by the grid
axes_symmetric: List[str] = []
""" list: The names of the additional axes that the fields do not depend on,
e.g. along which they are constant. """
cell_volume_data: Sequence[FloatNumerical]
coordinate_constraints: List[int] = [] # axes not described explicitly
num_axes: int
periodic: List[bool]
# mandatory, immutable, private attributes
_axes_bounds: Tuple[Tuple[float, float], ...]
_axes_coords: Tuple[np.ndarray, ...]
_discretization: np.ndarray
_shape: Tuple[int, ...]
# to help sphinx, we here list docstrings for classproperties
operators: Set[str]
""" set: names of all operators defined for this grid """
def __init__(self):
"""initialize the grid"""
self._logger = logging.getLogger(self.__class__.__name__)
def __init_subclass__(cls, **kwargs): # @NoSelf
"""register all subclassess to reconstruct them later"""
super().__init_subclass__(**kwargs)
cls._subclasses[cls.__name__] = cls
cls._operators: Dict[str, Callable] = {}
@classmethod
def from_state(cls, state: Union[str, Dict[str, Any]]) -> GridBase:
"""create a field from a stored `state`.
Args:
state (`str` or `dict`):
The state from which the grid is reconstructed. If `state` is a
string, it is decoded as JSON, which should yield a `dict`.
"""
# decode the json data
if isinstance(state, str):
state = dict(json.loads(state))
        # create the instance of the correct class
class_name = state.pop("class")
if class_name == cls.__name__:
raise RuntimeError(f"Cannot reconstruct abstract class `{class_name}`")
grid_cls = cls._subclasses[class_name]
return grid_cls.from_state(state)
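    # A typical round trip (sketch, using a concrete grid subclass): serialize a grid
    # via `state_serialized` and rebuild an equivalent grid from that string:
    #
    #     s = grid.state_serialized          # JSON string including the class name
    #     grid2 = GridBase.from_state(s)     # dispatches to the right subclass
    #     assert grid2 == grid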
@property
def axes_bounds(self) -> Tuple[Tuple[float, float], ...]:
"""tuple: lower and upper bounds of each axis"""
return self._axes_bounds
@property
def axes_coords(self) -> Tuple[np.ndarray, ...]:
"""tuple: coordinates of the cells for each axis"""
return self._axes_coords
def get_axis_index(self, key: Union[int, str], allow_symmetric: bool = True) -> int:
"""return the index belonging to an axis
Args:
key (int or str): The index or name of an axis
allow_symmetric (bool): Whether axes with assumed symmetry are included
Returns:
int: The index of the axis
"""
if isinstance(key, str):
# determine key index from name of the axis
if allow_symmetric:
axes = self.axes + self.axes_symmetric
else:
axes = self.axes
if key in axes:
return axes.index(key)
else:
raise IndexError(f"`{key}` is not in the axes {axes}")
elif isinstance(key, int):
# assume that it is already an index
return key
raise IndexError("Index must be an integer or the name of an axes")
@property
def discretization(self) -> np.ndarray:
""":class:`numpy.array`: the linear size of a cell along each axis"""
return self._discretization
@property
def shape(self) -> Tuple[int, ...]:
"""tuple of int: the number of support points of each axis"""
return self._shape
@property
def _shape_full(self) -> Tuple[int, ...]:
"""tuple of int: number of support points including ghost points"""
return tuple(num + 2 for num in self.shape)
@property
def _idx_valid(self) -> Tuple[slice, ...]:
"""tuple: slices to extract valid data from full data"""
return tuple(slice(1, s + 1) for s in self.shape)
def _make_get_valid(self) -> Callable[[np.ndarray], np.ndarray]:
"""callable: function to extract the valid part of a full data array"""
num_axes = self.num_axes
@register_jitable
def get_valid(arr: np.ndarray) -> np.ndarray:
"""return valid part of the data (without ghost cells)"""
if num_axes == 1:
return arr[..., 1:-1] # type: ignore
elif num_axes == 2:
return arr[..., 1:-1, 1:-1] # type: ignore
elif num_axes == 3:
return arr[..., 1:-1, 1:-1, 1:-1] # type: ignore
else:
raise NotImplementedError
return get_valid # type: ignore
def _make_set_valid(self) -> Callable[[np.ndarray, np.ndarray], None]:
"""callable: function to extract the valid part of a full data array"""
num_axes = self.num_axes
@register_jitable
def set_valid(arr: np.ndarray, value: np.ndarray) -> None:
"""return valid part of the data (without ghost cells)"""
if num_axes == 1:
arr[..., 1:-1] = value
elif num_axes == 2:
arr[..., 1:-1, 1:-1] = value
elif num_axes == 3:
arr[..., 1:-1, 1:-1, 1:-1] = value
else:
raise NotImplementedError
return set_valid # type: ignore
@abstractproperty
def state(self) -> Dict[str, Any]:
pass
@property
def state_serialized(self) -> str:
"""str: JSON-serialized version of the state of this grid"""
state = self.state
state["class"] = self.__class__.__name__
return json.dumps(state)
def copy(self) -> GridBase:
"""return a copy of the grid"""
return self.__class__.from_state(self.state)
__copy__ = copy
def __deepcopy__(self, memo: Dict[int, Any]) -> GridBase:
"""create a deep copy of the grid. This function is for instance called when
a grid instance appears in another object that is copied using `copy.deepcopy`
"""
# this implementation assumes that a simple call to copy is sufficient
result = self.copy()
memo[id(self)] = result
return result
def __repr__(self) -> str:
"""return instance as string"""
args = ", ".join(str(k) + "=" + str(v) for k, v in self.state.items())
return f"{self.__class__.__name__}({args})"
def __eq__(self, other) -> bool:
if not isinstance(other, self.__class__):
return NotImplemented
return (
self.shape == other.shape
and self.axes_bounds == other.axes_bounds
and self.periodic == other.periodic
)
def _cache_hash(self) -> int:
"""returns a value to determine when a cache needs to be updated"""
return hash(
(
self.__class__.__name__,
self.shape,
self.axes_bounds,
hash(tuple(self.periodic)),
)
)
def compatible_with(self, other: GridBase) -> bool:
"""tests whether this class is compatible with other grids.
Grids are compatible when they cover the same area with the same
        discretization. The difference from equality is that compatible grids do
        not need to have the same periodicity at their boundaries.
Args:
other (:class:`~pde.grids.base.GridBase`):
The other grid to test against
Returns:
bool: Whether the grid is compatible
"""
return (
self.__class__ == other.__class__
and self.shape == other.shape
and self.axes_bounds == other.axes_bounds
)
def assert_grid_compatible(self, other: GridBase) -> None:
"""checks whether `other` is compatible with the current grid
Args:
other (:class:`~pde.grids.base.GridBase`):
The grid compared to this one
Raises:
ValueError: if grids are not compatible
"""
if not self.compatible_with(other):
raise ValueError(f"Grids {self} and {other} are incompatible")
@property
def numba_type(self) -> str:
"""str: represents type of the grid data in numba signatures"""
return "f8[" + ", ".join([":"] * self.num_axes) + "]"
@cached_property()
def coordinate_arrays(self) -> Tuple[np.ndarray, ...]:
"""tuple: for each axes: coordinate values for all cells"""
return tuple(np.meshgrid(*self.axes_coords, indexing="ij"))
@cached_property()
def cell_coords(self) -> np.ndarray:
""":class:`~numpy.ndarray`: coordinate values for all axes of each cell"""
return np.moveaxis(self.coordinate_arrays, 0, -1)
@cached_property()
def cell_volumes(self) -> np.ndarray:
""":class:`~numpy.ndarray`: volume of each cell"""
vols = functools.reduce(np.outer, self.cell_volume_data)
return np.broadcast_to(vols, self.shape) # type: ignore
@cached_property()
def uniform_cell_volumes(self) -> bool:
"""bool: returns True if all cell volumes are the same"""
return all(np.asarray(vols).ndim == 0 for vols in self.cell_volume_data)
def distance_real(self, p1: np.ndarray, p2: np.ndarray) -> float:
"""Calculate the distance between two points given in real coordinates
This takes periodic boundary conditions into account if need be
Args:
p1 (:class:`~numpy.ndarray`): First position
p2 (:class:`~numpy.ndarray`): Second position
Returns:
float: Distance between the two positions
"""
diff = self.difference_vector_real(p1, p2)
return np.linalg.norm(diff, axis=-1) # type: ignore
def _iter_boundaries(self) -> Iterator[Tuple[int, bool]]:
"""iterate over all boundaries of the grid
Yields:
tuple: for each boundary, the generator returns a tuple indicating
the axis of the boundary together with a boolean value indicating
whether the boundary lies on the upper side of the axis.
"""
return itertools.product(range(self.num_axes), [True, False])
def _boundary_coordinates(self, axis: int, upper: bool) -> np.ndarray:
"""get coordinates of points on the boundary
Args:
axis (int):
The axis perpendicular to the boundary
upper (bool):
Whether the boundary is at the upper side of the axis
Returns:
:class:`~numpy.ndarray`: Coordinates of the boundary points. This array has
one less dimension than the grid has axes.
"""
# get coordinate along the axis determining the boundary
if upper:
c_bndry = np.array([self._axes_bounds[axis][1]])
else:
c_bndry = np.array([self._axes_bounds[axis][0]])
# get orthogonal coordinates
coords = tuple(
c_bndry if i == axis else self._axes_coords[i] for i in range(self.num_axes)
)
points = np.meshgrid(*coords, indexing="ij")
# assemble into array
shape_bndry = tuple(self.shape[i] for i in range(self.num_axes) if i != axis)
shape = shape_bndry + (self.num_axes,)
return np.stack(points, -1).reshape(shape)
@abstractproperty
def volume(self) -> float:
pass
@abstractmethod
def cell_to_point(self, cells: np.ndarray, cartesian: bool = True) -> np.ndarray:
pass
@abstractmethod
def point_to_cell(self, points: np.ndarray) -> np.ndarray:
pass
@abstractmethod
def point_to_cartesian(self, points: np.ndarray) -> np.ndarray:
pass
@abstractmethod
def point_from_cartesian(self, points: np.ndarray) -> np.ndarray:
pass
@abstractmethod
def difference_vector_real(self, p1: np.ndarray, p2: np.ndarray):
pass
@abstractmethod
def polar_coordinates_real(
self, origin: np.ndarray, *, ret_angle: bool = False
) -> Union[np.ndarray, Tuple[np.ndarray, ...]]:
pass
@abstractmethod
def contains_point(self, point: np.ndarray) -> np.ndarray:
pass
@abstractmethod
def iter_mirror_points(
self, point: np.ndarray, with_self: bool = False, only_periodic: bool = True
) -> Generator:
pass
@abstractmethod
def get_boundary_conditions(
self, bc: BoundariesData = "natural", rank: int = 0
) -> Boundaries:
pass
@abstractmethod
def get_line_data(self, data: np.ndarray, extract: str = "auto") -> Dict[str, Any]:
pass
@abstractmethod
def get_image_data(self, data: np.ndarray) -> Dict[str, Any]:
pass
@abstractmethod
def get_random_point(
self, boundary_distance: float = 0, cartesian: bool = True
) -> np.ndarray:
pass
def normalize_point(self, point: np.ndarray, reflect: bool = True) -> np.ndarray:
"""normalize coordinates by applying periodic boundary conditions
Here, the point is assumed to be specified by the physical values along
the non-symmetric axes of the grid. Normalizing points is useful to make sure
they lie within the domain of the grid. This function respects periodic
boundary conditions and can also reflect points off the boundary.
Args:
point (:class:`~numpy.ndarray`):
Coordinates of a single point
reflect (bool):
Flag determining whether coordinates along non-periodic axes are
reflected to lie in the valid range. If `False`, such coordinates are
left unchanged and only periodic boundary conditions are enforced.
Returns:
:class:`~numpy.ndarray`: The respective coordinates with periodic
boundary conditions applied.
"""
point = np.asarray(point, dtype=np.double)
if point.size == 0:
return np.zeros((0, self.num_axes))
if point.ndim == 0:
if self.num_axes > 1:
raise DimensionError(
f"Point {point} is not of dimension {self.num_axes}"
)
elif point.shape[-1] != self.num_axes:
raise DimensionError(
f"Array of shape {point.shape} does not describe points of dimension "
f"{self.num_axes}"
)
# normalize the coordinates for the periodic dimensions
bounds = np.array(self.axes_bounds)
xmin = bounds[:, 0]
xmax = bounds[:, 1]
xdim = xmax - xmin
if self.num_axes == 1:
# single dimension
if self.periodic[0]:
point = (point - xmin[0]) % xdim[0] + xmin[0]
elif reflect:
arg = (point - xmax[0]) % (2 * xdim[0]) - xdim[0]
point = xmin[0] + np.abs(arg)
else:
# multiple dimensions
for i in range(self.num_axes):
if self.periodic[i]:
point[..., i] = (point[..., i] - xmin[i]) % xdim[i] + xmin[i]
elif reflect:
arg = (point[..., i] - xmax[i]) % (2 * xdim[i]) - xdim[i]
point[..., i] = xmin[i] + np.abs(arg)
return point
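    # Illustration (added): on a 1d grid covering [0, 1), a periodic axis maps the point
    # 1.25 to 0.25, while a non-periodic axis with reflect=True reflects it off the upper
    # boundary to 0.75; with reflect=False non-periodic coordinates are left unchanged.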
@classmethod
def register_operator(
cls,
name: str,
factory_func: Callable = None,
rank_in: int = 0,
rank_out: int = 0,
):
"""register an operator for this grid
Example:
The method can either be used directly::
GridClass.register_operator("operator", make_operator)
or as a decorator for the factory function::
@GridClass.register_operator("operator")
def make_operator(bcs: Boundaries):
...
Args:
name (str):
The name of the operator to register
factory_func (callable):
A function with signature ``(bcs: Boundaries, **kwargs)``, which
takes boundary conditions and optional keyword arguments and
returns an implementation of the given operator. This
implementation is a function that takes a
:class:`~numpy.ndarray` of discretized values as arguments and
returns the resulting discretized data in a
:class:`~numpy.ndarray` after applying the operator.
rank_in (int):
The rank of the input field for the operator
rank_out (int):
The rank of the field that is returned by the operator
"""
def register_operator(factor_func_arg: Callable):
"""helper function to register the operator"""
cls._operators[name] = OperatorInfo(
factory=factor_func_arg, rank_in=rank_in, rank_out=rank_out
)
return factor_func_arg
if factory_func is None:
# method is used as a decorator, so return the helper function
return register_operator
else:
# method is used directly
register_operator(factory_func)
@classproperty # type: ignore
def operators(cls) -> Set[str]: # @NoSelf
"""set: all operators defined for this class"""
result = set()
classes = inspect.getmro(cls)[:-1] # type: ignore
for anycls in classes:
result |= set(anycls._operators.keys()) # type: ignore
return result
def _get_operator_info(self, operator: Union[str, OperatorInfo]) -> OperatorInfo:
"""return the operator defined on this grid
Args:
operator (str):
Identifier for the operator. Some examples are 'laplace', 'gradient', or
'divergence'. The registered operators for this grid can be obtained
from the :attr:`~pde.grids.base.GridBase.operators` attribute.
Returns:
:class:`~pde.grids.base.OperatorInfo`: information for the operator
"""
if isinstance(operator, OperatorInfo):
return operator
# obtain all parent classes, except `object`
classes = inspect.getmro(self.__class__)[:-1]
for cls in classes:
if operator in cls._operators: # type: ignore
return cls._operators[operator] # type: ignore
# operator was not found
op_list = ", ".join(sorted(self.operators))
raise ValueError(
f"'{operator}' is not one of the defined operators ({op_list}). Custom "
"operators can be added using the `register_operator` method."
)
@cached_method()
def make_operator_no_bc(
self,
operator: Union[str, OperatorInfo],
**kwargs,
) -> OperatorType:
"""return a compiled function applying an operator without boundary conditions
        The returned function takes the discretized full data as input together with an
        array for the valid data points, into which the result of applying the operator is written.
Note:
The resulting function does not check whether the ghost cells of the input
array have been supplied with sensible values. It is the responsibility of
the user to set the values of the ghost cells beforehand. Use this function
only if you absolutely know what you're doing. In all other cases,
:meth:`make_operator` is probably the better choice.
Args:
operator (str):
Identifier for the operator. Some examples are 'laplace', 'gradient', or
'divergence'. The registered operators for this grid can be obtained
from the :attr:`~pde.grids.base.GridBase.operators` attribute.
**kwargs:
Specifies extra arguments influencing how the operator is created.
Returns:
callable: the function that applies the operator
"""
return self._get_operator_info(operator).factory(self, **kwargs)
@cached_method()
@fill_in_docstring
def make_operator(
self,
operator: Union[str, OperatorInfo],
bc: BoundariesData,
**kwargs,
) -> Callable[[np.ndarray], np.ndarray]:
"""return a compiled function applying an operator with boundary conditions
The returned function takes the discretized data on the grid as an input and
returns the data to which the operator `operator` has been applied. The function
only takes the valid grid points and allocates memory for the ghost points
internally to apply the boundary conditions specified as `bc`. Note that the
function supports an optional argument `out`, which if given should provide
space for the valid output array without the ghost cells. The result of the
operator is then written into this output array.
Args:
operator (str):
Identifier for the operator. Some examples are 'laplace', 'gradient', or
'divergence'. The registered operators for this grid can be obtained
from the :attr:`~pde.grids.base.GridBase.operators` attribute.
bc (str or list or tuple or dict):
The boundary conditions applied to the field.
{ARG_BOUNDARIES}
**kwargs:
Specifies extra arguments influencing how the operator is created.
Returns:
callable: the function that applies the operator
"""
backend = kwargs.get("backend", "numba") # numba is the default backend
# instantiate the operator
operator = self._get_operator_info(operator)
operator_raw = operator.factory(self, **kwargs)
# set the boundary conditions before applying this operator
bcs = self.get_boundary_conditions(bc, rank=operator.rank_in)
# calculate shapes of the full data
shape_in_full = (self.dim,) * operator.rank_in + self._shape_full
shape_out = (self.dim,) * operator.rank_out + self.shape
if backend == "numba":
# create a compiled function to apply to the operator
set_ghost_cells = bcs.make_ghost_cell_setter()
get_valid = self._make_get_valid()
if not is_jitted(operator_raw):
operator_raw = jit(operator_raw)
@jit_allocate_out(out_shape=shape_out)
def apply_op(arr: np.ndarray, out: np.ndarray = None) -> np.ndarray:
"""applies operator to the data"""
# prepare input with boundary conditions
arr_full = np.empty(shape_in_full, dtype=arr.dtype)
arr_valid = get_valid(arr_full)
arr_valid[:] = arr
set_ghost_cells(arr_full)
# apply operator
operator_raw(arr_full, out) # type: ignore
# return valid part of the output
return out # type: ignore
elif backend in {"numpy", "scipy"}:
# create a numpy/scipy function to apply to the operator
def apply_op(arr: np.ndarray, out: np.ndarray = None) -> np.ndarray:
"""set boundary conditions and apply operator"""
# prepare input with boundary conditions
arr_full = np.empty(shape_in_full, dtype=arr.dtype)
arr_full[(...,) + self._idx_valid] = arr
bcs.set_ghost_cells(arr_full)
# apply operator
if out is None:
out = np.empty(shape_out, dtype=arr.dtype)
else:
assert out.shape == shape_out
operator_raw(arr_full, out)
# return valid part of the output
return out
else:
raise NotImplementedError(f"Undefined backend '{backend}'")
return apply_op # type: ignore
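    # Illustration (added, assuming a concrete subclass such as `UnitGrid` from this
    # package and its registered "laplace" operator, both defined outside this excerpt):
    #
    #     grid = UnitGrid([32])
    #     laplace = grid.make_operator("laplace", bc="natural")
    #     result = laplace(data)   # `data` holds the field values at the 32 support points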
def get_operator(
self,
operator: Union[str, OperatorInfo],
bc: BoundariesData,
**kwargs,
) -> Callable[[np.ndarray], np.ndarray]:
"""deprecated alias of method `make_operator`"""
# this was deprecated on 2021-08-05
warnings.warn(
"`get_operator` is deprecated. Use `make_operator` instead",
DeprecationWarning,
)
return self.make_operator(operator, bc, **kwargs)
def get_subgrid(self, indices: Sequence[int]) -> GridBase:
"""return a subgrid of only the specified axes"""
raise NotImplementedError(
f"Subgrids are not implemented for class {self.__class__.__name__}"
)
def plot(self):
"""visualize the grid"""
raise NotImplementedError(
f"Plotting is not implemented for class {self.__class__.__name__}"
)
@property
def typical_discretization(self) -> float:
"""float: the average side length of the cells"""
return np.mean(self.discretization) # type: ignore
def integrate(
self, data: NumberOrArray, axes: Union[int, Sequence[int]] = None
) -> np.ndarray:
"""Integrates the discretized data over the grid
Args:
data (:class:`~numpy.ndarray`):
The values at the support points of the grid that need to be
integrated.
axes (list of int, optional):
The axes along which the integral is performed. If omitted, all
axes are integrated over.
Returns:
:class:`~numpy.ndarray`: The values integrated over the entire grid
"""
# determine the volumes of the individual cells
if axes is None:
volume_list = self.cell_volume_data
else:
# use stored value for the default case of integrating over all axes
if isinstance(axes, int):
axes = (axes,)
else:
axes = tuple(axes) # required for numpy.sum
volume_list = [
cell_vol if ax in axes else 1
for ax, cell_vol in enumerate(self.cell_volume_data)
]
cell_volumes = functools.reduce(np.outer, volume_list)
# determine the axes over which we will integrate
if not isinstance(data, np.ndarray) or data.ndim < self.num_axes:
# deal with the case where data is not supplied for each support
# point, e.g., when a single scalar is integrated over the grid
data = np.broadcast_to(data, self.shape)
elif data.ndim > self.num_axes:
# deal with the case where more than a single value is provided per
# support point, e.g., when a tensorial field is integrated
offset = data.ndim - self.num_axes
if axes is None:
# integrate over all axes of the grid
axes = tuple(range(offset, data.ndim))
else:
# shift the indices to account for the data shape
axes = tuple(offset + i for i in axes)
# calculate integral using a weighted sum along the chosen axes
return (data * cell_volumes).sum(axis=axes) # type: ignore
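    # Illustration (added): integrating the constant 1 over all axes returns the total grid
    # volume, so `grid.integrate(1.0)` agrees with `grid.volume` up to rounding; for data
    # with extra leading dimensions (e.g. a vector field) one integral per component is returned.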
@cached_method()
def make_normalize_point_compiled(
self, reflect: bool = True
) -> Callable[[np.ndarray], None]:
"""return a compiled function that normalizes a point
Here, the point is assumed to be specified by the physical values along
the non-symmetric axes of the grid. Normalizing points is useful to make sure
they lie within the domain of the grid. This function respects periodic
boundary conditions and can also reflect points off the boundary.
Args:
reflect (bool):
Flag determining whether coordinates along non-periodic axes are
reflected to lie in the valid range. If `False`, such coordinates are
left unchanged and only periodic boundary conditions are enforced.
Returns:
callable: A function that takes a :class:`~numpy.ndarray` as an argument,
which describes the coordinates of the points. This array is modified
in-place!
"""
num_axes = self.num_axes
periodic = np.array(self.periodic) # using a tuple instead led to a numba error
bounds = np.array(self.axes_bounds)
xmin = bounds[:, 0]
xmax = bounds[:, 1]
size = bounds[:, 1] - bounds[:, 0]
@jit
def normalize_point(point: np.ndarray) -> None:
"""helper function normalizing a single point"""
assert point.ndim == 1 # only support single points
for i in range(num_axes):
if periodic[i]:
point[i] = (point[i] - xmin[i]) % size[i] + xmin[i]
elif reflect:
arg = (point[i] - xmax[i]) % (2 * size[i]) - size[i]
point[i] = xmin[i] + abs(arg)
# else: do nothing
return normalize_point # type: ignore
@cached_method()
def make_cell_volume_compiled(self, flat_index: bool = False) -> CellVolume:
"""return a compiled function returning the volume of a grid cell
Args:
flat_index (bool):
When True, cell_volumes are indexed by a single integer into the
flattened array.
Returns:
function: returning the volume of the chosen cell
"""
if all(np.isscalar(d) for d in self.cell_volume_data):
# all cells have the same volume
cell_volume = np.product(self.cell_volume_data)
@jit
def get_cell_volume(*args) -> float:
return cell_volume # type: ignore
else:
# some cells have a different volume
cell_volumes = self.cell_volumes
if flat_index:
@jit
def get_cell_volume(idx: int) -> float:
return cell_volumes.flat[idx] # type: ignore
else:
@jit
def get_cell_volume(*args) -> float:
return cell_volumes[args] # type: ignore
return get_cell_volume # type: ignore
def _make_interpolation_axis_data(
self,
axis: int,
*,
full_data: bool = False,
cell_coords: bool = False,
) -> Callable[[float], Tuple[int, int, float, float]]:
"""factory for obtaining interpolation information
Args:
axis (int):
The axis along which interpolation is performed
full_data (bool):
Flag indicating that the interpolator should work on the full data array
that includes values for the grid points. If this is the case, the
boundaries are not checked and the coordinates are used as is.
cell_coords (bool):
Flag indicating whether points are given in cell coordinates or actual
point coordinates.
Returns:
A function that is called with a coordinate value for the axis. The function
returns the indices of the neighboring support points as well as the
associated weights
"""
# obtain information on how this axis is discretized
size = self.shape[axis]
periodic = self.periodic[axis]
lo = self.axes_bounds[axis][0]
dx = self.discretization[axis]
@register_jitable
def get_axis_data(coord: float) -> Tuple[int, int, float, float]:
"""determines data for interpolating along one axis"""
if cell_coords:
c_l, d_l = divmod(coord, 1.0)
else:
c_l, d_l = divmod((coord - lo) / dx - 0.5, 1.0)
if full_data:
c_li = int(c_l) + 1 # left support point
c_hi = c_li + 1 # right support point
elif periodic: # periodic domain
c_li = int(c_l) % size # left support point
c_hi = (c_li + 1) % size # right support point
elif 0 <= c_l + d_l < size - 1: # in bulk part of domain
c_li = int(c_l) # left support point
c_hi = c_li + 1 # right support point
elif size - 1 <= c_l + d_l <= size - 0.5: # close to upper boundary
c_li = c_hi = int(c_l) # both support points close to boundary
# This branch also covers the special case, where size == 1 and data is
# evaluated at the only support point (c_l == d_l == 0.)
elif -0.5 <= c_l + d_l <= 0: # close to lower boundary
c_li = c_hi = int(c_l) + 1 # both support points close to boundary
else:
return -42, -42, 0.0, 0.0 # indicates out of bounds
# determine the weights
w_l, w_h = 1 - d_l, d_l
# set small weights to zero. If this is not done, invalid data at the corner
# of the grid (where two rows of ghost cells intersect) could be accessed.
# If this random data is very large, e.g., 1e100, it contributes
# significantly, even if the weight is low, e.g., 1e-16.
if w_l < 1e-15:
w_l = 0
if w_h < 1e-15:
w_h = 0
return c_li, c_hi, w_l, w_h
return get_axis_data # type: ignore
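    # Illustration (added): for a non-periodic axis on [0, 1] with 4 cells (dx = 0.25 and
    # support points at 0.125, 0.375, ...), the coordinate 0.2 yields c_li=0, c_hi=1 with
    # weights w_l=0.7 and w_h=0.3, i.e. the interpolated value is 0.7*data[0] + 0.3*data[1].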
def _make_interpolator_compiled(
self, *, fill: Number = None, full_data: bool = False, cell_coords: bool = False
) -> Callable[[np.ndarray, np.ndarray], np.ndarray]:
"""return a compiled function for linear interpolation on the grid
Args:
fill (Number, optional):
Determines how values out of bounds are handled. If `None`, a
`ValueError` is raised when out-of-bounds points are requested.
Otherwise, the given value is returned.
full_data (bool):
Flag indicating that the interpolator should work on the full data array
that includes values for the grid points. If this is the case, the
boundaries are not checked and the coordinates are used as is.
cell_coords (bool):
Flag indicating whether points are given in cell coordinates or actual
point coordinates.
Returns:
A function which returns interpolated values when called with
arbitrary positions within the space of the grid. The signature of
this function is (data, point), where `data` is the numpy array
            containing the field data and `point` denotes the position in
grid coordinates.
"""
if full_data and fill is not None:
self._logger.warning("Interpolation of full data does not use `fill`.")
args = {"full_data": full_data, "cell_coords": cell_coords}
if self.num_axes == 1:
# specialize for 1-dimensional interpolation
data_x = self._make_interpolation_axis_data(0, **args)
@jit
def interpolate_single(
data: np.ndarray, point: np.ndarray
) -> NumberOrArray:
"""obtain interpolated value of data at a point
Args:
data (:class:`~numpy.ndarray`):
A 1d array of valid values at the grid points
point (:class:`~numpy.ndarray`):
Coordinates of a single point in the grid coordinate
system
Returns:
:class:`~numpy.ndarray`: The interpolated value at the point
"""
c_li, c_hi, w_l, w_h = data_x(point[0])
if c_li == -42: # out of bounds
if fill is None: # outside the domain
raise DomainError("Point lies outside the grid domain")
else:
return fill
# do the linear interpolation
return w_l * data[..., c_li] + w_h * data[..., c_hi] # type: ignore
elif self.num_axes == 2:
# specialize for 2-dimensional interpolation
data_x = self._make_interpolation_axis_data(0, **args)
data_y = self._make_interpolation_axis_data(1, **args)
@jit
def interpolate_single(
data: np.ndarray, point: np.ndarray
) -> NumberOrArray:
"""obtain interpolated value of data at a point
Args:
data (:class:`~numpy.ndarray`):
A 2d array of valid values at the grid points
point (:class:`~numpy.ndarray`):
Coordinates of a single point in the grid coordinate
system
Returns:
:class:`~numpy.ndarray`: The interpolated value at the point
"""
# determine surrounding points and their weights
c_xli, c_xhi, w_xl, w_xh = data_x(point[0])
c_yli, c_yhi, w_yl, w_yh = data_y(point[1])
if c_xli == -42 or c_yli == -42: # out of bounds
if fill is None: # outside the domain
raise DomainError("Point lies outside the grid domain")
else:
return fill
# do the linear interpolation
return ( # type: ignore
w_xl * w_yl * data[..., c_xli, c_yli]
+ w_xl * w_yh * data[..., c_xli, c_yhi]
+ w_xh * w_yl * data[..., c_xhi, c_yli]
+ w_xh * w_yh * data[..., c_xhi, c_yhi]
)
elif self.num_axes == 3:
# specialize for 3-dimensional interpolation
data_x = self._make_interpolation_axis_data(0, **args)
data_y = self._make_interpolation_axis_data(1, **args)
data_z = self._make_interpolation_axis_data(2, **args)
@jit
def interpolate_single(
data: np.ndarray, point: np.ndarray
) -> NumberOrArray:
"""obtain interpolated value of data at a point
Args:
data (:class:`~numpy.ndarray`):
                        A 3d array of valid values at the grid points
point (:class:`~numpy.ndarray`):
Coordinates of a single point in the grid coordinate
system
Returns:
:class:`~numpy.ndarray`: The interpolated value at the point
"""
# determine surrounding points and their weights
c_xli, c_xhi, w_xl, w_xh = data_x(point[0])
c_yli, c_yhi, w_yl, w_yh = data_y(point[1])
c_zli, c_zhi, w_zl, w_zh = data_z(point[2])
if c_xli == -42 or c_yli == -42 or c_zli == -42: # out of bounds
if fill is None: # outside the domain
raise DomainError("Point lies outside the grid domain")
else:
return fill
# do the linear interpolation
return ( # type: ignore
w_xl * w_yl * w_zl * data[..., c_xli, c_yli, c_zli]
+ w_xl * w_yl * w_zh * data[..., c_xli, c_yli, c_zhi]
+ w_xl * w_yh * w_zl * data[..., c_xli, c_yhi, c_zli]
+ w_xl * w_yh * w_zh * data[..., c_xli, c_yhi, c_zhi]
+ w_xh * w_yl * w_zl * data[..., c_xhi, c_yli, c_zli]
+ w_xh * w_yl * w_zh * data[..., c_xhi, c_yli, c_zhi]
+ w_xh * w_yh * w_zl * data[..., c_xhi, c_yhi, c_zli]
+ w_xh * w_yh * w_zh * data[..., c_xhi, c_yhi, c_zhi]
)
else:
raise NotImplementedError(
f"Compiled interpolation not implemented for dimension {self.num_axes}"
)
return interpolate_single # type: ignore
def make_inserter_compiled(
self, *, full_data: bool = False
) -> Callable[[np.ndarray, np.ndarray, NumberOrArray], None]:
"""return a compiled function to insert values at interpolated positions
Args:
full_data (bool):
Flag indicating that the interpolator should work on the full data array
that includes values for the grid points. If this is the case, the
boundaries are not checked and the coordinates are used as is.
Returns:
A function with signature (data, position, amount), where `data` is
            the numpy array containing the field data, `position` denotes the
            position in grid coordinates, and `amount` is the value that is to be
added to the field.
"""
cell_volume = self.make_cell_volume_compiled()
if self.num_axes == 1:
# specialize for 1-dimensional interpolation
data_x = self._make_interpolation_axis_data(0, full_data=full_data)
@jit
def insert(
data: np.ndarray, point: np.ndarray, amount: NumberOrArray
) -> None:
"""add an amount to a field at an interpolated position
Args:
data (:class:`~numpy.ndarray`):
The values at the grid points
point (:class:`~numpy.ndarray`):
Coordinates of a single point in the grid coordinate system
amount (Number or :class:`~numpy.ndarray`):
The amount that will be added to the data. This value describes
an integrated quantity (given by the field value times the
discretization volume). This is important for consistency with
different discretizations and in particular grids with
non-uniform discretizations
"""
c_li, c_hi, w_l, w_h = data_x(point[0])
if c_li == -42: # out of bounds
raise DomainError("Point lies outside the grid domain")
data[..., c_li] += w_l * amount / cell_volume(c_li)
data[..., c_hi] += w_h * amount / cell_volume(c_hi)
elif self.num_axes == 2:
# specialize for 2-dimensional interpolation
data_x = self._make_interpolation_axis_data(0, full_data=full_data)
data_y = self._make_interpolation_axis_data(1, full_data=full_data)
@jit
def insert(
data: np.ndarray, point: np.ndarray, amount: NumberOrArray
) -> None:
"""add an amount to a field at an interpolated position
Args:
data (:class:`~numpy.ndarray`):
The values at the grid points
point (:class:`~numpy.ndarray`):
Coordinates of a single point in the grid coordinate system
amount (Number or :class:`~numpy.ndarray`):
The amount that will be added to the data. This value describes
an integrated quantity (given by the field value times the
discretization volume). This is important for consistency with
different discretizations and in particular grids with
non-uniform discretizations
"""
# determine surrounding points and their weights
c_xli, c_xhi, w_xl, w_xh = data_x(point[0])
c_yli, c_yhi, w_yl, w_yh = data_y(point[1])
if c_xli == -42 or c_yli == -42: # out of bounds
raise DomainError("Point lies outside the grid domain")
cell_vol = cell_volume(c_xli, c_yli)
data[..., c_xli, c_yli] += w_xl * w_yl * amount / cell_vol
cell_vol = cell_volume(c_xli, c_yhi)
data[..., c_xli, c_yhi] += w_xl * w_yh * amount / cell_vol
cell_vol = cell_volume(c_xhi, c_yli)
data[..., c_xhi, c_yli] += w_xh * w_yl * amount / cell_vol
cell_vol = cell_volume(c_xhi, c_yhi)
data[..., c_xhi, c_yhi] += w_xh * w_yh * amount / cell_vol
elif self.num_axes == 3:
# specialize for 3-dimensional interpolation
data_x = self._make_interpolation_axis_data(0, full_data=full_data)
data_y = self._make_interpolation_axis_data(1, full_data=full_data)
data_z = self._make_interpolation_axis_data(2, full_data=full_data)
@jit
def insert(
data: np.ndarray, point: np.ndarray, amount: NumberOrArray
) -> None:
"""add an amount to a field at an interpolated position
Args:
data (:class:`~numpy.ndarray`):
The values at the grid points
point (:class:`~numpy.ndarray`):
Coordinates of a single point in the grid coordinate system
amount (Number or :class:`~numpy.ndarray`):
The amount that will be added to the data. This value describes
an integrated quantity (given by the field value times the
discretization volume). This is important for consistency with
different discretizations and in particular grids with
non-uniform discretizations
"""
# determine surrounding points and their weights
c_xli, c_xhi, w_xl, w_xh = data_x(point[0])
c_yli, c_yhi, w_yl, w_yh = data_y(point[1])
c_zli, c_zhi, w_zl, w_zh = data_z(point[2])
if c_xli == -42 or c_yli == -42 or c_zli == -42: # out of bounds
raise DomainError("Point lies outside the grid domain")
cell_vol = cell_volume(c_xli, c_yli, c_zli)
data[..., c_xli, c_yli, c_zli] += w_xl * w_yl * w_zl * amount / cell_vol
cell_vol = cell_volume(c_xli, c_yli, c_zhi)
data[..., c_xli, c_yli, c_zhi] += w_xl * w_yl * w_zh * amount / cell_vol
cell_vol = cell_volume(c_xli, c_yhi, c_zli)
data[..., c_xli, c_yhi, c_zli] += w_xl * w_yh * w_zl * amount / cell_vol
cell_vol = cell_volume(c_xli, c_yhi, c_zhi)
data[..., c_xli, c_yhi, c_zhi] += w_xl * w_yh * w_zh * amount / cell_vol
cell_vol = cell_volume(c_xhi, c_yli, c_zli)
data[..., c_xhi, c_yli, c_zli] += w_xh * w_yl * w_zl * amount / cell_vol
cell_vol = cell_volume(c_xhi, c_yli, c_zhi)
data[..., c_xhi, c_yli, c_zhi] += w_xh * w_yl * w_zh * amount / cell_vol
cell_vol = cell_volume(c_xhi, c_yhi, c_zli)
data[..., c_xhi, c_yhi, c_zli] += w_xh * w_yh * w_zl * amount / cell_vol
cell_vol = cell_volume(c_xhi, c_yhi, c_zhi)
data[..., c_xhi, c_yhi, c_zhi] += w_xh * w_yh * w_zh * amount / cell_vol
else:
raise NotImplementedError(
f"Compiled interpolation not implemented for dimension {self.num_axes}"
)
return insert # type: ignore
def make_integrator(self) -> Callable[[np.ndarray], np.ndarray]:
"""Return function that can be used to integrates discretized data over the grid
Note that currently only scalar fields are supported.
Returns:
callable: A function that takes a numpy array and returns the integral with
the correct weights given by the cell volumes.
"""
num_axes = self.num_axes
if self.uniform_cell_volumes:
# all cells have the same volume
cell_volume = np.product(self.cell_volume_data)
@jit
def integrate(arr: np.ndarray) -> Number:
"""function that integrates data over a uniform grid"""
assert arr.ndim == num_axes
return cell_volume * arr.sum() # type: ignore
else:
# cell volume varies with position
get_cell_volume = self.make_cell_volume_compiled(flat_index=True)
@jit
def integrate(arr: np.ndarray) -> Number:
"""function that integrates scalar data over a non-uniform grid"""
assert arr.ndim == num_axes
total = 0
for i in nb.prange(arr.size):
total += get_cell_volume(i) * arr.flat[i]
return total
return integrate # type: ignore
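# Illustrative sketch (added): `_example_make_integrator` is not part of the original module,
# and `UnitGrid` is a concrete subclass assumed to be defined elsewhere in the package; the
# sketch merely shows that the compiled integrator agrees with `GridBase.integrate`.
def _example_make_integrator():
    grid = UnitGrid([8, 8])                  # hypothetical grid with unit cell volume
    data = np.ones(grid.shape)
    integrate = grid.make_integrator()
    assert np.isclose(integrate(data), grid.integrate(data))  # both give 64.0 here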
def registered_operators() -> Dict[str, List[str]]:
"""returns all operators that are currently defined
Returns:
dict: a dictionary with the names of the operators defined for each grid class
"""
return {
name: sorted(cls.operators)
for name, cls in GridBase._subclasses.items()
if not (name.endswith("Base") or hasattr(cls, "deprecated") and cls.deprecated) # type: ignore
}
| 39.125833 | 103 | 0.57572 |
5ab5a04c68f8760c2cf922dfbb192426fda880de
| 404 |
py
|
Python
|
STACK/Rainwater trapped.py
|
rajansh87/Algorithms-Implementations
|
1f3dd1bc2decf10638fe0fdeeede47a650a9057b
|
[
"MIT"
] | 1 |
2020-05-10T19:01:51.000Z
|
2020-05-10T19:01:51.000Z
|
STACK/Rainwater trapped.py
|
rajansh87/Algorithms-Implementations
|
1f3dd1bc2decf10638fe0fdeeede47a650a9057b
|
[
"MIT"
] | 9 |
2021-03-17T18:10:18.000Z
|
2021-03-29T19:35:06.000Z
|
STACK/Rainwater trapped.py
|
rajansh87/Data-Structures-and-Algorithms-Implementations
|
0529079fbcd4d1a047210e9f2ff42c194c0818fe
|
[
"MIT"
] | null | null | null |
arr=[0,1,0,2,1,0,1,3,2,1,2,1]
water=0
lma,rma=0,0
low=0
high=len(arr)-1
while(low<=high):
if(arr[low]<arr[high]):
if(arr[low]>lma):
lma=arr[low]
else:
water+=lma-arr[low]
low+=1
else:
if(arr[high]>rma):
rma=arr[high]
else:
water+=rma-arr[high]
high-=1
print(water)
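# Notes added for illustration (not part of the original script): the loop above is the
# two-pointer "trapping rain water" technique: at each step the smaller of arr[low] and
# arr[high] is processed, since the water level there is bounded by the running maximum
# seen from that side (lma from the left, rma from the right).
# A reusable version of the same idea (hypothetical helper, added here for clarity):
def trapped_water(heights):
    low, high = 0, len(heights) - 1
    left_max = right_max = total = 0
    while low <= high:
        if heights[low] < heights[high]:
            if heights[low] > left_max:
                left_max = heights[low]        # new left barrier
            else:
                total += left_max - heights[low]
            low += 1
        else:
            if heights[high] > right_max:
                right_max = heights[high]      # new right barrier
            else:
                total += right_max - heights[high]
            high -= 1
    return total
assert trapped_water(arr) == 6                 # matches the value printed above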
| 18.363636 | 33 | 0.428218 |
0ac2e91b799891e72c41c520c1529b7fd064bcbd
| 1,655 |
py
|
Python
|
tests/datasets/test_customized_data.py
|
Aliang-CN/cogdl
|
01b1dbf7528240457a5fbe8c24b8271e805dc7ec
|
[
"MIT"
] | 1 |
2021-03-17T07:23:51.000Z
|
2021-03-17T07:23:51.000Z
|
tests/datasets/test_customized_data.py
|
yingyukexiansheng/cogdl
|
cf594cdb3a97f45333d08c937205d1a691828a33
|
[
"MIT"
] | null | null | null |
tests/datasets/test_customized_data.py
|
yingyukexiansheng/cogdl
|
cf594cdb3a97f45333d08c937205d1a691828a33
|
[
"MIT"
] | null | null | null |
import torch
from cogdl.data import Data
from cogdl.datasets import BaseDataset, register_dataset, build_dataset, build_dataset_from_name
from cogdl.utils import build_args_from_dict
@register_dataset("mydataset")
class MyNodeClassificationDataset(BaseDataset):
def __init__(self):
super(MyNodeClassificationDataset, self).__init__()
self.data = self.process()
def process(self):
num_nodes = 100
num_edges = 300
feat_dim = 30
# load or generate your dataset
edge_index = torch.randint(0, num_nodes, (2, num_edges))
x = torch.randn(num_nodes, feat_dim)
y = torch.randint(0, 2, (num_nodes,))
# set train/val/test mask in node_classification task
train_mask = torch.zeros(num_nodes).bool()
train_mask[0 : int(0.3 * num_nodes)] = True
val_mask = torch.zeros(num_nodes).bool()
val_mask[int(0.3 * num_nodes) : int(0.7 * num_nodes)] = True
test_mask = torch.zeros(num_nodes).bool()
test_mask[int(0.7 * num_nodes) :] = True
data = Data(x=x, edge_index=edge_index, y=y, train_mask=train_mask, val_mask=val_mask, test_mask=test_mask)
torch.save(data, "mydata.pt")
return data
def test_customized_dataset():
dataset = build_dataset_from_name("mydataset")
assert isinstance(dataset[0], Data)
assert dataset[0].x.shape[0] == 100
def test_build_dataset_from_path():
args = build_args_from_dict({"dataset": "mydata.pt", "task": "node_classification"})
dataset = build_dataset(args)
assert dataset[0].x.shape[0] == 100
if __name__ == "__main__":
test_customized_dataset()
| 33.77551 | 115 | 0.679758 |
c7f86b989dcdcec9f80f276eeba2869dfac1dc74
| 2,536 |
py
|
Python
|
sdk/connectedvmware/azure-mgmt-connectedvmware/setup.py
|
moovy2/azure-sdk-for-python
|
6b0495dc9917d47a7264f26cbd3221d43461a537
|
[
"MIT"
] | null | null | null |
sdk/connectedvmware/azure-mgmt-connectedvmware/setup.py
|
moovy2/azure-sdk-for-python
|
6b0495dc9917d47a7264f26cbd3221d43461a537
|
[
"MIT"
] | null | null | null |
sdk/connectedvmware/azure-mgmt-connectedvmware/setup.py
|
moovy2/azure-sdk-for-python
|
6b0495dc9917d47a7264f26cbd3221d43461a537
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
import re
import os.path
from io import open
from setuptools import find_packages, setup
# Change the PACKAGE_NAME only to change folder and different name
PACKAGE_NAME = "azure-mgmt-connectedvmware"
PACKAGE_PPRINT_NAME = "Connected VMWare Management"
# a-b-c => a/b/c
package_folder_path = PACKAGE_NAME.replace('-', '/')
# a-b-c => a.b.c
namespace_name = PACKAGE_NAME.replace('-', '.')
# Version extraction inspired from 'requests'
with open(os.path.join(package_folder_path, 'version.py')
if os.path.exists(os.path.join(package_folder_path, 'version.py'))
else os.path.join(package_folder_path, '_version.py'), 'r') as fd:
version = re.search(r'^VERSION\s*=\s*[\'"]([^\'"]*)[\'"]',
fd.read(), re.MULTILINE).group(1)
if not version:
raise RuntimeError('Cannot find version information')
with open('README.md', encoding='utf-8') as f:
readme = f.read()
with open('CHANGELOG.md', encoding='utf-8') as f:
changelog = f.read()
setup(
name=PACKAGE_NAME,
version=version,
description='Microsoft Azure {} Client Library for Python'.format(PACKAGE_PPRINT_NAME),
long_description=readme + '\n\n' + changelog,
long_description_content_type='text/markdown',
license='MIT License',
author='Microsoft Corporation',
author_email='[email protected]',
url='https://github.com/Azure/azure-sdk-for-python',
classifiers=[
'Development Status :: 4 - Beta',
'Programming Language :: Python',
'Programming Language :: Python :: 3 :: Only',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'License :: OSI Approved :: MIT License',
],
zip_safe=False,
packages=find_packages(exclude=[
'tests',
# Exclude packages that will be covered by PEP420 or nspkg
'azure',
'azure.mgmt',
]),
install_requires=[
'msrest>=0.6.21',
'azure-common~=1.1',
'azure-mgmt-core>=1.2.0,<2.0.0',
],
python_requires=">=3.7",
)
| 34.27027 | 91 | 0.606861 |
0f2b5ba04147b3afbb2b735357905e7eac75bc23
| 3,296 |
py
|
Python
|
yt_handle.py
|
luceatnobis/yt_handle
|
f67ddf0f6d312b0af0eda18834a4fe06e8a3002d
|
[
"Apache-2.0"
] | null | null | null |
yt_handle.py
|
luceatnobis/yt_handle
|
f67ddf0f6d312b0af0eda18834a4fe06e8a3002d
|
[
"Apache-2.0"
] | null | null | null |
yt_handle.py
|
luceatnobis/yt_handle
|
f67ddf0f6d312b0af0eda18834a4fe06e8a3002d
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
from __future__ import print_function
import os
import sys
import shutil
import httplib2
import oauth2client
try:
import apiclient as googleapiclient
except ImportError:
import googleapiclient
from oauth2client.file import Storage, Credentials
from oauth2client.client import flow_from_clientsecrets
CS = "client_secrets.json"
CREDS = "credentials.json"
YOUTUBE_DATA_ROOT = '~/.youtube'
YOUTUBE_READ_WRITE_SSL_SCOPE = (
"https://www.googleapis.com/auth/youtube.force-ssl")
def return_handle(id_name):
identity_root = os.path.expanduser(YOUTUBE_DATA_ROOT)
identity_folder = os.path.join(identity_root, id_name)
if not os.path.exists(identity_folder):
n = input("Identity %s is not known; create it? [Y|n] " % id_name)
if not n or n.lower().startswith('y'):
create_identity(id_name)
else:
sys.exit()
identity = _retrieve_files(identity_folder)
c = Credentials().new_from_json(identity['credentials'])
handle = c.authorize(http=httplib2.Http())
return googleapiclient.discovery.build(
"youtube", "v3", http=handle)
def create_identity(id_name, cs_location=None):
if cs_location is None:
n = input("Please specify the location of the client_secrets file: ")
cs_location = os.path.abspath(os.path.expanduser(n))
if os.path.isdir(cs_location):
cs_location = os.path.join(cs_location, CS)
identity_root = os.path.expanduser(YOUTUBE_DATA_ROOT)
identity_folder = os.path.join(identity_root, id_name)
if os.path.exists(identity_folder):
return
id_cs_location = os.path.join(identity_root, id_name, CS)
id_cred_location = os.path.join(identity_root, id_name, CREDS)
storage = Storage(id_cred_location)
credentials = storage.get()
if credentials and not credentials.invalid:
return credentials # credentials exist
flow = flow_from_clientsecrets(
cs_location, scope=YOUTUBE_READ_WRITE_SSL_SCOPE)
flow.redirect_uri = oauth2client.client.OOB_CALLBACK_URN
authorize_url = flow.step1_get_authorize_url()
code = _console_auth(authorize_url)
if code:
credential = flow.step2_exchange(code, http=None)
os.makedirs(identity_folder)
storage.put(credential)
credential.set_store(storage)
shutil.copyfile(cs_location, id_cs_location)
return credential
else:
print("Invalid input, exiting", file=sys.stderr)
sys.exit()
def _console_auth(authorize_url):
"""Show authorization URL and return the code the user wrote."""
message = "Check this link in your browser: {0}".format(authorize_url)
sys.stderr.write(message + "\n")
try:
        input = raw_input # For Python2 compatibility
except NameError:
        # For Python3 on Windows compatibility
try:
from builtins import input as input
except ImportError:
pass
return input("Enter verification code: ")
def _retrieve_files(folder):
cs_f = os.path.join(folder, CS)
creds_f = os.path.join(folder, CREDS)
with open(cs_f) as sec, open(creds_f) as cred:
secrets = sec.read()
credentials = cred.read()
return dict(secrets=secrets, credentials=credentials)
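# Hedged usage sketch (added): the identity name below is hypothetical, and the
# channels().list call merely illustrates a typical YouTube Data API v3 request.
if __name__ == "__main__":
    youtube = return_handle("default") # loads ~/.youtube/default, offering to create it if missing
    response = youtube.channels().list(part="snippet", mine=True).execute()
    print(response)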
| 29.693694 | 77 | 0.698726 |
1478acb5f90a90376f0530dad0835da7b283bba8
| 753 |
py
|
Python
|
google/cloud/tasks/v2beta3/tasks-v2beta3-py/google/cloud/tasks_v2beta3/services/cloud_tasks/__init__.py
|
googleapis/googleapis-gen
|
d84824c78563d59b0e58d5664bfaa430e9ad7e7a
|
[
"Apache-2.0"
] | 7 |
2021-02-21T10:39:41.000Z
|
2021-12-07T07:31:28.000Z
|
google/cloud/tasks/v2beta3/tasks-v2beta3-py/google/cloud/tasks_v2beta3/services/cloud_tasks/__init__.py
|
googleapis/googleapis-gen
|
d84824c78563d59b0e58d5664bfaa430e9ad7e7a
|
[
"Apache-2.0"
] | 6 |
2021-02-02T23:46:11.000Z
|
2021-11-15T01:46:02.000Z
|
google/cloud/tasks/v2beta2/tasks-v2beta2-py/google/cloud/tasks_v2beta2/services/cloud_tasks/__init__.py
|
googleapis/googleapis-gen
|
d84824c78563d59b0e58d5664bfaa430e9ad7e7a
|
[
"Apache-2.0"
] | 4 |
2021-01-28T23:25:45.000Z
|
2021-08-30T01:55:16.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .client import CloudTasksClient
from .async_client import CloudTasksAsyncClient
__all__ = (
'CloudTasksClient',
'CloudTasksAsyncClient',
)
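# Hedged usage sketch (added; the project and location below are placeholders):
#
#     from google.cloud.tasks_v2beta3 import CloudTasksClient
#     client = CloudTasksClient()
#     parent = "projects/my-project/locations/us-central1"
#     for queue in client.list_queues(parent=parent):
#         print(queue.name)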
| 32.73913 | 74 | 0.752988 |
335138d3a43d835ac61dca55298fb3ec9317005d
| 28 |
py
|
Python
|
django_scarface/__init__.py
|
dreipol/django-scarface
|
5d9bed10dbca9d8ee105734adacd03f1cfb09952
|
[
"MIT"
] | 53 |
2015-01-05T19:25:48.000Z
|
2019-12-23T01:34:03.000Z
|
django_scarface/__init__.py
|
dreipol/django-scarface
|
5d9bed10dbca9d8ee105734adacd03f1cfb09952
|
[
"MIT"
] | 27 |
2015-01-12T07:18:49.000Z
|
2019-02-19T06:28:47.000Z
|
django_scarface/__init__.py
|
dreipol/django-scarface
|
5d9bed10dbca9d8ee105734adacd03f1cfb09952
|
[
"MIT"
] | 30 |
2015-01-05T18:05:21.000Z
|
2019-04-01T05:54:55.000Z
|
__version__ = '3.2.1-alpha'
| 14 | 27 | 0.678571 |
6afb1893d0ff813b4122eec35d4a6d4bf5beb88a
| 24,845 |
py
|
Python
|
multiagent/graphicsDisplay.py
|
dcalacci/ml-expectimax-agent
|
1b74a1a02f98b702bf2d21e09f616b590bff481c
|
[
"MIT"
] | 2 |
2018-05-02T07:51:25.000Z
|
2020-05-11T00:55:08.000Z
|
multiagent/graphicsDisplay.py
|
dcalacci/ml-expectimax-agent
|
1b74a1a02f98b702bf2d21e09f616b590bff481c
|
[
"MIT"
] | null | null | null |
multiagent/graphicsDisplay.py
|
dcalacci/ml-expectimax-agent
|
1b74a1a02f98b702bf2d21e09f616b590bff481c
|
[
"MIT"
] | 2 |
2017-10-10T22:11:51.000Z
|
2019-12-11T16:05:12.000Z
|
from graphicsUtils import *
import math, time
from game import Directions
###########################
# GRAPHICS DISPLAY CODE #
###########################
# Most code by Dan Klein and John Denero written or rewritten for cs188, UC Berkeley.
# Some code from a Pacman implementation by LiveWires, and used / modified with permission.
DEFAULT_GRID_SIZE = 30.0
INFO_PANE_HEIGHT = 35
BACKGROUND_COLOR = formatColor(0,0,0)
WALL_COLOR = formatColor(0.0/255.0, 51.0/255.0, 255.0/255.0)
INFO_PANE_COLOR = formatColor(.4,.4,0)
SCORE_COLOR = formatColor(.9, .9, .9)
PACMAN_OUTLINE_WIDTH = 2
PACMAN_CAPTURE_OUTLINE_WIDTH = 4
GHOST_COLORS = []
GHOST_COLORS.append(formatColor(.9,0,0)) # Red
GHOST_COLORS.append(formatColor(0,.3,.9)) # Blue
GHOST_COLORS.append(formatColor(.98,.41,.07)) # Orange
GHOST_COLORS.append(formatColor(.1,.75,.7)) # Green
GHOST_COLORS.append(formatColor(1.0,0.6,0.0)) # Yellow
GHOST_COLORS.append(formatColor(.4,0.13,0.91)) # Purple
TEAM_COLORS = GHOST_COLORS[:2]
GHOST_SHAPE = [
( 0, 0.3 ),
( 0.25, 0.75 ),
( 0.5, 0.3 ),
( 0.75, 0.75 ),
( 0.75, -0.5 ),
( 0.5, -0.75 ),
(-0.5, -0.75 ),
(-0.75, -0.5 ),
(-0.75, 0.75 ),
(-0.5, 0.3 ),
(-0.25, 0.75 )
]
GHOST_SIZE = 0.65
SCARED_COLOR = formatColor(1,1,1)
GHOST_VEC_COLORS = map(colorToVector, GHOST_COLORS)
PACMAN_COLOR = formatColor(255.0/255.0,255.0/255.0,61.0/255)
PACMAN_SCALE = 0.5
#pacman_speed = 0.25
# Food
FOOD_COLOR = formatColor(1,1,1)
FOOD_SIZE = 0.1
# Laser
LASER_COLOR = formatColor(1,0,0)
LASER_SIZE = 0.02
# Capsule graphics
CAPSULE_COLOR = formatColor(1,1,1)
CAPSULE_SIZE = 0.25
# Drawing walls
WALL_RADIUS = 0.15
class InfoPane:
def __init__(self, layout, gridSize):
self.gridSize = gridSize
self.width = (layout.width) * gridSize
self.base = (layout.height + 1) * gridSize
self.height = INFO_PANE_HEIGHT
self.fontSize = 24
self.textColor = PACMAN_COLOR
self.drawPane()
def toScreen(self, pos, y = None):
"""
        Translates a point relative to the bottom left of the info pane.
"""
if y == None:
x,y = pos
else:
x = pos
x = self.gridSize + x # Margin
y = self.base + y
return x,y
def drawPane(self):
self.scoreText = text( self.toScreen(0, 0 ), self.textColor, "SCORE: 0", "Times", self.fontSize, "bold")
def initializeGhostDistances(self, distances):
self.ghostDistanceText = []
size = 20
if self.width < 240:
size = 12
if self.width < 160:
size = 10
for i, d in enumerate(distances):
t = text( self.toScreen(self.width/2 + self.width/8 * i, 0), GHOST_COLORS[i+1], d, "Times", size, "bold")
self.ghostDistanceText.append(t)
def updateScore(self, score):
changeText(self.scoreText, "SCORE: % 4d" % score)
def setTeam(self, isBlue):
text = "RED TEAM"
if isBlue: text = "BLUE TEAM"
self.teamText = text( self.toScreen(300, 0 ), self.textColor, text, "Times", self.fontSize, "bold")
def updateGhostDistances(self, distances):
if len(distances) == 0: return
if 'ghostDistanceText' not in dir(self): self.initializeGhostDistances(distances)
else:
for i, d in enumerate(distances):
changeText(self.ghostDistanceText[i], d)
def drawGhost(self):
pass
def drawPacman(self):
pass
def drawWarning(self):
pass
def clearIcon(self):
pass
def updateMessage(self, message):
pass
def clearMessage(self):
pass
class PacmanGraphics:
def __init__(self, zoom=1.0, frameTime=0.0, capture=False):
self.have_window = 0
self.currentGhostImages = {}
self.pacmanImage = None
self.zoom = zoom
self.gridSize = DEFAULT_GRID_SIZE * zoom
self.capture = capture
self.frameTime = frameTime
def initialize(self, state, isBlue = False):
self.isBlue = isBlue
self.startGraphics(state)
# self.drawDistributions(state)
self.distributionImages = None # Initialized lazily
self.drawStaticObjects(state)
self.drawAgentObjects(state)
def startGraphics(self, state):
self.layout = state.layout
layout = self.layout
self.width = layout.width
self.height = layout.height
self.make_window(self.width, self.height)
self.infoPane = InfoPane(layout, self.gridSize)
self.currentState = layout
def drawDistributions(self, state):
walls = state.layout.walls
dist = []
for x in range(walls.width):
distx = []
dist.append(distx)
for y in range(walls.height):
( screen_x, screen_y ) = self.to_screen( (x, y) )
block = square( (screen_x, screen_y),
0.5 * self.gridSize,
color = BACKGROUND_COLOR,
filled = 1, behind=2)
distx.append(block)
self.distributionImages = dist
def drawStaticObjects(self, state):
layout = self.layout
self.drawWalls(layout.walls)
self.food = self.drawFood(layout.food)
self.capsules = self.drawCapsules(layout.capsules)
refresh()
def drawAgentObjects(self, state):
self.agentImages = [] # (agentState, image)
for index, agent in enumerate(state.agentStates):
if agent.isPacman:
image = self.drawPacman(agent, index)
self.agentImages.append( (agent, image) )
else:
image = self.drawGhost(agent, index)
self.agentImages.append( (agent, image) )
refresh()
def swapImages(self, agentIndex, newState):
"""
        Changes an image from a ghost to a pacman or vice versa (for capture)
"""
prevState, prevImage = self.agentImages[agentIndex]
for item in prevImage: remove_from_screen(item)
if newState.isPacman:
image = self.drawPacman(newState, agentIndex)
self.agentImages[agentIndex] = (newState, image )
else:
image = self.drawGhost(newState, agentIndex)
self.agentImages[agentIndex] = (newState, image )
refresh()
def update(self, newState):
agentIndex = newState._agentMoved
agentState = newState.agentStates[agentIndex]
if self.agentImages[agentIndex][0].isPacman != agentState.isPacman: self.swapImages(agentIndex, agentState)
prevState, prevImage = self.agentImages[agentIndex]
if agentState.isPacman:
self.animatePacman(agentState, prevState, prevImage)
else:
self.moveGhost(agentState, agentIndex, prevState, prevImage)
self.agentImages[agentIndex] = (agentState, prevImage)
if newState._foodEaten != None:
self.removeFood(newState._foodEaten, self.food)
if newState._capsuleEaten != None:
self.removeCapsule(newState._capsuleEaten, self.capsules)
self.infoPane.updateScore(newState.score)
if 'ghostDistances' in dir(newState):
self.infoPane.updateGhostDistances(newState.ghostDistances)
def make_window(self, width, height):
grid_width = (width-1) * self.gridSize
grid_height = (height-1) * self.gridSize
screen_width = 2*self.gridSize + grid_width
screen_height = 2*self.gridSize + grid_height + INFO_PANE_HEIGHT
begin_graphics(screen_width,
screen_height,
BACKGROUND_COLOR,
"CS188 Pacman")
def drawPacman(self, pacman, index):
position = self.getPosition(pacman)
screen_point = self.to_screen(position)
endpoints = self.getEndpoints(self.getDirection(pacman))
width = PACMAN_OUTLINE_WIDTH
outlineColor = PACMAN_COLOR
fillColor = PACMAN_COLOR
if self.capture:
outlineColor = TEAM_COLORS[index % 2]
fillColor = GHOST_COLORS[index]
width = PACMAN_CAPTURE_OUTLINE_WIDTH
return [circle(screen_point, PACMAN_SCALE * self.gridSize,
fillColor = fillColor, outlineColor = outlineColor,
endpoints = endpoints,
width = width)]
def getEndpoints(self, direction, position=(0,0)):
x, y = position
pos = x - int(x) + y - int(y)
width = 30 + 80 * math.sin(math.pi* pos)
delta = width / 2
if (direction == 'West'):
endpoints = (180+delta, 180-delta)
elif (direction == 'North'):
endpoints = (90+delta, 90-delta)
elif (direction == 'South'):
endpoints = (270+delta, 270-delta)
else:
endpoints = (0+delta, 0-delta)
return endpoints
def movePacman(self, position, direction, image):
screenPosition = self.to_screen(position)
endpoints = self.getEndpoints( direction, position )
r = PACMAN_SCALE * self.gridSize
moveCircle(image[0], screenPosition, r, endpoints)
refresh()
def animatePacman(self, pacman, prevPacman, image):
if self.frameTime < 0:
print 'Press any key to step forward, "q" to play'
keys = wait_for_keys()
if 'q' in keys:
self.frameTime = 0.1
if self.frameTime > 0.01 or self.frameTime < 0:
start = time.time()
fx, fy = self.getPosition(prevPacman)
px, py = self.getPosition(pacman)
frames = 4.0
for i in range(1,int(frames) + 1):
pos = px*i/frames + fx*(frames-i)/frames, py*i/frames + fy*(frames-i)/frames
self.movePacman(pos, self.getDirection(pacman), image)
refresh()
sleep(abs(self.frameTime) / frames)
else:
self.movePacman(self.getPosition(pacman), self.getDirection(pacman), image)
refresh()
def getGhostColor(self, ghost, ghostIndex):
if ghost.scaredTimer > 0:
return SCARED_COLOR
else:
return GHOST_COLORS[ghostIndex]
def drawGhost(self, ghost, agentIndex):
pos = self.getPosition(ghost)
dir = self.getDirection(ghost)
(screen_x, screen_y) = (self.to_screen(pos) )
coords = []
for (x, y) in GHOST_SHAPE:
coords.append((x*self.gridSize*GHOST_SIZE + screen_x, y*self.gridSize*GHOST_SIZE + screen_y))
colour = self.getGhostColor(ghost, agentIndex)
body = polygon(coords, colour, filled = 1)
WHITE = formatColor(1.0, 1.0, 1.0)
BLACK = formatColor(0.0, 0.0, 0.0)
dx = 0
dy = 0
if dir == 'North':
dy = -0.2
if dir == 'South':
dy = 0.2
if dir == 'East':
dx = 0.2
if dir == 'West':
dx = -0.2
leftEye = circle((screen_x+self.gridSize*GHOST_SIZE*(-0.3+dx/1.5), screen_y-self.gridSize*GHOST_SIZE*(0.3-dy/1.5)), self.gridSize*GHOST_SIZE*0.2, WHITE, WHITE)
rightEye = circle((screen_x+self.gridSize*GHOST_SIZE*(0.3+dx/1.5), screen_y-self.gridSize*GHOST_SIZE*(0.3-dy/1.5)), self.gridSize*GHOST_SIZE*0.2, WHITE, WHITE)
leftPupil = circle((screen_x+self.gridSize*GHOST_SIZE*(-0.3+dx), screen_y-self.gridSize*GHOST_SIZE*(0.3-dy)), self.gridSize*GHOST_SIZE*0.08, BLACK, BLACK)
rightPupil = circle((screen_x+self.gridSize*GHOST_SIZE*(0.3+dx), screen_y-self.gridSize*GHOST_SIZE*(0.3-dy)), self.gridSize*GHOST_SIZE*0.08, BLACK, BLACK)
ghostImageParts = []
ghostImageParts.append(body)
ghostImageParts.append(leftEye)
ghostImageParts.append(rightEye)
ghostImageParts.append(leftPupil)
ghostImageParts.append(rightPupil)
return ghostImageParts
def moveEyes(self, pos, dir, eyes):
(screen_x, screen_y) = (self.to_screen(pos) )
dx = 0
dy = 0
if dir == 'North':
dy = -0.2
if dir == 'South':
dy = 0.2
if dir == 'East':
dx = 0.2
if dir == 'West':
dx = -0.2
moveCircle(eyes[0],(screen_x+self.gridSize*GHOST_SIZE*(-0.3+dx/1.5), screen_y-self.gridSize*GHOST_SIZE*(0.3-dy/1.5)), self.gridSize*GHOST_SIZE*0.2)
moveCircle(eyes[1],(screen_x+self.gridSize*GHOST_SIZE*(0.3+dx/1.5), screen_y-self.gridSize*GHOST_SIZE*(0.3-dy/1.5)), self.gridSize*GHOST_SIZE*0.2)
moveCircle(eyes[2],(screen_x+self.gridSize*GHOST_SIZE*(-0.3+dx), screen_y-self.gridSize*GHOST_SIZE*(0.3-dy)), self.gridSize*GHOST_SIZE*0.08)
moveCircle(eyes[3],(screen_x+self.gridSize*GHOST_SIZE*(0.3+dx), screen_y-self.gridSize*GHOST_SIZE*(0.3-dy)), self.gridSize*GHOST_SIZE*0.08)
def moveGhost(self, ghost, ghostIndex, prevGhost, ghostImageParts):
old_x, old_y = self.to_screen(self.getPosition(prevGhost))
new_x, new_y = self.to_screen(self.getPosition(ghost))
delta = new_x - old_x, new_y - old_y
for ghostImagePart in ghostImageParts:
move_by(ghostImagePart, delta)
refresh()
if ghost.scaredTimer > 0:
color = SCARED_COLOR
else:
color = GHOST_COLORS[ghostIndex]
edit(ghostImageParts[0], ('fill', color), ('outline', color))
self.moveEyes(self.getPosition(ghost), self.getDirection(ghost), ghostImageParts[-4:])
refresh()
def getPosition(self, agentState):
if agentState.configuration == None: return (-1000, -1000)
return agentState.getPosition()
def getDirection(self, agentState):
if agentState.configuration == None: return Directions.STOP
return agentState.configuration.getDirection()
def finish(self):
end_graphics()
def to_screen(self, point):
( x, y ) = point
#y = self.height - y
x = (x + 1)*self.gridSize
y = (self.height - y)*self.gridSize
return ( x, y )
# Fixes some TK issue with off-center circles
def to_screen2(self, point):
( x, y ) = point
#y = self.height - y
x = (x + 1)*self.gridSize
y = (self.height - y)*self.gridSize
return ( x, y )
def drawWalls(self, wallMatrix):
wallColor = WALL_COLOR
for xNum, x in enumerate(wallMatrix):
if self.capture and (xNum * 2) < wallMatrix.width: wallColor = TEAM_COLORS[0]
if self.capture and (xNum * 2) >= wallMatrix.width: wallColor = TEAM_COLORS[1]
for yNum, cell in enumerate(x):
if cell: # There's a wall here
pos = (xNum, yNum)
screen = self.to_screen(pos)
screen2 = self.to_screen2(pos)
# draw each quadrant of the square based on adjacent walls
wIsWall = self.isWall(xNum-1, yNum, wallMatrix)
eIsWall = self.isWall(xNum+1, yNum, wallMatrix)
nIsWall = self.isWall(xNum, yNum+1, wallMatrix)
sIsWall = self.isWall(xNum, yNum-1, wallMatrix)
nwIsWall = self.isWall(xNum-1, yNum+1, wallMatrix)
swIsWall = self.isWall(xNum-1, yNum-1, wallMatrix)
neIsWall = self.isWall(xNum+1, yNum+1, wallMatrix)
seIsWall = self.isWall(xNum+1, yNum-1, wallMatrix)
# NE quadrant
if (not nIsWall) and (not eIsWall):
# inner circle
circle(screen2, WALL_RADIUS * self.gridSize, wallColor, wallColor, (0,91), 'arc')
if (nIsWall) and (not eIsWall):
# vertical line
line(add(screen, (self.gridSize*WALL_RADIUS, 0)), add(screen, (self.gridSize*WALL_RADIUS, self.gridSize*(-0.5)-1)), wallColor)
if (not nIsWall) and (eIsWall):
# horizontal line
line(add(screen, (0, self.gridSize*(-1)*WALL_RADIUS)), add(screen, (self.gridSize*0.5+1, self.gridSize*(-1)*WALL_RADIUS)), wallColor)
if (nIsWall) and (eIsWall) and (not neIsWall):
# outer circle
circle(add(screen2, (self.gridSize*2*WALL_RADIUS, self.gridSize*(-2)*WALL_RADIUS)), WALL_RADIUS * self.gridSize-1, wallColor, wallColor, (180,271), 'arc')
line(add(screen, (self.gridSize*2*WALL_RADIUS-1, self.gridSize*(-1)*WALL_RADIUS)), add(screen, (self.gridSize*0.5+1, self.gridSize*(-1)*WALL_RADIUS)), wallColor)
line(add(screen, (self.gridSize*WALL_RADIUS, self.gridSize*(-2)*WALL_RADIUS+1)), add(screen, (self.gridSize*WALL_RADIUS, self.gridSize*(-0.5))), wallColor)
# NW quadrant
if (not nIsWall) and (not wIsWall):
# inner circle
circle(screen2, WALL_RADIUS * self.gridSize, wallColor, wallColor, (90,181), 'arc')
if (nIsWall) and (not wIsWall):
# vertical line
line(add(screen, (self.gridSize*(-1)*WALL_RADIUS, 0)), add(screen, (self.gridSize*(-1)*WALL_RADIUS, self.gridSize*(-0.5)-1)), wallColor)
if (not nIsWall) and (wIsWall):
# horizontal line
line(add(screen, (0, self.gridSize*(-1)*WALL_RADIUS)), add(screen, (self.gridSize*(-0.5)-1, self.gridSize*(-1)*WALL_RADIUS)), wallColor)
if (nIsWall) and (wIsWall) and (not nwIsWall):
# outer circle
circle(add(screen2, (self.gridSize*(-2)*WALL_RADIUS, self.gridSize*(-2)*WALL_RADIUS)), WALL_RADIUS * self.gridSize-1, wallColor, wallColor, (270,361), 'arc')
line(add(screen, (self.gridSize*(-2)*WALL_RADIUS+1, self.gridSize*(-1)*WALL_RADIUS)), add(screen, (self.gridSize*(-0.5), self.gridSize*(-1)*WALL_RADIUS)), wallColor)
line(add(screen, (self.gridSize*(-1)*WALL_RADIUS, self.gridSize*(-2)*WALL_RADIUS+1)), add(screen, (self.gridSize*(-1)*WALL_RADIUS, self.gridSize*(-0.5))), wallColor)
# SE quadrant
if (not sIsWall) and (not eIsWall):
# inner circle
circle(screen2, WALL_RADIUS * self.gridSize, wallColor, wallColor, (270,361), 'arc')
if (sIsWall) and (not eIsWall):
# vertical line
line(add(screen, (self.gridSize*WALL_RADIUS, 0)), add(screen, (self.gridSize*WALL_RADIUS, self.gridSize*(0.5)+1)), wallColor)
if (not sIsWall) and (eIsWall):
# horizontal line
line(add(screen, (0, self.gridSize*(1)*WALL_RADIUS)), add(screen, (self.gridSize*0.5+1, self.gridSize*(1)*WALL_RADIUS)), wallColor)
if (sIsWall) and (eIsWall) and (not seIsWall):
# outer circle
circle(add(screen2, (self.gridSize*2*WALL_RADIUS, self.gridSize*(2)*WALL_RADIUS)), WALL_RADIUS * self.gridSize-1, wallColor, wallColor, (90,181), 'arc')
line(add(screen, (self.gridSize*2*WALL_RADIUS-1, self.gridSize*(1)*WALL_RADIUS)), add(screen, (self.gridSize*0.5, self.gridSize*(1)*WALL_RADIUS)), wallColor)
line(add(screen, (self.gridSize*WALL_RADIUS, self.gridSize*(2)*WALL_RADIUS-1)), add(screen, (self.gridSize*WALL_RADIUS, self.gridSize*(0.5))), wallColor)
# SW quadrant
if (not sIsWall) and (not wIsWall):
# inner circle
circle(screen2, WALL_RADIUS * self.gridSize, wallColor, wallColor, (180,271), 'arc')
if (sIsWall) and (not wIsWall):
# vertical line
line(add(screen, (self.gridSize*(-1)*WALL_RADIUS, 0)), add(screen, (self.gridSize*(-1)*WALL_RADIUS, self.gridSize*(0.5)+1)), wallColor)
if (not sIsWall) and (wIsWall):
# horizontal line
line(add(screen, (0, self.gridSize*(1)*WALL_RADIUS)), add(screen, (self.gridSize*(-0.5)-1, self.gridSize*(1)*WALL_RADIUS)), wallColor)
if (sIsWall) and (wIsWall) and (not swIsWall):
# outer circle
circle(add(screen2, (self.gridSize*(-2)*WALL_RADIUS, self.gridSize*(2)*WALL_RADIUS)), WALL_RADIUS * self.gridSize-1, wallColor, wallColor, (0,91), 'arc')
line(add(screen, (self.gridSize*(-2)*WALL_RADIUS+1, self.gridSize*(1)*WALL_RADIUS)), add(screen, (self.gridSize*(-0.5), self.gridSize*(1)*WALL_RADIUS)), wallColor)
line(add(screen, (self.gridSize*(-1)*WALL_RADIUS, self.gridSize*(2)*WALL_RADIUS-1)), add(screen, (self.gridSize*(-1)*WALL_RADIUS, self.gridSize*(0.5))), wallColor)
def isWall(self, x, y, walls):
if x < 0 or y < 0:
return False
if x >= walls.width or y >= walls.height:
return False
return walls[x][y]
def drawFood(self, foodMatrix ):
foodImages = []
color = FOOD_COLOR
for xNum, x in enumerate(foodMatrix):
if self.capture and (xNum * 2) <= foodMatrix.width: color = TEAM_COLORS[0]
if self.capture and (xNum * 2) > foodMatrix.width: color = TEAM_COLORS[1]
imageRow = []
foodImages.append(imageRow)
for yNum, cell in enumerate(x):
if cell: # There's food here
screen = self.to_screen((xNum, yNum ))
dot = circle( screen,
FOOD_SIZE * self.gridSize,
outlineColor = color, fillColor = color,
width = 1)
imageRow.append(dot)
else:
imageRow.append(None)
return foodImages
def drawCapsules(self, capsules ):
capsuleImages = {}
for capsule in capsules:
( screen_x, screen_y ) = self.to_screen(capsule)
dot = circle( (screen_x, screen_y),
CAPSULE_SIZE * self.gridSize,
outlineColor = CAPSULE_COLOR,
fillColor = CAPSULE_COLOR,
width = 1)
capsuleImages[capsule] = dot
return capsuleImages
def removeFood(self, cell, foodImages ):
x, y = cell
remove_from_screen(foodImages[x][y])
def removeCapsule(self, cell, capsuleImages ):
x, y = cell
remove_from_screen(capsuleImages[(x, y)])
def drawExpandedCells(self, cells):
"""
Draws an overlay of expanded grid positions for search agents
"""
n = float(len(cells))
baseColor = [1.0, 0.0, 0.0]
self.clearExpandedCells()
self.expandedCells = []
for k, cell in enumerate(cells):
screenPos = self.to_screen( cell)
cellColor = formatColor(*[(n-k) * c * .5 / n + .25 for c in baseColor])
block = square(screenPos,
0.5 * self.gridSize,
color = cellColor,
filled = 1, behind=2)
self.expandedCells.append(block)
if self.frameTime < 0:
refresh()
def clearExpandedCells(self):
if 'expandedCells' in dir(self) and len(self.expandedCells) > 0:
for cell in self.expandedCells:
remove_from_screen(cell)
def updateDistributions(self, distributions):
"Draws an agent's belief distributions"
if self.distributionImages == None:
self.drawDistributions(self.previousState)
for x in range(len(self.distributionImages)):
for y in range(len(self.distributionImages[0])):
image = self.distributionImages[x][y]
weights = [dist[ (x,y) ] for dist in distributions]
if sum(weights) != 0:
pass
# Fog of war
color = [0.0,0.0,0.0]
colors = GHOST_VEC_COLORS[1:] # With Pacman
if self.capture: colors = GHOST_VEC_COLORS
for weight, gcolor in zip(weights, colors):
color = [min(1.0, c + 0.95 * g * weight ** .3) for c,g in zip(color, gcolor)]
changeColor(image, formatColor(*color))
refresh()
class FirstPersonPacmanGraphics(PacmanGraphics):
def __init__(self, zoom = 1.0, showGhosts = True, capture = False, frameTime=0):
PacmanGraphics.__init__(self, zoom, frameTime=frameTime)
self.showGhosts = showGhosts
self.capture = capture
def initialize(self, state, isBlue = False):
self.isBlue = isBlue
PacmanGraphics.startGraphics(self, state)
# Initialize distribution images
walls = state.layout.walls
dist = []
self.layout = state.layout
# Draw the rest
self.distributionImages = None # initialize lazily
self.drawStaticObjects(state)
self.drawAgentObjects(state)
# Information
self.previousState = state
def lookAhead(self, config, state):
if config.getDirection() == 'Stop':
return
else:
pass
# Draw relevant ghosts
allGhosts = state.getGhostStates()
visibleGhosts = state.getVisibleGhosts()
for i, ghost in enumerate(allGhosts):
if ghost in visibleGhosts:
self.drawGhost(ghost, i)
else:
self.currentGhostImages[i] = None
def getGhostColor(self, ghost, ghostIndex):
return GHOST_COLORS[ghostIndex]
def getPosition(self, ghostState):
if not self.showGhosts and not ghostState.isPacman and ghostState.getPosition()[1] > 1:
return (-1000, -1000)
else:
return PacmanGraphics.getPosition(self, ghostState)
def add(x, y):
return (x[0] + y[0], x[1] + y[1])
# Saving graphical output
# -----------------------
# Note: to make an animated gif from this postscript output, try the command:
# convert -delay 7 -loop 1 -compress lzw -layers optimize frame* out.gif
# convert is part of imagemagick (freeware)
SAVE_POSTSCRIPT = False
POSTSCRIPT_OUTPUT_DIR = 'frames'
FRAME_NUMBER = 0
import os
def saveFrame():
"Saves the current graphical output as a postscript file"
global SAVE_POSTSCRIPT, FRAME_NUMBER, POSTSCRIPT_OUTPUT_DIR
if not SAVE_POSTSCRIPT: return
if not os.path.exists(POSTSCRIPT_OUTPUT_DIR): os.mkdir(POSTSCRIPT_OUTPUT_DIR)
name = os.path.join(POSTSCRIPT_OUTPUT_DIR, 'frame_%08d.ps' % FRAME_NUMBER)
FRAME_NUMBER += 1
writePostscript(name) # writes the current canvas
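# Hedged sketch (not part of the original CS188 code): a helper that could stitch the
# postscript frames written by saveFrame() into an animated gif programmatically,
# assuming ImageMagick's `convert` binary is available on the PATH; the function name
# and defaults below are illustrative only.
def convertFramesToGif(outputName='out.gif', delay=7):
    "Runs ImageMagick convert over the saved frames (illustrative sketch only)"
    import glob, subprocess
    frames = sorted(glob.glob(os.path.join(POSTSCRIPT_OUTPUT_DIR, 'frame_*.ps')))
    if not frames:
        return
    subprocess.call(['convert', '-delay', str(delay), '-loop', '1',
                     '-compress', 'lzw', '-layers', 'optimize'] + frames + [outputName])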
| 37.81583 | 177 | 0.631676 |
01dbd89133f57eb3b3dee626e65f3a7ba088ed1d
| 6,545 |
py
|
Python
|
src/cfnlint/rules/functions/SubNeeded.py
|
brendangibat/cfn-python-lint
|
60500d3926d810352e42b95b09c8e7814033cca6
|
[
"MIT-0"
] | null | null | null |
src/cfnlint/rules/functions/SubNeeded.py
|
brendangibat/cfn-python-lint
|
60500d3926d810352e42b95b09c8e7814033cca6
|
[
"MIT-0"
] | null | null | null |
src/cfnlint/rules/functions/SubNeeded.py
|
brendangibat/cfn-python-lint
|
60500d3926d810352e42b95b09c8e7814033cca6
|
[
"MIT-0"
] | null | null | null |
"""
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
SPDX-License-Identifier: MIT-0
"""
import re
from cfnlint.rules import CloudFormationLintRule
from cfnlint.rules import RuleMatch
class SubNeeded(CloudFormationLintRule):
"""Check if a substitution string exists without a substitution function"""
id = 'E1029'
shortdesc = 'Sub is required if a variable is used in a string'
description = 'If a substitution variable exists in a string but isn\'t wrapped with the Fn::Sub function the deployment will fail.'
source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-sub.html'
tags = ['functions', 'sub']
# Free-form text properties to exclude from this rule
# content is part of AWS::CloudFormation::Init
excludes = ['UserData', 'ZipFile', 'Condition', 'AWS::CloudFormation::Init',
'CloudWatchAlarmDefinition', 'TopicRulePayload']
api_excludes = ['Uri', 'Body']
# IAM Policy has special variables that don't require !Sub, Check for these
# https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_variables.html
# https://docs.aws.amazon.com/iot/latest/developerguide/basic-policy-variables.html
# https://docs.aws.amazon.com/iot/latest/developerguide/thing-policy-variables.html
# https://docs.aws.amazon.com/transfer/latest/userguide/users.html#users-policies-scope-down
# https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_iam-condition-keys.html
resource_excludes = ['${aws:CurrentTime}', '${aws:EpochTime}',
'${aws:TokenIssueTime}', '${aws:principaltype}',
'${aws:SecureTransport}', '${aws:SourceIp}',
'${aws:UserAgent}', '${aws:userid}',
'${aws:username}', '${ec2:SourceInstanceARN}',
'${iot:Connection.Thing.ThingName}',
'${iot:Connection.Thing.ThingTypeName}',
'${iot:Connection.Thing.IsAttached}',
'${iot:ClientId}', '${transfer:HomeBucket}',
'${transfer:HomeDirectory}', '${transfer:HomeFolder}',
'${transfer:UserName}', '${redshift:DbUser}',
'${cognito-identity.amazonaws.com:aud}',
'${cognito-identity.amazonaws.com:sub}',
'${cognito-identity.amazonaws.com:amr}']
# https://docs.aws.amazon.com/redshift/latest/mgmt/redshift-iam-access-control-identity-based.html
condition_excludes = [
'${redshift:DbUser}',
]
def _match_values(self, searchRegex, cfnelem, path):
"""Recursively search for values matching the searchRegex"""
values = []
if isinstance(cfnelem, dict):
for key in cfnelem:
pathprop = path[:]
pathprop.append(key)
values.extend(self._match_values(searchRegex, cfnelem[key], pathprop))
elif isinstance(cfnelem, list):
for index, item in enumerate(cfnelem):
pathprop = path[:]
pathprop.append(index)
values.extend(self._match_values(searchRegex, item, pathprop))
else:
# Leaf node
if isinstance(cfnelem, str) and re.match(searchRegex, cfnelem):
# Get all variables as separate paths
regex = re.compile(r'(\$\{.*?\.?.*?})')
for variable in re.findall(regex, cfnelem):
values.append(path + [variable])
return values
def match_values(self, searchRegex, cfn):
"""
Search for values in all parts of the templates that match the searchRegex
"""
results = []
results.extend(self._match_values(searchRegex, cfn.template, []))
# Globals are removed during a transform. They need to be checked manually
results.extend(self._match_values(searchRegex, cfn.template.get('Globals', {}), []))
return results
def _api_exceptions(self, value):
""" Key value exceptions """
parameter_search = re.compile(r'^\$\{stageVariables\..*\}$')
return re.match(parameter_search, value)
def match(self, cfn):
"""Basic Rule Matching"""
matches = []
# Generic regex to match a string containing at least one ${parameter}
parameter_search = re.compile(r'^.*(\$\{.*\}.*(\$\{.*\}.*)*)$')
# Get a list of paths to every leaf node string containing at least one ${parameter}
parameter_string_paths = self.match_values(parameter_search, cfn)
# We want to search all of the paths to check if each one contains an 'Fn::Sub'
for parameter_string_path in parameter_string_paths:
if parameter_string_path[0] in ['Parameters']:
continue
# Exclude the special IAM variables
variable = parameter_string_path[-1]
if 'Resource' in parameter_string_path:
if variable in self.resource_excludes:
continue
if 'NotResource' in parameter_string_path:
if variable in self.resource_excludes:
continue
if 'Condition' in parameter_string_path:
if variable in self.condition_excludes:
continue
# Exclude literals (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-sub.html)
if variable.startswith('${!'):
continue
found_sub = False
# Does the path contain an 'Fn::Sub'?
for step in parameter_string_path:
if step in self.api_excludes:
if self._api_exceptions(parameter_string_path[-1]):
found_sub = True
elif step == 'Fn::Sub' or step in self.excludes:
found_sub = True
# If we didn't find an 'Fn::Sub' it means a string containing a ${parameter} may not be evaluated correctly
if not found_sub:
# Remove the last item (the variable) to prevent multiple errors on the same line
path = parameter_string_path[:-1]
message = 'Found an embedded parameter outside of an "Fn::Sub" at {}'.format(
'/'.join(map(str, path)))
matches.append(RuleMatch(path, message))
return matches
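# Hedged illustration (not part of cfn-lint): a tiny standalone demo of the search
# regex used by this rule, showing which leaf strings are treated as containing a
# ${parameter}. Note that the rule above separately whitelists '${!...}' literals and
# the IAM/IoT pseudo-variables; the sample strings below are purely illustrative.
if __name__ == '__main__':
    needs_sub = re.compile(r'^.*(\$\{.*\}.*(\$\{.*\}.*)*)$')
    for sample in ('arn:aws:s3:::${BucketName}/*', 'plain-string', '${!LiteralDollar}'):
        print('{!r} -> {}'.format(sample, bool(re.match(needs_sub, sample))))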
| 47.427536 | 136 | 0.600764 |
178f882d7f8dffdad2f4b0260e18d06daf9714cb
| 559 |
py
|
Python
|
ClassicalSquareDimer/src/plot/load_binary_file.py
|
CaoRX/ClassicalDimer
|
98f324ab0118455127e6ba8784a6836476cbf843
|
[
"MIT"
] | 1 |
2020-03-03T07:32:18.000Z
|
2020-03-03T07:32:18.000Z
|
src/plot/load_binary_file.py
|
CaoRX/ClassicalDimer
|
98f324ab0118455127e6ba8784a6836476cbf843
|
[
"MIT"
] | null | null | null |
src/plot/load_binary_file.py
|
CaoRX/ClassicalDimer
|
98f324ab0118455127e6ba8784a6836476cbf843
|
[
"MIT"
] | null | null | null |
import numpy as np
import struct
def load_file(filename, data_shape = (1, -1), data_type = 'double'):
type_dict = {'int': 'i', 'double': 'd'}
file_path = '../../build/data/' + filename
data_file = open(file_path, mode = 'rb')
file_contents = data_file.read()
data_file.close()
ret = struct.iter_unpack(type_dict[data_type], file_contents)
ret = np.array([a[0] for a in ret])
if (data_shape[0] == 1) and (data_shape[1] == -1):
return ret
ret = np.reshape(ret, data_shape)
return ret
def get_filename(file_type, file_no):
return file_type + str(file_no) + '.dat'
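# Hedged usage sketch (the file names and shapes below are illustrative, not from the
# project, and the demo only runs if the corresponding .dat files exist in build/data):
if __name__ == '__main__':
    energies = load_file(get_filename('energy', 0))
    grid = load_file(get_filename('dimer', 0), data_shape = (10, 10), data_type = 'int')
    print(energies.shape, grid.shape)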
| 31.055556 | 68 | 0.674419 |
da78770aec688ddc3cdbdca3f019be0fc105e977
| 19,782 |
py
|
Python
|
wfexs_backend/cache_handler.py
|
Acivico/WfExS-backend
|
05173ceca4dfdcd65f2a957e3638a65d76c6eb25
|
[
"Apache-2.0"
] | 1 |
2022-01-10T12:21:33.000Z
|
2022-01-10T12:21:33.000Z
|
wfexs_backend/cache_handler.py
|
Acivico/WfExS-backend
|
05173ceca4dfdcd65f2a957e3638a65d76c6eb25
|
[
"Apache-2.0"
] | 1 |
2021-11-19T09:10:51.000Z
|
2021-11-24T08:55:49.000Z
|
wfexs_backend/cache_handler.py
|
Acivico/WfExS-backend
|
05173ceca4dfdcd65f2a957e3638a65d76c6eb25
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2020-2021 Barcelona Supercomputing Center (BSC), Spain
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import datetime
import fnmatch
import hashlib
import json
import logging
import os
import os.path
import re
import shutil
import urllib.parse
import uuid
from typing import Iterator, List, Mapping
from typing import Optional, Pattern, Tuple, Union
from .common import *
META_JSON_POSTFIX = '_meta.json'
class SchemeHandlerCacheHandler:
def __init__(self, cacheDir, schemeHandlers:Mapping[str,ProtocolFetcher]):
# Getting a logger focused on specific classes
import inspect
self.logger = logging.getLogger(dict(inspect.getmembers(self))['__module__'] + '::' + self.__class__.__name__)
# TODO: create caching database
self.cacheDir = cacheDir
self.schemeHandlers = dict()
self.addSchemeHandlers(schemeHandlers)
def addSchemeHandlers(self, schemeHandlers:Mapping[str, ProtocolFetcher]) -> None:
if isinstance(schemeHandlers, dict):
self.schemeHandlers.update(schemeHandlers)
def _genUriMetaCachedFilename(self, hashDir:AbsPath, the_remote_file:Union[urllib.parse.ParseResult, URIType]) -> Tuple[AbsPath, str, AbsPath]:
input_file = hashlib.sha1(the_remote_file.encode('utf-8')).hexdigest()
metadata_input_file = input_file + META_JSON_POSTFIX
return os.path.join(hashDir, metadata_input_file), input_file, os.path.join(hashDir, input_file)
@staticmethod
def getHashDir(destdir) -> AbsPath:
hashDir = os.path.join(destdir,'uri_hashes')
if not os.path.exists(hashDir):
try:
os.makedirs(hashDir)
except IOError:
errstr = "ERROR: Unable to create directory for workflow URI hashes {}.".format(hashDir)
raise WFException(errstr)
return hashDir
@staticmethod
def _parseMetaStructure(fMeta: AbsPath) -> Mapping[str, Any]:
with open(fMeta, mode="r", encoding="utf-8") as eH:
metaStructure = json.load(eH)
# Generating a stamp signature
if metaStructure.get('stamp') is None:
metaStructure['stamp'] = datetime.datetime.fromtimestamp(os.path.getmtime(fMeta), tz=datetime.timezone.utc).isoformat() + 'Z'
metaStructure.setdefault('path', dict())['meta'] = {
'relative': os.path.basename(fMeta),
'absolute': fMeta
}
# Generating a path structure for old cases
if (metaStructure.get('resolves_to') is None) and (metaStructure['path'].get('relative') is None):
if fMeta.endswith(META_JSON_POSTFIX):
fname = fMeta[0:-len(META_JSON_POSTFIX)]
if os.path.exists(fname):
finalCachedFilename = os.path.realpath(fname)
hashDir = os.path.dirname(fMeta)
metaStructure['path'].update({
'relative': os.path.relpath(finalCachedFilename, hashDir),
'absolute': finalCachedFilename
})
return metaStructure
@staticmethod
def _translateArgs(args:Iterator[str]) -> List[Pattern]:
return list(map(lambda e: re.compile(fnmatch.translate(e)), args))
def list(self, destdir:AbsPath, *args, acceptGlob:bool=False) -> Iterator[Tuple[URIType, Mapping[str,Any]]]:
"""
This method iterates over the list of metadata entries,
using glob patterns if requested
"""
entries = set(args)
if entries and acceptGlob:
reEntries = self._translateArgs(entries)
else:
reEntries = None
hashDir = self.getHashDir(destdir)
with os.scandir(hashDir) as hD:
for entry in hD:
# Avoid entering loops around '.' and '..'
if entry.is_file(follow_symlinks=False) and entry.name.endswith(META_JSON_POSTFIX):
try:
metaStructure = self._parseMetaStructure(entry.path)
meta_uri = None
if not entries:
for meta in metaStructure['metadata_array']:
meta_uri = meta['uri']
break
else:
for meta in metaStructure['metadata_array']:
meta_uri = meta['uri']
if reEntries and any(map(lambda r: r.match(meta_uri) is not None, reEntries)):
break
elif meta_uri in entries:
break
meta_uri = None
if meta_uri is not None:
yield meta_uri, metaStructure
except:
pass
def remove(self, destdir:AbsPath, *args, doRemoveFiles:bool=False, acceptGlob:bool=False) -> Iterator[Tuple[URIType, AbsPath, Optional[AbsPath]]]:
"""
This method iterates elements from metadata entries,
and optionally the cached value
"""
if len(args) > 0:
hashDir = self.getHashDir(destdir)
for meta_uri, metaStructure in self.list(destdir, *args, acceptGlob=acceptGlob):
removeCachedCopyPath = None
for meta in metaStructure['metadata_array']:
if doRemoveFiles and not meta['metadata'].get('injected'):
# Decide the removal path
finalCachedFilename = None
relFinalCachedFilename = metaStructure.get('path', {}).get('relative')
if relFinalCachedFilename is not None:
finalCachedFilename = os.path.normpath(os.path.join(hashDir, relFinalCachedFilename))
if not os.path.exists(finalCachedFilename):
self.logger.warning(f'Relative cache path {relFinalCachedFilename} was not found')
if finalCachedFilename is None:
finalCachedFilename = metaStructure.get('path', {}).get('absolute')
if (finalCachedFilename is not None) and os.path.exists(finalCachedFilename):
removeCachedCopyPath = finalCachedFilename
else:
self.logger.warning(f'Absolute cache path {finalCachedFilename} was not found. Cache miss!!!')
break
if removeCachedCopyPath is not None:
self.logger.info(f"Removing cache {metaStructure['fingerprint']} physical path {removeCachedCopyPath}")
if os.path.isdir(removeCachedCopyPath):
shutil.rmtree(removeCachedCopyPath, ignore_errors=True)
else:
os.unlink(removeCachedCopyPath)
metaFile = metaStructure['path']['meta']['absolute']
self.logger.info(f"Removing cache {metaStructure.get('fingerprint')} metadata {metaFile}")
os.unlink(metaFile)
yield meta_uri, metaFile, removeCachedCopyPath
def inject(self, destdir:AbsPath, the_remote_file:Union[urllib.parse.ParseResult, URIType], fetched_metadata_array:Optional[List[URIWithMetadata]]=None, finalCachedFilename:Optional[AbsPath]=None, tempCachedFilename:Optional[AbsPath]=None, inputKind:Optional[Union[ContentKind, AbsPath]]=None) -> Tuple[AbsPath, Fingerprint]:
return self._inject(
self.getHashDir(destdir),
the_remote_file,
fetched_metadata_array=fetched_metadata_array,
finalCachedFilename=finalCachedFilename,
tempCachedFilename=tempCachedFilename,
destdir=destdir,
inputKind=inputKind
)
def _inject(self, hashDir:AbsPath, the_remote_file:Union[urllib.parse.ParseResult, URIType], fetched_metadata_array:Optional[List[URIWithMetadata]]=None, finalCachedFilename:Optional[AbsPath]=None, tempCachedFilename:Optional[AbsPath]=None, destdir:Optional[AbsPath]=None, inputKind:Optional[Union[ContentKind, AbsPath]]=None) -> Tuple[AbsPath, Fingerprint]:
"""
This method has been created to be able to inject a cached metadata entry
"""
if isinstance(the_remote_file, urllib.parse.ParseResult):
the_remote_file = urllib.parse.urlunparse(the_remote_file)
uriMetaCachedFilename , _ , _ = self._genUriMetaCachedFilename(hashDir, the_remote_file)
if tempCachedFilename is None:
tempCachedFilename = finalCachedFilename
if inputKind is None:
if tempCachedFilename is None:
raise WFException(f"No defined paths or input kinds, which would lead to an empty cache entry")
if os.path.isdir(tempCachedFilename):
inputKind = ContentKind.Directory
elif os.path.isfile(tempCachedFilename):
inputKind = ContentKind.File
else:
raise WFException(f"Local path {tempCachedFilename} is neither a file nor a directory")
fingerprint = None
# Are we dealing with a redirection?
if isinstance(inputKind, ContentKind):
if os.path.isfile(tempCachedFilename): # inputKind == ContentKind.File:
fingerprint = ComputeDigestFromFile(tempCachedFilename, repMethod=stringifyFilenameDigest)
putativeInputKind = ContentKind.File
elif os.path.isdir(tempCachedFilename): # inputKind == ContentKind.Directory:
fingerprint = ComputeDigestFromDirectory(tempCachedFilename, repMethod=stringifyFilenameDigest)
putativeInputKind = ContentKind.Directory
else:
raise WFException(f"FIXME: Cached {tempCachedFilename} from {the_remote_file} is neither file nor directory")
if inputKind != putativeInputKind:
self.logger.error(f"FIXME: Mismatch at {the_remote_file} : {inputKind} vs {putativeInputKind}")
if finalCachedFilename is None:
finalCachedFilename = os.path.join(destdir, fingerprint)
else:
finalCachedFilename = None
# Saving the metadata
with open(uriMetaCachedFilename, mode="w", encoding="utf-8") as mOut:
# Serializing the metadata
if fetched_metadata_array is None:
fetched_metadata_array = [
URIWithMetadata(
uri=the_remote_file,
metadata={
'injected': True
}
)
]
metaStructure = {
'stamp': datetime.datetime.utcnow().isoformat() + 'Z',
'metadata_array': list(map(lambda m: {'uri': m.uri, 'metadata': m.metadata, 'preferredName': m.preferredName}, fetched_metadata_array))
}
if finalCachedFilename is not None:
metaStructure['kind'] = str(inputKind.value)
metaStructure['fingerprint'] = fingerprint
metaStructure['path'] = {
'relative': os.path.relpath(finalCachedFilename, hashDir),
'absolute': finalCachedFilename
}
else:
metaStructure['resolves_to'] = inputKind
json.dump(metaStructure, mOut)
return finalCachedFilename, fingerprint
def fetch(self, remote_file:Union[urllib.parse.ParseResult, URIType], destdir:AbsPath, offline:bool, ignoreCache:bool=False, registerInCache:bool=True, secContext:Optional[SecurityContextConfig]=None) -> Tuple[ContentKind, AbsPath, List[URIWithMetadata]]:
# The directory with the content, whose name is based on sha256
if not os.path.exists(destdir):
try:
os.makedirs(destdir)
except IOError:
errstr = "ERROR: Unable to create directory for workflow inputs {}.".format(destdir)
raise WFException(errstr)
# The directory where symlinks to the content, named after the SHA1 of the
# originating URI, are placed
hashDir = self.getHashDir(destdir)
# This filename will only be used when content is being fetched
tempCachedFilename = os.path.join(destdir, 'caching-' + str(uuid.uuid4()))
# This is an iterative process, where the URI is resolved and peeled until a basic fetching protocol is reached
inputKind = remote_file
metadata_array = []
while not isinstance(inputKind, ContentKind):
the_remote_file = inputKind
if isinstance(the_remote_file, urllib.parse.ParseResult):
parsedInputURL = the_remote_file
the_remote_file = urllib.parse.urlunparse(the_remote_file)
else:
parsedInputURL = urllib.parse.urlparse(the_remote_file)
# uriCachedFilename is always going to be a symlink
uriMetaCachedFilename , uriCachedFilename , absUriCachedFilename = self._genUriMetaCachedFilename(hashDir, the_remote_file)
# TODO: check cached state in future database
# Cleaning up
if registerInCache and ignoreCache:
# Removing the metadata
if os.path.exists(uriMetaCachedFilename):
os.unlink(uriMetaCachedFilename)
# Removing the symlink
if os.path.exists(absUriCachedFilename):
os.unlink(absUriCachedFilename)
# We cannot remove the content as
# it could be referenced by other symlinks
refetch = not registerInCache or ignoreCache or not os.path.exists(uriMetaCachedFilename) or os.stat(uriMetaCachedFilename).st_size == 0
metaStructure = None
if not refetch:
try:
metaStructure = self._parseMetaStructure(uriMetaCachedFilename)
except:
# Metadata is corrupted
self.logger.warning(f'Metadata cache {uriMetaCachedFilename} is corrupted. Ignoring.')
pass
if metaStructure is not None:
# Metadata cache hit
inputKind = metaStructure.get('kind')
if inputKind is None:
inputKind = metaStructure['resolves_to']
else:
# Additional checks
inputKind = ContentKind(inputKind)
relFinalCachedFilename = metaStructure.get('path', {}).get('relative', os.readlink(absUriCachedFilename))
finalCachedFilename = os.path.normpath(os.path.join(hashDir, relFinalCachedFilename))
if not os.path.exists(finalCachedFilename):
self.logger.warning(f'Relative cache path {relFinalCachedFilename} was not found')
finalCachedFilename = metaStructure.get('path', {}).get('absolute')
if (finalCachedFilename is None) or not os.path.exists(finalCachedFilename):
self.logger.warning(f'Absolute cache path {finalCachedFilename} was not found. Cache miss!!!')
# Cleaning up
metaStructure = None
if metaStructure is not None:
# Cache hit
# As the content still exists, get the metadata
fetched_metadata_array = list(map(lambda rm: URIWithMetadata(uri=rm['uri'], metadata=rm['metadata'], preferredName=rm.get('preferredName')), metaStructure['metadata_array']))
else:
# Cache miss
# As this is a handler for online resources, comply with offline mode
if offline:
raise WFException(f"Cannot download content in offline mode from {remote_file} to {uriCachedFilename}")
# Content is fetched here
theScheme = parsedInputURL.scheme.lower()
schemeHandler = self.schemeHandlers.get(theScheme)
if schemeHandler is None:
raise WFException(f'No {theScheme} scheme handler for {the_remote_file} (while processing {remote_file}). Was this data injected in the cache?')
try:
# Content is fetched here
inputKind, fetched_metadata_array = schemeHandler(the_remote_file, tempCachedFilename, secContext=secContext)
# The cache entry is injected
finalCachedFilename, fingerprint = self._inject(
hashDir,
the_remote_file,
fetched_metadata_array,
tempCachedFilename=tempCachedFilename,
destdir=destdir,
inputKind=inputKind
)
# Now, creating the symlink
# (which should not be needed in the future)
if finalCachedFilename is not None:
if os.path.isfile(finalCachedFilename):
os.unlink(finalCachedFilename)
elif os.path.isdir(finalCachedFilename):
shutil.rmtree(finalCachedFilename)
os.rename(tempCachedFilename, finalCachedFilename)
next_input_file = os.path.relpath(finalCachedFilename, hashDir)
else:
next_input_file = hashlib.sha1(inputKind.encode('utf-8')).hexdigest()
if os.path.lexists(absUriCachedFilename):
os.unlink(absUriCachedFilename)
os.symlink(next_input_file, absUriCachedFilename)
except WFException as we:
raise we
except Exception as e:
raise WFException("Cannot download content from {} to {} (while processing {}) (temp file {}): {}".format(the_remote_file, uriCachedFilename, remote_file, tempCachedFilename, e))
# Store the metadata
metadata_array.extend(fetched_metadata_array)
return inputKind, finalCachedFilename, metadata_array
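# Hedged usage sketch (kept as comments because the real scheme handlers live elsewhere
# in WfExS-backend; the 'demo' scheme and fetcher below are purely illustrative):
#
#   def demo_fetcher(remote_file, cachedFilename, secContext=None):
#       # A fetcher must write the content and return (ContentKind, [URIWithMetadata])
#       with open(cachedFilename, mode='w', encoding='utf-8') as cH:
#           cH.write(remote_file)
#       return ContentKind.File, [URIWithMetadata(uri=remote_file, metadata={})]
#
#   handler = SchemeHandlerCacheHandler(cacheDir, {'demo': demo_fetcher})
#   kind, cached_path, meta = handler.fetch('demo://example', cacheDir, offline=False)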
| 49.208955 | 362 | 0.579567 |
146b8476b322cf593fb315b0098e064e7a41e0b6
| 38,283 |
py
|
Python
|
vendor/chromium/mojo/public/tools/bindings/generators/mojom_cpp_generator.py
|
mkljczk/fivem
|
187b2e5f922297bcbde5cfb1db70815223c53680
|
[
"MIT"
] | 6 |
2021-03-29T05:26:18.000Z
|
2021-07-13T12:53:03.000Z
|
vendor/chromium/mojo/public/tools/bindings/generators/mojom_cpp_generator.py
|
big-rip/fivem-1
|
c08af22110802e77816dfdde29df1662f8dea563
|
[
"MIT"
] | 7 |
2021-08-31T22:30:30.000Z
|
2022-03-24T06:50:38.000Z
|
vendor/chromium/mojo/public/tools/bindings/generators/mojom_cpp_generator.py
|
big-rip/fivem-1
|
c08af22110802e77816dfdde29df1662f8dea563
|
[
"MIT"
] | 3 |
2020-12-23T02:42:18.000Z
|
2021-04-07T12:06:03.000Z
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Generates C++ source files from a mojom.Module."""
import mojom.generate.generator as generator
import mojom.generate.module as mojom
import mojom.generate.pack as pack
from mojom.generate.template_expander import UseJinja
_kind_to_cpp_type = {
mojom.BOOL: "bool",
mojom.INT8: "int8_t",
mojom.UINT8: "uint8_t",
mojom.INT16: "int16_t",
mojom.UINT16: "uint16_t",
mojom.INT32: "int32_t",
mojom.UINT32: "uint32_t",
mojom.FLOAT: "float",
mojom.INT64: "int64_t",
mojom.UINT64: "uint64_t",
mojom.DOUBLE: "double",
}
_kind_to_cpp_literal_suffix = {
mojom.UINT8: "U",
mojom.UINT16: "U",
mojom.UINT32: "U",
mojom.FLOAT: "f",
mojom.UINT64: "ULL",
}
class _NameFormatter(object):
"""A formatter for the names of kinds or values."""
def __init__(self, token, variant):
self._token = token
self._variant = variant
def Format(self, separator, prefixed=False, internal=False,
include_variant=False, omit_namespace_for_module=None,
flatten_nested_kind=False):
"""Formats the name according to the given configuration.
Args:
separator: Separator between different parts of the name.
prefixed: Whether a leading separator should be added.
internal: Returns the name in the "internal" namespace.
include_variant: Whether to include variant as namespace. If |internal| is
True, then this flag is ignored and variant is not included.
omit_namespace_for_module: If the token is from the specified module,
don't add the namespaces of the module to the name.
flatten_nested_kind: It is allowed to define enums inside structs and
interfaces. If this flag is set to True, this method concatenates the
parent kind and the nested kind with '_', instead of treating the
parent kind as a scope."""
parts = []
if self._ShouldIncludeNamespace(omit_namespace_for_module):
if prefixed:
parts.append("")
parts.extend(self._GetNamespace())
if include_variant and self._variant and not internal:
parts.append(self._variant)
parts.extend(self._GetName(internal, flatten_nested_kind))
return separator.join(parts)
def FormatForCpp(self, omit_namespace_for_module=None, internal=False,
flatten_nested_kind=False):
return self.Format(
"::", prefixed=True,
omit_namespace_for_module=omit_namespace_for_module,
internal=internal, include_variant=True,
flatten_nested_kind=flatten_nested_kind)
def FormatForMojom(self):
return self.Format(".")
def _MapKindName(self, token, internal):
if not internal:
return token.name
if (mojom.IsStructKind(token) or mojom.IsUnionKind(token) or
mojom.IsEnumKind(token)):
return token.name + "_Data"
return token.name
def _GetName(self, internal, flatten_nested_kind):
if isinstance(self._token, mojom.EnumValue):
name_parts = _NameFormatter(self._token.enum, self._variant)._GetName(
internal, flatten_nested_kind)
name_parts.append(self._token.name)
return name_parts
name_parts = []
if internal:
name_parts.append("internal")
if (flatten_nested_kind and mojom.IsEnumKind(self._token) and
self._token.parent_kind):
name = "%s_%s" % (self._token.parent_kind.name,
self._MapKindName(self._token, internal))
name_parts.append(name)
return name_parts
if self._token.parent_kind:
name_parts.append(self._MapKindName(self._token.parent_kind, internal))
name_parts.append(self._MapKindName(self._token, internal))
return name_parts
def _ShouldIncludeNamespace(self, omit_namespace_for_module):
return self._token.module and (
not omit_namespace_for_module or
self._token.module.path != omit_namespace_for_module.path)
def _GetNamespace(self):
if self._token.module:
return NamespaceToArray(self._token.module.namespace)
def NamespaceToArray(namespace):
return namespace.split(".") if namespace else []
def GetWtfHashFnNameForEnum(enum):
return _NameFormatter(enum, None).Format("_", internal=True,
flatten_nested_kind=True) + "HashFn"
def IsNativeOnlyKind(kind):
return (mojom.IsStructKind(kind) or mojom.IsEnumKind(kind)) and \
kind.native_only
def UseCustomSerializer(kind):
return mojom.IsStructKind(kind) and kind.custom_serializer
def AllEnumValues(enum):
"""Return all enum values associated with an enum.
Args:
enum: {mojom.Enum} The enum type.
Returns:
{Set[int]} The values.
"""
return set(field.numeric_value for field in enum.fields)
def GetCppPodType(kind):
return _kind_to_cpp_type[kind]
def RequiresContextForDataView(kind):
for field in kind.fields:
if mojom.IsReferenceKind(field.kind):
return True
return False
def ShouldInlineStruct(struct):
# TODO(darin): Base this on the size of the wrapper class.
if len(struct.fields) > 4:
return False
for field in struct.fields:
if mojom.IsReferenceKind(field.kind) and not mojom.IsStringKind(field.kind):
return False
return True
def ShouldInlineUnion(union):
return not any(
mojom.IsReferenceKind(field.kind) and not mojom.IsStringKind(field.kind)
for field in union.fields)
def HasPackedMethodOrdinals(interface):
"""Returns whether all method ordinals are packed such that indexing into a
table would be efficient."""
max_ordinal = len(interface.methods) * 2
return all(method.ordinal < max_ordinal for method in interface.methods)
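# Hedged illustration (comment only, not part of the generator): for an interface with
# 3 methods, max_ordinal is 6, so ordinals {0, 1, 4} count as packed while {0, 1, 12}
# do not; the example numbers are made up for clarity.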
class StructConstructor(object):
"""Represents a constructor for a generated struct.
Fields:
fields: {[Field]} All struct fields in order.
params: {[Field]} The fields that are passed as params.
"""
def __init__(self, fields, params):
self._fields = fields
self._params = set(params)
@property
def params(self):
return [field for field in self._fields if field in self._params]
@property
def fields(self):
for field in self._fields:
yield (field, field in self._params)
class Generator(generator.Generator):
def __init__(self, *args, **kwargs):
super(Generator, self).__init__(*args, **kwargs)
def _GetExtraTraitsHeaders(self):
extra_headers = set()
for typemap in self._GetAllUsedTypemaps():
extra_headers.update(typemap.get("traits_headers", []))
return sorted(extra_headers)
def _GetAllUsedTypemaps(self):
"""Returns the typemaps for types needed for serialization in this module.
A type is needed for serialization if it is contained by a struct or union
defined in this module, is a parameter of a message in an interface in
this module or is contained within another type needed for serialization.
"""
used_typemaps = []
seen_types = set()
def AddKind(kind):
if (mojom.IsIntegralKind(kind) or mojom.IsStringKind(kind) or
mojom.IsDoubleKind(kind) or mojom.IsFloatKind(kind) or
mojom.IsAnyHandleKind(kind) or
mojom.IsInterfaceKind(kind) or
mojom.IsInterfaceRequestKind(kind) or
mojom.IsAssociatedKind(kind) or
mojom.IsPendingRemoteKind(kind) or
mojom.IsPendingReceiverKind(kind)):
pass
elif mojom.IsArrayKind(kind):
AddKind(kind.kind)
elif mojom.IsMapKind(kind):
AddKind(kind.key_kind)
AddKind(kind.value_kind)
else:
name = self._GetFullMojomNameForKind(kind)
if name in seen_types:
return
seen_types.add(name)
typemap = self.typemap.get(name, None)
if typemap:
used_typemaps.append(typemap)
if mojom.IsStructKind(kind) or mojom.IsUnionKind(kind):
for field in kind.fields:
AddKind(field.kind)
for kind in self.module.structs + self.module.unions:
for field in kind.fields:
AddKind(field.kind)
for interface in self.module.interfaces:
for method in interface.methods:
for parameter in method.parameters + (method.response_parameters or []):
AddKind(parameter.kind)
return used_typemaps
def _GetExtraPublicHeaders(self):
all_enums = list(self.module.enums)
for struct in self.module.structs:
all_enums.extend(struct.enums)
for interface in self.module.interfaces:
all_enums.extend(interface.enums)
types = set(self._GetFullMojomNameForKind(typename)
for typename in
self.module.structs + all_enums + self.module.unions)
headers = set()
for typename, typemap in self.typemap.items():
if typename in types:
headers.update(typemap.get("public_headers", []))
return sorted(headers)
def _ReferencesAnyHandleOrInterfaceType(self):
"""Returns whether this module uses interfaces directly or indirectly.
When false, the generated headers do not need to include interface_ptr.h
and similar.
"""
if len(self.module.interfaces) > 0:
return True
return any(map(mojom.ContainsHandlesOrInterfaces,
self.module.structs + self.module.unions))
def _ReferencesAnyNativeType(self):
"""Returns whether this module uses native types directly or indirectly.
When false, the generated headers do not need to include
native_struct_serialization.h and similar.
"""
m = self.module
# Note that interfaces can contain scoped native types.
return any(map(mojom.ContainsNativeTypes,
m.enums + m.structs + m.interfaces))
def _GetDirectlyUsedKinds(self):
for struct in self.module.structs + self.module.unions:
for field in struct.fields:
yield field.kind
for interface in self.module.interfaces:
for method in interface.methods:
for param in method.parameters + (method.response_parameters or []):
yield param.kind
def _GetJinjaExports(self):
all_enums = list(self.module.enums)
for struct in self.module.structs:
all_enums.extend(struct.enums)
for interface in self.module.interfaces:
all_enums.extend(interface.enums)
return {
"all_enums": all_enums,
"disallow_interfaces": self.disallow_interfaces,
"disallow_native_types": self.disallow_native_types,
"enable_kythe_annotations": self.enable_kythe_annotations,
"enums": self.module.enums,
"export_attribute": self.export_attribute,
"export_header": self.export_header,
"extra_public_headers": self._GetExtraPublicHeaders(),
"extra_traits_headers": self._GetExtraTraitsHeaders(),
"for_blink": self.for_blink,
"imports": self.module.imports,
"interfaces": self.module.interfaces,
"kinds": self.module.kinds,
"module": self.module,
"module_namespace": self.module.namespace,
"namespaces_as_array": NamespaceToArray(self.module.namespace),
"structs": self.module.structs,
"support_lazy_serialization": self.support_lazy_serialization,
"unions": self.module.unions,
"uses_interfaces": self._ReferencesAnyHandleOrInterfaceType(),
"uses_native_types": self._ReferencesAnyNativeType(),
"variant": self.variant,
}
@staticmethod
def GetTemplatePrefix():
return "cpp_templates"
def GetFilters(self):
cpp_filters = {
"all_enum_values": AllEnumValues,
"constant_value": self._ConstantValue,
"contains_handles_or_interfaces": mojom.ContainsHandlesOrInterfaces,
"contains_move_only_members": self._ContainsMoveOnlyMembers,
"cpp_data_view_type": self._GetCppDataViewType,
"cpp_field_type": self._GetCppFieldType,
"cpp_union_field_type": self._GetCppUnionFieldType,
"cpp_pod_type": GetCppPodType,
"cpp_union_getter_return_type": self._GetUnionGetterReturnType,
"cpp_union_trait_getter_return_type": self._GetUnionTraitGetterReturnType,
"cpp_wrapper_call_type": self._GetCppWrapperCallType,
"cpp_wrapper_param_type": self._GetCppWrapperParamType,
"cpp_wrapper_param_type_new": self._GetCppWrapperParamTypeNew,
"cpp_wrapper_type": self._GetCppWrapperType,
"default_value": self._DefaultValue,
"expression_to_text": self._ExpressionToText,
"format_constant_declaration": self._FormatConstantDeclaration,
"get_container_validate_params_ctor_args":
self._GetContainerValidateParamsCtorArgs,
"get_full_mojom_name_for_kind": self._GetFullMojomNameForKind,
"get_name_for_kind": self._GetNameForKind,
"get_pad": pack.GetPad,
"get_qualified_name_for_kind": self._GetQualifiedNameForKind,
"has_callbacks": mojom.HasCallbacks,
"has_packed_method_ordinals": HasPackedMethodOrdinals,
"has_sync_methods": mojom.HasSyncMethods,
"method_supports_lazy_serialization":
self._MethodSupportsLazySerialization,
"requires_context_for_data_view": RequiresContextForDataView,
"should_inline": ShouldInlineStruct,
"should_inline_union": ShouldInlineUnion,
"is_array_kind": mojom.IsArrayKind,
"is_enum_kind": mojom.IsEnumKind,
"is_integral_kind": mojom.IsIntegralKind,
"is_interface_kind": mojom.IsInterfaceKind,
"is_receiver_kind": self._IsReceiverKind,
"is_native_only_kind": IsNativeOnlyKind,
"is_any_handle_kind": mojom.IsAnyHandleKind,
"is_any_interface_kind": mojom.IsAnyInterfaceKind,
"is_any_handle_or_interface_kind": mojom.IsAnyHandleOrInterfaceKind,
"is_associated_kind": mojom.IsAssociatedKind,
"is_hashable": self._IsHashableKind,
"is_map_kind": mojom.IsMapKind,
"is_nullable_kind": mojom.IsNullableKind,
"is_object_kind": mojom.IsObjectKind,
"is_reference_kind": mojom.IsReferenceKind,
"is_string_kind": mojom.IsStringKind,
"is_struct_kind": mojom.IsStructKind,
"is_typemapped_kind": self._IsTypemappedKind,
"is_union_kind": mojom.IsUnionKind,
"passes_associated_kinds": mojom.PassesAssociatedKinds,
"struct_constructors": self._GetStructConstructors,
"under_to_camel": generator.ToCamel,
"unmapped_type_for_serializer": self._GetUnmappedTypeForSerializer,
"use_custom_serializer": UseCustomSerializer,
"wtf_hash_fn_name_for_enum": GetWtfHashFnNameForEnum,
}
return cpp_filters
@UseJinja("module.h.tmpl")
def _GenerateModuleHeader(self):
return self._GetJinjaExports()
@UseJinja("module-forward.h.tmpl")
def _GenerateModuleForwardHeader(self):
return self._GetJinjaExports()
@UseJinja("module.cc.tmpl")
def _GenerateModuleSource(self):
return self._GetJinjaExports()
@UseJinja("module-import-headers.h.tmpl")
def _GenerateModuleImportHeadersHeader(self):
return self._GetJinjaExports()
@UseJinja("module-shared.h.tmpl")
def _GenerateModuleSharedHeader(self):
return self._GetJinjaExports()
@UseJinja("module-shared-internal.h.tmpl")
def _GenerateModuleSharedInternalHeader(self):
return self._GetJinjaExports()
@UseJinja("module-shared-message-ids.h.tmpl")
def _GenerateModuleSharedMessageIdsHeader(self):
return self._GetJinjaExports()
@UseJinja("module-shared.cc.tmpl")
def _GenerateModuleSharedSource(self):
return self._GetJinjaExports()
@UseJinja("module-test-utils.h.tmpl")
def _GenerateModuleTestUtilsHeader(self):
return self._GetJinjaExports()
@UseJinja("module-test-utils.cc.tmpl")
def _GenerateModuleTestUtilsSource(self):
return self._GetJinjaExports()
@UseJinja("module-params-data.h.tmpl")
def _GenerateModuleParamsDataHeader(self):
return self._GetJinjaExports()
def GenerateFiles(self, args):
self.module.Stylize(generator.Stylizer())
if self.generate_non_variant_code:
if self.generate_message_ids:
self.WriteWithComment(self._GenerateModuleSharedMessageIdsHeader(),
"%s-shared-message-ids.h" % self.module.path)
else:
self.WriteWithComment(self._GenerateModuleSharedHeader(),
"%s-shared.h" % self.module.path)
self.WriteWithComment(self._GenerateModuleSharedInternalHeader(),
"%s-shared-internal.h" % self.module.path)
self.WriteWithComment(self._GenerateModuleSharedSource(),
"%s-shared.cc" % self.module.path)
self.WriteWithComment(self._GenerateModuleParamsDataHeader(),
"%s-params-data.h" % self.module.path)
else:
suffix = "-%s" % self.variant if self.variant else ""
self.WriteWithComment(self._GenerateModuleHeader(),
"%s%s.h" % (self.module.path, suffix))
self.WriteWithComment(self._GenerateModuleForwardHeader(),
"%s%s-forward.h" % (self.module.path, suffix))
self.WriteWithComment(self._GenerateModuleSource(),
"%s%s.cc" % (self.module.path, suffix))
self.WriteWithComment(self._GenerateModuleImportHeadersHeader(),
"%s%s-import-headers.h" % (self.module.path,
suffix))
self.WriteWithComment(self._GenerateModuleTestUtilsHeader(),
"%s%s-test-utils.h" % (self.module.path, suffix))
self.WriteWithComment(self._GenerateModuleTestUtilsSource(),
"%s%s-test-utils.cc" % (self.module.path, suffix))
def _ConstantValue(self, constant):
return self._ExpressionToText(constant.value, kind=constant.kind)
def _DefaultValue(self, field):
if not field.default:
return ""
if mojom.IsStructKind(field.kind):
assert field.default == "default"
if self._IsTypemappedKind(field.kind):
return ""
return "%s::New()" % self._GetNameForKind(field.kind)
expression = self._ExpressionToText(field.default, kind=field.kind)
if mojom.IsEnumKind(field.kind) and self._IsTypemappedKind(field.kind):
expression = "mojo::internal::ConvertEnumValue<%s, %s>(%s)" % (
self._GetNameForKind(field.kind), self._GetCppWrapperType(field.kind),
expression)
return expression
def _GetNameForKind(self, kind, internal=False, flatten_nested_kind=False,
add_same_module_namespaces=False):
return _NameFormatter(kind, self.variant).FormatForCpp(
internal=internal, flatten_nested_kind=flatten_nested_kind,
omit_namespace_for_module = (None if add_same_module_namespaces
else self.module))
def _GetQualifiedNameForKind(self, kind, internal=False,
flatten_nested_kind=False, include_variant=True):
return _NameFormatter(
kind, self.variant if include_variant else None).FormatForCpp(
internal=internal, flatten_nested_kind=flatten_nested_kind)
def _GetFullMojomNameForKind(self, kind):
return _NameFormatter(kind, self.variant).FormatForMojom()
def _IsTypemappedKind(self, kind):
return hasattr(kind, "name") and \
self._GetFullMojomNameForKind(kind) in self.typemap
def _IsHashableKind(self, kind):
"""Check if the kind can be hashed.
Args:
kind: {Kind} The kind to check.
Returns:
{bool} True if a value of this kind can be hashed.
"""
checked = set()
def Check(kind):
if kind.spec in checked:
return True
checked.add(kind.spec)
if mojom.IsNullableKind(kind):
return False
elif mojom.IsStructKind(kind):
if kind.native_only:
return False
if (self._IsTypemappedKind(kind) and
not self.typemap[self._GetFullMojomNameForKind(kind)]["hashable"]):
return False
return all(Check(field.kind) for field in kind.fields)
elif mojom.IsEnumKind(kind):
return not self._IsTypemappedKind(kind) or self.typemap[
self._GetFullMojomNameForKind(kind)]["hashable"]
elif mojom.IsUnionKind(kind):
return all(Check(field.kind) for field in kind.fields)
elif mojom.IsAnyHandleKind(kind):
return False
elif mojom.IsAnyInterfaceKind(kind):
return False
# TODO(crbug.com/735301): Arrays and maps could be made hashable. We just
# don't have a use case yet.
elif mojom.IsArrayKind(kind):
return False
elif mojom.IsMapKind(kind):
return False
else:
return True
return Check(kind)
def _GetNativeTypeName(self, typemapped_kind):
return self.typemap[self._GetFullMojomNameForKind(typemapped_kind)][
"typename"]
def _FormatConstantDeclaration(self, constant, nested=False):
if mojom.IsStringKind(constant.kind):
if nested:
return "const char %s[]" % constant.name
return "%sextern const char %s[]" % \
((self.export_attribute + " ") if self.export_attribute else "",
constant.name)
return "constexpr %s %s = %s" % (
GetCppPodType(constant.kind), constant.name,
self._ConstantValue(constant))
def _GetCppWrapperType(self, kind, add_same_module_namespaces=False):
def _AddOptional(type_name):
return "base::Optional<%s>" % type_name
if self._IsTypemappedKind(kind):
type_name = self._GetNativeTypeName(kind)
if (mojom.IsNullableKind(kind) and
not self.typemap[self._GetFullMojomNameForKind(kind)][
"nullable_is_same_type"]):
type_name = _AddOptional(type_name)
return type_name
if mojom.IsEnumKind(kind):
return self._GetNameForKind(
kind, add_same_module_namespaces=add_same_module_namespaces)
if mojom.IsStructKind(kind) or mojom.IsUnionKind(kind):
return "%sPtr" % self._GetNameForKind(
kind, add_same_module_namespaces=add_same_module_namespaces)
if mojom.IsArrayKind(kind):
pattern = "WTF::Vector<%s>" if self.for_blink else "std::vector<%s>"
if mojom.IsNullableKind(kind):
pattern = _AddOptional(pattern)
return pattern % self._GetCppWrapperType(
kind.kind, add_same_module_namespaces=add_same_module_namespaces)
if mojom.IsMapKind(kind):
pattern = ("WTF::HashMap<%s, %s>" if self.for_blink else
"base::flat_map<%s, %s>")
if mojom.IsNullableKind(kind):
pattern = _AddOptional(pattern)
return pattern % (
self._GetCppWrapperType(
kind.key_kind,
add_same_module_namespaces=add_same_module_namespaces),
self._GetCppWrapperType(
kind.value_kind,
add_same_module_namespaces=add_same_module_namespaces))
if mojom.IsInterfaceKind(kind):
return "%sPtrInfo" % self._GetNameForKind(
kind, add_same_module_namespaces=add_same_module_namespaces)
if mojom.IsInterfaceRequestKind(kind):
return "%sRequest" % self._GetNameForKind(
kind.kind, add_same_module_namespaces=add_same_module_namespaces)
if mojom.IsPendingRemoteKind(kind):
return "mojo::PendingRemote<%s>" % self._GetNameForKind(
kind.kind, add_same_module_namespaces=add_same_module_namespaces)
if mojom.IsPendingReceiverKind(kind):
return "mojo::PendingReceiver<%s>" % self._GetNameForKind(
kind.kind, add_same_module_namespaces=add_same_module_namespaces)
if mojom.IsPendingAssociatedRemoteKind(kind):
return "mojo::PendingAssociatedRemote<%s>" % self._GetNameForKind(
kind.kind, add_same_module_namespaces=add_same_module_namespaces)
if mojom.IsPendingAssociatedReceiverKind(kind):
return "mojo::PendingAssociatedReceiver<%s>" % self._GetNameForKind(
kind.kind, add_same_module_namespaces=add_same_module_namespaces)
if mojom.IsAssociatedInterfaceKind(kind):
return "%sAssociatedPtrInfo" % self._GetNameForKind(
kind.kind, add_same_module_namespaces=add_same_module_namespaces)
if mojom.IsAssociatedInterfaceRequestKind(kind):
return "%sAssociatedRequest" % self._GetNameForKind(
kind.kind, add_same_module_namespaces=add_same_module_namespaces)
if mojom.IsStringKind(kind):
if self.for_blink:
return "WTF::String"
type_name = "std::string"
return (_AddOptional(type_name) if mojom.IsNullableKind(kind)
else type_name)
if mojom.IsGenericHandleKind(kind):
return "mojo::ScopedHandle"
if mojom.IsDataPipeConsumerKind(kind):
return "mojo::ScopedDataPipeConsumerHandle"
if mojom.IsDataPipeProducerKind(kind):
return "mojo::ScopedDataPipeProducerHandle"
if mojom.IsMessagePipeKind(kind):
return "mojo::ScopedMessagePipeHandle"
if mojom.IsSharedBufferKind(kind):
return "mojo::ScopedSharedBufferHandle"
if kind not in _kind_to_cpp_type:
raise Exception("Unrecognized kind %s" % kind.spec)
return _kind_to_cpp_type[kind]
def _IsMoveOnlyKind(self, kind):
if self._IsTypemappedKind(kind):
if mojom.IsEnumKind(kind):
return False
return self.typemap[self._GetFullMojomNameForKind(kind)]["move_only"]
if mojom.IsStructKind(kind) or mojom.IsUnionKind(kind):
return True
if mojom.IsArrayKind(kind):
return self._IsMoveOnlyKind(kind.kind)
if mojom.IsMapKind(kind):
return (self._IsMoveOnlyKind(kind.value_kind) or
self._IsMoveOnlyKind(kind.key_kind))
if mojom.IsAnyHandleOrInterfaceKind(kind):
return True
return False
def _IsReceiverKind(self, kind):
return (mojom.IsPendingReceiverKind(kind) or
mojom.IsInterfaceRequestKind(kind))
def _IsCopyablePassByValue(self, kind):
if not self._IsTypemappedKind(kind):
return False
return self.typemap[self._GetFullMojomNameForKind(kind)][
"copyable_pass_by_value"]
def _ShouldPassParamByValue(self, kind):
return ((not mojom.IsReferenceKind(kind)) or self._IsMoveOnlyKind(kind) or
self._IsCopyablePassByValue(kind))
def _GetCppWrapperCallType(self, kind):
# TODO: Remove this once interfaces are always passed as PtrInfo.
if mojom.IsInterfaceKind(kind):
return "%sPtr" % self._GetNameForKind(kind)
return self._GetCppWrapperType(kind)
def _GetCppWrapperParamType(self, kind):
# TODO: Remove all usage of this method in favor of
# _GetCppWrapperParamTypeNew. This requires all generated code which passes
# interface handles to use PtrInfo instead of Ptr.
if mojom.IsInterfaceKind(kind):
return "%sPtr" % self._GetNameForKind(kind)
cpp_wrapper_type = self._GetCppWrapperType(kind)
return (cpp_wrapper_type if self._ShouldPassParamByValue(kind)
else "const %s&" % cpp_wrapper_type)
def _GetCppWrapperParamTypeNew(self, kind):
cpp_wrapper_type = self._GetCppWrapperType(kind)
return (cpp_wrapper_type if self._ShouldPassParamByValue(kind)
or mojom.IsArrayKind(kind)
else "const %s&" % cpp_wrapper_type)
def _GetCppFieldType(self, kind):
if mojom.IsStructKind(kind):
return ("mojo::internal::Pointer<%s>" %
self._GetNameForKind(kind, internal=True))
if mojom.IsUnionKind(kind):
return "%s" % self._GetNameForKind(kind, internal=True)
if mojom.IsArrayKind(kind):
return ("mojo::internal::Pointer<mojo::internal::Array_Data<%s>>" %
self._GetCppFieldType(kind.kind))
if mojom.IsMapKind(kind):
return ("mojo::internal::Pointer<mojo::internal::Map_Data<%s, %s>>" %
(self._GetCppFieldType(kind.key_kind),
self._GetCppFieldType(kind.value_kind)))
if mojom.IsInterfaceKind(kind) or mojom.IsPendingRemoteKind(kind):
return "mojo::internal::Interface_Data"
if mojom.IsInterfaceRequestKind(kind) or mojom.IsPendingReceiverKind(kind):
return "mojo::internal::Handle_Data"
if (mojom.IsAssociatedInterfaceKind(kind) or
mojom.IsPendingAssociatedRemoteKind(kind)):
return "mojo::internal::AssociatedInterface_Data"
if (mojom.IsAssociatedInterfaceRequestKind(kind) or
mojom.IsPendingAssociatedReceiverKind(kind)):
return "mojo::internal::AssociatedEndpointHandle_Data"
if mojom.IsEnumKind(kind):
return "int32_t"
if mojom.IsStringKind(kind):
return "mojo::internal::Pointer<mojo::internal::String_Data>"
if mojom.IsAnyHandleKind(kind):
return "mojo::internal::Handle_Data"
return _kind_to_cpp_type[kind]
def _GetCppUnionFieldType(self, kind):
if mojom.IsUnionKind(kind):
return ("mojo::internal::Pointer<%s>" %
self._GetNameForKind(kind, internal=True))
return self._GetCppFieldType(kind)
def _GetUnionGetterReturnType(self, kind):
if mojom.IsReferenceKind(kind):
return "%s&" % self._GetCppWrapperType(kind)
return self._GetCppWrapperType(kind)
def _GetUnionTraitGetterReturnType(self, kind):
"""Get field type used in UnionTraits template specialization.
The type may be qualified as UnionTraits specializations live outside the
namespace where e.g. structs are defined.
Args:
kind: {Kind} The type of the field.
Returns:
{str} The C++ type to use for the field.
"""
if mojom.IsReferenceKind(kind):
return "%s&" % self._GetCppWrapperType(kind,
add_same_module_namespaces=True)
return self._GetCppWrapperType(kind, add_same_module_namespaces=True)
def _KindMustBeSerialized(self, kind, processed_kinds=None):
if not processed_kinds:
processed_kinds = set()
if kind in processed_kinds:
return False
if (self._IsTypemappedKind(kind) and
self.typemap[self._GetFullMojomNameForKind(kind)]["force_serialize"]):
return True
processed_kinds.add(kind)
if mojom.IsStructKind(kind) or mojom.IsUnionKind(kind):
return any(self._KindMustBeSerialized(field.kind,
processed_kinds=processed_kinds)
for field in kind.fields)
return False
def _MethodSupportsLazySerialization(self, method):
if not self.support_lazy_serialization:
return False
# TODO(crbug.com/753433): Support lazy serialization for methods which pass
# associated handles.
if mojom.MethodPassesAssociatedKinds(method):
return False
return not any(self._KindMustBeSerialized(param.kind) for param in
method.parameters + (method.response_parameters or []))
def _TranslateConstants(self, token, kind):
if isinstance(token, mojom.NamedValue):
return self._GetNameForKind(token, flatten_nested_kind=True)
if isinstance(token, mojom.BuiltinValue):
if token.value == "double.INFINITY":
return "std::numeric_limits<double>::infinity()"
if token.value == "float.INFINITY":
return "std::numeric_limits<float>::infinity()"
if token.value == "double.NEGATIVE_INFINITY":
return "-std::numeric_limits<double>::infinity()"
if token.value == "float.NEGATIVE_INFINITY":
return "-std::numeric_limits<float>::infinity()"
if token.value == "double.NAN":
return "std::numeric_limits<double>::quiet_NaN()"
if token.value == "float.NAN":
return "std::numeric_limits<float>::quiet_NaN()"
if (kind is not None and mojom.IsFloatKind(kind)):
return token if token.isdigit() else token + "f"
# Per C++11, 2.14.2, the type of an integer literal is the first of the
# corresponding list in Table 6 in which its value can be represented. In
# this case, the list for decimal constants with no suffix is:
# int, long int, long long int
# The standard considers a program ill-formed if it contains an integer
# literal that cannot be represented by any of the allowed types.
#
# As it turns out, MSVC doesn't bother trying to fall back to long long int,
# so the integral constant -2147483648 causes it grief: it decides to
# represent 2147483648 as an unsigned integer, and then warns that the unary
# minus operator doesn't make sense on unsigned types. Doh!
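# For example, with kind == mojom.INT32 and token == "-2147483648" the branch
# below emits:
#   (-2147483647 - 1) /* Workaround for MSVC bug; see https://crbug.com/445618 */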
if kind == mojom.INT32 and token == "-2147483648":
return "(-%d - 1) /* %s */" % (
2**31 - 1, "Workaround for MSVC bug; see https://crbug.com/445618")
return "%s%s" % (token, _kind_to_cpp_literal_suffix.get(kind, ""))
def _ExpressionToText(self, value, kind=None):
return self._TranslateConstants(value, kind)
def _ContainsMoveOnlyMembers(self, struct):
for field in struct.fields:
if self._IsMoveOnlyKind(field.kind):
return True
return False
def _GetStructConstructors(self, struct):
"""Returns a list of constructors for a struct.
Params:
struct: {Struct} The struct to return constructors for.
Returns:
{[StructConstructor]} A list of StructConstructors that should be
generated for |struct|.
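For example, a struct whose versions hold 2 and then 4 fields yields
constructors taking 0, 2 and 4 parameters (fields in ordinal order), unless
a typemapped field is non-copyable and non-movable, as handled below.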
"""
if not mojom.IsStructKind(struct):
raise TypeError
# Types that are neither copyable nor movable can't be passed to a struct
# constructor so only generate a default constructor.
if any(self._IsTypemappedKind(field.kind) and self.typemap[
self._GetFullMojomNameForKind(field.kind)]["non_copyable_non_movable"]
for field in struct.fields):
return [StructConstructor(struct.fields, [])]
param_counts = [0]
for version in struct.versions:
if param_counts[-1] != version.num_fields:
param_counts.append(version.num_fields)
ordinal_fields = sorted(struct.fields, key=lambda field: field.ordinal)
return (StructConstructor(struct.fields, ordinal_fields[:param_count])
for param_count in param_counts)
def _GetContainerValidateParamsCtorArgs(self, kind):
if mojom.IsStringKind(kind):
expected_num_elements = 0
element_is_nullable = False
key_validate_params = "nullptr"
element_validate_params = "nullptr"
enum_validate_func = "nullptr"
elif mojom.IsMapKind(kind):
expected_num_elements = 0
element_is_nullable = False
key_validate_params = self._GetNewContainerValidateParams(mojom.Array(
kind=kind.key_kind))
element_validate_params = self._GetNewContainerValidateParams(mojom.Array(
kind=kind.value_kind))
enum_validate_func = "nullptr"
else: # mojom.IsArrayKind(kind)
expected_num_elements = generator.ExpectedArraySize(kind) or 0
element_is_nullable = mojom.IsNullableKind(kind.kind)
key_validate_params = "nullptr"
element_validate_params = self._GetNewContainerValidateParams(kind.kind)
if mojom.IsEnumKind(kind.kind):
enum_validate_func = ("%s::Validate" %
self._GetQualifiedNameForKind(kind.kind, internal=True,
flatten_nested_kind=True))
else:
enum_validate_func = "nullptr"
if enum_validate_func == "nullptr":
if key_validate_params == "nullptr":
return "%d, %s, %s" % (expected_num_elements,
"true" if element_is_nullable else "false",
element_validate_params)
else:
return "%s, %s" % (key_validate_params, element_validate_params)
else:
return "%d, %s" % (expected_num_elements, enum_validate_func)
def _GetNewContainerValidateParams(self, kind):
if (not mojom.IsArrayKind(kind) and not mojom.IsMapKind(kind) and
not mojom.IsStringKind(kind)):
return "nullptr"
return "new mojo::internal::ContainerValidateParams(%s)" % (
self._GetContainerValidateParamsCtorArgs(kind))
def _GetCppDataViewType(self, kind, qualified=False):
def _GetName(input_kind):
return _NameFormatter(input_kind, None).FormatForCpp(
omit_namespace_for_module=(None if qualified else self.module),
flatten_nested_kind=True)
if mojom.IsEnumKind(kind):
return _GetName(kind)
if mojom.IsStructKind(kind) or mojom.IsUnionKind(kind):
return "%sDataView" % _GetName(kind)
if mojom.IsArrayKind(kind):
return "mojo::ArrayDataView<%s>" % (
self._GetCppDataViewType(kind.kind, qualified))
if mojom.IsMapKind(kind):
return ("mojo::MapDataView<%s, %s>" % (
self._GetCppDataViewType(kind.key_kind, qualified),
self._GetCppDataViewType(kind.value_kind, qualified)))
if mojom.IsStringKind(kind):
return "mojo::StringDataView"
if mojom.IsInterfaceKind(kind):
return "%sPtrDataView" % _GetName(kind)
if mojom.IsInterfaceRequestKind(kind):
return "%sRequestDataView" % _GetName(kind.kind)
if mojom.IsPendingRemoteKind(kind):
return ("mojo::InterfacePtrDataView<%sInterfaceBase>" %
_GetName(kind.kind))
if mojom.IsPendingReceiverKind(kind):
return ("mojo::InterfaceRequestDataView<%sInterfaceBase>" %
_GetName(kind.kind))
if (mojom.IsAssociatedInterfaceKind(kind) or
mojom.IsPendingAssociatedRemoteKind(kind)):
return "%sAssociatedPtrInfoDataView" % _GetName(kind.kind)
if (mojom.IsAssociatedInterfaceRequestKind(kind) or
mojom.IsPendingAssociatedReceiverKind(kind)):
return "%sAssociatedRequestDataView" % _GetName(kind.kind)
if mojom.IsGenericHandleKind(kind):
return "mojo::ScopedHandle"
if mojom.IsDataPipeConsumerKind(kind):
return "mojo::ScopedDataPipeConsumerHandle"
if mojom.IsDataPipeProducerKind(kind):
return "mojo::ScopedDataPipeProducerHandle"
if mojom.IsMessagePipeKind(kind):
return "mojo::ScopedMessagePipeHandle"
if mojom.IsSharedBufferKind(kind):
return "mojo::ScopedSharedBufferHandle"
return _kind_to_cpp_type[kind]
def _GetUnmappedTypeForSerializer(self, kind):
return self._GetCppDataViewType(kind, qualified=True)
| 39.264615 | 80 | 0.690489 |
4687c71a05ea0c88a9336256f2dcac6d58c491db
| 2,462 |
py
|
Python
|
run.py
|
bhoepner/piradio
|
e28038af2a3ed304ed085ea1fb868a997b8387a6
|
[
"MIT"
] | null | null | null |
run.py
|
bhoepner/piradio
|
e28038af2a3ed304ed085ea1fb868a997b8387a6
|
[
"MIT"
] | null | null | null |
run.py
|
bhoepner/piradio
|
e28038af2a3ed304ed085ea1fb868a997b8387a6
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
from signal import SIGINT, SIGTERM, signal
from time import sleep
from piradio.rotary import RotaryEncoder
from piradio.settings import Settings
from piradio.vlc import VlcRemote
class Runner:
MODE_VOLUME = 0
MODE_STATIONS = 1
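# Behaviour summary (from the handlers below): rotating the encoder changes the
# VLC volume in MODE_VOLUME or steps through the station playlist in
# MODE_STATIONS; pressing the encoder switch toggles between the two modes.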
def __init__(self):
self._vlc = VlcRemote()
self._rot = RotaryEncoder()
self._mode = self.MODE_VOLUME
self._station = None
def run(self):
self._initialize()
prev_title = None
prev_playing = None
while True:
title = self._vlc.get_title()
playing = self._vlc.get_playing()
if title != prev_title:
print(title or '')
prev_title = title
if playing != prev_playing:
print(playing or '')
prev_playing = playing
sleep(0.5)
def _initialize(self):
settings = Settings.read('settings.ini')
self._vlc.connect()
self._vlc.stop()
self._vlc.clear()
for station in settings.stations:
self._vlc.enqueue(station.url)
self._vlc.set_volume(settings.volume)
self._vlc.play()
for _ in range(settings.station):
self._vlc.next()
self._station = settings.station
self._rot.setup()
self._rot.on_rotate(self._on_rotate)
self._rot.on_switch(self._on_switch)
signal(SIGINT, self._on_signal)
signal(SIGTERM, self._on_signal)
def _shutdown(self):
self._rot.off_rotate()
self._rot.off_switch()
self._rot.shutdown()
self._vlc.stop()
self._vlc.disconnect()
def _on_signal(self, sig, frame):
self._shutdown()
def _on_rotate(self, direction):
print('rotation:', direction)
if direction == RotaryEncoder.CLOCKWISE:
if self._mode == self.MODE_VOLUME:
self._vlc.volume_up()
else:
self._vlc.next()
if direction == RotaryEncoder.COUNTERCLOCKWISE:
if self._mode == self.MODE_VOLUME:
self._vlc.volume_down()
else:
self._vlc.prev()
def _on_switch(self):
print('switch')
self._mode = (
self.MODE_STATIONS
if self._mode == self.MODE_VOLUME
else self.MODE_VOLUME
)
if __name__ == '__main__':
runner = Runner()
runner.run()
| 24.868687 | 55 | 0.572299 |
af30459d974fae6e4fa61abe75cc096e1de08f4f
| 18,053 |
py
|
Python
|
3rdParty/V8/V8-5.0.71.39/tools/swarming_client/third_party/rsa/rsa/key.py
|
mikestaub/arangodb
|
1bdf414de29b31bcaf80769a095933f66f8256ce
|
[
"ICU",
"BSL-1.0",
"Zlib",
"Apache-2.0"
] | 27 |
2016-04-27T01:02:03.000Z
|
2021-12-13T08:53:19.000Z
|
3rdParty/V8/V8-5.0.71.39/tools/swarming_client/third_party/rsa/rsa/key.py
|
mikestaub/arangodb
|
1bdf414de29b31bcaf80769a095933f66f8256ce
|
[
"ICU",
"BSL-1.0",
"Zlib",
"Apache-2.0"
] | 2 |
2017-03-09T09:00:50.000Z
|
2017-09-21T15:48:20.000Z
|
3rdParty/V8/V8-5.0.71.39/tools/swarming_client/third_party/rsa/rsa/key.py
|
mikestaub/arangodb
|
1bdf414de29b31bcaf80769a095933f66f8256ce
|
[
"ICU",
"BSL-1.0",
"Zlib",
"Apache-2.0"
] | 17 |
2016-04-27T02:06:39.000Z
|
2019-12-18T08:07:00.000Z
|
# -*- coding: utf-8 -*-
#
# Copyright 2011 Sybren A. Stüvel <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''RSA key generation code.
Create new keys with the newkeys() function. It will give you a PublicKey and a
PrivateKey object.
Loading and saving keys requires the pyasn1 module. This module is imported as
late as possible, such that other functionality will remain working in absence
of pyasn1.
'''
import logging
from rsa._compat import b, bytes_type
import rsa.prime
import rsa.pem
import rsa.common
log = logging.getLogger(__name__)
class AbstractKey(object):
'''Abstract superclass for private and public keys.'''
@classmethod
def load_pkcs1(cls, keyfile, format='PEM'):
r'''Loads a key in PKCS#1 DER or PEM format.
:param keyfile: contents of a DER- or PEM-encoded file that contains
the public key.
:param format: the format of the file to load; 'PEM' or 'DER'
:return: a PublicKey object
'''
methods = {
'PEM': cls._load_pkcs1_pem,
'DER': cls._load_pkcs1_der,
}
if format not in methods:
formats = ', '.join(sorted(methods.keys()))
raise ValueError('Unsupported format: %r, try one of %s' % (format,
formats))
method = methods[format]
return method(keyfile)
def save_pkcs1(self, format='PEM'):
'''Saves the public key in PKCS#1 DER or PEM format.
:param format: the format to save; 'PEM' or 'DER'
:returns: the DER- or PEM-encoded public key.
'''
methods = {
'PEM': self._save_pkcs1_pem,
'DER': self._save_pkcs1_der,
}
if format not in methods:
formats = ', '.join(sorted(methods.keys()))
raise ValueError('Unsupported format: %r, try one of %s' % (format,
formats))
method = methods[format]
return method()
class PublicKey(AbstractKey):
'''Represents a public RSA key.
This key is also known as the 'encryption key'. It contains the 'n' and 'e'
values.
Supports attributes as well as dictionary-like access. Attribute access is
faster, though.
>>> PublicKey(5, 3)
PublicKey(5, 3)
>>> key = PublicKey(5, 3)
>>> key.n
5
>>> key['n']
5
>>> key.e
3
>>> key['e']
3
'''
__slots__ = ('n', 'e')
def __init__(self, n, e):
self.n = n
self.e = e
def __getitem__(self, key):
return getattr(self, key)
def __repr__(self):
return 'PublicKey(%i, %i)' % (self.n, self.e)
def __eq__(self, other):
if other is None:
return False
if not isinstance(other, PublicKey):
return False
return self.n == other.n and self.e == other.e
def __ne__(self, other):
return not (self == other)
@classmethod
def _load_pkcs1_der(cls, keyfile):
r'''Loads a key in PKCS#1 DER format.
@param keyfile: contents of a DER-encoded file that contains the public
key.
@return: a PublicKey object
First let's construct a DER encoded key:
>>> import base64
>>> b64der = 'MAwCBQCNGmYtAgMBAAE='
>>> der = base64.decodestring(b64der)
This loads the file:
>>> PublicKey._load_pkcs1_der(der)
PublicKey(2367317549, 65537)
'''
from pyasn1.codec.der import decoder
from rsa.asn1 import AsnPubKey
(priv, _) = decoder.decode(keyfile, asn1Spec=AsnPubKey())
return cls(n=int(priv['modulus']), e=int(priv['publicExponent']))
def _save_pkcs1_der(self):
'''Saves the public key in PKCS#1 DER format.
@returns: the DER-encoded public key.
'''
from pyasn1.codec.der import encoder
from rsa.asn1 import AsnPubKey
# Create the ASN object
asn_key = AsnPubKey()
asn_key.setComponentByName('modulus', self.n)
asn_key.setComponentByName('publicExponent', self.e)
return encoder.encode(asn_key)
@classmethod
def _load_pkcs1_pem(cls, keyfile):
'''Loads a PKCS#1 PEM-encoded public key file.
The contents of the file before the "-----BEGIN RSA PUBLIC KEY-----" and
after the "-----END RSA PUBLIC KEY-----" lines are ignored.
@param keyfile: contents of a PEM-encoded file that contains the public
key.
@return: a PublicKey object
'''
der = rsa.pem.load_pem(keyfile, 'RSA PUBLIC KEY')
return cls._load_pkcs1_der(der)
def _save_pkcs1_pem(self):
'''Saves a PKCS#1 PEM-encoded public key file.
@return: contents of a PEM-encoded file that contains the public key.
'''
der = self._save_pkcs1_der()
return rsa.pem.save_pem(der, 'RSA PUBLIC KEY')
@classmethod
def load_pkcs1_openssl_pem(cls, keyfile):
'''Loads a PKCS#1.5 PEM-encoded public key file from OpenSSL.
These files can be recognised in that they start with BEGIN PUBLIC KEY
rather than BEGIN RSA PUBLIC KEY.
The contents of the file before the "-----BEGIN PUBLIC KEY-----" and
after the "-----END PUBLIC KEY-----" lines are ignored.
@param keyfile: contents of a PEM-encoded file that contains the public
key, from OpenSSL.
@return: a PublicKey object
'''
der = rsa.pem.load_pem(keyfile, 'PUBLIC KEY')
return cls.load_pkcs1_openssl_der(der)
@classmethod
def load_pkcs1_openssl_der(cls, keyfile):
'''Loads a PKCS#1 DER-encoded public key file from OpenSSL.
@param keyfile: contents of a DER-encoded file that contains the public
key, from OpenSSL.
@return: a PublicKey object
'''
from rsa.asn1 import OpenSSLPubKey
from pyasn1.codec.der import decoder
from pyasn1.type import univ
(keyinfo, _) = decoder.decode(keyfile, asn1Spec=OpenSSLPubKey())
if keyinfo['header']['oid'] != univ.ObjectIdentifier('1.2.840.113549.1.1.1'):
raise TypeError("This is not a DER-encoded OpenSSL-compatible public key")
return cls._load_pkcs1_der(keyinfo['key'][1:])
class PrivateKey(AbstractKey):
'''Represents a private RSA key.
This key is also known as the 'decryption key'. It contains the 'n', 'e',
'd', 'p', 'q' and other values.
Supports attributes as well as dictionary-like access. Attribute access is
faster, though.
>>> PrivateKey(3247, 65537, 833, 191, 17)
PrivateKey(3247, 65537, 833, 191, 17)
exp1, exp2 and coef don't have to be given, they will be calculated:
>>> pk = PrivateKey(3727264081, 65537, 3349121513, 65063, 57287)
>>> pk.exp1
55063
>>> pk.exp2
10095
>>> pk.coef
50797
If you give exp1, exp2 or coef, they will be used as-is:
>>> pk = PrivateKey(1, 2, 3, 4, 5, 6, 7, 8)
>>> pk.exp1
6
>>> pk.exp2
7
>>> pk.coef
8
'''
__slots__ = ('n', 'e', 'd', 'p', 'q', 'exp1', 'exp2', 'coef')
def __init__(self, n, e, d, p, q, exp1=None, exp2=None, coef=None):
self.n = n
self.e = e
self.d = d
self.p = p
self.q = q
# Calculate the other values if they aren't supplied
if exp1 is None:
self.exp1 = int(d % (p - 1))
else:
self.exp1 = exp1
if exp2 is None:
self.exp2 = int(d % (q - 1))
else:
self.exp2 = exp2
if coef is None:
self.coef = rsa.common.inverse(q, p)
else:
self.coef = coef
def __getitem__(self, key):
return getattr(self, key)
def __repr__(self):
return 'PrivateKey(%(n)i, %(e)i, %(d)i, %(p)i, %(q)i)' % self
def __eq__(self, other):
if other is None:
return False
if not isinstance(other, PrivateKey):
return False
return (self.n == other.n and
self.e == other.e and
self.d == other.d and
self.p == other.p and
self.q == other.q and
self.exp1 == other.exp1 and
self.exp2 == other.exp2 and
self.coef == other.coef)
def __ne__(self, other):
return not (self == other)
@classmethod
def _load_pkcs1_der(cls, keyfile):
r'''Loads a key in PKCS#1 DER format.
@param keyfile: contents of a DER-encoded file that contains the private
key.
@return: a PrivateKey object
First let's construct a DER encoded key:
>>> import base64
>>> b64der = 'MC4CAQACBQDeKYlRAgMBAAECBQDHn4npAgMA/icCAwDfxwIDANcXAgInbwIDAMZt'
>>> der = base64.decodestring(b64der)
This loads the file:
>>> PrivateKey._load_pkcs1_der(der)
PrivateKey(3727264081, 65537, 3349121513, 65063, 57287)
'''
from pyasn1.codec.der import decoder
(priv, _) = decoder.decode(keyfile)
# ASN.1 contents of DER encoded private key:
#
# RSAPrivateKey ::= SEQUENCE {
# version Version,
# modulus INTEGER, -- n
# publicExponent INTEGER, -- e
# privateExponent INTEGER, -- d
# prime1 INTEGER, -- p
# prime2 INTEGER, -- q
# exponent1 INTEGER, -- d mod (p-1)
# exponent2 INTEGER, -- d mod (q-1)
# coefficient INTEGER, -- (inverse of q) mod p
# otherPrimeInfos OtherPrimeInfos OPTIONAL
# }
if priv[0] != 0:
raise ValueError('Unable to read this file, version %s != 0' % priv[0])
as_ints = tuple(int(x) for x in priv[1:9])
return cls(*as_ints)
def _save_pkcs1_der(self):
'''Saves the private key in PKCS#1 DER format.
@returns: the DER-encoded private key.
'''
from pyasn1.type import univ, namedtype
from pyasn1.codec.der import encoder
class AsnPrivKey(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('version', univ.Integer()),
namedtype.NamedType('modulus', univ.Integer()),
namedtype.NamedType('publicExponent', univ.Integer()),
namedtype.NamedType('privateExponent', univ.Integer()),
namedtype.NamedType('prime1', univ.Integer()),
namedtype.NamedType('prime2', univ.Integer()),
namedtype.NamedType('exponent1', univ.Integer()),
namedtype.NamedType('exponent2', univ.Integer()),
namedtype.NamedType('coefficient', univ.Integer()),
)
# Create the ASN object
asn_key = AsnPrivKey()
asn_key.setComponentByName('version', 0)
asn_key.setComponentByName('modulus', self.n)
asn_key.setComponentByName('publicExponent', self.e)
asn_key.setComponentByName('privateExponent', self.d)
asn_key.setComponentByName('prime1', self.p)
asn_key.setComponentByName('prime2', self.q)
asn_key.setComponentByName('exponent1', self.exp1)
asn_key.setComponentByName('exponent2', self.exp2)
asn_key.setComponentByName('coefficient', self.coef)
return encoder.encode(asn_key)
@classmethod
def _load_pkcs1_pem(cls, keyfile):
'''Loads a PKCS#1 PEM-encoded private key file.
The contents of the file before the "-----BEGIN RSA PRIVATE KEY-----" and
after the "-----END RSA PRIVATE KEY-----" lines are ignored.
@param keyfile: contents of a PEM-encoded file that contains the private
key.
@return: a PrivateKey object
'''
der = rsa.pem.load_pem(keyfile, b('RSA PRIVATE KEY'))
return cls._load_pkcs1_der(der)
def _save_pkcs1_pem(self):
'''Saves a PKCS#1 PEM-encoded private key file.
@return: contents of a PEM-encoded file that contains the private key.
'''
der = self._save_pkcs1_der()
return rsa.pem.save_pem(der, b('RSA PRIVATE KEY'))
def find_p_q(nbits, getprime_func=rsa.prime.getprime, accurate=True):
'''Returns a tuple of two different primes of nbits bits each.
The resulting p * q has exactly 2 * nbits bits, and the returned p and q
will not be equal.
:param nbits: the number of bits in each of p and q.
:param getprime_func: the getprime function, defaults to
:py:func:`rsa.prime.getprime`.
*Introduced in Python-RSA 3.1*
:param accurate: whether to enable accurate mode or not.
:returns: (p, q), where p > q
>>> (p, q) = find_p_q(128)
>>> from rsa import common
>>> common.bit_size(p * q)
256
When not in accurate mode, the number of bits can be slightly less
>>> (p, q) = find_p_q(128, accurate=False)
>>> from rsa import common
>>> common.bit_size(p * q) <= 256
True
>>> common.bit_size(p * q) > 240
True
'''
total_bits = nbits * 2
# Make sure that p and q aren't too close or the factoring programs can
# factor n.
shift = nbits // 16
pbits = nbits + shift
qbits = nbits - shift
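# For example, with nbits == 128 this gives shift == 8, so p is drawn with
# 136 bits and q with 120 bits, keeping the two primes well apart.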
# Choose the two initial primes
log.debug('find_p_q(%i): Finding p', nbits)
p = getprime_func(pbits)
log.debug('find_p_q(%i): Finding q', nbits)
q = getprime_func(qbits)
def is_acceptable(p, q):
'''Returns True iff p and q are acceptable:
- p and q differ
- (p * q) has the right nr of bits (when accurate=True)
'''
if p == q:
return False
if not accurate:
return True
# Make sure we have just the right amount of bits
found_size = rsa.common.bit_size(p * q)
return total_bits == found_size
# Keep choosing other primes until they match our requirements.
change_p = False
while not is_acceptable(p, q):
# Change p on one iteration and q on the other
if change_p:
p = getprime_func(pbits)
else:
q = getprime_func(qbits)
change_p = not change_p
# We want p > q as described on
# http://www.di-mgt.com.au/rsa_alg.html#crt
return (max(p, q), min(p, q))
def calculate_keys(p, q, nbits):
'''Calculates an encryption and a decryption key given p and q, and
returns them as a tuple (e, d)
'''
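# Illustrative example with toy primes (nbits is unused in this calculation):
# calculate_keys(3, 11, 8) gives phi_n == 20 and returns (65537, 13), since
# 65537 * 13 == 851981 == 42599 * 20 + 1, i.e. e * d is 1 modulo phi_n.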
phi_n = (p - 1) * (q - 1)
# A very common choice for e is 65537
e = 65537
try:
d = rsa.common.inverse(e, phi_n)
except ValueError:
raise ValueError("e (%d) and phi_n (%d) are not relatively prime" %
(e, phi_n))
if (e * d) % phi_n != 1:
raise ValueError("e (%d) and d (%d) are not mult. inv. modulo "
"phi_n (%d)" % (e, d, phi_n))
return (e, d)
def gen_keys(nbits, getprime_func, accurate=True):
'''Generate RSA keys of nbits bits. Returns (p, q, e, d).
Note: this can take a long time, depending on the key size.
:param nbits: the total number of bits in ``p`` and ``q``. Both ``p`` and
``q`` will use ``nbits/2`` bits.
:param getprime_func: either :py:func:`rsa.prime.getprime` or a function
with similar signature.
'''
(p, q) = find_p_q(nbits // 2, getprime_func, accurate)
(e, d) = calculate_keys(p, q, nbits // 2)
return (p, q, e, d)
def newkeys(nbits, accurate=True, poolsize=1):
'''Generates public and private keys, and returns them as (pub, priv).
The public key is also known as the 'encryption key', and is a
:py:class:`rsa.PublicKey` object. The private key is also known as the
'decryption key' and is a :py:class:`rsa.PrivateKey` object.
:param nbits: the number of bits required to store ``n = p*q``.
:param accurate: when True, ``n`` will have exactly the number of bits you
asked for. However, this makes key generation much slower. When False,
``n`` may have slightly fewer bits.
:param poolsize: the number of processes to use to generate the prime
numbers. If set to a number > 1, a parallel algorithm will be used.
This requires Python 2.6 or newer.
:returns: a tuple (:py:class:`rsa.PublicKey`, :py:class:`rsa.PrivateKey`)
The ``poolsize`` parameter was added in *Python-RSA 3.1* and requires
Python 2.6 or newer.
'''
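# Usage sketch: (pub, priv) = newkeys(256) returns a PublicKey and a PrivateKey
# sharing the same 256-bit modulus n = p * q (the bit count is exact because
# accurate defaults to True).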
if nbits < 16:
raise ValueError('Key too small')
if poolsize < 1:
raise ValueError('Pool size (%i) should be >= 1' % poolsize)
# Determine which getprime function to use
if poolsize > 1:
from rsa import parallel
import functools
getprime_func = functools.partial(parallel.getprime, poolsize=poolsize)
else: getprime_func = rsa.prime.getprime
# Generate the key components
(p, q, e, d) = gen_keys(nbits, getprime_func)
# Create the key objects
n = p * q
return (
PublicKey(n, e),
PrivateKey(n, e, d, p, q)
)
__all__ = ['PublicKey', 'PrivateKey', 'newkeys']
if __name__ == '__main__':
import doctest
try:
for count in range(100):
(failures, tests) = doctest.testmod()
if failures:
break
if (count and count % 10 == 0) or count == 1:
print('%i times' % count)
except KeyboardInterrupt:
print('Aborted')
else:
print('Doctests done')
| 29.450245 | 87 | 0.594583 |
f9272700d1d43ad5be4886d2c7f776d88bec3b51
| 35,741 |
py
|
Python
|
math_eval/test_math_eval.py
|
molsonkiko/math_eval
|
e458dd9f1cb702d9b55f53119637ab13490f20bf
|
[
"MIT"
] | null | null | null |
math_eval/test_math_eval.py
|
molsonkiko/math_eval
|
e458dd9f1cb702d9b55f53119637ab13490f20bf
|
[
"MIT"
] | null | null | null |
math_eval/test_math_eval.py
|
molsonkiko/math_eval
|
e458dd9f1cb702d9b55f53119637ab13490f20bf
|
[
"MIT"
] | null | null | null |
from math_eval import *
import random
import string
import itertools
import traceback
import sys
import math
try:
import pandas as pd
except:
pass # should enable people w/o pandas to use my ComputeTester object for testing
values = lambda dict_: dict_.values()
def five_to_the_x(x): return 5**x
def five_to_the_float_x(x): return 5**float(x)
problem_eqn = "-3----3//-(Q)*(2/-2.8659-2.4492)"
my_eqn = "5.3*2**x // x - 5.4*43 + x ** -3/0.7"
xs = [x/2 for x in range(-11, 12, 2)]
ufunctions_binops_example = "len(int(2*(-(2+3**2))/x)*tuple(`int` map y))"
nested_map_example = ("int(2*(-(2+3**2))/x)"
"*sum(tuple("
"`float(x)**(3/"
"tuple(\\`int\\` map \\`733\\`)[2])`"
"map y))")
def approx_equal(x,y, epsilon = 10*sys.float_info.epsilon, na_equal = False):
'''If x and y are both numbers, returns True if abs(x-y) <= epsilon, which
defaults to ten times the float machine epsilon.
If x or y is not a number, returns (x == y).
If na_equal is True, also returns True if x and y are both float('nan').'''
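# For instance, approx_equal(0.1 + 0.2, 0.3) is True despite floating-point
# rounding, and approx_equal('a', 'a') falls back to plain equality and is True.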
try:
good = abs(x-y) <= epsilon
if na_equal and not good:
return good | (math.isnan(x) & math.isnan(y))
return good
except:
return x == y
def test_resolve_binop(elts = [five_to_the_x, 0, -1, 2, -0.5, 'w', 'x'],
binops = [operator.add, operator.mul],
xs = [-1, 0, 1]):
for e1, func, e2, x in itertools.product(elts, binops, elts, xs):
outfunc = resolve_binop(e1, func, e2, ['w', 'x'])
if isinstance(outfunc, (function, ResoBin)):
print(e1, func, e2, x, outfunc(x))
else:
print(e1, func, e2, x, outfunc)
class ImmutableDict:
'''What it says on the tin. Behaves like a normal dict, but has no
methods for adding or removing elements. Also doesn't have the overloaded '|'.
Useful chiefly because an ImmutableDict can be an item in a set or a key in a dict
because it's hashable, whereas a normal dict is not hashable.'''
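# For example, {ImmutableDict({'a': 1}): 'ok'} is a valid dict, whereas using a
# plain dict as the key would raise TypeError: unhashable type: 'dict'.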
def __init__(self, items):
self.__dict = dict(items)
def __getitem__(self, x):
return self.__dict[x]
def get(self, x, default = None):
return self.__dict.get(x, default)
def keys(self):
return self.__dict.keys()
def values(self):
return self.__dict.values()
def items(self):
return self.__dict.items()
def copy(self):
return ImmutableDict(self.__dict.copy())
def __str__(self):
return "ImmutableDict({})".format(self.__dict)
__repr__ = __str__
def __hash__(self):
return hash(str(self))
def __eq__(self, other):
return isinstance(other, ImmutableDict) and str(self)==str(other)
def __len__(self):
return len(self.__dict)
def __iter__(self):
return iter(self.__dict)
@classmethod
def fromkeys(self, itbl, value = None):
return ImmutableDict({k: value for k in itbl})
class ComputeTester: #TODO: figure out how to capture error eqns
def __init__(self):
self.bad_eqns = {}
self.good_eqns = {}
self.error_eqns = {}
self.comp_errors = {}
self.true_errors = {}
self.dframe = None
def _build_dframe(self):
try:
self.dframe = pd.DataFrame()
except:
return
eqns = []
inputs = []
true_outputs = []
comp_outputs = []
comp_errors, true_errors = [], []
statuses = ['good' for x in self.good_eqns] + ['bad' for x in self.bad_eqns] + ['error' for x in self.error_eqns]
for eqn in self.good_eqns:
if isinstance(eqn, tuple):
eqns.append(eqn[0])
inputs.append(eqn[1])
else:
eqns.append(eqn)
inputs.append(None)
true_outputs.append(self.good_eqns[eqn]['true'])
comp_outputs.append(self.good_eqns[eqn]['comp'])
true_errors.append(self.true_errors[eqn])
comp_errors.append(self.comp_errors[eqn])
for eqn in self.bad_eqns:
if isinstance(eqn, tuple):
eqns.append(eqn[0])
inputs.append(eqn[1])
else:
eqns.append(eqn)
inputs.append(None)
true_outputs.append(self.bad_eqns[eqn]['true'])
comp_outputs.append(self.bad_eqns[eqn]['comp'])
true_errors.append(self.true_errors[eqn])
comp_errors.append(self.comp_errors[eqn])
for eqn in self.error_eqns:
if isinstance(eqn, tuple):
eqns.append(eqn[0])
inputs.append(eqn[1])
else:
eqns.append(eqn)
inputs.append(None)
true_outputs.append(self.error_eqns[eqn]['true'])
comp_outputs.append(self.error_eqns[eqn]['comp'])
true_errors.append(self.true_errors[eqn])
comp_errors.append(self.comp_errors[eqn])
self.dframe['eqns'] = eqns
self.dframe['inputs'] = inputs
self.dframe['true_outputs'] = true_outputs
self.dframe['comp_outputs'] = comp_outputs
self.dframe['statuses'] = statuses
self.dframe['operators'] = [tuple(op_regex.findall(eqn)) for eqn in eqns]
self.dframe['true_errors'] = true_errors
self.dframe['comp_errors'] = comp_errors
del statuses, eqns, inputs, true_outputs, comp_outputs, true_errors, comp_errors
# self.statuses = self.dframe['statuses']
# self.eqns = self.dframe['eqns']
# self.inputs = self.dframe['inputs']
# self.true_outputs = self.dframe['true_outputs']
# self.comp_outputs = self.dframe['comp_outputs']
# self.statuses = self.dframe['statuses']
# self.operators = self.dframe['operators']
backtickstring_acceptable_errors = "(?:un|not )supported.+?\'(?:str|int|bool)\' and \'(?:str|int|bool)\'"
# errors of the type "TypeError: unsupported operand type(s) for -: 'str' and 'float'"
numeric_acceptable_errors = "ZeroDivision|Overflow|complex"
# the "complex" covers errors like
# "TypeError: '>' not supported between instances of 'complex' and 'int'"
def _compare_eqn_evaluations(tester,
eqn,
tb_true,
tb_comp,
input_,
c_out,
t_out,
acceptable_error_types,
na_equal):
if (tb_true is None) and (tb_comp is None): # neither function had an error
if approx_equal(c_out, t_out, na_equal = na_equal): # all good
if input_ is None:
tester.good_eqns[eqn] = {'comp': c_out, 'true': t_out}
tester.comp_errors[eqn] = 'None'
tester.true_errors[eqn] = 'None'
else:
tester.good_eqns[(eqn, input_)] = {'comp': c_out, 'true': t_out}
tester.comp_errors[(eqn, input_)] = 'None'
tester.true_errors[(eqn, input_)] = 'None'
else: # the two outputs are not equal
if input_ is None:
tester.bad_eqns[eqn] = {'comp': c_out, 'true': t_out}
tester.comp_errors[eqn] = 'None'
tester.true_errors[eqn] = 'None'
else:
tester.bad_eqns[(eqn, input_)] = {'comp': c_out, 'true': t_out}
tester.comp_errors[(eqn, input_)] = 'None'
tester.true_errors[(eqn, input_)] = 'None'
return
true_error_OK = None
comp_error_OK = None
tb_string_comp, tb_string_true = 'None', 'None'
fatal = False
message = "eqn = {}, input_ = {}\n".format(repr(eqn), input_)
if tb_true is not None: # true function had an error
tb_string_true = list(tb_true.format_exception_only())[0]
true_error_OK = re.findall(acceptable_error_types, tb_string_true)
message += "~~~~~~~~~~~~~\nTrue output:\n" + ''.join(tb_true.format())
if not true_error_OK:
fatal = True
else:
message += "~~~~~~~~~~~~~\nTrue output:\n" + str(t_out)
if tb_comp is not None: # compute had an error
tb_string_comp = list(tb_comp.format_exception_only())[0]
comp_error_OK = re.findall(acceptable_error_types, tb_string_comp)
message += "\n~~~~~~~~~~~~~\nCompute output:\n" + ''.join(tb_comp.format())
if not comp_error_OK:
fatal = True
else:
message += "\n~~~~~~~~~~~~~\nCompute output:\n" + str(c_out)
if fatal:
# Either my function or the "true" function errored out for an unacceptable
# reason, and testing should be halted and the user notified.
return message
elif isinstance(comp_error_OK, list) and isinstance(true_error_OK, list):
if comp_error_OK[-1] != true_error_OK[-1]:
# they both errored out for "acceptable" reasons, but not the same reason.
# My function is faulted for this, but the discrepancy should not cause
# testing to stop.
if input_ is None:
tester.bad_eqns[eqn] = {'comp': c_out, 'true': t_out}
tester.comp_errors[eqn] = tb_string_comp
tester.true_errors[eqn] = tb_string_true
else:
tester.bad_eqns[(eqn, input_)] = {'comp': c_out, 'true': t_out}
tester.comp_errors[(eqn, input_)] = tb_string_comp
tester.true_errors[(eqn, input_)] = tb_string_true
else:
# They both errored out for the same "acceptable" reason.
# My function is not faulted for this, and testing should not stop.
if input_ is None:
tester.error_eqns[eqn] = {'comp': c_out, 'true': t_out}
tester.comp_errors[eqn] = tb_string_comp
tester.true_errors[eqn] = tb_string_true
else:
tester.error_eqns[(eqn, input_)] = {'comp': c_out, 'true': t_out}
tester.comp_errors[(eqn, input_)] = tb_string_comp
tester.true_errors[(eqn, input_)] = tb_string_true
else:
# One of them errored out for an "acceptable" reason and the other did not
# have an error at all.
# My function is faulted for this discrepancy, but testing should not stop.
if input_ is None:
tester.bad_eqns[eqn] = {'comp': c_out, 'true': t_out}
tester.comp_errors[eqn] = tb_string_comp
tester.true_errors[eqn] = tb_string_true
else:
tester.bad_eqns[(eqn, input_)] = {'comp': c_out, 'true': t_out}
tester.comp_errors[(eqn, input_)] = tb_string_comp
tester.true_errors[(eqn, input_)] = tb_string_true
def evaluate_eqn_on_inputs(computer,
eqn,
inputs,
tester,
acceptable_error_types,
na_equal,
lambda_eqn = None):
varnames = get_varnames(tokenize(eqn), safe = False)
if varnames:
tb_comp = None
tb_true = None
full_inputs = []
for ii in range(5):
full_inputs.append(tuple(random.choice(inputs) for var in range(len(varnames))))
try:
comp_fun = computer(eqn)
except Exception as ex:
tb_comp = traceback.TracebackException.from_exception(ex)
try:
if lambda_eqn:
# for cases where compute syntax differs from normal Python syntax
true_fun = eval("lambda {}: {}".format(','.join(varnames), lambda_eqn))
else:
true_fun = eval("lambda {}: {}".format(','.join(varnames), eqn))
except Exception as ex:
tb_true = traceback.TracebackException.from_exception(ex)
for input_ in full_inputs:
comp_out = float('nan')
true_out = float('nan')
if tb_comp is not None: # there was an error building the compute function
tb_comp_in = tb_comp
else: # let's try the compute function on this input
try:
comp_out = comp_fun(*input_)
tb_comp_in = None
except Exception as ex: # oops! bad input!
tb_comp_in = traceback.TracebackException.from_exception(ex)
if tb_true is not None: # there was an error building the true function
tb_true_in = tb_true
else: # let's try the true function on this input
try:
true_out = true_fun(*input_)
tb_true_in = None
except Exception as ex: # oops! bad input!
tb_true_in = traceback.TracebackException.from_exception(ex)
compare_result = _compare_eqn_evaluations(tester,
eqn,
tb_true_in,
tb_comp_in,
input_,
comp_out,
true_out,
acceptable_error_types,
na_equal)
if isinstance(compare_result, str): # fatal error, kill testing now
return compare_result
else: # both inputs are scalars
comp_out = float('nan')
true_out = float('nan')
tb_comp = None
tb_true = None
try:
comp_out = computer(eqn)
except Exception as ex:
tb_comp = traceback.TracebackException.from_exception(ex)
try:
if lambda_eqn:
true_out = eval(lambda_eqn)
else:
true_out = eval(eqn)
except Exception as ex:
tb_true = traceback.TracebackException.from_exception(ex)
compare_result = _compare_eqn_evaluations(tester,
eqn,
tb_true,
tb_comp,
None,
comp_out,
true_out,
acceptable_error_types,
na_equal)
if isinstance(compare_result, str): # fatal error, kill testing now
return compare_result
def test_compute_one_op(computer = compute,
include_comparisons = True,
include_logic = True,
include_strings = True,
acceptable_error_types = numeric_acceptable_errors,
na_equal = True):
tester = ComputeTester()
tokens = ['x', '3', '5.7432', '0', '-4', '-2.3', 'w', "`ab`"]
inputs = [3, 0.5, -4, -5.2, 0, frozenset({'1',2,(1,2)}), '123']
funcs = ['+', '-', '*', '/', '//', '%', '**']
comparison_funcs = ['>', '<', '<=', '>=', '==', '!=']
ufunctions = list(globals()['safe_ufunctions']) + ['']
if include_logic:
funcs = ['+', '-', '*', '//', '%', '**', '^', '|', '&']
if include_comparisons:
funcs += comparison_funcs
if include_strings:
ufunctions = ['sum', 'len','int','float','str','tuple', 'not', 'set', '']
funcs += ['in']
for (ufunc1, in1, func, ufunc2, in2) in itertools.product(ufunctions,
tokens,
funcs,
ufunctions,
tokens):
eqn = ' {u1}({i1}) {f} ({u2}({i2}))'.format(u1=ufunc1, i1=in1, f=func, u2=ufunc2, i2=in2)
lambda_eqn = None
eval_result = evaluate_eqn_on_inputs(computer,
eqn,
inputs,
tester,
acceptable_error_types,
na_equal,
lambda_eqn)
if eval_result is not None:
print(eval_result)
return
tester._build_dframe()
return tester
def test_map(computer = compute,
acceptable_error_types = numeric_acceptable_errors + '|' + backtickstring_acceptable_errors,
na_equal = True):
tester = ComputeTester()
ufunctions = ['str', 'tuple','set', '']
inputs = [five_to_the_x, '123', frozenset({2, 5, 9})]
tokens = ['w', "`int`", "`str(x)+\\`3\\``", "`a\\`bun\\`bar`", 'x']
oldlen = 0
for tok1, ufunc, tok2, input_ in itertools.product(tokens,
ufunctions,
tokens,
inputs):
eqn = 'tuple( {t1} map {uf}({t2}))'.format(t1=tok1, uf=ufunc, t2=tok2)
if tok1 == "`str(x)+\\`3\\``":
tok1_adj = "lambda x: str(x) + '3'"
elif tok1 == '`int`':
tok1_adj = 'int'
else:
tok1_adj = tok1
lambda_eqn = 'tuple(map({t1a}, {uf}({t2})))'.format(t1a=tok1_adj, uf=ufunc, t2=tok2)
lambda_eqn = re.sub("(?<!\\\\)`", "'", lambda_eqn).replace("\\`", "`")
eval_result = evaluate_eqn_on_inputs(computer,
eqn,
inputs,
tester,
acceptable_error_types,
na_equal,
lambda_eqn)
if eval_result is not None:
print(eval_result)
return
tester._build_dframe()
return tester
def test_compute_two_ops(computer = compute,
include_comparisons = True,
include_logic = False,
acceptable_error_types = numeric_acceptable_errors,
na_equal = True):
tester = ComputeTester()
inputs = [1, 0.5, -4, -5.2, 0]
funcs = ['+', '-', '*', '/', '//', '%', '**']
tokens = ['x', '3', '5.7432', '0', '-4', '-2.3', 'w', "`ab`"]
if include_logic:
funcs = ['+', '-', '*', '//', '%', '**', '^', '|', '&']
tokens = ['a', '1', '-2', '0', 'b', 'c']
inputs = [2, 0, -3]
if include_comparisons:
funcs += ['>', '<', '<=', '>=', '==', '!=']
for (optional_uminus, in1, func1, in2, func2, in3) in itertools.product(['', '-'], tokens, funcs, tokens, funcs, tokens):
eqn = optional_uminus + in1 + func1 + in2 + func2 + in3
if include_logic and ('**' in eqn):
eqn = re.sub("\*\*-", '**', eqn)
inputs = [2, 0]
eval_result = evaluate_eqn_on_inputs(computer,
eqn,
inputs,
tester,
acceptable_error_types,
na_equal)
if eval_result is not None:
print(eval_result)
return
tester._build_dframe()
return tester
def test_getitem(computer = compute,
acceptable_error_types = numeric_acceptable_errors + '|' + backtickstring_acceptable_errors,
na_equal = True):
tester = ComputeTester()
inputs = [ImmutableDict({'a': 1, 1: 2}),
ImmutableDict({'b': 3}),
'123',
(7, 8, 9, 10, 11)]
iterables = ['w', "tuple(`str` map y)", "str(z)", "`a\\`bun\\`bar`",
'-x']
slicers = ['`a`', '1', '1:', ':', '-3::-1', ':4:2', ':3', 'int(x[0])']
oldlen = 0
for itbl, inp, slicer in itertools.product(iterables, inputs, slicers):
eqn = '{it}[{sli}]'.format(it=itbl, sli=slicer)
if itbl == "tuple(`str` map y)":
itbl_adj = "tuple(map(str, y))"
else:
itbl_adj = itbl
lambda_eqn = '{ita}[{sli}]'.format(ita=itbl_adj, sli = slicer)
lambda_eqn = re.sub("(?<!\\\\)`", "'", lambda_eqn).replace("\\`", "`")
eval_result = evaluate_eqn_on_inputs(computer,
eqn,
inputs,
tester,
acceptable_error_types,
na_equal,
lambda_eqn)
if eval_result is not None:
print(eval_result)
return
tester._build_dframe()
return tester
def examine_bad_two_ops(tester2):
bad_ops = {}
for eqn in tester2.bad_eqns:
if isinstance(eqn, tuple):
eqn, input_ = eqn
else:
input_ = None
split_eqn = op_regex.split(eqn)
bad_ops.setdefault((split_eqn[1],split_eqn[3]), set())
bad_ops[(split_eqn[1], split_eqn[3])].add(input_)
return bad_ops
def make_random_eqn(num_ops = 5,
num_vars = 1,
intrange = range(-20, 21),
floatrange = range(-20, 21),
include_comparisons = True,
include_logic = False,
include_ufunctions = False):
out = ''
varnames = []
for varnum in range(num_vars):
var_ = ''.join(random.sample(string.ascii_letters, random.randint(1,2)))
while var_ in varnames:
var_ = ''.join(random.sample(string.ascii_letters, random.randint(1,2)))
try: # check if this varname is syntactically valid in Python
fun = eval("lambda {}: 1".format(var_))
except: # we randomly generated a reserved word as a varname
var_ = 'x'*(varnum+3)
if include_ufunctions and random.random() < 0.5:
var_ += random.choice(['ii','ff'])
varnames.append(var_)
comparators = ['>', '<', '>=', '<=', '==', '!=']
logic_funcs = ['^', '|', '&']
ufunctions = ['int', 'float', 'not']
if include_logic:
# bitwise logic functions raise TypeErrors when used with floats
vartypes = [int] + [str]*num_vars
funcs = ['-', '+', '*', '//', '%', '**']
intrange = range(0, intrange.stop)
else:
vartypes = [int, float]+[str]*num_vars
funcs = ['-', '+', '*', '/', '//', '%', '**']
ufunc = False
parens_opened = []
for opnum in range(num_ops):
if random.random() < 0.3:
if include_logic:
if out[-2:] != '**':
# if the last token was exponentiation, the unary minus would lead
# exponentiation to a negative power, and the integers are not
# closed under that operation
out += '-'
else:
pass
else:
out += '-'
vartype = random.choice(vartypes)
if not parens_opened:
if random.random() < 0.25:
if random.random() < 0.5 and include_ufunctions:
out += '(' + random.choice(ufunctions)
parens_opened.append(opnum)
ufunc = True
out += '('
parens_opened.append(opnum)
if vartype == int:
out += str(random.choice(intrange))
elif vartype == float:
out += str(round(random.choice(floatrange) + random.random(), 4))
else:
out += random.choice(varnames)
if random.random() < min(1, 0.33333*len(parens_opened)):
if ufunc:
out += ')'
parens_opened.pop(0)
ufunc = False
out += ')'
parens_opened.pop(0)
rand = random.random()
if include_comparisons and include_logic:
if rand < 0.18:
out += random.choice(comparators)
elif rand > 0.82:
out += random.choice(logic_funcs)
else:
out += random.choice(funcs)
elif include_logic:
if rand < 0.25:
out += random.choice(logic_funcs)
else:
out += random.choice(funcs)
elif include_comparisons:
if rand < 0.25:
out += random.choice(comparators)
else:
out += random.choice(funcs)
else:
out += random.choice(funcs)
vartype = random.choice(vartypes)
if vartype == int:
out += str(random.choice(intrange))
elif vartype == float:
out += str(round(random.choice(intrange) + random.random(), 4))
else:
out += random.choice(varnames)
for ii in range(len(parens_opened)):
out += ')'
return out
def make_random_backtickstring_eqn(num_ops = 5,
num_vars = 1,
intrange = range(-1, 50),
include_comparisons = True,
include_logic = False,
include_ufunctions = True):
out = ''
vartypes = [int, str] + ['var']*num_vars
comparison_since_last_logic = False
logic_since_last_comparison = True
comparisons = 0
funcs = ['+', '*']
comparators = ['>', '<', '==', '!=', '<=', '>=', '=~']
logic_funcs = ['^', '|', '&']
ufunctions = ['int', 'float', 'not', 'str', 'tuple', 'len', 'set']
varnames = []
for varnum in range(num_vars):
var_ = ''.join(random.sample(string.ascii_letters, random.randint(1,2)))
while var_ in varnames:
var_ = ''.join(random.sample(string.ascii_letters, random.randint(1,2)))
try: # check if this varname is syntactically valid in Python
fun = eval("lambda {}: 1".format(var_))
except: # we randomly generated a reserved word as a varname
var_ = 'x'*(varnum+3)
varnames.append(var_)
parens_opened = []
for opnum in range(num_ops):
if not parens_opened:
if random.random() < 0.25:
if random.random() < 0.5 and include_ufunctions:
out += random.choice(ufunctions)
out += '('
parens_opened.append(opnum)
vartype = random.choice(vartypes)
if vartype == int:
out += str(random.choice(intrange))
elif vartype == str:
out += '`' + ''.join(random.choices(string.ascii_letters+'1234567890', k=3)) + '`'
else:
out += random.choice(varnames)
if random.random() < min(1, 0.33333*len(parens_opened)):
out += ')'
parens_opened.pop(0)
rand = random.random()
if include_comparisons and include_logic:
if (rand < 0.18 or not comparison_since_last_logic) and logic_since_last_comparison and (comparisons < 2):
comparison_since_last_logic = True
logic_since_last_comparison = False
comparisons += 1
out += random.choice(comparators)
elif (rand > 0.82 and comparison_since_last_logic) or not logic_since_last_comparison:
out += random.choice(logic_funcs)
comparison_since_last_logic = False
logic_since_last_comparison = True
else:
out += random.choice(funcs)
elif include_comparisons:
if rand < 0.25:
out += random.choice(comparators)
else:
out += random.choice(funcs)
else:
out += random.choice(funcs)
vartype = random.choice(vartypes)
if vartype == int:
out += str(random.choice(intrange))
elif vartype == str:
out += '`' + re.sub('`', '\\`',
''.join(random.choices(string.printable, k=3))) + '`'
else:
out += random.choice(varnames)
for ii in range(len(parens_opened)):
out += ')'
return out
def test_random_eqns(computer = compute,
n = 2000,
num_inputs = 4,
sizerange = range(6, 9),
numvar_range = range(0,5),
intrange = range(-20, 21),
floatrange = range(-20, 21),
include_comparisons = False,
include_logic = False,
include_ufunctions = False,
acceptable_error_types = numeric_acceptable_errors,
na_equal = True):
tester = ComputeTester()
inputs = []
for ii in range(-2,num_inputs-2):
if int(ii/2)==ii:
inputs.append(int(ii/2))
elif not include_logic:
inputs.append(ii/2)
for ii in range(n):
eqn = make_random_eqn(random.choice(sizerange),
random.choice(numvar_range),
intrange,
floatrange,
include_comparisons,
include_logic,
include_ufunctions)
eval_result = evaluate_eqn_on_inputs(computer,
eqn,
inputs,
tester,
acceptable_error_types,
na_equal)
# return inputs, eqn, eval_result
if eval_result is not None:
print(eval_result)
return
tester._build_dframe()
return tester
def test_random_backtickstring_eqns(computer = compute,
n = 2000,
num_inputs = 5,
sizerange = range(6, 9),
numvar_range = range(0,5),
intrange = range(-3, 50),
include_comparisons = True,
include_logic = True,
include_ufunctions = False,
acceptable_error_types = backtickstring_acceptable_errors,
na_equal = True):
tester = ComputeTester()
inputs = list(range(-1, -1+num_inputs))
for ii in range(n):
eqn = make_random_backtickstring_eqn(random.choice(sizerange),
random.choice(numvar_range),
intrange,
include_comparisons,
include_logic,
include_ufunctions)
lambda_eqn = re.sub(r'\\?`','"', eqn)
eval_result = evaluate_eqn_on_inputs(computer,
eqn,
inputs,
tester,
acceptable_error_types,
na_equal,
lambda_eqn)
# return inputs, eqn, eval_result
if eval_result is not None:
print(eval_result)
return
tester._build_dframe()
return tester
def test_IntRange():
ranges = [range(1,2),
range(17),
range(5, 72, 4),
range(115, 40, -5),
range(1, 1),
range(1, -1)]
IntRanges = [IntRange(1, 2),
IntRange(17),
IntRange(5, 72, 4),
IntRange(115, 40, -5),
IntRange(1, 1),
IntRange(1, -1)]
results = []
for rng, irng in zip(ranges, IntRanges):
results.append(set(rng).symmetric_difference(set(irng)))
return results, ranges, IntRanges
if __name__ == '__main__':
pass
for rst, rng, irng in zip(*test_IntRange()):
if len(rst) != 0:
print("Expected {} to have same contents as {}, but it didn't!".format(irng, rng))
tester1 = test_compute_one_op(computer = compute, include_logic = True, include_strings = True, acceptable_error_types = '')
if tester1.dframe is not None:
df1 = tester1.dframe
del df1['operators']
bad1 = df1.loc[df1.statuses == 'bad', df1.columns!='statuses']
testermap = test_map(acceptable_error_types = '')
if testermap.dframe is not None:
dfmap = testermap.dframe
del dfmap['operators']
badmap = dfmap.loc[dfmap.statuses == 'bad', dfmap.columns!='statuses']
testerg = test_getitem(acceptable_error_types='')
if testerg.dframe is not None:
dfg = testerg.dframe
badg = dfg[dfg.statuses=='bad']
dfg_errors = dfg[['comp_errors', 'true_errors']].value_counts()
tester2 = test_compute_two_ops(computer = compute, include_logic = True)
if tester2.dframe is not None:
bad2 = tester2.dframe[tester2.dframe.statuses == 'bad']
bad2 = pd.concat((bad2, bad2.operators.astype(str).str.split(', ',expand=True).rename({ii: 'op'+str(ii+1) for ii in range(6)},axis = 1)), axis = 1)
bad2_ops = [bad2['op'+str(ii)].value_counts() for ii in range(1,7)]
assert all(bad2.operators.astype(str).str.count("\||\^|&|[<=>!]=?")==2), \
'There was at least one case in which an equation with two operators failed for some reason other than having two logical/comparison operators.'
# we know that every time we have two comparison/logical operators without
# appropriate grouping parentheses, my function will get a different answer
# from the corresponding lambda functions, but only because we follow different
# orders of operations.
# Thus, we only care about different return values due to something other than
# the order of operations.
tester3 = test_random_eqns(n = 25_000,
numvar_range = range(0,5),
sizerange = range(6, 9),
intrange = range(-3, 4),
floatrange = range(-3, 4),
acceptable_error_types = numeric_acceptable_errors,
include_ufunctions = True)
if tester3.dframe is not None:
df3 = tester3.dframe
bad3 = df3.loc[df3.statuses=='bad', df3.columns!='statuses']
bad3_errors = bad3[['comp_errors','true_errors']].value_counts()
df3_errors = df3[['comp_errors', 'true_errors']].value_counts()
# tester4 = test_random_backtickstring_eqns(10000, acceptable_error_types = '')
# if tester4.dframe is not None:
# df4 = tester4.dframe
# bad4 = df4.loc[df4.statuses=='bad', df4.columns!='statuses']
# badeqn = '`z^H`>gJ^(`ecC`<gJ)+`|fu`<S+gJ+S'
# tester = ComputeTester()
| 42.854916 | 156 | 0.499398 |
92613347c7a30571da6e97e101862845c1aad2f5
| 873 |
py
|
Python
|
code/tmp_rtrip/encodings/utf_7.py
|
emilyemorehouse/ast-and-me
|
3f58117512e125e1ecbe3c72f2f0d26adb80b7b3
|
[
"MIT"
] | 24 |
2018-01-23T05:28:40.000Z
|
2021-04-13T20:52:59.000Z
|
code/tmp_rtrip/encodings/utf_7.py
|
emilyemorehouse/ast-and-me
|
3f58117512e125e1ecbe3c72f2f0d26adb80b7b3
|
[
"MIT"
] | 17 |
2017-12-21T18:32:31.000Z
|
2018-12-18T17:09:50.000Z
|
code/tmp_rtrip/encodings/utf_7.py
|
emilyemorehouse/ast-and-me
|
3f58117512e125e1ecbe3c72f2f0d26adb80b7b3
|
[
"MIT"
] | null | null | null |
""" Python 'utf-7' Codec
Written by Brian Quinlan ([email protected]).
"""
import codecs
encode = codecs.utf_7_encode
def decode(input, errors='strict'):
return codecs.utf_7_decode(input, errors, True)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.utf_7_encode(input, self.errors)[0]
class IncrementalDecoder(codecs.BufferedIncrementalDecoder):
_buffer_decode = codecs.utf_7_decode
class StreamWriter(codecs.StreamWriter):
encode = codecs.utf_7_encode
class StreamReader(codecs.StreamReader):
decode = codecs.utf_7_decode
def getregentry():
return codecs.CodecInfo(name='utf-7', encode=encode, decode=decode,
incrementalencoder=IncrementalEncoder, incrementaldecoder=
IncrementalDecoder, streamreader=StreamReader, streamwriter=
StreamWriter)
| 24.25 | 71 | 0.756014 |
b2c8606da1e32522afef65e00801d52edb9cb597
| 131,274 |
py
|
Python
|
venv/Lib/site-packages/music21/midi/translate.py
|
alimirzazadeh/wolfGANg
|
5bf56f7d8e6c1c283edb98bdaecfd5a606b4462c
|
[
"MIT"
] | 1 |
2022-01-28T00:03:19.000Z
|
2022-01-28T00:03:19.000Z
|
venv/Lib/site-packages/music21/midi/translate.py
|
alimirzazadeh/wolfGANg
|
5bf56f7d8e6c1c283edb98bdaecfd5a606b4462c
|
[
"MIT"
] | null | null | null |
venv/Lib/site-packages/music21/midi/translate.py
|
alimirzazadeh/wolfGANg
|
5bf56f7d8e6c1c283edb98bdaecfd5a606b4462c
|
[
"MIT"
] | 1 |
2021-11-23T00:49:26.000Z
|
2021-11-23T00:49:26.000Z
|
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
# Name: midi.translate.py
# Purpose: Translate MIDI and music21 objects
#
# Authors: Christopher Ariza
# Michael Scott Cuthbert
#
# Copyright: Copyright © 2010-2015, 2019 Michael Scott Cuthbert and the music21 Project
# License: BSD, see license.txt
# ------------------------------------------------------------------------------
'''
Module to translate MIDI data to music21 Streams and vice versa. Note that quantization of
notes takes place in the :meth:`~music21.stream.Stream.quantize` method not here.
'''
import unittest
import math
import copy
from typing import Optional, List, Tuple, Dict, Union, Any
from music21 import chord
from music21 import common
from music21 import defaults
from music21 import note
from music21 import exceptions21
from music21 import environment
from music21 import stream
from music21.instrument import Conductor, deduplicate
_MOD = 'midi.translate'
environLocal = environment.Environment(_MOD)
# ------------------------------------------------------------------------------
class TranslateException(exceptions21.Music21Exception):
pass
# ------------------------------------------------------------------------------
# Durations
def offsetToMidiTicks(o, addStartDelay=False):
'''
Helper function to convert a music21 offset value to MIDI ticks,
depends on *defaults.ticksPerQuarter* and *defaults.ticksAtStart*.
Returns an int.
>>> defaults.ticksPerQuarter
1024
>>> defaults.ticksAtStart
1024
>>> midi.translate.offsetToMidiTicks(0)
0
>>> midi.translate.offsetToMidiTicks(0, addStartDelay=True)
1024
>>> midi.translate.offsetToMidiTicks(1)
1024
>>> midi.translate.offsetToMidiTicks(20.5)
20992
'''
ticks = int(round(o * defaults.ticksPerQuarter))
if addStartDelay:
ticks += defaults.ticksAtStart
return ticks
def durationToMidiTicks(d):
# noinspection PyShadowingNames
'''
Converts a :class:`~music21.duration.Duration` object to midi ticks.
Depends on *defaults.ticksPerQuarter*, Returns an int.
Does not use defaults.ticksAtStart
>>> n = note.Note()
>>> n.duration.type = 'half'
>>> midi.translate.durationToMidiTicks(n.duration)
2048
>>> d = duration.Duration('quarter')
>>> dReference = midi.translate.ticksToDuration(1024, inputM21DurationObject=d)
>>> dReference is d
True
>>> d.type
'quarter'
>>> d.type = '16th'
>>> d.quarterLength
0.25
>>> midi.translate.durationToMidiTicks(d)
256
'''
return int(round(d.quarterLength * defaults.ticksPerQuarter))
def ticksToDuration(ticks, ticksPerQuarter=None, inputM21DurationObject=None):
# noinspection PyShadowingNames
'''
Converts a number of MIDI Ticks to a music21 duration.Duration() object.
Optional parameters include ticksPerQuarter -- in case something other
than the default.ticksPerQuarter (1024) is used in this file. And
it can take a :class:`~music21.duration.Duration` object to modify, specified
as *inputM21DurationObject*
>>> d = midi.translate.ticksToDuration(1024)
>>> d
<music21.duration.Duration 1.0>
>>> d.type
'quarter'
>>> n = note.Note()
>>> midi.translate.ticksToDuration(3072, inputM21DurationObject=n.duration)
<music21.duration.Duration 3.0>
>>> n.duration.type
'half'
>>> n.duration.dots
1
More complex rhythms can also be set automatically:
>>> d2 = duration.Duration()
>>> d2reference = midi.translate.ticksToDuration(1200, inputM21DurationObject=d2)
>>> d2 is d2reference
True
>>> d2.quarterLength
1.171875
>>> d2.type
'complex'
>>> d2.components
(DurationTuple(type='quarter', dots=0, quarterLength=1.0),
DurationTuple(type='32nd', dots=0, quarterLength=0.125),
DurationTuple(type='128th', dots=1, quarterLength=0.046875))
>>> d2.components[2].type
'128th'
>>> d2.components[2].dots
1
'''
if inputM21DurationObject is None:
from music21 import duration
d = duration.Duration()
else:
d = inputM21DurationObject
if ticksPerQuarter is None:
ticksPerQuarter = defaults.ticksPerQuarter
# given a value in ticks
d.quarterLength = float(ticks) / ticksPerQuarter
return d
# ------------------------------------------------------------------------------
# utility functions for getting commonly used event
def getStartEvents(mt=None, channel=1, instrumentObj=None):
'''
Returns a list of midi.MidiEvent objects found at the beginning of a track.
A MidiTrack reference can be provided via the `mt` parameter.
>>> midi.translate.getStartEvents()
[<MidiEvent DeltaTime, t=0, track=None, channel=1>,
<MidiEvent SEQUENCE_TRACK_NAME, t=0, track=None, channel=1, data=b''>]
>>> midi.translate.getStartEvents(channel=2, instrumentObj=instrument.Harpsichord())
[<MidiEvent DeltaTime, t=0, track=None, channel=2>,
<MidiEvent SEQUENCE_TRACK_NAME, t=0, track=None, channel=2, data=b'Harpsichord'>,
<MidiEvent DeltaTime, t=0, track=None, channel=2>,
<MidiEvent PROGRAM_CHANGE, t=0, track=None, channel=2, data=6>]
'''
from music21 import midi as midiModule
events = []
if isinstance(instrumentObj, Conductor):
return events
elif instrumentObj is None or instrumentObj.bestName() is None:
partName = ''
else:
partName = instrumentObj.bestName()
dt = midiModule.DeltaTime(mt, channel=channel)
events.append(dt)
me = midiModule.MidiEvent(mt, channel=channel)
me.type = midiModule.MetaEvents.SEQUENCE_TRACK_NAME
me.data = partName
events.append(me)
# additional allocation of instruments may happen elsewhere
# this may lead to two program changes happening at time zero
# however, this assures that the program change happens before the
# clearing of the pitch bend data
if instrumentObj is not None and instrumentObj.midiProgram is not None:
sub = instrumentToMidiEvents(instrumentObj, includeDeltaTime=True,
channel=channel)
events += sub
return events
def getEndEvents(mt=None, channel=1):
'''
Returns a list of midi.MidiEvent objects found at the end of a track.
>>> midi.translate.getEndEvents(channel=2)
[<MidiEvent DeltaTime, t=1024, track=None, channel=2>,
<MidiEvent END_OF_TRACK, t=0, track=None, channel=2, data=b''>]
'''
from music21 import midi as midiModule
events = []
dt = midiModule.DeltaTime(track=mt, channel=channel)
dt.time = defaults.ticksAtStart
events.append(dt)
me = midiModule.MidiEvent(track=mt)
me.type = midiModule.MetaEvents.END_OF_TRACK
me.channel = channel
me.data = '' # must set data to empty string
events.append(me)
return events
# ------------------------------------------------------------------------------
# Multi-object conversion
def music21ObjectToMidiFile(
music21Object,
*,
addStartDelay=False,
) -> 'music21.midi.MidiFile':
'''
Either calls streamToMidiFile on the music21Object or
puts a copy of that object into a Stream (so as
not to change activeSites, etc.) and calls streamToMidiFile on
that object.
'''
classes = music21Object.classes
if 'Stream' in classes:
if music21Object.atSoundingPitch is False:
music21Object = music21Object.toSoundingPitch()
return streamToMidiFile(music21Object, addStartDelay=addStartDelay)
else:
m21ObjectCopy = copy.deepcopy(music21Object)
s = stream.Stream()
s.insert(0, m21ObjectCopy)
return streamToMidiFile(s, addStartDelay=addStartDelay)
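# --- Editor's illustrative sketch (not part of the original module) ---
# music21ObjectToMidiFile() accepts either a Stream or a bare Music21Object; the
# helper below simply shows both call shapes. It assumes only names already
# imported in this module (note, stream) plus the function defined just above.
def _sketchMusic21ObjectToMidiFile():
    singleNote = note.Note('C4')                     # bare object: gets wrapped in a Stream
    mfFromNote = music21ObjectToMidiFile(singleNote)
    s = stream.Stream()
    s.append(note.Note('E4'))
    mfFromStream = music21ObjectToMidiFile(s)        # Stream: converted directly
    return mfFromNote, mfFromStream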
# ------------------------------------------------------------------------------
# Notes
def midiEventsToNote(eventList, ticksPerQuarter=None, inputM21=None):
# noinspection PyShadowingNames
'''
Convert from a list of midi.DeltaTime and midi.MidiEvent objects to a music21 Note.
The list can be presented in one of two forms:
[deltaTime1, midiEvent1, deltaTime2, midiEvent2]
or
[(deltaTime1, midiEvent1), (deltaTime2, midiEvent2)]
It is assumed, but not checked, that midiEvent2 is an appropriate Note_Off command. Thus, only
three elements are really needed.
The `inputM21` parameter can be a Note or None; in the case of None, a Note object is created.
In either case it returns a Note (N.B.: this will change soon so that None will be returned
if `inputM21` is given. This will match the behavior of other translate objects).
N.B. this takes in a list of music21 MidiEvent objects so see [...] on how to
convert raw MIDI data to MidiEvent objects
In this example, a NOTE_ON event follows a DeltaTime of 1024 ticks and the matching
zero-velocity NOTE_ON (=NOTE_OFF) event for the same pitch follows a DeltaTime of
2048 ticks; this routine takes the difference of the two delta values, so the
resulting Note lasts 1.0 quarter note, as the doctest below shows.
>>> mt = midi.MidiTrack(1)
>>> dt1 = midi.DeltaTime(mt)
>>> dt1.time = 1024
>>> me1 = midi.MidiEvent(mt)
>>> me1.type = midi.ChannelVoiceMessages.NOTE_ON
>>> me1.pitch = 45
>>> me1.velocity = 94
>>> dt2 = midi.DeltaTime(mt)
>>> dt2.time = 2048
>>> me2 = midi.MidiEvent(mt)
>>> me2.type = midi.ChannelVoiceMessages.NOTE_ON
>>> me2.pitch = 45
>>> me2.velocity = 0
>>> n = midi.translate.midiEventsToNote([dt1, me1, dt2, me2])
>>> n.pitch
<music21.pitch.Pitch A2>
>>> n.duration.quarterLength
1.0
>>> n.volume.velocity
94
An `inputM21` Note can be given, in which case its pitch, duration, and volume are set in place.
>>> m = note.Note()
>>> dummy = midi.translate.midiEventsToNote([dt1, me1, dt2, me2], inputM21=m)
>>> m.pitch
<music21.pitch.Pitch A2>
>>> m.duration.quarterLength
1.0
>>> m.volume.velocity
94
'''
if inputM21 is None:
n = note.Note()
else:
n = inputM21
if ticksPerQuarter is None:
ticksPerQuarter = defaults.ticksPerQuarter
# pre sorted from a stream
if len(eventList) == 2:
tOn, eOn = eventList[0]
tOff, unused_eOff = eventList[1]
# a representation closer to stream
elif len(eventList) == 4:
# delta times are first and third
dur = eventList[2].time - eventList[0].time
# shift to start at zero; only care about duration here
tOn, eOn = 0, eventList[1]
tOff, unused_eOff = dur, eventList[3]
else:
raise TranslateException(f'cannot handle MIDI event list in the form: {eventList!r}')
n.pitch.midi = eOn.pitch
n.volume.velocity = eOn.velocity
n.volume.velocityIsRelative = False # not relative coming from MIDI
# n._midiVelocity = eOn.velocity
# here we are handling an issue that might arise with double-stemmed notes
if (tOff - tOn) != 0:
ticksToDuration(tOff - tOn, ticksPerQuarter, n.duration)
else:
# environLocal.printDebug(['cannot translate found midi event with zero duration:', eOn, n])
# for now, substitute grace note
n.getGrace(inPlace=True)
return n
def noteToMidiEvents(inputM21, includeDeltaTime=True, channel=1):
# noinspection PyShadowingNames
'''
Translate a music21 Note to a list of four MIDI events --
the DeltaTime for the start of the note (0), the NOTE_ON event, the
DeltaTime to the end of the note, and the NOTE_OFF event.
If `includeDeltaTime` is not True then the DeltaTime events
aren't returned, thus only two events are returned.
The initial deltaTime object is always 0. It will be changed when
processing Notes from a Stream.
The `channel` can be specified, otherwise channel 1 is assumed.
>>> n1 = note.Note('C#4')
>>> eventList = midi.translate.noteToMidiEvents(n1)
>>> eventList
[<MidiEvent DeltaTime, t=0, track=None, channel=1>,
<MidiEvent NOTE_ON, t=0, track=None, channel=1, pitch=61, velocity=90>,
<MidiEvent DeltaTime, t=1024, track=None, channel=1>,
<MidiEvent NOTE_OFF, t=0, track=None, channel=1, pitch=61, velocity=0>]
>>> n1.duration.quarterLength = 2.5
>>> eventList = midi.translate.noteToMidiEvents(n1)
>>> eventList
[<MidiEvent DeltaTime, t=0, track=None, channel=1>,
<MidiEvent NOTE_ON, t=0, track=None, channel=1, pitch=61, velocity=90>,
<MidiEvent DeltaTime, t=2560, track=None, channel=1>,
<MidiEvent NOTE_OFF, t=0, track=None, channel=1, pitch=61, velocity=0>]
Omitting DeltaTimes:
>>> eventList2 = midi.translate.noteToMidiEvents(n1, includeDeltaTime=False, channel=9)
>>> eventList2
[<MidiEvent NOTE_ON, t=0, track=None, channel=9, pitch=61, velocity=90>,
<MidiEvent NOTE_OFF, t=0, track=None, channel=9, pitch=61, velocity=0>]
'''
from music21 import midi as midiModule
n = inputM21
mt = None # use a midi track set to None
eventList = []
if includeDeltaTime:
dt = midiModule.DeltaTime(mt, channel=channel)
# add to track events
eventList.append(dt)
me1 = midiModule.MidiEvent(track=mt)
me1.type = midiModule.ChannelVoiceMessages.NOTE_ON
me1.channel = channel
me1.pitch = n.pitch.midi
if not n.pitch.isTwelveTone():
me1.centShift = n.pitch.getCentShiftFromMidi()
# TODO: not yet using dynamics or velocity
# volScalar = n.volume.getRealized(useDynamicContext=False,
# useVelocity=True, useArticulations=False)
# use cached realized, as realized values should have already been set
me1.velocity = int(round(n.volume.cachedRealized * 127))
eventList.append(me1)
if includeDeltaTime:
# add note off / velocity zero message
dt = midiModule.DeltaTime(mt, channel=channel)
dt.time = durationToMidiTicks(n.duration)
# add to track events
eventList.append(dt)
me2 = midiModule.MidiEvent(track=mt)
me2.type = midiModule.ChannelVoiceMessages.NOTE_OFF
me2.channel = channel
me2.pitch = n.pitch.midi
if not n.pitch.isTwelveTone():
me2.centShift = n.pitch.getCentShiftFromMidi()
me2.velocity = 0 # must be zero
eventList.append(me2)
# set correspondence
me1.correspondingEvent = me2
me2.correspondingEvent = me1
return eventList
# ------------------------------------------------------------------------------
# Chords
def midiEventsToChord(eventList, ticksPerQuarter=None, inputM21=None):
# noinspection PyShadowingNames
'''
Creates a Chord from a list of :class:`~music21.midi.DeltaTime`
and :class:`~music21.midi.MidiEvent` objects. See midiEventsToNote
for details.
All DeltaTime objects except the first are ignored.
>>> mt = midi.MidiTrack(1)
>>> dt1 = midi.DeltaTime(mt)
>>> me1 = midi.MidiEvent(mt)
>>> me1.type = midi.ChannelVoiceMessages.NOTE_ON
>>> me1.pitch = 45
>>> me1.velocity = 94
>>> dt2 = midi.DeltaTime(mt)
>>> me2 = midi.MidiEvent(mt)
>>> me2.type = midi.ChannelVoiceMessages.NOTE_ON
>>> me2.pitch = 46
>>> me2.velocity = 94
>>> dt3 = midi.DeltaTime(mt)
>>> dt3.time = 2048
>>> me3 = midi.MidiEvent(mt)
>>> me3.type = midi.ChannelVoiceMessages.NOTE_OFF
>>> me3.pitch = 45
>>> me3.velocity = 0
>>> dt4 = midi.DeltaTime(mt)
>>> me4 = midi.MidiEvent(mt)
>>> me4.type = midi.ChannelVoiceMessages.NOTE_OFF
>>> me4.pitch = 46
>>> me4.velocity = 0
>>> c = midi.translate.midiEventsToChord([dt1, me1, dt2, me2, dt3, me3, dt4, me4])
>>> c
<music21.chord.Chord A2 B-2>
>>> c.duration.quarterLength
2.0
'''
tOn = 0
if inputM21 is None:
c = chord.Chord()
else:
c = inputM21
if ticksPerQuarter is None:
ticksPerQuarter = defaults.ticksPerQuarter
from music21 import pitch
from music21 import volume
pitches = []
volumes = []
# this is a format provided by the Stream conversion of
# midi events; it pre groups events for a chord together in nested pairs
# of abs start time and the event object
if isinstance(eventList, list) and isinstance(eventList[0], tuple):
# pairs of pairs
tOff = eventList[0][1][0]
for onPair, unused_offPair in eventList:
tOn, eOn = onPair
p = pitch.Pitch()
p.midi = eOn.pitch
pitches.append(p)
v = volume.Volume(velocity=eOn.velocity)
v.velocityIsRelative = False # velocity is absolute coming from MIDI
volumes.append(v)
# assume it is a flat list
else:
onEvents = eventList[:(len(eventList) // 2)]
offEvents = eventList[(len(eventList) // 2):]
# first is always delta time
tOn = onEvents[0].time
tOff = offEvents[0].time
# create pitches for the odd on Events:
for i in range(1, len(onEvents), 2):
p = pitch.Pitch()
p.midi = onEvents[i].pitch
pitches.append(p)
v = volume.Volume(velocity=onEvents[i].velocity)
v.velocityIsRelative = False # velocity is absolute coming from MIDI
volumes.append(v)
c.pitches = pitches
c.volume = volumes # can set a list to volume property
# can simply use last-assigned pair of tOff, tOn
if (tOff - tOn) != 0:
ticksToDuration(tOff - tOn, ticksPerQuarter, c.duration)
else:
# environLocal.printDebug(['cannot translate found midi event with zero duration:',
# eventList, c])
# for now, get grace
c.getGrace(inPlace=True)
return c
def chordToMidiEvents(inputM21, includeDeltaTime=True):
# noinspection PyShadowingNames
'''
Translates a :class:`~music21.chord.Chord` object to a
list of base.DeltaTime and base.MidiEvents objects.
See noteToMidiEvents above for more details.
>>> c = chord.Chord(['c3', 'g#4', 'b5'])
>>> c.volume = volume.Volume(velocity=90)
>>> c.volume.velocityIsRelative = False
>>> eventList = midi.translate.chordToMidiEvents(c)
>>> eventList
[<MidiEvent DeltaTime, t=0, track=None, channel=None>,
<MidiEvent NOTE_ON, t=0, track=None, channel=1, pitch=48, velocity=90>,
<MidiEvent DeltaTime, t=0, track=None, channel=None>,
<MidiEvent NOTE_ON, t=0, track=None, channel=1, pitch=68, velocity=90>,
<MidiEvent DeltaTime, t=0, track=None, channel=None>,
<MidiEvent NOTE_ON, t=0, track=None, channel=1, pitch=83, velocity=90>,
<MidiEvent DeltaTime, t=1024, track=None, channel=None>,
<MidiEvent NOTE_OFF, t=0, track=None, channel=1, pitch=48, velocity=0>,
<MidiEvent DeltaTime, t=0, track=None, channel=None>,
<MidiEvent NOTE_OFF, t=0, track=None, channel=1, pitch=68, velocity=0>,
<MidiEvent DeltaTime, t=0, track=None, channel=None>,
<MidiEvent NOTE_OFF, t=0, track=None, channel=1, pitch=83, velocity=0>]
'''
from music21 import midi as midiModule
mt = None # midi track
eventList = []
c = inputM21
# temporary storage for setting correspondence
noteOn = []
noteOff = []
chordVolume = c.volume # use if component volume are not defined
hasComponentVolumes = c.hasComponentVolumes()
for i in range(len(c)):
chordComponent = c[i]
# pitchObj = c.pitches[i]
# noteObj = chordComponent
if includeDeltaTime:
dt = midiModule.DeltaTime(track=mt)
# for a chord, only the first delta time should have the offset
# here, all are zero
# leave dt.time at zero; will be shifted later as necessary
# add to track events
eventList.append(dt)
me = midiModule.MidiEvent(track=mt)
me.type = midiModule.ChannelVoiceMessages.NOTE_ON
me.channel = 1
me.pitch = chordComponent.pitch.midi
if not chordComponent.pitch.isTwelveTone():
me.centShift = chordComponent.pitch.getCentShiftFromMidi()
# if 'volume' in chordComponent:
if hasComponentVolumes:
# volScalar = chordComponent.volume.getRealized(
# useDynamicContext=False,
# useVelocity=True, useArticulations=False)
volScalar = chordComponent.volume.cachedRealized
else:
# volScalar = chordVolume.getRealized(
# useDynamicContext=False,
# useVelocity=True, useArticulations=False)
volScalar = chordVolume.cachedRealized
me.velocity = int(round(volScalar * 127))
eventList.append(me)
noteOn.append(me)
# must create each note-on in the chord before any note-off
for i in range(len(c.pitches)):
pitchObj = c.pitches[i]
if includeDeltaTime:
# add note off / velocity zero message
dt = midiModule.DeltaTime(track=mt)
# for a chord, only the first delta time should have the dur
if i == 0:
dt.time = durationToMidiTicks(c.duration)
eventList.append(dt)
me = midiModule.MidiEvent(track=mt)
me.type = midiModule.ChannelVoiceMessages.NOTE_OFF
me.channel = 1
me.pitch = pitchObj.midi
if not pitchObj.isTwelveTone():
me.centShift = pitchObj.getCentShiftFromMidi()
me.velocity = 0 # must be zero
eventList.append(me)
noteOff.append(me)
# set correspondence
for i, meOn in enumerate(noteOn):
meOff = noteOff[i]
meOn.correspondingEvent = meOff
meOff.correspondingEvent = meOn
return eventList
# ------------------------------------------------------------------------------
def instrumentToMidiEvents(inputM21,
includeDeltaTime=True,
midiTrack=None,
channel=1):
'''
Converts a :class:`~music21.instrument.Instrument` object to a list of MidiEvents
TODO: DOCS and TESTS
'''
from music21 import midi as midiModule
inst = inputM21
mt = midiTrack # midi track
events = []
if includeDeltaTime:
dt = midiModule.DeltaTime(track=mt, channel=channel)
events.append(dt)
me = midiModule.MidiEvent(track=mt)
me.type = midiModule.ChannelVoiceMessages.PROGRAM_CHANGE
me.channel = channel
instMidiProgram = inst.midiProgram
if instMidiProgram is None:
instMidiProgram = 0
me.data = instMidiProgram # key step
events.append(me)
return events
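# --- Editor's illustrative sketch (not part of the original module) ---
# Minimal use of instrumentToMidiEvents(): an Instrument becomes a DeltaTime plus a
# PROGRAM_CHANGE event carrying its midiProgram. The instrument import is local only
# to keep this sketch self-contained.
def _sketchInstrumentToMidiEvents():
    from music21 import instrument
    events = instrumentToMidiEvents(instrument.Harpsichord(), channel=2)
    # events[0] is a DeltaTime; events[1] is a PROGRAM_CHANGE whose data is the
    # instrument's midiProgram (6 for Harpsichord, as in the getStartEvents doctest above)
    return events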
# ------------------------------------------------------------------------------
# Meta events
def midiEventsToInstrument(eventList):
'''
Convert a single MIDI event into a music21 Instrument object.
'''
from music21 import midi as midiModule
if not common.isListLike(eventList):
event = eventList
else: # get the second event; first is delta time
event = eventList[1]
from music21 import instrument
decoded: str = ''
try:
if isinstance(event.data, bytes):
# MuseScore writes MIDI files with null-terminated
# instrument names, so stop at the first 0x00 byte
decoded = event.data.decode('utf-8').split('\x00')[0]
decoded = decoded.strip()
i = instrument.fromString(decoded)
else:
i = instrument.instrumentFromMidiProgram(event.data)
except (instrument.InstrumentException, UnicodeDecodeError): # pragma: no cover
i = instrument.Instrument()
# Set partName or instrumentName with literal value from parsing
if decoded:
# Except for lousy instrument names
if (
decoded.lower() in ('instrument', 'inst')
or decoded.lower().replace('instrument ', '').isdigit()
or decoded.lower().replace('inst ', '').isdigit()
):
return i
elif event.type == midiModule.MetaEvents.SEQUENCE_TRACK_NAME:
i.partName = decoded
elif event.type == midiModule.MetaEvents.INSTRUMENT_NAME:
i.instrumentName = decoded
return i
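# --- Editor's illustrative sketch (not part of the original module) ---
# A SEQUENCE_TRACK_NAME event whose payload is a recognizable instrument name comes
# back as that Instrument, with the decoded name preserved as partName.
def _sketchMidiEventsToInstrument():
    from music21 import midi as midiModule
    me = midiModule.MidiEvent(track=None)
    me.type = midiModule.MetaEvents.SEQUENCE_TRACK_NAME
    me.data = b'Oboe'
    return midiEventsToInstrument(me)   # an instrument.Oboe with partName 'Oboe'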
def midiEventsToTimeSignature(eventList):
# noinspection PyShadowingNames
'''
Convert a single MIDI event into a music21 TimeSignature object.
>>> mt = midi.MidiTrack(1)
>>> me1 = midi.MidiEvent(mt)
>>> me1.type = midi.MetaEvents.TIME_SIGNATURE
>>> me1.data = midi.putNumbersAsList([3, 1, 24, 8]) # 3/2 time
>>> ts = midi.translate.midiEventsToTimeSignature(me1)
>>> ts
<music21.meter.TimeSignature 3/2>
>>> me2 = midi.MidiEvent(mt)
>>> me2.type = midi.MetaEvents.TIME_SIGNATURE
>>> me2.data = midi.putNumbersAsList([3, 4]) # 3/16 time
>>> ts = midi.translate.midiEventsToTimeSignature(me2)
>>> ts
<music21.meter.TimeSignature 3/16>
'''
# http://www.sonicspot.com/guide/midifiles.html
# The time signature defined with 4 bytes, a numerator, a denominator,
# a metronome pulse and number of 32nd notes per MIDI quarter-note.
# The numerator is specified as a literal value, but the denominator
# is specified as (get ready) the value to which the power of 2 must be
# raised to equal the number of subdivisions per whole note. For example,
# a value of 0 means a whole note because 2 to the power of 0 is 1
# (whole note), a value of 1 means a half-note because 2 to the power
# of 1 is 2 (half-note), and so on.
# The metronome pulse specifies how often the metronome should click in
# terms of the number of clock signals per click, which come at a rate
# of 24 per quarter-note. For example, a value of 24 would mean to click
# once every quarter-note (beat) and a value of 48 would mean to click
# once every half-note (2 beats). And finally, the fourth byte specifies
# the number of 32nd notes per 24 MIDI clock signals. This value is usually
# 8 because there are usually 8 32nd notes in a quarter-note. At least one
# Time Signature Event should appear in the first track chunk (or all track
# chunks in a Type 2 file) before any non-zero delta time events. If one
# is not specified 4/4, 24, 8 should be assumed.
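    # Worked example (editor addition): data bytes [6, 3, 24, 8] decode as
    # numerator 6 with denominator 2**3 = 8, i.e. a 6/8 signature; the last two
    # bytes (metronome pulse and 32nds per quarter) are not used below.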
from music21 import meter
from music21 import midi as midiModule
if not common.isListLike(eventList):
event = eventList
else: # get the second event; first is delta time
event = eventList[1]
# time signature is 4 byte encoding
post = midiModule.getNumbersAsList(event.data)
n = post[0]
d = pow(2, post[1])
ts = meter.TimeSignature(f'{n}/{d}')
return ts
def timeSignatureToMidiEvents(ts, includeDeltaTime=True):
# noinspection PyShadowingNames
'''
Translate a :class:`~music21.meter.TimeSignature` to a pair of events: a DeltaTime and
a MidiEvent TIME_SIGNATURE.
Returns a two-element list
>>> ts = meter.TimeSignature('5/4')
>>> eventList = midi.translate.timeSignatureToMidiEvents(ts)
>>> eventList[0]
<MidiEvent DeltaTime, t=0, track=None, channel=None>
>>> eventList[1]
<MidiEvent TIME_SIGNATURE, t=0, track=None, channel=1, data=b'\\x05\\x02\\x18\\x08'>
'''
from music21 import midi as midiModule
mt = None # use a midi track set to None
eventList = []
if includeDeltaTime:
dt = midiModule.DeltaTime(track=mt)
# dt.time set to zero; will be shifted later as necessary
# add to track events
eventList.append(dt)
n = ts.numerator
# need log base 2 to solve for exponent of 2
# 1 is 0, 2 is 1, 4 is 2, 16 is 4, etc
d = int(math.log2(ts.denominator))
metroClick = 24 # clock signals per click, clicks are 24 per quarter
subCount = 8 # number of 32nd notes in a quarter note
me = midiModule.MidiEvent(track=mt)
me.type = midiModule.MetaEvents.TIME_SIGNATURE
me.channel = 1
me.data = midiModule.putNumbersAsList([n, d, metroClick, subCount])
eventList.append(me)
return eventList
def midiEventsToKey(eventList) -> 'music21.key.Key':
# noinspection PyShadowingNames
r'''
Convert a single MIDI KEY_SIGNATURE event into a :class:`~music21.key.Key` object.
>>> mt = midi.MidiTrack(1)
>>> me1 = midi.MidiEvent(mt)
>>> me1.type = midi.MetaEvents.KEY_SIGNATURE
>>> me1.data = midi.putNumbersAsList([2, 0]) # d major
>>> ks = midi.translate.midiEventsToKey(me1)
>>> ks
<music21.key.Key of D major>
>>> ks.mode
'major'
>>> me2 = midi.MidiEvent(mt)
>>> me2.type = midi.MetaEvents.KEY_SIGNATURE
>>> me2.data = midi.putNumbersAsList([-2, 1]) # g minor
>>> me2.data
b'\xfe\x01'
>>> midi.getNumbersAsList(me2.data)
[254, 1]
>>> ks = midi.translate.midiEventsToKey(me2)
>>> ks
<music21.key.Key of g minor>
>>> ks.sharps
-2
>>> ks.mode
'minor'
'''
# This meta event is used to specify the key (number of sharps or flats)
# and scale (major or minor) of a sequence. A positive value for
# the key specifies the number of sharps and a negative value specifies
# the number of flats. A value of 0 for the scale specifies a major key
# and a value of 1 specifies a minor key.
from music21 import key
from music21 import midi as midiModule
if not common.isListLike(eventList):
event = eventList
else: # get the second event; first is delta time
event = eventList[1]
post = midiModule.getNumbersAsList(event.data)
# first value is number of sharp, or neg for number of flat
if post[0] > 12:
# flip around 256
sharpCount = post[0] - 256 # need negative values
else:
sharpCount = post[0]
mode = 'major'
if post[1] == 1:
mode = 'minor'
# environLocal.printDebug(['midiEventsToKey', post, sharpCount])
ks = key.KeySignature(sharpCount)
k = ks.asKey(mode)
return k
def keySignatureToMidiEvents(ks: 'music21.key.KeySignature', includeDeltaTime=True):
# noinspection PyShadowingNames
r'''
Convert a single :class:`~music21.key.Key` or
:class:`~music21.key.KeySignature` object to
a two-element list of midi events,
where the first is an empty DeltaTime (unless includeDeltaTime is False) and the second
is a KEY_SIGNATURE :class:`~music21.midi.MidiEvent`
>>> ks = key.KeySignature(2)
>>> ks
<music21.key.KeySignature of 2 sharps>
>>> eventList = midi.translate.keySignatureToMidiEvents(ks)
>>> eventList
[<MidiEvent DeltaTime, t=0, track=None, channel=None>,
<MidiEvent KEY_SIGNATURE, t=0, track=None, channel=1, data=b'\x02\x00'>]
>>> k = key.Key('b-')
>>> k
<music21.key.Key of b- minor>
>>> eventList = midi.translate.keySignatureToMidiEvents(k, includeDeltaTime=False)
>>> eventList
[<MidiEvent KEY_SIGNATURE, t=0, track=None, channel=1, data=b'\xfb\x01'>]
'''
from music21 import midi as midiModule
mt = None # use a midi track set to None
eventList = []
if includeDeltaTime:
dt = midiModule.DeltaTime(track=mt)
# leave dt.time set to zero; will be shifted later as necessary
# add to track events
eventList.append(dt)
sharpCount = ks.sharps
if hasattr(ks, 'mode') and ks.mode == 'minor':
mode = 1
else: # major or None; must define one
mode = 0
me = midiModule.MidiEvent(track=mt)
me.type = midiModule.MetaEvents.KEY_SIGNATURE
me.channel = 1
me.data = midiModule.putNumbersAsList([sharpCount, mode])
eventList.append(me)
return eventList
def midiEventsToTempo(eventList):
'''
Convert a single MIDI event into a music21 Tempo object.
TODO: Need Tests
'''
from music21 import midi as midiModule
from music21 import tempo
if not common.isListLike(eventList):
event = eventList
else: # get the second event; first is delta time
event = eventList[1]
# get microseconds per quarter
mspq = midiModule.getNumber(event.data, 3)[0] # first data is number
bpm = round(60_000_000 / mspq, 2)
# post = midiModule.getNumbersAsList(event.data)
# environLocal.printDebug(['midiEventsToTempo, got bpm', bpm])
mm = tempo.MetronomeMark(number=bpm)
return mm
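# --- Editor's illustrative sketch (not part of the original module) ---
# midiEventsToTempo() reads the event data as microseconds per quarter note;
# 500,000 microseconds per quarter corresponds to 120 BPM.
def _sketchMidiEventsToTempo():
    from music21 import midi as midiModule
    me = midiModule.MidiEvent(track=None)
    me.type = midiModule.MetaEvents.SET_TEMPO
    me.data = midiModule.putNumber(500000, 3)
    return midiEventsToTempo(me)   # a MetronomeMark at quarter = 120.0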
def tempoToMidiEvents(tempoIndication, includeDeltaTime=True):
# noinspection PyShadowingNames
r'''
Given any TempoIndication, convert it to list of :class:`~music21.midi.MidiEvent`
objects that signifies a MIDI tempo indication.
>>> mm = tempo.MetronomeMark(number=90)
>>> events = midi.translate.tempoToMidiEvents(mm)
>>> events
[<MidiEvent DeltaTime...>, <MidiEvent SET_TEMPO...>]
>>> len(events)
2
>>> events[0]
<MidiEvent DeltaTime, t=0, track=None, channel=None>
>>> evt1 = events[1]
>>> evt1
<MidiEvent SET_TEMPO, t=0, track=None, channel=1, data=b'\n,+'>
>>> evt1.data
b'\n,+'
>>> microSecondsPerQuarterNote = midi.getNumber(evt1.data, len(evt1.data))[0]
>>> microSecondsPerQuarterNote
666667
>>> round(60_000_000 / microSecondsPerQuarterNote, 1)
90.0
If includeDeltaTime is False then the DeltaTime object is omitted:
>>> midi.translate.tempoToMidiEvents(mm, includeDeltaTime=False)
[<MidiEvent SET_TEMPO...>]
Test round-trip. Note that for pure tempo numbers, by default
we create a text name if there's an appropriate one:
>>> midi.translate.midiEventsToTempo(events)
<music21.tempo.MetronomeMark maestoso Quarter=90.0>
'''
from music21 import midi as midiModule
mt = None # use a midi track set to None
eventList = []
if includeDeltaTime:
dt = midiModule.DeltaTime(track=mt)
eventList.append(dt)
me = midiModule.MidiEvent(track=mt)
me.type = midiModule.MetaEvents.SET_TEMPO
me.channel = 1
# from any tempo indication, get the sounding metronome mark
mm = tempoIndication.getSoundingMetronomeMark()
bpm = mm.getQuarterBPM()
mspq = int(round(60_000_000 / bpm)) # microseconds per quarter note
me.data = midiModule.putNumber(mspq, 3)
eventList.append(me)
return eventList
# ------------------------------------------------------------------------------
# Streams
def getPacketFromMidiEvent(
trackId: int,
offset: int,
midiEvent: 'music21.midi.MidiEvent',
obj: Optional['music21.base.Music21Object'] = None,
lastInstrument: Optional['music21.instrument.Instrument'] = None
) -> Dict[str, Any]:
'''
Pack a dictionary of parameters for each event.
Packets are used for sorting and configuring all note events.
Includes offset, any cent shift, the midi event, and the source object.
Offset and duration values stored here are MIDI ticks, not quarter lengths.
>>> n = note.Note('C4')
>>> midiEvents = midi.translate.elementToMidiEventList(n)
>>> getPacket = midi.translate.getPacketFromMidiEvent
>>> getPacket(trackId=1, offset=0, midiEvent=midiEvents[0], obj=n)
{'trackId': 1,
'offset': 0,
'midiEvent': <MidiEvent NOTE_ON, t=0, track=None, channel=1, pitch=60, velocity=90>,
'obj': <music21.note.Note C>,
'centShift': None,
'duration': 1024,
'lastInstrument': None}
>>> inst = instrument.Harpsichord()
>>> getPacket(trackId=1, offset=0, midiEvent=midiEvents[1], obj=n, lastInstrument=inst)
{'trackId': 1,
'offset': 0,
'midiEvent': <MidiEvent NOTE_OFF, t=0, track=None, channel=1, pitch=60, velocity=0>,
'obj': <music21.note.Note C>,
'centShift': None,
'duration': 0,
'lastInstrument': <music21.instrument.Harpsichord 'Harpsichord'>}
'''
from music21 import midi as midiModule
post = {
'trackId': trackId,
'offset': offset, # offset values are in midi ticks
'midiEvent': midiEvent,
'obj': obj, # keep a reference to the source object
'centShift': midiEvent.centShift,
'duration': 0,
# store last m21 instrument object, as needed to reset program changes
'lastInstrument': lastInstrument,
}
# allocate channel later
# post['channel'] = None
if midiEvent.type != midiModule.ChannelVoiceMessages.NOTE_OFF and obj is not None:
# store duration so as to calculate when the
# channel/pitch bend can be freed
post['duration'] = durationToMidiTicks(obj.duration)
# note offs will have the same object ref, and seem like they have a
# duration when they do not
return post
def elementToMidiEventList(
el: 'music21.base.Music21Object'
) -> Optional[List['music21.midi.MidiEvent']]:
'''
Return a list of MidiEvents (or None) from a Music21Object,
assuming that dynamics have already been applied, etc.
Does not include DeltaTime objects.
Channel is set to the default, 1.
Track is not set.
>>> n = note.Note('C4')
>>> midiEvents = midi.translate.elementToMidiEventList(n)
>>> midiEvents
[<MidiEvent NOTE_ON, t=0, track=None, channel=1, pitch=60, velocity=90>,
<MidiEvent NOTE_OFF, t=0, track=None, channel=1, pitch=60, velocity=0>]
'''
classes = el.classes
if 'Rest' in classes:
return
elif 'Note' in classes:
# get a list of midi events
# using this property here is easier than using the above conversion
# methods, as we do not need to know what the object is
sub = noteToMidiEvents(el, includeDeltaTime=False)
# TODO: unpitched
elif 'Chord' in classes:
# TODO: skip Harmony unless showAsChord
sub = chordToMidiEvents(el, includeDeltaTime=False)
elif 'Dynamic' in classes:
return # dynamics have already been applied to notes
elif 'TimeSignature' in classes:
# return a pair of events
el: 'music21.meter.TimeSignature'
sub = timeSignatureToMidiEvents(el, includeDeltaTime=False)
elif 'KeySignature' in classes:
el: 'music21.key.KeySignature'
sub = keySignatureToMidiEvents(el, includeDeltaTime=False)
elif 'TempoIndication' in classes:
# any tempo indication will work
# note: tempo indications need to be in channel one for most playback
el: 'music21.tempo.TempoIndication'
sub = tempoToMidiEvents(el, includeDeltaTime=False)
elif 'Instrument' in classes:
# first instrument will have been gathered above with get start elements
sub = instrumentToMidiEvents(el, includeDeltaTime=False)
else:
# other objects may have already been added
return
return sub
def streamToPackets(
s: stream.Stream,
trackId: int = 1,
addStartDelay: bool = False,
) -> List[Dict[str, Any]]:
'''
Convert a (flattened, sorted) Stream to packets.
This assumes that the Stream has already been flattened,
ties have been stripped, and instruments,
if necessary, have been added.
In converting from a Stream to MIDI, this is called first,
resulting in a collection of packets by offset.
Then, packets to events is called.
'''
from music21 import midi as midiModule
# store all events by absolute offset, without delta times
# as (absTime, event)
packetsByOffset = []
lastInstrument = None
# s should already be flat and sorted
for el in s:
midiEventList = elementToMidiEventList(el)
if 'Instrument' in el.classes:
lastInstrument = el # store last instrument
if midiEventList is None:
continue
# we process midiEventList here, which is a list of midi events
# for each event, we create a packet representation
# all events: delta/note-on/delta/note-off
# strip delta times
elementPackets = []
firstNotePlayed = False
for i in range(len(midiEventList)):
# store offset, midi event, object
# add channel and pitch change also
midiEvent = midiEventList[i]
if (midiEvent.type == midiModule.ChannelVoiceMessages.NOTE_ON
and firstNotePlayed is False):
firstNotePlayed = True
if firstNotePlayed is False:
o = offsetToMidiTicks(s.elementOffset(el), addStartDelay=False)
else:
o = offsetToMidiTicks(s.elementOffset(el), addStartDelay=addStartDelay)
if midiEvent.type != midiModule.ChannelVoiceMessages.NOTE_OFF:
# use offset
p = getPacketFromMidiEvent(
trackId,
o,
midiEvent,
obj=el,
lastInstrument=lastInstrument,
)
elementPackets.append(p)
# if it's a note_off, use the duration to shift the offset
# midi events have already been created;
else:
p = getPacketFromMidiEvent(
trackId,
o + durationToMidiTicks(el.duration),
midiEvent,
obj=el,
lastInstrument=lastInstrument)
elementPackets.append(p)
packetsByOffset += elementPackets
# sorting is useful here, as we need these to be in order to assign last
# instrument
packetsByOffset.sort(
key=lambda x: (x['offset'], x['midiEvent'].sortOrder)
)
# return packets and stream, as this flat stream should be retained
return packetsByOffset
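# --- Editor's illustrative sketch (not part of the original module) ---
# A minimal walk through the Stream -> MIDI steps the docstring above describes:
# flatten, convert to packets, then build a track. The channel re-allocation step
# (assignPacketsToChannels, defined below) that normally runs between these two
# calls is skipped here for brevity, so the default channel 1 is kept throughout.
def _sketchStreamToPackets():
    p = stream.Part()
    p.append(note.Note('C4'))
    p.append(note.Note('D4'))
    packets = streamToPackets(p.flat, trackId=1)           # list of packet dicts
    return packetsToMidiTrack(packets, trackId=1, channel=1)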
def assignPacketsToChannels(
packets,
channelByInstrument=None,
channelsDynamic=None,
initTrackIdToChannelMap=None):
'''
Given a list of packets, assign each to a channel.
Do each track one at time, based on the track id.
Shift to different channels if a pitch bend is necessary.
Keep track of which channels are available.
Need to insert a program change in the empty channel
too, based on last instrument.
Insert pitch bend messages as well,
one for start of event, one for end of event.
`packets` is a list of packets.
`channelByInstrument` should be a dictionary.
`channelsDynamic` should be a list.
`initTrackIdToChannelMap` should be a dictionary.
'''
from music21 import midi as midiModule
if channelByInstrument is None:
channelByInstrument = {}
if channelsDynamic is None:
channelsDynamic = []
if initTrackIdToChannelMap is None:
initTrackIdToChannelMap = {}
# allChannels = list(range(1, 10)) + list(range(11, 17)) # all but 10
uniqueChannelEvents = {} # dict of (start, stop, usedChannel) : channel
post = []
usedTracks = []
for p in packets:
# environLocal.printDebug(['assignPacketsToChannels', p['midiEvent'].track, p['trackId']])
# must use trackId, as .track on MidiEvent is not yet set
if p['trackId'] not in usedTracks:
usedTracks.append(p['trackId'])
# only need note_ons, as stored correspondingEvent attr can be used
# to get noteOff
if p['midiEvent'].type != midiModule.ChannelVoiceMessages.NOTE_ON:
# set all non-note-off messages to the initial channel
if p['midiEvent'].type != midiModule.ChannelVoiceMessages.NOTE_OFF:
p['midiEvent'].channel = p['initChannel']
post.append(p) # add the non note_on packet first
# if this is a note off, and has a cent shift, need to
# reset the pitch bend back to 0 cents
if p['midiEvent'].type == midiModule.ChannelVoiceMessages.NOTE_OFF:
# environLocal.printDebug(['got note-off', p['midiEvent']])
# cent shift is set for note on and note off
if p['centShift']:
# do not set channel, as already set
me = midiModule.MidiEvent(p['midiEvent'].track,
type=midiModule.ChannelVoiceMessages.PITCH_BEND,
channel=p['midiEvent'].channel)
# note off stores a note on for each pitch; do not invert, simply
# set to zero
me.setPitchBend(0)
pBendEnd = getPacketFromMidiEvent(
trackId=p['trackId'],
offset=p['offset'],
midiEvent=me,
)
post.append(pBendEnd)
# environLocal.printDebug(['adding pitch bend', pBendEnd])
continue # store and continue
# set default channel for all packets
p['midiEvent'].channel = p['initChannel']
# find a free channel
# if necessary, add pitch change at start of Note,
# cancel pitch change at end
o = p['offset']
oEnd = p['offset'] + p['duration']
channelExclude = [] # channels that cannot be used
centShift = p['centShift'] # may be None
# environLocal.printDebug(['\n\n', 'offset', o, 'oEnd', oEnd, 'centShift', centShift])
# iterate through all past events/channels, and find all
# that are active and have a pitch bend
for key in uniqueChannelEvents:
start, stop, usedChannel = key
# if offset (start time) is in this range of a found event
# or if any start or stop is within this span
# if o >= start and o < stop: # found an offset that is used
if ((o <= start < oEnd)
or (o < stop < oEnd)
or (start <= o < stop)
or (start < oEnd < stop)):
# if there is a cent shift active in the already used channel
# environLocal.printDebug(['matchedOffset overlap'])
centShiftList = uniqueChannelEvents[key]
if centShiftList:
# only add if unique
if usedChannel not in channelExclude:
channelExclude.append(usedChannel)
# or if this event has shift, then we can exclude
# the channel already used without a shift
elif centShift:
if usedChannel not in channelExclude:
channelExclude.append(usedChannel)
# cannot break early w/o sorting
# if no channels are excluded, get a new channel
# environLocal.printDebug(['post process channelExclude', channelExclude])
if channelExclude: # only change if necessary
ch = None
# iterate in order over all channels: lower will be added first
for x in channelsDynamic:
if x not in channelExclude:
ch = x
break
if ch is None:
raise TranslateException(
'no unused channels available for microtone/instrument assignment')
p['midiEvent'].channel = ch
# change channel of note off; this is used above to turn off bend
p['midiEvent'].correspondingEvent.channel = ch
# environLocal.printDebug(['set channel of correspondingEvent:',
# p['midiEvent'].correspondingEvent])
# TODO: must add program change, as we are now in a new
# channel, regardless of whether we have a pitch bend (we may
# move channels for a different reason)
if p['lastInstrument'] is not None:
meList = instrumentToMidiEvents(inputM21=p['lastInstrument'],
includeDeltaTime=False,
midiTrack=p['midiEvent'].track,
channel=ch)
pgmChangePacket = getPacketFromMidiEvent(
trackId=p['trackId'],
offset=o, # keep offset here
midiEvent=meList[0],
)
post.append(pgmChangePacket)
else: # use the existing channel
ch = p['midiEvent'].channel
# always set corresponding event to the same channel
p['midiEvent'].correspondingEvent.channel = ch
# environLocal.printDebug(['assigning channel', ch, 'channelsDynamic', channelsDynamic,
# 'p['initChannel']', p['initChannel']])
if centShift:
# add pitch bend
me = midiModule.MidiEvent(p['midiEvent'].track,
type=midiModule.ChannelVoiceMessages.PITCH_BEND,
channel=ch)
me.setPitchBend(centShift)
pBendStart = getPacketFromMidiEvent(
trackId=p['trackId'],
offset=o,
midiEvent=me, # keep offset here
)
post.append(pBendStart)
# environLocal.printDebug(['adding pitch bend', me])
# removal of pitch bend will happen above with note off
# key includes channel, so that durations can span once in each channel
key = (p['offset'], p['offset'] + p['duration'], ch)
if key not in uniqueChannelEvents:
# need to count multiple instances of events on the same
# span and in the same channel (fine if all have the same pitch bend)
uniqueChannelEvents[key] = []
# always add the cent shift if it is not None
if centShift:
uniqueChannelEvents[key].append(centShift)
post.append(p) # add packet/ done after ch change or bend addition
# environLocal.printDebug(['uniqueChannelEvents', uniqueChannelEvents])
# this is called once at completion
# environLocal.printDebug(['uniqueChannelEvents', uniqueChannelEvents])
# after processing, collect all channels used
foundChannels = []
for start, stop, usedChannel in list(uniqueChannelEvents): # a list
if usedChannel not in foundChannels:
foundChannels.append(usedChannel)
# for ch in chList:
# if ch not in foundChannels:
# foundChannels.append(ch)
# environLocal.printDebug(['foundChannels', foundChannels])
# environLocal.printDebug(['usedTracks', usedTracks])
# post processing of entire packet collection
# for all used channels, create a zero pitch bend at time zero
# for ch in foundChannels:
# for each track, places a pitch bend in its initChannel
for trackId in usedTracks:
if trackId == 0:
continue # Conductor track: do not add pitch bend
ch = initTrackIdToChannelMap[trackId]
# use None for track; will get updated later
me = midiModule.MidiEvent(track=trackId,
type=midiModule.ChannelVoiceMessages.PITCH_BEND,
channel=ch)
me.setPitchBend(0)
pBendEnd = getPacketFromMidiEvent(
trackId=trackId,
offset=0,
midiEvent=me,
)
post.append(pBendEnd)
# environLocal.printDebug(['adding pitch bend for found channels', me])
# this sort is necessary
post.sort(
key=lambda x_event: (x_event['offset'], x_event['midiEvent'].sortOrder)
)
# TODO: for each track, add an additional silent event to make sure
# entire duration gets played
# diagnostic display
# for p in post: environLocal.printDebug(['processed packet', p])
# post = packets
return post
def filterPacketsByTrackId(
packetsSrc: List[Dict[str, Any]],
trackIdFilter: Optional[int] = None,
) -> List[Dict[str, Any]]:
'''
Given a list of Packet dictionaries, return a list of
only those whose trackId matches the filter.
>>> packets = [
... {'trackId': 1, 'name': 'hello'},
... {'trackId': 2, 'name': 'bye'},
... {'trackId': 1, 'name': 'hi'},
... ]
>>> midi.translate.filterPacketsByTrackId(packets, 1)
[{'trackId': 1, 'name': 'hello'},
{'trackId': 1, 'name': 'hi'}]
>>> midi.translate.filterPacketsByTrackId(packets, 2)
[{'trackId': 2, 'name': 'bye'}]
If no trackIdFilter is passed, the original list is returned:
>>> midi.translate.filterPacketsByTrackId(packets) is packets
True
'''
if trackIdFilter is None:
return packetsSrc
outPackets = []
for packet in packetsSrc:
if packet['trackId'] == trackIdFilter:
outPackets.append(packet)
return outPackets
def packetsToDeltaSeparatedEvents(
packets: List[Dict[str, Any]],
midiTrack: 'music21.midi.MidiTrack'
) -> List['music21.midi.MidiEvent']:
'''
Given a list of packets (which already contain MidiEvent objects)
return a list of those Events with proper delta times between them.
At this stage MIDI event objects have been created.
The key process here is finding the adjacent time
between events and adding DeltaTime events before each MIDI event.
Delta time channel values are derived from the previous midi event.
'''
from music21.midi import DeltaTime
events = []
lastOffset = 0
for packet in packets:
midiEvent = packet['midiEvent']
t = packet['offset'] - lastOffset
if t < 0:
raise TranslateException('got a negative delta time')
# set the channel from the midi event
dt = DeltaTime(midiTrack, time=t, channel=midiEvent.channel)
# environLocal.printDebug(['packetsByOffset', packet])
events.append(dt)
events.append(midiEvent)
lastOffset = packet['offset']
# environLocal.printDebug(['packetsToDeltaSeparatedEvents', 'total events:', len(events)])
return events
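# --- Editor's illustrative note (not part of the original module) ---
# The delta times emitted above are simply successive differences of the packet
# offsets. For example, packet offsets (in ticks) of 0, 1024, 1024, 2048 yield
# DeltaTime values of 0, 1024, 0, 1024: simultaneous events are separated by
# zero-length deltas.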
def packetsToMidiTrack(packets, trackId=1, channel=1, instrumentObj=None):
'''
Given packets already allocated with channel
and/or instrument assignments, place these in a MidiTrack.
Note that all packets can be sent; only those with
matching trackIds will be collected into the resulting track
The `channel` defines the channel that startEvents and endEvents
will be assigned to
Use streamToPackets to convert the Stream to the packets
'''
from music21 import midi as midiModule
# TODO: for a given track id, need to find start/end channel
mt = midiModule.MidiTrack(trackId)
# set startEvents to preferred channel
mt.events += getStartEvents(mt,
channel=channel,
instrumentObj=instrumentObj)
# filter only those packets for this track
trackPackets = filterPacketsByTrackId(packets, trackId)
mt.events += packetsToDeltaSeparatedEvents(trackPackets, mt)
# must update all events with a ref to this MidiTrack
mt.events += getEndEvents(mt, channel=channel)
mt.updateEvents() # sets this track as .track for all events
return mt
def getTimeForEvents(
mt: 'music21.midi.MidiTrack'
) -> List[Tuple[int, 'music21.midi.MidiEvent']]:
'''
Get a list of tuples of (tickTime, MidiEvent) from the events with time deltas.
'''
# get an abs start time for each event, discard deltas
events = []
currentTime = 0
# pair deltas with events, convert abs time
# get even numbers
# in some cases, the first event may not be a delta time, but
# a SEQUENCE_TRACK_NAME or something else. thus, need to get
# first delta time
i = 0
while i < len(mt.events):
currentEvent = mt.events[i]
try:
nextEvent = mt.events[i + 1]
except IndexError: # pragma: no cover
break
currentDt = currentEvent.isDeltaTime()
nextDt = nextEvent.isDeltaTime()
# in pairs, first should be delta time, second should be event
# environLocal.printDebug(['midiTrackToStream(): index', 'i', i, mt.events[i]])
# environLocal.printDebug(['midiTrackToStream(): index', 'i + 1', i + 1, mt.events[i + 1]])
# need to find pairs of delta time and events
# in some cases, there are delta times that are out of order, or
# packed in the beginning
if currentDt and not nextDt:
currentTime += currentEvent.time # increment time
tupleAppend = (currentTime, nextEvent)
events.append(tupleAppend)
i += 2
elif (not currentDt
and not nextDt):
# environLocal.printDebug(['midiTrackToStream(): got two non delta times in a row'])
i += 1
elif currentDt and nextDt:
# environLocal.printDebug(['midiTrackToStream(): got two delta times in a row'])
i += 1
else:
# cannot pair delta time to the next event; skip by 1
# environLocal.printDebug(['cannot pair to delta time', mt.events[i]])
i += 1
return events
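# --- Editor's illustrative sketch (not part of the original module) ---
# Round-trip with the helpers defined earlier in this file: getStartEvents() and
# getEndEvents() produce delta-separated events, and getTimeForEvents() turns
# them back into (absoluteTick, event) pairs.
def _sketchGetTimeForEvents():
    from music21 import midi as midiModule
    mt = midiModule.MidiTrack(1)
    mt.events += getStartEvents(mt)     # DeltaTime(0) + SEQUENCE_TRACK_NAME
    mt.events += getEndEvents(mt)       # DeltaTime(1024) + END_OF_TRACK
    # expected: [(0, <SEQUENCE_TRACK_NAME ...>), (1024, <END_OF_TRACK ...>)]
    return getTimeForEvents(mt)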
def getNotesFromEvents(
events: List[Tuple[int, 'music21.midi.MidiEvent']]
) -> List[Tuple[Tuple[int, 'music21.midi.MidiEvent'],
Tuple[int, 'music21.midi.MidiEvent']]]:
'''
Returns a list of Tuples of MIDI events that are pairs of note-on and
note-off events.
'''
notes = [] # store pairs of pairs
memo = set() # store already matched note off
for i, eventTuple in enumerate(events):
if i in memo:
continue
unused_t, e = eventTuple
# for each note on event, we need to search for a match in all future
# events
if not e.isNoteOn():
continue
match = None
# environLocal.printDebug(['midiTrackToStream(): isNoteOn', e])
for j in range(i + 1, len(events)):
if j in memo:
continue
unused_tSub, eSub = events[j]
if e.matchedNoteOff(eSub):
memo.add(j)
match = i, j
break
if match is not None:
i, j = match
pairs = (events[i], events[j])
notes.append(pairs)
else:
pass
# environLocal.printDebug([
# 'midiTrackToStream(): cannot find a note off for a note on', e])
return notes
def getMetaEvents(events):
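    '''
    (Editor-added description.) Scan a list of (tickTime, MidiEvent) pairs, as
    produced by getTimeForEvents(), and return (tickTime, music21 object) pairs
    for the meta events found there: time signatures, key signatures, tempo
    settings, and instrument/track-name or program-change events.
    '''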
from music21.midi import MetaEvents, ChannelVoiceMessages
metaEvents = [] # store pairs of abs time, m21 object
for i, eventTuple in enumerate(events):
t, e = eventTuple
metaObj = None
if e.type == MetaEvents.TIME_SIGNATURE:
# time signature should be 4 bytes
metaObj = midiEventsToTimeSignature(e)
elif e.type == MetaEvents.KEY_SIGNATURE:
metaObj = midiEventsToKey(e)
elif e.type == MetaEvents.SET_TEMPO:
metaObj = midiEventsToTempo(e)
elif e.type in (MetaEvents.INSTRUMENT_NAME, MetaEvents.SEQUENCE_TRACK_NAME):
metaObj = midiEventsToInstrument(e)
elif e.type == ChannelVoiceMessages.PROGRAM_CHANGE:
metaObj = midiEventsToInstrument(e)
elif e.type == MetaEvents.MIDI_PORT:
pass
else:
pass
if metaObj:
pair = (t, metaObj)
metaEvents.append(pair)
return metaEvents
def midiTrackToStream(
mt,
ticksPerQuarter=None,
quantizePost=True,
inputM21=None,
**keywords
) -> stream.Part:
# noinspection PyShadowingNames
'''
Note that quantization takes place in stream.py since it's useful not just for MIDI.
>>> fp = common.getSourceFilePath() / 'midi' / 'testPrimitive' / 'test05.mid'
>>> mf = midi.MidiFile()
>>> mf.open(fp)
>>> mf.read()
>>> mf.close()
>>> mf
<music21.midi.MidiFile 1 track>
>>> len(mf.tracks)
1
>>> mt = mf.tracks[0]
>>> mt
<music21.midi.MidiTrack 0 -- 56 events>
>>> mt.events
[<MidiEvent DeltaTime...>,
<MidiEvent SEQUENCE_TRACK_NAME...>,
<MidiEvent DeltaTime...>,
<MidiEvent NOTE_ON, t=0, track=0, channel=1, pitch=36, velocity=90>,
...]
>>> p = midi.translate.midiTrackToStream(mt)
>>> p
<music21.stream.Part ...>
>>> len(p.notesAndRests)
11
>>> p.notes[0].pitch.midi
36
>>> p.notes[0].volume.velocity
90
Note that the output Part has not yet had measures made, nor does it have a
TimeSignature yet.
>>> p.show('text')
{0.0} <music21.instrument.Instrument ''>
{0.0} <music21.note.Note C>
{1.0} <music21.note.Rest rest>
{2.0} <music21.chord.Chord F3 G#4 C5>
{3.0} <music21.note.Rest rest>
{4.5} <music21.note.Note B->
...
'''
# environLocal.printDebug(['midiTrackToStream(): got midi track: events',
# len(mt.events), 'ticksPerQuarter', ticksPerQuarter])
if inputM21 is None:
s = stream.Part()
else:
s = inputM21
if ticksPerQuarter is None:
ticksPerQuarter = defaults.ticksPerQuarter
# get events without DeltaTimes
events = getTimeForEvents(mt)
# need to build chords and notes
notes = getNotesFromEvents(events)
metaEvents = getMetaEvents(events)
# first create meta events
for t, obj in metaEvents:
# environLocal.printDebug(['insert midi meta event:', t, obj])
s.coreInsert(t / ticksPerQuarter, obj)
s.coreElementsChanged()
deduplicate(s, inPlace=True)
# environLocal.printDebug([
# 'midiTrackToStream(): found notes ready for Stream import', len(notes)])
# collect notes with similar start times into chords
# create a composite list of both notes and chords
# composite = []
chordSub = None
i = 0
iGathered = [] # store a list of indexes of gathered values put into chords
voicesRequired = False
if len(notes) > 1:
# environLocal.printDebug(['\n', 'midiTrackToStream(): notes', notes])
while i < len(notes):
if i in iGathered:
i += 1
continue
# look at each note; get on time and event
on, off = notes[i]
t, unused_e = on
tOff, unused_eOff = off
# environLocal.printDebug(['on, off', on, off, 'i', i, 'len(notes)', len(notes)])
# go through all following notes; if there is only 1 note, this will
# not execute;
# looking for other events that start within a certain small time
# window to make into a chord
# if we find a note with a different end time but same start
# time, throw into a different voice
for j in range(i + 1, len(notes)):
# look at each on time event
onSub, offSub = notes[j]
tSub, unused_eSub = onSub
tOffSub, unused_eOffSub = offSub
# can set a tolerance for chordSubbing; here at 1/16th
# of a quarter
chunkTolerance = ticksPerQuarter / 16
if abs(tSub - t) <= chunkTolerance:
# isolate case where end time is not w/n tolerance
if abs(tOffSub - tOff) > chunkTolerance:
# need to store this as requiring movement to a diff
# voice
voicesRequired = True
continue
if chordSub is None: # start a new one
chordSub = [notes[i]]
iGathered.append(i)
chordSub.append(notes[j])
iGathered.append(j)
continue # keep looping through events to see
# if we can add more elements to this chord group
else: # no more matches; assuming chordSub tones are contiguous
break
# this comparison must be outside of j loop, as the case where we
# have the last note in a list of notes and the j loop does not
# execute; chordSub will be None
if chordSub is not None:
# composite.append(chordSub)
# create a chord here
c = chord.Chord()
midiEventsToChord(chordSub, ticksPerQuarter, c)
o = notes[i][0][0] / ticksPerQuarter
c.midiTickStart = notes[i][0][0]
s.coreInsert(o, c)
# iSkip = len(chordSub) # amount of accumulated chords
chordSub = None
else: # just append the note, chordSub is None
# composite.append(notes[i])
# create a note here
n = note.Note()
midiEventsToNote(notes[i], ticksPerQuarter, n)
# the time is the first value in the first pair
# need to round, as floating point error is likely
o = notes[i][0][0] / ticksPerQuarter
n.midiTickStart = notes[i][0][0]
s.coreInsert(o, n)
# iSkip = 1
# break # exit secondary loop
i += 1
elif len(notes) == 1: # rare case of just one note
n = note.Note()
midiEventsToNote(notes[0], ticksPerQuarter, n)
# the time is the first value in the first pair
# need to round, as floating point error is likely
o = notes[0][0][0] / ticksPerQuarter
n.midiTickStart = notes[0][0][0]
s.coreInsert(o, n)
s.coreElementsChanged()
# quantize to nearest 16th
if quantizePost:
if 'quarterLengthDivisors' in keywords:
quarterLengthDivisors = keywords['quarterLengthDivisors']
else:
quarterLengthDivisors = None
s.quantize(quarterLengthDivisors=quarterLengthDivisors,
processOffsets=True,
processDurations=True,
inPlace=True)
if voicesRequired:
# this procedure will make the appropriate rests
s.makeVoices(inPlace=True, fillGaps=True)
else:
# always need to fill gaps, as rests are not found in any other way
s.makeRests(inPlace=True, fillGaps=True)
return s
def prepareStreamForMidi(s) -> stream.Stream:
# noinspection PyShadowingNames
'''
Given a score, prepare it for MIDI processing, and return a new Stream:
1. Expand repeats.
2. Make changes that will let us later create a conductor (tempo) track
by placing `MetronomeMark`, `TimeSignature`, and `KeySignature`
objects into a new Part, and removing them from the other parts.
3. Ensure that the resulting Stream always has part-like substreams.
Note: will make a deepcopy() of the stream.
>>> s = stream.Score()
>>> p = stream.Part()
>>> m = stream.Measure(number=1)
>>> m.append(tempo.MetronomeMark(100))
>>> m.append(note.Note('C4', type='whole')) # MIDI 60
>>> p.append(m)
>>> s.append(p)
>>> sOut = midi.translate.prepareStreamForMidi(s)
>>> sOut.show('text')
{0.0} <music21.stream.Part 0x10b0439a0>
{0.0} <music21.tempo.MetronomeMark Quarter=100>
{0.0} <music21.meter.TimeSignature 4/4>
{0.0} <music21.stream.Part 0x10b043c10>
{0.0} <music21.stream.Measure 1 offset=0.0>
{0.0} <music21.note.Note C>
'''
from music21 import volume
if s.recurse().stream().hasMeasures():
s = s.expandRepeats() # makes a deep copy
else:
s = copy.deepcopy(s)
conductor = conductorStream(s)
if s.hasPartLikeStreams():
# process Volumes one part at a time
# this assumes that dynamics in a part/stream apply to all components
# of that part stream
# this sets the cachedRealized value for each Volume
for p in s.iter.getElementsByClass('Stream'):
volume.realizeVolume(p)
s.insert(0, conductor)
out = s
else: # just a single Stream
volume.realizeVolume(s)
out = stream.Score()
out.insert(0, conductor)
out.insert(0, s)
return out
def conductorStream(s: stream.Stream) -> stream.Part:
# noinspection PyShadowingNames
'''
Strip the given stream of any events that belong in a conductor track
rather than in a music track, and returns a :class:`~music21.stream.Part`
containing just those events, without duplicates, suitable for being a
Part to turn into a conductor track.
Sets a default MetronomeMark of 120 if no MetronomeMarks are present
and a TimeSignature of 4/4 if not present.
Ensures that the conductor track always sorts before other parts.
Here we purposely use nested generic streams instead of Scores, Parts, etc.
to show that this still works. But you should use Score, Part, Measure instead.
>>> s = stream.Stream(id='scoreLike')
>>> p = stream.Stream(id='partLike')
>>> p.priority = -2
>>> m = stream.Stream(id='measureLike')
>>> m.append(tempo.MetronomeMark(100))
>>> m.append(note.Note('C4'))
>>> p.append(m)
>>> s.insert(0, p)
>>> conductor = midi.translate.conductorStream(s)
>>> conductor.priority
-3
The MetronomeMark is moved and a default TimeSignature is added:
>>> conductor.show('text')
{0.0} <music21.tempo.MetronomeMark Quarter=100>
{0.0} <music21.meter.TimeSignature 4/4>
The original stream still has the note:
>>> s.show('text')
{0.0} <music21.stream.Stream partLike>
{0.0} <music21.stream.Stream measureLike>
{0.0} <music21.note.Note C>
'''
from music21 import tempo, meter
partsList = list(s.getElementsByClass('Stream').getElementsByOffset(0))
minPriority = min(p.priority for p in partsList) if partsList else 0
conductorPriority = minPriority - 1
conductorPart = stream.Part()
conductorPart.priority = conductorPriority
for klass in ('MetronomeMark', 'TimeSignature', 'KeySignature'):
events = s.flat.getElementsByClass(klass)
lastOffset = -1
for el in events:
o = events.srcStream.elementOffset(el)
s.remove(el, recurse=True)
# Don't overwrite an event of the same class at this offset
if o > lastOffset:
conductorPart.coreInsert(o, el)
lastOffset = o
conductorPart.coreElementsChanged()
# Defaults
if not conductorPart.getElementsByClass('MetronomeMark'):
conductorPart.insert(tempo.MetronomeMark(number=120))
if not conductorPart.getElementsByClass('TimeSignature'):
conductorPart.insert(meter.TimeSignature('4/4'))
return conductorPart
def channelInstrumentData(
s: stream.Stream,
acceptableChannelList: Optional[List[int]] = None,
) -> Tuple[Dict[Union[int, None], int], List[int]]:
'''
    Read through Stream `s`, find the instruments in it, and return a 2-tuple:
    first, a dictionary mapping MIDI program numbers to channel numbers;
    second, a list of unassigned channels that can be used for dynamic
    allocation.
Substreams without notes or rests (e.g. representing a conductor track)
will not consume a channel.
Only necessarily works if :func:`~music21.midi.translate.prepareStreamForMidi`
has been run before calling this routine.
'''
# temporary channel allocation
if acceptableChannelList is not None:
allChannels = acceptableChannelList
else:
allChannels = list(range(1, 10)) + list(range(11, 17)) # all but 10
# store program numbers
    # tried using set() but it does not guarantee ordering.
allUniqueInstruments = []
# store streams in uniform list
substreamList = []
if s.hasPartLikeStreams():
for obj in s.getElementsByClass('Stream'):
if not obj.flat.notesAndRests:
# Conductor track: don't consume a channel
continue
else:
substreamList.append(obj)
else:
# should not ever run if prepareStreamForMidi() was run...
substreamList.append(s) # pragma: no cover
# Music tracks
for subs in substreamList:
# get a first instrument; iterate over rest
instrumentStream = subs.recurse().getElementsByClass('Instrument')
setAnInstrument = False
for inst in instrumentStream:
if inst.midiProgram not in allUniqueInstruments:
allUniqueInstruments.append(inst.midiProgram)
setAnInstrument = True
if not setAnInstrument:
if None not in allUniqueInstruments:
allUniqueInstruments.append(None)
channelByInstrument = {} # the instrument is the key
channelsDynamic = [] # remaining channels
# create an entry for all unique instruments, assign channels
# for each instrument, assign a channel; if we go above 16, that is fine
# we just cannot use it and will take modulus later
channelsAssigned = []
for i, iPgm in enumerate(allUniqueInstruments):
        # the key is the program number; the value is the start channel
if i < len(allChannels) - 1: # save at least one dynamic channel
channelByInstrument[iPgm] = allChannels[i]
channelsAssigned.append(allChannels[i])
else: # just use 1, and deal with the mess: cannot allocate
channelByInstrument[iPgm] = allChannels[0]
channelsAssigned.append(allChannels[0])
# get the dynamic channels, or those not assigned
for ch in allChannels:
if ch not in channelsAssigned:
channelsDynamic.append(ch)
return channelByInstrument, channelsDynamic
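# A minimal commented sketch of typical use (mirrors testChannelAllocation below;
# `someScore` is a placeholder for a part-like Score already run through
# prepareStreamForMidi()):
#
#     s = prepareStreamForMidi(someScore)
#     channelByInstrument, channelsDynamic = channelInstrumentData(s)
#     # channelByInstrument maps midiProgram (or None) -> initial channel number;
#     # channelsDynamic lists the channels left free for microtone reallocation.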
def packetStorageFromSubstreamList(
substreamList: List[stream.Part],
*,
addStartDelay=False,
) -> Dict[int, Dict[str, Any]]:
# noinspection PyShadowingNames
r'''
Make a dictionary of raw packets and the initial instrument for each
subStream.
If the first Part in the list of parts is empty then a new
:class:`~music21.instrument.Conductor` object will be given as the instrument.
>>> s = stream.Score()
>>> p = stream.Part()
>>> m = stream.Measure(number=1)
>>> m.append(tempo.MetronomeMark(100))
>>> m.append(instrument.Oboe())
>>> m.append(note.Note('C4', type='whole')) # MIDI 60
>>> p.append(m)
>>> s.append(p)
>>> sOut = midi.translate.prepareStreamForMidi(s)
>>> partList = list(sOut.parts)
>>> packetStorage = midi.translate.packetStorageFromSubstreamList(partList)
>>> list(sorted(packetStorage.keys()))
[0, 1]
>>> list(sorted(packetStorage[0].keys()))
['initInstrument', 'rawPackets']
>>> from pprint import pprint
>>> pprint(packetStorage)
{0: {'initInstrument': <music21.instrument.Conductor 'Conductor'>,
'rawPackets': [{'centShift': None,
'duration': 0,
'lastInstrument': None,
'midiEvent': <MidiEvent SET_TEMPO, t=0, track=None, channel=1, ...>,
'obj': <music21.tempo.MetronomeMark Quarter=100>,
'offset': 0,
'trackId': 0},
{'centShift': None,
'duration': 0,
'lastInstrument': None,
'midiEvent': <MidiEvent TIME_SIGNATURE, t=0, ...>,
'obj': <music21.meter.TimeSignature 4/4>,
'offset': 0,
'trackId': 0}]},
1: {'initInstrument': <music21.instrument.Oboe 'Oboe'>,
'rawPackets': [{'centShift': None,
'duration': 0,
'lastInstrument': <music21.instrument.Oboe 'Oboe'>,
'midiEvent': <MidiEvent PROGRAM_CHANGE,
t=0, track=None, channel=1, data=68>,
'obj': <music21.instrument.Oboe 'Oboe'>,
'offset': 0,
'trackId': 1},
{'centShift': None,
'duration': 4096,
'lastInstrument': <music21.instrument.Oboe 'Oboe'>,
'midiEvent': <MidiEvent NOTE_ON, t=0,
track=None, channel=1, pitch=60, velocity=90>,
'obj': <music21.note.Note C>,
'offset': 0,
'trackId': 1},
{'centShift': None,
'duration': 0,
'lastInstrument': <music21.instrument.Oboe 'Oboe'>,
'midiEvent': <MidiEvent NOTE_OFF, t=0,
track=None, channel=1, pitch=60, velocity=0>,
'obj': <music21.note.Note C>,
'offset': 4096,
'trackId': 1}]}}
'''
packetStorage = {}
for trackId, subs in enumerate(substreamList): # Conductor track is track 0
subs = subs.flat
# get a first instrument; iterate over rest
instrumentStream = subs.iter.getElementsByClass('Instrument')
# if there is an Instrument object at the start, make instObj that instrument.
if instrumentStream and subs.elementOffset(instrumentStream[0]) == 0:
instObj = instrumentStream[0]
elif trackId == 0 and not subs.notesAndRests:
# Conductor track
instObj = Conductor()
else:
instObj = None
trackPackets = streamToPackets(subs, trackId=trackId, addStartDelay=addStartDelay)
# store packets in dictionary; keys are trackIds
packetStorage[trackId] = {
'rawPackets': trackPackets,
'initInstrument': instObj,
}
return packetStorage
def updatePacketStorageWithChannelInfo(
packetStorage: Dict[int, Dict[str, Any]],
channelByInstrument: Dict[Union[int, None], int],
) -> None:
'''
Take the packetStorage dictionary and using information
from 'initInstrument' and channelByInstrument, add an 'initChannel' key to each
packetStorage bundle and to each rawPacket in the bundle['rawPackets']
'''
# update packets with first channel
for unused_trackId, bundle in packetStorage.items():
# get instrument
instObj = bundle['initInstrument']
if instObj is None:
try:
initCh = channelByInstrument[None]
except KeyError: # pragma: no cover
initCh = 1 # fallback, should not happen.
elif 'Conductor' in instObj.classes:
initCh = None
else: # use midi program
initCh = channelByInstrument[instObj.midiProgram]
bundle['initChannel'] = initCh # set for bundle too
for rawPacket in bundle['rawPackets']:
rawPacket['initChannel'] = initCh
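# A commented sketch of the expected call order (streamHierarchyToMidiTracks below
# is the real caller; the names here are placeholders for its local variables):
#
#     packetStorage = packetStorageFromSubstreamList(substreamList)
#     channelByInstrument, channelsDynamic = channelInstrumentData(s)
#     updatePacketStorageWithChannelInfo(packetStorage, channelByInstrument)
#     # afterwards every bundle and every rawPacket carries an 'initChannel' key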
def streamHierarchyToMidiTracks(
inputM21,
*,
acceptableChannelList=None,
addStartDelay=False,
):
'''
Given a Stream, Score, Part, etc., that may have substreams (i.e.,
a hierarchy), return a list of :class:`~music21.midi.MidiTrack` objects.
acceptableChannelList is a list of MIDI Channel numbers that can be used or None.
If None, then 1-9, 11-16 are used (10 being reserved for percussion).
Called by streamToMidiFile()
The process:
1. makes a deepcopy of the Stream (Developer TODO: could this
be done with a shallow copy? Not if ties are stripped and volume realized.)
2. we make a list of all instruments that are being used in the piece.
Changed in v.6 -- acceptableChannelList is keyword only. addStartDelay is new.
Changed in v.6.5 -- Track 0 (tempo/conductor track) always exported.
'''
# makes a deepcopy
s = prepareStreamForMidi(inputM21)
channelByInstrument, channelsDynamic = channelInstrumentData(s, acceptableChannelList)
# return a list of MidiTrack objects
midiTracks = []
# TODO: may need to shift all time values to accommodate
# Streams that do not start at same time
# store streams in uniform list: prepareStreamForMidi() ensures there are substreams
substreamList = []
for obj in s.getElementsByClass('Stream'):
# prepareStreamForMidi() supplies defaults for these
if obj.getElementsByClass(('MetronomeMark', 'TimeSignature')):
# Ensure conductor track is first
substreamList.insert(0, obj)
else:
substreamList.append(obj)
# strip all ties inPlace
for subs in substreamList:
subs.stripTies(inPlace=True, matchByPitch=False)
packetStorage = packetStorageFromSubstreamList(substreamList, addStartDelay=addStartDelay)
updatePacketStorageWithChannelInfo(packetStorage, channelByInstrument)
initTrackIdToChannelMap = {}
for trackId, bundle in packetStorage.items():
initTrackIdToChannelMap[trackId] = bundle['initChannel'] # map trackId to channelId
# combine all packets for processing of channel allocation
netPackets = []
for bundle in packetStorage.values():
netPackets += bundle['rawPackets']
# process all channel assignments for all packets together
netPackets = assignPacketsToChannels(
netPackets,
channelByInstrument=channelByInstrument,
channelsDynamic=channelsDynamic,
initTrackIdToChannelMap=initTrackIdToChannelMap)
# environLocal.printDebug(['got netPackets:', len(netPackets),
# 'packetStorage keys (tracks)', packetStorage.keys()])
# build each track, sorting out the appropriate packets based on track
# ids
for trackId in packetStorage:
initChannel = packetStorage[trackId]['initChannel']
instrumentObj = packetStorage[trackId]['initInstrument']
mt = packetsToMidiTrack(netPackets,
trackId=trackId,
channel=initChannel,
instrumentObj=instrumentObj)
midiTracks.append(mt)
return midiTracks
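# A minimal commented usage sketch (a tiny throwaway Stream; for writing an actual
# file, streamToMidiFile() below is the normal entry point):
#
#     s = stream.Stream()
#     s.append(note.Note('C4'))
#     tracks = streamHierarchyToMidiTracks(s)
#     # tracks[0] is the conductor (tempo/meter) track, tracks[1] the music track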
def midiTracksToStreams(
midiTracks: List['music21.midi.MidiTrack'],
ticksPerQuarter=None,
quantizePost=True,
inputM21: stream.Score = None,
**keywords
) -> stream.Stream:
'''
Given a list of midiTracks, populate either a new stream.Score or inputM21
with a Part for each track.
'''
# environLocal.printDebug(['midi track count', len(midiTracks)])
if inputM21 is None:
s = stream.Score()
else:
s = inputM21
# conductorPart will store common elements such as time sig, key sig
# from the conductor track (or any track without notes).
conductorPart = stream.Part()
for mt in midiTracks:
# not all tracks have notes defined; only creates parts for those
# that do
# environLocal.printDebug(['raw midi tracks', mt])
if mt.hasNotes():
streamPart = stream.Part() # create a part instance for each part
s.insert(0, streamPart)
else:
streamPart = conductorPart
midiTrackToStream(mt,
ticksPerQuarter,
quantizePost,
inputM21=streamPart,
**keywords)
# environLocal.printDebug(['show() conductorTrack elements'])
# if we have time sig/key sig/tempo elements, add to each part
for e in conductorPart.getElementsByClass(
('TimeSignature', 'KeySignature', 'MetronomeMark')):
for i, p in enumerate(s.getElementsByClass('Stream')):
# create a deepcopy of the element so a flat does not cause
# multiple references of the same
eventCopy = copy.deepcopy(e)
if 'TempoIndication' in eventCopy.classes and i != 0:
eventCopy.style.hideObjectOnPrint = True
eventCopy.numberImplicit = True
p.insert(conductorPart.elementOffset(e), eventCopy)
return s
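# A commented sketch of the import direction (midiFileToStream() below is the usual
# caller; `mf` stands for a midi.MidiFile that has already been read):
#
#     score = midiTracksToStreams(mf.tracks, ticksPerQuarter=mf.ticksPerQuarterNote)
#     # conductor-track tempo/meter/key events are copied into every resulting Part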
def streamToMidiFile(
inputM21: stream.Stream,
addStartDelay: bool = False,
) -> 'music21.midi.MidiFile':
# noinspection PyShadowingNames
'''
Converts a Stream hierarchy into a :class:`~music21.midi.MidiFile` object.
>>> s = stream.Stream()
>>> n = note.Note('g#')
>>> n.quarterLength = 0.5
>>> s.repeatAppend(n, 4)
>>> mf = midi.translate.streamToMidiFile(s)
>>> mf.tracks[0].index # Track 0: conductor track
0
>>> len(mf.tracks[1].events) # Track 1: music track
22
From here, you can call mf.writestr() to get the actual file info.
>>> sc = scale.PhrygianScale('g')
>>> s = stream.Stream()
>>> x=[s.append(note.Note(sc.pitchFromDegree(i % 11), quarterLength=0.25)) for i in range(60)]
>>> mf = midi.translate.streamToMidiFile(s)
>>> #_DOCS_SHOW mf.open('/Volumes/disc/_scratch/midi.mid', 'wb')
>>> #_DOCS_SHOW mf.write()
>>> #_DOCS_SHOW mf.close()
'''
from music21 import midi as midiModule
s = inputM21
midiTracks = streamHierarchyToMidiTracks(s, addStartDelay=addStartDelay)
# may need to update channel information
mf = midiModule.MidiFile()
mf.tracks = midiTracks
mf.ticksPerQuarterNote = defaults.ticksPerQuarter
return mf
def midiFilePathToStream(
filePath,
inputM21=None,
**keywords
):
'''
Used by music21.converter:
Take in a file path (name of a file on disk) and using `midiFileToStream`,
return a :class:`~music21.stream.Score` object (or if inputM21 is passed in,
use that object instead).
Keywords to control quantization:
`quantizePost` controls whether to quantize the output. (Default: True)
`quarterLengthDivisors` allows for overriding the default quantization units
in defaults.quantizationQuarterLengthDivisors. (Default: (4, 3)).
>>> sfp = common.getSourceFilePath() #_DOCS_HIDE
>>> fp = str(sfp / 'midi' / 'testPrimitive' / 'test05.mid') #_DOCS_HIDE
>>> #_DOCS_SHOW fp = '/Users/test/music21/midi/testPrimitive/test05.mid'
>>> streamScore = midi.translate.midiFilePathToStream(fp)
>>> streamScore
<music21.stream.Score ...>
'''
from music21 import midi as midiModule
mf = midiModule.MidiFile()
mf.open(filePath)
mf.read()
mf.close()
return midiFileToStream(mf, inputM21, **keywords)
def midiAsciiStringToBinaryString(
midiFormat=1,
ticksPerQuarterNote=960,
tracksEventsList=None
) -> bytes:
r'''
Convert Ascii midi data to a bytes object (formerly binary midi string).
tracksEventsList contains a list of tracks which contain also a list of events.
asciiMidiEventList = ['0 90 27 66', '0 90 3e 60', '3840 80 27 00', '0 80 3e 00']
The format of one event is : 'aa bb cc dd'::
aa = delta time to last event (integer)
bb = Midi event type
cc = Note number (hex)
dd = Velocity (integer)
Example:
>>> asciiMidiEventList = []
>>> asciiMidiEventList.append('0 90 31 15')
>>> midiTrack = []
>>> midiTrack.append(asciiMidiEventList)
>>> midiBinaryBytes = midi.translate.midiAsciiStringToBinaryString(tracksEventsList=midiTrack)
>>> midiBinaryBytes
b'MThd\x00\x00\x00\x06\x00\x01\x00\x01\x03\xc0MTrk\x00\x00\x00\x04\x00\x901\x0f'
    Note that the name dates from pre-Python 3: there is no longer anything called a "binary string";
    the return value is in fact a bytes object.
'''
from music21 import midi as midiModule
mf = midiModule.MidiFile()
    numTracks = len(tracksEventsList) if tracksEventsList is not None else 0
if numTracks == 1:
mf.format = 1
else:
mf.format = midiFormat
mf.ticksPerQuarterNote = ticksPerQuarterNote
if tracksEventsList is not None:
for i in range(numTracks):
trk = midiModule.MidiTrack(i) # sets the MidiTrack index parameters
for j in tracksEventsList[i]:
me = midiModule.MidiEvent(trk)
dt = midiModule.DeltaTime(trk)
chunk_event_param = str(j).split(' ')
dt.channel = i + 1
dt.time = int(chunk_event_param[0])
me.channel = i + 1
me.pitch = int(chunk_event_param[2], 16)
me.velocity = int(chunk_event_param[3])
valid = False
if chunk_event_param[1] != 'FF':
if list(chunk_event_param[1])[0] == '8':
me.type = midiModule.ChannelVoiceMessages.NOTE_OFF
valid = True
elif list(chunk_event_param[1])[0] == '9':
valid = True
me.type = midiModule.ChannelVoiceMessages.NOTE_ON
else:
environLocal.warn(f'Unsupported midi event: 0x{chunk_event_param[1]}')
else:
environLocal.warn(f'Unsupported meta event: 0x{chunk_event_param[1]}')
if valid:
trk.events.append(dt)
trk.events.append(me)
mf.tracks.append(trk)
midiBinStr = b''
midiBinStr = midiBinStr + mf.writestr()
return midiBinStr
def midiStringToStream(strData, **keywords):
r'''
Convert a string of binary midi data to a Music21 stream.Score object.
Keywords to control quantization:
`quantizePost` controls whether to quantize the output. (Default: True)
`quarterLengthDivisors` allows for overriding the default quantization units
in defaults.quantizationQuarterLengthDivisors. (Default: (4, 3)).
N.B. -- this has been somewhat problematic, so use at your own risk.
>>> midiBinStr = (b'MThd\x00\x00\x00\x06\x00\x01\x00\x01\x04\x00'
... + b'MTrk\x00\x00\x00\x16\x00\xff\x03\x00\x00\xe0\x00@\x00'
... + b'\x90CZ\x88\x00\x80C\x00\x88\x00\xff/\x00')
>>> s = midi.translate.midiStringToStream(midiBinStr)
>>> s.show('text')
{0.0} <music21.stream.Part ...>
{0.0} <music21.note.Note G>
'''
from music21 import midi as midiModule
mf = midiModule.MidiFile()
# do not need to call open or close on MidiFile instance
mf.readstr(strData)
return midiFileToStream(mf, **keywords)
def midiFileToStream(
mf: 'music21.midi.MidiFile',
inputM21=None,
quantizePost=True,
**keywords
):
# noinspection PyShadowingNames
'''
Note: this is NOT the normal way to read a MIDI file. The best way is generally:
score = converter.parse('path/to/file.mid')
Convert a :class:`~music21.midi.MidiFile` object to a
:class:`~music21.stream.Stream` object.
The `inputM21` object can specify an existing Stream (or Stream subclass) to fill.
Keywords to control quantization:
`quantizePost` controls whether to quantize the output. (Default: True)
`quarterLengthDivisors` allows for overriding the default quantization units
in defaults.quantizationQuarterLengthDivisors. (Default: (4, 3)).
>>> import os
>>> fp = common.getSourceFilePath() / 'midi' / 'testPrimitive' / 'test05.mid'
>>> mf = midi.MidiFile()
>>> mf.open(fp)
>>> mf.read()
>>> mf.close()
>>> len(mf.tracks)
1
>>> s = midi.translate.midiFileToStream(mf)
>>> s
<music21.stream.Score ...>
>>> len(s.flat.notesAndRests)
11
'''
# environLocal.printDebug(['got midi file: tracks:', len(mf.tracks)])
if inputM21 is None:
s = stream.Score()
else:
s = inputM21
if not mf.tracks:
raise exceptions21.StreamException('no tracks are defined in this MIDI file.')
if 'quantizePost' in keywords:
quantizePost = keywords.pop('quantizePost')
    # create a stream for each track
# may need to check if tracks actually have event data
midiTracksToStreams(mf.tracks,
ticksPerQuarter=mf.ticksPerQuarterNote,
quantizePost=quantizePost,
inputM21=s,
**keywords)
# s._setMidiTracks(mf.tracks, mf.ticksPerQuarterNote)
return s
# ------------------------------------------------------------------------------
class Test(unittest.TestCase):
def testMidiAsciiStringToBinaryString(self):
from binascii import a2b_hex
asciiMidiEventList = []
asciiMidiEventList.append('0 90 1f 15')
# asciiMidiEventList.append('3840 80 1f 15')
# asciiMidiEventList.append('0 b0 7b 00')
# asciiMidiEventList = ['0 90 27 66', '3840 80 27 00']
# asciiMidiEventList = ['0 90 27 66', '0 90 3e 60', '3840 80 27 00', '0 80 3e 00',
# '0 90 3b 60', '960 80 3b 00', '0 90 41 60', '960 80 41 00', '0 90 3e 60',
# '1920 80 3e 00', '0 b0 7b 00', '0 90 24 60', '3840 80 24 00', '0 b0 7b 00']
# asciiMidiEventList = ['0 90 27 66', '0 90 3e 60', '3840 80 27 00', '0 80 3e 00',
# '0 90 3b 60', '960 80 3b 00', '0 90 41 60', '960 80 41 00',
# '0 90 3e 60', '1920 80 3e 00', '0 90 24 60', '3840 80 24 00']
midiTrack = []
midiTrack.append(asciiMidiEventList)
# midiTrack.append(asciiMidiEventList)
# midiTrack.append(asciiMidiEventList)
midiBinStr = midiAsciiStringToBinaryString(tracksEventsList=midiTrack)
self.assertEqual(midiBinStr,
b'MThd' + a2b_hex('000000060001000103c0')
+ b'MTrk' + a2b_hex('0000000400901f0f'))
def testNote(self):
from music21 import midi as midiModule
n1 = note.Note('A4')
n1.quarterLength = 2.0
eventList = noteToMidiEvents(n1)
self.assertEqual(len(eventList), 4)
self.assertIsInstance(eventList[0], midiModule.DeltaTime)
self.assertIsInstance(eventList[2], midiModule.DeltaTime)
# translate eventList back to a note
n2 = midiEventsToNote(eventList)
self.assertEqual(n2.pitch.nameWithOctave, 'A4')
self.assertEqual(n2.quarterLength, 2.0)
def testStripTies(self):
from music21.midi import ChannelVoiceMessages
from music21 import tie
# Stream without measures
s = stream.Stream()
n = note.Note('C4', quarterLength=1.0)
n.tie = tie.Tie('start')
n2 = note.Note('C4', quarterLength=1.0)
n2.tie = tie.Tie('stop')
n3 = note.Note('C4', quarterLength=1.0)
n4 = note.Note('C4', quarterLength=1.0)
s.append([n, n2, n3, n4])
trk = streamHierarchyToMidiTracks(s)[1]
mt1noteOnOffEventTypes = [event.type for event in trk.events if event.type in (
ChannelVoiceMessages.NOTE_ON, ChannelVoiceMessages.NOTE_OFF)]
# Expected result: three pairs of NOTE_ON, NOTE_OFF messages
# https://github.com/cuthbertLab/music21/issues/266
self.assertListEqual(mt1noteOnOffEventTypes,
[ChannelVoiceMessages.NOTE_ON, ChannelVoiceMessages.NOTE_OFF] * 3)
# Stream with measures
s.makeMeasures(inPlace=True)
trk = streamHierarchyToMidiTracks(s)[1]
mt2noteOnOffEventTypes = [event.type for event in trk.events if event.type in (
ChannelVoiceMessages.NOTE_ON, ChannelVoiceMessages.NOTE_OFF)]
self.assertListEqual(mt2noteOnOffEventTypes,
[ChannelVoiceMessages.NOTE_ON, ChannelVoiceMessages.NOTE_OFF] * 3)
def testTimeSignature(self):
from music21 import meter
n = note.Note()
n.quarterLength = 0.5
s = stream.Stream()
for i in range(20):
s.append(copy.deepcopy(n))
s.insert(0, meter.TimeSignature('3/4'))
s.insert(3, meter.TimeSignature('5/4'))
s.insert(8, meter.TimeSignature('2/4'))
mt = streamHierarchyToMidiTracks(s)[0]
# self.assertEqual(str(mt.events), match)
self.assertEqual(len(mt.events), 10)
# s.show('midi')
# get and compare just the conductor tracks
# mtAlt = streamHierarchyToMidiTracks(s.getElementsByClass('TimeSignature').stream())[0]
conductorEvents = repr(mt.events)
match = '''[<MidiEvent DeltaTime, t=0, track=0, channel=None>,
<MidiEvent SET_TEMPO, t=0, track=0, channel=None, data=b'\\x07\\xa1 '>,
<MidiEvent DeltaTime, t=0, track=0, channel=None>,
<MidiEvent TIME_SIGNATURE, t=0, track=0, channel=None, data=b'\\x03\\x02\\x18\\x08'>,
<MidiEvent DeltaTime, t=3072, track=0, channel=None>,
<MidiEvent TIME_SIGNATURE, t=0, track=0, channel=None, data=b'\\x05\\x02\\x18\\x08'>,
<MidiEvent DeltaTime, t=5120, track=0, channel=None>,
<MidiEvent TIME_SIGNATURE, t=0, track=0, channel=None, data=b'\\x02\\x02\\x18\\x08'>,
<MidiEvent DeltaTime, t=1024, track=0, channel=None>,
<MidiEvent END_OF_TRACK, t=0, track=0, channel=None, data=b''>]'''
self.assertTrue(common.whitespaceEqual(conductorEvents, match), conductorEvents)
def testKeySignature(self):
from music21 import meter, key
n = note.Note()
n.quarterLength = 0.5
s = stream.Stream()
for i in range(20):
s.append(copy.deepcopy(n))
s.insert(0, meter.TimeSignature('3/4'))
s.insert(3, meter.TimeSignature('5/4'))
s.insert(8, meter.TimeSignature('2/4'))
s.insert(0, key.KeySignature(4))
s.insert(3, key.KeySignature(-5))
s.insert(8, key.KeySignature(6))
conductor = streamHierarchyToMidiTracks(s)[0]
self.assertEqual(len(conductor.events), 16)
# s.show('midi')
def testChannelAllocation(self):
# test instrument assignments
from music21 import instrument
from music21.midi import translate
iList = [instrument.Harpsichord,
instrument.Viola,
instrument.ElectricGuitar,
instrument.Flute]
iObjs = []
s = stream.Score()
for i, instClass in enumerate(iList):
p = stream.Part()
inst = instClass()
iObjs.append(inst)
p.insert(0, inst) # must call instrument to create instance
p.append(note.Note('C#'))
s.insert(0, p)
channelByInstrument, channelsDynamic = translate.channelInstrumentData(s)
self.assertEqual(channelByInstrument.keys(), set(inst.midiProgram for inst in iObjs))
self.assertSetEqual(set(channelByInstrument.values()), {1, 2, 3, 4})
self.assertListEqual(channelsDynamic, [5, 6, 7, 8, 9, 11, 12, 13, 14, 15, 16])
def testPacketStorage(self):
# test instrument assignments
from music21 import instrument
from music21.midi import translate
iList = [None, # conductor track
instrument.Harpsichord,
instrument.Viola,
instrument.ElectricGuitar,
instrument.Flute,
None]
iObjs = []
substreamList = []
for i, instClass in enumerate(iList):
p = stream.Part()
if instClass is not None:
inst = instClass()
iObjs.append(inst)
p.insert(0, inst) # must call instrument to create instance
if i != 0:
p.append(note.Note('C#'))
substreamList.append(p)
packetStorage = translate.packetStorageFromSubstreamList(substreamList, addStartDelay=False)
self.assertIsInstance(packetStorage, dict)
self.assertEqual(list(packetStorage.keys()), [0, 1, 2, 3, 4, 5])
harpsPacket = packetStorage[1]
self.assertIsInstance(harpsPacket, dict)
self.assertSetEqual(set(harpsPacket.keys()),
{'rawPackets', 'initInstrument'})
self.assertIs(harpsPacket['initInstrument'], iObjs[0])
self.assertIsInstance(harpsPacket['rawPackets'], list)
self.assertTrue(harpsPacket['rawPackets'])
self.assertIsInstance(harpsPacket['rawPackets'][0], dict)
channelInfo = {
iObjs[0].midiProgram: 1,
iObjs[1].midiProgram: 2,
iObjs[2].midiProgram: 3,
iObjs[3].midiProgram: 4,
None: 5,
}
translate.updatePacketStorageWithChannelInfo(packetStorage, channelInfo)
self.assertSetEqual(set(harpsPacket.keys()),
{'rawPackets', 'initInstrument', 'initChannel'})
self.assertEqual(harpsPacket['initChannel'], 1)
self.assertEqual(harpsPacket['rawPackets'][-1]['initChannel'], 1)
def testAnacrusisTiming(self):
from music21 import corpus
s = corpus.parse('bach/bwv103.6')
# get just the soprano part
soprano = s.parts['soprano']
mts = streamHierarchyToMidiTracks(soprano)[1] # get one
        # first note-on is not delayed, even with an anacrusis
match = '''
[<MidiEvent DeltaTime, t=0, track=1, channel=1>,
<MidiEvent SEQUENCE_TRACK_NAME, t=0, track=1, channel=1, data=b'Soprano'>,
<MidiEvent DeltaTime, t=0, track=1, channel=1>,
<MidiEvent PITCH_BEND, t=0, track=1, channel=1, parameter1=0, parameter2=64>,
<MidiEvent DeltaTime, t=0, track=1, channel=1>]'''
self.maxDiff = None
found = str(mts.events[:5])
self.assertTrue(common.whitespaceEqual(found, match), found)
        # the same holds for the Alto part: the first note-on is not delayed, even with an anacrusis
match = '''
[<MidiEvent DeltaTime, t=0, track=1, channel=1>,
<MidiEvent SEQUENCE_TRACK_NAME, t=0, track=1, channel=1, data=b'Alto'>,
<MidiEvent DeltaTime, t=0, track=1, channel=1>,
<MidiEvent PITCH_BEND, t=0, track=1, channel=1, parameter1=0, parameter2=64>,
<MidiEvent DeltaTime, t=0, track=1, channel=1>,
<MidiEvent PROGRAM_CHANGE, t=0, track=1, channel=1, data=0>,
<MidiEvent DeltaTime, t=0, track=1, channel=1>,
<MidiEvent NOTE_ON, t=0, track=1, channel=1, pitch=62, velocity=90>]'''
alto = s.parts['alto']
mta = streamHierarchyToMidiTracks(alto)[1]
found = str(mta.events[:8])
self.assertTrue(common.whitespaceEqual(found, match), found)
# try streams to midi tracks
# get just the soprano part
soprano = s.parts['soprano']
mtList = streamHierarchyToMidiTracks(soprano)
self.assertEqual(len(mtList), 2)
# it's the same as before
match = '''[<MidiEvent DeltaTime, t=0, track=1, channel=1>,
<MidiEvent SEQUENCE_TRACK_NAME, t=0, track=1, channel=1, data=b'Soprano'>,
<MidiEvent DeltaTime, t=0, track=1, channel=1>,
<MidiEvent PITCH_BEND, t=0, track=1, channel=1, parameter1=0, parameter2=64>,
<MidiEvent DeltaTime, t=0, track=1, channel=1>,
<MidiEvent PROGRAM_CHANGE, t=0, track=1, channel=1, data=0>,
<MidiEvent DeltaTime, t=0, track=1, channel=1>,
<MidiEvent NOTE_ON, t=0, track=1, channel=1, pitch=66, velocity=90>,
<MidiEvent DeltaTime, t=512, track=1, channel=1>,
<MidiEvent NOTE_OFF, t=0, track=1, channel=1, pitch=66, velocity=0>]'''
found = str(mtList[1].events[:10])
self.assertTrue(common.whitespaceEqual(found, match), found)
def testMidiProgramChangeA(self):
from music21 import instrument
p1 = stream.Part()
p1.append(instrument.Dulcimer())
p1.repeatAppend(note.Note('g6', quarterLength=1.5), 4)
p2 = stream.Part()
p2.append(instrument.Tuba())
p2.repeatAppend(note.Note('c1', quarterLength=2), 2)
p3 = stream.Part()
p3.append(instrument.TubularBells())
p3.repeatAppend(note.Note('e4', quarterLength=1), 4)
s = stream.Score()
s.insert(0, p1)
s.insert(0, p2)
s.insert(0, p3)
unused_mts = streamHierarchyToMidiTracks(s)
# p1.show()
# s.show('midi')
def testMidiProgramChangeB(self):
from music21 import instrument, scale
import random
iList = [instrument.Harpsichord,
instrument.Clavichord, instrument.Accordion,
instrument.Celesta, instrument.Contrabass, instrument.Viola,
instrument.Harp, instrument.ElectricGuitar, instrument.Ukulele,
instrument.Banjo, instrument.Piccolo, instrument.AltoSaxophone,
instrument.Trumpet]
sc = scale.MinorScale()
pitches = sc.getPitches('c2', 'c5')
random.shuffle(pitches)
s = stream.Stream()
for i in range(30):
n = note.Note(pitches[i % len(pitches)])
n.quarterLength = 0.5
inst = iList[i % len(iList)]() # call to create instance
s.append(inst)
s.append(n)
unused_mts = streamHierarchyToMidiTracks(s)
# s.show('midi')
def testOverlappedEventsA(self):
from music21 import corpus
s = corpus.parse('bwv66.6')
sFlat = s.flat
mtList = streamHierarchyToMidiTracks(sFlat)
self.assertEqual(len(mtList), 2)
# it's the same as before
match = '''[<MidiEvent NOTE_ON, t=0, track=1, channel=1, pitch=66, velocity=90>,
<MidiEvent DeltaTime, t=0, track=1, channel=1>,
<MidiEvent NOTE_ON, t=0, track=1, channel=1, pitch=61, velocity=90>,
<MidiEvent DeltaTime, t=0, track=1, channel=1>,
<MidiEvent NOTE_ON, t=0, track=1, channel=1, pitch=58, velocity=90>,
<MidiEvent DeltaTime, t=0, track=1, channel=1>,
<MidiEvent NOTE_ON, t=0, track=1, channel=1, pitch=54, velocity=90>,
<MidiEvent DeltaTime, t=1024, track=1, channel=1>,
<MidiEvent NOTE_OFF, t=0, track=1, channel=1, pitch=66, velocity=0>,
<MidiEvent DeltaTime, t=0, track=1, channel=1>,
<MidiEvent NOTE_OFF, t=0, track=1, channel=1, pitch=61, velocity=0>,
<MidiEvent DeltaTime, t=0, track=1, channel=1>,
<MidiEvent NOTE_OFF, t=0, track=1, channel=1, pitch=58, velocity=0>,
<MidiEvent DeltaTime, t=0, track=1, channel=1>,
<MidiEvent NOTE_OFF, t=0, track=1, channel=1, pitch=54, velocity=0>,
<MidiEvent DeltaTime, t=1024, track=1, channel=1>,
<MidiEvent END_OF_TRACK, t=0, track=1, channel=1, data=b''>]'''
results = str(mtList[1].events[-17:])
self.assertTrue(common.whitespaceEqual(results, match), results)
def testOverlappedEventsB(self):
from music21 import scale
import random
sc = scale.MajorScale()
pitches = sc.getPitches('c2', 'c5')
random.shuffle(pitches)
dur = 16
step = 0.5
o = 0
s = stream.Stream()
for p in pitches:
n = note.Note(p)
n.quarterLength = dur - o
s.insert(o, n)
o = o + step
unused_mt = streamHierarchyToMidiTracks(s)[0]
# s.plot('pianoroll')
# s.show('midi')
def testOverlappedEventsC(self):
from music21 import meter, key
s = stream.Stream()
s.insert(key.KeySignature(3))
s.insert(meter.TimeSignature('2/4'))
s.insert(0, note.Note('c'))
n = note.Note('g')
n.pitch.microtone = 25
s.insert(0, n)
c = chord.Chord(['d', 'f', 'a'], type='half')
c.pitches[1].microtone = -50
s.append(c)
pos = s.highestTime
s.insert(pos, note.Note('e'))
s.insert(pos, note.Note('b'))
unused_mt = streamHierarchyToMidiTracks(s)[0]
# s.show('midi')
def testExternalMidiProgramChangeB(self):
from music21 import instrument, scale
iList = [instrument.Harpsichord, instrument.Clavichord, instrument.Accordion,
instrument.Celesta, instrument.Contrabass, instrument.Viola,
instrument.Harp, instrument.ElectricGuitar, instrument.Ukulele,
instrument.Banjo, instrument.Piccolo, instrument.AltoSaxophone,
instrument.Trumpet, instrument.Clarinet, instrument.Flute,
instrument.Violin, instrument.Soprano, instrument.Oboe,
instrument.Tuba, instrument.Sitar, instrument.Ocarina,
instrument.Piano]
sc = scale.MajorScale()
pitches = sc.getPitches('c2', 'c5')
# random.shuffle(pitches)
s = stream.Stream()
for i, p in enumerate(pitches):
n = note.Note(p)
n.quarterLength = 1.5
inst = iList[i]() # call to create instance
s.append(inst)
s.append(n)
unused_mts = streamHierarchyToMidiTracks(s)
# s.show('midi')
def testMicrotonalOutputA(self):
s = stream.Stream()
s.append(note.Note('c4', type='whole'))
s.append(note.Note('c~4', type='whole'))
s.append(note.Note('c#4', type='whole'))
s.append(note.Note('c#~4', type='whole'))
s.append(note.Note('d4', type='whole'))
# mts = streamHierarchyToMidiTracks(s)
s.insert(0, note.Note('g3', quarterLength=10))
unused_mts = streamHierarchyToMidiTracks(s)
def testMicrotonalOutputB(self):
# a two-part stream
from music21.midi import translate
p1 = stream.Part()
p1.append(note.Note('c4', type='whole'))
p1.append(note.Note('c~4', type='whole'))
p1.append(note.Note('c#4', type='whole'))
p1.append(note.Note('c#~4', type='whole'))
p1.append(note.Note('d4', type='whole'))
# mts = translate.streamHierarchyToMidiTracks(s)
p2 = stream.Part()
p2.insert(0, note.Note('g2', quarterLength=20))
# order here matters: this needs to be fixed
s = stream.Score()
s.insert(0, p1)
s.insert(0, p2)
mts = translate.streamHierarchyToMidiTracks(s)
self.assertEqual(mts[1].getChannels(), [1])
self.assertEqual(mts[2].getChannels(), [1, 2])
# print(mts)
# s.show('midi')
# recreate with different order
s = stream.Score()
s.insert(0, p2)
s.insert(0, p1)
mts = translate.streamHierarchyToMidiTracks(s)
self.assertEqual(mts[1].getChannels(), [1])
self.assertEqual(mts[2].getChannels(), [1, 2])
def testInstrumentAssignments(self):
# test instrument assignments
from music21 import instrument
iList = [instrument.Harpsichord,
instrument.Viola,
instrument.ElectricGuitar,
instrument.Flute]
# number of notes, ql, pitch
params = [(8, 1, 'C6'),
(4, 2, 'G3'),
(2, 4, 'E4'),
(6, 1.25, 'C5')]
s = stream.Score()
for i, inst in enumerate(iList):
p = stream.Part()
p.insert(0, inst()) # must call instrument to create instance
number, ql, pitchName = params[i]
for j in range(number):
p.append(note.Note(pitchName, quarterLength=ql))
s.insert(0, p)
# s.show('midi')
mts = streamHierarchyToMidiTracks(s)
# print(mts[0])
self.assertEqual(mts[0].getChannels(), []) # Conductor track
self.assertEqual(mts[1].getChannels(), [1])
self.assertEqual(mts[2].getChannels(), [2])
self.assertEqual(mts[3].getChannels(), [3])
self.assertEqual(mts[4].getChannels(), [4])
def testMicrotonalOutputD(self):
# test instrument assignments with microtones
from music21 import instrument
from music21.midi import translate
iList = [instrument.Harpsichord,
instrument.Viola,
instrument.ElectricGuitar,
instrument.Flute
]
# number of notes, ql, pitch
params = [(8, 1, ['C6']),
(4, 2, ['G3', 'G~3']),
(2, 4, ['E4', 'E5']),
(6, 1.25, ['C5'])]
s = stream.Score()
for i, inst in enumerate(iList):
p = stream.Part()
p.insert(0, inst()) # must call instrument to create instance
number, ql, pitchNameList = params[i]
for j in range(number):
p.append(note.Note(pitchNameList[j % len(pitchNameList)], quarterLength=ql))
s.insert(0, p)
# s.show('midi')
mts = translate.streamHierarchyToMidiTracks(s)
# print(mts[1])
self.assertEqual(mts[1].getChannels(), [1])
self.assertEqual(mts[1].getProgramChanges(), [6]) # 6 = GM Harpsichord
self.assertEqual(mts[2].getChannels(), [2, 5])
self.assertEqual(mts[2].getProgramChanges(), [41]) # 41 = GM Viola
self.assertEqual(mts[3].getChannels(), [3, 6])
self.assertEqual(mts[3].getProgramChanges(), [26]) # 26 = GM ElectricGuitar
# print(mts[3])
self.assertEqual(mts[4].getChannels(), [4, 6])
self.assertEqual(mts[4].getProgramChanges(), [73]) # 73 = GM Flute
# s.show('midi')
def testMicrotonalOutputE(self):
from music21 import corpus, interval
s = corpus.parse('bwv66.6')
p1 = s.parts[0]
p2 = copy.deepcopy(p1)
t = interval.Interval(0.5) # half sharp
p2.transpose(t, inPlace=True, classFilterList=('Note', 'Chord'))
post = stream.Score()
post.insert(0, p1)
post.insert(0, p2)
# post.show('midi')
mts = streamHierarchyToMidiTracks(post)
self.assertEqual(mts[1].getChannels(), [1])
self.assertEqual(mts[1].getProgramChanges(), [0])
self.assertEqual(mts[2].getChannels(), [1, 2])
self.assertEqual(mts[2].getProgramChanges(), [0])
# post.show('midi', app='Logic Express')
def testMicrotonalOutputF(self):
from music21 import corpus, interval
s = corpus.parse('bwv66.6')
p1 = s.parts[0]
p2 = copy.deepcopy(p1)
p3 = copy.deepcopy(p1)
t1 = interval.Interval(12.5) # octave + half sharp
t2 = interval.Interval(-12.25) # octave down minus 1/8th tone
p2.transpose(t1, inPlace=True, classFilterList=('Note', 'Chord'))
p3.transpose(t2, inPlace=True, classFilterList=('Note', 'Chord'))
post = stream.Score()
post.insert(0, p1)
post.insert(0, p2)
post.insert(0, p3)
# post.show('midi')
mts = streamHierarchyToMidiTracks(post)
self.assertEqual(mts[1].getChannels(), [1])
self.assertEqual(mts[1].getProgramChanges(), [0])
self.assertEqual(mts[2].getChannels(), [1, 2])
self.assertEqual(mts[2].getProgramChanges(), [0])
self.assertEqual(mts[3].getChannels(), [1, 3])
self.assertEqual(mts[3].getProgramChanges(), [0])
# post.show('midi', app='Logic Express')
def testMicrotonalOutputG(self):
from music21 import corpus, interval, instrument
s = corpus.parse('bwv66.6')
p1 = s.parts[0]
p1.remove(p1.getElementsByClass('Instrument')[0])
p2 = copy.deepcopy(p1)
p3 = copy.deepcopy(p1)
        t1 = interval.Interval(12.5)  # octave + half sharp
        t2 = interval.Interval(-7.25)  # a perfect fifth down minus 1/8th tone
p2.transpose(t1, inPlace=True, classFilterList=('Note', 'Chord'))
p3.transpose(t2, inPlace=True, classFilterList=('Note', 'Chord'))
post = stream.Score()
p1.insert(0, instrument.Dulcimer())
post.insert(0, p1)
p2.insert(0, instrument.Trumpet())
post.insert(0.125, p2)
p3.insert(0, instrument.ElectricGuitar())
post.insert(0.25, p3)
# post.show('midi')
mts = streamHierarchyToMidiTracks(post)
self.assertEqual(mts[1].getChannels(), [1])
self.assertEqual(mts[1].getProgramChanges(), [15])
self.assertEqual(mts[2].getChannels(), [2, 4])
self.assertEqual(mts[2].getProgramChanges(), [56])
# print(mts[3])
self.assertEqual(mts[3].getChannels(), [3, 5])
self.assertEqual(mts[3].getProgramChanges(), [26])
# post.show('midi')#, app='Logic Express')
def testMidiTempoImportA(self):
from music21 import converter
dirLib = common.getSourceFilePath() / 'midi' / 'testPrimitive'
# a simple file created in athenacl
fp = dirLib / 'test10.mid'
s = converter.parse(fp)
mmStream = s.flat.getElementsByClass('MetronomeMark')
self.assertEqual(len(mmStream), 4)
self.assertEqual(mmStream[0].number, 120.0)
self.assertEqual(mmStream[1].number, 110.0)
self.assertEqual(mmStream[2].number, 90.0)
self.assertEqual(mmStream[3].number, 60.0)
fp = dirLib / 'test06.mid'
s = converter.parse(fp)
mmStream = s.flat.getElementsByClass('MetronomeMark')
self.assertEqual(len(mmStream), 1)
self.assertEqual(mmStream[0].number, 120.0)
fp = dirLib / 'test07.mid'
s = converter.parse(fp)
mmStream = s.flat.getElementsByClass('MetronomeMark')
self.assertEqual(len(mmStream), 1)
self.assertEqual(mmStream[0].number, 180.0)
def testMidiTempoImportB(self):
from music21 import converter
dirLib = common.getSourceFilePath() / 'midi' / 'testPrimitive'
# a file with three tracks and one conductor track with four tempo marks
fp = dirLib / 'test11.mid'
s = converter.parse(fp)
self.assertEqual(len(s.parts), 3)
# metronome marks propagate to every staff, but are hidden on subsequent staffs
self.assertEqual(
[mm.numberImplicit for mm in s.parts[0].getElementsByClass('MetronomeMark')],
[False, False, False, False]
)
self.assertEqual(
[mm.numberImplicit for mm in s.parts[1].getElementsByClass('MetronomeMark')],
[True, True, True, True]
)
self.assertEqual(
[mm.numberImplicit for mm in s.parts[2].getElementsByClass('MetronomeMark')],
[True, True, True, True]
)
def testMidiExportConductorA(self):
'''Export conductor data to MIDI conductor track.'''
from music21 import meter, tempo
p1 = stream.Part()
p1.repeatAppend(note.Note('c4'), 12)
p1.insert(0, meter.TimeSignature('3/4'))
p1.insert(0, tempo.MetronomeMark(number=90))
p1.insert(6, tempo.MetronomeMark(number=30))
p2 = stream.Part()
p2.repeatAppend(note.Note('g4'), 12)
p2.insert(6, meter.TimeSignature('6/4'))
s = stream.Score()
s.insert([0, p1, 0, p2])
mts = streamHierarchyToMidiTracks(s)
self.assertEqual(len(mts), 3)
# Tempo and time signature should be in conductor track only
condTrkRepr = repr(mts[0].events)
self.assertEqual(condTrkRepr.count('SET_TEMPO'), 2)
self.assertEqual(condTrkRepr.count('TIME_SIGNATURE'), 2)
musicTrkRepr = repr(mts[1].events)
self.assertEqual(musicTrkRepr.find('SET_TEMPO'), -1)
self.assertEqual(musicTrkRepr.find('TIME_SIGNATURE'), -1)
# s.show('midi')
# s.show('midi', app='Logic Express')
def testMidiExportConductorB(self):
from music21 import tempo, corpus
s = corpus.parse('bwv66.6')
s.insert(0, tempo.MetronomeMark(number=240))
s.insert(4, tempo.MetronomeMark(number=30))
s.insert(6, tempo.MetronomeMark(number=120))
s.insert(8, tempo.MetronomeMark(number=90))
s.insert(12, tempo.MetronomeMark(number=360))
# s.show('midi')
mts = streamHierarchyToMidiTracks(s)
condTrkRepr = repr(mts[0].events)
self.assertEqual(condTrkRepr.count('SET_TEMPO'), 5)
musicTrkRepr = repr(mts[1].events)
self.assertEqual(musicTrkRepr.count('SET_TEMPO'), 0)
def testMidiExportConductorC(self):
from music21 import tempo
minTempo = 60
maxTempo = 600
period = 50
s = stream.Stream()
for i in range(100):
scalar = (math.sin(i * (math.pi * 2) / period) + 1) * 0.5
n = ((maxTempo - minTempo) * scalar) + minTempo
s.append(tempo.MetronomeMark(number=n))
s.append(note.Note('g3'))
mts = streamHierarchyToMidiTracks(s)
self.assertEqual(len(mts), 2)
mtsRepr = repr(mts[0].events)
self.assertEqual(mtsRepr.count('SET_TEMPO'), 100)
def testMidiExportConductorD(self):
'''120 bpm and 4/4 are supplied by default.'''
s = stream.Stream()
s.insert(note.Note())
mts = streamHierarchyToMidiTracks(s)
self.assertEqual(len(mts), 2)
condTrkRepr = repr(mts[0].events)
self.assertEqual(condTrkRepr.count('SET_TEMPO'), 1)
self.assertEqual(condTrkRepr.count('TIME_SIGNATURE'), 1)
# No pitch bend events in conductor track
self.assertEqual(condTrkRepr.count('PITCH_BEND'), 0)
def testMidiExportConductorE(self):
'''The conductor only gets the first element at an offset.'''
from music21 import converter, tempo, key
s = stream.Stream()
p1 = converter.parse('tinynotation: c1')
p2 = converter.parse('tinynotation: d2 d2')
p1.insert(0, tempo.MetronomeMark(number=44))
p2.insert(0, tempo.MetronomeMark(number=144))
p2.insert(2, key.KeySignature(-5))
s.insert(0, p1)
s.insert(0, p2)
conductor = conductorStream(s)
tempos = conductor.getElementsByClass('MetronomeMark')
keySignatures = conductor.getElementsByClass('KeySignature')
self.assertEqual(len(tempos), 1)
self.assertEqual(tempos[0].number, 44)
self.assertEqual(len(keySignatures), 1)
def testMidiExportVelocityA(self):
s = stream.Stream()
for i in range(10):
# print(i)
n = note.Note('c3')
n.volume.velocityScalar = i / 10
n.volume.velocityIsRelative = False
s.append(n)
# s.show('midi')
mts = streamHierarchyToMidiTracks(s)
mtsRepr = repr(mts[1].events)
self.assertEqual(mtsRepr.count('velocity=114'), 1)
self.assertEqual(mtsRepr.count('velocity=13'), 1)
def testMidiExportVelocityB(self):
import random
from music21 import volume
s1 = stream.Stream()
shift = [0, 6, 12]
amps = [(x / 10. + 0.4) for x in range(6)]
amps = amps + list(reversed(amps))
qlList = [1.5] * 6 + [1] * 8 + [2] * 6 + [1.5] * 8 + [1] * 4
for j, ql in enumerate(qlList):
if random.random() > 0.6:
c = note.Rest()
else:
c = chord.Chord(['c3', 'd-4', 'g5'])
vChord = []
for i, unused_cSub in enumerate(c):
v = volume.Volume()
v.velocityScalar = amps[(j + shift[i]) % len(amps)]
v.velocityIsRelative = False
vChord.append(v)
c.volume = vChord # can set to list
c.duration.quarterLength = ql
s1.append(c)
s2 = stream.Stream()
random.shuffle(qlList)
random.shuffle(amps)
for j, ql in enumerate(qlList):
n = note.Note(random.choice(['f#2', 'f#2', 'e-2']))
n.duration.quarterLength = ql
n.volume.velocityScalar = amps[j % len(amps)]
s2.append(n)
s = stream.Score()
s.insert(0, s1)
s.insert(0, s2)
mts = streamHierarchyToMidiTracks(s)
mtsRepr = repr(mts[0].events) + repr(mts[1].events)
self.assertGreater(mtsRepr.count('velocity=51'), 2)
self.assertGreater(mtsRepr.count('velocity=102'), 2)
# s.show('midi')
def testImportTruncationProblemA(self):
from music21 import converter
# specialized problem of not importing last notes
dirLib = common.getSourceFilePath() / 'midi' / 'testPrimitive'
fp = dirLib / 'test12.mid'
s = converter.parse(fp)
self.assertEqual(len(s.parts[0].flat.notes), 3)
self.assertEqual(len(s.parts[1].flat.notes), 3)
self.assertEqual(len(s.parts[2].flat.notes), 3)
self.assertEqual(len(s.parts[3].flat.notes), 3)
# s.show('t')
# s.show('midi')
def testImportChordVoiceA(self):
# looking at cases where notes appear to be chord but
# are better seen as voices
from music21 import converter
# specialized problem of not importing last notes
dirLib = common.getSourceFilePath() / 'midi' / 'testPrimitive'
fp = dirLib / 'test13.mid'
s = converter.parse(fp)
# s.show('t')
self.assertEqual(len(s.flat.notes), 7)
# s.show('midi')
fp = dirLib / 'test14.mid'
s = converter.parse(fp)
# three chords will be created, as well as two voices
self.assertEqual(len(s.flat.getElementsByClass('Chord')), 3)
self.assertEqual(len(s.parts[0].voices), 2)
def testImportChordsA(self):
from music21 import converter
dirLib = common.getSourceFilePath() / 'midi' / 'testPrimitive'
fp = dirLib / 'test05.mid'
# a simple file created in athenacl
s = converter.parse(fp)
# s.show('t')
self.assertEqual(len(s.flat.getElementsByClass('Chord')), 4)
def testMidiEventsImported(self):
self.maxDiff = None
from music21 import corpus
def procCompare(mf_inner, match_inner):
triples = []
for i in range(2):
for j in range(0, len(mf_inner.tracks[i].events), 2):
d = mf_inner.tracks[i].events[j] # delta
e = mf_inner.tracks[i].events[j + 1] # events
triples.append((d.time, e.type.name, e.pitch))
self.assertEqual(triples, match_inner)
s = corpus.parse('bach/bwv66.6')
part = s.parts[0].measures(6, 9) # last measures
# part.show('musicxml')
# part.show('midi')
mf = streamToMidiFile(part)
match = [(0, 'KEY_SIGNATURE', None), # Conductor track
(0, 'TIME_SIGNATURE', None),
(0, 'SET_TEMPO', None),
(1024, 'END_OF_TRACK', None),
(0, 'SEQUENCE_TRACK_NAME', None), # Music track
(0, 'PROGRAM_CHANGE', None),
(0, 'PITCH_BEND', None),
(0, 'PROGRAM_CHANGE', None),
(0, 'NOTE_ON', 69),
(1024, 'NOTE_OFF', 69),
(0, 'NOTE_ON', 71),
(1024, 'NOTE_OFF', 71),
(0, 'NOTE_ON', 73),
(1024, 'NOTE_OFF', 73),
(0, 'NOTE_ON', 69),
(1024, 'NOTE_OFF', 69),
(0, 'NOTE_ON', 68),
(1024, 'NOTE_OFF', 68),
(0, 'NOTE_ON', 66),
(1024, 'NOTE_OFF', 66),
(0, 'NOTE_ON', 68),
(2048, 'NOTE_OFF', 68),
(0, 'NOTE_ON', 66),
(2048, 'NOTE_OFF', 66),
(0, 'NOTE_ON', 66),
(1024, 'NOTE_OFF', 66),
(0, 'NOTE_ON', 66),
(2048, 'NOTE_OFF', 66),
(0, 'NOTE_ON', 66),
(512, 'NOTE_OFF', 66),
(0, 'NOTE_ON', 65),
(512, 'NOTE_OFF', 65),
(0, 'NOTE_ON', 66),
(1024, 'NOTE_OFF', 66),
(1024, 'END_OF_TRACK', None)]
procCompare(mf, match)
def testMidiInstrumentToStream(self):
from music21 import converter
from music21 import instrument
from music21.musicxml import testPrimitive
s = converter.parse(testPrimitive.transposing01)
mf = streamToMidiFile(s)
out = midiFileToStream(mf)
instruments = out.parts[0].getElementsByClass('Instrument')
self.assertIsInstance(instruments[0], instrument.Oboe)
self.assertEqual(instruments[0].quarterLength, 0)
# Unrecognized instrument 'a'
dirLib = common.getSourceFilePath() / 'midi' / 'testPrimitive'
fp = dirLib / 'test15.mid'
s2 = converter.parse(fp)
self.assertEqual(s2.parts[0].partName, 'a')
def testImportZeroDurationNote(self):
'''
        MuseScore places zero duration notes in multiple voice scenarios
to represent double stemmed notes. Avoid false positives for extra voices.
https://github.com/cuthbertLab/music21/issues/600
'''
from music21 import converter
dirLib = common.getSourceFilePath() / 'midi' / 'testPrimitive'
fp = dirLib / 'test16.mid'
s = converter.parse(fp)
self.assertEqual(len(s.parts[0].voices), 2)
els = s.parts[0].flat.getElementsByOffset(0.5)
self.assertSequenceEqual([e.duration.quarterLength for e in els], [0, 1])
def testRepeatsExpanded(self):
from music21 import converter
from music21.musicxml import testPrimitive
s = converter.parse(testPrimitive.repeatBracketsA)
num_notes_before = len(s.flat.notes)
prepared = prepareStreamForMidi(s)
num_notes_after = len(prepared.flat.notes)
self.assertGreater(num_notes_after, num_notes_before)
def testNullTerminatedInstrumentName(self):
'''
MuseScore currently writes null bytes at the end of instrument names.
https://musescore.org/en/node/310158
'''
from music21 import instrument
from music21 import midi as midiModule
event = midiModule.MidiEvent()
event.data = bytes('Piccolo\x00', 'utf-8')
i = midiEventsToInstrument(event)
self.assertIsInstance(i, instrument.Piccolo)
# test that nothing was broken.
event.data = bytes('Flute', 'utf-8')
i = midiEventsToInstrument(event)
self.assertIsInstance(i, instrument.Flute)
def testLousyInstrumentName(self):
from music21 import midi as midiModule
lousyNames = (' ', 'Instrument 20', 'Instrument', 'Inst 2', 'instrument')
for name in lousyNames:
with self.subTest(name=name):
event = midiModule.MidiEvent()
event.data = bytes(name, 'utf-8')
event.type = midiModule.MetaEvents.INSTRUMENT_NAME
i = midiEventsToInstrument(event)
self.assertIsNone(i.instrumentName)
def testConductorStream(self):
s = stream.Stream()
p = stream.Stream()
p.priority = -2
m = stream.Stream()
m.append(note.Note('C4'))
p.append(m)
s.insert(0, p)
conductor = conductorStream(s)
self.assertEqual(conductor.priority, -3)
def testRestsMadeInVoice(self):
from music21 import converter
fp = common.getSourceFilePath() / 'midi' / 'testPrimitive' / 'test17.mid'
inn = converter.parse(fp)
numRests = len(inn.parts[1].voices[0].getElementsByClass('Rest'))
self.assertEqual(numRests, 2)
# ------------------------------------------------------------------------------
_DOC_ORDER = [streamToMidiFile, midiFileToStream]
if __name__ == '__main__':
import music21
music21.mainTest(Test) # , runTest='testConductorStream')
| 36.293613 | 100 | 0.60827 |
5212092cef34f4cde9cd1fe8d37ad10a8854065b
| 355 |
py
|
Python
|
Practice/Beginner/CATSDOGS.py
|
ganeshkumarm1/CodeChef-Solutions
|
de94a5d06cc1fcfda42bce06e2918c3add81a999
|
[
"MIT"
] | null | null | null |
Practice/Beginner/CATSDOGS.py
|
ganeshkumarm1/CodeChef-Solutions
|
de94a5d06cc1fcfda42bce06e2918c3add81a999
|
[
"MIT"
] | null | null | null |
Practice/Beginner/CATSDOGS.py
|
ganeshkumarm1/CodeChef-Solutions
|
de94a5d06cc1fcfda42bce06e2918c3add81a999
|
[
"MIT"
] | null | null | null |
t = int(input())
for i in range(t):
    cats, dogs, legs = list(map(int, input().split()))
    max_legs = 4 * (cats + dogs)  # no cat rides a dog: every animal's legs touch the ground
    # each dog can carry up to 2 cats, so hide as many cats as possible
    min_legs = 4 * dogs if cats <= 2 * dogs else 4 * (cats - dogs)
if legs % 4 != 0:
print("no")
else:
if min_legs <= legs <= max_legs:
print("yes")
else:
print("no")
| 20.882353 | 66 | 0.470423 |
481355fe9f9d6345b43e6ce75b1f1c817fb22842
| 891 |
py
|
Python
|
examples/slash_commands/old.py
|
Enegg/disnake
|
1d48cbf4e0dfec82fdfb65d7f58396767ce7c009
|
[
"MIT"
] | 290 |
2021-11-03T12:33:16.000Z
|
2022-03-31T19:30:19.000Z
|
examples/slash_commands/old.py
|
Enegg/disnake
|
1d48cbf4e0dfec82fdfb65d7f58396767ce7c009
|
[
"MIT"
] | 200 |
2021-11-03T10:41:41.000Z
|
2022-03-31T08:13:11.000Z
|
examples/slash_commands/old.py
|
Enegg/disnake
|
1d48cbf4e0dfec82fdfb65d7f58396767ce7c009
|
[
"MIT"
] | 118 |
2021-11-03T18:27:09.000Z
|
2022-03-25T22:00:45.000Z
|
"""
An example of old-style options.
Not the most convenient syntax.
"""
import disnake
from disnake.ext import commands
bot = commands.Bot(command_prefix=commands.when_mentioned)
@bot.slash_command(
name="slash_command",
description="A Simple Slash Command",
options=[
disnake.Option("string", description="A string to send", required=True),
disnake.Option(
"channel", description="The destination channel", type=disnake.OptionType.channel
),
disnake.Option(
"number", description="The number of repetitions", type=disnake.OptionType.integer
),
],
)
async def command(inter, string, channel=None, number=1):
channel = channel or inter.channel
await inter.response.send_message(
f"Sending {string} {number}x to {channel.mention}", ephemeral=True
)
await channel.send(string * number)
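# Note: to actually run this example you would still start the bot yourself, e.g.
# bot.run("YOUR_TOKEN_HERE") -- the token string here is a placeholder, not part
# of the original example.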
| 29.7 | 94 | 0.682379 |
05e209388b52e73487a350a60c000604928dae0a
| 30,242 |
py
|
Python
|
src/gluonnlp/data/glue.py
|
davisliang/gluon-nlp
|
18a736dbb55c80c2de82d73b923c3cd3d9d53591
|
[
"Apache-2.0"
] | null | null | null |
src/gluonnlp/data/glue.py
|
davisliang/gluon-nlp
|
18a736dbb55c80c2de82d73b923c3cd3d9d53591
|
[
"Apache-2.0"
] | null | null | null |
src/gluonnlp/data/glue.py
|
davisliang/gluon-nlp
|
18a736dbb55c80c2de82d73b923c3cd3d9d53591
|
[
"Apache-2.0"
] | 3 |
2021-03-12T04:41:00.000Z
|
2021-03-12T04:41:24.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=line-too-long
"""GLUEBenchmark corpora."""
__all__ = ['GlueCoLA', 'GlueSST2', 'GlueSTSB', 'GlueQQP', 'GlueRTE', 'GlueMNLI',
'GlueQNLI', 'GlueWNLI', 'GlueMRPC']
import zipfile
import os
import io
from mxnet.gluon.utils import download, check_sha1, _get_repo_file_url
from .dataset import TSVDataset
from .registry import register
from ..base import get_home_dir
class _GlueDataset(TSVDataset):
def __init__(self, root, data_file, **kwargs):
root = os.path.expanduser(root)
if not os.path.isdir(root):
os.makedirs(root)
segment, zip_hash, data_hash = data_file
self._root = root
filename = os.path.join(self._root, '%s.tsv' % segment)
self._get_data(segment, zip_hash, data_hash, filename)
super(_GlueDataset, self).__init__(filename, **kwargs)
def _get_data(self, segment, zip_hash, data_hash, filename):
data_filename = '%s-%s.zip' % (segment, data_hash[:8])
if not os.path.exists(filename) or not check_sha1(filename, data_hash):
download(_get_repo_file_url(self._repo_dir(), data_filename),
path=self._root, sha1_hash=zip_hash)
# unzip
downloaded_path = os.path.join(self._root, data_filename)
with zipfile.ZipFile(downloaded_path, 'r') as zf:
# skip dir structures in the zip
for zip_info in zf.infolist():
if zip_info.filename[-1] == '/':
continue
zip_info.filename = os.path.basename(zip_info.filename)
zf.extract(zip_info, self._root)
def _repo_dir(self):
raise NotImplementedError
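# A rough sketch (hypothetical task, not part of GLUE) of how _GlueDataset is
# extended -- each subclass supplies (segment, zip sha1, tsv sha1) tuples and a
# repo directory, exactly as the registered classes below do:
#
#     class _MyGlueTask(_GlueDataset):
#         def __init__(self, segment='train', root='./datasets/mytask'):
#             data_file = {'train': ('train', '<zip sha1>', '<tsv sha1>')}[segment]
#             super(_MyGlueTask, self).__init__(root, data_file,
#                                               field_indices=[0, 1],
#                                               num_discard_samples=1)
#         def _repo_dir(self):
#             return 'gluon/dataset/GLUE/MyTask'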
@register(segment=['train', 'dev', 'test'])
class GlueCoLA(_GlueDataset):
"""The Corpus of Linguistic Acceptability (Warstadt et al., 2018) consists of English
acceptability judgments drawn from books and journal articles on linguistic theory.
Each example is a sequence of words annotated with whether it is a grammatical
English sentence.
From
https://gluebenchmark.com/tasks
Parameters
----------
segment : {'train', 'dev', 'test'}, default 'train'
Dataset segment.
root : str, default '$MXNET_HOME/datasets/glue_cola'
Path to temp folder for storing data.
MXNET_HOME defaults to '~/.mxnet'.
return_all_fields : bool, default False
Return all fields available in the dataset.
Examples
--------
>>> cola_dev = gluonnlp.data.GlueCoLA('dev', root='./datasets/cola')
-etc-
>>> len(cola_dev)
1043
>>> len(cola_dev[0])
2
>>> cola_dev[0]
['The sailors rode the breeze clear of the rocks.', '1']
>>> cola_test = gluonnlp.data.GlueCoLA('test', root='./datasets/cola')
-etc-
>>> len(cola_test)
1063
>>> len(cola_test[0])
1
>>> cola_test[0]
['Bill whistled past the house.']
"""
def __init__(self, segment='train',
root=os.path.join(get_home_dir(), 'datasets', 'glue_cola'),
return_all_fields=False):
self._data_file = {'train': ('train', '662227ed4d98bb96b3495234b650e37826a5ef72',
'7760a9c4b1fb05f6d003475cc7bb0d0118875190'),
'dev': ('dev', '6f3f5252b004eab187bf22ab5b0af31e739d3a3f',
'30ece4de38e1929545c4154d4c71ad297c7f54b4'),
'test': ('test', 'b88180515ad041935793e74e3a76470b0c1b2c50',
'f38b43d31bb06accf82a3d5b2fe434a752a74c9f')}
data_file = self._data_file[segment]
if segment in ['train', 'dev']:
A_IDX, LABEL_IDX = 3, 1
field_indices = [A_IDX, LABEL_IDX] if not return_all_fields else None
num_discard_samples = 0
elif segment == 'test':
A_IDX = 1
field_indices = [A_IDX] if not return_all_fields else None
num_discard_samples = 1
super(GlueCoLA, self).__init__(root, data_file,
num_discard_samples=num_discard_samples,
field_indices=field_indices)
def _repo_dir(self):
return 'gluon/dataset/GLUE/CoLA'
@register(segment=['train', 'dev', 'test'])
class GlueSST2(_GlueDataset):
"""The Stanford Sentiment Treebank (Socher et al., 2013) consists of sentences from movie
reviews and human annotations of their sentiment.
From
https://gluebenchmark.com/tasks
Parameters
----------
segment : {'train', 'dev', 'test'}, default 'train'
Dataset segment.
root : str, default '$MXNET_HOME/datasets/glue_sst'
Path to temp folder for storing data.
MXNET_HOME defaults to '~/.mxnet'.
return_all_fields : bool, default False
Return all fields available in the dataset.
Examples
--------
>>> sst_dev = gluonnlp.data.GlueSST2('dev', root='./datasets/sst')
-etc-
>>> len(sst_dev)
872
>>> len(sst_dev[0])
2
>>> sst_dev[0]
["it 's a charming and often affecting journey . ", '1']
>>> sst_test = gluonnlp.data.GlueSST2('test', root='./datasets/sst')
-etc-
>>> len(sst_test)
1821
>>> len(sst_test[0])
1
>>> sst_test[0]
['uneasy mishmash of styles and genres .']
"""
def __init__(self, segment='train',
root=os.path.join(get_home_dir(), 'datasets', 'glue_sst'),
return_all_fields=False):
self._data_file = {'train': ('train', 'bcde781bed5caa30d5e9a9d24e5c826965ed02a2',
'ffbb67a55e27525e925b79fee110ca19585d70ca'),
'dev': ('dev', '85698e465ff6573fb80d0b34229c76df84cd766b',
'e166f986cec68fd4cca0ae5ce5869b917f88a2fa'),
'test': ('test', 'efac1c275553ed78500e9b8d8629408f5f867b20',
'3ce8041182bf82dbbbbfe13738b39d3c69722744')}
data_file = self._data_file[segment]
if segment in ['train', 'dev']:
A_IDX, LABEL_IDX = 0, 1
field_indices = [A_IDX, LABEL_IDX] if not return_all_fields else None
num_discard_samples = 1
elif segment == 'test':
A_IDX = 1
field_indices = [A_IDX] if not return_all_fields else None
num_discard_samples = 1
super(GlueSST2, self).__init__(root, data_file,
num_discard_samples=num_discard_samples,
field_indices=field_indices)
def _repo_dir(self):
return 'gluon/dataset/GLUE/SST-2'
@register(segment=['train', 'dev', 'test'])
class GlueSTSB(_GlueDataset):
"""The Semantic Textual Similarity Benchmark (Cer et al., 2017) is a collection of
sentence pairs drawn from news headlines, video and image captions, and natural
language inference data.
Each pair is human-annotated with a similarity score from 1 to 5.
From
https://gluebenchmark.com/tasks
Parameters
----------
segment : {'train', 'dev', 'test'}, default 'train'
Dataset segment.
root : str, default '$MXNET_HOME/datasets/glue_stsb'
Path to temp folder for storing data.
MXNET_HOME defaults to '~/.mxnet'.
return_all_fields : bool, default False
Return all fields available in the dataset.
Examples
--------
>>> stsb_dev = gluonnlp.data.GlueSTSB('dev', root='./datasets/stsb')
-etc-
>>> len(stsb_dev)
1500
>>> len(stsb_dev[0])
3
>>> stsb_dev[0]
['A man with a hard hat is dancing.', 'A man wearing a hard hat is dancing.', '5.000']
>>> stsb_test = gluonnlp.data.GlueSTSB('test', root='./datasets/stsb')
-etc-
>>> len(stsb_test)
1379
>>> len(stsb_test[0])
2
>>> stsb_test[0]
['A girl is styling her hair.', 'A girl is brushing her hair.']
"""
def __init__(self, segment='train',
root=os.path.join(get_home_dir(), 'datasets', 'glue_stsb'),
return_all_fields=False):
self._data_file = {'train': ('train', '9378bd341576810730a5c666ed03122e4c5ecc9f',
'501e55248c6db2a3f416c75932a63693000a82bc'),
'dev': ('dev', '529c3e7c36d0807d88d0b2a5d4b954809ddd4228',
'f8bcc33b01dfa2e9ba85601d0140020735b8eff3'),
'test': ('test', '6284872d6992d8ec6d96320af89c2f46ac076d18',
'36553e5e2107b817257232350e95ff0f3271d844')}
data_file = self._data_file[segment]
if segment in ['train', 'dev']:
A_IDX, B_IDX, LABEL_IDX = 7, 8, 9
field_indices = [A_IDX, B_IDX, LABEL_IDX] if not return_all_fields else None
num_discard_samples = 1
elif segment == 'test':
            A_IDX, B_IDX = 7, 8
field_indices = [A_IDX, B_IDX] if not return_all_fields else None
num_discard_samples = 1
super(GlueSTSB, self).__init__(root, data_file,
num_discard_samples=num_discard_samples,
field_indices=field_indices)
def _repo_dir(self):
return 'gluon/dataset/GLUE/STS-B'
@register(segment=['train', 'dev', 'test'])
class GlueQQP(_GlueDataset):
"""The Quora Question Pairs dataset is a collection of question pairs from the community
question-answering website Quora.
From
https://gluebenchmark.com/tasks
Parameters
----------
segment : {'train', 'dev', 'test'}, default 'train'
Dataset segment.
root : str, default '$MXNET_HOME/datasets/glue_qqp'
Path to temp folder for storing data.
MXNET_HOME defaults to '~/.mxnet'.
return_all_fields : bool, default False
Return all fields available in the dataset.
Examples
--------
>>> qqp_dev = gluonnlp.data.GlueQQP('dev', root='./datasets/qqp')
-etc-
>>> len(qqp_dev)
40430
>>> len(qqp_dev[0])
3
>>> qqp_dev[0]
['Why are African-Americans so beautiful?', 'Why are hispanics so beautiful?', '0']
>>> qqp_test = gluonnlp.data.GlueQQP('test', root='./datasets/qqp')
-etc-
>>> len(qqp_test)
390965
>>> len(qqp_test[3])
2
>>> qqp_test[3]
['Is it safe to invest in social trade biz?', 'Is social trade geniune?']
"""
def __init__(self, segment='train',
root=os.path.join(get_home_dir(), 'datasets', 'glue_qqp'),
return_all_fields=False):
self._data_file = {'train': ('train', '494f280d651f168ad96d6cd05f8d4ddc6be73ce9',
'95c01e711ac8dbbda8f67f3a4291e583a72b6988'),
'dev': ('dev', '9957b60c4c62f9b98ec91b26a9d43529d2ee285d',
'755e0bf2899b8ad315d4bd7d4c85ec51beee5ad0'),
'test': ('test', '1e325cc5dbeeb358f9429c619ebe974fc2d1a8ca',
'0f50d1a62dd51fe932ba91be08238e47c3e2504a')}
data_file = self._data_file[segment]
if segment in ['train', 'dev']:
A_IDX, B_IDX, LABEL_IDX = 3, 4, 5
field_indices = [A_IDX, B_IDX, LABEL_IDX] if not return_all_fields else None
num_discard_samples = 1
elif segment == 'test':
            A_IDX, B_IDX = 1, 2
field_indices = [A_IDX, B_IDX] if not return_all_fields else None
num_discard_samples = 1
# QQP may include broken samples
super(GlueQQP, self).__init__(root, data_file,
num_discard_samples=num_discard_samples,
field_indices=field_indices, allow_missing=True)
def _repo_dir(self):
return 'gluon/dataset/GLUE/QQP'
@register(segment=['train', 'dev', 'test'])
class GlueRTE(_GlueDataset):
"""The Recognizing Textual Entailment (RTE) datasets come from a series of annual textual
entailment challenges (RTE1, RTE2, RTE3, and RTE5).
From
https://gluebenchmark.com/tasks
Parameters
----------
segment : {'train', 'dev', 'test'}, default 'train'
Dataset segment.
root : str, default '$MXNET_HOME/datasets/glue_rte'
Path to temp folder for storing data.
MXNET_HOME defaults to '~/.mxnet'.
return_all_fields : bool, default False
Return all fields available in the dataset.
Examples
--------
>>> rte_dev = gluonnlp.data.GlueRTE('dev', root='./datasets/rte')
-etc-
>>> len(rte_dev)
277
>>> len(rte_dev[0])
3
>>> rte_dev[0]
['Dana Reeve, the widow of the actor Christopher Reeve, has died of lung cancer at age 44, according to the Christopher Reeve Foundation.', 'Christopher Reeve had an accident.', 'not_entailment']
>>> rte_test = gluonnlp.data.GlueRTE('test', root='./datasets/rte')
-etc-
>>> len(rte_test)
3000
>>> len(rte_test[16])
2
>>> rte_test[16]
['United failed to progress beyond the group stages of the Champions League and trail in the Premiership title race, sparking rumours over its future.', 'United won the Champions League.']
"""
def __init__(self, segment='train',
root=os.path.join(get_home_dir(), 'datasets', 'glue_rte'),
return_all_fields=False):
self._data_file = {'train': ('train', 'a23b0633f4f4dfa866c672af2e94f7e07344888f',
'ec2b246745bb5c9d92aee0800684c08902742730'),
'dev': ('dev', 'a6cde090d12a10744716304008cf33dd3f0dbfcb',
'ade75e0673862dcac9c653efb9f59f51be2749aa'),
'test': ('test', '7e4e58a6fa80b1f05e603b4e220524be7976b488',
'ddda5c967fb5a4934b429bb52aaa144e70900000')}
data_file = self._data_file[segment]
if segment in ['train', 'dev']:
A_IDX, B_IDX, LABEL_IDX = 1, 2, 3
field_indices = [A_IDX, B_IDX, LABEL_IDX] if not return_all_fields else None
num_discard_samples = 1
elif segment == 'test':
            A_IDX, B_IDX = 1, 2
field_indices = [A_IDX, B_IDX] if not return_all_fields else None
num_discard_samples = 1
super(GlueRTE, self).__init__(root, data_file,
num_discard_samples=num_discard_samples,
field_indices=field_indices)
def _repo_dir(self):
return 'gluon/dataset/GLUE/RTE'
@register(segment=['train', 'dev_matched', 'dev_mismatched',
'test_matched', 'test_mismatched'])
class GlueMNLI(_GlueDataset):
"""The Multi-Genre Natural Language Inference Corpus (Williams et al., 2018)
is a crowdsourced collection of sentence pairs with textual entailment annotations.
From
https://gluebenchmark.com/tasks
Parameters
----------
segment : {'train', 'dev_matched', 'dev_mismatched', 'test_matched', 'test_mismatched'},
default 'train'
Dataset segment.
root : str, default '$MXNET_HOME/datasets/glue_mnli'
Path to temp folder for storing data.
MXNET_HOME defaults to '~/.mxnet'.
return_all_fields : bool, default False
Return all fields available in the dataset.
Examples
--------
>>> mnli_dev = gluonnlp.data.GlueMNLI('dev_matched', root='./datasets/mnli')
-etc-
>>> len(mnli_dev)
9815
>>> len(mnli_dev[0])
3
>>> mnli_dev[0]
['The new rights are nice enough', 'Everyone really likes the newest benefits ', 'neutral']
>>> mnli_test = gluonnlp.data.GlueMNLI('test_matched', root='./datasets/mnli')
-etc-
>>> len(mnli_test)
9796
>>> len(mnli_test[0])
2
>>> mnli_test[0]
['Hierbas, ans seco, ans dulce, and frigola are just a few names worth keeping a look-out for.', 'Hierbas is a name worth looking out for.']
"""
def __init__(self, segment='train',
root=os.path.join(get_home_dir(), 'datasets', 'glue_mnli'),
return_all_fields=False):
self._data_file = {'train': ('train', 'aa235064ab3ce47d48caa17c553561d84fdf5bf2',
'1e74055bc91e260323574bfe63186acb9420fa13'),
'dev_matched': ('dev_matched',
'328cf527add50ee7bc20a862f97913800ba8a4b1',
'7a38c5fb5ecc875f259e1d57662d58a984753b70'),
'dev_mismatched': ('dev_mismatched',
'9c5d6c6d2e3a676bfa19d929b32e2f9f233585c5',
'47470d91b594e767d80e5de2ef0be6a453c17be5'),
'test_matched': ('test_matched',
'53877d9d554b6a6d402cc0e5f7e38366cd4f8e60',
'00106769e11a43eac119975ad25c2de2c8d2dbe7'),
'test_mismatched': ('test_mismatched',
'82b03d3cc9f4a59c74beab06c141bc0c5bf74a55',
'5a31abf92f045f127dbb2e3d2e0ef8ddea04c237')}
data_file = self._data_file[segment]
if segment in ['train']:
A_IDX, B_IDX, LABEL_IDX = 8, 9, 11
field_indices = [A_IDX, B_IDX, LABEL_IDX] if not return_all_fields else None
num_discard_samples = 1
elif segment in ['dev_matched', 'dev_mismatched']:
A_IDX, B_IDX, LABEL_IDX = 8, 9, 15
field_indices = [A_IDX, B_IDX, LABEL_IDX] if not return_all_fields else None
num_discard_samples = 1
elif segment in ['test_matched', 'test_mismatched']:
            A_IDX, B_IDX = 8, 9
field_indices = [A_IDX, B_IDX] if not return_all_fields else None
num_discard_samples = 1
super(GlueMNLI, self).__init__(root, data_file,
num_discard_samples=num_discard_samples,
field_indices=field_indices)
def _repo_dir(self):
return 'gluon/dataset/GLUE/MNLI'
@register(segment=['train', 'dev', 'test'])
class GlueQNLI(_GlueDataset):
r"""The Question-answering NLI dataset converted from Stanford Question Answering Dataset
(Rajpurkar et al. 2016).
From
https://gluebenchmark.com/tasks
Parameters
----------
segment : {'train', 'dev', 'test'}, default 'train'
        Dataset segment.
root : str, default '$MXNET_HOME/datasets/glue_qnli'
Path to temp folder for storing data.
MXNET_HOME defaults to '~/.mxnet'.
return_all_fields : bool, default False
Return all fields available in the dataset.
Examples
--------
>>> qnli_dev = gluonnlp.data.GlueQNLI('dev', root='./datasets/qnli')
-etc-
>>> len(qnli_dev)
5732
>>> len(qnli_dev[0])
3
>>> qnli_dev[0]
['Which NFL team represented the AFC at Super Bowl 50?', 'The American Football Conference (AFC) champion Denver Broncos defeated the National Football Conference (NFC) champion Carolina Panthers 24\u201310 to earn their third Super Bowl title.', 'entailment']
>>> qnli_test = gluonnlp.data.GlueQNLI('test', root='./datasets/qnli')
-etc-
>>> len(qnli_test)
5740
>>> len(qnli_test[0])
2
>>> qnli_test[0]
['What seldom used term of a unit of force equal to 1000 pound s of force?', 'Other arcane units of force include the sthène, which is equivalent to 1000 N, and the kip, which is equivalent to 1000 lbf.']
"""
def __init__(self, segment='train',
root=os.path.join(get_home_dir(), 'datasets', 'glue_qnli'),
return_all_fields=False):
self._data_file = {'train': ('train', '95fae96fb1ffa6a2804192c9036d3435e63b48e8',
'd90a84eb40c6ba32bc2b34284ceaa962c46f8753'),
'dev': ('dev', '5652b9d4d5c8d115c080bcf64101927ea2b3a1e0',
'd14a61290301c2a9d26459c4cd036742e8591428'),
'test': ('test', '23dfb2f38adb14d3e792dbaecb7f5fd5dfa8db7e',
'f3da1a2e471ebfee81d91574b42e0f5d39153c59')}
data_file = self._data_file[segment]
if segment in ['train', 'dev']:
A_IDX, B_IDX, LABEL_IDX = 1, 2, 3
field_indices = [A_IDX, B_IDX, LABEL_IDX] if not return_all_fields else None
num_discard_samples = 1
elif segment == 'test':
            A_IDX, B_IDX = 1, 2
field_indices = [A_IDX, B_IDX] if not return_all_fields else None
num_discard_samples = 1
super(GlueQNLI, self).__init__(root, data_file,
num_discard_samples=num_discard_samples,
field_indices=field_indices)
def _repo_dir(self):
return 'gluon/dataset/GLUE/QNLI'
@register(segment=['train', 'dev', 'test'])
class GlueWNLI(_GlueDataset):
"""The Winograd NLI dataset converted from the dataset in
Winograd Schema Challenge (Levesque et al., 2011).
From
https://gluebenchmark.com/tasks
Parameters
----------
segment : {'train', 'dev', 'test'}, default 'train'
Dataset segment.
root : str, default '$MXNET_HOME/datasets/glue_wnli'
Path to temp folder for storing data.
MXNET_HOME defaults to '~/.mxnet'.
return_all_fields : bool, default False
Return all fields available in the dataset.
Examples
--------
>>> wnli_dev = gluonnlp.data.GlueWNLI('dev', root='./datasets/wnli')
-etc-
>>> len(wnli_dev)
71
>>> len(wnli_dev[0])
3
>>> wnli_dev[0]
['The drain is clogged with hair. It has to be cleaned.', 'The hair has to be cleaned.', '0']
>>> wnli_test = gluonnlp.data.GlueWNLI('test', root='./datasets/wnli')
-etc-
>>> len(wnli_test)
146
>>> len(wnli_test[0])
2
>>> wnli_test[0]
['Maude and Dora had seen the trains rushing across the prairie, with long, rolling puffs of black smoke streaming back from the engine. Their roars and their wild, clear whistles could be heard from far away. Horses ran away when they came in sight.', 'Horses ran away when Maude and Dora came in sight.']
"""
def __init__(self, segment='train',
root=os.path.join(get_home_dir(), 'datasets', 'glue_wnli'),
return_all_fields=False):
self._data_file = {'train': ('train', '8db0004d0e58640751a9f2875dd66c8000504ddb',
'b497281c1d848b619ea8fe427b3a6e4dc8e7fa92'),
'dev': ('dev', 'd54834960555073fb497cf2766edb77fb62c3646',
'6bbdb866d0cccaac57c3a2505cf53103789b69a9'),
'test': ('test', '431e596a1c6627fb168e7741b3e32ef681da3c7b',
'6ba8fcf3e5b451c101a3902fb4ba3fc1dea42e50')}
data_file = self._data_file[segment]
if segment in ['train', 'dev']:
A_IDX, B_IDX, LABEL_IDX = 1, 2, 3
field_indices = [A_IDX, B_IDX, LABEL_IDX] if not return_all_fields else None
num_discard_samples = 1
elif segment == 'test':
            A_IDX, B_IDX = 1, 2
field_indices = [A_IDX, B_IDX] if not return_all_fields else None
num_discard_samples = 1
super(GlueWNLI, self).__init__(root, data_file,
num_discard_samples=num_discard_samples,
field_indices=field_indices)
def _repo_dir(self):
return 'gluon/dataset/GLUE/WNLI'
@register(segment=['train', 'dev', 'test'])
class GlueMRPC(TSVDataset):
"""The Microsoft Research Paraphrase Corpus dataset.
From
https://gluebenchmark.com/tasks
Parameters
----------
segment : {'train', 'dev', 'test'}, default 'train'
Dataset segment.
root : str, default '$MXNET_HOME/datasets/glue_mrpc'
Path to temp folder for storing data.
MXNET_HOME defaults to '~/.mxnet'.
Examples
--------
>>> mrpc_dev = gluonnlp.data.GlueMRPC('dev', root='./datasets/mrpc')
-etc-
>>> len(mrpc_dev)
408
>>> len(mrpc_dev[0])
3
>>> mrpc_dev[0]
["He said the foodservice pie business doesn 't fit the company 's long-term growth strategy .", '" The foodservice pie business does not fit our long-term growth strategy .', '1']
>>> mrpc_test = gluonnlp.data.GlueMRPC('test', root='./datasets/mrpc')
-etc-
>>> len(mrpc_test)
1725
>>> len(mrpc_test[0])
2
>>> mrpc_test[0]
["PCCW 's chief operating officer , Mike Butcher , and Alex Arena , the chief financial officer , will report directly to Mr So .", 'Current Chief Operating Officer Mike Butcher and Group Chief Financial Officer Alex Arena will report to So .']
"""
def __init__(self,
segment='train',
root=os.path.join(get_home_dir(), 'datasets', 'glue_mrpc')):
self._root = root
assert segment in ['train', 'dev', 'test'], 'Unsupported segment: %s'%segment
self._data_file = {'train': ('msr_paraphrase_train.txt',
'716e0f67af962f08220b7e97d229b293077ef41f',
'131675ffd3d2f04f286049d31cca506c8acba69e'),
'dev': ('msr_paraphrase_train.txt',
'716e0f67af962f08220b7e97d229b293077ef41f',
'e4486577c4cb2e5c2a3fd961eb24f03c623ea02d'),
'test': ('msr_paraphrase_test.txt',
'4265196c15cf75620b0b592b8b921f543bda7e6c',
'3602b2ca26cf574e84183c14d6c0901669ee2d0a')}
self._generate(segment)
path = os.path.join(root, '%s.tsv' % segment)
A_IDX, B_IDX, LABEL_IDX = 3, 4, 0
if segment == 'test':
fields = [A_IDX, B_IDX]
else:
fields = [A_IDX, B_IDX, LABEL_IDX]
super(GlueMRPC, self).__init__(
path, num_discard_samples=1, field_indices=fields)
def _repo_dir(self):
return 'https://dl.fbaipublicfiles.com/senteval/senteval_data/'
def _generate(self, segment):
"""Partition MRPC dataset into train, dev and test.
Adapted from https://gist.github.com/W4ngatang/60c2bdb54d156a41194446737ce03e2e
"""
# download raw data
data_name = segment + '.tsv'
raw_name, raw_hash, data_hash = self._data_file[segment]
raw_path = os.path.join(self._root, raw_name)
download(self._repo_dir() + raw_name, path=raw_path, sha1_hash=raw_hash)
data_path = os.path.join(self._root, data_name)
if segment in ('train', 'dev'):
if os.path.isfile(data_path) and check_sha1(data_path, data_hash):
return
# retrieve dev ids for train and dev set
DEV_ID_URL = 'https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2Fmrpc_dev_ids.tsv?alt=media&token=ec5c0836-31d5-48f4-b431-7480817f1adc'
DEV_ID_HASH = '506c7a1a5e0dd551ceec2f84070fa1a8c2bc4b41'
dev_id_name = 'dev_ids.tsv'
dev_id_path = os.path.join(self._root, dev_id_name)
download(DEV_ID_URL, path=dev_id_path, sha1_hash=DEV_ID_HASH)
# read dev data ids
dev_ids = []
with io.open(dev_id_path, encoding='utf8') as ids_fh:
for row in ids_fh:
dev_ids.append(row.strip().split('\t'))
# generate train and dev set
train_path = os.path.join(self._root, 'train.tsv')
dev_path = os.path.join(self._root, 'dev.tsv')
with io.open(raw_path, encoding='utf8') as data_fh:
with io.open(train_path, 'w', encoding='utf8') as train_fh:
with io.open(dev_path, 'w', encoding='utf8') as dev_fh:
header = data_fh.readline()
train_fh.write(header)
dev_fh.write(header)
for row in data_fh:
label, id1, id2, s1, s2 = row.strip().split('\t')
example = '%s\t%s\t%s\t%s\t%s\n'%(label, id1, id2, s1, s2)
if [id1, id2] in dev_ids:
dev_fh.write(example)
else:
train_fh.write(example)
else:
# generate test set
if os.path.isfile(data_path) and check_sha1(data_path, data_hash):
return
with io.open(raw_path, encoding='utf8') as data_fh:
with io.open(data_path, 'w', encoding='utf8') as test_fh:
header = data_fh.readline()
test_fh.write('index\t#1 ID\t#2 ID\t#1 String\t#2 String\n')
for idx, row in enumerate(data_fh):
label, id1, id2, s1, s2 = row.strip().split('\t')
test_fh.write('%d\t%s\t%s\t%s\t%s\n'%(idx, id1, id2, s1, s2))
| 43.079772 | 310 | 0.590669 |
d486c7c54e5a1877a92b87a06a775cbc573fb3af | 43,571 | py | Python | tests/shared/core/test_domain.py | theanht1/rasa | eb408bb2d216de3a1cd1f72799d7b80eceeb1fd2 | ["Apache-2.0"] | null | null | null | tests/shared/core/test_domain.py | theanht1/rasa | eb408bb2d216de3a1cd1f72799d7b80eceeb1fd2 | ["Apache-2.0"] | 21 | 2021-05-27T13:20:53.000Z | 2022-03-01T13:45:04.000Z | tests/shared/core/test_domain.py | theanht1/rasa | eb408bb2d216de3a1cd1f72799d7b80eceeb1fd2 | ["Apache-2.0"] | null | null | null |
import copy
import json
from pathlib import Path
from typing import Dict, List, Text, Any, Union, Set, Optional
import pytest
from rasa.shared.exceptions import YamlSyntaxException
import rasa.shared.utils.io
from rasa.shared.constants import (
DEFAULT_SESSION_EXPIRATION_TIME_IN_MINUTES,
LATEST_TRAINING_DATA_FORMAT_VERSION,
)
from rasa.core import training, utils
from rasa.core.featurizers.tracker_featurizers import MaxHistoryTrackerFeaturizer
from rasa.shared.core.slots import InvalidSlotTypeException, TextSlot
from rasa.shared.core.constants import (
DEFAULT_INTENTS,
SLOT_LISTED_ITEMS,
SLOT_LAST_OBJECT,
SLOT_LAST_OBJECT_TYPE,
DEFAULT_KNOWLEDGE_BASE_ACTION,
ENTITY_LABEL_SEPARATOR,
DEFAULT_ACTION_NAMES,
)
from rasa.shared.core.domain import (
InvalidDomain,
SessionConfig,
ENTITY_ROLES_KEY,
USED_ENTITIES_KEY,
USE_ENTITIES_KEY,
IGNORE_ENTITIES_KEY,
State,
Domain,
KEY_FORMS,
KEY_E2E_ACTIONS,
)
from rasa.shared.core.trackers import DialogueStateTracker
from rasa.shared.core.events import ActionExecuted, SlotSet, UserUttered
from tests.core.conftest import DEFAULT_DOMAIN_PATH_WITH_SLOTS, DEFAULT_STORIES_FILE
def test_slots_states_before_user_utterance(default_domain: Domain):
featurizer = MaxHistoryTrackerFeaturizer()
tracker = DialogueStateTracker.from_events(
"bla",
evts=[
SlotSet(default_domain.slots[0].name, "some_value"),
ActionExecuted("utter_default"),
],
slots=default_domain.slots,
)
trackers_as_states, _ = featurizer.training_states_and_actions(
[tracker], default_domain
)
expected_states = [[{"slots": {"name": (1.0,)}}]]
assert trackers_as_states == expected_states
async def test_create_train_data_no_history(default_domain: Domain):
featurizer = MaxHistoryTrackerFeaturizer(max_history=1)
training_trackers = await training.load_data(
DEFAULT_STORIES_FILE, default_domain, augmentation_factor=0
)
assert len(training_trackers) == 4
(decoded, _) = featurizer.training_states_and_actions(
training_trackers, default_domain
)
# decoded needs to be sorted
hashed = []
for states in decoded:
hashed.append(json.dumps(states, sort_keys=True))
hashed = sorted(hashed, reverse=True)
assert hashed == [
"[{}]",
'[{"prev_action": {"action_name": "utter_greet"}, "user": {"intent": "greet"}}]',
'[{"prev_action": {"action_name": "utter_greet"}, "slots": {"name": [1.0]}, "user": {"entities": ["name"], "intent": "greet"}}]',
'[{"prev_action": {"action_name": "utter_goodbye"}, "user": {"intent": "goodbye"}}]',
'[{"prev_action": {"action_name": "utter_default"}, "user": {"intent": "default"}}]',
'[{"prev_action": {"action_name": "utter_default"}, "slots": {"name": [1.0]}, "user": {"intent": "default"}}]',
'[{"prev_action": {"action_name": "action_listen"}, "user": {"intent": "greet"}}]',
'[{"prev_action": {"action_name": "action_listen"}, "user": {"intent": "goodbye"}}]',
'[{"prev_action": {"action_name": "action_listen"}, "user": {"intent": "default"}}]',
'[{"prev_action": {"action_name": "action_listen"}, "slots": {"name": [1.0]}, "user": {"intent": "default"}}]',
'[{"prev_action": {"action_name": "action_listen"}, "slots": {"name": [1.0]}, "user": {"entities": ["name"], "intent": "greet"}}]',
]
async def test_create_train_data_with_history(default_domain: Domain):
featurizer = MaxHistoryTrackerFeaturizer(max_history=4)
training_trackers = await training.load_data(
DEFAULT_STORIES_FILE, default_domain, augmentation_factor=0
)
assert len(training_trackers) == 4
(decoded, _) = featurizer.training_states_and_actions(
training_trackers, default_domain
)
# decoded needs to be sorted
hashed = []
for states in decoded:
hashed.append(json.dumps(states, sort_keys=True))
hashed = sorted(hashed)
assert hashed == [
'[{"prev_action": {"action_name": "action_listen"}, "slots": {"name": [1.0]}, "user": {"entities": ["name"], "intent": "greet"}}, {"prev_action": {"action_name": "utter_greet"}, "slots": {"name": [1.0]}, "user": {"entities": ["name"], "intent": "greet"}}, {"prev_action": {"action_name": "action_listen"}, "slots": {"name": [1.0]}, "user": {"intent": "default"}}, {"prev_action": {"action_name": "utter_default"}, "slots": {"name": [1.0]}, "user": {"intent": "default"}}]',
'[{"prev_action": {"action_name": "action_listen"}, "user": {"intent": "default"}}, {"prev_action": {"action_name": "utter_default"}, "user": {"intent": "default"}}, {"prev_action": {"action_name": "action_listen"}, "user": {"intent": "goodbye"}}, {"prev_action": {"action_name": "utter_goodbye"}, "user": {"intent": "goodbye"}}]',
'[{"prev_action": {"action_name": "action_listen"}, "user": {"intent": "greet"}}, {"prev_action": {"action_name": "utter_greet"}, "user": {"intent": "greet"}}, {"prev_action": {"action_name": "action_listen"}, "user": {"intent": "default"}}, {"prev_action": {"action_name": "utter_default"}, "user": {"intent": "default"}}]',
'[{"prev_action": {"action_name": "utter_greet"}, "user": {"intent": "greet"}}, {"prev_action": {"action_name": "action_listen"}, "user": {"intent": "default"}}, {"prev_action": {"action_name": "utter_default"}, "user": {"intent": "default"}}, {"prev_action": {"action_name": "action_listen"}, "user": {"intent": "goodbye"}}]',
'[{}, {"prev_action": {"action_name": "action_listen"}, "slots": {"name": [1.0]}, "user": {"entities": ["name"], "intent": "greet"}}, {"prev_action": {"action_name": "utter_greet"}, "slots": {"name": [1.0]}, "user": {"entities": ["name"], "intent": "greet"}}, {"prev_action": {"action_name": "action_listen"}, "slots": {"name": [1.0]}, "user": {"intent": "default"}}]',
'[{}, {"prev_action": {"action_name": "action_listen"}, "slots": {"name": [1.0]}, "user": {"entities": ["name"], "intent": "greet"}}, {"prev_action": {"action_name": "utter_greet"}, "slots": {"name": [1.0]}, "user": {"entities": ["name"], "intent": "greet"}}]',
'[{}, {"prev_action": {"action_name": "action_listen"}, "slots": {"name": [1.0]}, "user": {"entities": ["name"], "intent": "greet"}}]',
'[{}, {"prev_action": {"action_name": "action_listen"}, "user": {"intent": "goodbye"}}, {"prev_action": {"action_name": "utter_goodbye"}, "user": {"intent": "goodbye"}}]',
'[{}, {"prev_action": {"action_name": "action_listen"}, "user": {"intent": "goodbye"}}]',
'[{}, {"prev_action": {"action_name": "action_listen"}, "user": {"intent": "greet"}}, {"prev_action": {"action_name": "utter_greet"}, "user": {"intent": "greet"}}, {"prev_action": {"action_name": "action_listen"}, "user": {"intent": "default"}}]',
'[{}, {"prev_action": {"action_name": "action_listen"}, "user": {"intent": "greet"}}, {"prev_action": {"action_name": "utter_greet"}, "user": {"intent": "greet"}}]',
'[{}, {"prev_action": {"action_name": "action_listen"}, "user": {"intent": "greet"}}]',
"[{}]",
]
def check_for_too_many_entities_and_remove_them(state: State) -> State:
# we ignore entities where there are > 1 of them:
# entities come from dictionary keys; as a result, they are stored
# in different order in the tuple which makes the test unstable
if (
state.get("user")
and state.get("user", {}).get("entities")
and len(state.get("user").get("entities")) > 1
):
state.get("user")["entities"] = ()
return state
async def test_create_train_data_unfeaturized_entities():
domain_file = "data/test_domains/default_unfeaturized_entities.yml"
stories_file = "data/test_yaml_stories/stories_unfeaturized_entities.yml"
domain = Domain.load(domain_file)
featurizer = MaxHistoryTrackerFeaturizer(max_history=1)
training_trackers = await training.load_data(
stories_file, domain, augmentation_factor=0
)
assert len(training_trackers) == 2
(decoded, _) = featurizer.training_states_and_actions(training_trackers, domain)
# decoded needs to be sorted
hashed = []
for states in decoded:
new_states = [
check_for_too_many_entities_and_remove_them(state) for state in states
]
hashed.append(json.dumps(new_states, sort_keys=True))
hashed = sorted(hashed, reverse=True)
assert hashed == [
"[{}]",
'[{"prev_action": {"action_name": "utter_greet"}, "user": {"intent": "greet"}}]',
'[{"prev_action": {"action_name": "utter_greet"}, "user": {"entities": ["name"], "intent": "greet"}}]',
'[{"prev_action": {"action_name": "utter_goodbye"}, "user": {"intent": "goodbye"}}]',
'[{"prev_action": {"action_name": "utter_default"}, "user": {"intent": "why"}}]',
'[{"prev_action": {"action_name": "utter_default"}, "user": {"intent": "thank"}}]',
'[{"prev_action": {"action_name": "utter_default"}, "user": {"entities": [], "intent": "default"}}]',
'[{"prev_action": {"action_name": "utter_default"}, "user": {"entities": [], "intent": "ask"}}]',
'[{"prev_action": {"action_name": "action_listen"}, "user": {"intent": "why"}}]',
'[{"prev_action": {"action_name": "action_listen"}, "user": {"intent": "thank"}}]',
'[{"prev_action": {"action_name": "action_listen"}, "user": {"intent": "greet"}}]',
'[{"prev_action": {"action_name": "action_listen"}, "user": {"intent": "goodbye"}}]',
'[{"prev_action": {"action_name": "action_listen"}, "user": {"entities": [], "intent": "default"}}]',
'[{"prev_action": {"action_name": "action_listen"}, "user": {"entities": [], "intent": "ask"}}]',
'[{"prev_action": {"action_name": "action_listen"}, "user": {"entities": ["name"], "intent": "greet"}}]',
]
def test_domain_from_template():
domain_file = DEFAULT_DOMAIN_PATH_WITH_SLOTS
domain = Domain.load(domain_file)
assert not domain.is_empty()
assert len(domain.intents) == 10 + len(DEFAULT_INTENTS)
assert len(domain.action_names_or_texts) == 16
def test_avoid_action_repetition(default_domain: Domain):
domain = Domain.from_yaml(
"""
actions:
- utter_greet
responses:
utter_greet:
- text: "hi"
"""
)
assert len(domain.action_names_or_texts) == len(DEFAULT_ACTION_NAMES) + 1
def test_utter_templates():
domain_file = "examples/moodbot/domain.yml"
domain = Domain.load(domain_file)
expected_template = {
"text": "Hey! How are you?",
"buttons": [
{"title": "great", "payload": "/mood_great"},
{"title": "super sad", "payload": "/mood_unhappy"},
],
}
assert domain.random_template_for("utter_greet") == expected_template
def test_custom_slot_type(tmpdir: Path):
domain_path = str(tmpdir / "domain.yml")
rasa.shared.utils.io.write_text_file(
"""
slots:
custom:
type: tests.core.conftest.CustomSlot
responses:
utter_greet:
- text: hey there! """,
domain_path,
)
Domain.load(domain_path)
@pytest.mark.parametrize(
"domain_unkown_slot_type",
[
"""
slots:
custom:
type: tests.core.conftest.Unknown
responses:
utter_greet:
- text: hey there!""",
"""
slots:
custom:
type: blubblubblub
responses:
utter_greet:
- text: hey there!""",
],
)
def test_domain_fails_on_unknown_custom_slot_type(tmpdir, domain_unkown_slot_type):
domain_path = str(tmpdir / "domain.yml")
rasa.shared.utils.io.write_text_file(domain_unkown_slot_type, domain_path)
with pytest.raises(InvalidSlotTypeException):
Domain.load(domain_path)
def test_domain_to_dict():
test_yaml = f"""
actions:
- action_save_world
config:
store_entities_as_slots: true
entities: []
forms:
some_form:
intents: []
responses:
utter_greet:
- text: hey there!
session_config:
carry_over_slots_to_new_session: true
session_expiration_time: 60
{KEY_E2E_ACTIONS}:
- Hello, dear user
- what's up
slots:
some_slot:
type: categorical
values:
- high
- low"""
domain_as_dict = Domain.from_yaml(test_yaml).as_dict()
assert domain_as_dict == {
"actions": ["action_save_world"],
"config": {"store_entities_as_slots": True},
"entities": [],
"forms": {"some_form": None},
"intents": [],
"e2e_actions": [],
"responses": {"utter_greet": [{"text": "hey there!"}]},
"session_config": {
"carry_over_slots_to_new_session": True,
"session_expiration_time": 60,
},
"slots": {
"some_slot": {
"values": ["high", "low"],
"initial_value": None,
"auto_fill": True,
"influence_conversation": True,
"type": "rasa.shared.core.slots.CategoricalSlot",
}
},
KEY_E2E_ACTIONS: ["Hello, dear user", "what's up"],
}
def test_domain_to_yaml():
test_yaml = f"""
version: '2.0'
actions:
- action_save_world
config:
store_entities_as_slots: true
e2e_actions: []
entities: []
forms: {{}}
intents: []
responses:
utter_greet:
- text: hey there!
session_config:
carry_over_slots_to_new_session: true
session_expiration_time: {DEFAULT_SESSION_EXPIRATION_TIME_IN_MINUTES}
slots: {{}}
"""
with pytest.warns(None) as record:
domain = Domain.from_yaml(test_yaml)
actual_yaml = domain.as_yaml()
assert not record
expected = rasa.shared.utils.io.read_yaml(test_yaml)
actual = rasa.shared.utils.io.read_yaml(actual_yaml)
assert actual == expected
def test_merge_yaml_domains():
test_yaml_1 = f"""config:
store_entities_as_slots: true
entities: []
intents: []
slots: {{}}
responses:
utter_greet:
- text: hey there!
{KEY_E2E_ACTIONS}:
- Hi"""
test_yaml_2 = f"""config:
store_entities_as_slots: false
session_config:
session_expiration_time: 20
carry_over_slots: true
entities:
- cuisine
intents:
- greet
slots:
cuisine:
type: text
{KEY_E2E_ACTIONS}:
- Bye
responses:
utter_goodbye:
- text: bye!
utter_greet:
- text: hey you!"""
domain_1 = Domain.from_yaml(test_yaml_1)
domain_2 = Domain.from_yaml(test_yaml_2)
domain = domain_1.merge(domain_2)
# single attribute should be taken from domain_1
assert domain.store_entities_as_slots
# conflicts should be taken from domain_1
assert domain.templates == {
"utter_greet": [{"text": "hey there!"}],
"utter_goodbye": [{"text": "bye!"}],
}
# lists should be deduplicated and merged
assert domain.intents == sorted(["greet", *DEFAULT_INTENTS])
assert domain.entities == ["cuisine"]
assert isinstance(domain.slots[0], TextSlot)
assert domain.slots[0].name == "cuisine"
assert sorted(domain.user_actions) == sorted(["utter_greet", "utter_goodbye"])
assert domain.session_config == SessionConfig(20, True)
domain = domain_1.merge(domain_2, override=True)
# single attribute should be taken from domain_2
assert not domain.store_entities_as_slots
# conflicts should take value from domain_2
assert domain.templates == {
"utter_greet": [{"text": "hey you!"}],
"utter_goodbye": [{"text": "bye!"}],
}
assert domain.session_config == SessionConfig(20, True)
assert domain.action_texts == ["Bye", "Hi"]
@pytest.mark.parametrize("default_intent", DEFAULT_INTENTS)
def test_merge_yaml_domains_with_default_intents(default_intent: Text):
test_yaml_1 = """intents: []"""
# this domain contains an overridden default intent
test_yaml_2 = f"""intents:
- greet
- {default_intent}"""
domain_1 = Domain.from_yaml(test_yaml_1)
domain_2 = Domain.from_yaml(test_yaml_2)
domain = domain_1.merge(domain_2)
# check that the default intents were merged correctly
assert default_intent in domain.intents
assert domain.intents == sorted(["greet", *DEFAULT_INTENTS])
    # ensure that the default intent is contained in the domain's dictionary dump
assert list(domain.as_dict()["intents"][1].keys())[0] == default_intent
def test_merge_session_config_if_first_is_not_default():
yaml1 = """
session_config:
session_expiration_time: 20
carry_over_slots: true"""
yaml2 = """
session_config:
session_expiration_time: 40
carry_over_slots: true
"""
domain1 = Domain.from_yaml(yaml1)
domain2 = Domain.from_yaml(yaml2)
merged = domain1.merge(domain2)
assert merged.session_config == SessionConfig(20, True)
merged = domain1.merge(domain2, override=True)
assert merged.session_config == SessionConfig(40, True)
def test_merge_with_empty_domain():
domain = Domain.from_yaml(
"""config:
store_entities_as_slots: false
session_config:
session_expiration_time: 20
carry_over_slots: true
entities:
- cuisine
intents:
- greet
slots:
cuisine:
type: text
responses:
utter_goodbye:
- text: bye!
utter_greet:
- text: hey you!"""
)
merged = Domain.empty().merge(domain)
assert merged.as_dict() == domain.as_dict()
@pytest.mark.parametrize("other", [Domain.empty(), None])
def test_merge_with_empty_other_domain(other: Optional[Domain]):
domain = Domain.from_yaml(
"""config:
store_entities_as_slots: false
session_config:
session_expiration_time: 20
carry_over_slots: true
entities:
- cuisine
intents:
- greet
slots:
cuisine:
type: text
responses:
utter_goodbye:
- text: bye!
utter_greet:
- text: hey you!"""
)
merged = domain.merge(other, override=True)
assert merged.as_dict() == domain.as_dict()
def test_merge_domain_with_forms():
test_yaml_1 = """
forms:
# Old style form definitions (before RulePolicy)
- my_form
- my_form2
"""
test_yaml_2 = """
forms:
my_form3:
slot1:
- type: from_text
"""
domain_1 = Domain.from_yaml(test_yaml_1)
domain_2 = Domain.from_yaml(test_yaml_2)
domain = domain_1.merge(domain_2)
expected_number_of_forms = 3
assert len(domain.form_names) == expected_number_of_forms
assert len(domain.forms) == expected_number_of_forms
@pytest.mark.parametrize(
"intents, entities, roles, groups, intent_properties",
[
(
["greet", "goodbye"],
["entity", "other", "third"],
{"entity": ["role-1", "role-2"]},
{},
{
"greet": {
USED_ENTITIES_KEY: [
"entity",
f"entity{ENTITY_LABEL_SEPARATOR}role-1",
f"entity{ENTITY_LABEL_SEPARATOR}role-2",
"other",
"third",
]
},
"goodbye": {
USED_ENTITIES_KEY: [
"entity",
f"entity{ENTITY_LABEL_SEPARATOR}role-1",
f"entity{ENTITY_LABEL_SEPARATOR}role-2",
"other",
"third",
]
},
},
),
(
[{"greet": {USE_ENTITIES_KEY: []}}, "goodbye"],
["entity", "other", "third"],
{},
{"other": ["1", "2"]},
{
"greet": {USED_ENTITIES_KEY: []},
"goodbye": {
USED_ENTITIES_KEY: [
"entity",
"other",
f"other{ENTITY_LABEL_SEPARATOR}1",
f"other{ENTITY_LABEL_SEPARATOR}2",
"third",
]
},
},
),
(
[
{
"greet": {
"triggers": "utter_goodbye",
USE_ENTITIES_KEY: ["entity"],
IGNORE_ENTITIES_KEY: ["other"],
}
},
"goodbye",
],
["entity", "other", "third"],
{"entity": ["role"], "other": ["role"]},
{},
{
"greet": {
"triggers": "utter_goodbye",
USED_ENTITIES_KEY: [
"entity",
f"entity{ENTITY_LABEL_SEPARATOR}role",
],
},
"goodbye": {
USED_ENTITIES_KEY: [
"entity",
f"entity{ENTITY_LABEL_SEPARATOR}role",
"other",
f"other{ENTITY_LABEL_SEPARATOR}role",
"third",
]
},
},
),
(
[
{"greet": {"triggers": "utter_goodbye", USE_ENTITIES_KEY: None}},
{"goodbye": {USE_ENTITIES_KEY: [], IGNORE_ENTITIES_KEY: []}},
],
["entity", "other", "third"],
{},
{},
{
"greet": {USED_ENTITIES_KEY: [], "triggers": "utter_goodbye"},
"goodbye": {USED_ENTITIES_KEY: []},
},
),
(
[
"greet",
"goodbye",
{"chitchat": {"is_retrieval_intent": True, "use_entities": None}},
],
["entity", "other", "third"],
{},
{},
{
"greet": {USED_ENTITIES_KEY: ["entity", "other", "third"]},
"goodbye": {USED_ENTITIES_KEY: ["entity", "other", "third"]},
"chitchat": {USED_ENTITIES_KEY: [], "is_retrieval_intent": True},
},
),
],
)
def test_collect_intent_properties(
intents: Union[Set[Text], List[Union[Text, Dict[Text, Any]]]],
entities: List[Text],
roles: Dict[Text, List[Text]],
groups: Dict[Text, List[Text]],
intent_properties: Dict[Text, Dict[Text, Union[bool, List]]],
):
Domain._add_default_intents(intent_properties, entities, roles, groups)
assert (
Domain.collect_intent_properties(intents, entities, roles, groups)
== intent_properties
)
def test_load_domain_from_directory_tree(tmp_path: Path):
root_domain = {"actions": ["utter_root", "utter_root2"]}
utils.dump_obj_as_yaml_to_file(tmp_path / "domain_pt1.yml", root_domain)
subdirectory_1 = tmp_path / "Skill 1"
subdirectory_1.mkdir()
skill_1_domain = {"actions": ["utter_skill_1"]}
utils.dump_obj_as_yaml_to_file(subdirectory_1 / "domain_pt2.yml", skill_1_domain)
subdirectory_2 = tmp_path / "Skill 2"
subdirectory_2.mkdir()
skill_2_domain = {"actions": ["utter_skill_2"]}
utils.dump_obj_as_yaml_to_file(subdirectory_2 / "domain_pt3.yml", skill_2_domain)
subsubdirectory = subdirectory_2 / "Skill 2-1"
subsubdirectory.mkdir()
skill_2_1_domain = {"actions": ["utter_subskill", "utter_root"]}
# Check if loading from `.yaml` also works
utils.dump_obj_as_yaml_to_file(
subsubdirectory / "domain_pt4.yaml", skill_2_1_domain
)
actual = Domain.load(str(tmp_path))
expected = [
"utter_root",
"utter_root2",
"utter_skill_1",
"utter_skill_2",
"utter_subskill",
]
assert set(actual.user_actions) == set(expected)
def test_domain_warnings():
domain = Domain.load(DEFAULT_DOMAIN_PATH_WITH_SLOTS)
warning_types = [
"action_warnings",
"intent_warnings",
"entity_warnings",
"slot_warnings",
]
actions = ["action_1", "action_2"]
intents = ["intent_1", "intent_2"]
entities = ["entity_1", "entity_2"]
slots = ["slot_1", "slot_2"]
domain_warnings = domain.domain_warnings(
intents=intents, entities=entities, actions=actions, slots=slots
)
# elements not found in domain should be in `in_training_data` diff
for _type, elements in zip(warning_types, [actions, intents, entities]):
assert set(domain_warnings[_type]["in_training_data"]) == set(elements)
# all other domain elements should be in `in_domain` diff
for _type, elements in zip(
warning_types,
[domain.user_actions + domain.form_names, domain.intents, domain.entities],
):
assert set(domain_warnings[_type]["in_domain"]) == set(elements)
# fully aligned domain and elements should yield empty diff
domain_warnings = domain.domain_warnings(
intents=domain.intents,
entities=domain.entities,
actions=domain.user_actions + domain.form_names,
slots=[s.name for s in domain._user_slots],
)
for diff_dict in domain_warnings.values():
assert all(not diff_set for diff_set in diff_dict.values())
def test_unfeaturized_slot_in_domain_warnings():
    # create a domain with one featurized and one unfeaturized slot
featurized_slot_name = "text_slot"
unfeaturized_slot_name = "unfeaturized_slot"
domain = Domain.from_dict(
{
"slots": {
featurized_slot_name: {"initial_value": "value2", "type": "text"},
unfeaturized_slot_name: {
"type": "text",
"initial_value": "value1",
"influence_conversation": False,
},
}
}
)
# ensure both are in domain
for slot in (featurized_slot_name, unfeaturized_slot_name):
assert slot in [slot.name for slot in domain.slots]
# text slot should appear in domain warnings, unfeaturized slot should not
in_domain_slot_warnings = domain.domain_warnings()["slot_warnings"]["in_domain"]
assert featurized_slot_name in in_domain_slot_warnings
assert unfeaturized_slot_name not in in_domain_slot_warnings
def test_check_domain_sanity_on_invalid_domain():
with pytest.raises(InvalidDomain):
Domain(
intents={},
entities=[],
slots=[],
templates={},
action_names=["random_name", "random_name"],
forms={},
)
with pytest.raises(InvalidDomain):
Domain(
intents={},
entities=[],
slots=[TextSlot("random_name"), TextSlot("random_name")],
templates={},
action_names=[],
forms={},
)
with pytest.raises(InvalidDomain):
Domain(
intents={},
entities=["random_name", "random_name", "other_name", "other_name"],
slots=[],
templates={},
action_names=[],
forms={},
)
with pytest.raises(InvalidDomain):
Domain(
intents={},
entities=[],
slots=[],
templates={},
action_names=[],
forms=["random_name", "random_name"],
)
def test_load_on_invalid_domain_duplicate_intents():
with pytest.raises(InvalidDomain):
Domain.load("data/test_domains/duplicate_intents.yml")
def test_load_on_invalid_domain_duplicate_actions():
with pytest.raises(InvalidDomain):
Domain.load("data/test_domains/duplicate_actions.yml")
def test_load_on_invalid_domain_duplicate_templates():
with pytest.raises(YamlSyntaxException):
Domain.load("data/test_domains/duplicate_templates.yml")
def test_load_on_invalid_domain_duplicate_entities():
with pytest.raises(InvalidDomain):
Domain.load("data/test_domains/duplicate_entities.yml")
def test_load_domain_with_entity_roles_groups():
domain = Domain.load("data/test_domains/travel_form.yml")
assert domain.entities is not None
assert "GPE" in domain.entities
assert "name" in domain.entities
assert "name" not in domain.roles
assert "GPE" in domain.roles
assert "origin" in domain.roles["GPE"]
assert "destination" in domain.roles["GPE"]
def test_is_empty():
assert Domain.empty().is_empty()
def test_transform_intents_for_file_default():
domain_path = "data/test_domains/default_unfeaturized_entities.yml"
domain = Domain.load(domain_path)
transformed = domain._transform_intents_for_file()
expected = [
{"greet": {USE_ENTITIES_KEY: ["name"]}},
{"default": {IGNORE_ENTITIES_KEY: ["unrelated_recognized_entity"]}},
{"goodbye": {USE_ENTITIES_KEY: []}},
{"thank": {USE_ENTITIES_KEY: []}},
{"ask": {USE_ENTITIES_KEY: True}},
{"why": {USE_ENTITIES_KEY: []}},
{"pure_intent": {USE_ENTITIES_KEY: True}},
]
assert transformed == expected
def test_transform_intents_for_file_with_mapping():
domain_path = "data/test_domains/default_with_mapping.yml"
domain = Domain.load(domain_path)
transformed = domain._transform_intents_for_file()
expected = [
{"greet": {"triggers": "utter_greet", USE_ENTITIES_KEY: True}},
{"default": {"triggers": "utter_default", USE_ENTITIES_KEY: True}},
{"goodbye": {USE_ENTITIES_KEY: True}},
]
assert transformed == expected
def test_transform_intents_for_file_with_entity_roles_groups():
domain_path = "data/test_domains/travel_form.yml"
domain = Domain.load(domain_path)
transformed = domain._transform_intents_for_file()
expected = [
{"inform": {USE_ENTITIES_KEY: ["GPE"]}},
{"greet": {USE_ENTITIES_KEY: ["name"]}},
]
assert transformed == expected
def test_transform_entities_for_file_default():
domain_path = "data/test_domains/travel_form.yml"
domain = Domain.load(domain_path)
transformed = domain._transform_entities_for_file()
expected = [{"GPE": {ENTITY_ROLES_KEY: ["destination", "origin"]}}, "name"]
assert transformed == expected
def test_clean_domain_for_file():
domain_path = "data/test_domains/default_unfeaturized_entities.yml"
cleaned = Domain.load(domain_path).cleaned_domain()
expected = {
"intents": [
{"greet": {USE_ENTITIES_KEY: ["name"]}},
{"default": {IGNORE_ENTITIES_KEY: ["unrelated_recognized_entity"]}},
{"goodbye": {USE_ENTITIES_KEY: []}},
{"thank": {USE_ENTITIES_KEY: []}},
"ask",
{"why": {USE_ENTITIES_KEY: []}},
"pure_intent",
],
"entities": ["name", "unrelated_recognized_entity", "other"],
"responses": {
"utter_greet": [{"text": "hey there!"}],
"utter_goodbye": [{"text": "goodbye :("}],
"utter_default": [{"text": "default message"}],
},
"session_config": {
"carry_over_slots_to_new_session": True,
"session_expiration_time": DEFAULT_SESSION_EXPIRATION_TIME_IN_MINUTES,
},
}
assert cleaned == expected
def test_not_add_knowledge_base_slots():
test_domain = Domain.empty()
slot_names = [s.name for s in test_domain.slots]
assert SLOT_LISTED_ITEMS not in slot_names
assert SLOT_LAST_OBJECT not in slot_names
assert SLOT_LAST_OBJECT_TYPE not in slot_names
def test_add_knowledge_base_slots():
test_domain = Domain.from_yaml(
f"""
actions:
- {DEFAULT_KNOWLEDGE_BASE_ACTION}
"""
)
slot_names = [s.name for s in test_domain.slots]
assert SLOT_LISTED_ITEMS in slot_names
assert SLOT_LAST_OBJECT in slot_names
assert SLOT_LAST_OBJECT_TYPE in slot_names
@pytest.mark.parametrize(
"input_domain, expected_session_expiration_time, expected_carry_over_slots",
[
(
f"""session_config:
session_expiration_time: {DEFAULT_SESSION_EXPIRATION_TIME_IN_MINUTES}
carry_over_slots_to_new_session: true""",
DEFAULT_SESSION_EXPIRATION_TIME_IN_MINUTES,
True,
),
("", DEFAULT_SESSION_EXPIRATION_TIME_IN_MINUTES, True),
(
"""session_config:
carry_over_slots_to_new_session: false""",
DEFAULT_SESSION_EXPIRATION_TIME_IN_MINUTES,
False,
),
(
"""session_config:
session_expiration_time: 20.2
carry_over_slots_to_new_session: False""",
20.2,
False,
),
("""session_config: {}""", DEFAULT_SESSION_EXPIRATION_TIME_IN_MINUTES, True),
],
)
def test_session_config(
input_domain,
expected_session_expiration_time: float,
expected_carry_over_slots: bool,
):
domain = Domain.from_yaml(input_domain)
assert (
domain.session_config.session_expiration_time
== expected_session_expiration_time
)
assert domain.session_config.carry_over_slots == expected_carry_over_slots
def test_domain_as_dict_with_session_config():
session_config = SessionConfig(123, False)
domain = Domain.empty()
domain.session_config = session_config
serialized = domain.as_dict()
deserialized = Domain.from_dict(serialized)
assert deserialized.session_config == session_config
@pytest.mark.parametrize(
"session_config, enabled",
[
(SessionConfig(0, True), False),
(SessionConfig(1, True), True),
(SessionConfig(-1, False), False),
],
)
def test_are_sessions_enabled(session_config: SessionConfig, enabled: bool):
assert session_config.are_sessions_enabled() == enabled
def test_domain_from_dict_does_not_change_input():
input_before = {
"intents": [
{"greet": {USE_ENTITIES_KEY: ["name"]}},
{"default": {IGNORE_ENTITIES_KEY: ["unrelated_recognized_entity"]}},
{"goodbye": {USE_ENTITIES_KEY: None}},
{"thank": {USE_ENTITIES_KEY: False}},
{"ask": {USE_ENTITIES_KEY: True}},
{"why": {USE_ENTITIES_KEY: []}},
"pure_intent",
],
"entities": ["name", "unrelated_recognized_entity", "other"],
"slots": {"name": {"type": "text"}},
"responses": {
"utter_greet": [{"text": "hey there {name}!"}],
"utter_goodbye": [{"text": "goodbye 😢"}, {"text": "bye bye 😢"}],
"utter_default": [{"text": "default message"}],
},
}
input_after = copy.deepcopy(input_before)
Domain.from_dict(input_after)
assert input_after == input_before
@pytest.mark.parametrize(
"domain", [{}, {"intents": DEFAULT_INTENTS}, {"intents": [DEFAULT_INTENTS[0]]}]
)
def test_add_default_intents(domain: Dict):
domain = Domain.from_dict(domain)
assert all(intent_name in domain.intents for intent_name in DEFAULT_INTENTS)
def test_domain_deepcopy():
domain = Domain.load(DEFAULT_DOMAIN_PATH_WITH_SLOTS)
new_domain = copy.deepcopy(domain)
assert isinstance(new_domain, Domain)
# equalities
assert new_domain.intent_properties == domain.intent_properties
assert new_domain.overridden_default_intents == domain.overridden_default_intents
assert new_domain.entities == domain.entities
assert new_domain.forms == domain.forms
assert new_domain.form_names == domain.form_names
assert new_domain.templates == domain.templates
assert new_domain.action_texts == domain.action_texts
assert new_domain.session_config == domain.session_config
assert new_domain._custom_actions == domain._custom_actions
assert new_domain.user_actions == domain.user_actions
assert new_domain.action_names_or_texts == domain.action_names_or_texts
assert new_domain.store_entities_as_slots == domain.store_entities_as_slots
# not the same objects
assert new_domain is not domain
assert new_domain.intent_properties is not domain.intent_properties
assert (
new_domain.overridden_default_intents is not domain.overridden_default_intents
)
assert new_domain.entities is not domain.entities
assert new_domain.forms is not domain.forms
assert new_domain.form_names is not domain.form_names
assert new_domain.slots is not domain.slots
assert new_domain.templates is not domain.templates
assert new_domain.action_texts is not domain.action_texts
assert new_domain.session_config is not domain.session_config
assert new_domain._custom_actions is not domain._custom_actions
assert new_domain.user_actions is not domain.user_actions
assert new_domain.action_names_or_texts is not domain.action_names_or_texts
@pytest.mark.parametrize(
"template_key, validation",
[("utter_chitchat/faq", True), ("utter_chitchat", False)],
)
def test_is_retrieval_intent_template(template_key, validation):
domain = Domain.load(DEFAULT_DOMAIN_PATH_WITH_SLOTS)
assert domain.is_retrieval_intent_template((template_key, [{}])) == validation
def test_retrieval_intent_template_segregation():
domain = Domain.load("data/test_domains/mixed_retrieval_intents.yml")
assert domain.templates != domain.retrieval_intent_templates
assert domain.templates and domain.retrieval_intent_templates
assert list(domain.retrieval_intent_templates.keys()) == [
"utter_chitchat/ask_weather",
"utter_chitchat/ask_name",
]
def test_get_featurized_entities():
domain = Domain.load("data/test_domains/travel_form.yml")
user_uttered = UserUttered(
text="Hello, I am going to London",
intent={"name": "greet", "confidence": 1.0},
entities=[{"entity": "GPE", "value": "London", "role": "destination"}],
)
featurized_entities = domain._get_featurized_entities(user_uttered)
assert featurized_entities == set()
user_uttered = UserUttered(
text="I am going to London",
intent={"inform": "greet", "confidence": 1.0},
entities=[{"entity": "GPE", "value": "London", "role": "destination"}],
)
featurized_entities = domain._get_featurized_entities(user_uttered)
assert featurized_entities == {"GPE", f"GPE{ENTITY_LABEL_SEPARATOR}destination"}
@pytest.mark.parametrize(
"domain_as_dict",
[
# No forms
{KEY_FORMS: {}},
        # Deprecated but still supported form syntax
{KEY_FORMS: ["my form", "other form"]},
# No slot mappings
{KEY_FORMS: {"my_form": None}},
{KEY_FORMS: {"my_form": {}}},
# Valid slot mappings
{
KEY_FORMS: {
"my_form": {"slot_x": [{"type": "from_entity", "entity": "name"}]}
}
},
{KEY_FORMS: {"my_form": {"slot_x": [{"type": "from_intent", "value": 5}]}}},
{
KEY_FORMS: {
"my_form": {"slot_x": [{"type": "from_intent", "value": "some value"}]}
}
},
{KEY_FORMS: {"my_form": {"slot_x": [{"type": "from_intent", "value": False}]}}},
{
KEY_FORMS: {
"my_form": {"slot_x": [{"type": "from_trigger_intent", "value": 5}]}
}
},
{
KEY_FORMS: {
"my_form": {
"slot_x": [{"type": "from_trigger_intent", "value": "some value"}]
}
}
},
{KEY_FORMS: {"my_form": {"slot_x": [{"type": "from_text"}]}}},
],
)
def test_valid_slot_mappings(domain_as_dict: Dict[Text, Any]):
Domain.from_dict(domain_as_dict)
@pytest.mark.parametrize(
"domain_as_dict",
[
# Wrong type for slot names
{KEY_FORMS: {"my_form": []}},
{KEY_FORMS: {"my_form": 5}},
# Slot mappings not defined as list
{KEY_FORMS: {"my_form": {"slot1": {}}}},
# Unknown mapping
{KEY_FORMS: {"my_form": {"slot1": [{"type": "my slot mapping"}]}}},
# Mappings with missing keys
{
KEY_FORMS: {
"my_form": {"slot1": [{"type": "from_entity", "intent": "greet"}]}
}
},
{KEY_FORMS: {"my_form": {"slot1": [{"type": "from_intent"}]}}},
{KEY_FORMS: {"my_form": {"slot1": [{"type": "from_intent", "value": None}]}}},
{KEY_FORMS: {"my_form": {"slot1": [{"type": "from_trigger_intent"}]}}},
{
KEY_FORMS: {
"my_form": {"slot1": [{"type": "from_trigger_intent", "value": None}]}
}
},
],
)
def test_form_invalid_mappings(domain_as_dict: Dict[Text, Any]):
with pytest.raises(InvalidDomain):
Domain.from_dict(domain_as_dict)
def test_slot_order_is_preserved():
test_yaml = f"""version: '{LATEST_TRAINING_DATA_FORMAT_VERSION}'
session_config:
session_expiration_time: 60
carry_over_slots_to_new_session: true
slots:
confirm:
type: bool
influence_conversation: false
previous_email:
type: text
influence_conversation: false
caller_id:
type: text
influence_conversation: false
email:
type: text
influence_conversation: false
incident_title:
type: text
influence_conversation: false
priority:
type: text
influence_conversation: false
problem_description:
type: text
influence_conversation: false
requested_slot:
type: text
influence_conversation: false
handoff_to:
type: text
influence_conversation: false
"""
domain = Domain.from_yaml(test_yaml)
assert domain.as_yaml(clean_before_dump=True) == test_yaml
def test_slot_order_is_preserved_when_merging():
slot_1 = """
b:
type: text
influence_conversation: false
a:
type: text
influence_conversation: false"""
test_yaml_1 = f"""
slots:{slot_1}
"""
slot_2 = """
d:
type: text
influence_conversation: false
c:
type: text
influence_conversation: false"""
test_yaml_2 = f"""
slots:{slot_2}
"""
test_yaml_merged = f"""version: '{LATEST_TRAINING_DATA_FORMAT_VERSION}'
session_config:
session_expiration_time: 60
carry_over_slots_to_new_session: true
slots:{slot_2}{slot_1}
"""
domain_1 = Domain.from_yaml(test_yaml_1)
domain_2 = Domain.from_yaml(test_yaml_2)
domain_merged = domain_1.merge(domain_2)
assert domain_merged.as_yaml(clean_before_dump=True) == test_yaml_merged
def test_responses_text_multiline_is_preserved():
test_yaml = f"""version: '{LATEST_TRAINING_DATA_FORMAT_VERSION}'
session_config:
session_expiration_time: 60
carry_over_slots_to_new_session: true
responses:
utter_confirm:
- text: |-
First line
Second line
Third line
- text: One more response
utter_cancel:
- text: First line
- text: Second line
"""
domain = Domain.from_yaml(test_yaml)
assert domain.as_yaml(clean_before_dump=True) == test_yaml
def test_is_valid_domain_doesnt_raise_with_valid_domain(tmpdir: Path):
domain_path = str(tmpdir / "domain.yml")
rasa.shared.utils.io.write_text_file(
"""
responses:
utter_greet:
- text: hey there! """,
domain_path,
)
assert Domain.is_domain_file(domain_path)
def test_is_valid_domain_doesnt_raise_with_invalid_domain(tmpdir: Path):
domain_path = str(tmpdir / "domain.yml")
rasa.shared.utils.io.write_text_file(
"""
invalid""",
domain_path,
)
assert not Domain.is_domain_file(domain_path)
def test_is_valid_domain_doesnt_raise_with_invalid_yaml(tmpdir: Path):
potential_domain_path = str(tmpdir / "domain.yml")
rasa.shared.utils.io.write_text_file(
"""
script:
- echo "Latest SDK version is ${RASA_SDK_VERSION}""",
potential_domain_path,
)
assert not Domain.is_domain_file(potential_domain_path)
| 32.933485 | 481 | 0.621032 |
2f21d513fba4266c7429dbe0ad97c3d93f2190d5 | 552 | py | Python | setup.py | mpasternak/django-emailtemplates | 529e0120c8c3a58605257eff893df636a5cbf8d0 | ["MIT"] | 1 | 2015-05-18T13:51:08.000Z | 2015-05-18T13:51:08.000Z | setup.py | mpasternak/django-emailtemplates | 529e0120c8c3a58605257eff893df636a5cbf8d0 | ["MIT"] | null | null | null | setup.py | mpasternak/django-emailtemplates | 529e0120c8c3a58605257eff893df636a5cbf8d0 | ["MIT"] | null | null | null |
# -*- encoding: utf-8 -*-
import os
from setuptools import setup
setup(name='django-emailtemplates',
      description='Send e-mails using database-stored e-mail templates with Django',
      version='0.2',
      author=u'Michał Pasternak - FHU Kagami',
      author_email='[email protected]',
      url='http://fhu-kagami.pl/',
      license='MIT',
      packages=['emailtemplates', 'emailtemplates.conf'],
      include_package_data=True,
      install_requires=['django'],
      zip_safe=False)
| 34.5 | 82 | 0.605072 |
2caa7fc019e38497ae24aa3ea0498936b014121d | 3,011 | py | Python | python_scripts/linear_models_ex_05.py | miwojc/scikit-learn-mooc | 1dd1e6110ee037c2ce9b9597d769de5eb25fcffb | ["CC-BY-4.0"] | 2 | 2021-09-30T11:07:28.000Z | 2021-09-30T11:07:31.000Z | python_scripts/linear_models_ex_05.py | Ravimk07/scikit-learn-mooc | c3aaf8c5a9aa4f1d749ebc1b7d5ae24619fee4bf | ["CC-BY-4.0"] | null | null | null | python_scripts/linear_models_ex_05.py | Ravimk07/scikit-learn-mooc | c3aaf8c5a9aa4f1d749ebc1b7d5ae24619fee4bf | ["CC-BY-4.0"] | null | null | null |
# %% [markdown]
# # 📝 Exercise M4.05
# In the previous notebook, we presented a non-penalized logistic regression
# classifier. This classifier accepts a parameter `penalty` to add a
# regularization. The regularization strength is set using the parameter `C`.
#
# In this exercise, we ask you to train a l2-penalized logistic regression
# classifier and to find by yourself the effect of the parameter `C`.
#
# We will start by loading the dataset and create the helper function to show
# the decision separation as in the previous code.
# %% [markdown]
# ```{note}
# If you want a deeper overview regarding this dataset, you can refer to the
# Appendix - Datasets description section at the end of this MOOC.
# ```
# %%
import pandas as pd
from sklearn.model_selection import train_test_split
penguins = pd.read_csv("../datasets/penguins_classification.csv")
# only keep the Adelie and Chinstrap classes
penguins = penguins.set_index("Species").loc[
["Adelie", "Chinstrap"]].reset_index()
culmen_columns = ["Culmen Length (mm)", "Culmen Depth (mm)"]
target_column = "Species"
# %%
from sklearn.model_selection import train_test_split
penguins_train, penguins_test = train_test_split(penguins, random_state=0)
data_train = penguins_train[culmen_columns]
data_test = penguins_test[culmen_columns]
target_train = penguins_train[target_column]
target_test = penguins_test[target_column]
range_features = {
feature_name: (penguins[feature_name].min() - 1,
penguins[feature_name].max() + 1)
for feature_name in culmen_columns
}
# %%
import numpy as np
import matplotlib.pyplot as plt
def plot_decision_function(fitted_classifier, range_features, ax=None):
"""Plot the boundary of the decision function of a classifier."""
from sklearn.preprocessing import LabelEncoder
feature_names = list(range_features.keys())
# create a grid to evaluate all possible samples
plot_step = 0.02
xx, yy = np.meshgrid(
np.arange(*range_features[feature_names[0]], plot_step),
np.arange(*range_features[feature_names[1]], plot_step),
)
# compute the associated prediction
Z = fitted_classifier.predict(np.c_[xx.ravel(), yy.ravel()])
Z = LabelEncoder().fit_transform(Z)
Z = Z.reshape(xx.shape)
# make the plot of the boundary and the data samples
if ax is None:
_, ax = plt.subplots()
ax.contourf(xx, yy, Z, alpha=0.4, cmap="RdBu")
ax.set_xlabel(feature_names[0])
ax.set_ylabel(feature_names[1])
return ax
# %% [markdown]
# Given the following candidate for the parameter `C`, find out what is the
# effect of the value of this parameter on the decision boundary and on the
# weights magnitude.
# %%
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
Cs = [0.01, 0.1, 1, 10]
logistic_regression = make_pipeline(
StandardScaler(), LogisticRegression(penalty="l2"))
# %%
# Write your code here.
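# %% [markdown]
# A minimal sketch of one possible solution (not the official one): refit the
# pipeline for each candidate `C`, draw the decision boundary with the helper
# defined above, and keep the fitted coefficients to compare their magnitudes.
# The names `axs` and `weights` below are illustrative only.
# %%
fig, axs = plt.subplots(ncols=len(Cs), figsize=(4 * len(Cs), 4))
weights = {}
for C, ax in zip(Cs, axs):
    logistic_regression.set_params(logisticregression__C=C)
    logistic_regression.fit(data_train, target_train)
    plot_decision_function(logistic_regression, range_features, ax=ax)
    ax.set_title(f"C={C}")
    # coefficients of the LogisticRegression step, kept to inspect weight magnitude
    weights[C] = logistic_regression[-1].coef_.ravel()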
| 31.041237 | 77 | 0.732315 |
968f225fb15828f32f42f6dddb8922c14773a89d
| 2,021 |
py
|
Python
|
backend/app/schemas/user.py
|
MyjJr/amechii
|
8f68acc568f252e491115b8b1ff468b2fcb4dc92
|
[
"MIT"
] | 1 |
2021-08-31T15:06:59.000Z
|
2021-08-31T15:06:59.000Z
|
backend/app/schemas/user.py
|
MyjJr/Amechii
|
8f68acc568f252e491115b8b1ff468b2fcb4dc92
|
[
"MIT"
] | 8 |
2021-09-03T02:02:45.000Z
|
2021-09-06T05:11:58.000Z
|
backend/app/schemas/user.py
|
MyjJr/amechii
|
8f68acc568f252e491115b8b1ff468b2fcb4dc92
|
[
"MIT"
] | null | null | null |
from datetime import datetime
from typing import List, Optional # , TYPE_CHECKING, Dict
from pydantic import BaseModel
from app.schemas.transaction import Transaction
from app.schemas.address import Address
from app.schemas.item import Item
class UserBase(BaseModel):
display_name: Optional[str] = None
icon: Optional[str] = None
class Config:
orm_mode = True
class UserCreate(UserBase):
name: str
icon: str = "default.png"
password: str
class UserUpdate(UserBase):
password: Optional[str] = None
class UserInDB(UserBase):
id: Optional[int] = None
name: Optional[str] = None
registration_time: Optional[datetime] = None
class UserLogin(BaseModel):
name: str
password: str
class Follow(BaseModel):
user_id: int
following: int
class Config:
orm_mode = True
class Following(BaseModel):
following: Optional[List[Follow]] = None
class Config:
orm_mode = True
class TaskBase(BaseModel):
title: Optional[str] = None
deadline: Optional[datetime] = None
back_money: Optional[bool] = None
class FavouriteCreate(BaseModel):
user_id: int
item_id: int
class Config:
orm_mode = True
class FavouriteUpdate(FavouriteCreate):
pass
class Favourite(BaseModel):
items: Optional[Item] = None
class Config:
orm_mode = True
class User(UserInDB):
id: Optional[int] = None
name: Optional[str] = None
balance: int
registration_time: Optional[datetime] = None
following: List[UserInDB] = []
followers: List[UserInDB] = []
favourites: Optional[List[Favourite]] = None
class Config:
orm_mode = True
class UserInfo(User):
from app.schemas.task import TaskRes, SetTaskRes
do_tasks: List[TaskRes] = []
set_tasks: List[SetTaskRes] = []
transactions: Optional[List[Transaction]] = None
address: Optional[List[Address]] = None
favourites: Optional[List[Favourite]] = None
class Config:
orm_mode = True
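# Illustrative usage only (values are made up): pydantic validates fields and
# fills defaults when a schema is instantiated, e.g.
#   payload = UserCreate(name="alice", password="s3cret")
#   payload.icon                                          # -> "default.png"
#   UserUpdate(password="new").dict(exclude_unset=True)   # -> {"password": "new"}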
| 20.009901 | 58 | 0.680356 |
7e75de7a315acc5267cc57beeb7b91f7de5c251a
| 1,914 |
py
|
Python
|
benchmarking/utils/subprocess_with_logger.py
|
pritamdamania87/FAI-PEP
|
f5ab2272a435edd75beead651d043d4ad1e6e4d9
|
[
"Apache-2.0"
] | null | null | null |
benchmarking/utils/subprocess_with_logger.py
|
pritamdamania87/FAI-PEP
|
f5ab2272a435edd75beead651d043d4ad1e6e4d9
|
[
"Apache-2.0"
] | null | null | null |
benchmarking/utils/subprocess_with_logger.py
|
pritamdamania87/FAI-PEP
|
f5ab2272a435edd75beead651d043d4ad1e6e4d9
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
##############################################################################
# Copyright 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
##############################################################################
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import subprocess
import sys
from .custom_logger import getLogger
from .utilities import setRunFailure
def processRun(*args, **kwargs):
getLogger().info("Running: %s", ' '.join(*args))
err_output = None
try:
log_output = False
if "log_output" in kwargs:
log_output = kwargs["log_output"]
del kwargs["log_output"]
output = None
if "non_blocking" in kwargs and kwargs["non_blocking"]:
subprocess.Popen(*args)
return "", None
else:
output_raw = subprocess.check_output(*args,
stderr=subprocess.STDOUT,
**kwargs)
# without the decode/encode the string cannot be printed out
output = output_raw.decode("utf-8", "ignore")
if log_output:
getLogger().info(output)
return output, None
except subprocess.CalledProcessError as e:
err_output = e.output.decode("utf-8", "ignore")
getLogger().error("Command failed: {}".format(err_output))
except Exception:
getLogger().error("Unknown exception {}: {}".format(sys.exc_info()[0],
' '.join(*args)))
err_output = "{}".format(sys.exc_info()[0])
setRunFailure()
return None, err_output
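# Illustrative usage (the command below is hypothetical): a successful call
# returns (stdout_text, None); a failing or unknown command returns
# (None, error_text) after logging and flagging the run via setRunFailure().
#   out, err = processRun(["echo", "hello"], log_output=True)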
| 37.529412 | 78 | 0.549112 |
f3982f9232a52a29897ac3c810052fd7a58b983b
| 5,016 |
py
|
Python
|
scout/commands/delete/delete_command.py
|
CHRUdeLille/scout
|
0f70bec32e078d1825ebf20237f4a4979585dffb
|
[
"BSD-3-Clause"
] | null | null | null |
scout/commands/delete/delete_command.py
|
CHRUdeLille/scout
|
0f70bec32e078d1825ebf20237f4a4979585dffb
|
[
"BSD-3-Clause"
] | null | null | null |
scout/commands/delete/delete_command.py
|
CHRUdeLille/scout
|
0f70bec32e078d1825ebf20237f4a4979585dffb
|
[
"BSD-3-Clause"
] | null | null | null |
import logging
import click
log = logging.getLogger(__name__)
@click.command('panel', short_help='Delete a gene panel')
@click.option('--panel-id',
help="The panel identifier name",
required=True
)
@click.option('-v', '--version',
type=float,
)
@click.pass_context
def panel(context, panel_id, version):
"""Delete a version of a gene panel or all versions of a gene panel"""
log.info("Running scout delete panel")
adapter = context.obj['adapter']
panel_objs = adapter.gene_panels(panel_id=panel_id, version=version)
if panel_objs.count() == 0:
log.info("No panels found")
for panel_obj in panel_objs:
adapter.delete_panel(panel_obj)
# @click.command('users', short_help='Display users')
# @click.pass_context
# def users(context):
# """Show all users in the database"""
# log.info("Running scout view users")
# adapter = context.obj['adapter']
#
# ## TODO add a User interface to the adapter
# for user_obj in User.objects():
# click.echo(user_obj['name'])
#
# @click.command('institutes', short_help='Display institutes')
# @click.pass_context
# def institutes(context):
# """Show all institutes in the database"""
# log.info("Running scout view institutes")
# adapter = context.obj['adapter']
#
# for institute_obj in adapter.institutes():
# click.echo(institute_obj['internal_id'])
@click.command('index', short_help='Delete all indexes')
@click.pass_context
def index(context):
"""Delete all indexes in the database"""
log.info("Running scout delete index")
adapter = context.obj['adapter']
for collection in adapter.db.collection_names():
adapter.db[collection].drop_indexes()
log.info("All indexes deleted")
@click.command('user', short_help='Delete a user')
@click.option('-m', '--mail', required=True)
@click.pass_context
def user(context, mail):
"""Delete a user from the database"""
log.info("Running scout delete user")
adapter = context.obj['adapter']
user_obj = adapter.user(mail)
if not user_obj:
log.warning("User {0} could not be found in database".format(mail))
else:
adapter.delete_user(mail)
@click.command('genes', short_help='Delete genes')
@click.option('-b', 'build', type=click.Choice(['37', '38']))
@click.pass_context
def genes(context, build):
"""Delete all genes in the database"""
log.info("Running scout delete genes")
adapter = context.obj['adapter']
if build:
log.info("Dropping genes collection for build: %s", build)
else:
log.info("Dropping genes collection")
adapter.drop_genes()
@click.command('case', short_help='Delete a case')
@click.option('-i', '--institute', help='institute id of related cases')
@click.option('-c', '--case-id')
@click.option('-d', '--display-name')
@click.pass_context
def case(context, institute, case_id, display_name):
"""Delete a case and it's variants from the database"""
adapter = context.obj['adapter']
if not (case_id or display_name):
click.echo("Please specify what case to delete")
context.abort()
if display_name:
if not institute:
click.echo("Please specify the owner of the case that should be "
"deleted with flag '-i/--institute'.")
context.abort()
case_id = "{0}-{1}".format(institute, display_name)
log.info("Running deleting case {0}".format(case_id))
case = adapter.delete_case(
case_id=case_id,
institute_id=institute,
display_name=display_name
)
if case.deleted_count == 1:
adapter.delete_variants(case_id=case_id, variant_type='clinical')
adapter.delete_variants(case_id=case_id, variant_type='research')
else:
log.warning("Case does not exist in database")
context.abort()
# @click.command('diseases', short_help='Display all diseases')
# @click.pass_context
# def diseases(context):
# """Show all diseases in the database"""
# log.info("Running scout view diseases")
# adapter = context.obj['adapter']
#
# click.echo("Disease")
# for disease_obj in adapter.disease_terms():
# click.echo("{0}:{1}".format(
# disease_obj['source'],
# disease_obj['disease_id'],
# ))
#
# @click.command('hpo', short_help='Display all hpo terms')
# @click.pass_context
# def hpo(context):
# """Show all hpo terms in the database"""
# log.info("Running scout view hpo")
# adapter = context.obj['adapter']
#
# click.echo("hpo_id\tdescription")
# for hpo_obj in adapter.hpo_terms():
# click.echo("{0}\t{1}".format(
# hpo_obj.hpo_id,
# hpo_obj.description,
# ))
@click.group()
@click.pass_context
def delete(context):
"""
Delete objects from the database.
"""
pass
delete.add_command(genes)
delete.add_command(case)
delete.add_command(user)
delete.add_command(index)
delete.add_command(panel)
| 30.035928 | 77 | 0.653509 |
ea04448f6920da1ddec70650bf7801f74f6efd46
| 1,496 |
py
|
Python
|
docs/source/auto_examples/image/compute_smooth.py
|
theislab/squidpy_notebooks
|
371ddeab15c26140d74da531ce7e63eda4d5ea89
|
[
"MIT"
] | 9 |
2021-02-16T08:22:42.000Z
|
2022-02-21T05:49:33.000Z
|
docs/source/auto_examples/image/compute_smooth.py
|
theislab/squidpy_notebooks
|
371ddeab15c26140d74da531ce7e63eda4d5ea89
|
[
"MIT"
] | 32 |
2021-02-14T17:10:40.000Z
|
2022-03-09T20:52:26.000Z
|
docs/source/auto_examples/image/compute_smooth.py
|
theislab/squidpy_notebooks
|
371ddeab15c26140d74da531ce7e63eda4d5ea89
|
[
"MIT"
] | 3 |
2021-04-01T17:10:27.000Z
|
2022-02-09T12:54:14.000Z
|
#!/usr/bin/env python
r"""
Smooth an image
---------------
This example shows how to use :func:`squidpy.im.process` to smooth an image layer of :class:`squidpy.im.ImageContainer`.
We use the argument ``method="smooth"`` to smooth the image.
This calls :func:`skimage.filters.gaussian` in the background.
Keyword arguments ``kwargs`` are passed to the wrapped function.
This allows us to set the width of the Gaussian kernel, :math:`\\sigma`, used for smoothing.
.. seealso::
- :ref:`sphx_glr_auto_examples_image_compute_gray.py`.
- :ref:`sphx_glr_auto_examples_image_compute_process_hires.py`.
"""
import squidpy as sq
import matplotlib.pyplot as plt
# load the H&E stained tissue image
img = sq.datasets.visium_hne_image_crop()
###############################################################################
# Smooth the image with ``sigma = 2``.
# With the argument ``layer`` we can select the image layer that should be processed.
# By default, the resulting image is saved in the layer ``image_smooth``.
# This behavior can be changed with the arguments ``copy`` and ``layer_added``.
sq.im.process(img, layer="image", method="smooth", sigma=2)
###############################################################################
# Now we can look at the result on a cropped part of the image.
crop = img.crop_corner(0, 0, size=200)
fig, axes = plt.subplots(1, 2)
for i, layer in enumerate(["image", "image_smooth"]):
crop.show(layer, ax=axes[i])
axes[i].set_title(layer)
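###############################################################################
# As noted above, the output layer name (and whether a copy is returned) can be
# controlled with ``layer_added``/``copy``. A minimal sketch; the layer name
# ``image_smooth_s4`` is just an example:
sq.im.process(img, layer="image", method="smooth", sigma=4, layer_added="image_smooth_s4")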
| 36.487805 | 120 | 0.644385 |
4b5103570b4da681b37a6c63586f59a6c9540d0d
| 4,098 |
py
|
Python
|
app/study/filterCandlePattern.py
|
kyoungd/material-stock-finder-app
|
60b4a274ddb304ae8257f6a53a0d91b65975b649
|
[
"MIT"
] | null | null | null |
app/study/filterCandlePattern.py
|
kyoungd/material-stock-finder-app
|
60b4a274ddb304ae8257f6a53a0d91b65975b649
|
[
"MIT"
] | null | null | null |
app/study/filterCandlePattern.py
|
kyoungd/material-stock-finder-app
|
60b4a274ddb304ae8257f6a53a0d91b65975b649
|
[
"MIT"
] | 1 |
2022-03-26T06:50:59.000Z
|
2022-03-26T06:50:59.000Z
|
import pandas as pd
import numpy as np
import talib
import os
from datetime import date
from util import AllStocks, StockAnalysis, EnvFile
import logging
class engulfingCandle:
def __init__(self, df, minChangePercent, minChangeValue):
data = df.loc[0:4]
self.data = data[::-1]
minCalcPrice = self.data.iloc[0].Close * minChangePercent
self.minValue = minChangeValue if minCalcPrice < minChangeValue else minCalcPrice
def CDLENGULFING(self, df):
res = talib.CDLENGULFING(
df.Open.values, df.High.values, df.Low.values, df.Close.values)
return res
def CDLENGULFING_MIN(self, df, result, minChange):
for index, row in df.iterrows():
if index > 0 and result[index] != 0:
change = abs(df.Open[index] - df.Close[index-1])
return True if change >= minChange else False
return False
def run(self):
step1 = self.CDLENGULFING(self.data)
result = self.CDLENGULFING_MIN(self.data, step1, self.minValue)
return result
class starCandle:
def __init__(self, symbol:str, df:pd.DataFrame):
data = df.loc[0:4]
self.data = data[::-1]
self.symbol = symbol
def CDLEVENINGDOJISTAR(self, df):
res = talib.CDLEVENINGDOJISTAR(
df.Open.values, df.High.values, df.Low.values, df.Close.values)
return res
def CDLEVENINGSTAR(self, df):
res = talib.CDLEVENINGSTAR(
df.Open.values, df.High.values, df.Low.values, df.Close.values)
return res
def CDLMORNINGDOJISTAR(self, df):
res = talib.CDLMORNINGDOJISTAR(
df.Open.values, df.High.values, df.Low.values, df.Close.values)
return res
def CDLMORNINGSTAR(self, df):
res = talib.CDLMORNINGSTAR(
df.Open.values, df.High.values, df.Low.values, df.Close.values)
return res
def run(self):
try:
step1 = self.CDLEVENINGDOJISTAR(self.data)
step2 = self.CDLEVENINGSTAR(self.data)
step3 = self.CDLMORNINGDOJISTAR(self.data)
step4 = self.CDLMORNINGSTAR(self.data)
return sum(step1) + sum(step2) + sum(step3) + sum(step4)
except Exception as e:
logging.error(f'filterCandlePattern.starPattern.run: {self.symbol} - {e}')
return 0
class FilterCandlePattern:
def __init__(self):
self.minEngulfingCandleChangePercent = float(
EnvFile.Get('MIN_ENGULFING_CANDLE_CHANGE_PERCENT', '0.03'))
self.minEngulfingCandleChangevalue = float(
EnvFile.Get('MIN_ENGULFING_CANDLE_CHANGE_VALUE', '0.2'))
self.sa = StockAnalysis()
self.data = self.sa.GetJson
def Run(self, symbol):
isLoaded, df = AllStocks.GetDailyStockData(symbol)
if isLoaded:
try:
filterEngulf = engulfingCandle(df, self.minEngulfingCandleChangePercent, self.minEngulfingCandleChangevalue)
filterStar = starCandle(symbol, df)
engulf = filterEngulf.run()
star = filterStar.run()
self.sa.UpdateFilter(self.data, symbol, 'engulf', engulf)
self.sa.UpdateFilter(self.data, symbol, 'st', True if star > 0 else False)
except Exception as e:
self.sa.UpdateFilter(self.data, symbol, 'engulf', False)
self.sa.UpdateFilter(self.data, symbol, 'st', False)
logging.error(f'filterCandlePattern.Run: {symbol} - {e}')
print(f'filterCandlePattern.Run: {symbol} - {e}')
return False
@staticmethod
def All():
filter = FilterCandlePattern()
AllStocks.Run(filter.Run, False)
filter.sa.WriteJson(filter.data)
@staticmethod
def Test():
symbol = 'TSLA'
isLoaded, df = AllStocks.GetDailyStockData(symbol)
if isLoaded:
filter = starCandle(symbol, df)
result = filter.run()
print(result)
if __name__ == '__main__':
pass
| 35.327586 | 124 | 0.617862 |
7dfceffa99fee0d71c65e04b36df493ce2c79d8f
| 3,804 |
py
|
Python
|
src/generate.py
|
kljensen/uBlock-Origin-dev-filter
|
40eaca8c8b8b93d309f7a0c0659021d32d5f6f2e
|
[
"Unlicense"
] | null | null | null |
src/generate.py
|
kljensen/uBlock-Origin-dev-filter
|
40eaca8c8b8b93d309f7a0c0659021d32d5f6f2e
|
[
"Unlicense"
] | null | null | null |
src/generate.py
|
kljensen/uBlock-Origin-dev-filter
|
40eaca8c8b8b93d309f7a0c0659021d32d5f6f2e
|
[
"Unlicense"
] | null | null | null |
from glob import glob
from pathlib import Path
LINE_SEP = "\n"
def to_css_attr(url):
return url.replace("*://", "").replace("*.", ".").replace("/*", "")
def to_google(url):
return f'google.*##.g:has(a[href*="{to_css_attr(url)}"])'
def to_duckduckgo(url):
return f'duckduckgo.*##.results > div:has(a[href*="{to_css_attr(url)}"])'
def to_brave(url):
return f'search.brave.com###results > div:has(a[href*="{to_css_attr(url)}"])'
def to_startpage(url):
return f'startpage.com##.w-gl__result:has(a[href*="{to_css_attr(url)}"])'
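# Worked example (hypothetical input): for the pattern "*://example.com/*",
# to_css_attr() strips the scheme and path, giving "example.com", so
# to_google() yields 'google.*##.g:has(a[href*="example.com"])' and
# to_duckduckgo() yields 'duckduckgo.*##.results > div:has(a[href*="example.com"])'.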
def main():
root_path = Path(__file__).parent.joinpath("../")
dist_path = root_path.joinpath("dist")
tmp_txt = dist_path.joinpath("tmp.txt")
g_all = dist_path.joinpath("google", "all.txt")
d_all = dist_path.joinpath("duckduckgo", "all.txt")
gd_all = dist_path.joinpath("google_duckduckgo", "all.txt")
b_all = dist_path.joinpath("brave", "all.txt")
sp_all = dist_path.joinpath("startpage", "all.txt")
for f in [g_all, d_all, gd_all, b_all, sp_all]:
f.parent.mkdir(parents=True, exist_ok=True)
with g_all.open("w") as g_all, \
d_all.open("w") as d_all, \
gd_all.open("w") as gd_all, \
b_all.open("w") as b_all, \
sp_all.open("w") as sp_all:
for file in root_path.joinpath("data").glob("*.txt"):
filename = file.name.split(".")[0]
# Sort and find duplicates
with file.open("r") as i, tmp_txt.open("w") as tmp:
already_in = set()
for line in i:
if line.startswith("!") or not line.strip():
tmp.write(line)
continue
url = line.strip()
if url in already_in:
print(f"Find duplicate: {url}. Skip!")
continue
else:
already_in.add(url)
tmp.write(line)
tmp_txt.replace(file)
with dist_path.joinpath("google", f"{filename}.txt").open("w") as g, \
dist_path.joinpath("duckduckgo", f"{filename}.txt").open("w") as d, \
dist_path.joinpath("google_duckduckgo", f"{filename}.txt").open("w") as gd, \
dist_path.joinpath("brave", f"{filename}.txt").open("w") as b, \
dist_path.joinpath("startpage", f"{filename}.txt").open("w") as sp, \
file.open("r") as i:
for line in i:
if line.startswith("!") or not line.strip():
continue
url = line.strip()
for f in [
g, g_all,
d, d_all,
gd, gd_all,
b, b_all,
sp, sp_all
]:
f.write(url + LINE_SEP)
url_google = to_google(url)
url_duckduckgo = to_duckduckgo(url)
url_brave = to_brave(url)
url_sp = to_startpage(url)
for f in [
g, g_all,
gd, gd_all
]:
f.write(url_google + LINE_SEP)
for f in [
d, d_all,
gd, gd_all
]:
f.write(url_duckduckgo + LINE_SEP)
for f in [
b, b_all
]:
f.write(url_brave + LINE_SEP)
for f in [
sp, sp_all
]:
f.write(url_sp + LINE_SEP)
if __name__ == "__main__":
main()
| 36.576923 | 93 | 0.458728 |
f5024800a9643d7bb8baf624cf5b3d19b143b99f
| 5,976 |
py
|
Python
|
ivy/functional/backends/tensorflow/linear_algebra.py
|
mehtamohit013/ivy
|
588621c8f607d7771e7f23c7363bf4b106bbd7ab
|
[
"Apache-2.0"
] | 1 |
2022-02-28T19:06:12.000Z
|
2022-02-28T19:06:12.000Z
|
ivy/functional/backends/tensorflow/linear_algebra.py
|
thecoder12/ivy
|
84c5fb82ec43c5c7d0154d5110973805e524831c
|
[
"Apache-2.0"
] | null | null | null |
ivy/functional/backends/tensorflow/linear_algebra.py
|
thecoder12/ivy
|
84c5fb82ec43c5c7d0154d5110973805e524831c
|
[
"Apache-2.0"
] | null | null | null |
# global
import tensorflow as tf
from tensorflow.python.types.core import Tensor
from typing import Union, Optional, Tuple, Literal
from collections import namedtuple
# local
from ivy import inf
import ivy
# Array API Standard #
# -------------------#
inv = tf.linalg.inv
pinv = tf.linalg.pinv
cholesky = tf.linalg.cholesky
def matrix_transpose(x: Tensor)\
-> Tensor:
return tf.experimental.numpy.swapaxes(x, -1, -2)
# noinspection PyUnusedLocal,PyShadowingBuiltins
def vector_norm(x: Tensor,
axis: Optional[Union[int, Tuple[int]]] = None,
keepdims: bool = False,
ord: Union[int, float, Literal[inf, - inf]] = 2)\
-> Tensor:
if ord == -float('inf'):
tn_normalized_vector = tf.reduce_min(tf.abs(x), axis, keepdims)
elif ord == -1:
tn_normalized_vector = tf.reduce_sum(tf.abs(x)**ord, axis, keepdims)**(1./ord)
elif ord == 0:
tn_normalized_vector = tf.reduce_sum(tf.cast(x != 0, 'float32'), axis, keepdims).numpy()
else:
tn_normalized_vector = tf.linalg.norm(x, ord, axis, keepdims)
if tn_normalized_vector.shape == tuple():
return tf.expand_dims(tn_normalized_vector, 0)
return tn_normalized_vector
def matrix_norm(x, p=2, axes=None, keepdims=False):
axes = (-2, -1) if axes is None else axes
if isinstance(axes, int):
raise Exception('if specified, axes must be a length-2 sequence of ints,'
'but found {} of type {}'.format(axes, type(axes)))
if p == -float('inf'):
ret = tf.reduce_min(tf.reduce_sum(tf.abs(x), axis=axes[1], keepdims=True), axis=axes)
elif p == -1:
ret = tf.reduce_min(tf.reduce_sum(tf.abs(x), axis=axes[0], keepdims=True), axis=axes)
else:
ret = tf.linalg.norm(x, p, axes, keepdims)
if ret.shape == ():
return tf.expand_dims(ret, 0)
return ret
# noinspection PyPep8Naming
def svd(x:Tensor,full_matrices: bool = True) -> Union[Tensor, Tuple[Tensor,...]]:
results=namedtuple("svd", "U S Vh")
batch_shape = tf.shape(x)[:-2]
num_batch_dims = len(batch_shape)
transpose_dims = list(range(num_batch_dims)) + [num_batch_dims + 1, num_batch_dims]
D, U, V = tf.linalg.svd(x,full_matrices=full_matrices)
VT = tf.transpose(V, transpose_dims)
res=results(U, D, VT)
return res
def diagonal(x: tf.Tensor,
offset: int = 0,
axis1: int = -2,
axis2: int = -1) -> tf.Tensor:
return tf.experimental.numpy.diagonal(x, offset, axis1=axis1, axis2=axis2)
def qr(x: tf.Tensor,
mode: str = 'reduced') -> namedtuple('qr', ['Q', 'R']):
res = namedtuple('qr', ['Q', 'R'])
if mode == 'reduced':
q, r = tf.linalg.qr(x, full_matrices=False)
return res(q, r)
elif mode == 'complete':
q, r = tf.linalg.qr(x, full_matrices=True)
return res(q, r)
else:
raise Exception("Only 'reduced' and 'complete' qr modes are allowed for the tensorflow backend.")
def matmul(x1: tf.Tensor,
x2: tf.Tensor) -> tf.Tensor:
dtype_from = tf.experimental.numpy.promote_types(x1.dtype.as_numpy_dtype, x2.dtype.as_numpy_dtype)
dtype_from = tf.as_dtype(dtype_from)
if dtype_from.is_unsigned or dtype_from==tf.int8 or dtype_from==tf.int16:
x1 = tf.cast(x1, tf.int64)
x2 = tf.cast(x2, tf.int64)
if x1.dtype != x2.dtype:
x1 = tf.cast(x1, dtype_from)
x2 = tf.cast(x2, dtype_from)
if (x1.shape == () or x2.shape == ()
            or (len(x1.shape) == len(x2.shape) == 1 and x1.shape != x2.shape)
or (len(x1.shape) == 1 and len(x2.shape) >= 2 and x1.shape[0] != x2.shape[-2])
or (len(x2.shape) == 1 and len(x1.shape) >= 2 and x2.shape[0] != x1.shape[-1])
or (len(x1.shape) >= 2 and len(x2.shape) >= 2 and x1.shape[-1] != x2.shape[-2])):
raise Exception('Error,shapes not compatible')
if len(x1.shape) == len(x2.shape) == 1:
if x1.shape == 0:
ret = tf.constant(0)
else:
ret = tf.math.multiply(x1, x2)[0]
ret = tf.cast(ret, dtype=dtype_from)
return ret
x1_padded = False
x1_padded_2 = False
x2_padded = False
if len(x1.shape) == 1:
if len(x2.shape) == 2:
x1_padded_2 = True
elif len(x2.shape) > 2:
x1_padded = True
x1 = tf.expand_dims(x1, axis=0)
elif len(x2.shape) == 1 and len(x1.shape) >= 2:
x2 = tf.expand_dims(x2, axis=1)
x2_padded = True
ret = tf.matmul(x1, x2)
ret = tf.cast(ret, dtype=dtype_from)
if x1_padded_2:
return ret[0]
elif x1_padded:
return tf.squeeze(ret, axis=-2)
elif x2_padded:
return tf.squeeze(ret, axis=-1)
return ret
def svdvals(x: tf.Tensor) -> tf.Tensor:
return tf.linalg.svd(x, compute_uv=False)
def slogdet(x:Union[ivy.Array,ivy.NativeArray],full_matrices: bool = True) -> Union[ivy.Array, Tuple[ivy.Array,...]]:
results = namedtuple("slogdet", "sign logabsdet")
sign, logabsdet = tf.linalg.slogdet(x)
res = results(sign, logabsdet)
return res
def trace(x: tf.Tensor,
offset: int = 0)\
-> tf.Tensor:
return tf.trace(x, offset)
# Extra #
# ------#
def vector_to_skew_symmetric_matrix(vector: Tensor)\
-> Tensor:
batch_shape = list(vector.shape[:-1])
# BS x 3 x 1
vector_expanded = tf.expand_dims(vector, -1)
# BS x 1 x 1
a1s = vector_expanded[..., 0:1, :]
a2s = vector_expanded[..., 1:2, :]
a3s = vector_expanded[..., 2:3, :]
# BS x 1 x 1
zs = tf.zeros(batch_shape + [1, 1])
# BS x 1 x 3
row1 = tf.concat((zs, -a3s, a2s), -1)
row2 = tf.concat((a3s, zs, -a1s), -1)
row3 = tf.concat((-a2s, a1s, zs), -1)
# BS x 3 x 3
return tf.concat((row1, row2, row3), -2)
| 31.287958 | 117 | 0.59421 |
a27a5f5e65e08460827a70ededfdb76a1b562fcf
| 2,420 |
py
|
Python
|
Python/Alphabet_rangoli.py
|
sarathsdev/Hackerrank-Codes
|
87bcafae5d11af0bf5a27c9f68e2cfc095943517
|
[
"MIT"
] | null | null | null |
Python/Alphabet_rangoli.py
|
sarathsdev/Hackerrank-Codes
|
87bcafae5d11af0bf5a27c9f68e2cfc095943517
|
[
"MIT"
] | null | null | null |
Python/Alphabet_rangoli.py
|
sarathsdev/Hackerrank-Codes
|
87bcafae5d11af0bf5a27c9f68e2cfc095943517
|
[
"MIT"
] | null | null | null |
"""
Problem: You are given an integer N. Your task is to print an alphabet rangoli of size N. (Rangoli is a form of Indian folk art based on creation of patterns.)
#size 3
----c----
--c-b-c--
c-b-a-b-c
--c-b-c--
----c----
#size 5
--------e--------
------e-d-e------
----e-d-c-d-e----
--e-d-c-b-c-d-e--
e-d-c-b-a-b-c-d-e
--e-d-c-b-c-d-e--
----e-d-c-d-e----
------e-d-e------
--------e--------
#size 10
------------------j------------------
----------------j-i-j----------------
--------------j-i-h-i-j--------------
------------j-i-h-g-h-i-j------------
----------j-i-h-g-f-g-h-i-j----------
--------j-i-h-g-f-e-f-g-h-i-j--------
------j-i-h-g-f-e-d-e-f-g-h-i-j------
----j-i-h-g-f-e-d-c-d-e-f-g-h-i-j----
--j-i-h-g-f-e-d-c-b-c-d-e-f-g-h-i-j--
j-i-h-g-f-e-d-c-b-a-b-c-d-e-f-g-h-i-j
--j-i-h-g-f-e-d-c-b-c-d-e-f-g-h-i-j--
----j-i-h-g-f-e-d-c-d-e-f-g-h-i-j----
------j-i-h-g-f-e-d-e-f-g-h-i-j------
--------j-i-h-g-f-e-f-g-h-i-j--------
----------j-i-h-g-f-g-h-i-j----------
------------j-i-h-g-h-i-j------------
--------------j-i-h-i-j--------------
----------------j-i-j----------------
------------------j------------------
#Input Format
Only one line of input containing N, the size of the rangoli.
#Constraints
0 < N < 27
#Output Format
Print the alphabet rangoli in the format explained above.
#Sample input
5
#Sample Output
--------e--------
------e-d-e------
----e-d-c-d-e----
--e-d-c-b-c-d-e--
e-d-c-b-a-b-c-d-e
--e-d-c-b-c-d-e--
----e-d-c-d-e----
------e-d-e------
--------e--------
"""
# Implementation in Python using strings
n = int(input().strip())
w = (n-1) * 2 + ((n * 2) - 1)
#above halfside
for i in range(1,n,1):
number_of_letter = (i*2) - 1
s = ''
letter_value = 97 + n - 1
for i in range(0,number_of_letter):
if(i != 0):
s += '-'
s += chr(letter_value)
if(i<(number_of_letter-1) / 2):
letter_value = letter_value - 1
else:
letter_value = letter_value + 1
print(s.center(w,'-'))
#below halfside
for i in range(n,0,-1):
number_of_letter = (i*2) - 1
s = ''
letter_value = 97 + n - 1
for i in range(0,number_of_letter):
if(i != 0):
s += '-'
s += chr(letter_value)
if(i<(number_of_letter-1) / 2):
letter_value = letter_value - 1
else:
letter_value = letter_value + 1
print(s.center(w,'-'))
| 23.960396 | 161 | 0.42438 |
d95a5bede55b463982b64db265c15da8a489c09a
| 1,485 |
py
|
Python
|
scenario_player/services/rpc/schemas/instances.py
|
hackaugusto/scenario-player
|
0701bb986f47e1ec4a4fb7a469157826da1993e2
|
[
"MIT"
] | null | null | null |
scenario_player/services/rpc/schemas/instances.py
|
hackaugusto/scenario-player
|
0701bb986f47e1ec4a4fb7a469157826da1993e2
|
[
"MIT"
] | null | null | null |
scenario_player/services/rpc/schemas/instances.py
|
hackaugusto/scenario-player
|
0701bb986f47e1ec4a4fb7a469157826da1993e2
|
[
"MIT"
] | null | null | null |
from marshmallow.exceptions import ValidationError
from marshmallow.fields import String, Url
from scenario_player.constants import GAS_STRATEGIES
from scenario_player.services.common.schemas import BytesField, SPSchema
from scenario_player.services.rpc.schemas.base import RPCClientID, RPCCreateResourceSchema
class GasPrice(String):
def _deserialize(self, value, attr, data, **kwargs):
        deserialized = super(GasPrice, self)._deserialize(value, attr, data, **kwargs)
        try:
            return int(deserialized)
        except ValueError:
            key = deserialized.upper()
if key in GAS_STRATEGIES:
return key
raise ValidationError(f"{value} - not an int-string or known gas price strategy!")
class CreateClientSchema(SPSchema):
"""POST /rpc/client
load-only parameters:
- chain_url (:class:`Url`)
- privkey (:class:`BytesField`)
- gas_price (str)
dump-only parameters:
- client_id (:class:`RPCClientID`)
"""
# Deserialization fields.
chain_url = Url(required=True, load_only=True)
privkey = BytesField(required=True, load_only=True)
gas_price = GasPrice(required=False, load_only=True, missing="FAST")
# Serialization fields.
client_id = RPCClientID(required=True, dump_only=True)
class DeleteInstanceRequest(RPCCreateResourceSchema):
"""DELETE /rpc/client
load-only parameters:
- client_id (:class:`RPCClientID`)
"""
| 29.117647 | 94 | 0.688889 |
fbcfce854d5f46a6c219661def94a18101ef4854
| 2,943 |
py
|
Python
|
capablerobot_usbhub/registers_main.py
|
mattvenn/CapableRobot_USBHub_Driver
|
19a2eefdbb8afc7e3ebe3277f15a872b509a494d
|
[
"MIT"
] | 16 |
2019-07-01T23:47:22.000Z
|
2022-02-14T21:16:33.000Z
|
capablerobot_usbhub/registers/main.py
|
d-c-d/CapableRobot_USBHub_Driver
|
27579ac028bc2e71ce94983c7183d18fc82422a4
|
[
"MIT"
] | 2 |
2020-01-08T08:30:39.000Z
|
2022-02-23T00:49:09.000Z
|
capablerobot_usbhub/registers/main.py
|
d-c-d/CapableRobot_USBHub_Driver
|
27579ac028bc2e71ce94983c7183d18fc82422a4
|
[
"MIT"
] | 6 |
2020-01-07T15:37:23.000Z
|
2022-02-07T08:25:36.000Z
|
# The MIT License (MIT)
#
# Copyright (c) 2019 Chris Osterwood for Capable Robot Components
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from construct import *
main_revision = BitStruct(
"device_id" / BitsInteger(16),
Padding(8),
"revision_id" / BitsInteger(8),
)
main_vendor_id = BitStruct(
"value" / BitsInteger(16),
)
main_product_id = BitStruct(
"value" / BitsInteger(16),
)
main_device_id = BitStruct(
"value" / BitsInteger(16),
)
main_hub_configuration = BitStruct(
"self_power" / BitsInteger(1),
"vsm_disable" / BitsInteger(1),
"hs_disable" / BitsInteger(1),
"mtt_enable" / BitsInteger(1),
"eop_disable" / BitsInteger(1),
"current_sense" / BitsInteger(2),
"port_power" / BitsInteger(1),
Padding(2),
"oc_timer" / BitsInteger(2),
"compound" / BitsInteger(1),
Padding(3),
Padding(4),
"prtmap_enable" / BitsInteger(1),
Padding(2),
"string_enable" / BitsInteger(1),
)
main_hub_configuration_1 = BitStruct(
"self_power" / BitsInteger(1),
"vsm_disable" / BitsInteger(1),
"hs_disable" / BitsInteger(1),
"mtt_enable" / BitsInteger(1),
"eop_disable" / BitsInteger(1),
"current_sense" / BitsInteger(2),
"port_power" / BitsInteger(1),
)
main_hub_configuration_2 = BitStruct(
Padding(2),
"oc_timer" / BitsInteger(2),
"compound" / BitsInteger(1),
Padding(3),
)
main_hub_configuration_3 = BitStruct(
Padding(4),
"prtmap_enable" / BitsInteger(1),
Padding(2),
"string_enable" / BitsInteger(1),
)
main_port_swap = BitStruct(
Padding(3),
"port4" / BitsInteger(1),
"port3" / BitsInteger(1),
"port2" / BitsInteger(1),
"port1" / BitsInteger(1),
"port0" / BitsInteger(1),
)
main_hub_control = BitStruct(
Padding(6),
"lpm_disable" / BitsInteger(1),
"reset" / BitsInteger(1),
)
main_suspend = BitStruct(
Padding(7),
"suspend" / BitsInteger(1),
)
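# A minimal, hypothetical parsing example: each BitStruct consumes whole bytes,
# so four raw register bytes decode into named fields, e.g.
#   main_revision.parse(b"\x24\x49\x00\x01").device_id    # -> 0x2449
#   main_revision.parse(b"\x24\x49\x00\x01").revision_id  # -> 0x01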
| 27.764151 | 79 | 0.69385 |
1ba5a7a00768d4eb21c00fd65c3685b631a2b758
| 1,009 |
py
|
Python
|
examples/building_custom_algorithms/mean_rating_user_item.py
|
PGBI/Surprise
|
76e47037675afc6c0fb017490a88d1b2b2dff0f7
|
[
"BSD-3-Clause"
] | 5,572 |
2016-11-24T08:21:53.000Z
|
2022-03-31T20:35:00.000Z
|
examples/building_custom_algorithms/mean_rating_user_item.py
|
daihui-lu/Surprise
|
46b9914995e6c8c7d227b46f2eaeef2d4600580f
|
[
"BSD-3-Clause"
] | 393 |
2016-11-22T12:48:00.000Z
|
2022-03-26T15:09:53.000Z
|
examples/building_custom_algorithms/mean_rating_user_item.py
|
daihui-lu/Surprise
|
46b9914995e6c8c7d227b46f2eaeef2d4600580f
|
[
"BSD-3-Clause"
] | 1,096 |
2016-12-08T22:01:57.000Z
|
2022-03-29T03:55:54.000Z
|
"""
This module describes how to build your own prediction algorithm. Please refer
to the User Guide for more insight.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
from surprise import AlgoBase
from surprise import Dataset
from surprise.model_selection import cross_validate
class MyOwnAlgorithm(AlgoBase):
def __init__(self):
# Always call base method before doing anything.
AlgoBase.__init__(self)
def estimate(self, u, i):
sum_means = self.trainset.global_mean
div = 1
if self.trainset.knows_user(u):
sum_means += np.mean([r for (_, r) in self.trainset.ur[u]])
div += 1
if self.trainset.knows_item(i):
sum_means += np.mean([r for (_, r) in self.trainset.ir[i]])
div += 1
return sum_means / div
data = Dataset.load_builtin('ml-100k')
algo = MyOwnAlgorithm()
cross_validate(algo, data, verbose=True)
| 24.02381 | 77 | 0.656095 |
5bf500fed97423e4b1e1d91c5f47dc80960c4f5b
| 6,247 |
py
|
Python
|
CS_ex4.py
|
MomenK/WIDC-method
|
1c49e56fea88bbb5a563a53c3fa1b4c77172b908
|
[
"BSD-3-Clause"
] | null | null | null |
CS_ex4.py
|
MomenK/WIDC-method
|
1c49e56fea88bbb5a563a53c3fa1b4c77172b908
|
[
"BSD-3-Clause"
] | null | null | null |
CS_ex4.py
|
MomenK/WIDC-method
|
1c49e56fea88bbb5a563a53c3fa1b4c77172b908
|
[
"BSD-3-Clause"
] | null | null | null |
from pylbfgs import owlqn
import numpy as np
import matplotlib.pyplot as plt
from scipy.fftpack import dct, idct
from cosamp_fn import cosamp
import cvxpy as cvx
from os import listdir
from scipy.signal import hilbert, chirp
from scipy import signal
from scipy import interpolate
def dct2(x):
return dct(dct(x.T, norm='ortho', axis=0).T, norm='ortho', axis=0)
def idct2(x):
return idct(idct(x.T, norm='ortho', axis=0).T, norm='ortho', axis=0)
def evaluate(x, g, step):
"""An in-memory evaluation callback."""
# we want to return two things:
# (1) the norm squared of the residuals, sum((Ax-b).^2), and
# (2) the gradient 2*A'(Ax-b)
# expand x columns-first
x2 = x.reshape((nx, ny)).T
# Ax is just the inverse 2D dct of x2
Ax2 = idct2(x2)
# stack columns and extract samples
Ax = Ax2.T.flat[perm].reshape(y.shape)
# calculate the residual Ax-b and its 2-norm squared
Axb = Ax - y
fx = np.sum(np.power(Axb, 2))
# project residual vector (k x 1) onto blank image (ny x nx)
Axb2 = np.zeros(x2.shape)
Axb2.T.flat[perm] = Axb # fill columns-first
# A'(Ax-b) is just the 2D dct of Axb2
AtAxb2 = 2 * dct2(Axb2)
AtAxb = AtAxb2.T.reshape(x.shape) # stack columns
# copy over the gradient vector
np.copyto(g, AtAxb)
return fx
######################################################################################
######################################################################################
def capture(folder,index):
RFPath = folder+'/M_RFArrays/'
TimePath = folder+'/M_Arrays/'
files = listdir(RFPath)
timeFiles = ['T'+file for file in files]
print(files,timeFiles)
return RFPath + files[index],TimePath + timeFiles[index]
def butter_highpass(cutoff, fs, order=5):
nyq = 0.5 * fs
normal_cutoff = cutoff / nyq
b, a = signal.butter(order, normal_cutoff, btype='high', analog=False)
return b, a
def butter_highpass_filter(data, cutoff, fs, order=5):
b, a = butter_highpass(cutoff, fs, order=order)
y = signal.filtfilt(b, a, data)
return y
def butter_lowpass(cutoff, fs, order=5):
nyq = 0.5 * fs
normal_cutoff = cutoff / nyq
b, a = signal.butter(order, normal_cutoff, btype='low', analog=False)
return b, a
def butter_lowpass_filter(data, cutoff, fs, order=5):
b, a = butter_lowpass(cutoff, fs, order=order)
y = signal.filtfilt(b, a, data)
return y
def clean(X):
X = butter_highpass_filter(X.T,1*1e6,20*1e6,order =5).T
# X = butter_lowpass_filter( X.T,8*1e6,20*1e6,order =5).T
img = hilbert(X.T).T
# img = hilbert(X)
img= img/np.amax(img)
img = np.abs(img)
img = 20*np.log10(img)
return img
def corrr(x,y,scale):
kind = 'quadratic'
xsize = x.shape[0]
xaxis = np.linspace(0,xsize,xsize)
upaxis = np.linspace(0,xsize,xsize*scale)
fx = interpolate.interp1d(xaxis, x,kind= kind)
fy = interpolate.interp1d(xaxis, y,kind= kind)
x = fx(upaxis)
y = fy(upaxis)
return signal.correlate(x,y,mode='same')
def diameter(X):
upsample = 10
[nx, ny] = X.shape
mx = int(nx/2)
corrMapTop = np.zeros((upsample*mx,ny))
corrMapBot = np.zeros((upsample*mx,ny))
some_index = int(ny/2)
sigTop = X[:,some_index][0:mx]
sigBot = X[:,some_index][mx:]
for i in range(ny):
sigTop1 = X[:,i][0:mx]
corrMapTop[:,i] = corrr(sigTop1,sigTop,upsample)
sigBot1 = X[:,i][mx:]
corrMapBot[:,i] = corrr(sigBot1,sigBot,upsample)
TopInd = np.argmax(corrMapTop,axis=0)/upsample
BotInd = mx + np.argmax(corrMapBot,axis=0)/upsample
return BotInd - TopInd
######################################################################################
def repeat_random(k,ny,nx,C):
permCD = np.zeros((k,C))
perm = np.zeros((k,nx))
for j in range(C):
permCD[:,j] = np.random.choice(ny, k, replace=False)
offset = 0
for i in range(nx):
ii = int(i%C)
perm[:,i] = permCD[:,ii] + offset
offset = offset+ny
return perm.flat[:].astype(int)
######################################################################################
## Read Image file
file_name= 'Rabbit_Full/'+'Aor_M_20'
index = 0
XF,TF = capture(file_name,index)
X = np.load(XF)
X = X[500:800,:3000:2]
Time = np.load(TF)
######################################################################################
## Randomly samples the signal
# [nx,ny] = X.shape
[ny,nx] = X.shape
# create random sampling index vector
s = 0.4
# k = round(nx * ny * s)
# perm = np.random.choice(nx * ny, k, replace=False) # random sample of indices
# This bits consider uniform sampling! consider have a random rpeat rate of beat cycle
C = 20
k = round(ny * s)
perm = repeat_random(k,ny,nx,C)
######################################################################################
# take random samples of image, store them in a vector b
y = b = X.T.flat[perm].astype(float)
# create images of mask (for visualization)
Xm = 0 * np.ones(X.shape)
# Xm.T.flat[perm] = X.T.flat[perm]
Xm.T.flat[perm] = 255 * np.ones(perm.shape)
Xm.reshape((nx, ny)).T
# perform the L1 minimization in memory
Xat2 = owlqn(nx*ny, evaluate, None, 5)
# # transform the output back into the spatial domain
Xat = Xat2.reshape((nx, ny)).T # stack columns
Xrecon = idct2(Xat)
# print(X.shape)
# print(Xat.shape)
# print(Xrecon.shape)
######################################################################################
## Plot
fig,axes = plt.subplots(1,4)
axes = axes.reshape(-1)
image = axes[0].imshow(clean(X),aspect='auto',cmap='gray')
image.set_clim(vmin=-40, vmax= 0)
axes[0].title.set_text('Original Image (100 % sampling)')
image = axes[1].imshow(Xm,aspect='auto',cmap='gray')
# image.set_clim(vmin=-40, vmax= 0)
axes[1].title.set_text('Random sampling matrix (40 % sampling)')
image = axes[2].imshow(clean(Xrecon),aspect='auto',cmap='gray')
image.set_clim(vmin=-38, vmax= 0)
axes[2].title.set_text('Reconstructed Image (40 % sampling)')
axes[3].plot(diameter(X)[10:],'b')
axes[3].plot(diameter(Xrecon)[10:],'r')
axes[3].title.set_text('Diameter traces')
plt.show()
# # print(perm.shape)
| 27.04329 | 86 | 0.577397 |
00927b461f6adbe317807c4364610c7d61065aac
| 1,608 |
py
|
Python
|
src/config.py
|
plasma-chat/plasma-client
|
e8a59d1ffa158058afab40af5fc1e889a3e4d26c
|
[
"MIT"
] | null | null | null |
src/config.py
|
plasma-chat/plasma-client
|
e8a59d1ffa158058afab40af5fc1e889a3e4d26c
|
[
"MIT"
] | null | null | null |
src/config.py
|
plasma-chat/plasma-client
|
e8a59d1ffa158058afab40af5fc1e889a3e4d26c
|
[
"MIT"
] | null | null | null |
# Copyright 2022 iiPython
# Modules
import json
import socket
from typing import Any
from iipython import color, clear
# Configuration class
class Configuration(object):
def __init__(self) -> None:
self.data, self.prompted = {}, False
try:
with open("config.json", "r") as f:
self.data = json.loads(f.read())
except Exception:
pass
def get(self, key: str, prompt: str = None) -> Any:
if key not in self.data:
if prompt is not None:
clear()
self.prompted = True
self.data[key] = input(color(prompt))
return self.data.get(key)
def save(self) -> None:
with open("config.json", "w+") as f:
f.write(json.dumps(self.data, indent = 4))
def parse_address(self, addr: str) -> tuple:
if addr == ":":
host, port = "localhost", 42080
elif addr.count(":") > 1:
raise ValueError("address is invalid!")
elif ":" not in addr:
host, port = addr, 42080
else:
host, port = addr.split(":")
if not (host.strip() and port.strip()):
raise ValueError("address is invalid!")
# Convert port
try:
port = int(port)
if port < 1 or port > 65535:
raise ValueError("port is invalid!")
except ValueError:
raise ValueError("port is invalid!")
# Convert domain names
return socket.getaddrinfo(host, port)[0][4]
# Initialization
config = Configuration()
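# Illustrative usage (addresses are examples): parse_address accepts "host",
# "host:port", or ":" (localhost:42080), resolves the host via
# socket.getaddrinfo, and raises ValueError for malformed input.
#   host, port = config.parse_address("127.0.0.1:42080")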
| 25.935484 | 55 | 0.536692 |
ea56a22e273b57e76b3229597ebf9cbdfe4618b9
| 1,326 |
py
|
Python
|
keystone/common/sql/migrate_repo/versions/066_fixup_service_name_value.py
|
nuxeh/keystone
|
ae61c5c081e1917213ef94e33dda43ae0c9c4b55
|
[
"Apache-2.0"
] | null | null | null |
keystone/common/sql/migrate_repo/versions/066_fixup_service_name_value.py
|
nuxeh/keystone
|
ae61c5c081e1917213ef94e33dda43ae0c9c4b55
|
[
"Apache-2.0"
] | null | null | null |
keystone/common/sql/migrate_repo/versions/066_fixup_service_name_value.py
|
nuxeh/keystone
|
ae61c5c081e1917213ef94e33dda43ae0c9c4b55
|
[
"Apache-2.0"
] | null | null | null |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_serialization import jsonutils
import sqlalchemy as sql
def upgrade(migrate_engine):
meta = sql.MetaData()
meta.bind = migrate_engine
service_table = sql.Table('service', meta, autoload=True)
services = list(service_table.select().execute())
for service in services:
extra_dict = jsonutils.loads(service.extra)
        # Skip records whose extra JSON already has a non-null name
if extra_dict.get('name') is not None:
continue
# Default the name to empty string
extra_dict['name'] = ''
new_values = {
'extra': jsonutils.dumps(extra_dict),
}
f = service_table.c.id == service.id
update = service_table.update().where(f).values(new_values)
migrate_engine.execute(update)
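# For illustration: a row whose `extra` column is '{"description": "x"}' is
# rewritten to '{"description": "x", "name": ""}', while rows whose extra JSON
# already carries a non-null "name" are left untouched.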
| 35.837838 | 75 | 0.69457 |
d7437b46c0ef992227ee682711909c21b2a469d8
| 3,979 |
py
|
Python
|
autotest/gdrivers/blx.py
|
mihadyuk/gdal
|
d4627981715b82ff368547ef00ef26e0b9207048
|
[
"MIT"
] | null | null | null |
autotest/gdrivers/blx.py
|
mihadyuk/gdal
|
d4627981715b82ff368547ef00ef26e0b9207048
|
[
"MIT"
] | null | null | null |
autotest/gdrivers/blx.py
|
mihadyuk/gdal
|
d4627981715b82ff368547ef00ef26e0b9207048
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
###############################################################################
# $Id$
#
# Project: GDAL/OGR Test Suite
# Purpose: Test BLX support.
# Author: Even Rouault < even dot rouault @ mines-paris dot org >
#
###############################################################################
# Copyright (c) 2008, Even Rouault <even dot rouault at mines-paris dot org>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
import sys
from osgeo import gdal
sys.path.append( '../pymod' )
import gdaltest
###############################################################################
# Test reading a little-endian BLX
def blx_1():
prj = 'WGS84'
gt = [ 20.0004166, 0.0008333, 0.0, 50.0004166, 0.0, -0.0008333 ]
tst = gdaltest.GDALTest( 'BLX', 's4103.blx', 1, 47024 )
return tst.testOpen( check_prj = prj, check_gt = gt )
###############################################################################
# Test reading a big-endian BLX
def blx_2():
prj = 'WGS84'
gt = [ 20.0004166, 0.0008333, 0.0, 50.0004166, 0.0, -0.0008333 ]
tst = gdaltest.GDALTest( 'BLX', 's4103.xlb', 1, 47024 )
return tst.testOpen( check_prj = prj, check_gt = gt )
###############################################################################
# Test writing a little-endian BLX
def blx_3():
tst = gdaltest.GDALTest( 'BLX', 's4103.xlb', 1, 47024 )
return tst.testCreateCopy( check_gt = 1, check_srs = 1 )
###############################################################################
# Test writing a big-endian BLX
def blx_4():
tst = gdaltest.GDALTest( 'BLX', 's4103.blx', 1, 47024, options = [ 'BIGENDIAN=YES' ] )
return tst.testCreateCopy( check_gt = 1, check_srs = 1 )
###############################################################################
# Test overviews
def blx_5():
ds = gdal.Open( 'data/s4103.blx' )
band = ds.GetRasterBand(1)
if band.GetOverviewCount() != 4:
gdaltest.post_reason( 'did not get expected overview count' )
return 'fail'
cs = band.GetOverview(0).Checksum()
if cs != 42981:
gdaltest.post_reason( 'wrong overview checksum (%d)' % cs )
return 'fail'
cs = band.GetOverview(1).Checksum()
if cs != 61363:
gdaltest.post_reason( 'wrong overview checksum (%d)' % cs )
return 'fail'
cs = band.GetOverview(2).Checksum()
if cs != 48060:
gdaltest.post_reason( 'wrong overview checksum (%d)' % cs )
return 'fail'
cs = band.GetOverview(3).Checksum()
if cs != 12058:
gdaltest.post_reason( 'wrong overview checksum (%d)' % cs )
return 'fail'
return 'success'
gdaltest_list = [
blx_1,
blx_2,
blx_3,
blx_4,
blx_5
]
if __name__ == '__main__':
gdaltest.setup_run( 'blx' )
gdaltest.run_tests( gdaltest_list )
gdaltest.summarize()
| 30.143939 | 90 | 0.562704 |
ba46c9eaf01e8f8fd5a1e5396a2312c06284e555
| 6,375 |
py
|
Python
|
gpytorch/gpytorch/kernels/drug_response_kernel.py
|
ltronneb/PIICM
|
1305232733920eef3843949ceafd53f3e19e2550
|
[
"MIT"
] | null | null | null |
gpytorch/gpytorch/kernels/drug_response_kernel.py
|
ltronneb/PIICM
|
1305232733920eef3843949ceafd53f3e19e2550
|
[
"MIT"
] | null | null | null |
gpytorch/gpytorch/kernels/drug_response_kernel.py
|
ltronneb/PIICM
|
1305232733920eef3843949ceafd53f3e19e2550
|
[
"MIT"
] | null | null | null |
from copy import deepcopy
import torch
from .kernel import Kernel
from .index_kernel import IndexKernel
from ..lazy import KroneckerProductLazyTensor, GPattKroneckerProductLazyTensor, lazify
from ..lazy.NotPSDNonLazyTensor import notpsdlazify
from ..lazy.permutation_lazy_tensor import PermutationLazyTensor
class DrugResponseKernel(Kernel):
"""
Implements the intrinsic coregionalization model (ICM) with or without encoded invariances
"""
def __init__(self, data_covar_module, num_combinations, num_cell_lines, symmetric=True, drug_rank=1,
cell_linerank=1,
task_covar_prior=None,
**kwargs):
super(DrugResponseKernel, self).__init__(**kwargs)
# Check for CUDA
if torch.cuda.is_available():
dev = "cuda:0"
else:
dev = "cpu"
if symmetric:
self.expanded_num_combinations = 2 * num_combinations
else:
self.expanded_num_combinations = num_combinations
self.num_cell_lines = num_cell_lines
self.drugcombo_covar_module = IndexKernel(
num_tasks=self.expanded_num_combinations, batch_shape=self.batch_shape, rank=drug_rank,
prior=task_covar_prior
)
self.cellline_covar_module = IndexKernel(
num_tasks=self.num_cell_lines, batch_shape=self.batch_shape, rank=cell_linerank,
prior=task_covar_prior
)
# If symmetric, set up permutation matrix
if symmetric:
interp_indices = torch.zeros((self.expanded_num_combinations, self.expanded_num_combinations),
dtype=torch.long, device=dev)
interp_values = torch.zeros((self.expanded_num_combinations, self.expanded_num_combinations),device=dev)
colcounter = 0
for i in range(num_combinations):
interp_indices[colcounter, colcounter] = i + num_combinations
interp_values[colcounter, colcounter] = 1
colcounter += 1
for i in range(num_combinations):
interp_indices[colcounter, colcounter] = i
interp_values[colcounter, colcounter] = 1
colcounter += 1
self.symmetric_indices = interp_indices
self.symmetric_values = interp_values
self.symmetric = symmetric
# And reflection matrix
self.reflection = torch.tensor([[0.0, 1.0], [1.0, 0.0]],device=dev)
else:
self.symmetric = False
self.data_covar_module = data_covar_module
self.num_combinations = num_combinations
def forward(self, x1, x2, diag=False, last_dim_is_batch=False, **params):
if last_dim_is_batch:
raise RuntimeError("MultitaskKernel does not accept the last_dim_is_batch argument.")
covar_drugcombo = self.drugcombo_covar_module.covar_matrix
covar_cellline = self.cellline_covar_module.covar_matrix
data_covar_matrix = self.data_covar_module.forward(x1, x2, **params)
covar_x = lazify(data_covar_matrix)
if self.symmetric:
# Ensure things are on correct device
device = x1.device
self.symmetric_indices = self.symmetric_indices.to(device)
self.symmetric_values = self.symmetric_values.to(device)
self.reflection = self.reflection.to(device)
# Make some copies
covar_drugcombo_t = covar_drugcombo.clone()
covar_drugcombo_tt = covar_drugcombo.clone()
covar_drugcombo_sym_row = PermutationLazyTensor(covar_drugcombo_t,
left_interp_indices=self.symmetric_indices,
left_interp_values=self.symmetric_values,
right_interp_indices=None,
right_interp_values=None)
covar_drugcombo_sym_total = PermutationLazyTensor(covar_drugcombo_tt,
left_interp_indices=self.symmetric_indices,
left_interp_values=self.symmetric_values,
right_interp_indices=self.symmetric_indices,
right_interp_values=self.symmetric_values)
if x1.shape[1] > 1: # For the 2-d case we flip the axis
data_covar_module_reflected = deepcopy(self.data_covar_module)
data_covar_matrix_reflected = data_covar_module_reflected.forward(x1.matmul(self.reflection),
x2, **params)
covar_x_reflected = notpsdlazify(data_covar_matrix_reflected)
kron_lt1 = KroneckerProductLazyTensor(covar_x, covar_cellline,
0.25 * covar_drugcombo + 0.25 * covar_drugcombo_sym_total)
kron_lt2 = KroneckerProductLazyTensor(covar_x_reflected, covar_cellline,
0.25 * covar_drugcombo_sym_row +
0.25 * covar_drugcombo_sym_row.t())
res = GPattKroneckerProductLazyTensor(kron_lt1) + GPattKroneckerProductLazyTensor(kron_lt2)
else:
covar_k = 0.25 * covar_drugcombo + 0.25 * covar_drugcombo_sym_row + \
0.25 * covar_drugcombo_sym_row.t() + 0.25 * covar_drugcombo_sym_total
kron_lt = KroneckerProductLazyTensor(covar_x, covar_k, covar_cellline)
res = GPattKroneckerProductLazyTensor(kron_lt)
else:
kron_lt = KroneckerProductLazyTensor(covar_x, covar_cellline, covar_drugcombo)
res = GPattKroneckerProductLazyTensor(kron_lt)
return res.diag() if diag else res
def num_outputs_per_input(self, x1, x2):
"""
Given `n` data points `x1` and `m` datapoints `x2`, this multitask
kernel returns an `(n*num_tasks) x (m*num_tasks)` covariance matrix.
"""
return (self.expanded_num_combinations * self.num_cell_lines)
| 53.125 | 116 | 0.599529 |
83a0118c84e0a362b93f673f52dc7cdc8b60c9bc
| 1,648 |
py
|
Python
|
examples/data/preprocessing/earthquake_data.py
|
maximlt/holoviz
|
9f86c3814928225864b9119eac357682faab5f3e
|
[
"BSD-3-Clause"
] | 207 |
2019-11-14T08:41:44.000Z
|
2022-03-31T11:26:18.000Z
|
examples/data/preprocessing/earthquake_data.py
|
maximlt/holoviz
|
9f86c3814928225864b9119eac357682faab5f3e
|
[
"BSD-3-Clause"
] | 74 |
2019-11-21T16:39:45.000Z
|
2022-02-15T16:46:51.000Z
|
examples/data/preprocessing/earthquake_data.py
|
maximlt/holoviz
|
9f86c3814928225864b9119eac357682faab5f3e
|
[
"BSD-3-Clause"
] | 36 |
2020-01-17T08:01:53.000Z
|
2022-03-11T01:33:47.000Z
|
import os
import pandas as pd
import calendar
import datetime as dt
import requests
URL = "https://earthquake.usgs.gov/fdsnws/event/1/query.csv?starttime={start}&endtime={end}&minmagnitude=2.0&orderby=time"
for yr in range(2000, 2019):
for m in range(1, 13):
if os.path.isfile('{yr}_{m}.csv'.format(yr=yr, m=m)):
continue
_, ed = calendar.monthrange(yr, m)
start = dt.datetime(yr, m, 1)
end = dt.datetime(yr, m, ed, 23, 59, 59)
with open('{yr}_{m}.csv'.format(yr=yr, m=m), 'w', encoding='utf-8') as f:
f.write(requests.get(URL.format(start=start, end=end)).content.decode('utf-8'))
dfs = []
for i in range(2000, 2019):
for m in range(1, 13):
if not os.path.isfile('%d_%d.csv' % (i, m)):
continue
df = pd.read_csv('%d_%d.csv' % (i, m), dtype={'nst': 'float64'})
dfs.append(df)
df = pd.concat(dfs, sort=True)
df.to_parquet('../earthquakes.parq', 'fastparquet')
# Reprojected, cleaned and gzip (not snappy)
# import numpy as np
# import pandas as pd
# from holoviews.util.transform import lon_lat_to_easting_northing
# df = pd.read_parquet('../data/earthquakes.parq')
# #df.time = df.time.astype('datetime64[ns]')
# cleaned_df = df.copy()
# cleaned_df['mag'] = df.mag.where(df.mag > 0)
# cleaned_df = cleaned_df.reset_index()
# x, y = lon_lat_to_easting_northing(cleaned_df.longitude, cleaned_df.latitude)
# cleaned_projected = cleaned_df.join([pd.DataFrame({'easting': x}), pd.DataFrame({'northing': y})])
# cleaned_projected.to_parquet('../data/earthquakes-projected.parq', 'fastparquet', compression='gzip', file_scheme='simple')
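# --- Hedged illustration (added here; not part of the upstream script) ---
# The commented-out reprojection above uses holoviews' lon_lat_to_easting_northing.
# Assuming it follows the standard spherical Web Mercator (EPSG:3857) mapping, the
# conversion is roughly:
def lon_lat_to_web_mercator(lon, lat, radius=6378137.0):
    import numpy as np
    # Easting is proportional to longitude; northing applies the Mercator stretch in latitude.
    easting = radius * np.radians(lon)
    northing = radius * np.log(np.tan(np.pi / 4 + np.radians(lat) / 2))
    return easting, northing
# Example: lon_lat_to_web_mercator(0.0, 0.0) gives (0.0, 0.0).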
| 35.06383 | 125 | 0.655947 |
3eea85b9cecc23bd89ceb27d5f0a5479e55de532 | 4,705 | py | Python | test/test_info_contact.py | Elen-T/python_training | 69383759e4229441657bd57af28a7a38b04d026d | ["Apache-2.0"] | null | null | null | test/test_info_contact.py | Elen-T/python_training | 69383759e4229441657bd57af28a7a38b04d026d | ["Apache-2.0"] | null | null | null | test/test_info_contact.py | Elen-T/python_training | 69383759e4229441657bd57af28a7a38b04d026d | ["Apache-2.0"] | null | null | null |
import re
from model.contacts import Contacts
# Task #21: rework the tests that check contact information on the home page
def test_contact_info_on_home_page(app, db):
contact_from_ui = sorted(app.contact.get_contact_list(), key=Contacts.id_or_max)
contact_from_db = sorted(db.get_contact_list(), key=Contacts.id_or_max)
for db_contact in range(len(contact_from_db)):
for ui_contact in range(len(contact_from_ui)):
#assert contact_from_ui[ui_contact].id == contact_from_db[db_contact].id
assert contact_from_ui[ui_contact].firstname == contact_from_db[db_contact].firstname
assert contact_from_ui[ui_contact].lastname == contact_from_db[db_contact].lastname
assert contact_from_ui[ui_contact].address == contact_from_db[db_contact].address
assert contact_from_ui[db_contact].all_phones_from_home_page == "\n".join(
filter(lambda x: x is not None,
[contact_from_db[db_contact].home_tel,
contact_from_db[db_contact].mobile_tel,
contact_from_db[db_contact].work_tel,
contact_from_db[db_contact].second_phone]))
#ui_contact += 1
#db_contact += 1
'''def test_info_on_home_page(app, db):
contact_from_home_page = app.contact.get_contact_list()
for contact in contact_from_home_page :
assert contact.all_emails_from_home_page == merge_emails_like_on_home_page(db.get_contact_by_id(contact.id))
assert contact.firstname == (db.get_contact_by_id(contact.id).firstname).strip()
assert contact.lastname == (db.get_contact_by_id(contact.id).lastname).strip()
assert contact.address == (db.get_contact_by_id(contact.id).address).strip()
assert contact.all_phones_from_home_page == merge_phones_like_on_home_page(db.get_contact_by_id(contact.id))'''
def merge_emails_like_on_home_page(contact):
return "\n".join(filter(lambda x: x != "",
map(lambda x: clear(x),
filter(lambda x: x is not None,
[contact.email, contact.email2, contact.email3]))))
def merge_phones_like_on_home_page(contact):
return "\n".join(
filter(lambda x: x != "", map(lambda x: clear(x), filter(lambda x: x is not None, [contact.home, contact.mobile, contact.work, contact.phone2]))))
def clear(s):
return re.sub("[() -]", "", s) # регуляр выражение, 1й параметр - шаблон(что надо заменять), 2й - на что заменять,3й - где (тк в форме редактирования номер телефона может содержать эти символы )
'''def merge_phones_like_on_home_page(contact):
return "\n".join(filter(lambda x: x != "",
map(lambda x: clear(x),
filter(lambda x: x is not None,
[contact.homephone, contact.mobilephone, contact.workphone,
                               contact.secondaryphone])))) # join with a newline; map applies the lambda (clear) to every element of the list (after first filtering out the None values), then a filter over the map result keeps only the non-empty strings
def merge_emails_like_on_home_page(contact):
return "\n".join(filter(lambda x: x != "",
map(lambda x: clear(x),
filter(lambda x: x is not None,
[contact.email, contact.email2, contact.email3]))))
    # join with a newline; map applies the lambda (clear) to every element of the list (after first filtering out the None values), then a filter over the map result keeps only the non-empty strings
def test_info_on_home_page(app): #
    contact_from_home_page = app.contact.get_contact_list()[0] # object read from the home page; the check is done for the 1st contact (index 0)
    contact_from_edit_page = app.contact.get_contact_info_from_edit_page(0) # get the contact information from the edit form
    assert contact_from_home_page.all_phones_from_home_page == merge_phones_like_on_home_page(contact_from_edit_page) # compare with the merged result
assert contact_from_home_page.all_emails_from_home_page == merge_emails_like_on_home_page(contact_from_edit_page)
assert contact_from_home_page.firstname == contact_from_edit_page.firstname
assert contact_from_home_page.firstname == contact_from_edit_page.firstname
assert contact_from_home_page.lastname == contact_from_edit_page.lastname
assert contact_from_home_page.address == contact_from_edit_page.address'''
| 64.452055 | 305 | 0.692242 |
cacfcfa7423a6aacc7289323997fae0d6aff3a79 | 20,870 | py | Python | tests/test_opt.py | ponponon/loguru | d38ced7539b888e9e9db7495f49f4499b3ee77e1 | ["MIT"] | 11,391 | 2018-12-08T17:44:13.000Z | 2022-03-31T17:55:24.000Z | tests/test_opt.py | ponponon/loguru | d38ced7539b888e9e9db7495f49f4499b3ee77e1 | ["MIT"] | 610 | 2018-12-08T18:03:03.000Z | 2022-03-31T22:28:14.000Z | tests/test_opt.py | ponponon/loguru | d38ced7539b888e9e9db7495f49f4499b3ee77e1 | ["MIT"] | 601 | 2018-12-08T17:46:42.000Z | 2022-03-30T04:23:56.000Z |
import sys
from unittest.mock import MagicMock
import pytest
from loguru import logger
from .conftest import parse
def test_record(writer):
logger.add(writer, format="{message}")
logger.opt(record=True).debug("1")
logger.opt(record=True).debug("2 {record[level]}")
logger.opt(record=True).log(11, "3 {0} {a} {record[level].no}", 4, a=5)
assert writer.read() == "1\n2 DEBUG\n3 4 5 11\n"
def test_record_in_kwargs_too(writer):
logger.add(writer, catch=False)
with pytest.raises(TypeError, match=r"The message can't be formatted"):
logger.opt(record=True).info("Foo {record}", record=123)
def test_record_not_in_extra():
extra = None
def sink(message):
nonlocal extra
extra = message.record["extra"]
logger.add(sink, catch=False)
logger.opt(record=True).info("Test")
assert extra == {}
def test_kwargs_in_extra_of_record():
message = None
def sink(message_):
nonlocal message
message = message_
logger.add(sink, format="{message}", catch=False)
logger.opt(record=True).info("Test {record[extra][foo]}", foo=123)
assert message == "Test 123\n"
assert message.record["extra"] == {"foo": 123}
def test_exception_boolean(writer):
logger.add(writer, format="{level.name}: {message}")
try:
1 / 0
except Exception:
logger.opt(exception=True).debug("Error {0} {record}", 1, record="test")
lines = writer.read().strip().splitlines()
assert lines[0] == "DEBUG: Error 1 test"
assert lines[-1] == "ZeroDivisionError: division by zero"
def test_exception_exc_info(writer):
logger.add(writer, format="{message}")
try:
1 / 0
except Exception:
exc_info = sys.exc_info()
logger.opt(exception=exc_info).debug("test")
lines = writer.read().strip().splitlines()
assert lines[0] == "test"
assert lines[-1] == "ZeroDivisionError: division by zero"
def test_exception_class(writer):
logger.add(writer, format="{message}")
try:
1 / 0
except Exception:
_, exc_class, _ = sys.exc_info()
logger.opt(exception=exc_class).debug("test")
lines = writer.read().strip().splitlines()
assert lines[0] == "test"
assert lines[-1] == "ZeroDivisionError: division by zero"
def test_exception_log_funcion(writer):
logger.add(writer, format="{level.no} {message}")
try:
1 / 0
except Exception:
logger.opt(exception=True).log(50, "Error")
lines = writer.read().strip().splitlines()
assert lines[0] == "50 Error"
assert lines[-1] == "ZeroDivisionError: division by zero"
def test_lazy(writer):
counter = 0
def laziness():
nonlocal counter
counter += 1
return counter
logger.add(writer, level=10, format="{level.no} => {message}")
logger.opt(lazy=True).log(10, "1: {lazy}", lazy=laziness)
logger.opt(lazy=True).log(5, "2: {0}", laziness)
logger.remove()
logger.opt(lazy=True).log(20, "3: {}", laziness)
i = logger.add(writer, level=15, format="{level.no} => {message}")
logger.add(writer, level=20, format="{level.no} => {message}")
logger.log(17, "4: {}", counter)
logger.opt(lazy=True).log(14, "5: {lazy}", lazy=lambda: counter)
logger.remove(i)
logger.opt(lazy=True).log(16, "6: {0}", lambda: counter)
logger.opt(lazy=True).info("7: {}", laziness)
logger.debug("7: {}", counter)
assert writer.read() == "10 => 1: 1\n17 => 4: 1\n20 => 7: 2\n"
def test_logging_within_lazy_function(writer):
logger.add(writer, level=20, format="{message}")
def laziness():
logger.trace("Nope")
logger.warning("Yes Warn")
logger.opt(lazy=True).trace("No", laziness)
assert writer.read() == ""
logger.opt(lazy=True).info("Yes", laziness)
assert writer.read() == "Yes Warn\nYes\n"
def test_depth(writer):
logger.add(writer, format="{function} : {message}")
def a():
logger.opt(depth=1).debug("Test 1")
logger.opt(depth=0).debug("Test 2")
logger.opt(depth=1).log(10, "Test 3")
a()
logger.remove()
assert writer.read() == "test_depth : Test 1\na : Test 2\ntest_depth : Test 3\n"
def test_capture(writer):
logger.add(writer, format="{message} {extra}")
logger.opt(capture=False).info("No {}", 123, no=False)
logger.opt(capture=False).info("Formatted: {fmt}", fmt=456)
logger.opt(capture=False).info("Formatted bis: {} {fmt}", 123, fmt=456)
assert writer.read() == "No 123 {}\nFormatted: 456 {}\nFormatted bis: 123 456 {}\n"
def test_colors(writer):
logger.add(writer, format="<red>a</red> {message}", colorize=True)
logger.opt(colors=True).debug("<blue>b</blue>")
logger.opt(colors=True).log(20, "<y>c</y>")
assert writer.read() == parse(
"<red>a</red> <blue>b</blue>\n" "<red>a</red> <y>c</y>\n", strip=False
)
def test_colors_not_colorize(writer):
logger.add(writer, format="<red>a</red> {message}", colorize=False)
logger.opt(colors=True).debug("<blue>b</blue>")
assert writer.read() == parse("<red>a</red> <blue>b</blue>\n", strip=True)
def test_colors_doesnt_color_unrelated(writer):
logger.add(writer, format="{message} {extra[trap]}", colorize=True)
logger.bind(trap="<red>B</red>").opt(colors=True).debug("<red>A</red>")
assert writer.read() == parse("<red>A</red>", strip=False) + " <red>B</red>\n"
def test_colors_doesnt_strip_unrelated(writer):
logger.add(writer, format="{message} {extra[trap]}", colorize=False)
logger.bind(trap="<red>B</red>").opt(colors=True).debug("<red>A</red>")
assert writer.read() == parse("<red>A</red>", strip=True) + " <red>B</red>\n"
def test_colors_doesnt_raise_unrelated_colorize(writer):
logger.add(writer, format="{message} {extra[trap]}", colorize=True, catch=False)
logger.bind(trap="</red>").opt(colors=True).debug("A")
assert writer.read() == "A </red>\n"
def test_colors_doesnt_raise_unrelated_not_colorize(writer):
logger.add(writer, format="{message} {extra[trap]}", colorize=False, catch=False)
logger.bind(trap="</red>").opt(colors=True).debug("A")
assert writer.read() == "A </red>\n"
def test_colors_doesnt_raise_unrelated_colorize_dynamic(writer):
logger.add(writer, format=lambda x: "{message} {extra[trap]}", colorize=True, catch=False)
logger.bind(trap="</red>").opt(colors=True).debug("A")
assert writer.read() == "A </red>"
def test_colors_doesnt_raise_unrelated_not_colorize_dynamic(writer):
logger.add(writer, format=lambda x: "{message} {extra[trap]}", colorize=False, catch=False)
logger.bind(trap="</red>").opt(colors=True).debug("A")
assert writer.read() == "A </red>"
@pytest.mark.parametrize("colorize", [True, False])
def test_colors_within_record(writer, colorize):
logger.add(writer, format="{message}", colorize=colorize)
logger_ = logger.bind(start="<red>", end="</red>")
logger_.opt(colors=True, record=True).debug("{record[extra][start]}B{record[extra][end]}")
assert writer.read() == "<red>B</red>\n"
@pytest.mark.parametrize("colorize", [True, False])
def test_colors_nested(writer, colorize):
logger.add(writer, format="(<red>[{message}]</red>)", colorize=colorize)
logger.opt(colors=True).debug("A<green>B</green>C<blue>D</blue>E")
assert writer.read() == parse(
"(<red>[A<green>B</green>C<blue>D</blue>E]</red>)\n", strip=not colorize
)
@pytest.mark.parametrize("colorize", [True, False])
def test_colors_stripped_in_message_record(colorize):
message = None
def sink(msg):
nonlocal message
message = msg.record["message"]
logger.add(sink, colorize=colorize)
logger.opt(colors=True).debug("<red>Test</red>")
assert message == "Test"
@pytest.mark.parametrize("message", ["<red>", "</red>", "X </red> <red> Y"])
@pytest.mark.parametrize("colorize", [True, False])
def test_invalid_markup_in_message(writer, message, colorize):
logger.add(writer, format="<red>{message}</red>", colorize=colorize, catch=False)
with pytest.raises(ValueError):
logger.opt(colors=True).debug(message)
@pytest.mark.parametrize("colorize", [True, False])
def test_colors_with_args(writer, colorize):
logger.add(writer, format="=> {message} <=", colorize=colorize)
logger.opt(colors=True).debug("the {0}test{end}", "<red>", end="</red>")
assert writer.read() == "=> the <red>test</red> <=\n"
@pytest.mark.parametrize("colorize", [True, False])
def test_colors_with_level(writer, colorize):
logger.add(writer, format="{message}", colorize=colorize)
logger.level("DEBUG", color="<green>")
logger.opt(colors=True).debug("a <level>level</level> b")
assert writer.read() == parse("a <green>level</green> b\n", strip=not colorize)
@pytest.mark.parametrize("colorize", [True, False])
def test_colors_double_message(writer, colorize):
logger.add(
writer, format="<red><b>{message}...</b> - <c>...{message}</c></red>", colorize=colorize
)
logger.opt(colors=True).debug("<g>foo</g> bar <g>baz</g>")
assert writer.read() == parse(
"<red><b><g>foo</g> bar <g>baz</g>...</b> - <c>...<g>foo</g> bar <g>baz</g></c></red>\n",
strip=not colorize,
)
@pytest.mark.parametrize("colorize", [True, False])
def test_colors_multiple_calls(writer, colorize):
logger.add(writer, format="{message}", colorize=colorize)
logger.opt(colors=True).debug("a <red>foo</red> b")
logger.opt(colors=True).debug("a <red>foo</red> b")
assert writer.read() == parse("a <red>foo</red> b\na <red>foo</red> b\n", strip=not colorize)
@pytest.mark.parametrize("colorize", [True, False])
def test_colors_multiple_calls_level_color_changed(writer, colorize):
logger.add(writer, format="{message}", colorize=colorize)
logger.level("INFO", color="<blue>")
logger.opt(colors=True).info("a <level>foo</level> b")
logger.level("INFO", color="<red>")
logger.opt(colors=True).info("a <level>foo</level> b")
assert writer.read() == parse("a <blue>foo</blue> b\na <red>foo</red> b\n", strip=not colorize)
@pytest.mark.parametrize("colorize", [True, False])
def test_colors_with_dynamic_formatter(writer, colorize):
logger.add(writer, format=lambda r: "<red>{message}</red>", colorize=colorize)
logger.opt(colors=True).debug("<b>a</b> <y>b</y>")
assert writer.read() == parse("<red><b>a</b> <y>b</y></red>", strip=not colorize)
@pytest.mark.parametrize("colorize", [True, False])
def test_colors_with_format_specs(writer, colorize):
fmt = "<g>{level.no:03d} {message:} {message!s:} {{nope}} {extra[a][b]!r}</g>"
logger.add(writer, colorize=colorize, format=fmt)
logger.bind(a={"b": "c"}).opt(colors=True).debug("<g>{X}</g>")
assert writer.read() == parse("<g>010 <g>{X}</g> {X} {nope} 'c'</g>\n", strip=not colorize)
@pytest.mark.parametrize("colorize", [True, False])
def test_colors_with_message_specs(writer, colorize):
logger.add(writer, colorize=colorize, format="<g>{message}</g>")
logger.opt(colors=True).debug("{} <b>A</b> {{nope}} {key:03d} {let!r}", 1, key=10, let="c")
logger.opt(colors=True).debug("<b>{0:0{1}d}</b>", 2, 4)
assert writer.read() == parse(
"<g>1 <b>A</b> {nope} 010 'c'</g>\n<g><b>0002</b></g>\n", strip=not colorize
)
@pytest.mark.parametrize("colorize", [True, False])
def test_colored_string_used_as_spec(writer, colorize):
logger.add(writer, colorize=colorize, format="{level.no:{message}} <red>{message}</red>")
logger.opt(colors=True).log(30, "03d")
assert writer.read() == parse("030 <red>03d</red>\n", strip=not colorize)
@pytest.mark.parametrize("colorize", [True, False])
def test_colored_string_getitem(writer, colorize):
logger.add(writer, colorize=colorize, format="<red>{message[0]}</red>")
logger.opt(colors=True).info("ABC")
assert writer.read() == parse("<red>A</red>\n", strip=not colorize)
@pytest.mark.parametrize("colorize", [True, False])
def test_colors_without_formatting_args(writer, colorize):
string = "{} This { should } not } raise {"
logger.add(writer, colorize=colorize, format="{message}")
logger.opt(colors=True).info(string)
assert writer.read() == string + "\n"
@pytest.mark.parametrize("colorize", [True, False])
def test_colors_with_recursion_depth_exceeded_in_format(writer, colorize):
with pytest.raises(ValueError, match=r"Invalid format"):
logger.add(writer, format="{message:{message:{message:}}}", colorize=colorize)
@pytest.mark.parametrize("colorize", [True, False])
def test_colors_with_recursion_depth_exceeded_in_message(writer, colorize):
logger.add(writer, format="{message}", colorize=colorize)
with pytest.raises(ValueError, match=r"Max string recursion exceeded"):
logger.opt(colors=True).info("{foo:{foo:{foo:}}}", foo=123)
@pytest.mark.parametrize("colorize", [True, False])
def test_colors_with_auto_indexing(writer, colorize):
logger.add(writer, format="{message}", colorize=colorize)
logger.opt(colors=True).info("<red>{}</red> <green>{}</green>", "foo", "bar")
assert writer.read() == parse("<red>foo</red> <green>bar</green>\n", strip=not colorize)
@pytest.mark.parametrize("colorize", [True, False])
def test_colors_with_manual_indexing(writer, colorize):
logger.add(writer, format="{message}", colorize=colorize)
logger.opt(colors=True).info("<red>{1}</red> <green>{0}</green>", "foo", "bar")
assert writer.read() == parse("<red>bar</red> <green>foo</green>\n", strip=not colorize)
@pytest.mark.parametrize("colorize", [True, False])
@pytest.mark.parametrize("message", ["{} {0}", "{1} {}"])
def test_colors_with_invalid_indexing(writer, colorize, message):
logger.add(writer, format="{message}", colorize=colorize)
with pytest.raises(ValueError, match=r"cannot switch"):
logger.opt(colors=True).debug(message, 1, 2, 3)
def test_raw(writer):
logger.add(writer, format="", colorize=True)
logger.opt(raw=True).info("Raw {}", "message")
logger.opt(raw=True).log(30, " + The end")
assert writer.read() == "Raw message + The end"
def test_raw_with_format_function(writer):
logger.add(writer, format=lambda _: "{time} \n")
logger.opt(raw=True).debug("Raw {message} bis", message="message")
assert writer.read() == "Raw message bis"
@pytest.mark.parametrize("colorize", [True, False])
def test_raw_with_colors(writer, colorize):
logger.add(writer, format="XYZ", colorize=colorize)
logger.opt(raw=True, colors=True).info("Raw <red>colors</red> and <lvl>level</lvl>")
assert writer.read() == parse("Raw <red>colors</red> and <b>level</b>", strip=not colorize)
def test_args_with_colors_not_formatted_twice(capsys):
logger.add(sys.stdout, format="{message}", colorize=True)
logger.add(sys.stderr, format="{message}", colorize=False)
a = MagicMock(__format__=MagicMock(return_value="a"))
b = MagicMock(__format__=MagicMock(return_value="b"))
logger.opt(colors=True).info("{} <red>{foo}</red>", a, foo=b)
out, err = capsys.readouterr()
assert out == parse("a <red>b</red>\n")
assert err == "a b\n"
assert a.__format__.call_count == 1
assert b.__format__.call_count == 1
@pytest.mark.parametrize("colorize", [True, False])
def test_level_tag_wrapping_with_colors(writer, colorize):
logger.add(writer, format="<level>FOO {message} BAR</level>", colorize=colorize)
logger.opt(colors=True).info("> foo <red>{}</> bar <lvl>{}</> baz <green>{}</green> <", 1, 2, 3)
logger.opt(colors=True).log(33, "<lvl> {} <red>{}</red> {} </lvl>", 1, 2, 3)
assert writer.read() == parse(
"<b>FOO > foo <red>1</red> bar <b>2</b> baz <green>3</green> < BAR</b>\n"
"<level>FOO <level> 1 <red>2</red> 3 </level> BAR</level>\n",
strip=not colorize,
)
@pytest.mark.parametrize("dynamic_format", [True, False])
@pytest.mark.parametrize("colorize", [True, False])
@pytest.mark.parametrize("colors", [True, False])
@pytest.mark.parametrize("raw", [True, False])
@pytest.mark.parametrize("use_log", [True, False])
@pytest.mark.parametrize("use_arg", [True, False])
def test_all_colors_combinations(writer, dynamic_format, colorize, colors, raw, use_log, use_arg):
format_ = "<level>{level.no:03}</level> <red>{message}</red>"
message = "<green>The</green> <lvl>{}</lvl>"
arg = "message"
def formatter(_):
return format_ + "\n"
logger.add(writer, format=formatter if dynamic_format else format_, colorize=colorize)
logger_ = logger.opt(colors=colors, raw=raw)
if use_log:
if use_arg:
logger_.log(20, message, arg)
else:
logger_.log(20, message.format(arg))
else:
if use_arg:
logger_.info(message, arg)
else:
logger_.info(message.format(arg))
if use_log:
if raw:
if colors:
expected = parse("<green>The</green> <level>message</level>", strip=not colorize)
else:
expected = "<green>The</green> <lvl>message</lvl>"
else:
if colors:
expected = parse(
"<level>020</level> <red><green>The</green> <level>message</level></red>\n",
strip=not colorize,
)
else:
expected = (
parse("<level>020</level> <red>%s</red>\n", strip=not colorize)
% "<green>The</green> <lvl>message</lvl>"
)
else:
if raw:
if colors:
expected = parse("<green>The</green> <b>message</b>", strip=not colorize)
else:
expected = "<green>The</green> <lvl>message</lvl>"
else:
if colors:
expected = parse(
"<b>020</b> <red><green>The</green> <b>message</b></red>\n", strip=not colorize
)
else:
expected = (
parse("<b>020</b> <red>%s</red>\n", strip=not colorize)
% "<green>The</green> <lvl>message</lvl>"
)
assert writer.read() == expected
def test_raw_with_record(writer):
logger.add(writer, format="Nope\n")
logger.opt(raw=True, record=True).debug("Raw in '{record[function]}'\n")
assert writer.read() == "Raw in 'test_raw_with_record'\n"
def test_keep_extra(writer):
logger.configure(extra=dict(test=123))
logger.add(writer, format="{extra[test]}")
logger.opt().debug("")
logger.opt().log(50, "")
assert writer.read() == "123\n123\n"
def test_before_bind(writer):
logger.add(writer, format="{message}")
logger.opt(record=True).bind(key="value").info("{record[level]}")
assert writer.read() == "INFO\n"
def test_deprecated_ansi_argument(writer):
logger.add(writer, format="{message}", colorize=True)
with pytest.warns(DeprecationWarning):
logger.opt(ansi=True).info("Foo <red>bar</red> baz")
assert writer.read() == parse("Foo <red>bar</red> baz\n")
@pytest.mark.parametrize("colors", [True, False])
def test_message_update_not_overridden_by_patch(writer, colors):
def patcher(record):
record["message"] += " [Patched]"
logger.add(writer, format="{level} {message}", colorize=True)
logger.patch(patcher).opt(colors=colors).info("Message")
assert writer.read() == "INFO Message [Patched]\n"
@pytest.mark.parametrize("colors", [True, False])
def test_message_update_not_overridden_by_format(writer, colors):
def formatter(record):
record["message"] += " [Formatted]"
return "{level} {message}\n"
logger.add(writer, format=formatter, colorize=True)
logger.opt(colors=colors).info("Message")
assert writer.read() == "INFO Message [Formatted]\n"
@pytest.mark.parametrize("colors", [True, False])
def test_message_update_not_overridden_by_filter(writer, colors):
def filter(record):
record["message"] += " [Filtered]"
return True
logger.add(writer, format="{level} {message}", filter=filter, colorize=True)
logger.opt(colors=colors).info("Message")
assert writer.read() == "INFO Message [Filtered]\n"
@pytest.mark.parametrize("colors", [True, False])
def test_message_update_not_overridden_by_raw(writer, colors):
logger.add(writer, colorize=True)
logger.patch(lambda r: r.update(message="Updated!")).opt(raw=True, colors=colors).info("Raw!")
assert writer.read() == "Updated!"
def test_overridden_message_ignore_colors(writer):
def formatter(record):
record["message"] += " <blue>[Ignored]</blue> </xyz>"
return "{message}\n"
logger.add(writer, format=formatter, colorize=True)
logger.opt(colors=True).info("<red>Message</red>")
assert writer.read() == "Message <blue>[Ignored]</blue> </xyz>\n"
| 34.667774 | 100 | 0.641591 |
0844207decb3a376d443046edefedc5115d873cd | 906 | py | Python | hue_bridge/logger.py | SmartEnergyPlatform/hue-bridge-connector | c3efd650fc6312b9f6ed312e3e5dad2ffb467c1e | ["Apache-2.0"] | null | null | null | hue_bridge/logger.py | SmartEnergyPlatform/hue-bridge-connector | c3efd650fc6312b9f6ed312e3e5dad2ffb467c1e | ["Apache-2.0"] | null | null | null | hue_bridge/logger.py | SmartEnergyPlatform/hue-bridge-connector | c3efd650fc6312b9f6ed312e3e5dad2ffb467c1e | ["Apache-2.0"] | null | null | null |
"""
Copyright 2018 InfAI (CC SES)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
try:
from connector_client.modules.logger import connector_client_log_handler
except ImportError as ex:
exit("{} - {}".format(__name__, ex.msg))
import logging
root_logger = logging.getLogger("hue-bridge-gateway")
root_logger.setLevel(logging.INFO)
root_logger.addHandler(connector_client_log_handler)
| 33.555556 | 76 | 0.759382 |
e58e2461b239fdfcce50c6936f123e5e15d0930a | 4,464 | py | Python | label_studio/tests/test_api_integration.py | xhuaustc/label-studio | b787824a9e16f488a9b4cd2cef83e1ac526a64f3 | ["Apache-2.0"] | 3 | 2021-07-16T03:48:21.000Z | 2022-01-10T04:58:25.000Z | label_studio/tests/test_api_integration.py | xhuaustc/label-studio | b787824a9e16f488a9b4cd2cef83e1ac526a64f3 | ["Apache-2.0"] | 6 | 2022-02-21T15:19:35.000Z | 2022-03-07T15:25:16.000Z | label_studio/tests/test_api_integration.py | xhuaustc/label-studio | b787824a9e16f488a9b4cd2cef83e1ac526a64f3 | ["Apache-2.0"] | 1 | 2021-05-24T15:46:08.000Z | 2021-05-24T15:46:08.000Z |
"""This file and its contents are licensed under the Apache License 2.0. Please see the included NOTICE for copyright information and LICENSE for a copy of the license.
"""
import pytest
import yaml
import json
import io
import os
import time
from rest_framework.authtoken.models import Token
from rest_framework.test import APIClient
from operator import itemgetter
@pytest.fixture
@pytest.mark.django_db
def client_and_token(business_client):
token = Token.objects.get(user=business_client.business.admin)
client = APIClient()
client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
client.team_id = business_client.team.id
client.organization_pk = business_client.organization.pk
return client, token
@pytest.mark.django_db
def pytest_generate_tests(metafunc):
if 'test_suite' in metafunc.fixturenames:
with io.open(os.path.join(os.path.dirname(__file__), 'test_data/full_steps.yml'), encoding='utf-8') as f:
            test_suites = yaml.safe_load(f)
metafunc.parametrize('test_name, test_suite', list(test_suites.items()))
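# Hedged sketch (not taken from the repository): judging from the keys read in
# test_full_steps below, each entry of test_data/full_steps.yml is expected to look
# roughly like this (prediction_tasks and wait_model are optional):
#
#   some_test_name:
#     project_config: {...}            # body for POST /api/projects/
#     ml_backend: <backend name>       # matched against the connections list
#     upload_tasks: [...]              # tasks sent to the bulk upload endpoint
#     annotations: [...]               # one annotation per uploaded task
#     prediction_tasks: [...]          # optional, defaults to upload_tasks
#     wait_model: 10                   # optional, seconds to wait before predicting
#     expected_predictions: [...]      # compared against the /predict response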
@pytest.fixture(autouse=True)
def delete_projects_at_the_end(db, request):
from projects.models import Project
def delete_projects():
p = Project.objects.all()
print(f'Deleting {p.count()} projects')
p.delete()
request.addfinalizer(delete_projects)
@pytest.mark.integration_tests
@pytest.mark.django_db
def test_full_steps(client_and_token, test_name, test_suite):
print(f'Start {test_name}')
client, token = client_and_token
project_config = test_suite['project_config']
project_config['team_id'] = client.team_id
chosen_ml_backend = test_suite['ml_backend']
upload_tasks = test_suite['upload_tasks']
annotations = test_suite['annotations']
prediction_tasks = test_suite.get('prediction_tasks', upload_tasks)
wait_model = test_suite.get('wait_model', 10)
expected_predictions = test_suite['expected_predictions']
def get(url, code=200):
r = client.get(url)
assert r.status_code == code
return r.json()
def post(url, payload, code=(200, 201)):
r = client.post(url, data=json.dumps(payload), content_type='application/json', headers={'Authorization': f'Token {token}'}) # noqa
assert r.status_code in code
return r.json()
def patch(url, payload, code=200):
r = client.patch(url, data=json.dumps(payload), content_type='application/json', headers={'Authorization': f'Token {token}'}) # noqa
assert r.status_code == code
return r.json()
# create project
project_id = post('/api/projects/', project_config)['id']
# activate ML backend
connections_list = get(f'/api/projects/{project_id}/backends/connections')
chosen_connection = None
for connection in connections_list:
print(f'Test connection {connection}...')
if connection['ml_backend']['name'] == chosen_ml_backend:
chosen_connection = connection
break
assert chosen_connection, f'Connection to {chosen_ml_backend} not found'
patch(f'/api/projects/{project_id}/', {
'ml_backend_active_connection': chosen_connection['id'],
'active_learning_enabled': True
})
# upload data
post(f'/api/projects/{project_id}/tasks/bulk/', upload_tasks)
# get tasks list
tasks_list = get(f'/api/projects/{project_id}/tasks/')
tasks_list = sorted(tasks_list, key=itemgetter('id'))
for task, annotation in zip(tasks_list, annotations):
# make annotation
post(f'/api/tasks/{task["id"]}/annotations/', annotation)
time.sleep(1)
# get predictions
time.sleep(wait_model)
predictions = post(f'/api/projects/{project_id}/predict', prediction_tasks)
# assert len(predictions['model_version']) > 0, predictions
for prediction, expected_prediction in zip(predictions['results'], expected_predictions):
assert prediction['result'] == expected_prediction['result']
assert prediction['score'] > expected_prediction['score_above']
# delete project and check whether model is deleted too
r = client.post(f'/projects/{project_id}/delete/', content_type='text/html; charset=utf-8')
assert r.status_code == 302
print('Trying to get final predictions after model removing fails...')
# TODO: check why this works but should fail
post(f'/api/projects/{project_id}/predict', prediction_tasks)
| 36.590164 | 168 | 0.705645 |
4e42e50a87c503ce148316a8812f5e44eeed6965 | 11,237 | py | Python | sympy/combinatorics/graycode.py | MartinThoma/sympy | 009d0031bec7222ffa472e52148a2b4e441cd3a5 | ["BSD-3-Clause"] | 603 | 2020-12-23T13:49:32.000Z | 2022-03-31T23:38:03.000Z | sympy/combinatorics/graycode.py | mmelotti/sympy | bea29026d27cc50c2e6a5501b6a70a9629ed3e18 | ["BSD-3-Clause"] | 387 | 2020-12-15T14:54:04.000Z | 2022-03-31T07:00:21.000Z | sympy/combinatorics/graycode.py | mmelotti/sympy | bea29026d27cc50c2e6a5501b6a70a9629ed3e18 | ["BSD-3-Clause"] | 35 | 2021-03-26T03:12:04.000Z | 2022-03-23T10:15:10.000Z |
from sympy.core import Basic
import random
class GrayCode(Basic):
"""
A Gray code is essentially a Hamiltonian walk on
a n-dimensional cube with edge length of one.
The vertices of the cube are represented by vectors
whose values are binary. The Hamilton walk visits
each vertex exactly once. The Gray code for a 3d
cube is ['000','100','110','010','011','111','101',
'001'].
A Gray code solves the problem of sequentially
generating all possible subsets of n objects in such
a way that each subset is obtained from the previous
one by either deleting or adding a single object.
In the above example, 1 indicates that the object is
    present, and 0 indicates that it's absent.
Gray codes have applications in statistics as well when
we want to compute various statistics related to subsets
in an efficient manner.
Examples
========
>>> from sympy.combinatorics.graycode import GrayCode
>>> a = GrayCode(3)
>>> list(a.generate_gray())
['000', '001', '011', '010', '110', '111', '101', '100']
>>> a = GrayCode(4)
>>> list(a.generate_gray())
['0000', '0001', '0011', '0010', '0110', '0111', '0101', '0100', \
'1100', '1101', '1111', '1110', '1010', '1011', '1001', '1000']
References
==========
.. [1] Nijenhuis,A. and Wilf,H.S.(1978).
Combinatorial Algorithms. Academic Press.
.. [2] Knuth, D. (2011). The Art of Computer Programming, Vol 4
Addison Wesley
"""
_skip = False
_current = 0
_rank = None
def __new__(cls, n, *args, **kw_args):
"""
Default constructor.
It takes a single argument ``n`` which gives the dimension of the Gray
code. The starting Gray code string (``start``) or the starting ``rank``
may also be given; the default is to start at rank = 0 ('0...0').
Examples
========
>>> from sympy.combinatorics.graycode import GrayCode
>>> a = GrayCode(3)
>>> a
GrayCode(3)
>>> a.n
3
>>> a = GrayCode(3, start='100')
>>> a.current
'100'
>>> a = GrayCode(4, rank=4)
>>> a.current
'0110'
>>> a.rank
4
"""
if n < 1 or int(n) != n:
raise ValueError(
'Gray code dimension must be a positive integer, not %i' % n)
n = int(n)
args = (n,) + args
obj = Basic.__new__(cls, *args)
if 'start' in kw_args:
obj._current = kw_args["start"]
if len(obj._current) > n:
raise ValueError('Gray code start has length %i but '
'should not be greater than %i' % (len(obj._current), n))
elif 'rank' in kw_args:
if int(kw_args["rank"]) != kw_args["rank"]:
raise ValueError('Gray code rank must be a positive integer, '
'not %i' % kw_args["rank"])
obj._rank = int(kw_args["rank"]) % obj.selections
obj._current = obj.unrank(n, obj._rank)
return obj
def next(self, delta=1):
"""
Returns the Gray code a distance ``delta`` (default = 1) from the
current value in canonical order.
Examples
========
>>> from sympy.combinatorics.graycode import GrayCode
>>> a = GrayCode(3, start='110')
>>> a.next().current
'111'
>>> a.next(-1).current
'010'
"""
return GrayCode(self.n, rank=(self.rank + delta) % self.selections)
@property
def selections(self):
"""
Returns the number of bit vectors in the Gray code.
Examples
========
>>> from sympy.combinatorics.graycode import GrayCode
>>> a = GrayCode(3)
>>> a.selections
8
"""
return 2**self.n
@property
def n(self):
"""
Returns the dimension of the Gray code.
Examples
========
>>> from sympy.combinatorics.graycode import GrayCode
>>> a = GrayCode(5)
>>> a.n
5
"""
return self.args[0]
def generate_gray(self, **hints):
"""
Generates the sequence of bit vectors of a Gray Code.
Examples
========
>>> from sympy.combinatorics.graycode import GrayCode
>>> a = GrayCode(3)
>>> list(a.generate_gray())
['000', '001', '011', '010', '110', '111', '101', '100']
>>> list(a.generate_gray(start='011'))
['011', '010', '110', '111', '101', '100']
>>> list(a.generate_gray(rank=4))
['110', '111', '101', '100']
See Also
========
skip
References
==========
.. [1] Knuth, D. (2011). The Art of Computer Programming,
Vol 4, Addison Wesley
"""
bits = self.n
start = None
if "start" in hints:
start = hints["start"]
elif "rank" in hints:
start = GrayCode.unrank(self.n, hints["rank"])
if start is not None:
self._current = start
current = self.current
graycode_bin = gray_to_bin(current)
if len(graycode_bin) > self.n:
raise ValueError('Gray code start has length %i but should '
'not be greater than %i' % (len(graycode_bin), bits))
self._current = int(current, 2)
graycode_int = int(''.join(graycode_bin), 2)
for i in range(graycode_int, 1 << bits):
if self._skip:
self._skip = False
else:
yield self.current
bbtc = (i ^ (i + 1))
gbtc = (bbtc ^ (bbtc >> 1))
self._current = (self._current ^ gbtc)
self._current = 0
def skip(self):
"""
Skips the bit generation.
Examples
========
>>> from sympy.combinatorics.graycode import GrayCode
>>> a = GrayCode(3)
>>> for i in a.generate_gray():
... if i == '010':
... a.skip()
... print(i)
...
000
001
011
010
111
101
100
See Also
========
generate_gray
"""
self._skip = True
@property
def rank(self):
"""
Ranks the Gray code.
A ranking algorithm determines the position (or rank)
of a combinatorial object among all the objects w.r.t.
a given order. For example, the 4 bit binary reflected
Gray code (BRGC) '0101' has a rank of 6 as it appears in
the 6th position in the canonical ordering of the family
of 4 bit Gray codes.
Examples
========
>>> from sympy.combinatorics.graycode import GrayCode
>>> a = GrayCode(3)
>>> list(a.generate_gray())
['000', '001', '011', '010', '110', '111', '101', '100']
>>> GrayCode(3, start='100').rank
7
>>> GrayCode(3, rank=7).current
'100'
See Also
========
unrank
References
==========
.. [1] http://statweb.stanford.edu/~susan/courses/s208/node12.html
"""
if self._rank is None:
self._rank = int(gray_to_bin(self.current), 2)
return self._rank
@property
def current(self):
"""
Returns the currently referenced Gray code as a bit string.
Examples
========
>>> from sympy.combinatorics.graycode import GrayCode
>>> GrayCode(3, start='100').current
'100'
"""
rv = self._current or '0'
if type(rv) is not str:
rv = bin(rv)[2:]
return rv.rjust(self.n, '0')
@classmethod
def unrank(self, n, rank):
"""
Unranks an n-bit sized Gray code of rank k. This method exists
so that a derivative GrayCode class can define its own code of
a given rank.
The string here is generated in reverse order to allow for tail-call
optimization.
Examples
========
>>> from sympy.combinatorics.graycode import GrayCode
>>> GrayCode(5, rank=3).current
'00010'
>>> GrayCode.unrank(5, 3)
'00010'
See Also
========
rank
"""
def _unrank(k, n):
if n == 1:
return str(k % 2)
m = 2**(n - 1)
if k < m:
return '0' + _unrank(k, n - 1)
return '1' + _unrank(m - (k % m) - 1, n - 1)
return _unrank(rank, n)
def random_bitstring(n):
"""
Generates a random bitlist of length n.
Examples
========
>>> from sympy.combinatorics.graycode import random_bitstring
>>> random_bitstring(3) # doctest: +SKIP
100
"""
return ''.join([random.choice('01') for i in range(n)])
def gray_to_bin(bin_list):
"""
Convert from Gray coding to binary coding.
We assume big endian encoding.
Examples
========
>>> from sympy.combinatorics.graycode import gray_to_bin
>>> gray_to_bin('100')
'111'
See Also
========
bin_to_gray
"""
b = [bin_list[0]]
for i in range(1, len(bin_list)):
b += str(int(b[i - 1] != bin_list[i]))
return ''.join(b)
def bin_to_gray(bin_list):
"""
Convert from binary coding to gray coding.
We assume big endian encoding.
Examples
========
>>> from sympy.combinatorics.graycode import bin_to_gray
>>> bin_to_gray('111')
'100'
See Also
========
gray_to_bin
"""
b = [bin_list[0]]
for i in range(1, len(bin_list)):
b += str(int(bin_list[i]) ^ int(bin_list[i - 1]))
return ''.join(b)
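# Illustrative aside (added; not part of the upstream sympy file): for an integer n the
# binary reflected Gray code built above equals the classic bit trick n ^ (n >> 1), and
# gray_to_bin / bin_to_gray are inverses of each other.
def _gray_code_identity_demo():
    n = 7
    assert format(n ^ (n >> 1), '03b') == '100' == bin_to_gray('111')
    assert gray_to_bin(bin_to_gray('111')) == '111'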
def get_subset_from_bitstring(super_set, bitstring):
"""
Gets the subset defined by the bitstring.
Examples
========
>>> from sympy.combinatorics.graycode import get_subset_from_bitstring
>>> get_subset_from_bitstring(['a', 'b', 'c', 'd'], '0011')
['c', 'd']
>>> get_subset_from_bitstring(['c', 'a', 'c', 'c'], '1100')
['c', 'a']
See Also
========
graycode_subsets
"""
if len(super_set) != len(bitstring):
raise ValueError("The sizes of the lists are not equal")
return [super_set[i] for i, j in enumerate(bitstring)
if bitstring[i] == '1']
def graycode_subsets(gray_code_set):
"""
Generates the subsets as enumerated by a Gray code.
Examples
========
>>> from sympy.combinatorics.graycode import graycode_subsets
>>> list(graycode_subsets(['a', 'b', 'c']))
[[], ['c'], ['b', 'c'], ['b'], ['a', 'b'], ['a', 'b', 'c'], \
['a', 'c'], ['a']]
>>> list(graycode_subsets(['a', 'b', 'c', 'c']))
[[], ['c'], ['c', 'c'], ['c'], ['b', 'c'], ['b', 'c', 'c'], \
['b', 'c'], ['b'], ['a', 'b'], ['a', 'b', 'c'], ['a', 'b', 'c', 'c'], \
['a', 'b', 'c'], ['a', 'c'], ['a', 'c', 'c'], ['a', 'c'], ['a']]
See Also
========
get_subset_from_bitstring
"""
for bitstring in list(GrayCode(len(gray_code_set)).generate_gray()):
yield get_subset_from_bitstring(gray_code_set, bitstring)
| 26.071926 | 80 | 0.51633 |
ed7164aac1415d9f22fdd5c061de4ccbb4eea124 | 2,771 | py | Python | lc0819_most_common_word.py | bowen0701/python-algorithms-data-structures | e625f59a9fc59e4728825078d4434a7968a724e5 | ["BSD-2-Clause"] | 8 | 2019-03-18T06:37:24.000Z | 2022-01-30T07:50:58.000Z | lc0819_most_common_word.py | bowen0701/python-algorithms-data-structures | e625f59a9fc59e4728825078d4434a7968a724e5 | ["BSD-2-Clause"] | null | null | null | lc0819_most_common_word.py | bowen0701/python-algorithms-data-structures | e625f59a9fc59e4728825078d4434a7968a724e5 | ["BSD-2-Clause"] | null | null | null |
"""Leetcode 819. Most Common Word
Easy
URL: https://leetcode.com/problems/most-common-word/
Given a paragraph and a list of banned words, return the most frequent word that
is not in the list of banned words.
It is guaranteed there is at least one word that isn't banned,
and that the answer is unique.
Words in the list of banned words are given in lowercase, and free of punctuation.
Words in the paragraph are not case sensitive. The answer is in lowercase.
Example:
Input:
paragraph = "Bob hit a ball, the hit BALL flew far after it was hit."
banned = ["hit"]
Output: "ball"
Explanation:
"hit" occurs 3 times, but it is a banned word.
"ball" occurs twice (and no other word does), so it is the
most frequent non-banned word in the paragraph.
Note that words in the paragraph are not case sensitive,
that punctuation is ignored (even if adjacent to words, such as "ball,"),
and that "hit" isn't the answer even though it occurs more because it is banned.
Note:
- 1 <= paragraph.length <= 1000.
- 0 <= banned.length <= 100.
- 1 <= banned[i].length <= 10.
- The answer is unique, and written in lowercase (even if its occurrences in
paragraph may have uppercase symbols, and even if it is a proper noun.)
- paragraph only consists of letters, spaces, or the punctuation symbols !?',;.
- There are no hyphens or hyphenated words.
- Words only consist of letters, never apostrophes or other punctuation symbols.
"""
class SolutionDict(object):
def mostCommonWord(self, paragraph, banned):
"""
:type paragraph: str
:type banned: List[str]
:rtype: str
Time complexity: O(n).
Space complexity: O(n).
"""
# Iteratively use dict to aggregate word count.
from collections import defaultdict
# Convert all punctuations to space and lower case.
punctuations = set(list('!?\',;.'))
for c in paragraph:
if c in punctuations:
paragraph = paragraph.replace(c, ' ')
paragraph = paragraph.lower()
# Store banned words in set for fast loopup.
bans = set(banned)
# Use dict for word count.
word_count_d = defaultdict(int)
words = paragraph.split()
for w in words:
if w not in bans:
word_count_d[w] += 1
# Get most common word.
result = ''
count = 0
for w, n in word_count_d.items():
if n > count:
result = w
count = n
return result
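# Hedged alternative sketch (added for illustration; not part of the original solution):
# the same O(n) counting can be expressed with a regex word split and collections.Counter.
class SolutionCounter(object):
    def mostCommonWord(self, paragraph, banned):
        import re
        from collections import Counter
        banned_set = set(banned)
        # Lower-case, keep alphabetic runs only, drop banned words, then count.
        words = re.findall(r'[a-z]+', paragraph.lower())
        counts = Counter(w for w in words if w not in banned_set)
        return counts.most_common(1)[0][0]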
def main():
# Output: "ball"
paragraph = "Bob hit a ball, the hit BALL flew far after it was hit."
banned = ["hit"]
    print(SolutionDict().mostCommonWord(paragraph, banned))
if __name__ == '__main__':
main()
| 29.795699 | 82 | 0.64345 |
6bcb5283074e22d69dd539986eebec81d1af7366 | 8,358 | py | Python | sequencers/tests/test_api_views.py | bihealth/digestiflow-server | 298c53f95dbf56e7be0d0b8bcceacabc21257d5f | ["MIT"] | 13 | 2019-11-27T19:12:15.000Z | 2021-12-01T21:32:18.000Z | sequencers/tests/test_api_views.py | bihealth/digestiflow-server | 298c53f95dbf56e7be0d0b8bcceacabc21257d5f | ["MIT"] | 60 | 2019-03-27T14:43:19.000Z | 2022-03-22T09:12:53.000Z | sequencers/tests/test_api_views.py | bihealth/digestiflow-server | 298c53f95dbf56e7be0d0b8bcceacabc21257d5f | ["MIT"] | 3 | 2020-11-09T07:08:42.000Z | 2022-02-09T11:37:54.000Z |
# TODO: check timeline events
import json
from test_plus.test import APITestCase
from digestiflow.test_utils import SetupUserMixin, SetupProjectMixin, AuthenticatedRequestMixin
from ..models import SequencingMachine
from ..tests import SetupSequencingMachineMixin
class SequencingMachineListCreateApiViewTest(
SetupSequencingMachineMixin,
SetupProjectMixin,
SetupUserMixin,
AuthenticatedRequestMixin,
APITestCase,
):
"""Tests for creation of sequencing machines using REST API"""
url_name = "api:sequencers"
def testGet(self):
"""Test that querying API for the machine list works (with super user)"""
response = self.runGet(self.root)
self.response_200(response)
data = json.loads(response.content.decode("utf-8"))
self.assertEqual(len(data), 1)
def testGetAccessDenied(self):
"""Test that access is denied if role assignment is missing"""
self.runGet(None)
self.response_401()
for user in (self.norole, self.unrelated_owner):
self.runGet(user)
self.response_403()
def testGetAccessAllowed(self):
"""Test that access is allowed if role assignment is correct"""
for user in (self.guest, self.contributor, self.delegate, self.owner, self.root):
response = self.runGet(user)
self.response_200(response)
data = json.loads(response.content.decode("utf-8"))
self.assertEqual(len(data), 1)
def testPost(self):
"""Test that creating machine via API works (with super user)"""
response = self.runPost(self.root, data=self.post_data)
self.response_201(response)
data = json.loads(response.content.decode("utf-8"))
self.assertIn("sodar_uuid", data)
def testPostAccessDenied(self):
"""Test that creating machine via API is denied if role assignment is missing"""
self.runPost(None, data=self.post_data)
self.response_401()
for user in (self.guest, self.norole, self.unrelated_owner):
self.runPost(user, data=self.post_data)
self.response_403()
def testPostAccessAllowed(self):
"""Test that creating machine via API is allowed if role assignment is correct"""
for user in (self.contributor, self.delegate, self.owner, self.root):
response = self.runPost(user, data=self.post_data)
self.response_201(response)
data = json.loads(response.content.decode("utf-8"))
self.assertIn("sodar_uuid", data)
SequencingMachine.objects.filter(sodar_uuid=data["sodar_uuid"]).delete()
class SequencingMachineUpdateApiViewTest(
SetupSequencingMachineMixin,
SetupProjectMixin,
SetupUserMixin,
AuthenticatedRequestMixin,
APITestCase,
):
"""Tests for detail view, update, delete of sequencing machines using REST API"""
url_name = "api:sequencers"
def testGet(self):
"""Test that querying API for the machine list works (with super user)"""
response = self.runGet(self.root, sequencer=self.hiseq2000.sodar_uuid)
self.response_200(response)
data = json.loads(response.content.decode("utf-8"))
self.assertEqual(data["sodar_uuid"], str(self.hiseq2000.sodar_uuid))
def testGetAccessDenied(self):
"""Test that access is denied if role assignment is missing"""
self.runGet(None, sequencer=self.hiseq2000.sodar_uuid)
self.response_401()
for user in (self.norole, self.unrelated_owner):
self.runGet(user, sequencer=self.hiseq2000.sodar_uuid)
self.response_403()
def testGetAccessAllowed(self):
"""Test that access is allowed if role assignment is correct"""
for user in (self.guest, self.contributor, self.delegate, self.owner, self.root):
response = self.runGet(user, sequencer=self.hiseq2000.sodar_uuid)
self.response_200(response)
data = json.loads(response.content.decode("utf-8"))
self.assertEqual(data["sodar_uuid"], str(self.hiseq2000.sodar_uuid))
def testUpdate(self):
"""Test that creating machine via API works (with super user)"""
response = self.runPut(self.root, sequencer=self.hiseq2000.sodar_uuid, data=self.post_data)
self.response_200(response)
data = json.loads(response.content.decode("utf-8"))
self.assertEqual(data["vendor_id"], self.post_data["vendor_id"])
def testUpdateAccessDenied(self):
"""Test that creating machine via API is denied if role assignment is missing"""
self.runPut(None, sequencer=self.hiseq2000.sodar_uuid, data=self.post_data)
self.response_401()
for user in (self.guest, self.norole, self.unrelated_owner):
self.runPut(user, sequencer=self.hiseq2000.sodar_uuid, data=self.post_data)
self.response_403()
def testUpdateAccessAllowed(self):
"""Test that creating machine via API is allowed if role assignment is correct"""
for user in (self.contributor, self.delegate, self.owner, self.root):
response = self.runPut(user, sequencer=self.hiseq2000.sodar_uuid, data=self.post_data)
self.response_200(response)
data = json.loads(response.content.decode("utf-8"))
self.assertEqual(data["vendor_id"], self.post_data["vendor_id"])
def testDelete(self):
"""Test that creating machine via API works (with super user)"""
self.assertEqual(SequencingMachine.objects.count(), 1)
response = self.runDelete(self.root, sequencer=self.hiseq2000.sodar_uuid)
self.response_204(response)
self.assertEqual(SequencingMachine.objects.count(), 0)
def testDeleteAccessDenied(self):
"""Test that creating machine via API is denied if role assignment is missing"""
self.assertEqual(SequencingMachine.objects.count(), 1)
self.runDelete(None, sequencer=self.hiseq2000.sodar_uuid)
self.assertEqual(SequencingMachine.objects.count(), 1)
self.response_401()
for user in (self.guest, self.norole, self.unrelated_owner):
self.assertEqual(SequencingMachine.objects.count(), 1)
self.runDelete(user, sequencer=self.hiseq2000.sodar_uuid)
self.assertEqual(SequencingMachine.objects.count(), 1)
self.response_403()
def testDeleteAccessAllowed(self):
"""Test that creating machine via API is allowed if role assignment is correct"""
for user in (self.contributor, self.delegate, self.owner, self.root):
SequencingMachine.objects.all().delete()
machine = self.make_machine()
self.assertEqual(SequencingMachine.objects.count(), 1)
response = self.runDelete(user, sequencer=machine.sodar_uuid)
self.response_204(response)
self.assertEqual(SequencingMachine.objects.count(), 0)
class SequencingMachineByVendorIdApiViewTest(
SetupSequencingMachineMixin,
SetupProjectMixin,
SetupUserMixin,
AuthenticatedRequestMixin,
APITestCase,
):
"""Test that resolving sequencing machine by vendor ID works"""
url_name = "api:sequencers"
def testGet(self):
"""Test that querying API for the machine list works (with super user)"""
response = self.runGet(self.root, sequencer=self.hiseq2000.vendor_id)
self.response_200(response)
data = json.loads(response.content.decode("utf-8"))
self.assertEqual(data["sodar_uuid"], str(self.hiseq2000.sodar_uuid))
def testGetAccessDenied(self):
"""Test that access is denied if role assignment is missing"""
self.runGet(None, sequencer=self.hiseq2000.vendor_id)
self.response_401()
for user in (self.norole, self.unrelated_owner):
self.runGet(user)
self.response_403()
def testGetAccessAllowed(self):
"""Test that access is allowed if role assignment is correct"""
for user in (self.guest, self.contributor, self.delegate, self.owner, self.root):
response = self.runGet(user, sequencer=self.hiseq2000.vendor_id)
self.response_200(response)
data = json.loads(response.content.decode("utf-8"))
self.assertEqual(data["sodar_uuid"], str(self.hiseq2000.sodar_uuid))
| 43.989474 | 99 | 0.679229 |
9ddbb45c8b304bfb8948b46b219e56621970635d | 620 | py | Python | mainapp/migrations/0001_initial.py | grebenshchikovr/Django_HW | 779dc75839a47920eebdfb96f31a3d4cacce9045 | ["MIT"] | null | null | null | mainapp/migrations/0001_initial.py | grebenshchikovr/Django_HW | 779dc75839a47920eebdfb96f31a3d4cacce9045 | ["MIT"] | null | null | null | mainapp/migrations/0001_initial.py | grebenshchikovr/Django_HW | 779dc75839a47920eebdfb96f31a3d4cacce9045 | ["MIT"] | null | null | null |
# Generated by Django 2.2.4 on 2019-08-14 19:44
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='ProductCategory',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=64, unique=True, verbose_name='название')),
('description', models.TextField(blank=True, verbose_name='описание')),
],
),
]
| 26.956522 | 114 | 0.6 |
bb02ead662c48590d7b43a547c238c824532e5a6 | 119 | py | Python | WebsiteScanning/Web Scanning.py | SudhansuShakya/Python | 83b369dcc91e3197bc6116cb3aba29edd0fdbf00 | ["Unlicense"] | null | null | null | WebsiteScanning/Web Scanning.py | SudhansuShakya/Python | 83b369dcc91e3197bc6116cb3aba29edd0fdbf00 | ["Unlicense"] | null | null | null | WebsiteScanning/Web Scanning.py | SudhansuShakya/Python | 83b369dcc91e3197bc6116cb3aba29edd0fdbf00 | ["Unlicense"] | null | null | null |
import urllib.request
url=input("Enter Url: ")
text=urllib.request.urlopen(url)
print(text.read().decode('utf-8'))
| 23.8 | 35 | 0.714286 |
eaac8c11d4e127749882179ec9a83f22e1f17027 | 438 | py | Python | tests/test_symbol_parser.py | Johnabell/latex2mathml | 2eea4ec348b1cc3d98e9705a568fa7e6981eeed1 | ["MIT"] | null | null | null | tests/test_symbol_parser.py | Johnabell/latex2mathml | 2eea4ec348b1cc3d98e9705a568fa7e6981eeed1 | ["MIT"] | null | null | null | tests/test_symbol_parser.py | Johnabell/latex2mathml | 2eea4ec348b1cc3d98e9705a568fa7e6981eeed1 | ["MIT"] | null | null | null |
#!/usr/bin/env python
# __author__ = "Ronie Martinez"
# __copyright__ = "Copyright 2016-2019, Ronie Martinez"
# __credits__ = ["Ronie Martinez"]
# __license__ = "MIT"
# __maintainer__ = "Ronie Martinez"
# __email__ = "[email protected]"
from latex2mathml.symbols_parser import convert_symbol
def test_operator_plus():
assert '0002B' == convert_symbol('+')
def test_alias_command():
assert '02192' == convert_symbol(r'\to')
| 25.764706 | 55 | 0.726027 |
83b19fdacb2cefd3e8eb389bfafe8b85f16347b2 | 1,320 | py | Python | app/pylib/tf_rl/utils/geometry.py | mingkaic/rocnnet | b0e6b9ef1b80ee3d33d68f48dd051a99c2df39ab | ["MIT"] | 3 | 2017-01-18T20:42:56.000Z | 2018-11-07T12:56:15.000Z | app/pylib/tf_rl/utils/geometry.py | mingkaic/rocnnet | b0e6b9ef1b80ee3d33d68f48dd051a99c2df39ab | ["MIT"] | 10 | 2016-12-01T08:15:28.000Z | 2018-09-28T17:16:32.000Z | app/pylib/tf_rl/utils/geometry.py | mingkaic/rocnnet | b0e6b9ef1b80ee3d33d68f48dd051a99c2df39ab | ["MIT"] | null | null | null |
"""
This module assumes that all geometrical points are
represented as 1D numpy arrays.
It was designed and tested on 2D points,
but if you try it on 3D points you may
be pleasantly surprised ;-)
"""
import numpy as np
def point_distance(x, y):
"""Returns euclidean distance between points x and y"""
return np.linalg.norm(x-y)
def point_projected_on_line(line_s, line_e, point):
"""Project point on line that goes through line_s and line_e
assumes line_e is not equal or close to line_s
"""
line_along = line_e - line_s
transformed_point = point - line_s
point_dot_line = np.dot(transformed_point, line_along)
line_along_norm = np.dot(line_along, line_along)
transformed_projection = (point_dot_line / line_along_norm) * line_along
return transformed_projection + line_s
def point_segment_distance(segment_s, segment_e, point):
"""Returns distance from point to the closest point on segment
connecting points segment_s and segment_e"""
projected = point_projected_on_line(segment_s, segment_e, point)
if np.isclose(point_distance(segment_s, projected) + point_distance(projected, segment_e),
point_distance(segment_s, segment_e)):
# projected on segment
return point_distance(point, projected)
else:
return min(point_distance(point, segment_s), point_distance(point, segment_e))
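# Hedged usage sketch (added; not part of the original module): a quick 2D check of the
# helpers above.
def _demo_point_segment_distance():
    s, e = np.array([0.0, 0.0]), np.array([10.0, 0.0])
    # (5, 3) projects onto the segment interior, so the distance is the perpendicular 3.0.
    assert np.isclose(point_segment_distance(s, e, np.array([5.0, 3.0])), 3.0)
    # (13, 4) projects past the end point, so the distance is to the endpoint (10, 0), i.e. 5.0.
    assert np.isclose(point_segment_distance(s, e, np.array([13.0, 4.0])), 5.0)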
| 31.428571 | 91 | 0.777273 |
23a2753f1b19c260bbb933ccf92ce53c2a06861e | 30,049 | py | Python | InceptionV3.py | krishnateja95/Quantization-Test_bed | efdf1fd207d1012a5b1f299952a7837f3cc40f5a | ["MIT"] | null | null | null | InceptionV3.py | krishnateja95/Quantization-Test_bed | efdf1fd207d1012a5b1f299952a7837f3cc40f5a | ["MIT"] | null | null | null | InceptionV3.py | krishnateja95/Quantization-Test_bed | efdf1fd207d1012a5b1f299952a7837f3cc40f5a | ["MIT"] | null | null | null |
import tensorflow as tf
import numpy as np
import argparse
import pickle as pkl
import cv2
import tqdm
import h5py
import sys
def quantize_weights(weights):
abs_weights = np.abs(weights)
vmax = np.max(abs_weights)
s = vmax / 127.
qweights = weights / s
qweights = np.round(qweights)
qweights = qweights.astype(np.int8)
return qweights, s
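# Hedged illustration (added; not part of the original file): quantize_weights above is
# symmetric per-tensor int8 quantization -- the largest |w| maps to 127, dequantization
# is q * s, and the worst-case rounding error per weight is about s / 2.
def _quantize_weights_roundtrip_demo():
    w = np.array([-0.4, 0.1, 0.25], dtype=np.float32)
    q, s = quantize_weights(w)             # q is int8, s == max(|w|) / 127
    w_restored = q.astype(np.float32) * s  # dequantize
    assert np.all(np.abs(w - w_restored) <= s / 2 + 1e-7)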
def batch_norm(x, mean, variance, offset=None, scale=None):
return tf.nn.batch_normalization(x, mean, variance, offset, scale, variance_epsilon=1e-3)
def maxpool_2d(x, k=2, s=2, padding='VALID'):
# MaxPool2D wrapper
return tf.nn.max_pool(x, ksize=[1, k, k, 1], strides=[1, s, s, 1],padding=padding)
def avgpool_2d(x, k=2, s=1, padding='VALID'):
# AvgPool2D wrapper
return tf.nn.avg_pool(x, ksize=[1, k, k, 1], strides=[1, s, s,1],padding=padding)
def get_dense_weights(weights, weight_name, bias_name='bbb', quant=True):
w = weights[weight_name]
if quant:
w, s = quantize_weights(w)
w = tf.constant(w, dtype=tf.float32)
else:
w = tf.constant(weights[weight_name], dtype=tf.float32)
s = 1.0
try:
b = tf.constant(weights[bias_name], dtype=tf.float32)
except:
b = None
return w, b, s
def denselayer(x,w,b,quant,calibrate,x_max=[],x_min=[],weight_scale=1.0,activation_scale=1., activation=''):
if calibrate:
x_max.append(tf.reduce_max(x))
x_min.append(tf.reduce_min(x))
x = tf.matmul(x, w)
if quant:
x, sx = quantize_tensor(x,activation_scale)
x = tf.cast(x, dtype=tf.float32)
x = tf.matmul(x, w)
x = x*weight_scale
x = x*sx
x = tf.add(x, b)
if activation == "relu":
x = tf.nn.relu(x)
return x, x_max, x_min
def quantize_conv_weights(weights,conv_quant):
if conv_quant == 'per_channel':
s = []
for i in range(weights.shape[-1]):
abs_weights = np.abs(weights[:,:,:,i])
vmax = np.max(abs_weights)
scale = vmax/127.
s.append(scale)
scales = np.array(s)
qweights = np.divide(weights,scales)
qweights = np.round(qweights)
qweights = qweights.astype(np.int8)
return qweights,scales
if conv_quant == 'per_layer':
abs_weights = np.abs(weights)
vmax = np.max(abs_weights)
s = vmax / 127.
qweights = weights / s
qweights = np.round(qweights)
qweights = qweights.astype(np.int8)
return qweights, s
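# Illustrative comparison of the two modes above (added as a sketch; this helper is not
# part of the original script): 'per_channel' returns one scale per output channel of
# the HWIO kernel, while 'per_layer' returns a single scalar scale for the whole tensor.
def _demo_conv_quant_modes():
    w = np.random.randn(3, 3, 8, 16).astype(np.float32)
    qw_pc, scales_pc = quantize_conv_weights(w, 'per_channel')   # scales_pc.shape == (16,)
    qw_pl, scale_pl = quantize_conv_weights(w, 'per_layer')      # scale_pl is a single float
    return scales_pc.shape, scale_pl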
def get_conv_weights_biases(weights, conv_quant,weight_name, bias_name='bbb', quant=True):
w = weights[weight_name]
if quant:
w, s = quantize_conv_weights(w,conv_quant)
w = tf.constant(w, dtype=tf.float32)
else:
w = tf.constant(weights[weight_name], dtype=tf.float32)
s = 1.0
try:
b = tf.constant(weights[bias_name], dtype=tf.float32)
except:
b = None
return w, b, s
def get_bn_param(weights, mean, std, beta):
mean = tf.constant(weights[mean], dtype=tf.float32)
std = tf.constant(weights[std], dtype=tf.float32)
beta = tf.constant(weights[beta], dtype=tf.float32)
return mean, std, beta
def quantize_tensor(x,s):
x = tf.divide(x, s)
x = tf.rint(x)
x = tf.clip_by_value(x,-128.0,127.0)
return x,s
def conv_2d(x, w, b, quant, calibrate, x_max, x_min, activation_scale,
weight_scale=1.0, strides=1, padding='SAME', dilations=[1,1,1,1], activation=''):
if calibrate:
x_max.append(tf.reduce_max(x))
x_min.append(tf.reduce_min(x))
x = tf.nn.conv2d(x, w, strides=[1, strides, strides, 1], padding=padding, dilations=dilations)
if quant:
x, sx = quantize_tensor(x,activation_scale)
x = tf.cast(x, dtype=tf.float32)
x = tf.nn.conv2d(x, w, strides=[1, strides, strides, 1], padding=padding, dilations=dilations)
x = x * weight_scale
x = x*sx
if b is not None:
x = tf.nn.bias_add(x, b)
if activation == 'relu':
x = tf.nn.relu(x)
return x, x_max, x_min
def conv2d_bn(x, quant, calibrate, x_max, x_min, activation_scale,conv_quant,layer_count,
weights, strides=1, padding='SAME'):
bn_beta = 'batch_normalization_' + str(layer_count) + '/beta:0'
bn_mean = 'batch_normalization_' + str(layer_count) + '/moving_mean:0'
bn_var = 'batch_normalization_' + str(layer_count) + '/moving_variance:0'
conv_name = 'conv2d_' + str(layer_count) + '/kernel:0'
bias_name = 'conv2d_' + str(layer_count) + '/bias:0'
layer_count += 1
w, b, s = get_conv_weights_biases(weights, conv_quant,conv_name, bias_name,quant)
x, x_max, x_min = conv_2d(x, w, b, quant, calibrate, x_max, x_min, activation_scale, s, strides=strides, padding=padding)
mean, std, beta = get_bn_param(weights, bn_mean, bn_var, bn_beta)
x = batch_norm(x, mean, std, beta)
x = tf.nn.relu(x)
return x, layer_count, x_max, x_min
def InceptionV3(img_input, weights, quant, calibrate, activation_scales,conv_quant):
x_max,x_min = [], []
layer_count = 1
x = tf.reshape(img_input, shape=[-1, 299, 299, 3])
x,layer_count,x_max,x_min = conv2d_bn(x,quant,calibrate,x_max,x_min,activation_scales[layer_count-1] ,conv_quant,
layer_count,weights,strides=2,padding='VALID')
x,layer_count, x_max, x_min = conv2d_bn(x, quant,calibrate,x_max,x_min,activation_scales[layer_count-1],conv_quant,
layer_count, weights, strides=1, padding='VALID')
x, layer_count, x_max, x_min = conv2d_bn(x,quant,calibrate,x_max,x_min,activation_scales[layer_count-1], conv_quant,
layer_count, weights)
x = maxpool_2d(x, k=3, s=2, padding='SAME')
x, layer_count, x_max, x_min = conv2d_bn(x, quant,calibrate,x_max,x_min,activation_scales[layer_count-1],conv_quant,
layer_count, weights, strides=1, padding='VALID')
x, layer_count, x_max, x_min = conv2d_bn(x, quant,calibrate,x_max,x_min,activation_scales[layer_count-1],conv_quant,
layer_count, weights, strides=1, padding='VALID')
x = maxpool_2d(x, k=3, s=2, padding='SAME')
# mixed 0, 1, 2: 35 x 35 x 256
branch1x1, layer_count, x_max, x_min = conv2d_bn(x, quant,calibrate,x_max,x_min,
activation_scales[layer_count-1],conv_quant,layer_count, weights)
branch5x5, layer_count, x_max, x_min = conv2d_bn(x, quant,calibrate,x_max,x_min,activation_scales[layer_count-1],
conv_quant,layer_count, weights)
branch5x5, layer_count, x_max, x_min = conv2d_bn(branch5x5, quant,calibrate,x_max,x_min,
activation_scales[layer_count-1] ,conv_quant,layer_count, weights)
branch3x3dbl, layer_count, x_max, x_min = conv2d_bn(x, quant,calibrate,x_max,x_min,
activation_scales[layer_count-1] ,conv_quant,layer_count, weights)
branch3x3dbl, layer_count, x_max, x_min = conv2d_bn(branch3x3dbl, quant,calibrate,x_max,x_min,
activation_scales[layer_count-1] ,conv_quant,layer_count, weights)
branch3x3dbl, layer_count, x_max, x_min = conv2d_bn(branch3x3dbl, quant,calibrate,x_max,x_min,
activation_scales[layer_count-1] ,conv_quant,layer_count, weights)
branch_pool = avgpool_2d(x, k=3, s=1, padding='SAME')
branch_pool, layer_count, x_max, x_min = conv2d_bn(branch_pool, quant,calibrate,x_max,x_min,
activation_scales[layer_count-1] ,conv_quant,layer_count, weights)
x = tf.concat([branch1x1, branch5x5, branch3x3dbl, branch_pool], axis=3)
# mixed 1: 35 x 35 x 256
branch1x1, layer_count , x_max, x_min= conv2d_bn(x, quant,calibrate,x_max,x_min,
activation_scales[layer_count-1] ,conv_quant,layer_count, weights)
branch5x5, layer_count, x_max, x_min = conv2d_bn(x, quant,calibrate,x_max,x_min,
activation_scales[layer_count-1] ,conv_quant,layer_count, weights)
branch5x5, layer_count, x_max, x_min = conv2d_bn(branch5x5, quant,calibrate,x_max,x_min,
activation_scales[layer_count-1] ,conv_quant,layer_count, weights)
branch3x3dbl, layer_count, x_max, x_min = conv2d_bn(x, quant,calibrate,x_max,x_min,
activation_scales[layer_count-1] ,conv_quant,layer_count, weights)
branch3x3dbl, layer_count, x_max, x_min = conv2d_bn(branch3x3dbl, quant,calibrate,x_max,x_min,
activation_scales[layer_count-1] ,conv_quant,layer_count, weights)
branch3x3dbl, layer_count, x_max, x_min = conv2d_bn(branch3x3dbl, quant,calibrate,x_max,x_min,
activation_scales[layer_count-1] ,conv_quant,layer_count, weights)
branch_pool = avgpool_2d(x, k=3, s=1, padding='SAME')
branch_pool, layer_count, x_max, x_min = conv2d_bn(branch_pool, quant,calibrate,x_max,x_min,
activation_scales[layer_count-1] ,conv_quant,layer_count, weights)
x = tf.concat([branch1x1, branch5x5, branch3x3dbl, branch_pool], axis=3)
# mixed 2: 35 x 35 x 256
branch1x1, layer_count, x_max, x_min = conv2d_bn(x, quant,calibrate,x_max,x_min,
activation_scales[layer_count-1],conv_quant,layer_count, weights)
branch5x5, layer_count, x_max, x_min = conv2d_bn(x, quant,calibrate,x_max,x_min,
activation_scales[layer_count-1],conv_quant,layer_count, weights)
branch5x5, layer_count, x_max, x_min = conv2d_bn(branch5x5, quant,calibrate,x_max,x_min,
activation_scales[layer_count-1],conv_quant,layer_count, weights)
branch3x3dbl, layer_count, x_max, x_min = conv2d_bn(x, quant,calibrate,x_max,x_min,
activation_scales[layer_count-1],conv_quant,layer_count, weights)
branch3x3dbl, layer_count, x_max, x_min = conv2d_bn(branch3x3dbl, quant,calibrate,x_max,x_min,
activation_scales[layer_count-1],conv_quant,layer_count, weights)
branch3x3dbl, layer_count, x_max, x_min = conv2d_bn(branch3x3dbl,quant,calibrate,x_max,x_min,
activation_scales[layer_count-1], conv_quant,layer_count, weights)
branch_pool = avgpool_2d(x, k=3, s=1, padding='SAME')
branch_pool, layer_count, x_max, x_min = conv2d_bn(branch_pool, quant,calibrate,x_max,x_min,
activation_scales[layer_count-1],conv_quant,layer_count, weights)
x = tf.concat([branch1x1, branch5x5, branch3x3dbl, branch_pool], axis=3)
# mixed 3: 17 x 17 x 768
branch3x3, layer_count, x_max, x_min = conv2d_bn(x, quant,calibrate,x_max,x_min,activation_scales[layer_count-1],
conv_quant,layer_count, weights, strides=2, padding='VALID')
branch3x3dbl, layer_count, x_max, x_min = conv2d_bn(x, quant,calibrate,x_max,x_min,
activation_scales[layer_count-1],conv_quant,layer_count, weights)
branch3x3dbl, layer_count, x_max, x_min = conv2d_bn(branch3x3dbl,quant,calibrate,x_max,x_min,
activation_scales[layer_count-1],conv_quant, layer_count, weights)
branch3x3dbl, layer_count, x_max, x_min = conv2d_bn(branch3x3dbl, quant,calibrate,x_max,x_min,
activation_scales[layer_count-1],conv_quant,layer_count,
weights, strides=2, padding='VALID')
branch_pool = maxpool_2d(x, k=3, s=2, padding='VALID')
x = tf.concat([branch3x3, branch3x3dbl, branch_pool], axis=3)
# mixed 4: 17 x 17 x 768
branch1x1, layer_count, x_max, x_min = conv2d_bn(x, quant,calibrate,x_max,x_min,
activation_scales[layer_count-1],conv_quant,layer_count, weights)
branch7x7, layer_count, x_max, x_min = conv2d_bn(x,quant,calibrate,x_max,x_min,
activation_scales[layer_count-1],conv_quant, layer_count, weights)
branch7x7, layer_count, x_max, x_min = conv2d_bn(branch7x7, quant,calibrate,x_max,x_min,
activation_scales[layer_count-1],conv_quant,layer_count, weights)
branch7x7, layer_count, x_max, x_min = conv2d_bn(branch7x7, quant,calibrate,x_max,x_min,
activation_scales[layer_count-1],conv_quant,layer_count, weights)
branch7x7dbl, layer_count, x_max, x_min = conv2d_bn(x, quant,calibrate,x_max,x_min,
activation_scales[layer_count-1],conv_quant,layer_count, weights)
branch7x7dbl, layer_count, x_max, x_min = conv2d_bn(branch7x7dbl,quant,calibrate,x_max,x_min,
activation_scales[layer_count-1], conv_quant,layer_count, weights)
branch7x7dbl, layer_count, x_max, x_min = conv2d_bn(branch7x7dbl,quant,calibrate,x_max,x_min,
activation_scales[layer_count-1], conv_quant,layer_count, weights)
branch7x7dbl, layer_count, x_max, x_min = conv2d_bn(branch7x7dbl,quant,calibrate,x_max,x_min,
activation_scales[layer_count-1], conv_quant,layer_count, weights)
branch7x7dbl, layer_count, x_max, x_min = conv2d_bn(branch7x7dbl,quant,calibrate,x_max,x_min,
activation_scales[layer_count-1], conv_quant,layer_count, weights)
branch_pool = avgpool_2d(x, k=3, s=1, padding='SAME')
branch_pool, layer_count, x_max, x_min = conv2d_bn(branch_pool, quant,calibrate,x_max,x_min,
activation_scales[layer_count-1],conv_quant,layer_count, weights)
x = tf.concat([branch1x1, branch7x7, branch7x7dbl, branch_pool], axis=3)
# mixed 5, 6: 17 x 17 x 768
for i in range(2):
branch1x1, layer_count, x_max, x_min = conv2d_bn(x,quant,calibrate,x_max,x_min,
activation_scales[layer_count-1], conv_quant,layer_count, weights)
branch7x7, layer_count, x_max, x_min = conv2d_bn(x,quant,calibrate,x_max,x_min,
activation_scales[layer_count-1], conv_quant,layer_count, weights)
branch7x7, layer_count, x_max, x_min = conv2d_bn(branch7x7, quant,calibrate,x_max,x_min,
activation_scales[layer_count-1],conv_quant,layer_count, weights)
branch7x7, layer_count, x_max, x_min = conv2d_bn(branch7x7, quant,calibrate,x_max,x_min,
activation_scales[layer_count-1],conv_quant,layer_count, weights)
branch7x7dbl, layer_count, x_max, x_min = conv2d_bn(x, quant,calibrate,x_max,x_min,
activation_scales[layer_count-1],conv_quant,layer_count, weights)
branch7x7dbl, layer_count, x_max, x_min = conv2d_bn(branch7x7dbl,quant,calibrate,x_max,x_min,
activation_scales[layer_count-1], conv_quant,layer_count, weights)
branch7x7dbl, layer_count, x_max, x_min = conv2d_bn(branch7x7dbl,quant,calibrate,x_max,x_min,
activation_scales[layer_count-1], conv_quant,layer_count, weights)
branch7x7dbl, layer_count, x_max, x_min = conv2d_bn(branch7x7dbl,quant,calibrate,x_max,x_min,
activation_scales[layer_count-1], conv_quant,layer_count, weights)
branch7x7dbl, layer_count, x_max, x_min = conv2d_bn(branch7x7dbl,quant,calibrate,x_max,x_min,
activation_scales[layer_count-1], conv_quant,layer_count, weights)
branch_pool = avgpool_2d(x, k=3, s=1, padding='SAME')
branch_pool, layer_count, x_max, x_min = conv2d_bn(branch_pool, quant,calibrate,x_max,x_min,
activation_scales[layer_count-1],conv_quant,layer_count, weights)
x = tf.concat([branch1x1, branch7x7, branch7x7dbl, branch_pool], axis=3)
# mixed 7: 17 x 17 x 768
branch1x1, layer_count, x_max, x_min = conv2d_bn(x,quant,calibrate,x_max,x_min,
activation_scales[layer_count-1], conv_quant,layer_count, weights)
branch7x7, layer_count, x_max, x_min = conv2d_bn(x, quant,calibrate,x_max,x_min,
activation_scales[layer_count-1],conv_quant,layer_count, weights)
branch7x7, layer_count, x_max, x_min = conv2d_bn(branch7x7, quant,calibrate,x_max,x_min,
activation_scales[layer_count-1],conv_quant,layer_count, weights)
branch7x7, layer_count, x_max, x_min = conv2d_bn(branch7x7, quant,calibrate,x_max,x_min,
activation_scales[layer_count-1],conv_quant,layer_count, weights)
branch7x7dbl, layer_count, x_max, x_min = conv2d_bn(x, quant,calibrate,x_max,x_min,
activation_scales[layer_count-1],conv_quant,layer_count, weights)
branch7x7dbl, layer_count, x_max, x_min = conv2d_bn(branch7x7dbl,quant,calibrate,x_max,x_min,
activation_scales[layer_count-1],conv_quant, layer_count, weights)
branch7x7dbl, layer_count, x_max, x_min = conv2d_bn(branch7x7dbl,quant,calibrate,x_max,x_min,
activation_scales[layer_count-1], conv_quant,layer_count, weights)
branch7x7dbl, layer_count, x_max, x_min = conv2d_bn(branch7x7dbl,quant,calibrate,x_max,x_min,
activation_scales[layer_count-1], conv_quant,layer_count, weights)
branch7x7dbl, layer_count, x_max, x_min = conv2d_bn(branch7x7dbl,quant,calibrate,x_max,x_min,
activation_scales[layer_count-1],conv_quant, layer_count, weights)
branch_pool = avgpool_2d(x, k=3, s=1, padding='SAME')
branch_pool, layer_count, x_max, x_min = conv2d_bn(branch_pool, quant,calibrate,x_max,x_min,
activation_scales[layer_count-1],conv_quant,layer_count, weights)
x = tf.concat([branch1x1, branch7x7, branch7x7dbl, branch_pool], axis=3)
# mixed 8: 8 x 8 x 1280
branch3x3, layer_count, x_max, x_min = conv2d_bn(x, quant,calibrate,x_max,x_min,
activation_scales[layer_count-1],conv_quant,layer_count, weights)
branch3x3, layer_count, x_max, x_min = conv2d_bn(branch3x3, quant,calibrate,x_max,x_min,activation_scales[layer_count-1],
conv_quant,layer_count, weights, strides=2, padding='VALID')
branch7x7x3, layer_count, x_max, x_min = conv2d_bn(x, quant,calibrate,x_max,x_min,
activation_scales[layer_count-1],conv_quant,layer_count, weights)
branch7x7x3, layer_count, x_max, x_min = conv2d_bn(branch7x7x3,quant,calibrate,x_max,x_min,
activation_scales[layer_count-1], conv_quant,layer_count, weights)
branch7x7x3, layer_count, x_max, x_min = conv2d_bn(branch7x7x3,quant,calibrate,x_max,x_min,
activation_scales[layer_count-1], conv_quant,layer_count, weights)
branch7x7x3, layer_count, x_max, x_min = conv2d_bn(branch7x7x3,quant,calibrate,x_max,x_min,
activation_scales[layer_count-1], conv_quant,
layer_count, weights, strides=2, padding='VALID')
branch_pool = maxpool_2d(x, k=3, s=2, padding='VALID')
x = tf.concat([branch3x3, branch7x7x3, branch_pool], axis=3)
# mixed 9: 8 x 8 x 2048
for i in range(2):
branch1x1, layer_count , x_max, x_min= conv2d_bn(x, quant,calibrate,x_max,x_min,
activation_scales[layer_count-1],conv_quant,layer_count, weights)
branch3x3, layer_count, x_max, x_min = conv2d_bn(x, quant,calibrate,x_max,x_min,
activation_scales[layer_count-1],conv_quant,layer_count, weights)
branch3x3_1, layer_count, x_max, x_min = conv2d_bn(branch3x3, quant,calibrate,x_max,x_min,
activation_scales[layer_count-1],conv_quant,layer_count, weights)
branch3x3_2, layer_count, x_max, x_min = conv2d_bn(branch3x3, quant,calibrate,x_max,x_min,
activation_scales[layer_count-1],conv_quant,layer_count, weights)
branch3x3 = tf.concat([branch3x3_1, branch3x3_2], axis=3)
branch3x3dbl, layer_count, x_max, x_min = conv2d_bn(x,quant,calibrate,x_max,x_min,
activation_scales[layer_count-1], conv_quant,layer_count, weights)
branch3x3dbl, layer_count, x_max, x_min = conv2d_bn(branch3x3dbl, quant,calibrate,x_max,x_min,
activation_scales[layer_count-1],conv_quant,layer_count, weights)
branch3x3dbl_1, layer_count, x_max, x_min = conv2d_bn(branch3x3dbl,quant,calibrate,x_max,x_min,
activation_scales[layer_count-1],conv_quant,layer_count, weights)
branch3x3dbl_2, layer_count, x_max, x_min = conv2d_bn(branch3x3dbl, quant,calibrate,x_max,x_min,
activation_scales[layer_count-1],conv_quant,layer_count, weights)
branch3x3dbl = tf.concat([branch3x3dbl_1, branch3x3dbl_2], axis=3)
branch_pool = avgpool_2d(x, k=3, s=1, padding='SAME')
branch_pool, layer_count, x_max, x_min = conv2d_bn(branch_pool, quant,calibrate,x_max,x_min,
activation_scales[layer_count-1],conv_quant,layer_count, weights)
x = tf.concat([branch1x1, branch3x3, branch3x3dbl, branch_pool], axis=3)
x = avgpool_2d(x, k=8)
w, b, s = get_dense_weights(weights, 'predictions/kernel:0', 'predictions/bias:0', quant)
x = tf.reshape(x, [-1, w.get_shape().as_list()[0]])
x, x_max, x_min = denselayer(x, w, b, quant,calibrate,x_max, x_min, s, activation_scales[layer_count-1])
print(layer_count)
if calibrate:
return x_max,x_min
else:
return x
def top5_acc(pred, k=5):
Inf = 0.
results = []
for i in range(k):
results.append(pred.index(max(pred)))
pred[pred.index(max(pred))] = Inf
return results
def weight_loader(weight_file):
weights = {}
f = h5py.File(weight_file, mode='r')
try:
layers = f.attrs['layer_names']
except:
raise ValueError("weights file must contain attribution: 'layer_names'")
for layer_name in layers:
g = f[layer_name]
for weight_name in g.attrs['weight_names']:
weight_value = g[weight_name].value
name = str(weight_name).split("'")[1]
weights[name] = weight_value
return weights
def generate_global_max_min():
global_max, global_min = [],[]
for i in range(500):
global_max.append(float("-inf"))
global_min.append(float("inf"))
return global_max,global_min
def collect_stats(all_global_max,all_global_min,max_x, min_x):
all_global_max.append(max_x)
all_global_min.append(min_x)
return all_global_max, all_global_min
def get_final_max_and_min(all_global_max, all_global_min, method= 'absolute'):
if method == 'absolute':
global_max, global_min = [], []
d_max = np.array([float("-inf") for i in all_global_max[0]])
d_min = np.array([float("inf") for i in all_global_min[0]])
for j in range(len(all_global_max[0])):
for i in range(len(all_global_max)):
if d_max[j]<all_global_max[i][j]:
d_max[j]=all_global_max[i][j]
if d_min[j]>all_global_min[i][j]:
d_min[j]=all_global_min[i][j]
return d_max, d_min
if method == 'average':
max_sum = np.array([0 for i in all_global_max[0]])
min_sum = np.array([0 for i in all_global_min[0]])
for i in range(len(all_global_max)):
max_sum = max_sum + np.array(all_global_max[i])
min_sum = min_sum + np.array(all_global_min[i])
global_max, global_min = max_sum/(i+1), min_sum/(i+1)
return global_max,global_min
def get_scales(global_max, global_min, threshold):
scales = []
for i in range(global_max.size):
abs_value = max(threshold*(np.abs(global_max[i])),threshold*(np.abs(global_min[i])))
s = np.divide(abs_value, 127.)
scales.append(s)
return scales
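# Illustrative reduction (added as a sketch; this helper and its toy numbers are not part
# of the original script): per-batch activation extrema collected during calibration are
# reduced to per-layer extrema and then converted into symmetric int8 activation scales.
def _demo_calibration_to_scales():
    all_max = [[1.0, 6.0], [2.0, 4.0]]      # two batches, two layers
    all_min = [[-3.0, 0.0], [-1.0, -2.0]]
    g_max, g_min = get_final_max_and_min(all_max, all_min, method='absolute')
    # layer 0: max(|2.0|, |-3.0|) = 3.0 -> scale 3.0 / 127
    # layer 1: max(|6.0|, |-2.0|) = 6.0 -> scale 6.0 / 127
    return get_scales(g_max, g_min, threshold=1.0)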
parse = argparse.ArgumentParser(description='Command for quantization models')
parse.add_argument('--samples',type=int,default=100,help='No. of calibration data samples')
parse.add_argument('--calib_method',type=str,default='average',help='Method to find max/min')
parse.add_argument('--conv_quant_method', type=str, default = 'per_layer', help='conv quant method')
parse.add_argument('--threshold', type=float, default = 1.00, help='conv quant method')
args = parse.parse_args()
weights = {'inception': 'inception_v3_weights_tf_dim_ordering_tf_kernels.h5'}
weights = weight_loader('/Weights/{}'.format(weights['inception']))
global_max, global_min = generate_global_max_min()
X = tf.placeholder(tf.float32, [None, 299, 299, 3])
Y = tf.placeholder(tf.float32, [None, 1000])
def image_loader(pkl_file, model='vgg', dtype='float32'):
with open(pkl_file, 'rb') as f:
data = pkl.load(f)
f.close()
for im, target in tqdm.tqdm(zip(data['data'], data['target']), total=50000):
im = cv2.imdecode(np.fromstring(im, np.uint8), cv2.IMREAD_COLOR)
im = cv2.resize(im, (299, 299))
im = im.astype(dtype)
im = np.expand_dims(im, axis=0)
im /= 255.
im -= 0.5
im *= 2.
label = int(target)
yield im, label
acc = 0.
acc_top5 = 0.
with tf.device('/gpu:0'):
max_x1, min_x1 = InceptionV3(X, weights, False, True,global_max,args.conv_quant_method)
with tf.Session() as sess:
print('start calibrating')
all_global_max, all_global_min = [], []
i=0
for im, label in image_loader('/Data/val224_compressed.pkl'):
max_x, min_x = sess.run([max_x1, min_x1],feed_dict={X: im})
all_global_max, all_global_min = collect_stats(all_global_max,all_global_min,max_x, min_x)
i = i+1
if i==args.samples:
break
print('done calibrating')
all_global_max, all_global_min = get_final_max_and_min(all_global_max, all_global_min,args.calib_method)
scales = get_scales(all_global_max, all_global_min, args.threshold)
with tf.device('/gpu:0'):
logits = InceptionV3(X, weights, True, False, scales,args.conv_quant_method)
prediction = tf.nn.softmax(logits)
pred = tf.argmax(prediction, 1)
for im, label in image_loader('/Data/val224_compressed.pkl'):
t1, t5 = sess.run([pred, prediction], feed_dict={X: im})
if t1[0] == label:
acc += 1
if label in top5_acc(t5[0].tolist()):
acc_top5 += 1
print('Top1 accuracy of Inception_calibration_127: {}'.format(acc / 50000))
print('Top5 accuracy of Inception_calibration_127: {}'.format(acc_top5 / 50000))
write_list = ["conv quant method= ","accuracy_top_1= ","accuracy_top_5= ", 'No. of samples= ',
"Calibration method= ", "Threshold = "]
write_values = [args.conv_quant_method,str(acc/(50000)),str(acc_top5 /(50000)),str(args.samples),
args.calib_method, str(args.threshold)]
with open("samplvsacc_cpu.txt", "a") as myfile:
for items in range(len(write_list)):
myfile.write(write_list[items])
myfile.write(write_values[items])
myfile.write("\n")
print(write_list[items],write_values[items])
myfile.write("----------------------------------------------------------------------------------------------")
myfile.write("\n")
| 48.0784 | 127 | 0.598988 |
56ff69de5c0b77597019e7ce269a5c5386a35249
| 1,519 |
py
|
Python
|
uocsecrets/forum/urls.py
|
jeff-zqiu/uocweb
|
bb6e99a7ab01c9634f8b8446127c4bd1c0701388
|
[
"MIT"
] | 1 |
2018-09-24T13:32:06.000Z
|
2018-09-24T13:32:06.000Z
|
uocsecrets/forum/urls.py
|
jeff-zqiu/uocweb
|
bb6e99a7ab01c9634f8b8446127c4bd1c0701388
|
[
"MIT"
] | null | null | null |
uocsecrets/forum/urls.py
|
jeff-zqiu/uocweb
|
bb6e99a7ab01c9634f8b8446127c4bd1c0701388
|
[
"MIT"
] | null | null | null |
from django.urls import path, include
from . import views
from django.views.generic import TemplateView
app_name = 'forum'
urlpatterns = [
# /forum/
path('about/', TemplateView.as_view(template_name='forum/about.html'),name='about'),
path('', views.IndexView.as_view(), name = 'index'),
path('top/', views.IndexView.as_view(), name = 'top'),
path('new/', views.IndexView.as_view(), name = 'new'),
path('<str:mode>/<int:page>/', views.PageView.as_view(), name = 'page'),
# /forum/edit/
path('edit/', views.EditView.as_view(), name = 'new_post'),
path('<int:post_id>/edit/', views.EditView.as_view(), name='edit'),
path('<int:post_id>/edit/delete/', views.delete, name='delete'),
# /forum/<post_id>/
path('<int:post_id>/', views.ContentView.as_view() , name='content'),
path('<int:post_id>/clickup/', views.ClickUpView.as_view(), name='clickup'),
# /forum/<post_id>/comment/
path('<int:post_id>/comment/', views.CommentView.as_view(), name='new_comment'),
path('<int:post_id>/comment/<int:comment_id>/', views.CommentView.as_view(), name='comment'),
path('sign_up/', views.SignUpView.as_view(), name='sign_up'),
path('login/', views.LoginView.as_view(template_name='forum/login.html',
extra_context = {'next': '/forum/'}), name='login'),
path('logout/', views.LogoutView.as_view(), name = 'logout'),
# /forum/user/
path('user/<str:username>/', views.UserView.as_view(), name='user'),
]
| 41.054054 | 97 | 0.631995 |
3b85930f3ab0a4bf91bbe545bc4374025b56e750
| 6,302 |
py
|
Python
|
netcam_aioeos/topology/eos_check_lags.py
|
jeremyschulman/netcam-aioeos
|
ae8b46bcef1bbd86441342a9a282e404d597d662
|
[
"Apache-2.0"
] | null | null | null |
netcam_aioeos/topology/eos_check_lags.py
|
jeremyschulman/netcam-aioeos
|
ae8b46bcef1bbd86441342a9a282e404d597d662
|
[
"Apache-2.0"
] | null | null | null |
netcam_aioeos/topology/eos_check_lags.py
|
jeremyschulman/netcam-aioeos
|
ae8b46bcef1bbd86441342a9a282e404d597d662
|
[
"Apache-2.0"
] | 1 |
2022-01-04T19:55:12.000Z
|
2022-01-04T19:55:12.000Z
|
# Copyright 2021 Jeremy Schulman
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -----------------------------------------------------------------------------
# System Imports
# -----------------------------------------------------------------------------
from typing import TYPE_CHECKING, AsyncGenerator, Generator
from collections import defaultdict
from itertools import chain
# -----------------------------------------------------------------------------
# Public Imports
# -----------------------------------------------------------------------------
from netcad.topology.checks.check_lags import LagCheckCollection, LagCheck
from netcad.device import Device, DeviceInterface
from netcad.checks import check_result_types as trt
# -----------------------------------------------------------------------------
# Private Imports
# -----------------------------------------------------------------------------
if TYPE_CHECKING:
from netcam_aioeos.eos_dut import EOSDeviceUnderTest
# -----------------------------------------------------------------------------
# Exports
# -----------------------------------------------------------------------------
__all__ = ["eos_check_lags", "eos_check_one_lag"]
async def eos_check_lags(self, testcases: LagCheckCollection) -> AsyncGenerator:
"""
    This check-executor validates that the LAGs on the device match those as
defined in the design.
"""
dut: EOSDeviceUnderTest = self
device = dut.device
cli_lacp_resp = await dut.eapi.cli("show lacp interface")
    # The EOS data is a dictionary whose keys are the port-channel interface names.
dev_lacp_data = cli_lacp_resp["portChannels"]
for check in testcases.checks:
# The test case ID is the port-channel interface name.
if_name = check.check_id()
        # If the expected LAG does not exist, report that failure and continue
# with the next interface.
if not (lag_status := dev_lacp_data.get(if_name)):
yield trt.CheckFailNoExists(device=device, check=check)
continue
for result in eos_check_one_lag(
device=device, check=check, lag_status=lag_status
):
yield result
def eos_check_one_lag(device: Device, check: LagCheck, lag_status: dict) -> Generator:
"""
Validates the checks for one specific LAG on the device.
"""
fails = 0
po_interfaces = lag_status["interfaces"]
    # TODO: presently this code **ASSUMES** that the given LAG is enabled in the
    #       design. The test-case does account for this setting, but the code below
    #       does not check it yet. Need to implement that logic.
# TODO: each test-case interface has an `enabled` setting to account for
# whether or not the interface is expected to be in the bundled state.
# the code below is currently not checking this setting. Need to
# implement that logic.
expd_interfaces = set(
lagif.interface for lagif in check.expected_results.interfaces
)
# -------------------------------------------------------------------------
# check the interface bundle status. we will use a defaultdict-list to find
# any non-bundled values.
# -------------------------------------------------------------------------
bundle_status = defaultdict(list)
for if_name, if_data in po_interfaces.items():
bundle_status[if_data["actorPortStatus"]].append(if_name)
bundle_status.pop("bundled")
# if there are any keys remaining in the bundled_status dictionary this
# means that there are interfaces in a non bundled state. Need to report
# this as a failure.
if bundle_status:
nonb_interfacees = list(chain.from_iterable(bundle_status.values()))
yield trt.CheckFailMissingMembers(
device=device,
check=check,
expected=list(expd_interfaces),
missing=nonb_interfacees,
)
fails += 1
# -------------------------------------------------------------------------
    # Check for any missing or extra interfaces in the port-channel member list.
# -------------------------------------------------------------------------
msrd_interfaces = set(po_interfaces)
if missing_interfaces := expd_interfaces - msrd_interfaces:
yield trt.CheckFailMissingMembers(
device=device,
check=check,
field="interfaces",
expected=list(expd_interfaces),
missing=list(missing_interfaces),
)
fails += 1
if extra_interfaces := msrd_interfaces - expd_interfaces:
yield trt.CheckFailExtraMembers(
device=device,
check=check,
field="interfaces",
expected=list(expd_interfaces),
extras=list(extra_interfaces),
)
fails += 1
if fails:
return
# -------------------------------------------------------------------------
# Test case passed
# -------------------------------------------------------------------------
if_list = [iface.name for iface in sorted(map(DeviceInterface, msrd_interfaces))]
yield trt.CheckPassResult(device=device, check=check, measurement=if_list)
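# Tiny illustration (added as a sketch; the interface names are hypothetical and this
# helper is not part of the original module) of the set arithmetic used above to
# classify LAG membership results.
def _demo_member_sets():
    expected = {"Ethernet1", "Ethernet2"}
    measured = {"Ethernet2", "Ethernet3"}
    missing = expected - measured    # -> reported via CheckFailMissingMembers
    extras = measured - expected     # -> reported via CheckFailExtraMembers
    return missing, extras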
| 36.639535 | 86 | 0.570295 |
bb48fef753953e1e2be4638951fc417d3dc2a339
| 2,773 |
py
|
Python
|
azure-mgmt-web/azure/mgmt/web/models/user.py
|
jmalobicky/azure-sdk-for-python
|
61234a3d83f8fb481d1dd2386e54e888864878fd
|
[
"MIT"
] | 1 |
2022-03-30T22:39:15.000Z
|
2022-03-30T22:39:15.000Z
|
azure-mgmt-web/azure/mgmt/web/models/user.py
|
jmalobicky/azure-sdk-for-python
|
61234a3d83f8fb481d1dd2386e54e888864878fd
|
[
"MIT"
] | 54 |
2016-03-25T17:25:01.000Z
|
2018-10-22T17:27:54.000Z
|
azure-mgmt-web/azure/mgmt/web/models/user.py
|
jmalobicky/azure-sdk-for-python
|
61234a3d83f8fb481d1dd2386e54e888864878fd
|
[
"MIT"
] | 2 |
2017-01-20T18:25:46.000Z
|
2017-05-12T21:31:47.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .proxy_only_resource import ProxyOnlyResource
class User(ProxyOnlyResource):
"""User crendentials used for publishing activity.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param user_name: Username
:type user_name: str
:param publishing_user_name: Username used for publishing.
:type publishing_user_name: str
:param publishing_password: Password used for publishing.
:type publishing_password: str
:param publishing_password_hash: Password hash used for publishing.
:type publishing_password_hash: str
:param publishing_password_hash_salt: Password hash salt used for
publishing.
:type publishing_password_hash_salt: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'publishing_user_name': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'user_name': {'key': 'properties.name', 'type': 'str'},
'publishing_user_name': {'key': 'properties.publishingUserName', 'type': 'str'},
'publishing_password': {'key': 'properties.publishingPassword', 'type': 'str'},
'publishing_password_hash': {'key': 'properties.publishingPasswordHash', 'type': 'str'},
'publishing_password_hash_salt': {'key': 'properties.publishingPasswordHashSalt', 'type': 'str'},
}
def __init__(self, publishing_user_name, kind=None, user_name=None, publishing_password=None, publishing_password_hash=None, publishing_password_hash_salt=None):
super(User, self).__init__(kind=kind)
self.user_name = user_name
self.publishing_user_name = publishing_user_name
self.publishing_password = publishing_password
self.publishing_password_hash = publishing_password_hash
self.publishing_password_hash_salt = publishing_password_hash_salt
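# Illustrative construction (added as a sketch; the credential values are placeholders
# and this helper is not part of the generated SDK code): publishing_user_name is the
# only required argument, the remaining credential fields are optional.
def _example_user():
    return User(publishing_user_name='deploy-user', publishing_password='example-password')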
| 40.779412 | 165 | 0.646592 |
656b8656d705bec7dd99d22103e7fcc00a54bf46
| 1,305 |
py
|
Python
|
figures/cosmo_cost_accuracy_tradeoff.py
|
deepsphere/paper-deepsphere-iclr2020
|
9d3287bfdbb681760d5d1f8138121838138532ad
|
[
"CC-BY-4.0"
] | 2 |
2021-08-19T17:31:14.000Z
|
2021-11-14T19:21:34.000Z
|
figures/cosmo_cost_accuracy_tradeoff.py
|
deepsphere/paper-deepsphere-iclr2020
|
9d3287bfdbb681760d5d1f8138121838138532ad
|
[
"CC-BY-4.0"
] | null | null | null |
figures/cosmo_cost_accuracy_tradeoff.py
|
deepsphere/paper-deepsphere-iclr2020
|
9d3287bfdbb681760d5d1f8138121838138532ad
|
[
"CC-BY-4.0"
] | null | null | null |
#!/usr/bin/env python3
from os import path
from matplotlib import pyplot as plt
#plt.rc('font', family='Latin Modern Roman') # Latin Modern for text
plt.rc('mathtext', fontset='cm') # Computer Modern for math (default is dejavusans)
# plt.rc('text', usetex=True)
# plt.rc('text.latex', preamble=r'\usepackage{lmodern}')
neighbors = [8, 20, 40]
accuracy = [87.1, 91.3, 92.5]
speed = [185, 250, 363]
fig, ax = plt.subplots(figsize=(2.5, 2.25))
ax.plot(speed, accuracy, '.-')
ax.set_xlabel('inference time [ms]')
ax.set_ylabel('accuracy [%]')
#for x, y, k in zip(speed, accuracy, neighbors):
# align = 'right' if k == 40 else 'left'
# ax.text(x, y-1.1, f'$k={k}$', horizontalalignment=align)
ax.text(speed[0]+10, accuracy[0], f'$k={neighbors[0]}$')
ax.text(speed[1], accuracy[1]-0.7, f'$k={neighbors[1]}$')
ax.text(speed[2]+4, accuracy[2]-1., f'$k={neighbors[2]}$', horizontalalignment='right')
#ax.set_ylim(86, 93)
# baselines = [
# (104, 54.2, '2D CNN baseline'),
# (185, 62.1, 'DSv1 CNN variant'),
# (185, 83.8, 'DSv1 FCN variant'),
# ]
# for x, y, label in baselines:
# ax.scatter(x, y, c=(1, 0, 0), marker='x')
# ax.text(x, y+0.5, label, horizontalalignment='left')
fig.tight_layout()
filename = path.splitext(path.basename(__file__))[0] + '.pdf'
fig.savefig(filename)
| 33.461538 | 87 | 0.63908 |