ext | sha | content |
---|---|---|
py | b40512fb396b56b153817e427054c45af66aae03 | """Testing settings.
With these settings, tests run faster.
"""
from .base import * # NOQA
from .base import env
# Base
DEBUG = False
SECRET_KEY = env("DJANGO_SECRET_KEY", default="7lEaACt4wsCj8JbXYgQLf4BmdG5QbuHTMYUGir2Gc1GHqqb2Pv8w9iXwwlIIviI2")
TEST_RUNNER = "django.test.runner.DiscoverRunner"
# Cache
CACHES = {
"default": {
"BACKEND": "django.core.cache.backends.locmem.LocMemCache",
"LOCATION": ""
}
}
# Passwords
PASSWORD_HASHERS = ["django.contrib.auth.hashers.MD5PasswordHasher"]
# Templates
TEMPLATES[0]["OPTIONS"]["debug"] = DEBUG # NOQA
TEMPLATES[0]["OPTIONS"]["loaders"] = [ # NOQA
(
"django.template.loaders.cached.Loader",
[
"django.template.loaders.filesystem.Loader",
"django.template.loaders.app_directories.Loader",
],
)
]
# Email
EMAIL_BACKEND = "django.core.mail.backends.locmem.EmailBackend"
EMAIL_HOST = "localhost"
EMAIL_PORT = 1025
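# Usage sketch (assumption: the project follows the cookiecutter-django layout these
# settings resemble, so the module path below is illustrative). Point Django at this
# module when running the test suite, e.g.
#   DJANGO_SETTINGS_MODULE=config.settings.test pytest
# With the locmem email backend configured above, sent messages are collected in
# django.core.mail.outbox, so tests can assert on outgoing mail without an SMTP server.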
|
py | b405130879dabf85d8a3b394051ace9795f8f209 | import numpy as np
import os
import sys
import cv2
import json
import random
import copy
# set relative paths
sys.path.append(os.path.join(os.path.dirname(__file__), "../"))
from core.utils import *
from core.node import *
from core.data_types import FeatureTypes, IDContainer
sys.path.append('/root/LPC_MOT/learnable_proposal_classifier/proto/')
import online_tracking_results_pb2
import logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)-8s %(message)s',)
mlog = logging.getLogger('myLogger')
level = logging.getLevelName('INFO')
mlog.setLevel(level)
def load_detections_from_json(detection_file, camera_name, confidence_threshold,
start_frame=-1, end_frame=100000000, do_augmentation=False, skip_frames = 1, models=None):
mlog.info("Loading detection file {}".format(detection_file))
det_nodes = []
_, ext = os.path.splitext(detection_file)
if ext == '.pb' or ext == '.det' or ext == '.filtering':
mlog.info("Please implement JSON parser")
raise NotImplementedError
elif ext == '.json':
detections_from_json = json.load(open(detection_file, 'r'))
detections_from_json = {int(k):v for k, v in detections_from_json.items()}
if do_augmentation:
detections_all = [[k, v] for k, vs in detections_from_json.items() for v in vs]
remove_ratio = random.uniform(5, 20)/ 100.0
remove_num = int(len(detections_all) * remove_ratio)
remove_index = [random.randint(0, len(detections_all)-1) for _ in range(remove_num)]
detections_left = [detections_all[ii] for ii in range(len(detections_all)) if ii not in remove_index]
mlog.info("detection num reduce from {} -> {} after randomly drop".format(len(detections_all), len(detections_left)))
detections_from_json = {}
for det in detections_left:
frame, box = det[0], det[1]
detections_from_json.setdefault(frame, []).append(box)
for frame_index in sorted(detections_from_json.keys()):
if start_frame <= frame_index < end_frame and frame_index % skip_frames == 0:
humans = detections_from_json[frame_index]
for i, human in enumerate(humans):
node = Node(len(det_nodes))
box = human[0]
# in case box dimension is too small
if box[2] < 4.0 or box[3] < 4.0:
continue
node.init(camera_name, int(frame_index / skip_frames), {
FeatureTypes.ID: IDContainer(),
FeatureTypes.Box2D: box,
FeatureTypes.OrigBox2D: box,
FeatureTypes.DetectionScore: 1.0,
FeatureTypes.ReID: human[2],
}, models=None)
det_nodes.append(node)
else:
raise Exception('unknown detection format {0}'.format(ext))
mlog.info("loaded {} number of detections".format(len(det_nodes)))
return det_nodes
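# Note on the expected input (assumption, inferred from the parsing above): the JSON
# detection file maps a frame index to a list of detections, where each detection is a
# list whose first element is a box [x, y, w, h] and whose third element is the ReID
# embedding; the middle element is ignored by this loader (DetectionScore is hard-coded
# to 1.0). An illustrative entry:
#   {"0": [[[100.0, 50.0, 40.0, 120.0], 0.98, [0.01, 0.23, ...]]], "1": [...]}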
def save_nodes_to_json(nodes, camera_view, output_file, skipped_frames=1):
data = {}
for node in nodes:
tid = node.tid
frames_ver = node[camera_view].timestamps
frames = [int(frame) for frame in frames_ver]
if len(frames) < 2:
continue
boxes = node[camera_view].feature(FeatureTypes.Box2D)
for frame, box in zip(frames, boxes):
data.setdefault(frame, [])
data[frame].append([ box, tid, 0 ])
# fill the skipped frames for viz
for fill in range(max(0, frame - skipped_frames + 1), frame):
data.setdefault(fill, [])
data[fill].append([ box, tid, 0 ])
with open(output_file, 'w') as fp:
fp.write(json.dumps(data, indent=2))
def save_nodes_online_pbs(nodes, camera_view, tracking_file, result_id_type='tracklet_index'):
tracks_from_pb = online_tracking_results_pb2.Tracks()
det_num_totals = 0
for idx, node in enumerate(sorted(nodes, key = lambda x: x.mv_start)):
tid = node.tid
frames = node[camera_view].timestamps
if len(frames) < 2:
continue
boxes = node[camera_view].feature(FeatureTypes.Box2D)
app_feats = list(np.array(node[camera_view].feature(FeatureTypes.ReID), dtype=np.float32))
track = tracks_from_pb.tracks.add()
if app_feats:
feat = np.mean(app_feats, axis = 0)
del track.features.features[:]
tf = track.features.features.add()
for d in feat:
tf.feats.append(d)
track.tracklet_id = int(node.tid)
track.track_id = 'single_view_track_' + str(node.tid)
for frame, box, app in zip(frames, boxes, app_feats):
detection = track.tracked_detections.add()
detection.frame_index = frame
detection.box_x = int(box[0])
detection.box_y = int(box[1])
detection.box_width = int(box[2])
detection.box_height = int(box[3])
det_num_totals += 1
#cos_simi = np.dot(vec1, vec2)/(np.linalg.norm(vec1)*np.linalg.norm(vec2))
print("det_num_totals is {}".format(det_num_totals))
with open(tracking_file, 'wb') as f:
f.write(tracks_from_pb.SerializeToString())
|
py | b405130b38d3f2bb4a1d39b91738f77ad0df1313 | # Borrow a lot from openai baselines:
# https://github.com/openai/baselines/blob/master/baselines/common/atari_wrappers.py
import gym
from collections import deque
from ding.envs import NoopResetWrapper, MaxAndSkipWrapper, EpisodicLifeWrapper, FireResetWrapper, WarpFrameWrapper, ScaledFloatFrameWrapper, \
ClipRewardWrapper, FrameStackWrapper
def wrap_deepmind(env_id, episode_life=True, clip_rewards=True, frame_stack=4, scale=True, warp_frame=True):
"""Configure environment for DeepMind-style Atari. The observation is
channel-first: (c, h, w) instead of (h, w, c).
:param str env_id: the atari environment id.
:param bool episode_life: wrap the episode life wrapper.
:param bool clip_rewards: wrap the reward clipping wrapper.
:param int frame_stack: wrap the frame stacking wrapper.
:param bool scale: wrap the scaling observation wrapper.
:param bool warp_frame: wrap the grayscale + resize observation wrapper.
:return: the wrapped atari environment.
"""
assert 'NoFrameskip' in env_id
env = gym.make(env_id)
env = NoopResetWrapper(env, noop_max=30)
env = MaxAndSkipWrapper(env, skip=4)
if episode_life:
env = EpisodicLifeWrapper(env)
if 'FIRE' in env.unwrapped.get_action_meanings():
env = FireResetWrapper(env)
if warp_frame:
env = WarpFrameWrapper(env)
if scale:
env = ScaledFloatFrameWrapper(env)
if clip_rewards:
env = ClipRewardWrapper(env)
if frame_stack:
env = FrameStackWrapper(env, frame_stack)
return env
def wrap_deepmind_mr(env_id, episode_life=True, clip_rewards=True, frame_stack=4, scale=True, warp_frame=True):
"""Configure environment for DeepMind-style Atari. The observation is
channel-first: (c, h, w) instead of (h, w, c).
:param str env_id: the atari environment id.
:param bool episode_life: wrap the episode life wrapper.
:param bool clip_rewards: wrap the reward clipping wrapper.
:param int frame_stack: wrap the frame stacking wrapper.
:param bool scale: wrap the scaling observation wrapper.
:param bool warp_frame: wrap the grayscale + resize observation wrapper.
:return: the wrapped atari environment.
"""
    assert 'MontezumaRevenge' in env_id
env = gym.make(env_id)
env = NoopResetWrapper(env, noop_max=30)
env = MaxAndSkipWrapper(env, skip=4)
if episode_life:
env = EpisodicLifeWrapper(env)
if 'FIRE' in env.unwrapped.get_action_meanings():
env = FireResetWrapper(env)
if warp_frame:
env = WarpFrameWrapper(env)
if scale:
env = ScaledFloatFrameWrapper(env)
if clip_rewards:
env = ClipRewardWrapper(env)
if frame_stack:
env = FrameStackWrapper(env, frame_stack)
return env
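# Minimal usage sketch (assumes gym with the Atari ROMs and DI-engine's `ding` wrappers
# are installed; the env id is illustrative):
#   env = wrap_deepmind('PongNoFrameskip-v4')
#   obs = env.reset()  # stacked, channel-first frames, typically (4, 84, 84) with the defaults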
|
py | b40513200cc34014b35b7fde43f20df2d63e519f | #
# Copyright (c) 2019 Google LLC.
# Copyright (c) 2016-2018 Nest Labs, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nestlabs/gateway/v1/trait_api.proto
from __future__ import absolute_import
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import any_pb2 as google_dot_protobuf_dot_any__pb2
from google.protobuf import field_mask_pb2 as google_dot_protobuf_dot_field__mask__pb2
from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2
from google.rpc import status_pb2 as google_dot_rpc_dot_status__pb2
from nest.messages import schema_version_pb2 as nest_dot_messages_dot_schema__version__pb2
from wdl import data_access_pb2 as wdl_dot_data__access__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='nestlabs/gateway/v1/trait_api.proto',
package='nestlabs.gateway.v1',
syntax='proto3',
serialized_pb=_b('\n#nestlabs/gateway/v1/trait_api.proto\x12\x13nestlabs.gateway.v1\x1a\x19google/protobuf/any.proto\x1a google/protobuf/field_mask.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x17google/rpc/status.proto\x1a\"nest/messages/schema_version.proto\x1a\x15wdl/data_access.proto\"\xbe\x02\n\x16TraitStateNotification\x12#\n\x05state\x18\x01 \x01(\x0b\x32\x14.google.protobuf.Any\x12.\n\nstate_mask\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.FieldMask\x12\x15\n\rstate_version\x18\x03 \x01(\x04\x12]\n\x14notification_context\x18\x04 \x01(\x0e\x32?.nestlabs.gateway.v1.TraitStateNotification.NotificationContext\"Y\n\x13NotificationContext\x12$\n NOTIFICATION_CONTEXT_UNSPECIFIED\x10\x00\x12\x1c\n\x18INITIAL_OBSERVE_RESPONSE\x10\x01\"\xeb\x01\n\x17TraitEventsNotification\x12*\n\x06\x65vents\x18\x01 \x03(\x0b\x32\x1a.nestlabs.gateway.v1.Event\x12\x39\n\x15request_utc_timestamp\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12)\n!request_system_time_offset_millis\x18\x03 \x01(\x04\x12>\n\x1aservice_received_timestamp\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"\xd2\x03\n\x05\x45vent\x12\"\n\x04\x64\x61ta\x18\x01 \x01(\x0b\x32\x14.google.protobuf.Any\x12(\n\nimportance\x18\x02 \x01(\x0e\x32\x14.wdl.EventImportance\x12\x10\n\x08\x65vent_id\x18\x03 \x01(\x04\x12\x36\n\x18related_event_importance\x18\x04 \x01(\x0e\x32\x14.wdl.EventImportance\x12\x18\n\x10related_event_id\x18\x05 \x01(\x04\x12\x31\n\rutc_timestamp\x18\x06 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12!\n\x19system_time_offset_millis\x18\x07 \x01(\x04\x12\x1b\n\x13relayby_resource_id\x18\x08 \x01(\t\x12\x1b\n\x13subject_resource_id\x18\t \x01(\t\x12\x19\n\x11subject_pairer_id\x18\n \x01(\t\x12\x19\n\x11subject_type_name\x18\x0b \x01(\t\x12\x1b\n\x13subject_instance_id\x18\x0c \x01(\t\x12\x34\n\x0eschema_version\x18\r \x01(\x0b\x32\x1c.nest.messages.SchemaVersion\"L\n\x0cTraitRequest\x12\x13\n\x0bresource_id\x18\x01 \x01(\t\x12\x13\n\x0btrait_label\x18\x02 \x01(\t\x12\x12\n\nrequest_id\x18\x03 \x01(\t\"\xd0\x03\n\x0eTraitOperation\x12\x38\n\rtrait_request\x18\x01 \x01(\x0b\x32!.nestlabs.gateway.v1.TraitRequest\x12;\n\x08progress\x18\x02 \x01(\x0e\x32).nestlabs.gateway.v1.TraitOperation.State\x12\"\n\x06status\x18\x03 \x01(\x0b\x32\x12.google.rpc.Status\x12.\n\x05\x65vent\x18\x04 \x01(\x0b\x32\x1f.nestlabs.gateway.v1.TraitEvent\x12\x1e\n\x16\x61\x63\x63\x65pted_state_version\x18\x05 \x01(\x04\x12\x34\n\x07\x63ommand\x18\x06 \x01(\x0b\x32!.nestlabs.gateway.v1.TraitCommandH\x00\x12>\n\x06update\x18\x07 \x01(\x0b\x32,.nestlabs.gateway.v1.TraitUpdateStateRequestH\x00\"R\n\x05State\x12\x15\n\x11STATE_UNSPECIFIED\x10\x00\x12\n\n\x06QUEUED\x10\x01\x12\x0b\n\x07PENDING\x10\x02\x12\x0b\n\x07STARTED\x10\x03\x12\x0c\n\x08\x43OMPLETE\x10\x04\x42\t\n\x07request\"\xdb\x01\n\x13TraitObserveRequest\x12\x38\n\rtrait_request\x18\x01 \x01(\x0b\x32!.nestlabs.gateway.v1.TraitRequest\x12.\n\nfield_mask\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.FieldMask\x12\x15\n\rstate_version\x18\x03 \x01(\x04\x12\x1f\n\x17include_confirmed_state\x18\x04 \x01(\x08\x12\"\n\x1ainclude_pending_operations\x18\x05 \x01(\x08\"\xd0\x02\n\x14TraitObserveResponse\x12\x38\n\rtrait_request\x18\x01 \x01(\x0b\x32!.nestlabs.gateway.v1.TraitRequest\x12\x43\n\x0e\x61\x63\x63\x65pted_state\x18\x02 \x01(\x0b\x32+.nestlabs.gateway.v1.TraitStateNotification\x12\x32\n\ntrait_info\x18\x03 \x01(\x0b\x32\x1e.nestlabs.gateway.v1.TraitInfo\x12\x44\n\x0f\x63onfirmed_state\x18\x04 \x01(\x0b\x32+.nestlabs.gateway.v1.TraitStateNotification\x12?\n\x12pending_operations\x18\x05 
\x03(\x0b\x32#.nestlabs.gateway.v1.TraitOperation\"\xdc\x01\n\x14TraitGetStateRequest\x12\x38\n\rtrait_request\x18\x01 \x01(\x0b\x32!.nestlabs.gateway.v1.TraitRequest\x12.\n\nfield_mask\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.FieldMask\x12\x15\n\rstate_version\x18\x03 \x01(\x04\x12\x1f\n\x17include_confirmed_state\x18\x04 \x01(\x08\x12\"\n\x1ainclude_pending_operations\x18\x05 \x01(\x08\"\x90\x02\n\x15TraitGetStateResponse\x12\x38\n\rtrait_request\x18\x01 \x01(\x0b\x32!.nestlabs.gateway.v1.TraitRequest\x12\x43\n\x0e\x61\x63\x63\x65pted_state\x18\x02 \x01(\x0b\x32+.nestlabs.gateway.v1.TraitStateNotification\x12\x32\n\ntrait_info\x18\x03 \x01(\x0b\x32\x1e.nestlabs.gateway.v1.TraitInfo\x12\x44\n\x0f\x63onfirmed_state\x18\x04 \x01(\x0b\x32+.nestlabs.gateway.v1.TraitStateNotification\"U\n\tTraitInfo\x12\x12\n\ntrait_type\x18\x01 \x01(\t\x12\x34\n\x0eschema_version\x18\x02 \x01(\x0b\x32\x1c.nest.messages.SchemaVersion\"\xf5\x01\n\x17TraitUpdateStateRequest\x12\x38\n\rtrait_request\x18\x01 \x01(\x0b\x32!.nestlabs.gateway.v1.TraitRequest\x12#\n\x05state\x18\x02 \x01(\x0b\x32\x14.google.protobuf.Any\x12.\n\nstate_mask\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.FieldMask\x12\x15\n\rmatch_version\x18\x04 \x01(\x04\x12\x34\n\x0eschema_version\x18\x05 \x01(\x0b\x32\x1c.nest.messages.SchemaVersion\"\xd2\x01\n\x12TraitNotifyRequest\x12\x38\n\rtrait_request\x18\x01 \x01(\x0b\x32!.nestlabs.gateway.v1.TraitRequest\x12\x44\n\x0f\x63onfirmed_state\x18\x02 \x01(\x0b\x32+.nestlabs.gateway.v1.TraitStateNotification\x12<\n\x06\x65vents\x18\x03 \x01(\x0b\x32,.nestlabs.gateway.v1.TraitEventsNotification\"O\n\x13TraitNotifyResponse\x12\x38\n\rtrait_request\x18\x01 \x01(\x0b\x32!.nestlabs.gateway.v1.TraitRequest\"1\n\nTraitEvent\x12#\n\x05\x65vent\x18\x01 \x01(\x0b\x32\x14.google.protobuf.Any\"\x9a\x02\n\x0cTraitCommand\x12\x38\n\rtrait_request\x18\x01 \x01(\x0b\x32!.nestlabs.gateway.v1.TraitRequest\x12%\n\x07\x63ommand\x18\x02 \x01(\x0b\x32\x14.google.protobuf.Any\x12/\n\x0b\x65xpiry_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x15\n\rauthenticator\x18\x04 \x01(\x0c\x12\x15\n\rmatch_version\x18\x05 \x01(\x04\x12\x34\n\x0eschema_version\x18\x06 \x01(\x0b\x32\x1c.nest.messages.SchemaVersion\x12\x14\n\x0cnamespace_id\x18\x07 \x01(\t\"<\n\x11WeaveStatusReport\x12\x12\n\nprofile_id\x18\x01 \x01(\r\x12\x13\n\x0bstatus_code\x18\x02 \x01(\r2\xf3\x03\n\x08TraitApi\x12\x62\n\x07Observe\x12(.nestlabs.gateway.v1.TraitObserveRequest\x1a).nestlabs.gateway.v1.TraitObserveResponse\"\x00\x30\x01\x12\x63\n\x08GetState\x12).nestlabs.gateway.v1.TraitGetStateRequest\x1a*.nestlabs.gateway.v1.TraitGetStateResponse\"\x00\x12\x64\n\x0bUpdateState\x12,.nestlabs.gateway.v1.TraitUpdateStateRequest\x1a#.nestlabs.gateway.v1.TraitOperation\"\x00\x30\x01\x12]\n\x06Notify\x12\'.nestlabs.gateway.v1.TraitNotifyRequest\x1a(.nestlabs.gateway.v1.TraitNotifyResponse\"\x00\x12Y\n\x0bSendCommand\x12!.nestlabs.gateway.v1.TraitCommand\x1a#.nestlabs.gateway.v1.TraitOperation\"\x00\x30\x01\x42!\n\x17\x63om.nestlabs.gateway.v1P\x01\xa2\x02\x03PCLb\x06proto3')
,
dependencies=[google_dot_protobuf_dot_any__pb2.DESCRIPTOR,google_dot_protobuf_dot_field__mask__pb2.DESCRIPTOR,google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,google_dot_rpc_dot_status__pb2.DESCRIPTOR,nest_dot_messages_dot_schema__version__pb2.DESCRIPTOR,wdl_dot_data__access__pb2.DESCRIPTOR,])
_TRAITSTATENOTIFICATION_NOTIFICATIONCONTEXT = _descriptor.EnumDescriptor(
name='NotificationContext',
full_name='nestlabs.gateway.v1.TraitStateNotification.NotificationContext',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='NOTIFICATION_CONTEXT_UNSPECIFIED', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INITIAL_OBSERVE_RESPONSE', index=1, number=1,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=468,
serialized_end=557,
)
_sym_db.RegisterEnumDescriptor(_TRAITSTATENOTIFICATION_NOTIFICATIONCONTEXT)
_TRAITOPERATION_STATE = _descriptor.EnumDescriptor(
name='State',
full_name='nestlabs.gateway.v1.TraitOperation.State',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='STATE_UNSPECIFIED', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='QUEUED', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='PENDING', index=2, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='STARTED', index=3, number=3,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='COMPLETE', index=4, number=4,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=1716,
serialized_end=1798,
)
_sym_db.RegisterEnumDescriptor(_TRAITOPERATION_STATE)
_TRAITSTATENOTIFICATION = _descriptor.Descriptor(
name='TraitStateNotification',
full_name='nestlabs.gateway.v1.TraitStateNotification',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='state', full_name='nestlabs.gateway.v1.TraitStateNotification.state', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='state_mask', full_name='nestlabs.gateway.v1.TraitStateNotification.state_mask', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='state_version', full_name='nestlabs.gateway.v1.TraitStateNotification.state_version', index=2,
number=3, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='notification_context', full_name='nestlabs.gateway.v1.TraitStateNotification.notification_context', index=3,
number=4, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
_TRAITSTATENOTIFICATION_NOTIFICATIONCONTEXT,
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=239,
serialized_end=557,
)
_TRAITEVENTSNOTIFICATION = _descriptor.Descriptor(
name='TraitEventsNotification',
full_name='nestlabs.gateway.v1.TraitEventsNotification',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='events', full_name='nestlabs.gateway.v1.TraitEventsNotification.events', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='request_utc_timestamp', full_name='nestlabs.gateway.v1.TraitEventsNotification.request_utc_timestamp', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='request_system_time_offset_millis', full_name='nestlabs.gateway.v1.TraitEventsNotification.request_system_time_offset_millis', index=2,
number=3, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='service_received_timestamp', full_name='nestlabs.gateway.v1.TraitEventsNotification.service_received_timestamp', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=560,
serialized_end=795,
)
_EVENT = _descriptor.Descriptor(
name='Event',
full_name='nestlabs.gateway.v1.Event',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='data', full_name='nestlabs.gateway.v1.Event.data', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='importance', full_name='nestlabs.gateway.v1.Event.importance', index=1,
number=2, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='event_id', full_name='nestlabs.gateway.v1.Event.event_id', index=2,
number=3, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='related_event_importance', full_name='nestlabs.gateway.v1.Event.related_event_importance', index=3,
number=4, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='related_event_id', full_name='nestlabs.gateway.v1.Event.related_event_id', index=4,
number=5, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='utc_timestamp', full_name='nestlabs.gateway.v1.Event.utc_timestamp', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='system_time_offset_millis', full_name='nestlabs.gateway.v1.Event.system_time_offset_millis', index=6,
number=7, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='relayby_resource_id', full_name='nestlabs.gateway.v1.Event.relayby_resource_id', index=7,
number=8, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='subject_resource_id', full_name='nestlabs.gateway.v1.Event.subject_resource_id', index=8,
number=9, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='subject_pairer_id', full_name='nestlabs.gateway.v1.Event.subject_pairer_id', index=9,
number=10, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='subject_type_name', full_name='nestlabs.gateway.v1.Event.subject_type_name', index=10,
number=11, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='subject_instance_id', full_name='nestlabs.gateway.v1.Event.subject_instance_id', index=11,
number=12, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='schema_version', full_name='nestlabs.gateway.v1.Event.schema_version', index=12,
number=13, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=798,
serialized_end=1264,
)
_TRAITREQUEST = _descriptor.Descriptor(
name='TraitRequest',
full_name='nestlabs.gateway.v1.TraitRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='resource_id', full_name='nestlabs.gateway.v1.TraitRequest.resource_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='trait_label', full_name='nestlabs.gateway.v1.TraitRequest.trait_label', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='request_id', full_name='nestlabs.gateway.v1.TraitRequest.request_id', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1266,
serialized_end=1342,
)
_TRAITOPERATION = _descriptor.Descriptor(
name='TraitOperation',
full_name='nestlabs.gateway.v1.TraitOperation',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='trait_request', full_name='nestlabs.gateway.v1.TraitOperation.trait_request', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='progress', full_name='nestlabs.gateway.v1.TraitOperation.progress', index=1,
number=2, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='status', full_name='nestlabs.gateway.v1.TraitOperation.status', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='event', full_name='nestlabs.gateway.v1.TraitOperation.event', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='accepted_state_version', full_name='nestlabs.gateway.v1.TraitOperation.accepted_state_version', index=4,
number=5, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='command', full_name='nestlabs.gateway.v1.TraitOperation.command', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='update', full_name='nestlabs.gateway.v1.TraitOperation.update', index=6,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
_TRAITOPERATION_STATE,
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='request', full_name='nestlabs.gateway.v1.TraitOperation.request',
index=0, containing_type=None, fields=[]),
],
serialized_start=1345,
serialized_end=1809,
)
_TRAITOBSERVEREQUEST = _descriptor.Descriptor(
name='TraitObserveRequest',
full_name='nestlabs.gateway.v1.TraitObserveRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='trait_request', full_name='nestlabs.gateway.v1.TraitObserveRequest.trait_request', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='field_mask', full_name='nestlabs.gateway.v1.TraitObserveRequest.field_mask', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='state_version', full_name='nestlabs.gateway.v1.TraitObserveRequest.state_version', index=2,
number=3, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='include_confirmed_state', full_name='nestlabs.gateway.v1.TraitObserveRequest.include_confirmed_state', index=3,
number=4, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='include_pending_operations', full_name='nestlabs.gateway.v1.TraitObserveRequest.include_pending_operations', index=4,
number=5, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1812,
serialized_end=2031,
)
_TRAITOBSERVERESPONSE = _descriptor.Descriptor(
name='TraitObserveResponse',
full_name='nestlabs.gateway.v1.TraitObserveResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='trait_request', full_name='nestlabs.gateway.v1.TraitObserveResponse.trait_request', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='accepted_state', full_name='nestlabs.gateway.v1.TraitObserveResponse.accepted_state', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='trait_info', full_name='nestlabs.gateway.v1.TraitObserveResponse.trait_info', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='confirmed_state', full_name='nestlabs.gateway.v1.TraitObserveResponse.confirmed_state', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='pending_operations', full_name='nestlabs.gateway.v1.TraitObserveResponse.pending_operations', index=4,
number=5, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2034,
serialized_end=2370,
)
_TRAITGETSTATEREQUEST = _descriptor.Descriptor(
name='TraitGetStateRequest',
full_name='nestlabs.gateway.v1.TraitGetStateRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='trait_request', full_name='nestlabs.gateway.v1.TraitGetStateRequest.trait_request', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='field_mask', full_name='nestlabs.gateway.v1.TraitGetStateRequest.field_mask', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='state_version', full_name='nestlabs.gateway.v1.TraitGetStateRequest.state_version', index=2,
number=3, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='include_confirmed_state', full_name='nestlabs.gateway.v1.TraitGetStateRequest.include_confirmed_state', index=3,
number=4, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='include_pending_operations', full_name='nestlabs.gateway.v1.TraitGetStateRequest.include_pending_operations', index=4,
number=5, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2373,
serialized_end=2593,
)
_TRAITGETSTATERESPONSE = _descriptor.Descriptor(
name='TraitGetStateResponse',
full_name='nestlabs.gateway.v1.TraitGetStateResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='trait_request', full_name='nestlabs.gateway.v1.TraitGetStateResponse.trait_request', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='accepted_state', full_name='nestlabs.gateway.v1.TraitGetStateResponse.accepted_state', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='trait_info', full_name='nestlabs.gateway.v1.TraitGetStateResponse.trait_info', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='confirmed_state', full_name='nestlabs.gateway.v1.TraitGetStateResponse.confirmed_state', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2596,
serialized_end=2868,
)
_TRAITINFO = _descriptor.Descriptor(
name='TraitInfo',
full_name='nestlabs.gateway.v1.TraitInfo',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='trait_type', full_name='nestlabs.gateway.v1.TraitInfo.trait_type', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='schema_version', full_name='nestlabs.gateway.v1.TraitInfo.schema_version', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2870,
serialized_end=2955,
)
_TRAITUPDATESTATEREQUEST = _descriptor.Descriptor(
name='TraitUpdateStateRequest',
full_name='nestlabs.gateway.v1.TraitUpdateStateRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='trait_request', full_name='nestlabs.gateway.v1.TraitUpdateStateRequest.trait_request', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='state', full_name='nestlabs.gateway.v1.TraitUpdateStateRequest.state', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='state_mask', full_name='nestlabs.gateway.v1.TraitUpdateStateRequest.state_mask', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='match_version', full_name='nestlabs.gateway.v1.TraitUpdateStateRequest.match_version', index=3,
number=4, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='schema_version', full_name='nestlabs.gateway.v1.TraitUpdateStateRequest.schema_version', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2958,
serialized_end=3203,
)
_TRAITNOTIFYREQUEST = _descriptor.Descriptor(
name='TraitNotifyRequest',
full_name='nestlabs.gateway.v1.TraitNotifyRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='trait_request', full_name='nestlabs.gateway.v1.TraitNotifyRequest.trait_request', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='confirmed_state', full_name='nestlabs.gateway.v1.TraitNotifyRequest.confirmed_state', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='events', full_name='nestlabs.gateway.v1.TraitNotifyRequest.events', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3206,
serialized_end=3416,
)
_TRAITNOTIFYRESPONSE = _descriptor.Descriptor(
name='TraitNotifyResponse',
full_name='nestlabs.gateway.v1.TraitNotifyResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='trait_request', full_name='nestlabs.gateway.v1.TraitNotifyResponse.trait_request', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3418,
serialized_end=3497,
)
_TRAITEVENT = _descriptor.Descriptor(
name='TraitEvent',
full_name='nestlabs.gateway.v1.TraitEvent',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='event', full_name='nestlabs.gateway.v1.TraitEvent.event', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3499,
serialized_end=3548,
)
_TRAITCOMMAND = _descriptor.Descriptor(
name='TraitCommand',
full_name='nestlabs.gateway.v1.TraitCommand',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='trait_request', full_name='nestlabs.gateway.v1.TraitCommand.trait_request', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='command', full_name='nestlabs.gateway.v1.TraitCommand.command', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='expiry_time', full_name='nestlabs.gateway.v1.TraitCommand.expiry_time', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='authenticator', full_name='nestlabs.gateway.v1.TraitCommand.authenticator', index=3,
number=4, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='match_version', full_name='nestlabs.gateway.v1.TraitCommand.match_version', index=4,
number=5, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='schema_version', full_name='nestlabs.gateway.v1.TraitCommand.schema_version', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='namespace_id', full_name='nestlabs.gateway.v1.TraitCommand.namespace_id', index=6,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3551,
serialized_end=3833,
)
_WEAVESTATUSREPORT = _descriptor.Descriptor(
name='WeaveStatusReport',
full_name='nestlabs.gateway.v1.WeaveStatusReport',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='profile_id', full_name='nestlabs.gateway.v1.WeaveStatusReport.profile_id', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='status_code', full_name='nestlabs.gateway.v1.WeaveStatusReport.status_code', index=1,
number=2, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3835,
serialized_end=3895,
)
_TRAITSTATENOTIFICATION.fields_by_name['state'].message_type = google_dot_protobuf_dot_any__pb2._ANY
_TRAITSTATENOTIFICATION.fields_by_name['state_mask'].message_type = google_dot_protobuf_dot_field__mask__pb2._FIELDMASK
_TRAITSTATENOTIFICATION.fields_by_name['notification_context'].enum_type = _TRAITSTATENOTIFICATION_NOTIFICATIONCONTEXT
_TRAITSTATENOTIFICATION_NOTIFICATIONCONTEXT.containing_type = _TRAITSTATENOTIFICATION
_TRAITEVENTSNOTIFICATION.fields_by_name['events'].message_type = _EVENT
_TRAITEVENTSNOTIFICATION.fields_by_name['request_utc_timestamp'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_TRAITEVENTSNOTIFICATION.fields_by_name['service_received_timestamp'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_EVENT.fields_by_name['data'].message_type = google_dot_protobuf_dot_any__pb2._ANY
_EVENT.fields_by_name['importance'].enum_type = wdl_dot_data__access__pb2._EVENTIMPORTANCE
_EVENT.fields_by_name['related_event_importance'].enum_type = wdl_dot_data__access__pb2._EVENTIMPORTANCE
_EVENT.fields_by_name['utc_timestamp'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_EVENT.fields_by_name['schema_version'].message_type = nest_dot_messages_dot_schema__version__pb2._SCHEMAVERSION
_TRAITOPERATION.fields_by_name['trait_request'].message_type = _TRAITREQUEST
_TRAITOPERATION.fields_by_name['progress'].enum_type = _TRAITOPERATION_STATE
_TRAITOPERATION.fields_by_name['status'].message_type = google_dot_rpc_dot_status__pb2._STATUS
_TRAITOPERATION.fields_by_name['event'].message_type = _TRAITEVENT
_TRAITOPERATION.fields_by_name['command'].message_type = _TRAITCOMMAND
_TRAITOPERATION.fields_by_name['update'].message_type = _TRAITUPDATESTATEREQUEST
_TRAITOPERATION_STATE.containing_type = _TRAITOPERATION
_TRAITOPERATION.oneofs_by_name['request'].fields.append(
_TRAITOPERATION.fields_by_name['command'])
_TRAITOPERATION.fields_by_name['command'].containing_oneof = _TRAITOPERATION.oneofs_by_name['request']
_TRAITOPERATION.oneofs_by_name['request'].fields.append(
_TRAITOPERATION.fields_by_name['update'])
_TRAITOPERATION.fields_by_name['update'].containing_oneof = _TRAITOPERATION.oneofs_by_name['request']
_TRAITOBSERVEREQUEST.fields_by_name['trait_request'].message_type = _TRAITREQUEST
_TRAITOBSERVEREQUEST.fields_by_name['field_mask'].message_type = google_dot_protobuf_dot_field__mask__pb2._FIELDMASK
_TRAITOBSERVERESPONSE.fields_by_name['trait_request'].message_type = _TRAITREQUEST
_TRAITOBSERVERESPONSE.fields_by_name['accepted_state'].message_type = _TRAITSTATENOTIFICATION
_TRAITOBSERVERESPONSE.fields_by_name['trait_info'].message_type = _TRAITINFO
_TRAITOBSERVERESPONSE.fields_by_name['confirmed_state'].message_type = _TRAITSTATENOTIFICATION
_TRAITOBSERVERESPONSE.fields_by_name['pending_operations'].message_type = _TRAITOPERATION
_TRAITGETSTATEREQUEST.fields_by_name['trait_request'].message_type = _TRAITREQUEST
_TRAITGETSTATEREQUEST.fields_by_name['field_mask'].message_type = google_dot_protobuf_dot_field__mask__pb2._FIELDMASK
_TRAITGETSTATERESPONSE.fields_by_name['trait_request'].message_type = _TRAITREQUEST
_TRAITGETSTATERESPONSE.fields_by_name['accepted_state'].message_type = _TRAITSTATENOTIFICATION
_TRAITGETSTATERESPONSE.fields_by_name['trait_info'].message_type = _TRAITINFO
_TRAITGETSTATERESPONSE.fields_by_name['confirmed_state'].message_type = _TRAITSTATENOTIFICATION
_TRAITINFO.fields_by_name['schema_version'].message_type = nest_dot_messages_dot_schema__version__pb2._SCHEMAVERSION
_TRAITUPDATESTATEREQUEST.fields_by_name['trait_request'].message_type = _TRAITREQUEST
_TRAITUPDATESTATEREQUEST.fields_by_name['state'].message_type = google_dot_protobuf_dot_any__pb2._ANY
_TRAITUPDATESTATEREQUEST.fields_by_name['state_mask'].message_type = google_dot_protobuf_dot_field__mask__pb2._FIELDMASK
_TRAITUPDATESTATEREQUEST.fields_by_name['schema_version'].message_type = nest_dot_messages_dot_schema__version__pb2._SCHEMAVERSION
_TRAITNOTIFYREQUEST.fields_by_name['trait_request'].message_type = _TRAITREQUEST
_TRAITNOTIFYREQUEST.fields_by_name['confirmed_state'].message_type = _TRAITSTATENOTIFICATION
_TRAITNOTIFYREQUEST.fields_by_name['events'].message_type = _TRAITEVENTSNOTIFICATION
_TRAITNOTIFYRESPONSE.fields_by_name['trait_request'].message_type = _TRAITREQUEST
_TRAITEVENT.fields_by_name['event'].message_type = google_dot_protobuf_dot_any__pb2._ANY
_TRAITCOMMAND.fields_by_name['trait_request'].message_type = _TRAITREQUEST
_TRAITCOMMAND.fields_by_name['command'].message_type = google_dot_protobuf_dot_any__pb2._ANY
_TRAITCOMMAND.fields_by_name['expiry_time'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_TRAITCOMMAND.fields_by_name['schema_version'].message_type = nest_dot_messages_dot_schema__version__pb2._SCHEMAVERSION
DESCRIPTOR.message_types_by_name['TraitStateNotification'] = _TRAITSTATENOTIFICATION
DESCRIPTOR.message_types_by_name['TraitEventsNotification'] = _TRAITEVENTSNOTIFICATION
DESCRIPTOR.message_types_by_name['Event'] = _EVENT
DESCRIPTOR.message_types_by_name['TraitRequest'] = _TRAITREQUEST
DESCRIPTOR.message_types_by_name['TraitOperation'] = _TRAITOPERATION
DESCRIPTOR.message_types_by_name['TraitObserveRequest'] = _TRAITOBSERVEREQUEST
DESCRIPTOR.message_types_by_name['TraitObserveResponse'] = _TRAITOBSERVERESPONSE
DESCRIPTOR.message_types_by_name['TraitGetStateRequest'] = _TRAITGETSTATEREQUEST
DESCRIPTOR.message_types_by_name['TraitGetStateResponse'] = _TRAITGETSTATERESPONSE
DESCRIPTOR.message_types_by_name['TraitInfo'] = _TRAITINFO
DESCRIPTOR.message_types_by_name['TraitUpdateStateRequest'] = _TRAITUPDATESTATEREQUEST
DESCRIPTOR.message_types_by_name['TraitNotifyRequest'] = _TRAITNOTIFYREQUEST
DESCRIPTOR.message_types_by_name['TraitNotifyResponse'] = _TRAITNOTIFYRESPONSE
DESCRIPTOR.message_types_by_name['TraitEvent'] = _TRAITEVENT
DESCRIPTOR.message_types_by_name['TraitCommand'] = _TRAITCOMMAND
DESCRIPTOR.message_types_by_name['WeaveStatusReport'] = _WEAVESTATUSREPORT
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
TraitStateNotification = _reflection.GeneratedProtocolMessageType('TraitStateNotification', (_message.Message,), dict(
DESCRIPTOR = _TRAITSTATENOTIFICATION,
__module__ = 'nestlabs.gateway.v1.trait_api_pb2'
# @@protoc_insertion_point(class_scope:nestlabs.gateway.v1.TraitStateNotification)
))
_sym_db.RegisterMessage(TraitStateNotification)
TraitEventsNotification = _reflection.GeneratedProtocolMessageType('TraitEventsNotification', (_message.Message,), dict(
DESCRIPTOR = _TRAITEVENTSNOTIFICATION,
__module__ = 'nestlabs.gateway.v1.trait_api_pb2'
# @@protoc_insertion_point(class_scope:nestlabs.gateway.v1.TraitEventsNotification)
))
_sym_db.RegisterMessage(TraitEventsNotification)
Event = _reflection.GeneratedProtocolMessageType('Event', (_message.Message,), dict(
DESCRIPTOR = _EVENT,
__module__ = 'nestlabs.gateway.v1.trait_api_pb2'
# @@protoc_insertion_point(class_scope:nestlabs.gateway.v1.Event)
))
_sym_db.RegisterMessage(Event)
TraitRequest = _reflection.GeneratedProtocolMessageType('TraitRequest', (_message.Message,), dict(
DESCRIPTOR = _TRAITREQUEST,
__module__ = 'nestlabs.gateway.v1.trait_api_pb2'
# @@protoc_insertion_point(class_scope:nestlabs.gateway.v1.TraitRequest)
))
_sym_db.RegisterMessage(TraitRequest)
TraitOperation = _reflection.GeneratedProtocolMessageType('TraitOperation', (_message.Message,), dict(
DESCRIPTOR = _TRAITOPERATION,
__module__ = 'nestlabs.gateway.v1.trait_api_pb2'
# @@protoc_insertion_point(class_scope:nestlabs.gateway.v1.TraitOperation)
))
_sym_db.RegisterMessage(TraitOperation)
TraitObserveRequest = _reflection.GeneratedProtocolMessageType('TraitObserveRequest', (_message.Message,), dict(
DESCRIPTOR = _TRAITOBSERVEREQUEST,
__module__ = 'nestlabs.gateway.v1.trait_api_pb2'
# @@protoc_insertion_point(class_scope:nestlabs.gateway.v1.TraitObserveRequest)
))
_sym_db.RegisterMessage(TraitObserveRequest)
TraitObserveResponse = _reflection.GeneratedProtocolMessageType('TraitObserveResponse', (_message.Message,), dict(
DESCRIPTOR = _TRAITOBSERVERESPONSE,
__module__ = 'nestlabs.gateway.v1.trait_api_pb2'
# @@protoc_insertion_point(class_scope:nestlabs.gateway.v1.TraitObserveResponse)
))
_sym_db.RegisterMessage(TraitObserveResponse)
TraitGetStateRequest = _reflection.GeneratedProtocolMessageType('TraitGetStateRequest', (_message.Message,), dict(
DESCRIPTOR = _TRAITGETSTATEREQUEST,
__module__ = 'nestlabs.gateway.v1.trait_api_pb2'
# @@protoc_insertion_point(class_scope:nestlabs.gateway.v1.TraitGetStateRequest)
))
_sym_db.RegisterMessage(TraitGetStateRequest)
TraitGetStateResponse = _reflection.GeneratedProtocolMessageType('TraitGetStateResponse', (_message.Message,), dict(
DESCRIPTOR = _TRAITGETSTATERESPONSE,
__module__ = 'nestlabs.gateway.v1.trait_api_pb2'
# @@protoc_insertion_point(class_scope:nestlabs.gateway.v1.TraitGetStateResponse)
))
_sym_db.RegisterMessage(TraitGetStateResponse)
TraitInfo = _reflection.GeneratedProtocolMessageType('TraitInfo', (_message.Message,), dict(
DESCRIPTOR = _TRAITINFO,
__module__ = 'nestlabs.gateway.v1.trait_api_pb2'
# @@protoc_insertion_point(class_scope:nestlabs.gateway.v1.TraitInfo)
))
_sym_db.RegisterMessage(TraitInfo)
TraitUpdateStateRequest = _reflection.GeneratedProtocolMessageType('TraitUpdateStateRequest', (_message.Message,), dict(
DESCRIPTOR = _TRAITUPDATESTATEREQUEST,
__module__ = 'nestlabs.gateway.v1.trait_api_pb2'
# @@protoc_insertion_point(class_scope:nestlabs.gateway.v1.TraitUpdateStateRequest)
))
_sym_db.RegisterMessage(TraitUpdateStateRequest)
TraitNotifyRequest = _reflection.GeneratedProtocolMessageType('TraitNotifyRequest', (_message.Message,), dict(
DESCRIPTOR = _TRAITNOTIFYREQUEST,
__module__ = 'nestlabs.gateway.v1.trait_api_pb2'
# @@protoc_insertion_point(class_scope:nestlabs.gateway.v1.TraitNotifyRequest)
))
_sym_db.RegisterMessage(TraitNotifyRequest)
TraitNotifyResponse = _reflection.GeneratedProtocolMessageType('TraitNotifyResponse', (_message.Message,), dict(
DESCRIPTOR = _TRAITNOTIFYRESPONSE,
__module__ = 'nestlabs.gateway.v1.trait_api_pb2'
# @@protoc_insertion_point(class_scope:nestlabs.gateway.v1.TraitNotifyResponse)
))
_sym_db.RegisterMessage(TraitNotifyResponse)
TraitEvent = _reflection.GeneratedProtocolMessageType('TraitEvent', (_message.Message,), dict(
DESCRIPTOR = _TRAITEVENT,
__module__ = 'nestlabs.gateway.v1.trait_api_pb2'
# @@protoc_insertion_point(class_scope:nestlabs.gateway.v1.TraitEvent)
))
_sym_db.RegisterMessage(TraitEvent)
TraitCommand = _reflection.GeneratedProtocolMessageType('TraitCommand', (_message.Message,), dict(
DESCRIPTOR = _TRAITCOMMAND,
__module__ = 'nestlabs.gateway.v1.trait_api_pb2'
# @@protoc_insertion_point(class_scope:nestlabs.gateway.v1.TraitCommand)
))
_sym_db.RegisterMessage(TraitCommand)
WeaveStatusReport = _reflection.GeneratedProtocolMessageType('WeaveStatusReport', (_message.Message,), dict(
DESCRIPTOR = _WEAVESTATUSREPORT,
__module__ = 'nestlabs.gateway.v1.trait_api_pb2'
# @@protoc_insertion_point(class_scope:nestlabs.gateway.v1.WeaveStatusReport)
))
_sym_db.RegisterMessage(WeaveStatusReport)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\027com.nestlabs.gateway.v1P\001\242\002\003PCL'))
_TRAITAPI = _descriptor.ServiceDescriptor(
name='TraitApi',
full_name='nestlabs.gateway.v1.TraitApi',
file=DESCRIPTOR,
index=0,
options=None,
serialized_start=3898,
serialized_end=4397,
methods=[
_descriptor.MethodDescriptor(
name='Observe',
full_name='nestlabs.gateway.v1.TraitApi.Observe',
index=0,
containing_service=None,
input_type=_TRAITOBSERVEREQUEST,
output_type=_TRAITOBSERVERESPONSE,
options=None,
),
_descriptor.MethodDescriptor(
name='GetState',
full_name='nestlabs.gateway.v1.TraitApi.GetState',
index=1,
containing_service=None,
input_type=_TRAITGETSTATEREQUEST,
output_type=_TRAITGETSTATERESPONSE,
options=None,
),
_descriptor.MethodDescriptor(
name='UpdateState',
full_name='nestlabs.gateway.v1.TraitApi.UpdateState',
index=2,
containing_service=None,
input_type=_TRAITUPDATESTATEREQUEST,
output_type=_TRAITOPERATION,
options=None,
),
_descriptor.MethodDescriptor(
name='Notify',
full_name='nestlabs.gateway.v1.TraitApi.Notify',
index=3,
containing_service=None,
input_type=_TRAITNOTIFYREQUEST,
output_type=_TRAITNOTIFYRESPONSE,
options=None,
),
_descriptor.MethodDescriptor(
name='SendCommand',
full_name='nestlabs.gateway.v1.TraitApi.SendCommand',
index=4,
containing_service=None,
input_type=_TRAITCOMMAND,
output_type=_TRAITOPERATION,
options=None,
),
])
_sym_db.RegisterServiceDescriptor(_TRAITAPI)
DESCRIPTOR.services_by_name['TraitApi'] = _TRAITAPI
# @@protoc_insertion_point(module_scope)
|
py | b40513bfe63b75dcdea0184980e9dc25413b0bcd | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
import sys
import glob
import tempfile
import re
import inspect
import xml.etree.ElementTree as ElementTree
import llnl.util.tty as tty
from llnl.util.filesystem import \
install, ancestor, filter_file, \
HeaderList, find_headers, \
LibraryList, find_libraries, find_system_libraries
from spack.version import Version, ver
from spack.package import PackageBase, run_after, InstallError
from spack.util.environment import EnvironmentModifications
from spack.util.executable import Executable
from spack.util.prefix import Prefix
from spack.build_environment import dso_suffix
# A couple of utility functions that might be useful in general. If so, they
# should really be defined elsewhere, unless deemed heretical.
# (Or naive on my part).
def debug_print(msg, *args):
'''Prints a message (usu. a variable) and the callers' names for a couple
of stack frames.
'''
# https://docs.python.org/2/library/inspect.html#the-interpreter-stack
stack = inspect.stack()
_func_name = 3
tty.debug("%s.%s:\t%s" % (stack[2][_func_name], stack[1][_func_name], msg),
*args)
def raise_lib_error(*args):
'''Bails out with an error message. Shows args after the first as one per
line, tab-indented, useful for long paths to line up and stand out.
'''
raise InstallError("\n\t".join(str(i) for i in args))
def _expand_fields(s):
'''[Experimental] Expand arch-related fields in a string, typically a
filename.
Supported fields and their typical expansions are::
{platform} linux, mac
{arch} intel64 (including on Mac)
{libarch} intel64, empty on Mac
{bits} 64
'''
# Python-native string formatting requires arg list counts to match the
# replacement field count; optional fields are far easier with regexes.
_bits = '64'
_arch = 'intel64' # TBD: ia32
if 'linux' in sys.platform: # NB: linux2 vs. linux
s = re.sub('{platform}', 'linux', s)
s = re.sub('{libarch}', _arch, s)
elif 'darwin' in sys.platform:
s = re.sub('{platform}', 'mac', s)
s = re.sub('{libarch}', '', s) # no arch dirs are used (as of 2018)
# elif 'win' in sys.platform: # TBD
# s = re.sub('{platform}', 'windows', s)
s = re.sub('{arch}', _arch, s)
s = re.sub('{bits}', _bits, s)
return s
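# A quick illustration of the expansions above; the template strings are
# made-up examples, not paths from any actual Intel release:
#
#   >>> _expand_fields('mkl/lib/{libarch}')        # on Linux
#   'mkl/lib/intel64'
#   >>> _expand_fields('{platform}/bin/{arch}')    # on Linux
#   'linux/bin/intel64'
#
# On macOS, {platform} becomes 'mac' and {libarch} expands to the empty
# string, so 'mkl/lib/{libarch}' would come out as 'mkl/lib/'.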
class IntelPackage(PackageBase):
"""Specialized class for licensed Intel software.
This class provides two phases that can be overridden:
1. :py:meth:`~.IntelPackage.configure`
2. :py:meth:`~.IntelPackage.install`
They both have sensible defaults and for many packages the
only thing necessary will be to override setup_run_environment
to set the appropriate environment variables.
"""
#: Phases of an Intel package
phases = ['configure', 'install']
#: This attribute is used in UI queries that need to know the build
#: system base class
build_system_class = 'IntelPackage'
#: A dict that maps Spack version specs to release years, needed to infer
#: the installation directory layout for pre-2016 versions in the family of
#: Intel packages.
#
# Like any property, it can be overridden in client packages, should older
# versions ever be added there. The initial dict here contains the
# packages defined in Spack as of 2018-04. Keys could conceivably overlap
# but preferably should not - only the first key in hash traversal order
# that satisfies self.spec will be used.
version_years = {
# intel-daal is versioned 2016 and later, no divining is needed
        'intel-ipp@9.0:9.99': 2016,
        'intel-mkl@11.3:11.3.999': 2016,
        'intel-mpi@5.1:5.99': 2016,
}
# Below is the list of possible values for setting auto dispatch functions
# for the Intel compilers. Using these allows for the building of fat
# binaries that will detect the CPU SIMD capabilities at run time and
# activate the appropriate extensions.
auto_dispatch_options = ('COMMON-AVX512', 'MIC-AVX512', 'CORE-AVX512',
'CORE-AVX2', 'CORE-AVX-I', 'AVX', 'SSE4.2',
'SSE4.1', 'SSSE3', 'SSE3', 'SSE2')
@property
def license_required(self):
# The Intel libraries are provided without requiring a license as of
# version 2017.2. Trying to specify one anyway will fail. See:
# https://software.intel.com/en-us/articles/free-ipsxe-tools-and-libraries
return self._has_compilers or self.version < ver('2017.2')
#: Comment symbol used in the license.lic file
license_comment = '#'
#: Environment variables that Intel searches for a license file
license_vars = ['INTEL_LICENSE_FILE']
#: URL providing information on how to acquire a license key
license_url = 'https://software.intel.com/en-us/articles/intel-license-manager-faq'
#: Location where Intel searches for a license file
@property
def license_files(self):
dirs = ['Licenses']
if self._has_compilers:
dirs.append(self.component_bin_dir('compiler'))
for variant, component_suite_dir in {
'+advisor': 'advisor',
'+inspector': 'inspector',
'+itac': 'itac',
'+vtune': 'vtune_amplifier',
}.items():
if variant in self.spec:
dirs.append(self.normalize_path(
'licenses', component_suite_dir, relative=True))
files = [os.path.join(d, 'license.lic') for d in dirs]
return files
#: Components to install (list of name patterns from pset/mediaconfig.xml)
# NB: Renamed from plain components() for coding and maintainability.
@property
def pset_components(self):
# Do not detail single-purpose client packages.
if not self._has_compilers:
return ['ALL']
# tty.warn('DEBUG: installing ALL components')
# return ['ALL']
# Always include compilers and closely related components.
# Pre-2016 compiler components have different names - throw in all.
# Later releases have overlapping minor parts that differ by "edition".
# NB: The spack package 'intel' is a subset of
# 'intel-parallel-studio@composer' without the lib variants.
c = ' intel-icc intel-ifort' \
' intel-ccomp intel-fcomp intel-comp-' \
' intel-compilerproc intel-compilerprof intel-compilerpro-' \
' intel-psxe intel-openmp'
additions_for = {
'cluster': ' intel-icsxe',
'professional': ' intel-ips-',
'composer': ' intel-compxe',
}
if self._edition in additions_for:
c += additions_for[self._edition]
for variant, components_to_add in {
            '+daal': ' intel-daal', # Data Analytics Acceleration Lib
            '+gdb': ' intel-gdb', # GNU Debugger
            '+ipp': ' intel-ipp intel-crypto-ipp', # Integrated Performance Primitives
'+mkl': ' intel-mkl', # Math Kernel Library
'+mpi': ' intel-mpi intel-imb', # MPI runtime, SDK, benchm.
'+tbb': ' intel-tbb', # Threading Building Blocks
'+advisor': ' intel-advisor',
'+clck': ' intel_clck', # Cluster Checker
'+inspector': ' intel-inspector',
'+itac': ' intel-itac intel-ta intel-tc'
' intel-trace-analyzer intel-trace-collector',
# Trace Analyzer and Collector
'+vtune': ' intel-vtune-amplifier', # VTune
}.items():
if variant in self.spec:
c += components_to_add
debug_print(c)
return c.split()
# ---------------------------------------------------------------------
# Utilities
# ---------------------------------------------------------------------
@property
def _filtered_components(self):
'''Expands the list of desired component patterns to the exact names
present in the given download.
'''
c = self.pset_components
if 'ALL' in c or 'DEFAULTS' in c: # No filter needed
return c
# mediaconfig.xml is known to contain duplicate components.
# If more than one copy of the same component is used, you
# will get an error message about invalid components.
# Use sets to prevent duplicates and for efficient traversal.
requested = set(c)
confirmed = set()
# NB: To get a reasonable overview in pretty much the documented way:
#
# grep -E '<Product|<Abbr|<Name>..[a-z]' pset/mediaconfig.xml
#
# https://software.intel.com/en-us/articles/configuration-file-format
#
xmltree = ElementTree.parse('pset/mediaconfig.xml')
for entry in xmltree.getroot().findall('.//Abbr'): # XPath expression
name_present = entry.text
for name_requested in requested:
if name_present.startswith(name_requested):
confirmed.add(name_present)
return list(confirmed)
@property
def intel64_int_suffix(self):
'''Provide the suffix for Intel library names to match a client
application's desired int size, conveyed by the active spec variant.
The possible suffixes and their meanings are:
``ilp64`` all of int, long, and pointer are 64 bit,
`` lp64`` only long and pointer are 64 bit; int will be 32bit.
'''
if '+ilp64' in self.spec:
return 'ilp64'
else:
return 'lp64'
@property
def _has_compilers(self):
return self.name in ['intel', 'intel-parallel-studio']
@property
def _edition(self):
if self.name == 'intel-parallel-studio':
return self.version[0] # clearer than .up_to(1), I think.
elif self.name == 'intel':
return 'composer'
else:
return ''
@property
def version_yearlike(self):
'''Return the version in a unified style, suitable for Version class
conditionals.
'''
# Input data for this routine: self.version
# Returns: YYYY.Nupdate[.Buildseq]
#
# Specifics by package:
#
# Package Format of self.version
# ------------------------------------------------------------
# 'intel-parallel-studio' <edition>.YYYY.Nupdate
# 'intel' YY.0.Nupdate (some assigned ad-hoc)
# Recent lib packages YYYY.Nupdate.Buildseq
# Early lib packages Major.Minor.Patch.Buildseq
# ------------------------------------------------------------
#
# Package Output
# ------------------------------------------------------------
# 'intel-parallel-studio' YYYY.Nupdate
# 'intel' YYYY.Nupdate
# Recent lib packages YYYY.Nupdate.Buildseq
# Known early lib packages YYYY.Minor.Patch.Buildseq (*)
# Unknown early lib packages (2000 + Major).Minor.Patch.Buildseq
# ----------------------------------------------------------------
#
# (*) YYYY is taken from @property "version_years" (a dict of specs)
#
try:
if self.name == 'intel':
# Has a "Minor" version element, but it is always set as 0. To
# be useful for comparisons, drop it and get YYYY.Nupdate.
v_tail = self.version[2:] # coerced just fine via __getitem__
else:
v_tail = self.version[1:]
except IndexError:
# Hmm - this happens on "spack install intel-mkl@11".
# I thought concretization picks an actual version??
return self.version # give up
if self.name == 'intel-parallel-studio':
return v_tail
v_year = self.version[0]
if v_year < 2000:
# Shoehorn Major into release year until we know better.
v_year += 2000
for spec, year in self.version_years.items():
if self.spec.satisfies(spec):
v_year = year
break
return ver('%s.%s' % (v_year, v_tail))
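    # A quick worked illustration of the normalization above (the specs are
    # hypothetical and chosen only to exercise each branch):
    #
    #   intel-parallel-studio@cluster.2018.1  ->  2018.1
    #   intel@18.0.3                          ->  2018.3
    #   intel-mkl@2018.1.163                  ->  2018.1.163
    #   intel-mkl@11.3.2.181                  ->  2016.3.2.181  (via version_years)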
# ---------------------------------------------------------------------
# Directory handling common to all Intel components
# ---------------------------------------------------------------------
# For reference: classes using IntelPackage, as of Spack-0.11:
#
# intel/ intel-ipp/ intel-mpi/
# intel-daal/ intel-mkl/ intel-parallel-studio/
#
# Not using class IntelPackage:
# intel-gpu-tools/ intel-mkl-dnn/ intel-tbb/
#
def normalize_suite_dir(self, suite_dir_name, version_globs=['*.*.*']):
'''Returns the version-specific and absolute path to the directory of
an Intel product or a suite of product components.
Parameters:
suite_dir_name (str):
Name of the product directory, without numeric version.
- Examples::
composer_xe, parallel_studio_xe, compilers_and_libraries
The following will work as well, even though they are not
directly targets for Spack installation::
advisor_xe, inspector_xe, vtune_amplifier_xe,
performance_snapshots (new name for vtune as of 2018)
These are single-component products without subordinate
components and are normally made available to users by a
toplevel psxevars.sh or equivalent file to source (and thus by
the modulefiles that Spack produces).
version_globs (list of str): Suffix glob patterns (most specific
first) expected to qualify suite_dir_name to its fully
version-specific install directory (as opposed to a
compatibility directory or symlink).
'''
# See ./README-intel.rst for background and analysis of dir layouts.
d = self.prefix
# Distinguish between product installations that were done external to
# Spack (integrated via packages.yaml) and Spack-internal ones. The
# resulting prefixes may differ in directory depth and specificity.
unversioned_dirname = ''
if suite_dir_name and suite_dir_name in d:
# If e.g. MKL was installed outside of Spack, it is likely just one
# product or product component among possibly many other Intel
# products and their releases that were installed in sibling or
# cousin directories. In such cases, the prefix given to Spack
# will inevitably be a highly product-specific and preferably fully
# version-specific directory. This is what we want and need, and
# nothing more specific than that, i.e., if needed, convert, e.g.:
# .../compilers_and_libraries*/* -> .../compilers_and_libraries*
d = re.sub('(%s%s.*?)%s.*' %
(os.sep, re.escape(suite_dir_name), os.sep), r'\1', d)
# The Intel installer scripts try hard to place compatibility links
# named like this in the install dir to convey upgrade benefits to
# traditional client apps. But such a generic name can be trouble
# when given to Spack: the link target is bound to change outside
# of Spack's purview and when it does, the outcome of subsequent
# builds of dependent packages may be affected. (Though Intel has
# been remarkably good at backward compatibility.)
# I'm not sure if Spack's package hashing includes link targets.
if d.endswith(suite_dir_name):
# NB: This could get tiresome without a seen++ test.
# tty.warn('Intel product found in a version-neutral directory'
# ' - future builds may not be reproducible.')
#
# Simply doing realpath() would not be enough, because:
# compilers_and_libraries -> compilers_and_libraries_2018
# which is mostly a staging directory for symlinks (see next).
unversioned_dirname = d
else:
# By contrast, a Spack-internal MKL installation will inherit its
# prefix from install.sh of Intel's package distribution, where it
# means the high-level installation directory that is specific to
# the *vendor* (think of the default "/opt/intel"). We must now
# step down into the *product* directory to get the usual
# hierarchy. But let's not do that in haste ...
#
# For a Spack-born install, the fully-qualified release directory
# desired above may seem less important since product upgrades
# won't land in the same parent. However, only the fully qualified
# directory contains the regular files for the compiler commands:
#
# $ ls -lF <HASH>/compilers_and_libraries*/linux/bin/intel64/icc
#
# <HASH>/compilers_and_libraries_2018.1.163/linux/bin/intel64/icc*
# A regular file in the actual release directory. Bingo!
#
# <HASH>/compilers_and_libraries_2018/linux/bin/intel64/icc -> ...
# A symlink - no good. Note that "compilers_and_libraries_2018/"
# is itself a directory (not symlink) but it merely holds a
# compatibility dir hierarchy with lots of symlinks into the
# release dir.
#
# <HASH>/compilers_and_libraries/linux/bin/intel64/icc -> ...
# Ditto.
#
            # Now, the Spack packages for MKL and MPI use version
# triplets, but the one for intel-parallel-studio does not.
# So, we can't have it quite as easy as:
# d = Prefix(d.append('compilers_and_libraries_' + self.version))
# Alright, let's see what we can find instead:
unversioned_dirname = os.path.join(d, suite_dir_name)
if unversioned_dirname:
for g in version_globs:
try_glob = unversioned_dirname + g
debug_print('trying %s' % try_glob)
matching_dirs = sorted(glob.glob(try_glob))
# NB: Python glob() returns results in arbitrary order - ugh!
# NB2: sorted() is a shortcut that is NOT number-aware.
if matching_dirs:
debug_print('found %d:' % len(matching_dirs),
matching_dirs)
# Take the highest and thus presumably newest match, which
# better be the sole one anyway.
d = matching_dirs[-1]
break
if not matching_dirs:
# No match -- return a sensible value anyway.
d = unversioned_dirname
debug_print(d)
return Prefix(d)
def normalize_path(self, component_path, component_suite_dir=None,
relative=False):
'''Returns the absolute or relative path to a component or file under a
component suite directory.
Intel's product names, scope, and directory layout changed over the
years. This function provides a unified interface to their directory
names.
Parameters:
component_path (str): a component name like 'mkl', or 'mpi', or a
deeper relative path.
component_suite_dir (str): _Unversioned_ name of the expected
parent directory of component_path. When absent or `None`, an
appropriate default will be used. A present but empty string
`""` requests that `component_path` refer to `self.prefix`
directly.
Typical values: `compilers_and_libraries`, `composer_xe`,
`parallel_studio_xe`.
Also supported: `advisor`, `inspector`, `vtune`. The actual
directory name for these suites varies by release year. The
name will be corrected as needed for use in the return value.
relative (bool): When True, return path relative to self.prefix,
otherwise, return an absolute path (the default).
'''
# Design note: Choosing the default for `component_suite_dir` was a bit
# tricky since there better be a sensible means to specify direct
# parentage under self.prefix (even though you normally shouldn't need
# a function for that). I chose "" to allow that case be represented,
# and 'None' or the absence of the kwarg to represent the most relevant
# case for the time of writing.
#
# In the 2015 releases (the earliest in Spack as of 2018), there were
# nominally two separate products that provided the compilers:
# "Composer" as lower tier, and "Parallel Studio" as upper tier. In
# Spack, we justifiably retcon both as "intel-parallel-studio@composer"
# and "...@cluster", respectively. Both of these use the older
# "composer_xe" dir layout, as do their virtual package personas.
#
# All other "intel-foo" packages in Spack as of 2018-04 use the
# "compilers_and_libraries" layout, including the 2016 releases that
# are not natively versioned by year.
cs = component_suite_dir
if cs is None and component_path.startswith('ism'):
cs = 'parallel_studio_xe'
v = self.version_yearlike
# Glob variants to complete component_suite_dir.
# Helper var for older MPI versions - those are reparented, with each
# version in their own version-named dir.
standalone_glob = '[1-9]*.*.*'
# Most other components; try most specific glob first.
# flake8 is far too opinionated about lists - ugh.
normalize_kwargs = {
'version_globs': [
'_%s' % self.version,
'_%s.*' % v.up_to(2), # should be: YYYY.Nupdate
'_*.*.*', # last resort
]
}
for rename_rule in [
# cs given as arg, in years, dir actually used, [version_globs]
[None, ':2015', 'composer_xe'],
[None, '2016:', 'compilers_and_libraries'],
['advisor', ':2016', 'advisor_xe'],
['inspector', ':2016', 'inspector_xe'],
['vtune_amplifier', ':2017', 'vtune_amplifier_xe'],
['vtune', ':2017', 'vtune_amplifier_xe'], # alt.
['itac', ':', 'itac', [os.sep + standalone_glob]],
]:
if cs == rename_rule[0] and v.satisfies(ver(rename_rule[1])):
cs = rename_rule[2]
if len(rename_rule) > 3:
normalize_kwargs = {'version_globs': rename_rule[3]}
break
d = self.normalize_suite_dir(cs, **normalize_kwargs)
# Help find components not located directly under d.
# NB: ancestor() not well suited if version_globs may contain os.sep .
parent_dir = re.sub(os.sep + re.escape(cs) + '.*', '', d)
reparent_as = {}
if cs == 'compilers_and_libraries': # must qualify further
d = os.path.join(d, _expand_fields('{platform}'))
elif cs == 'composer_xe':
reparent_as = {'mpi': 'impi'}
# ignore 'imb' (MPI Benchmarks)
for nominal_p, actual_p in reparent_as.items():
if component_path.startswith(nominal_p):
dirs = glob.glob(
os.path.join(parent_dir, actual_p, standalone_glob))
debug_print('reparent dirs: %s' % dirs)
# Brazenly assume last match is the most recent version;
# convert back to relative of parent_dir, and re-assemble.
rel_dir = dirs[-1].split(parent_dir + os.sep, 1)[-1]
component_path = component_path.replace(nominal_p, rel_dir, 1)
d = parent_dir
d = os.path.join(d, component_path)
if relative:
d = os.path.relpath(os.path.realpath(d), parent_dir)
debug_print(d)
return d
def component_bin_dir(self, component, **kwargs):
d = self.normalize_path(component, **kwargs)
if component == 'compiler': # bin dir is always under PARENT
d = os.path.join(ancestor(d), 'bin', _expand_fields('{libarch}'))
d = d.rstrip(os.sep) # cosmetics, when {libarch} is empty
# NB: Works fine even with relative=True, e.g.:
# composer_xe/compiler -> composer_xe/bin/intel64
elif component == 'mpi':
d = os.path.join(d, _expand_fields('{libarch}'), 'bin')
else:
d = os.path.join(d, 'bin')
debug_print(d)
return d
def component_lib_dir(self, component, **kwargs):
'''Provide directory suitable for find_libraries() and
SPACK_COMPILER_EXTRA_RPATHS.
'''
d = self.normalize_path(component, **kwargs)
if component == 'mpi':
d = os.path.join(d, _expand_fields('{libarch}'), 'lib')
else:
d = os.path.join(d, 'lib', _expand_fields('{libarch}'))
d = d.rstrip(os.sep) # cosmetics, when {libarch} is empty
if component == 'tbb': # must qualify further for abi
d = os.path.join(d, self._tbb_abi)
debug_print(d)
return d
def component_include_dir(self, component, **kwargs):
d = self.normalize_path(component, **kwargs)
if component == 'mpi':
d = os.path.join(d, _expand_fields('{libarch}'), 'include')
else:
d = os.path.join(d, 'include')
debug_print(d)
return d
@property
def file_to_source(self):
'''Full path of file to source for initializing an Intel package.
A client package could override as follows:
` @property`
` def file_to_source(self):`
` return self.normalize_path("apsvars.sh", "vtune_amplifier")`
'''
vars_file_info_for = {
# key (usu. spack package name) -> [rel_path, component_suite_dir]
# Extension note: handle additions by Spack name or ad-hoc keys.
'@early_compiler': ['bin/compilervars', None],
'intel-parallel-studio': ['bin/psxevars', 'parallel_studio_xe'],
'intel': ['bin/compilervars', None],
'intel-daal': ['daal/bin/daalvars', None],
'intel-ipp': ['ipp/bin/ippvars', None],
'intel-mkl': ['mkl/bin/mklvars', None],
'intel-mpi': ['mpi/{libarch}/bin/mpivars', None],
}
key = self.name
if self.version_yearlike.satisfies(ver(':2015')):
# Same file as 'intel' but 'None' for component_suite_dir will
# resolve differently. Listed as a separate entry to serve as
# example and to avoid pitfalls upon possible refactoring.
key = '@early_compiler'
f, component_suite_dir = vars_file_info_for[key]
f = _expand_fields(f) + '.sh'
# TODO?? win32 would have to handle os.sep, '.bat' (unless POSIX??)
f = self.normalize_path(f, component_suite_dir)
return f
# ---------------------------------------------------------------------
# Threading, including (WIP) support for virtual 'tbb'
# ---------------------------------------------------------------------
@property
def openmp_libs(self):
'''Supply LibraryList for linking OpenMP'''
if '%intel' in self.spec:
# NB: Hunting down explicit library files may be the Spack way of
# doing things, but be aware that "{icc|ifort} --help openmp"
# steers us towards options instead: -qopenmp-link={dynamic,static}
omp_libnames = ['libiomp5']
omp_libs = find_libraries(
omp_libnames,
root=self.component_lib_dir('compiler'),
shared=('+shared' in self.spec))
# Note about search root here: For MKL, the directory
# "$MKLROOT/../compiler" will be present even for an MKL-only
# product installation (as opposed to one being ghosted via
            # packages.yaml), specifically to provide the 'iomp5' libs.
        elif '%gcc' in self.spec:
            # Define omp_libnames here as well so the error path below can
            # report what was searched for.
            omp_libnames = ['libgomp']
            gcc = Executable(self.compiler.cc)
omp_lib_path = gcc(
'--print-file-name', 'libgomp.%s' % dso_suffix, output=str)
omp_libs = LibraryList(omp_lib_path.strip())
if len(omp_libs) < 1:
raise_lib_error('Cannot locate OpenMP libraries:', omp_libnames)
debug_print(omp_libs)
return omp_libs
@property
def _gcc_executable(self):
'''Return GCC executable'''
# Match the available gcc, as it's done in tbbvars.sh.
gcc_name = 'gcc'
# but first check if -gcc-name is specified in cflags
for flag in self.spec.compiler_flags['cflags']:
if flag.startswith('-gcc-name='):
gcc_name = flag.split('-gcc-name=')[1]
break
debug_print(gcc_name)
return Executable(gcc_name)
@property
def tbb_headers(self):
# Note: TBB is included as
# #include <tbb/task_scheduler_init.h>
return HeaderList([
self.component_include_dir('tbb') + '/dummy.h'])
@property
def tbb_libs(self):
'''Supply LibraryList for linking TBB'''
# TODO: When is 'libtbbmalloc' needed?
tbb_lib = find_libraries(
['libtbb'], root=self.component_lib_dir('tbb'))
# NB: Like icc with -qopenmp, so does icpc steer us towards using an
# option: "icpc -tbb"
# TODO: clang(?)
gcc = self._gcc_executable # must be gcc, not self.compiler.cc
cxx_lib_path = gcc(
'--print-file-name', 'libstdc++.%s' % dso_suffix, output=str)
libs = tbb_lib + LibraryList(cxx_lib_path.rstrip())
debug_print(libs)
return libs
@property
def _tbb_abi(self):
'''Select the ABI needed for linking TBB'''
gcc = self._gcc_executable
matches = re.search(r'(gcc|LLVM).* ([0-9]+\.[0-9]+\.[0-9]+).*',
gcc('--version', output=str), re.I | re.M)
abi = ''
if sys.platform == 'darwin':
pass
elif matches:
# TODO: Confirm that this covers clang (needed on Linux only)
gcc_version = Version(matches.groups()[1])
if gcc_version >= ver('4.7'):
abi = 'gcc4.7'
elif gcc_version >= ver('4.4'):
abi = 'gcc4.4'
else:
abi = 'gcc4.1' # unlikely, one hopes.
# Alrighty then ...
debug_print(abi)
return abi
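    # For illustration (hypothetical tool output, Linux only): a gcc that
    # reports "gcc (GCC) 9.3.0" maps to 'gcc4.7', one reporting
    # "gcc (GCC) 4.6.4" maps to 'gcc4.4', and on Darwin the suffix stays ''.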
# ---------------------------------------------------------------------
# Support for virtual 'blas/lapack/scalapack'
# ---------------------------------------------------------------------
@property
def blas_libs(self):
# Main magic here.
# For reference, see The Intel Math Kernel Library Link Line Advisor:
# https://software.intel.com/en-us/articles/intel-mkl-link-line-advisor/
mkl_integer = 'libmkl_intel_' + self.intel64_int_suffix
if self.spec.satisfies('threads=openmp'):
if '%intel' in self.spec:
mkl_threading = 'libmkl_intel_thread'
elif '%gcc' in self.spec:
mkl_threading = 'libmkl_gnu_thread'
threading_engine_libs = self.openmp_libs
elif self.spec.satisfies('threads=tbb'):
mkl_threading = 'libmkl_tbb_thread'
threading_engine_libs = self.tbb_libs
elif self.spec.satisfies('threads=none'):
mkl_threading = 'libmkl_sequential'
threading_engine_libs = LibraryList([])
else:
raise_lib_error('Cannot determine MKL threading libraries.')
mkl_libnames = [mkl_integer, mkl_threading, 'libmkl_core']
mkl_libs = find_libraries(
mkl_libnames,
root=self.component_lib_dir('mkl'),
shared=('+shared' in self.spec))
debug_print(mkl_libs)
if len(mkl_libs) < 3:
raise_lib_error('Cannot locate core MKL libraries:', mkl_libnames,
'in:', self.component_lib_dir('mkl'))
# The Intel MKL link line advisor recommends these system libraries
system_libs = find_system_libraries(
'libpthread libm libdl'.split(),
shared=('+shared' in self.spec))
debug_print(system_libs)
return mkl_libs + threading_engine_libs + system_libs
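    # Sketch of what the selection above yields for one common configuration
    # (lp64 interface, threads=openmp, %gcc client compiler); directory paths
    # omitted:
    #
    #   libmkl_intel_lp64 + libmkl_gnu_thread + libmkl_core
    #   + libgomp (from openmp_libs) + libpthread + libm + libdl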
@property
def lapack_libs(self):
return self.blas_libs
@property
def scalapack_libs(self):
# Intel MKL does not directly depend on MPI but the BLACS library
# which underlies ScaLapack does. It comes in several personalities;
# we must supply a personality matching the MPI implementation that
# is active for the root package that asked for ScaLapack.
spec_root = self.spec.root
if sys.platform == 'darwin' and '^mpich' in spec_root:
# The only supported choice for MKL 2018 on Mac.
blacs_lib = 'libmkl_blacs_mpich'
elif '^openmpi' in spec_root:
blacs_lib = 'libmkl_blacs_openmpi'
elif '^mpich@1' in spec_root:
# Was supported only up to 2015.
blacs_lib = 'libmkl_blacs'
elif ('^mpich@2:' in spec_root or
'^mvapich2' in spec_root or
'^intel-mpi' in spec_root or
'^intel-parallel-studio' in spec_root):
blacs_lib = 'libmkl_blacs_intelmpi'
elif '^mpt' in spec_root:
blacs_lib = 'libmkl_blacs_sgimpt'
else:
raise_lib_error('Cannot find a BLACS library for the given MPI.')
int_suff = '_' + self.intel64_int_suffix
scalapack_libnames = [
'libmkl_scalapack' + int_suff,
blacs_lib + int_suff,
]
sca_libs = find_libraries(
scalapack_libnames,
root=self.component_lib_dir('mkl'),
shared=('+shared' in self.spec))
debug_print(sca_libs)
if len(sca_libs) < 2:
raise_lib_error(
'Cannot locate ScaLapack/BLACS libraries:', scalapack_libnames)
# NB: ScaLapack is installed as "cluster" components within MKL or
# MKL-encompassing products. But those were *optional* for the ca.
# 2015/2016 product releases, which was easy to overlook, and I have
# been bitten by that. Thus, complain early because it'd be a sore
# disappointment to have missing ScaLapack libs show up as a link error
# near the end phase of a client package's build phase.
return sca_libs
# ---------------------------------------------------------------------
# Support for virtual 'mpi'
# ---------------------------------------------------------------------
@property
def mpi_compiler_wrappers(self):
'''Return paths to compiler wrappers as a dict of env-like names
'''
# Intel comes with 2 different flavors of MPI wrappers:
#
# * mpiicc, mpiicpc, and mpiifort are hardcoded to wrap around
# the Intel compilers.
# * mpicc, mpicxx, mpif90, and mpif77 allow you to set which
# compilers to wrap using I_MPI_CC and friends. By default,
# wraps around the GCC compilers.
#
# In theory, these should be equivalent as long as I_MPI_CC
# and friends are set to point to the Intel compilers, but in
# practice, mpicc fails to compile some applications while
# mpiicc works.
bindir = self.component_bin_dir('mpi')
if self.compiler.name == 'intel':
wrapper_vars = {
# eschew Prefix objects -- emphasize the command strings.
'MPICC': os.path.join(bindir, 'mpiicc'),
'MPICXX': os.path.join(bindir, 'mpiicpc'),
'MPIF77': os.path.join(bindir, 'mpiifort'),
'MPIF90': os.path.join(bindir, 'mpiifort'),
'MPIFC': os.path.join(bindir, 'mpiifort'),
}
else:
wrapper_vars = {
'MPICC': os.path.join(bindir, 'mpicc'),
'MPICXX': os.path.join(bindir, 'mpicxx'),
'MPIF77': os.path.join(bindir, 'mpif77'),
'MPIF90': os.path.join(bindir, 'mpif90'),
'MPIFC': os.path.join(bindir, 'mpif90'),
}
# debug_print("wrapper_vars =", wrapper_vars)
return wrapper_vars
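    # Usage sketch (hypothetical prefix shown only for shape): with an Intel
    # client compiler the dict above resolves to something like
    #
    #   {'MPICC':  '<prefix>/.../mpi/intel64/bin/mpiicc',
    #    'MPIF90': '<prefix>/.../mpi/intel64/bin/mpiifort', ...}
    #
    # which mpi_setup_dependent_build_environment() below copies into the
    # dependent package's build environment.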
def mpi_setup_dependent_build_environment(
self, env, dependent_spec, compilers_of_client={}):
'''Unified back-end for setup_dependent_build_environment() of
Intel packages that provide 'mpi'.
Parameters:
env, dependent_spec: same as in
setup_dependent_build_environment().
compilers_of_client (dict): Conveys spack_cc, spack_cxx, etc.,
from the scope of dependent packages; constructed in caller.
'''
# See also: setup_dependent_package()
wrapper_vars = {
'I_MPI_CC': compilers_of_client['CC'],
'I_MPI_CXX': compilers_of_client['CXX'],
'I_MPI_F77': compilers_of_client['F77'],
'I_MPI_F90': compilers_of_client['F90'],
'I_MPI_FC': compilers_of_client['FC'],
# NB: Normally set by the modulefile, but that is not active here:
'I_MPI_ROOT': self.normalize_path('mpi'),
}
# CAUTION - SIMILAR code in:
# var/spack/repos/builtin/packages/mpich/package.py
# var/spack/repos/builtin/packages/openmpi/package.py
# var/spack/repos/builtin/packages/mvapich2/package.py
#
# On Cray, the regular compiler wrappers *are* the MPI wrappers.
if 'platform=cray' in self.spec:
# TODO: Confirm
wrapper_vars.update({
'MPICC': compilers_of_client['CC'],
'MPICXX': compilers_of_client['CXX'],
'MPIF77': compilers_of_client['F77'],
'MPIF90': compilers_of_client['F90'],
})
else:
compiler_wrapper_commands = self.mpi_compiler_wrappers
wrapper_vars.update({
'MPICC': compiler_wrapper_commands['MPICC'],
'MPICXX': compiler_wrapper_commands['MPICXX'],
'MPIF77': compiler_wrapper_commands['MPIF77'],
'MPIF90': compiler_wrapper_commands['MPIF90'],
})
# Ensure that the directory containing the compiler wrappers is in the
# PATH. Spack packages add `prefix.bin` to their dependents' paths,
# but because of the intel directory hierarchy that is insufficient.
env.prepend_path('PATH', os.path.dirname(wrapper_vars['MPICC']))
for key, value in wrapper_vars.items():
env.set(key, value)
debug_print("adding to build env:", wrapper_vars)
# ---------------------------------------------------------------------
# General support for child packages
# ---------------------------------------------------------------------
@property
def headers(self):
result = HeaderList([])
if '+mpi' in self.spec or self.provides('mpi'):
result += find_headers(
['mpi'],
root=self.component_include_dir('mpi'),
recursive=False)
if '+mkl' in self.spec or self.provides('mkl'):
result += find_headers(
['mkl_cblas', 'mkl_lapacke'],
root=self.component_include_dir('mkl'),
recursive=False)
if '+tbb' in self.spec or self.provides('tbb'):
result += self.tbb_headers
debug_print(result)
return result
@property
def libs(self):
result = LibraryList([])
if '+tbb' in self.spec or self.provides('tbb'):
result = self.tbb_libs + result
if '+mkl' in self.spec or self.provides('blas'):
result = self.blas_libs + result
if '+mkl' in self.spec or self.provides('lapack'):
result = self.lapack_libs + result
if '+mpi' in self.spec or self.provides('mpi'):
# If prefix is too general, recursive searches may get files from
# supported but inappropriate sub-architectures like 'mic'.
libnames = ['libmpifort', 'libmpi']
if 'cxx' in self.spec.last_query.extra_parameters:
libnames = ['libmpicxx'] + libnames
result = find_libraries(
libnames,
root=self.component_lib_dir('mpi'),
shared=True, recursive=True) + result
if '^mpi' in self.spec.root and ('+mkl' in self.spec or
self.provides('scalapack')):
result = self.scalapack_libs + result
debug_print(result)
return result
def setup_run_environment(self, env):
"""Adds environment variables to the generated module file.
These environment variables come from running:
.. code-block:: console
$ source parallel_studio_xe_2017/bin/psxevars.sh intel64
[and likewise for MKL, MPI, and other components]
"""
f = self.file_to_source
tty.debug("sourcing " + f)
# All Intel packages expect at least the architecture as argument.
# Some accept more args, but those are not (yet?) handled here.
args = (_expand_fields('{arch}'),)
# On Mac, the platform is *also required*, at least as of 2018.
# I am not sure about earlier versions.
# if sys.platform == 'darwin':
# args = ()
env.extend(EnvironmentModifications.from_sourcing_file(f, *args))
def setup_dependent_build_environment(self, env, dependent_spec):
# NB: This function is overwritten by 'mpi' provider packages:
#
# var/spack/repos/builtin/packages/intel-mpi/package.py
# var/spack/repos/builtin/packages/intel-parallel-studio/package.py
#
# They call _setup_dependent_env_callback() as well, but with the
# dictionary kwarg compilers_of_client{} present and populated.
# Handle everything in a callback version.
self._setup_dependent_env_callback(env, dependent_spec)
def _setup_dependent_env_callback(
self, env, dependent_spec, compilers_of_client={}):
# Expected to be called from a client's
# setup_dependent_build_environment(),
# with args extended to convey the client's compilers as needed.
if '+mkl' in self.spec or self.provides('mkl'):
# Spack's env philosophy demands that we replicate some of the
# settings normally handled by file_to_source ...
#
# TODO: Why is setup_run_environment()
# [which uses file_to_source()]
# not called as a matter of course upon entering the current
# function? (guarding against multiple calls notwithstanding)
#
# Use a local dict to facilitate debug_print():
env_mods = {
'MKLROOT': self.normalize_path('mkl'),
'SPACK_COMPILER_EXTRA_RPATHS': self.component_lib_dir('mkl'),
}
env.set('MKLROOT', env_mods['MKLROOT'])
env.append_path('SPACK_COMPILER_EXTRA_RPATHS',
env_mods['SPACK_COMPILER_EXTRA_RPATHS'])
debug_print("adding/modifying build env:", env_mods)
if '+mpi' in self.spec or self.provides('mpi'):
if compilers_of_client:
self.mpi_setup_dependent_build_environment(
env, dependent_spec, compilers_of_client)
# We could forego this nonce function and inline its code here,
# but (a) it sisters mpi_compiler_wrappers() [needed twice]
# which performs dizzyingly similar but necessarily different
# actions, and (b) function code leaves a bit more breathing
# room within the suffocating corset of flake8 line length.
else:
raise InstallError('compilers_of_client arg required for MPI')
def setup_dependent_package(self, module, dep_spec):
# https://spack.readthedocs.io/en/latest/spack.html#spack.package.PackageBase.setup_dependent_package
# Reminder: "module" refers to Python module.
# Called before the install() method of dependents.
if '+mpi' in self.spec or self.provides('mpi'):
compiler_wrapper_commands = self.mpi_compiler_wrappers
self.spec.mpicc = compiler_wrapper_commands['MPICC']
self.spec.mpicxx = compiler_wrapper_commands['MPICXX']
self.spec.mpif77 = compiler_wrapper_commands['MPIF77']
self.spec.mpifc = compiler_wrapper_commands['MPIFC']
debug_print(("spec '%s' received .mpi* properties:" % self.spec),
compiler_wrapper_commands)
# ---------------------------------------------------------------------
# Specifics for installation phase
# ---------------------------------------------------------------------
@property
def global_license_file(self):
"""Returns the path where a Spack-global license file should be stored.
All Intel software shares the same license, so we store it in a
common 'intel' directory."""
return os.path.join(self.global_license_dir, 'intel', 'license.lic')
@property
def _determine_license_type(self):
'''Provide appropriate license tokens for the installer (silent.cfg).
'''
# See:
# ./README-intel.rst, section "Details for licensing tokens".
# ./build_systems/README-intel.rst, section "Licenses"
#
# Ideally, we just tell the installer to look around on the system.
# Thankfully, we neither need to care nor emulate where it looks:
license_type = {'ACTIVATION_TYPE': 'exist_lic', }
# However (and only), if the spack-internal Intel license file has been
# populated beyond its templated explanatory comments, proffer it to
# the installer instead:
f = self.global_license_file
if os.path.isfile(f):
# The file will have been created upon self.license_required AND
# self.license_files having been populated, so the "if" is usually
# true by the time the present function runs; ../hooks/licensing.py
with open(f) as fh:
if re.search(r'^[ \t]*[^' + self.license_comment + '\n]',
fh.read(), re.MULTILINE):
license_type = {
'ACTIVATION_TYPE': 'license_file',
'ACTIVATION_LICENSE_FILE': f,
}
debug_print(license_type)
return license_type
def configure(self, spec, prefix):
'''Generates the silent.cfg file to pass to installer.sh.
See https://software.intel.com/en-us/articles/configuration-file-format
'''
# Both tokens AND values of the configuration file are validated during
# the run of the underlying binary installer. Any unknown token or
# unacceptable value will cause that installer to fail. Notably, this
# applies to trying to specify a license for a product that does not
# require one.
#
# Fortunately, the validator is a script from a solid code base that is
# only lightly adapted to the token vocabulary of each product and
# release. Let's get that script so we can preempt its objections.
#
# Rather than running the script on a trial file and dissecting its
# pronouncements, let's brazenly skim it for supported tokens and build
# our configuration accordingly. We can do this because the tokens are
# quite long and specific.
validator_code = open('pset/check.awk', 'r').read()
# Let's go a little further and distill the tokens (plus some noise).
tokenlike_words = set(re.findall(r'[A-Z_]{4,}', validator_code))
# NB: .cfg files generated with the "--duplicate filename" option have
# the COMPONENTS string begin with a separator - do not worry about it.
components_joined = ';'.join(self._filtered_components)
nonrpm_db_dir = os.path.join(prefix, 'nonrpm-db')
config_draft = {
# Basics first - these should be accepted in all products.
'ACCEPT_EULA': 'accept',
'PSET_MODE': 'install',
'CONTINUE_WITH_OPTIONAL_ERROR': 'yes',
'CONTINUE_WITH_INSTALLDIR_OVERWRITE': 'yes',
'SIGNING_ENABLED': 'no',
# Highly variable package specifics:
'PSET_INSTALL_DIR': prefix,
'NONRPM_DB_DIR': nonrpm_db_dir,
'COMPONENTS': components_joined,
# Conditional tokens; the first is supported post-2015 only.
# Ignore ia32; most recent products don't even provide it.
'ARCH_SELECTED': 'INTEL64', # was: 'ALL'
# 'ism' component -- see uninstall_ism(); also varies by release.
'PHONEHOME_SEND_USAGE_DATA': 'no',
# Ah, as of 2018.2, that somewhat loaded term got replaced by one
# in business-speak. We uphold our preference, both out of general
# principles and for technical reasons like overhead and non-routed
# compute nodes.
'INTEL_SW_IMPROVEMENT_PROGRAM_CONSENT': 'no',
}
# Deal with licensing only if truly needed.
# NB: Token was 'ACTIVATION' pre ~2013, so basically irrelevant here.
if 'ACTIVATION_TYPE' in tokenlike_words:
config_draft.update(self._determine_license_type)
# Write sorted *by token* so the file looks less like a hash dump.
f = open('silent.cfg', 'w')
for token, value in sorted(config_draft.items()):
if token in tokenlike_words:
f.write('%s=%s\n' % (token, value))
f.close()
def install(self, spec, prefix):
'''Runs Intel's install.sh installation script. Afterwards, save the
installer config and logs to <prefix>/.spack
'''
# prepare
tmpdir = tempfile.mkdtemp(prefix='spack-intel-')
install_script = Executable('./install.sh')
install_script.add_default_env('TMPDIR', tmpdir)
# Need to set HOME to avoid using ~/intel
install_script.add_default_env('HOME', prefix)
# perform
install_script('--silent', 'silent.cfg')
# preserve config and logs
dst = os.path.join(self.prefix, '.spack')
install('silent.cfg', dst)
for f in glob.glob('%s/intel*log' % tmpdir):
install(f, dst)
@run_after('install')
def configure_rpath(self):
if '+rpath' not in self.spec:
return
# https://software.intel.com/en-us/cpp-compiler-18.0-developer-guide-and-reference-using-configuration-files
compilers_bin_dir = self.component_bin_dir('compiler')
compilers_lib_dir = self.component_lib_dir('compiler')
for compiler_name in 'icc icpc ifort'.split():
f = os.path.join(compilers_bin_dir, compiler_name)
if not os.path.isfile(f):
raise InstallError(
'Cannot find compiler command to configure rpath:\n\t' + f)
compiler_cfg = os.path.abspath(f + '.cfg')
with open(compiler_cfg, 'w') as fh:
fh.write('-Xlinker -rpath={0}\n'.format(compilers_lib_dir))
@run_after('install')
def configure_auto_dispatch(self):
if self._has_compilers:
if ('auto_dispatch=none' in self.spec):
return
compilers_bin_dir = self.component_bin_dir('compiler')
for compiler_name in 'icc icpc ifort'.split():
f = os.path.join(compilers_bin_dir, compiler_name)
if not os.path.isfile(f):
raise InstallError(
'Cannot find compiler command to configure '
'auto_dispatch:\n\t' + f)
ad = []
for x in IntelPackage.auto_dispatch_options:
if 'auto_dispatch={0}'.format(x) in self.spec:
ad.append(x)
compiler_cfg = os.path.abspath(f + '.cfg')
with open(compiler_cfg, 'a') as fh:
fh.write('-ax{0}\n'.format(','.join(ad)))
@run_after('install')
def filter_compiler_wrappers(self):
if (('+mpi' in self.spec or self.provides('mpi')) and
'~newdtags' in self.spec):
bin_dir = self.component_bin_dir('mpi')
for f in 'mpif77 mpif90 mpigcc mpigxx mpiicc mpiicpc ' \
'mpiifort'.split():
f = os.path.join(bin_dir, f)
filter_file('-Xlinker --enable-new-dtags', ' ', f, string=True)
@run_after('install')
def uninstall_ism(self):
# The "Intel(R) Software Improvement Program" [ahem] gets installed,
# apparently regardless of PHONEHOME_SEND_USAGE_DATA.
#
# https://software.intel.com/en-us/articles/software-improvement-program
# https://software.intel.com/en-us/forums/intel-c-compiler/topic/506959
# Hubert H. (Intel) Mon, 03/10/2014 - 03:02 wrote:
# "... you can also uninstall the Intel(R) Software Manager
# completely: <installdir>/intel/ism/uninstall.sh"
f = os.path.join(self.normalize_path('ism'), 'uninstall.sh')
if os.path.isfile(f):
            tty.warn('Uninstalling "Intel Software Improvement Program"'
                     ' component')
uninstall = Executable(f)
uninstall('--silent')
# TODO? also try
# ~/intel/ism/uninstall --silent
debug_print(os.getcwd())
return
# Check that self.prefix is there after installation
run_after('install')(PackageBase.sanity_check_prefix)
|
py | b405144b5cbd46464c8b2cffc05e9392df9e49f8 |
import cv2
import math
import numpy as np
# from matplotlib import pyplot as plt
import scipy.stats as st  # required by gkern() below
def drawSquare(frame, pointTopLeft, squareSize, color=(255, 0, 0)):
"""
@param frame - np.ndarray(np.uint8)
@returns modified frame - np.ndarray(np.uint8)
"""
modified_frame = frame.copy()
y, x = pointTopLeft
# print(x, y)
cv2.rectangle(modified_frame, (x, y), (x + squareSize, y + squareSize), color, 5)
return modified_frame
def gkern(kernlen=60, nsig=0.5):
"""Returns a 2D Gaussian kernel."""
x = np.linspace(-nsig, nsig, kernlen+1)
kern1d = np.diff(st.norm.cdf(x))
kern2d = np.outer(kern1d, kern1d)
return kern2d/kern2d.sum()
def spiral(side):
x = y = 0
dx = 0
dy = -1
for i in range(side ** 2):
if (-side / 2 < x <= side / 2):
yield (x, y)
if x == y or (x < 0 and x == -y) or (x > 0 and x == 1 - y):
dx, dy = -dy, dx
x, y = x + dx, y + dy
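# Usage sketch: spiral(4) yields a square spiral of offsets around the origin,
#   (0, 0), (1, 0), (1, 1), (0, 1), (-1, 1), (-1, 0), (-1, -1), (0, -1), ...
# which is the scan order getBallPositionApprox() was presumably meant to use
# around the last known ball position.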
def downsample(frame, k):
obj = (slice(None, None, k))
return frame[obj, obj]
def getBallPositionApprox(frame, lastBallPosition, certainty, BALLSIZE, RODSIZE):
generatorPosition = spiral(2 * RODSIZE)
x, y = lastBallPosition
mx = max(0, x - RODSIZE)
my = max(0, y - RODSIZE)
subframe = frame[mx : x + RODSIZE, my : y + RODSIZE, : ].astype(int)
pnkCf = magicFormula(subframe, 4)
centerPoint = np.unravel_index(pnkCf.argmin(), pnkCf.shape)
    a, b = centerPoint
    a = max(0, a - BALLSIZE // 2)
    b = max(0, b - BALLSIZE // 2)
    # Index the confidence map with local (subframe) coordinates, then shift
    # by the subframe origin (mx, my) to get full-frame coordinates.
    topLeftPoint = (a + mx, b + my)
    return topLeftPoint, pnkCf[a, b]
# maxSoFar = certainty
# ballPosition = lastBallPosition
# while True:
# try:
# positionToCheck = next(generatorPosition)
# except StopIteration:
# # return the highest one
# break # Iterator exhausted: stop the loop
def getBallPosition(frame, BALLSIZE):
"""
Return top left point of a NxN square with the most probable position of the ball
"""
frameInt = frame.astype(int)
confidence = magicFormula(frameInt, BALLSIZE)
centerPoint = np.unravel_index(confidence.argmin(), confidence.shape)
x, y = centerPoint
x = max(0, x - BALLSIZE // 2)
y = max(0, y - BALLSIZE // 2)
topLeftPoint = (x, y)
return topLeftPoint, confidence[x, y]
def markFrame(frame, BALLSIZE, lastBallPosition):
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
# downsampledFrame = downsample(frame, 4)
ballPosition, certainty = getBallPosition(frame, BALLSIZE)
# ballPosition, certainty = getBallPosition(downsampledFrame, BALLSIZE // 4)
# ballPosition = tuple([x * 4 for x in ballPosition])
# print(ballPosition)
if ballPosition == (0, 0):
ballPosition = lastBallPosition
else:
lastBallPosition = ballPosition
return cv2.cvtColor(drawSquare(frame, ballPosition, BALLSIZE), cv2.COLOR_RGB2BGR), ballPosition
def markVideo(filename, BALLSIZE, RODSIZE):
vidcap = cv2.VideoCapture(filename)
success = True
lastBallPosition = (0, 0)
certainty = 0
markedVideo = []
while len(markedVideo) < 151:
# if len(markedVideo) % 100 == 0:
#print(f"Frame {len(markedVideo)}")
success, frame = vidcap.read()
if not success:
break
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
downsampledFrame = downsample(frame, 4)
if certainty < 0:
ballPosition, certainty = getBallPositionApprox(frame, lastBallPosition, certainty, BALLSIZE, RODSIZE)
markedVideo.append(drawSquare(frame, ballPosition, BALLSIZE, color=(0, 255, 0)))
else:
ballPosition, certainty = getBallPosition(downsampledFrame, BALLSIZE // 4)
ballPosition = tuple([x * 4 for x in ballPosition])
# print(ballPosition)
if ballPosition == (0, 0):
ballPosition = lastBallPosition
else:
lastBallPosition = ballPosition
markedVideo.append(drawSquare(frame, ballPosition, BALLSIZE))
return markedVideo
KERNEL = [[0.00216509, 0.00221434, 0.00225907, 0.00229894, 0.00233368,
0.00236302, 0.00238677, 0.00240473, 0.00241678, 0.00242283,
0.00242283, 0.00241678, 0.00240473, 0.00238677, 0.00236302,
0.00233368, 0.00229894, 0.00225907, 0.00221434, 0.00216509],
[0.00221434, 0.00226472, 0.00231046, 0.00235124, 0.00238677,
0.00241678, 0.00244107, 0.00245944, 0.00247176, 0.00247795,
0.00247795, 0.00247176, 0.00245944, 0.00244107, 0.00241678,
0.00238677, 0.00235124, 0.00231046, 0.00226472, 0.00221434],
[0.00225907, 0.00231046, 0.00235712, 0.00239873, 0.00243497,
0.00246559, 0.00249037, 0.00250911, 0.00252169, 0.002528 ,
0.002528 , 0.00252169, 0.00250911, 0.00249037, 0.00246559,
0.00243497, 0.00239873, 0.00235712, 0.00231046, 0.00225907],
[0.00229894, 0.00235124, 0.00239873, 0.00244107, 0.00247795,
0.00250911, 0.00253432, 0.0025534 , 0.0025662 , 0.00257262,
0.00257262, 0.0025662 , 0.0025534 , 0.00253432, 0.00250911,
0.00247795, 0.00244107, 0.00239873, 0.00235124, 0.00229894],
[0.00233368, 0.00238677, 0.00243497, 0.00247795, 0.00251539,
0.00254703, 0.00257262, 0.00259198, 0.00260497, 0.00261149,
0.00261149, 0.00260497, 0.00259198, 0.00257262, 0.00254703,
0.00251539, 0.00247795, 0.00243497, 0.00238677, 0.00233368],
[0.00236302, 0.00241678, 0.00246559, 0.00250911, 0.00254703,
0.00257906, 0.00260497, 0.00262458, 0.00263773, 0.00264433,
0.00264433, 0.00263773, 0.00262458, 0.00260497, 0.00257906,
0.00254703, 0.00250911, 0.00246559, 0.00241678, 0.00236302],
[0.00238677, 0.00244107, 0.00249037, 0.00253432, 0.00257262,
0.00260497, 0.00263115, 0.00265095, 0.00266423, 0.0026709 ,
0.0026709 , 0.00266423, 0.00265095, 0.00263115, 0.00260497,
0.00257262, 0.00253432, 0.00249037, 0.00244107, 0.00238677],
[0.00240473, 0.00245944, 0.00250911, 0.0025534 , 0.00259198,
0.00262458, 0.00265095, 0.0026709 , 0.00268429, 0.002691 ,
0.002691 , 0.00268429, 0.0026709 , 0.00265095, 0.00262458,
0.00259198, 0.0025534 , 0.00250911, 0.00245944, 0.00240473],
[0.00241678, 0.00247176, 0.00252169, 0.0025662 , 0.00260497,
0.00263773, 0.00266423, 0.00268429, 0.00269774, 0.00270449,
0.00270449, 0.00269774, 0.00268429, 0.00266423, 0.00263773,
0.00260497, 0.0025662 , 0.00252169, 0.00247176, 0.00241678],
[0.00242283, 0.00247795, 0.002528 , 0.00257262, 0.00261149,
0.00264433, 0.0026709 , 0.002691 , 0.00270449, 0.00271126,
0.00271126, 0.00270449, 0.002691 , 0.0026709 , 0.00264433,
0.00261149, 0.00257262, 0.002528 , 0.00247795, 0.00242283],
[0.00242283, 0.00247795, 0.002528 , 0.00257262, 0.00261149,
0.00264433, 0.0026709 , 0.002691 , 0.00270449, 0.00271126,
0.00271126, 0.00270449, 0.002691 , 0.0026709 , 0.00264433,
0.00261149, 0.00257262, 0.002528 , 0.00247795, 0.00242283],
[0.00241678, 0.00247176, 0.00252169, 0.0025662 , 0.00260497,
0.00263773, 0.00266423, 0.00268429, 0.00269774, 0.00270449,
0.00270449, 0.00269774, 0.00268429, 0.00266423, 0.00263773,
0.00260497, 0.0025662 , 0.00252169, 0.00247176, 0.00241678],
[0.00240473, 0.00245944, 0.00250911, 0.0025534 , 0.00259198,
0.00262458, 0.00265095, 0.0026709 , 0.00268429, 0.002691 ,
0.002691 , 0.00268429, 0.0026709 , 0.00265095, 0.00262458,
0.00259198, 0.0025534 , 0.00250911, 0.00245944, 0.00240473],
[0.00238677, 0.00244107, 0.00249037, 0.00253432, 0.00257262,
0.00260497, 0.00263115, 0.00265095, 0.00266423, 0.0026709 ,
0.0026709 , 0.00266423, 0.00265095, 0.00263115, 0.00260497,
0.00257262, 0.00253432, 0.00249037, 0.00244107, 0.00238677],
[0.00236302, 0.00241678, 0.00246559, 0.00250911, 0.00254703,
0.00257906, 0.00260497, 0.00262458, 0.00263773, 0.00264433,
0.00264433, 0.00263773, 0.00262458, 0.00260497, 0.00257906,
0.00254703, 0.00250911, 0.00246559, 0.00241678, 0.00236302],
[0.00233368, 0.00238677, 0.00243497, 0.00247795, 0.00251539,
0.00254703, 0.00257262, 0.00259198, 0.00260497, 0.00261149,
0.00261149, 0.00260497, 0.00259198, 0.00257262, 0.00254703,
0.00251539, 0.00247795, 0.00243497, 0.00238677, 0.00233368],
[0.00229894, 0.00235124, 0.00239873, 0.00244107, 0.00247795,
0.00250911, 0.00253432, 0.0025534 , 0.0025662 , 0.00257262,
0.00257262, 0.0025662 , 0.0025534 , 0.00253432, 0.00250911,
0.00247795, 0.00244107, 0.00239873, 0.00235124, 0.00229894],
[0.00225907, 0.00231046, 0.00235712, 0.00239873, 0.00243497,
0.00246559, 0.00249037, 0.00250911, 0.00252169, 0.002528 ,
0.002528 , 0.00252169, 0.00250911, 0.00249037, 0.00246559,
0.00243497, 0.00239873, 0.00235712, 0.00231046, 0.00225907],
[0.00221434, 0.00226472, 0.00231046, 0.00235124, 0.00238677,
0.00241678, 0.00244107, 0.00245944, 0.00247176, 0.00247795,
0.00247795, 0.00247176, 0.00245944, 0.00244107, 0.00241678,
0.00238677, 0.00235124, 0.00231046, 0.00226472, 0.00221434],
[0.00216509, 0.00221434, 0.00225907, 0.00229894, 0.00233368,
0.00236302, 0.00238677, 0.00240473, 0.00241678, 0.00242283,
0.00242283, 0.00241678, 0.00240473, 0.00238677, 0.00236302,
0.00233368, 0.00229894, 0.00225907, 0.00221434, 0.00216509]]
def magicFormula(frameInt, kernel_size=60):
    # Per-channel distance from the target "pink" colour.
    pinkness = abs(190 - frameInt[:, :, 0]) \
        + abs(100 - frameInt[:, :, 1]) \
        + abs(100 - frameInt[:, :, 2])
    # Distance from a darker, shadowed variant of the same colour.
    shadowPinkness = abs(160 - frameInt[:, :, 0]) \
        + abs(90 - frameInt[:, :, 1]) \
        + abs(90 - frameInt[:, :, 2])
    # yellowness = abs(230 - frameInt[:, :, 0]) \
    #     + abs(140 - frameInt[:, :, 1]) \
    #     + abs(25 - frameInt[:, :, 2])
    # kernel = np.ones((kernel_size, kernel_size))  # kernel_size is currently unused
    # Smooth both distance maps with KERNEL and keep the smaller (closer) response
    # per pixel. filter2D needs a numpy kernel and an output depth compatible with
    # the float32 input, so convert KERNEL and use ddepth=-1 (same depth as source).
    kernel = np.asarray(KERNEL, dtype=np.float32)
    pinknessConfidence = np.minimum(
        cv2.filter2D(pinkness.astype(np.float32), -1, kernel),
        cv2.filter2D(shadowPinkness.astype(np.float32), -1, kernel))
    # pinknessConfidence = cv2.filter2D(pinkness.astype(np.float32), -1, kernel)
    return pinknessConfidence
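# Minimal usage sketch (an illustration, not part of the original pipeline): the
# file names and the threshold are hypothetical; magicFormula only expects an
# H x W x 3 integer frame and returns a smoothed colour-distance map in which
# lower values mean "closer to pink".
if __name__ == "__main__":
    example_frame = cv2.imread("example_frame.png")  # hypothetical input image
    if example_frame is not None:
        confidence = magicFormula(example_frame.astype(np.int32))
        mask = (confidence < 30).astype(np.uint8) * 255  # crude, illustrative cut-off
        cv2.imwrite("pink_mask.png", mask)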
|
py | b40514568452cb33d22985c5238738a644c24b51 | import numpy as np
import pytest
import pandas as pd
from pandas import (
DataFrame,
Series,
Timestamp,
)
import pandas._testing as tm
def test_filter_series():
s = Series([1, 3, 20, 5, 22, 24, 7])
expected_odd = Series([1, 3, 5, 7], index=[0, 1, 3, 6])
expected_even = Series([20, 22, 24], index=[2, 4, 5])
grouper = s.apply(lambda x: x % 2)
grouped = s.groupby(grouper)
tm.assert_series_equal(grouped.filter(lambda x: x.mean() < 10), expected_odd)
tm.assert_series_equal(grouped.filter(lambda x: x.mean() > 10), expected_even)
# Test dropna=False.
tm.assert_series_equal(
grouped.filter(lambda x: x.mean() < 10, dropna=False),
expected_odd.reindex(s.index),
)
tm.assert_series_equal(
grouped.filter(lambda x: x.mean() > 10, dropna=False),
expected_even.reindex(s.index),
)
def test_filter_single_column_df():
df = DataFrame([1, 3, 20, 5, 22, 24, 7])
expected_odd = DataFrame([1, 3, 5, 7], index=[0, 1, 3, 6])
expected_even = DataFrame([20, 22, 24], index=[2, 4, 5])
grouper = df[0].apply(lambda x: x % 2)
grouped = df.groupby(grouper)
tm.assert_frame_equal(grouped.filter(lambda x: x.mean() < 10), expected_odd)
tm.assert_frame_equal(grouped.filter(lambda x: x.mean() > 10), expected_even)
# Test dropna=False.
tm.assert_frame_equal(
grouped.filter(lambda x: x.mean() < 10, dropna=False),
expected_odd.reindex(df.index),
)
tm.assert_frame_equal(
grouped.filter(lambda x: x.mean() > 10, dropna=False),
expected_even.reindex(df.index),
)
def test_filter_multi_column_df():
df = DataFrame({"A": [1, 12, 12, 1], "B": [1, 1, 1, 1]})
grouper = df["A"].apply(lambda x: x % 2)
grouped = df.groupby(grouper)
expected = DataFrame({"A": [12, 12], "B": [1, 1]}, index=[1, 2])
tm.assert_frame_equal(
grouped.filter(lambda x: x["A"].sum() - x["B"].sum() > 10), expected
)
def test_filter_mixed_df():
df = DataFrame({"A": [1, 12, 12, 1], "B": "a b c d".split()})
grouper = df["A"].apply(lambda x: x % 2)
grouped = df.groupby(grouper)
expected = DataFrame({"A": [12, 12], "B": ["b", "c"]}, index=[1, 2])
tm.assert_frame_equal(grouped.filter(lambda x: x["A"].sum() > 10), expected)
def test_filter_out_all_groups():
s = Series([1, 3, 20, 5, 22, 24, 7])
grouper = s.apply(lambda x: x % 2)
grouped = s.groupby(grouper)
tm.assert_series_equal(grouped.filter(lambda x: x.mean() > 1000), s[[]])
df = DataFrame({"A": [1, 12, 12, 1], "B": "a b c d".split()})
grouper = df["A"].apply(lambda x: x % 2)
grouped = df.groupby(grouper)
tm.assert_frame_equal(grouped.filter(lambda x: x["A"].sum() > 1000), df.loc[[]])
def test_filter_out_no_groups():
s = Series([1, 3, 20, 5, 22, 24, 7])
grouper = s.apply(lambda x: x % 2)
grouped = s.groupby(grouper)
filtered = grouped.filter(lambda x: x.mean() > 0)
tm.assert_series_equal(filtered, s)
df = DataFrame({"A": [1, 12, 12, 1], "B": "a b c d".split()})
grouper = df["A"].apply(lambda x: x % 2)
grouped = df.groupby(grouper)
filtered = grouped.filter(lambda x: x["A"].mean() > 0)
tm.assert_frame_equal(filtered, df)
def test_filter_out_all_groups_in_df():
# GH12768
df = DataFrame({"a": [1, 1, 2], "b": [1, 2, 0]})
res = df.groupby("a")
res = res.filter(lambda x: x["b"].sum() > 5, dropna=False)
expected = DataFrame({"a": [np.nan] * 3, "b": [np.nan] * 3})
tm.assert_frame_equal(expected, res)
df = DataFrame({"a": [1, 1, 2], "b": [1, 2, 0]})
res = df.groupby("a")
res = res.filter(lambda x: x["b"].sum() > 5, dropna=True)
expected = DataFrame({"a": [], "b": []}, dtype="int64")
tm.assert_frame_equal(expected, res)
def test_filter_condition_raises():
def raise_if_sum_is_zero(x):
if x.sum() == 0:
raise ValueError
else:
return x.sum() > 0
s = Series([-1, 0, 1, 2])
grouper = s.apply(lambda x: x % 2)
grouped = s.groupby(grouper)
msg = "the filter must return a boolean result"
with pytest.raises(TypeError, match=msg):
grouped.filter(raise_if_sum_is_zero)
def test_filter_with_axis_in_groupby():
# issue 11041
index = pd.MultiIndex.from_product([range(10), [0, 1]])
data = DataFrame(np.arange(100).reshape(-1, 20), columns=index, dtype="int64")
result = data.groupby(level=0, axis=1).filter(lambda x: x.iloc[0, 0] > 10)
expected = data.iloc[:, 12:20]
tm.assert_frame_equal(result, expected)
def test_filter_bad_shapes():
df = DataFrame({"A": np.arange(8), "B": list("aabbbbcc"), "C": np.arange(8)})
s = df["B"]
g_df = df.groupby("B")
g_s = s.groupby(s)
f = lambda x: x
msg = "filter function returned a DataFrame, but expected a scalar bool"
with pytest.raises(TypeError, match=msg):
g_df.filter(f)
msg = "the filter must return a boolean result"
with pytest.raises(TypeError, match=msg):
g_s.filter(f)
f = lambda x: x == 1
msg = "filter function returned a DataFrame, but expected a scalar bool"
with pytest.raises(TypeError, match=msg):
g_df.filter(f)
msg = "the filter must return a boolean result"
with pytest.raises(TypeError, match=msg):
g_s.filter(f)
f = lambda x: np.outer(x, x)
msg = "can't multiply sequence by non-int of type 'str'"
with pytest.raises(TypeError, match=msg):
g_df.filter(f)
msg = "the filter must return a boolean result"
with pytest.raises(TypeError, match=msg):
g_s.filter(f)
def test_filter_nan_is_false():
df = DataFrame({"A": np.arange(8), "B": list("aabbbbcc"), "C": np.arange(8)})
s = df["B"]
g_df = df.groupby(df["B"])
g_s = s.groupby(s)
f = lambda x: np.nan
tm.assert_frame_equal(g_df.filter(f), df.loc[[]])
tm.assert_series_equal(g_s.filter(f), s[[]])
def test_filter_against_workaround():
np.random.seed(0)
# Series of ints
s = Series(np.random.randint(0, 100, 1000))
grouper = s.apply(lambda x: np.round(x, -1))
grouped = s.groupby(grouper)
f = lambda x: x.mean() > 10
old_way = s[grouped.transform(f).astype("bool")]
new_way = grouped.filter(f)
tm.assert_series_equal(new_way.sort_values(), old_way.sort_values())
# Series of floats
s = 100 * Series(np.random.random(1000))
grouper = s.apply(lambda x: np.round(x, -1))
grouped = s.groupby(grouper)
f = lambda x: x.mean() > 10
old_way = s[grouped.transform(f).astype("bool")]
new_way = grouped.filter(f)
tm.assert_series_equal(new_way.sort_values(), old_way.sort_values())
# Set up DataFrame of ints, floats, strings.
from string import ascii_lowercase
letters = np.array(list(ascii_lowercase))
N = 1000
random_letters = letters.take(np.random.randint(0, 26, N))
df = DataFrame(
{
"ints": Series(np.random.randint(0, 100, N)),
"floats": N / 10 * Series(np.random.random(N)),
"letters": Series(random_letters),
}
)
# Group by ints; filter on floats.
grouped = df.groupby("ints")
old_way = df[grouped.floats.transform(lambda x: x.mean() > N / 20).astype("bool")]
new_way = grouped.filter(lambda x: x["floats"].mean() > N / 20)
tm.assert_frame_equal(new_way, old_way)
# Group by floats (rounded); filter on strings.
grouper = df.floats.apply(lambda x: np.round(x, -1))
grouped = df.groupby(grouper)
old_way = df[grouped.letters.transform(lambda x: len(x) < N / 10).astype("bool")]
new_way = grouped.filter(lambda x: len(x.letters) < N / 10)
tm.assert_frame_equal(new_way, old_way)
# Group by strings; filter on ints.
grouped = df.groupby("letters")
old_way = df[grouped.ints.transform(lambda x: x.mean() > N / 20).astype("bool")]
new_way = grouped.filter(lambda x: x["ints"].mean() > N / 20)
tm.assert_frame_equal(new_way, old_way)
def test_filter_using_len():
# BUG GH4447
df = DataFrame({"A": np.arange(8), "B": list("aabbbbcc"), "C": np.arange(8)})
grouped = df.groupby("B")
actual = grouped.filter(lambda x: len(x) > 2)
expected = DataFrame(
{"A": np.arange(2, 6), "B": list("bbbb"), "C": np.arange(2, 6)},
index=np.arange(2, 6),
)
tm.assert_frame_equal(actual, expected)
actual = grouped.filter(lambda x: len(x) > 4)
expected = df.loc[[]]
tm.assert_frame_equal(actual, expected)
# Series have always worked properly, but we'll test anyway.
s = df["B"]
grouped = s.groupby(s)
actual = grouped.filter(lambda x: len(x) > 2)
expected = Series(4 * ["b"], index=np.arange(2, 6), name="B")
tm.assert_series_equal(actual, expected)
actual = grouped.filter(lambda x: len(x) > 4)
expected = s[[]]
tm.assert_series_equal(actual, expected)
def test_filter_maintains_ordering():
# Simple case: index is sequential. #4621
df = DataFrame(
{"pid": [1, 1, 1, 2, 2, 3, 3, 3], "tag": [23, 45, 62, 24, 45, 34, 25, 62]}
)
s = df["pid"]
grouped = df.groupby("tag")
actual = grouped.filter(lambda x: len(x) > 1)
expected = df.iloc[[1, 2, 4, 7]]
tm.assert_frame_equal(actual, expected)
grouped = s.groupby(df["tag"])
actual = grouped.filter(lambda x: len(x) > 1)
expected = s.iloc[[1, 2, 4, 7]]
tm.assert_series_equal(actual, expected)
# Now index is sequentially decreasing.
df.index = np.arange(len(df) - 1, -1, -1)
s = df["pid"]
grouped = df.groupby("tag")
actual = grouped.filter(lambda x: len(x) > 1)
expected = df.iloc[[1, 2, 4, 7]]
tm.assert_frame_equal(actual, expected)
grouped = s.groupby(df["tag"])
actual = grouped.filter(lambda x: len(x) > 1)
expected = s.iloc[[1, 2, 4, 7]]
tm.assert_series_equal(actual, expected)
# Index is shuffled.
SHUFFLED = [4, 6, 7, 2, 1, 0, 5, 3]
df.index = df.index[SHUFFLED]
s = df["pid"]
grouped = df.groupby("tag")
actual = grouped.filter(lambda x: len(x) > 1)
expected = df.iloc[[1, 2, 4, 7]]
tm.assert_frame_equal(actual, expected)
grouped = s.groupby(df["tag"])
actual = grouped.filter(lambda x: len(x) > 1)
expected = s.iloc[[1, 2, 4, 7]]
tm.assert_series_equal(actual, expected)
def test_filter_multiple_timestamp():
# GH 10114
df = DataFrame(
{
"A": np.arange(5, dtype="int64"),
"B": ["foo", "bar", "foo", "bar", "bar"],
"C": Timestamp("20130101"),
}
)
grouped = df.groupby(["B", "C"])
result = grouped["A"].filter(lambda x: True)
tm.assert_series_equal(df["A"], result)
result = grouped["A"].transform(len)
expected = Series([2, 3, 2, 3, 3], name="A")
tm.assert_series_equal(result, expected)
result = grouped.filter(lambda x: True)
tm.assert_frame_equal(df, result)
result = grouped.transform("sum")
expected = DataFrame({"A": [2, 8, 2, 8, 8]})
tm.assert_frame_equal(result, expected)
result = grouped.transform(len)
expected = DataFrame({"A": [2, 3, 2, 3, 3]})
tm.assert_frame_equal(result, expected)
def test_filter_and_transform_with_non_unique_int_index():
# GH4620
index = [1, 1, 1, 2, 1, 1, 0, 1]
df = DataFrame(
{"pid": [1, 1, 1, 2, 2, 3, 3, 3], "tag": [23, 45, 62, 24, 45, 34, 25, 62]},
index=index,
)
grouped_df = df.groupby("tag")
ser = df["pid"]
grouped_ser = ser.groupby(df["tag"])
expected_indexes = [1, 2, 4, 7]
# Filter DataFrame
actual = grouped_df.filter(lambda x: len(x) > 1)
expected = df.iloc[expected_indexes]
tm.assert_frame_equal(actual, expected)
actual = grouped_df.filter(lambda x: len(x) > 1, dropna=False)
expected = df.copy()
expected.iloc[[0, 3, 5, 6]] = np.nan
tm.assert_frame_equal(actual, expected)
# Filter Series
actual = grouped_ser.filter(lambda x: len(x) > 1)
expected = ser.take(expected_indexes)
tm.assert_series_equal(actual, expected)
actual = grouped_ser.filter(lambda x: len(x) > 1, dropna=False)
NA = np.nan
expected = Series([NA, 1, 1, NA, 2, NA, NA, 3], index, name="pid")
# ^ made manually because this can get confusing!
tm.assert_series_equal(actual, expected)
# Transform Series
actual = grouped_ser.transform(len)
expected = Series([1, 2, 2, 1, 2, 1, 1, 2], index, name="pid")
tm.assert_series_equal(actual, expected)
# Transform (a column from) DataFrameGroupBy
actual = grouped_df.pid.transform(len)
tm.assert_series_equal(actual, expected)
def test_filter_and_transform_with_multiple_non_unique_int_index():
# GH4620
index = [1, 1, 1, 2, 0, 0, 0, 1]
df = DataFrame(
{"pid": [1, 1, 1, 2, 2, 3, 3, 3], "tag": [23, 45, 62, 24, 45, 34, 25, 62]},
index=index,
)
grouped_df = df.groupby("tag")
ser = df["pid"]
grouped_ser = ser.groupby(df["tag"])
expected_indexes = [1, 2, 4, 7]
# Filter DataFrame
actual = grouped_df.filter(lambda x: len(x) > 1)
expected = df.iloc[expected_indexes]
tm.assert_frame_equal(actual, expected)
actual = grouped_df.filter(lambda x: len(x) > 1, dropna=False)
expected = df.copy()
expected.iloc[[0, 3, 5, 6]] = np.nan
tm.assert_frame_equal(actual, expected)
# Filter Series
actual = grouped_ser.filter(lambda x: len(x) > 1)
expected = ser.take(expected_indexes)
tm.assert_series_equal(actual, expected)
actual = grouped_ser.filter(lambda x: len(x) > 1, dropna=False)
NA = np.nan
expected = Series([NA, 1, 1, NA, 2, NA, NA, 3], index, name="pid")
# ^ made manually because this can get confusing!
tm.assert_series_equal(actual, expected)
# Transform Series
actual = grouped_ser.transform(len)
expected = Series([1, 2, 2, 1, 2, 1, 1, 2], index, name="pid")
tm.assert_series_equal(actual, expected)
# Transform (a column from) DataFrameGroupBy
actual = grouped_df.pid.transform(len)
tm.assert_series_equal(actual, expected)
def test_filter_and_transform_with_non_unique_float_index():
# GH4620
index = np.array([1, 1, 1, 2, 1, 1, 0, 1], dtype=float)
df = DataFrame(
{"pid": [1, 1, 1, 2, 2, 3, 3, 3], "tag": [23, 45, 62, 24, 45, 34, 25, 62]},
index=index,
)
grouped_df = df.groupby("tag")
ser = df["pid"]
grouped_ser = ser.groupby(df["tag"])
expected_indexes = [1, 2, 4, 7]
# Filter DataFrame
actual = grouped_df.filter(lambda x: len(x) > 1)
expected = df.iloc[expected_indexes]
tm.assert_frame_equal(actual, expected)
actual = grouped_df.filter(lambda x: len(x) > 1, dropna=False)
expected = df.copy()
expected.iloc[[0, 3, 5, 6]] = np.nan
tm.assert_frame_equal(actual, expected)
# Filter Series
actual = grouped_ser.filter(lambda x: len(x) > 1)
expected = ser.take(expected_indexes)
tm.assert_series_equal(actual, expected)
actual = grouped_ser.filter(lambda x: len(x) > 1, dropna=False)
NA = np.nan
expected = Series([NA, 1, 1, NA, 2, NA, NA, 3], index, name="pid")
# ^ made manually because this can get confusing!
tm.assert_series_equal(actual, expected)
# Transform Series
actual = grouped_ser.transform(len)
expected = Series([1, 2, 2, 1, 2, 1, 1, 2], index, name="pid")
tm.assert_series_equal(actual, expected)
# Transform (a column from) DataFrameGroupBy
actual = grouped_df.pid.transform(len)
tm.assert_series_equal(actual, expected)
def test_filter_and_transform_with_non_unique_timestamp_index():
# GH4620
t0 = Timestamp("2013-09-30 00:05:00")
t1 = Timestamp("2013-10-30 00:05:00")
t2 = Timestamp("2013-11-30 00:05:00")
index = [t1, t1, t1, t2, t1, t1, t0, t1]
df = DataFrame(
{"pid": [1, 1, 1, 2, 2, 3, 3, 3], "tag": [23, 45, 62, 24, 45, 34, 25, 62]},
index=index,
)
grouped_df = df.groupby("tag")
ser = df["pid"]
grouped_ser = ser.groupby(df["tag"])
expected_indexes = [1, 2, 4, 7]
# Filter DataFrame
actual = grouped_df.filter(lambda x: len(x) > 1)
expected = df.iloc[expected_indexes]
tm.assert_frame_equal(actual, expected)
actual = grouped_df.filter(lambda x: len(x) > 1, dropna=False)
expected = df.copy()
expected.iloc[[0, 3, 5, 6]] = np.nan
tm.assert_frame_equal(actual, expected)
# Filter Series
actual = grouped_ser.filter(lambda x: len(x) > 1)
expected = ser.take(expected_indexes)
tm.assert_series_equal(actual, expected)
actual = grouped_ser.filter(lambda x: len(x) > 1, dropna=False)
NA = np.nan
expected = Series([NA, 1, 1, NA, 2, NA, NA, 3], index, name="pid")
# ^ made manually because this can get confusing!
tm.assert_series_equal(actual, expected)
# Transform Series
actual = grouped_ser.transform(len)
expected = Series([1, 2, 2, 1, 2, 1, 1, 2], index, name="pid")
tm.assert_series_equal(actual, expected)
# Transform (a column from) DataFrameGroupBy
actual = grouped_df.pid.transform(len)
tm.assert_series_equal(actual, expected)
def test_filter_and_transform_with_non_unique_string_index():
# GH4620
index = list("bbbcbbab")
df = DataFrame(
{"pid": [1, 1, 1, 2, 2, 3, 3, 3], "tag": [23, 45, 62, 24, 45, 34, 25, 62]},
index=index,
)
grouped_df = df.groupby("tag")
ser = df["pid"]
grouped_ser = ser.groupby(df["tag"])
expected_indexes = [1, 2, 4, 7]
# Filter DataFrame
actual = grouped_df.filter(lambda x: len(x) > 1)
expected = df.iloc[expected_indexes]
tm.assert_frame_equal(actual, expected)
actual = grouped_df.filter(lambda x: len(x) > 1, dropna=False)
expected = df.copy()
expected.iloc[[0, 3, 5, 6]] = np.nan
tm.assert_frame_equal(actual, expected)
# Filter Series
actual = grouped_ser.filter(lambda x: len(x) > 1)
expected = ser.take(expected_indexes)
tm.assert_series_equal(actual, expected)
actual = grouped_ser.filter(lambda x: len(x) > 1, dropna=False)
NA = np.nan
expected = Series([NA, 1, 1, NA, 2, NA, NA, 3], index, name="pid")
# ^ made manually because this can get confusing!
tm.assert_series_equal(actual, expected)
# Transform Series
actual = grouped_ser.transform(len)
expected = Series([1, 2, 2, 1, 2, 1, 1, 2], index, name="pid")
tm.assert_series_equal(actual, expected)
# Transform (a column from) DataFrameGroupBy
actual = grouped_df.pid.transform(len)
tm.assert_series_equal(actual, expected)
def test_filter_has_access_to_grouped_cols():
df = DataFrame([[1, 2], [1, 3], [5, 6]], columns=["A", "B"])
g = df.groupby("A")
# previously didn't have access to col A #????
filt = g.filter(lambda x: x["A"].sum() == 2)
tm.assert_frame_equal(filt, df.iloc[[0, 1]])
def test_filter_enforces_scalarness():
df = DataFrame(
[
["best", "a", "x"],
["worst", "b", "y"],
["best", "c", "x"],
["best", "d", "y"],
["worst", "d", "y"],
["worst", "d", "y"],
["best", "d", "z"],
],
columns=["a", "b", "c"],
)
with pytest.raises(TypeError, match="filter function returned a.*"):
df.groupby("c").filter(lambda g: g["a"] == "best")
def test_filter_non_bool_raises():
df = DataFrame(
[
["best", "a", 1],
["worst", "b", 1],
["best", "c", 1],
["best", "d", 1],
["worst", "d", 1],
["worst", "d", 1],
["best", "d", 1],
],
columns=["a", "b", "c"],
)
with pytest.raises(TypeError, match="filter function returned a.*"):
df.groupby("a").filter(lambda g: g.c.mean())
def test_filter_dropna_with_empty_groups():
# GH 10780
data = Series(np.random.rand(9), index=np.repeat([1, 2, 3], 3))
groupped = data.groupby(level=0)
result_false = groupped.filter(lambda x: x.mean() > 1, dropna=False)
expected_false = Series([np.nan] * 9, index=np.repeat([1, 2, 3], 3))
tm.assert_series_equal(result_false, expected_false)
result_true = groupped.filter(lambda x: x.mean() > 1, dropna=True)
expected_true = Series(index=pd.Index([], dtype=int), dtype=np.float64)
tm.assert_series_equal(result_true, expected_true)
def test_filter_consistent_result_before_after_agg_func():
# GH 17091
df = DataFrame({"data": range(6), "key": list("ABCABC")})
grouper = df.groupby("key")
result = grouper.filter(lambda x: True)
expected = DataFrame({"data": range(6), "key": list("ABCABC")})
tm.assert_frame_equal(result, expected)
grouper.sum()
result = grouper.filter(lambda x: True)
tm.assert_frame_equal(result, expected)
|
py | b40514a0fda752e13a53b7e993c4b419563fedc3 | """GIRAFFE v2 simply uses separate decoders for each object.
"""
from im2scene.giraffev2 import (
config, training, models
)
__all__ = [
    'config', 'training', 'models'
]
|
py | b40515321d8d91de2f879f787445406c261435e1 | # Copyright (c) 2015 Ansible, Inc.
# All Rights Reserved
from django.core.management.base import BaseCommand
from django.conf import settings
from crum import impersonate
from awx.main.models import User, Organization, Project, Inventory, CredentialType, Credential, Host, JobTemplate, ExecutionEnvironment
from awx.main.signals import disable_computed_fields
class Command(BaseCommand):
"""Create preloaded data, intended for new installs"""
    help = 'Creates preloaded tower data if there is none.'
def handle(self, *args, **kwargs):
changed = False
# Create a default organization as the first superuser found.
try:
superuser = User.objects.filter(is_superuser=True).order_by('pk')[0]
except IndexError:
superuser = None
with impersonate(superuser):
with disable_computed_fields():
if not Organization.objects.exists():
o = Organization.objects.create(name='Default')
p = Project(
name='Demo Project',
scm_type='git',
scm_url='https://github.com/ansible/ansible-tower-samples',
scm_update_on_launch=True,
scm_update_cache_timeout=0,
organization=o,
)
p.save(skip_update=True)
ssh_type = CredentialType.objects.filter(namespace='ssh').first()
c = Credential.objects.create(
credential_type=ssh_type, name='Demo Credential', inputs={'username': superuser.username}, created_by=superuser
)
c.admin_role.members.add(superuser)
public_galaxy_credential = Credential(
name='Ansible Galaxy',
managed_by_tower=True,
credential_type=CredentialType.objects.get(kind='galaxy'),
inputs={'url': 'https://galaxy.ansible.com/'},
)
public_galaxy_credential.save()
o.galaxy_credentials.add(public_galaxy_credential)
i = Inventory.objects.create(name='Demo Inventory', organization=o, created_by=superuser)
Host.objects.create(
name='localhost',
inventory=i,
variables="ansible_connection: local\nansible_python_interpreter: '{{ ansible_playbook_python }}'",
created_by=superuser,
)
jt = JobTemplate.objects.create(name='Demo Job Template', playbook='hello_world.yml', project=p, inventory=i)
jt.credentials.add(c)
print('Default organization added.')
print('Demo Credential, Inventory, and Job Template added.')
changed = True
for ee in reversed(settings.DEFAULT_EXECUTION_ENVIRONMENTS):
_, created = ExecutionEnvironment.objects.get_or_create(name=ee['name'], defaults={'image': ee['image'], 'managed_by_tower': True})
if created:
changed = True
print('Default Execution Environment(s) registered.')
if changed:
print('(changed: True)')
else:
print('(changed: False)')
|
py | b405158094941277e5e4d12ff333520cf5c66ffc | from .reduplication import Replacement
class StemConversion:
def __init__(self, dictDescr, errorHandler=None):
self.stemConversions = {} #{source stem number -> {destination stem number ->
# [replacementObject1, ...]}}
self.errorHandler = errorHandler
try:
self.name = dictDescr['value']
replacements = dictDescr['content']
except KeyError:
self.raise_error('Wrong stem conversion: ', dictDescr)
return
stemBase = -1
dictsNewStem = []
for obj in replacements:
if obj['name'] == 'stem-base':
try:
stemBase = int(obj['value'])
except:
self.raise_error('Wrong base stem number: ', dictDescr)
return
elif obj['name'] == 'new-stem' and 'content' in obj:
try:
newStem = int(obj['value'])
except:
self.raise_error('Wrong new stem number: ', dictDescr)
return
dictsNewStem.append((obj['content'], newStem))
for obj, newStem in dictsNewStem:
self.add_conversion(obj, stemBase, newStem)
def raise_error(self, message, data=None):
if self.errorHandler is not None:
self.errorHandler.RaiseError(message, data)
def add_conversion(self, arrDictDescr, stemBase, newStem):
for repl in arrDictDescr:
try:
if repl['name'] != 'replace':
self.raise_error('Incorrect field in a stem conversion description: ',
repl)
continue
self.add_operation(stemBase, newStem, Replacement(repl))
except KeyError:
self.raise_error('Error in a stem conversion description: ',
repl)
def add_operation(self, stemBase, newStem, repl):
try:
dictBase = self.stemConversions[stemBase]
except KeyError:
self.stemConversions[stemBase] = {}
dictBase = self.stemConversions[stemBase]
try:
dictNew = dictBase[newStem]
except KeyError:
dictBase[newStem] = []
dictNew = dictBase[newStem]
dictNew.append(repl)
def convert(self, stems):
"""Fill in the gaps in the stems description (list of tuples).
The input is changed."""
for stemBase in sorted(self.stemConversions):
if stemBase < 0 or stemBase >= len(stems):
break
for newStem in sorted(self.stemConversions[stemBase]):
# if there is no such stem, add it to the list
for i in range(len(stems), newStem+1):
stems.append(())
# explicitly written stems have higher priority and shouldn't
                # be overwritten
if len(stems[newStem]) <= 0:
stems[newStem] = self.convert_one(stems[stemBase],
self.stemConversions[stemBase][newStem])
## print stems[newStem]
def convert_one(self, stemBaseVars, stemConversion):
newStemVars = []
for stem in stemBaseVars:
for rule in stemConversion:
stem = rule.convert(stem)
newStemVars.append(stem)
return tuple(newStemVars)
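# Illustrative sketch only: the outer dictionary layout below follows what
# __init__ and add_conversion parse ('value'/'content' keys, 'stem-base' and
# 'new-stem' entries), but the fields inside a 'replace' rule are defined by
# Replacement (in .reduplication, not shown here), so that part is left as a
# placeholder rather than guessed.
# example_descr = {
#     'value': 'derive-stem-1-from-stem-0',
#     'content': [
#         {'name': 'stem-base', 'value': '0'},
#         {'name': 'new-stem', 'value': '1',
#          'content': [{'name': 'replace'}]},  # plus Replacement-specific fields
#     ],
# }
# sc = StemConversion(example_descr)
# stems = [('stem0variant',)]
# sc.convert(stems)  # fills stems[1] by applying the rules to stems[0]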
|
py | b40515f9c3f7cbe96a8e37a1a1866590a6004b2c | import unittest
import pytest
import numpy as np
import pandas as pd
#from tests.definitions import DATA_PATH
from dowhy.causal_estimators.linear_regression_estimator import LinearRegressionEstimator
from dowhy.causal_identifier import IdentifiedEstimand
@pytest.fixture(params=[(10, 5, 100000), (10, 5, 100000)])
def linear_dataset(request):  # params: (beta, num_common_causes, num_samples)
beta = request.param[0]
num_common_causes = request.param[1]
num_samples = request.param[2]
range_c1 = beta*0.5
range_c2 = beta*0.5
means = np.random.uniform(-1, 1, num_common_causes)
cov_mat = np.diag(np.ones(num_common_causes))
X = np.random.multivariate_normal(means, cov_mat, num_samples)
c1 = np.random.uniform(0, range_c1, num_common_causes)
c2 = np.random.uniform(0, range_c2, num_common_causes)
t = X @ c1 #+ np.random.normal(0, 0.01)
y = X @ c2 + beta*t #+ np.random.normal(0,0.01)
print(c1)
print(c2)
ty = np.column_stack((t,y))
data = np.column_stack((X, ty))
treatment = "t"
outcome = "y"
common_causes = [("X"+str(i)) for i in range(0, num_common_causes)]
ate = beta
instruments = None
other_variables = None
col_names = common_causes + [treatment, outcome]
data = pd.DataFrame(data,
columns = col_names)
ret_dict = {
"data": data,
"treatment": treatment,
"outcome": outcome,
"common_causes": common_causes,
"ate": beta}
return(ret_dict)
#@pytest.mark.usefixtures("simulate_linear_dataset")
class TestLinearRegressionEstimator(object):
@pytest.mark.parametrize("error_tolerance", [0.01, 0.05])
def test_average_treatment_effect(self, linear_dataset, error_tolerance):
data = linear_dataset["data"]
true_ate = linear_dataset["ate"]
target_estimand = IdentifiedEstimand(
treatment_variable = linear_dataset["treatment"],
outcome_variable = linear_dataset["outcome"],
backdoor_variables = linear_dataset["common_causes"])
estimator_ate = LinearRegressionEstimator(
data,
identified_estimand = target_estimand,
treatment=linear_dataset["treatment"],
outcome=linear_dataset["outcome"])
est_ate = estimator_ate.estimate_effect()
error = est_ate.value - true_ate
print("Error in ATE estimate = {0} with tolerance {1}%. Estimated={2},True={3}".format(
error, error_tolerance*100, est_ate.value, true_ate))
res = True if (error < true_ate*error_tolerance) else False
assert res
|
py | b40516e955a3a5f1b1ab36ca0be605bd6df8c586 | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import io
import os
import sys
from typing import List
from typing import Optional
from typing import Union
import librosa
import numpy as np
import paddle
import soundfile
import yaml
from yacs.config import CfgNode
from ..executor import BaseExecutor
from ..utils import cli_register
from ..utils import download_and_decompress
from ..utils import logger
from ..utils import MODEL_HOME
from paddlespeech.s2t.frontend.featurizer.text_featurizer import TextFeaturizer
from paddlespeech.s2t.transform.transformation import Transformation
from paddlespeech.s2t.utils.dynamic_import import dynamic_import
from paddlespeech.s2t.utils.utility import UpdateConfig
__all__ = ['ASRExecutor']
pretrained_models = {
"wenetspeech_zh_16k": {
'url':
'https://paddlespeech.bj.bcebos.com/s2t/wenetspeech/conformer.model.tar.gz',
'md5':
'54e7a558a6e020c2f5fb224874943f97',
'cfg_path':
'conf/conformer.yaml',
'ckpt_path':
'exp/conformer/checkpoints/wenetspeech',
},
"transformer_zh_16k": {
'url':
'https://paddlespeech.bj.bcebos.com/s2t/aishell/asr1/transformer.model.tar.gz',
'md5':
'4e8b63800c71040b9390b150e2a5d4c4',
'cfg_path':
'conf/transformer.yaml',
'ckpt_path':
'exp/transformer/checkpoints/avg_20',
}
}
model_alias = {
"ds2_offline": "paddlespeech.s2t.models.ds2:DeepSpeech2Model",
"ds2_online": "paddlespeech.s2t.models.ds2_online:DeepSpeech2ModelOnline",
"conformer": "paddlespeech.s2t.models.u2:U2Model",
"transformer": "paddlespeech.s2t.models.u2:U2Model",
"wenetspeech": "paddlespeech.s2t.models.u2:U2Model",
}
@cli_register(
name='paddlespeech.asr', description='Speech to text infer command.')
class ASRExecutor(BaseExecutor):
def __init__(self):
super(ASRExecutor, self).__init__()
self.parser = argparse.ArgumentParser(
prog='paddlespeech.asr', add_help=True)
self.parser.add_argument(
'--input', type=str, required=True, help='Audio file to recognize.')
self.parser.add_argument(
'--model',
type=str,
default='wenetspeech',
help='Choose model type of asr task.')
self.parser.add_argument(
'--lang',
type=str,
default='zh',
help='Choose model language. zh or en')
self.parser.add_argument(
"--sr",
type=int,
default=16000,
choices=[8000, 16000],
help='Choose the audio sample rate of the model. 8000 or 16000')
self.parser.add_argument(
'--config',
type=str,
default=None,
help='Config of asr task. Use deault config when it is None.')
self.parser.add_argument(
'--ckpt_path',
type=str,
default=None,
help='Checkpoint file of model.')
self.parser.add_argument(
'--device',
type=str,
default=paddle.get_device(),
help='Choose device to execute model inference.')
def _get_pretrained_path(self, tag: str) -> os.PathLike:
"""
Download and returns pretrained resources path of current task.
"""
assert tag in pretrained_models, 'Can not find pretrained resources of {}.'.format(
tag)
res_path = os.path.join(MODEL_HOME, tag)
decompressed_path = download_and_decompress(pretrained_models[tag],
res_path)
decompressed_path = os.path.abspath(decompressed_path)
logger.info(
'Use pretrained model stored in: {}'.format(decompressed_path))
return decompressed_path
def _init_from_path(self,
model_type: str='wenetspeech',
lang: str='zh',
sample_rate: int=16000,
cfg_path: Optional[os.PathLike]=None,
ckpt_path: Optional[os.PathLike]=None):
"""
Init model and other resources from a specific path.
"""
if cfg_path is None or ckpt_path is None:
sample_rate_str = '16k' if sample_rate == 16000 else '8k'
tag = model_type + '_' + lang + '_' + sample_rate_str
res_path = self._get_pretrained_path(tag) # wenetspeech_zh
self.res_path = res_path
self.cfg_path = os.path.join(res_path,
pretrained_models[tag]['cfg_path'])
self.ckpt_path = os.path.join(
res_path, pretrained_models[tag]['ckpt_path'] + ".pdparams")
logger.info(res_path)
logger.info(self.cfg_path)
logger.info(self.ckpt_path)
else:
self.cfg_path = os.path.abspath(cfg_path)
self.ckpt_path = os.path.abspath(ckpt_path + ".pdparams")
res_path = os.path.dirname(
os.path.dirname(os.path.abspath(self.cfg_path)))
#Init body.
self.config = CfgNode(new_allowed=True)
self.config.merge_from_file(self.cfg_path)
self.config.decoding.decoding_method = "attention_rescoring"
with UpdateConfig(self.config):
if model_type == "ds2_online" or model_type == "ds2_offline":
from paddlespeech.s2t.io.collator import SpeechCollator
self.config.collator.vocab_filepath = os.path.join(
res_path, self.config.collator.vocab_filepath)
self.config.collator.mean_std_filepath = os.path.join(
res_path, self.config.collator.cmvn_path)
self.collate_fn_test = SpeechCollator.from_config(self.config)
text_feature = TextFeaturizer(
unit_type=self.config.collator.unit_type,
vocab_filepath=self.config.collator.vocab_filepath,
spm_model_prefix=self.config.collator.spm_model_prefix)
self.config.model.input_dim = self.collate_fn_test.feature_size
self.config.model.output_dim = text_feature.vocab_size
elif model_type == "conformer" or model_type == "transformer" or model_type == "wenetspeech":
self.config.collator.vocab_filepath = os.path.join(
res_path, self.config.collator.vocab_filepath)
self.config.collator.augmentation_config = os.path.join(
res_path, self.config.collator.augmentation_config)
self.config.collator.spm_model_prefix = os.path.join(
res_path, self.config.collator.spm_model_prefix)
text_feature = TextFeaturizer(
unit_type=self.config.collator.unit_type,
vocab_filepath=self.config.collator.vocab_filepath,
spm_model_prefix=self.config.collator.spm_model_prefix)
self.config.model.input_dim = self.config.collator.feat_dim
self.config.model.output_dim = text_feature.vocab_size
else:
raise Exception("wrong type")
# Enter the path of model root
model_class = dynamic_import(model_type, model_alias)
model_conf = self.config.model
logger.info(model_conf)
model = model_class.from_config(model_conf)
self.model = model
self.model.eval()
# load model
model_dict = paddle.load(self.ckpt_path)
self.model.set_state_dict(model_dict)
def preprocess(self, model_type: str, input: Union[str, os.PathLike]):
"""
Input preprocess and return paddle.Tensor stored in self.input.
        Input content can be a text (tts), a file (asr, cls) or a stream (not supported yet).
"""
audio_file = input
logger.info("Preprocess audio_file:" + audio_file)
# Get the object for feature extraction
if model_type == "ds2_online" or model_type == "ds2_offline":
audio, _ = self.collate_fn_test.process_utterance(
audio_file=audio_file, transcript=" ")
audio_len = audio.shape[0]
audio = paddle.to_tensor(audio, dtype='float32')
audio_len = paddle.to_tensor(audio_len)
audio = paddle.unsqueeze(audio, axis=0)
            vocab_list = self.collate_fn_test.vocab_list
self._inputs["audio"] = audio
self._inputs["audio_len"] = audio_len
logger.info(f"audio feat shape: {audio.shape}")
elif model_type == "conformer" or model_type == "transformer" or model_type == "wenetspeech":
logger.info("get the preprocess conf")
preprocess_conf_file = self.config.collator.augmentation_config
# redirect the cmvn path
with io.open(preprocess_conf_file, encoding="utf-8") as f:
preprocess_conf = yaml.safe_load(f)
for idx, process in enumerate(preprocess_conf["process"]):
if process['type'] == "cmvn_json":
preprocess_conf["process"][idx][
"cmvn_path"] = os.path.join(
self.res_path,
preprocess_conf["process"][idx]["cmvn_path"])
break
logger.info(preprocess_conf)
preprocess_args = {"train": False}
preprocessing = Transformation(preprocess_conf)
logger.info("read the audio file")
audio, audio_sample_rate = soundfile.read(
audio_file, dtype="int16", always_2d=True)
if self.change_format:
if audio.shape[1] >= 2:
audio = audio.mean(axis=1, dtype=np.int16)
else:
audio = audio[:, 0]
# pcm16 -> pcm 32
audio = self._pcm16to32(audio)
audio = librosa.resample(audio, audio_sample_rate,
self.sample_rate)
audio_sample_rate = self.sample_rate
# pcm32 -> pcm 16
audio = self._pcm32to16(audio)
else:
audio = audio[:, 0]
logger.info(f"audio shape: {audio.shape}")
# fbank
audio = preprocessing(audio, **preprocess_args)
audio_len = paddle.to_tensor(audio.shape[0])
audio = paddle.to_tensor(audio, dtype='float32').unsqueeze(axis=0)
text_feature = TextFeaturizer(
unit_type=self.config.collator.unit_type,
vocab_filepath=self.config.collator.vocab_filepath,
spm_model_prefix=self.config.collator.spm_model_prefix)
self._inputs["audio"] = audio
self._inputs["audio_len"] = audio_len
logger.info(f"audio feat shape: {audio.shape}")
else:
raise Exception("wrong type")
@paddle.no_grad()
def infer(self, model_type: str):
"""
Model inference and result stored in self.output.
"""
text_feature = TextFeaturizer(
unit_type=self.config.collator.unit_type,
vocab_filepath=self.config.collator.vocab_filepath,
spm_model_prefix=self.config.collator.spm_model_prefix)
cfg = self.config.decoding
audio = self._inputs["audio"]
audio_len = self._inputs["audio_len"]
if model_type == "ds2_online" or model_type == "ds2_offline":
result_transcripts = self.model.decode(
audio,
audio_len,
text_feature.vocab_list,
decoding_method=cfg.decoding_method,
lang_model_path=cfg.lang_model_path,
beam_alpha=cfg.alpha,
beam_beta=cfg.beta,
beam_size=cfg.beam_size,
cutoff_prob=cfg.cutoff_prob,
cutoff_top_n=cfg.cutoff_top_n,
num_processes=cfg.num_proc_bsearch)
self._outputs["result"] = result_transcripts[0]
elif model_type == "conformer" or model_type == "transformer" or model_type == "wenetspeech":
result_transcripts = self.model.decode(
audio,
audio_len,
text_feature=text_feature,
decoding_method=cfg.decoding_method,
lang_model_path=cfg.lang_model_path,
beam_alpha=cfg.alpha,
beam_beta=cfg.beta,
beam_size=cfg.beam_size,
cutoff_prob=cfg.cutoff_prob,
cutoff_top_n=cfg.cutoff_top_n,
num_processes=cfg.num_proc_bsearch,
ctc_weight=cfg.ctc_weight,
decoding_chunk_size=cfg.decoding_chunk_size,
num_decoding_left_chunks=cfg.num_decoding_left_chunks,
simulate_streaming=cfg.simulate_streaming)
self._outputs["result"] = result_transcripts[0][0]
else:
raise Exception("invalid model name")
def postprocess(self) -> Union[str, os.PathLike]:
"""
Output postprocess and return human-readable results such as texts and audio files.
"""
return self._outputs["result"]
def _pcm16to32(self, audio):
assert (audio.dtype == np.int16)
audio = audio.astype("float32")
bits = np.iinfo(np.int16).bits
audio = audio / (2**(bits - 1))
return audio
def _pcm32to16(self, audio):
assert (audio.dtype == np.float32)
bits = np.iinfo(np.int16).bits
audio = audio * (2**(bits - 1))
audio = np.round(audio).astype("int16")
return audio
def _check(self, audio_file: str, sample_rate: int):
self.sample_rate = sample_rate
if self.sample_rate != 16000 and self.sample_rate != 8000:
logger.error("please input --sr 8000 or --sr 16000")
raise Exception("invalid sample rate")
sys.exit(-1)
if not os.path.isfile(audio_file):
logger.error("Please input the right audio file path")
sys.exit(-1)
logger.info("checking the audio file format......")
try:
audio, audio_sample_rate = soundfile.read(
audio_file, dtype="int16", always_2d=True)
except Exception as e:
logger.error(str(e))
logger.error(
"can not open the audio file, please check the audio file format is 'wav'. \n \
you can try to use sox to change the file format.\n \
For example: \n \
sample rate: 16k \n \
sox input_audio.xx --rate 16k --bits 16 --channels 1 output_audio.wav \n \
sample rate: 8k \n \
sox input_audio.xx --rate 8k --bits 16 --channels 1 output_audio.wav \n \
")
sys.exit(-1)
logger.info("The sample rate is %d" % audio_sample_rate)
if audio_sample_rate != self.sample_rate:
logger.warning("The sample rate of the input file is not {}.\n \
The program will resample the wav file to {}.\n \
If the result does not meet your expectations,\n \
Please input the 16k 16 bit 1 channel wav file. \
".format(self.sample_rate, self.sample_rate))
while (True):
logger.info(
"Whether to change the sample rate and the channel. Y: change the sample. N: exit the prgream."
)
content = input("Input(Y/N):")
if content.strip() == "Y" or content.strip(
) == "y" or content.strip() == "yes" or content.strip() == "Yes":
logger.info(
"change the sampele rate, channel to 16k and 1 channel")
break
elif content.strip() == "N" or content.strip(
) == "n" or content.strip() == "no" or content.strip() == "No":
logger.info("Exit the program")
exit(1)
else:
logger.warning("Not regular input, please input again")
self.change_format = True
else:
logger.info("The audio file format is right")
self.change_format = False
def execute(self, argv: List[str]) -> bool:
"""
Command line entry.
"""
parser_args = self.parser.parse_args(argv)
model = parser_args.model
lang = parser_args.lang
sample_rate = parser_args.sr
config = parser_args.config
ckpt_path = parser_args.ckpt_path
audio_file = parser_args.input
device = parser_args.device
try:
res = self(model, lang, sample_rate, config, ckpt_path, audio_file,
device)
logger.info('ASR Result: {}'.format(res))
return True
except Exception as e:
print(e)
return False
def __call__(self, model, lang, sample_rate, config, ckpt_path, audio_file,
device):
"""
Python API to call an executor.
"""
audio_file = os.path.abspath(audio_file)
self._check(audio_file, sample_rate)
paddle.set_device(device)
self._init_from_path(model, lang, sample_rate, config, ckpt_path)
self.preprocess(model, audio_file)
self.infer(model)
res = self.postprocess() # Retrieve result of asr.
return res
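# Minimal usage sketch of the Python API (the wav path is hypothetical; passing
# None for config/ckpt_path makes _init_from_path download the bundled
# pretrained model for the chosen tag, e.g. wenetspeech_zh_16k).
if __name__ == "__main__":
    asr = ASRExecutor()
    transcript = asr(
        'wenetspeech',         # model
        'zh',                  # lang
        16000,                 # sample_rate
        None,                  # config (use the pretrained model's config)
        None,                  # ckpt_path
        'input_16k.wav',       # audio_file (hypothetical path)
        paddle.get_device())   # device
    print(transcript)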
|
py | b405183c3d7303cfd839242e2a86d62af7c63018 | class PokemonBasic:
    def __init__(self, name='Default', hp=0, weakness='None', type='Unknown'):
self.name = name
self.hit_point = hp
self.weakness = weakness
self.type = type
def get_type(self):
return 'Main type: ' + self.type
def get_move(self):
return 'Basic move: ' + 'Quick Attack'
def __str__(self):
return "Name: " + self.name + ", HP: " +str(self.hit_point) + ", Weakness: " + self.weakness
class PokemonExtra(PokemonBasic):
    def __init__(self, name='Default', hp=0, weakness='None', type='Unknown', *args):
super().__init__(name=name, hp=hp, weakness=weakness, type=type)
self.lst = args
def get_type(self):
if len(self.lst)==0:
return super().get_type()
else:
return f"Main type: {self.type}, Secondary type: {self.lst[0]}"
def get_move(self):
if len(self.lst)==0:
return super().get_move()
else:
sum1 = ""
for elm in self.lst[1]:
sum1 += elm + ', '
sum1 = sum1.rstrip(", ")
return f"Basic move: Quick Attack\nOther move: {sum1}"
print('\n------------Basic Info:--------------')
pk = PokemonBasic()
print(pk)
print(pk.get_type())
print(pk.get_move())
print('\n------------Pokemon 1 Info:-------------')
charmander = PokemonExtra('Charmander', 39, 'Water',
'Fire')
print(charmander)
print(charmander.get_type())
print(charmander.get_move())
print('\n------------Pokemon 2 Info:-------------')
charizard = PokemonExtra('Charizard', 78, 'Water',
'Fire', 'Flying', ('Fire Spin', 'Fire Blaze'))
print(charizard)
print(charizard.get_type())
print(charizard.get_move()) |
py | b405188ad4cc8ae9318dc246b8d8d68605938094 | # Copyright (C) 2020, K.I.A.Derouiche <[email protected]>
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""provides information about NetBSD's distributions and Pkgsrc's framework"""
import csv
import datetime
import os
def convert_date(string):
'''
Convert a date string in ISO 8601 into a datetime object.
'''
if not string:
date = None
else:
parts = [int(x) for x in string.split("-")]
if len(parts) == 3:
(year, month, day) = parts
date = datetime.date(year, month, day)
elif len(parts) == 2:
(year, month) = parts
if month == 12:
date = datetime.date(year, month, 31)
else:
date = datetime.date(year, month + 1, 1) - datetime.timedelta(1)
else:
raise ValueError("Date not in ISO 8601 format.")
return date
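# Worked examples of the fallback logic above (for illustration):
# convert_date("2020-06-15") -> datetime.date(2020, 6, 15)
# convert_date("2020-06")    -> datetime.date(2020, 6, 30)  (last day of that month)
# convert_date("2020-12")    -> datetime.date(2020, 12, 31)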
def _get_data_dir()-> str:
'''
Get the data directory based on the module location.
'''
return os.path.abspath(os.path.dirname(__file__))
class DistroDataOutdated(Exception):
'''
Distribution data outdated
'''
def __init__(self):
super().__init__(
"Distribution data outdated. Please check for an update for distro-info-data. "
"See /usr/share/doc/info-data/README.NetBSD for details."
)
class DistroRelease(object):
"""Represents a distributions release"""
# pylint: disable=too-few-public-methods
def __init__(self, version, codename, series, created=None, release=None, eol=None,
eol_server=None):
# pylint: disable=too-many-arguments
self.version = version
self.codename = codename
self.series = series
self.created = created
self.release = release
self.eol = eol
self.eol_server = eol_server
def is_supported(self, date):
"""Check whether this release is supported on the given date."""
return date >= self.created and (
self.eol is None
or date <= self.eol
or (self.eol_server is not None and date <= self.eol_server)
)
def _get_date(row, column):
return convert_date(row[column]) if column in row else None
class DistroInfo(object):
"""Base class for distribution information.
    Use NetBSDDistroInfo or PkgsrcFrameworkInfo instead of using this directly.
"""
def __init__(self, distro):
self._distro = distro
filename = os.path.join(_get_data_dir(), distro.lower() + ".csv")
csvfile = open(filename)
csv_reader = csv.DictReader(csvfile)
self._releases = []
for row in csv_reader:
release = DistroRelease(
row["version"],
row["codename"],
row["series"],
_get_date(row, "created"),
_get_date(row, "release"),
_get_date(row, "eol"),
_get_date(row, "eol-server"),
)
self._releases.append(release)
csvfile.close()
self._date = datetime.date.today()
@property
def all(self):
"""List codenames of all known distributions."""
return [x.series for x in self._releases]
def get_all(self, result="codename"):
"""List all known distributions."""
return [self._format(result, x) for x in self._releases]
def _avail(self, date):
"""Return all distributions that were available on the given date."""
return [x for x in self._releases if date >= x.created]
def codename(self, release, date=None, default=None):
"""Map codename aliases to the codename they describe."""
# pylint: disable=no-self-use,unused-argument
return release
def version(self, name, default=None):
"""Map codename or series to version"""
for release in self._releases:
if name in (release.codename, release.series):
return release.version
return default
def devel(self, date=None, result="codename"):
"""Get latest development distribution based on the given date."""
if date is None:
date = self._date
distros = [
x
for x in self._avail(date)
if x.release is None or (date < x.release and (x.eol is None or date <= x.eol))
]
if not distros:
raise DistroDataOutdated()
return self._format(result, distros[-1])
def _format(self, format_string, release):
"""Format a given distribution entry."""
if format_string == "object":
return release
if format_string == "codename":
return release.series
if format_string == "fullname":
return self._distro + " " + release.version + ' "' + release.codename + '"'
if format_string == "release":
return release.version
raise ValueError(
"Only codename, fullname, object, and release are allowed "
"result values, but not '" + format_string + "'."
)
def stable(self, date=None, result="codename"):
"""Get latest stable distribution based on the given date."""
if date is None:
date = self._date
distros = [
x
for x in self._avail(date)
if x.release is not None and date >= x.release and (x.eol is None or date <= x.eol)
]
if not distros:
raise DistroDataOutdated()
return self._format(result, distros[-1])
def supported(self, date=None, result=None):
"""Get list of all supported distributions based on the given date."""
raise NotImplementedError()
def valid(self, codename):
"""Check if the given codename is known."""
return codename in self.all
def unsupported(self, date=None, result="codename"):
"""Get list of all unsupported distributions based on the given date."""
if date is None:
date = self._date
supported = self.supported(date)
distros = [self._format(result, x) for x in self._avail(date) if x.series not in supported]
return distros
class NetBSDDistroInfo(DistroInfo):
"""provides information about Debian's distributions"""
def __init__(self):
super().__init__("netbsd")
def codename(self, release, date=None, default=None):
"""Map 'unstable', 'testing', etc. to their codenames."""
if release == "unstable":
codename = self.devel(date)
elif release == "testing":
codename = self.testing(date)
elif release == "stable":
codename = self.stable(date)
elif release == "oldstable":
codename = self.old(date)
else:
codename = default
return codename
def devel(self, date=None, result="codename"):
"""Get latest development distribution based on the given date."""
if date is None:
date = self._date
distros = [
x
for x in self._avail(date)
if x.release is None or (date < x.release and (x.eol is None or date <= x.eol))
]
if len(distros) < 2:
raise DistroDataOutdated()
return self._format(result, distros[-2])
def old(self, date=None, result="codename"):
"""Get old (stable) Debian distribution based on the given date."""
if date is None:
date = self._date
distros = [x for x in self._avail(date) if x.release is not None and date >= x.release]
if len(distros) < 2:
raise DistroDataOutdated()
return self._format(result, distros[-2])
def supported(self, date=None, result="codename"):
"""Get list of all supported Debian distributions based on the given
date."""
if date is None:
date = self._date
distros = [
self._format(result, x) for x in self._avail(date) if x.eol is None or date <= x.eol
]
return distros
def testing(self, date=None, result="codename"):
"""Get latest testing Debian distribution based on the given date."""
if date is None:
date = self._date
distros = [
x
for x in self._avail(date)
if (x.release is None and x.version)
or (x.release is not None and date < x.release and (x.eol is None or date <= x.eol))
]
if not distros:
raise DistroDataOutdated()
return self._format(result, distros[-1])
def valid(self, codename):
"""Check if the given codename is known."""
return DistroInfo.valid(self, codename) or codename in [
"unstable",
"testing",
"stable",
"oldstable",
]
class PkgsrcFrameworkInfo(DistroInfo):
"""provides information about Ubuntu's distributions"""
def __init__(self):
super().__init__("pkgsrc")
def lts(self, date=None, result="codename"):
"""Get latest long term support (LTS) Ubuntu distribution based on the
given date."""
if date is None:
date = self._date
distros = [
x for x in self._releases if x.version.find("LTS") >= 0 and x.release <= date <= x.eol
]
if not distros:
raise DistroDataOutdated()
return self._format(result, distros[-1])
def is_lts(self, codename):
"""Is codename an LTS release?"""
distros = [x for x in self._releases if x.series == codename]
if not distros:
return False
return "LTS" in distros[0].version
def supported(self, date=None, result="codename"):
"""Get list of all supported Ubuntu distributions based on the given
date."""
if date is None:
date = self._date
distros = [
self._format(result, x)
for x in self._avail(date)
if date <= x.eol or (x.eol_server is not None and date <= x.eol_server)
]
return distros
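# Minimal usage sketch (illustrative only: it assumes the netbsd.csv / pkgsrc.csv
# data files are present next to this module, as _get_data_dir() expects).
if __name__ == "__main__":
    netbsd = NetBSDDistroInfo()
    print(netbsd.all)          # every known series codename
    print(netbsd.stable())     # latest stable release as of today
    print(netbsd.supported())  # all currently supported releases
    pkgsrc = PkgsrcFrameworkInfo()
    print(pkgsrc.supported())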
|
py | b405189a49cdd1e4a7ce7bee338411debb1c8cf6 | import json
from app.api.now_applications.models.activity_detail.underground_exploration_type import UndergroundExplorationType
class TestGetUndergroundExplorationType:
"""GET /now-applications/underground-exploration-types"""
def test_get_underground_exploration_types(self, test_client, db_session, auth_headers):
"""Should return the correct number of records with a 200 response code"""
get_resp = test_client.get(
f'/now-applications/underground-exploration-types',
headers=auth_headers['full_auth_header'])
get_data = json.loads(get_resp.data.decode())
assert get_resp.status_code == 200
assert len(get_data['records']) == len(UndergroundExplorationType.get_active()) |
py | b405191c58c2971584ed761a636316ff24669847 | import mxnet as mx
"""
demo symbol of using modulated deformable convolution
"""
def modulated_deformable_conv(data, name, num_filter, stride, lr_mult=0.1):
weight_var = mx.sym.Variable(name=name+'_conv2_offset_weight', init=mx.init.Zero(), lr_mult=lr_mult)
bias_var = mx.sym.Variable(name=name+'_conv2_offset_bias', init=mx.init.Zero(), lr_mult=lr_mult)
conv2_offset = mx.symbol.Convolution(name=name + '_conv2_offset', data=data, num_filter=27,
pad=(1, 1), kernel=(3, 3), stride=stride, weight=weight_var, bias=bias_var, lr_mult=lr_mult)
conv2_offset_t = mx.sym.slice_axis(conv2_offset, axis=1, begin=0, end=18)
conv2_mask = mx.sym.slice_axis(conv2_offset, axis=1, begin=18, end=None)
conv2_mask = 2 * mx.sym.Activation(conv2_mask, act_type='sigmoid')
conv2 = mx.contrib.symbol.ModulatedDeformableConvolution(name=name + '_conv2', data=data, offset=conv2_offset_t, mask=conv2_mask,
num_filter=num_filter, pad=(1, 1), kernel=(3, 3), stride=stride,
num_deformable_group=1, no_bias=True)
return conv2
"""
demo symbol of using modulated deformable RoI pooling
"""
def modulated_deformable_roi_pool(data, rois, spatial_scale, imfeat_dim=256, deform_fc_dim=1024, roi_size=7, trans_std=0.1):
roi_align = mx.contrib.sym.DeformablePSROIPooling(name='roi_align',
data=data,
rois=rois,
group_size=1,
pooled_size=roi_size,
sample_per_part=2,
no_trans=True,
part_size=roi_size,
output_dim=imfeat_dim,
spatial_scale=spatial_scale)
feat_deform = mx.symbol.FullyConnected(name='fc_deform_1', data=roi_align, num_hidden=deform_fc_dim)
feat_deform = mx.sym.Activation(data=feat_deform, act_type='relu', name='fc_deform_1_relu')
feat_deform = mx.symbol.FullyConnected(name='fc_deform_2', data=feat_deform, num_hidden=deform_fc_dim)
feat_deform = mx.sym.Activation(data=feat_deform, act_type='relu', name='fc_deform_2_relu')
feat_deform = mx.symbol.FullyConnected(name='fc_deform_3', data=feat_deform, num_hidden=roi_size * roi_size * 3)
roi_offset = mx.sym.slice_axis(feat_deform, axis=1, begin=0, end=roi_size * roi_size * 2)
roi_offset = mx.sym.reshape(roi_offset, shape=(-1, 2, roi_size, roi_size))
roi_mask = mx.sym.slice_axis(feat_deform, axis=1, begin=roi_size * roi_size * 2, end=None)
roi_mask_sigmoid = mx.sym.Activation(roi_mask, act_type='sigmoid')
roi_mask_sigmoid = mx.sym.reshape(roi_mask_sigmoid, shape=(-1, 1, roi_size, roi_size))
deform_roi_pool = mx.contrib.sym.DeformablePSROIPooling(name='deform_roi_pool',
data=data,
rois=rois,
trans=roi_offset,
group_size=1,
pooled_size=roi_size,
sample_per_part=2,
no_trans=False,
part_size=roi_size,
output_dim=imfeat_dim,
spatial_scale=spatial_scale,
trans_std=trans_std)
modulated_deform_roi_pool = mx.sym.broadcast_mul(deform_roi_pool, roi_mask_sigmoid)
return modulated_deform_roi_pool
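# Minimal usage sketch (shapes, names and the spatial scale are illustrative
# only, and it assumes an MXNet build that registers the contrib deformable
# operators used above).
if __name__ == "__main__":
    data = mx.sym.Variable("data")   # e.g. an (N, C, H, W) feature map
    rois = mx.sym.Variable("rois")   # (num_rois, 5): [batch_idx, x1, y1, x2, y2]
    conv_feat = modulated_deformable_conv(data, name="res5a", num_filter=256, stride=(1, 1))
    pooled = modulated_deformable_roi_pool(conv_feat, rois, spatial_scale=1.0 / 16)
    print(conv_feat.list_arguments())
    print(pooled.list_arguments())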
|
py | b4051925fe666dd1885256921c782a141dea22a3 | from flask import (Blueprint, render_template, flash, request, redirect,
url_for, abort, make_response, jsonify)
from flask_login import login_required, current_user
from werkzeug.exceptions import BadRequest
import collections
import logging
import os
import time
from server import highlight, models, utils
from server import forms
from server.autograder import submit_continuous
from server.constants import VALID_ROLES, STAFF_ROLES, STUDENT_ROLE, MAX_UPLOAD_FILE_SIZE
from server.forms import CSRFForm, UploadSubmissionForm
from server.models import (Enrollment, ExtensionRequest, User, Course, Assignment, Group, Backup, Message,
ExternalFile, Extension, db)
from server.utils import is_safe_redirect_url, send_emails, invite_email, send_email
logger = logging.getLogger(__name__)
student = Blueprint('student', __name__)
def check_enrollment(course):
enrolled = current_user.is_enrolled(course.id)
if not enrolled and not current_user.is_admin:
flash("You have not been added to this course on OK", "warning")
def get_course(offering):
"""Get a course with the given name. If the user is not enrolled, flash
a warning message.
"""
course = Course.by_name(offering)
if not course:
abort(404)
check_enrollment(course)
return course
def get_assignment(name):
"""Get an assignment with the given name. If the user is not enrolled, flash
a warning message.
"""
assignment = Assignment.by_name(name)
if not assignment:
abort(404)
check_enrollment(assignment.course)
return assignment
@student.route('/')
def index():
if current_user.is_authenticated:
enrollments = current_user.enrollments(VALID_ROLES)
student_enrollments = [e for e in enrollments if e.role == STUDENT_ROLE]
staff_enrollments = [e.course for e in enrollments if e.role in STAFF_ROLES]
is_staff_only = staff_enrollments and not student_enrollments
        # If not a student elsewhere - send staff members directly to the admin view
if (is_staff_only or current_user.is_admin) and not request.args.get('student'):
if len(staff_enrollments) == 1:
return redirect(url_for('admin.course', cid=staff_enrollments[0].id))
return redirect(url_for('admin.list_courses'))
all_courses = Course.query.all()
courses = {
'instructor': staff_enrollments,
'current': [e.course for e in student_enrollments if e.course.active],
'past': [e.course for e in student_enrollments if not e.course.active],
'all': all_courses,
'num_enrolled': len(enrollments)
}
displayed_courses = courses['current'] + courses['past']
courses['all'] = [c for c in all_courses if c not in displayed_courses]
return render_template('student/courses/index.html', **courses)
else:
return render_template('index.html')
@student.route('/<offering:offering>/')
@login_required
def course(offering):
course = get_course(offering)
assignments = {
'active': [a.user_status(current_user) for a in course.assignments
if a.active and a.visible],
'inactive': [a.user_status(current_user) for a in course.assignments
if not a.active and a.visible]
}
return render_template('student/course/index.html', course=course,
**assignments)
@student.route('/<assignment_name:name>/')
@login_required
def assignment(name):
assign = get_assignment(name)
user_ids = assign.active_user_ids(current_user.id)
fs = assign.final_submission(user_ids)
revision = assign.revision(user_ids)
scores = assign.scores(user_ids)
group = Group.lookup(current_user, assign)
can_invite = assign.max_group_size > 1 and assign.active
can_remove = group and group.has_status(current_user, 'active')
if group:
can_invite = len(group.members) < assign.max_group_size
has_extension = Extension.get_extension(current_user, assign)
has_pending_extension_request = ExtensionRequest.get_extension_request(current_user, assign)
data = {
'course': assign.course,
'assignment': assign,
'backups': assign.backups(user_ids).limit(5).all(),
'subms': assign.submissions(user_ids).limit(5).all(),
'final_submission': fs,
'flagged': fs and fs.flagged,
'group': group,
'revision': revision,
'scores': scores,
'can_invite': can_invite,
'can_remove': can_remove,
'has_extension': has_extension,
'has_pending_extension_request': has_pending_extension_request,
'csrf_form': CSRFForm()
}
return render_template('student/assignment/index.html', **data)
@student.route('/<assignment_name:name>/request_extension', methods=['GET', 'POST'])
@login_required
def request_extension(name):
assign = get_assignment(name)
if request.method == 'GET':
form = forms.ExtensionRequestForm()
form.assignment_id = assign.id
form.email = current_user.email
return render_template('student/assignment/request_extension.html', course=assign.course, assignment=assign, form=form)
if request.method == 'POST':
reason = request.form.get("reason")
ExtensionRequest.create(assign, current_user, reason)
staff_record = (Enrollment.query
.filter_by(course_id=assign.course.id)
.filter(Enrollment.role.in_(STAFF_ROLES))
.one_or_none())
if staff_record:
send_email(
staff_record.user.email,
"[{}] New Extension Request for {}".format(assign.course.display_name, assign.display_name),
"You have a pending extension request! Visit your Okpy section console to handle this request.",
from_name=assign.course.display_name,
link=url_for(".section_console"),
link_text="Section Console",
)
flash("Your request for an extension on {} has been submitted.".format(assign.display_name))
return redirect(url_for('.assignment', name=name))
@student.route('/<assignment_name:name>/submit', methods=['GET', 'POST'])
@login_required
def submit_assignment(name):
# TODO: Unify student & staff upload.
assign = get_assignment(name)
group = Group.lookup(current_user, assign)
user_ids = assign.active_user_ids(current_user.id)
fs = assign.final_submission(user_ids)
if not assign.uploads_enabled:
flash("This assignment cannot be submitted online", 'warning')
return redirect(url_for('.assignment', name=assign.name))
extension = None # No need for an extension
if not assign.active:
extension = Extension.get_extension(current_user, assign)
if not extension:
flash("It's too late to submit this assignment", 'warning')
return redirect(url_for('.assignment', name=assign.name))
if request.method == "POST":
backup = Backup.create(
submitter=current_user,
assignment=assign,
submit=True,
)
assignment = backup.assignment
if extension:
backup.custom_submission_time = extension.custom_submission_time
templates = assignment.files or []
files = {}
def extract_file_index(file_ind):
""" Get the index of of file objects. Used because
request.files.getlist() does not handle uniquely indexed
lists.
>>> extract_file_index('file[12'])
12
"""
brace_loc = file_ind.find('[')
index_str = file_ind[brace_loc+1:-1]
return int(index_str)
# A list of one element lists
sorted_uploads = sorted(list(request.files.items()),
key=lambda x: extract_file_index(x[0]))
uploads = [v[1] for v in sorted_uploads]
full_path_names = list(request.form.listvalues())[0]
template_files = assign.files or []
file_names = [os.path.split(f)[1] for f in full_path_names]
missing = [t for t in template_files if t not in file_names]
if missing:
return jsonify({
'error': ('Missing files: {}. The following files are required: {}'
.format(', '.join(missing), ', '.join(template_files)))
}), 400
backup_folder_postfix = time.time()
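        # Text files are stored inline in the backup message; binary uploads
        # (which raise UnicodeDecodeError below) are sent to external storage instead.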
for full_path, upload in zip(full_path_names, uploads):
data = upload.read()
if len(data) > MAX_UPLOAD_FILE_SIZE: # file is too large (over 25 MB)
return jsonify({
'error': ('{} is larger than the maximum file size of {} MB'
.format(full_path, MAX_UPLOAD_FILE_SIZE/1024/1024))
}), 400
try:
files[full_path] = str(data, 'utf-8')
except UnicodeDecodeError:
upload.stream.seek(0) # We've already read data, so reset before uploading
dest_folder = "uploads/{}/{}/{}/".format(assign.name, current_user.id, backup_folder_postfix)
bin_file = ExternalFile.upload(upload.stream, current_user.id, full_path,
staff_file=False, prefix=dest_folder,
course_id=assign.course.id,
backup=backup, assignment_id=assign.id)
db.session.add(bin_file)
message = Message(kind='file_contents', contents=files)
backup.messages.append(message)
db.session.add(backup)
db.session.commit()
# Send to continuous autograder
if assign.autograding_key and assign.continuous_autograding:
try:
submit_continuous(backup)
except ValueError as e:
flash('Did not send to autograder: {}'.format(e), 'warning')
return jsonify({
'backup': backup.hashid,
'url': url_for('.code', name=assign.name, submit=backup.submit,
bid=backup.id)
})
return render_template('student/assignment/submit.html', assignment=assign,
group=group, course=assign.course)
@student.route('/<assignment_name:name>/<bool(backups, submissions):submit>/')
@login_required
def list_backups(name, submit):
assign = get_assignment(name)
page = request.args.get('page', 1, type=int)
user_ids = assign.active_user_ids(current_user.id)
csrf_form = CSRFForm()
if submit:
backups = assign.submissions(user_ids)
else:
backups = assign.backups(user_ids)
paginate = backups.paginate(page=page, per_page=10)
return render_template('student/assignment/list.html', course=assign.course,
assignment=assign, paginate=paginate, submit=submit,
csrf_form=csrf_form)
@student.route('/<assignment_name:name>/<bool(backups, submissions):submit>/<hashid:bid>/')
@login_required
def code(name, submit, bid):
assign = get_assignment(name)
backup = Backup.query.get(bid)
if not (backup and Backup.can(backup, current_user, "view")):
abort(404)
if backup.submit != submit:
return redirect(url_for('.code', name=name, submit=backup.submit, bid=bid))
diff_type = request.args.get('diff')
if diff_type not in (None, 'short', 'full'):
return redirect(url_for('.code', name=name, submit=submit, bid=bid))
if not assign.files and diff_type:
return abort(404)
# sort comments by (filename, line)
comments = collections.defaultdict(list)
for comment in backup.comments:
comments[(comment.filename, comment.line)].append(comment)
# highlight files and add comments
files = highlight.diff_files(assign.files, backup.files(), diff_type)
for filename, source_file in files.items():
for line in source_file.lines:
line.comments = comments[(filename, line.line_after)]
for filename, ex_file in backup.external_files_dict().items():
files[filename] = ex_file
return render_template('student/assignment/code.html',
course=assign.course, assignment=assign, backup=backup,
files=files, diff_type=diff_type)
@student.route('/<assignment_name:name>/<bool(backups, submissions):submit>/'
'<hashid:bid>/download/<path:file>')
@login_required
def download(name, submit, bid, file):
backup = Backup.query.get(bid)
if not (backup and Backup.can(backup, current_user, "view")):
abort(404)
if backup.submit != submit:
return redirect(url_for('.download', name=name, submit=backup.submit,
bid=bid, file=file))
try:
contents = backup.files()[file]
except KeyError:
abort(404)
response = make_response(contents)
inline = 'raw' in request.args
content_disposition = "inline" if inline else "attachment"
response.headers["Content-Disposition"] = ("{0}; filename={1!s}"
.format(content_disposition, file))
response.headers["Content-Security-Policy"] = "default-src 'none';"
response.headers["X-Content-Type-Options"] = "nosniff"
if file.endswith('.ipynb') and not inline:
# Prevent safari from adding a .txt extension to files
response.headers["Content-Type"] = "application/octet-stream; charset=UTF-8"
else:
response.headers["Content-Type"] = "text/plain; charset=UTF-8"
return response
@student.route('/<assignment_name:name>/submissions/<hashid:bid>/flag/',
methods=['POST'])
@login_required
def flag(name, bid):
assign = get_assignment(name)
user_ids = assign.active_user_ids(current_user.id)
flag = 'flag' in request.form
next_url = request.form['next']
backup = models.Backup.query.get(bid)
if not Backup.can(backup, current_user, "view"):
abort(404)
if not assign.active:
flash('It is too late to change what submission is graded.', 'warning')
elif flag:
result = assign.flag(bid, user_ids)
flash('Flagged submission {}. '.format(result.hashid) +
'This submission will be used for grading', 'success')
else:
result = assign.unflag(bid, user_ids)
flash('Removed flag from {}. '.format(result.hashid) +
'The most recent submission will be used for grading.', 'success')
if is_safe_redirect_url(request, next_url):
return redirect(next_url)
else:
flash("Not a valid redirect", "danger")
abort(400)
@student.route('/<assignment_name:name>/group/invite/', methods=['POST'])
@login_required
def group_invite(name):
assignment = get_assignment(name)
email = request.form['email']
invitee = User.lookup(email)
if not invitee:
flash("{0} is not enrolled".format(email), 'warning')
else:
try:
Group.invite(current_user, invitee, assignment)
success = "{0} has been invited. They can accept the invite by logging into okpy.org".format(email)
invite_email(current_user, invitee, assignment)
flash(success, "success")
except BadRequest as e:
flash(e.description, 'danger')
return redirect(url_for('.assignment', name=assignment.name))
@student.route('/<assignment_name:name>/group/remove/', methods=['POST'])
@login_required
def group_remove(name):
assignment = get_assignment(name)
target = User.lookup(request.form['email'])
group = Group.lookup(current_user, assignment)
if not target:
flash("{0} is not enrolled".format(request.form['email']), 'warning')
elif not group:
flash("You are not in a group", 'warning')
else:
try:
members = [m.user.email for m in group.members]
group.remove(current_user, target)
subject = "{0} has been removed from your {1} group".format(target.email,
assignment.display_name)
if target.email == current_user.email:
descriptor = "themselves"
else:
descriptor = target.email
body = "{0} removed {1} from the group.".format(current_user.email, descriptor)
send_emails(members, subject, body)
except BadRequest as e:
flash(e.description, 'danger')
return redirect(url_for('.assignment', name=assignment.name))
@student.route('/<assignment_name:name>/group/respond/', methods=['POST'])
@login_required
def group_respond(name):
assignment = get_assignment(name)
action = request.form.get('action')
target = request.form.get('email')
if not action or action not in ['accept', 'decline', 'revoke']:
abort(400)
group = Group.lookup(current_user, assignment)
if not group:
flash("You are not in a group")
else:
try:
if action == "accept":
group.accept(current_user)
subject = "{0} has accepted the invitation to join your group".format(current_user.email)
body = "Your group for {0} now has {1} members".format(assignment.display_name,
len(group.members))
members = [m.user.email for m in group.members]
send_emails(members, subject, body)
elif action == "decline":
members = [m.user.email for m in group.members]
group.decline(current_user)
subject = "{0} declined an invite to join the group".format(current_user.email)
body = "{0} declined to join the group for {1}".format(current_user.email,
assignment.display_name)
send_emails(members, subject, body)
elif action == "revoke":
members = [m.user.email for m in group.members]
group.decline(current_user)
subject = "{0} invitation for {1} revoked".format(assignment.display_name,
target)
body = "{0} has revoked the invitation for {1}".format(current_user.email,
target)
send_emails(members, subject, body)
except BadRequest as e:
flash(e.description, 'danger')
return redirect(url_for('.assignment', name=assignment.name))
@student.route('/comments/', methods=['POST'])
@login_required
def new_comment():
if not models.Comment.can(None, current_user, "create"):
abort(403)
comment = models.Comment(
backup_id=utils.decode_id(request.form['backup_id']),
author_id=current_user.id,
filename=request.form['filename'],
line=request.form.get('line', type=int),
message=request.form['message'])
db.session.add(comment)
db.session.commit()
return render_template('student/assignment/comment.html', comment=comment)
@student.route('/comments/<hashid:comment_id>', methods=['PUT', 'DELETE'])
@login_required
def edit_comment(comment_id):
comment = models.Comment.query.get(comment_id)
if not models.Comment.can(comment, current_user, "edit"):
abort(403)
if request.method == 'DELETE':
db.session.delete(comment)
db.session.commit()
return ('', 204)
else:
comment.message = request.form['message']
db.session.commit()
return render_template('student/assignment/comment.html', comment=comment)
|
py | b405197c449ba2d4d0e10ff6e5207e60d3eecdfe | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayCommerceSportsGamesSendinviteSendModel(object):
def __init__(self):
self._biz_type = None
self._desc = None
self._open_id = None
self._user_id = None
@property
def biz_type(self):
return self._biz_type
@biz_type.setter
def biz_type(self, value):
self._biz_type = value
@property
def desc(self):
return self._desc
@desc.setter
def desc(self, value):
self._desc = value
@property
def open_id(self):
return self._open_id
@open_id.setter
def open_id(self, value):
self._open_id = value
@property
def user_id(self):
return self._user_id
@user_id.setter
def user_id(self, value):
self._user_id = value
def to_alipay_dict(self):
params = dict()
if self.biz_type:
if hasattr(self.biz_type, 'to_alipay_dict'):
params['biz_type'] = self.biz_type.to_alipay_dict()
else:
params['biz_type'] = self.biz_type
if self.desc:
if hasattr(self.desc, 'to_alipay_dict'):
params['desc'] = self.desc.to_alipay_dict()
else:
params['desc'] = self.desc
if self.open_id:
if hasattr(self.open_id, 'to_alipay_dict'):
params['open_id'] = self.open_id.to_alipay_dict()
else:
params['open_id'] = self.open_id
if self.user_id:
if hasattr(self.user_id, 'to_alipay_dict'):
params['user_id'] = self.user_id.to_alipay_dict()
else:
params['user_id'] = self.user_id
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayCommerceSportsGamesSendinviteSendModel()
if 'biz_type' in d:
o.biz_type = d['biz_type']
if 'desc' in d:
o.desc = d['desc']
if 'open_id' in d:
o.open_id = d['open_id']
if 'user_id' in d:
o.user_id = d['user_id']
return o
|
py | b4051ab22ea68b8ed46f9505369bb9b6547144b5 | import unittest
from pyNastran.bdf.bdf import BDF, BDFCard, RBE1, RBE2, RBE3
from pyNastran.bdf.field_writer_8 import print_card_8
from pyNastran.bdf.cards.test.utils import save_load_deck
bdf = BDF(debug=False)
class TestRigid(unittest.TestCase):
def test_rbe3_01(self):
lines = [
'rbe3,6, ,3,123456,1.0,123456,41,4,+rbe3',
'+rbe3,alpha,2.0e-4',
]
card = bdf._process_card(lines)
card = BDFCard(card)
rbe = RBE3.add_card(card)
fields = rbe.raw_fields()
msg = print_card_8(fields).rstrip()
lines_expected = [
'RBE3 6 3 123456 1. 123456 41 4',
' ALPHA .0002'
]
lines_actual = msg.rstrip().split('\n')
msg = '\n%s\n\n%s\n' % ('\n'.join(lines_expected), msg)
msg += 'nlines_actual=%i nlines_expected=%i' % (len(lines_actual), len(lines_expected))
self.assertEqual(len(lines_actual), len(lines_expected), msg)
for actual, expected in zip(lines_actual, lines_expected):
self.assertEqual(actual, expected, msg)
dependent_nid_to_components = check_rbe(rbe)
#print('dependent_nid_to_components = ', dependent_nid_to_components)
assert dependent_nid_to_components == {3: '123456'}, dependent_nid_to_components
def test_rbe3_02(self):
"""RBE3 Gmi/Cmi default"""
model = BDF()
model.add_grid(1, [0.,0.,0])
model.add_grid(4, [1.,0.,0])
model.add_grid(5, [0.,1.,0])
model.add_grid(6, [1.,1.,0])
rbe3 = model.add_rbe3(eid=1, refgrid=1, refc=1, weights=[.1, .5, 3.], comps=['123']*3,
Gmi=None, Cmi=None, Gijs=[4, 5, 6])
rbe3.write_card()
#-------------------------------------------------------------------------
def test_rbe2_01(self):
lines = [
'RBE2 100045 166007 123456 117752 101899 117766 101898 117748',
'+ 117765 117764 117763 109821 117743 117744 117750 117751',
'+ 117745 117746 101902 1.-6',
]
card = bdf._process_card(lines)
card = BDFCard(card)
rbe = RBE2.add_card(card)
fields = rbe.raw_fields()
msg = print_card_8(fields).rstrip()
#print(msg)
lines_expected = [
'RBE2 100045 166007 123456 117752 101899 117766 101898 117748',
' 117765 117764 117763 109821 117743 117744 117750 117751',
' 117745 117746 101902 .000001'
]
lines_actual = msg.rstrip().split('\n')
msg = '\n%s\n\n%s\n' % ('\n'.join(lines_expected), msg)
msg += 'nlines_actual=%i nlines_expected=%i' % (len(lines_actual), len(lines_expected))
self.assertEqual(len(lines_actual), len(lines_expected), msg)
for actual, expected in zip(lines_actual, lines_expected):
self.assertEqual(actual, expected, msg)
dependent_nid_to_components = check_rbe(rbe)
expected = {117763: '123456', 117764: '123456', 117765: '123456', 117766: '123456',
101898: '123456', 101899: '123456', 101902: '123456', 117743: '123456',
117744: '123456', 117745: '123456', 117746: '123456', 117748: '123456',
117750: '123456', 117751: '123456', 117752: '123456', 109821: '123456'}
assert dependent_nid_to_components == expected, dependent_nid_to_components
def test_rbe2_02(self):
lines = [
'RBE2 100045 166007 123456 117752 101899 117766 101898 117748',
'+ 117765 117764 117763 109821 117743 117744 117750 117751',
'+ 117745 117746 101902 ',
]
card = bdf._process_card(lines)
card = BDFCard(card)
rbe = RBE2.add_card(card)
fields = rbe.raw_fields()
msg = print_card_8(fields).rstrip()
lines_expected = [
'RBE2 100045 166007 123456 117752 101899 117766 101898 117748',
' 117765 117764 117763 109821 117743 117744 117750 117751',
' 117745 117746 101902 0.'
]
lines_actual = msg.rstrip().split('\n')
msg = '\n%s\n\n%s\n' % ('\n'.join(lines_expected), msg)
msg += 'nlines_actual=%i nlines_expected=%i' % (len(lines_actual), len(lines_expected))
self.assertEqual(len(lines_actual), len(lines_expected), msg)
for actual, expected in zip(lines_actual, lines_expected):
self.assertEqual(actual, expected, msg)
dependent_nid_to_components = check_rbe(rbe)
expected = {117763: '123456', 117764: '123456', 117765: '123456', 117766: '123456',
101898: '123456', 101899: '123456', 101902: '123456', 117743: '123456',
117744: '123456', 117745: '123456', 117746: '123456', 117748: '123456',
117750: '123456', 117751: '123456', 117752: '123456', 109821: '123456'}
assert dependent_nid_to_components == expected, dependent_nid_to_components
#-------------------------------------------------------------------------
def test_rbe1_01(self):
lines = [
'RBE1 10201 10201 123 10202 456',
' UM 10201 456 10202 123',
]
card = bdf._process_card(lines)
#print(print_card_8(card))
card = BDFCard(card)
rbe = RBE1.add_card(card)
fields = rbe.raw_fields()
msg = print_card_8(fields).rstrip()
lines_expected = [
'RBE1 10201 10201 123 10202 456',
' UM 10201 456 10202 123'
]
lines_actual = msg.rstrip().split('\n')
msg = '\n%s\n\n%s\n' % ('\n'.join(lines_expected), msg)
msg += 'nlines_actual=%i nlines_expected=%i' % (len(lines_actual), len(lines_expected))
self.assertEqual(len(lines_actual), len(lines_expected), msg)
for actual, expected in zip(lines_actual, lines_expected):
self.assertEqual(actual, expected, msg)
dependent_nid_to_components = check_rbe(rbe)
assert dependent_nid_to_components == {10201: '456', 10202: '123'}, dependent_nid_to_components
def test_rbe1_02(self):
lines = [
'RBE1 1001 1000 123456',
' UM 1002 123 1003 123 1004 123',
' 1005 123 1006 123 1008 123',
' 1009 123 1010 123 1011 123',
' 1012 123',
]
card = bdf._process_card(lines)
#print(print_card_8(card))
card = BDFCard(card)
rbe = RBE1.add_card(card)
fields = rbe.raw_fields()
msg = print_card_8(fields).rstrip()
lines_expected = [
'RBE1 1001 1000 123456',
' UM 1002 123 1003 123 1004 123',
' 1005 123 1006 123 1008 123',
' 1009 123 1010 123 1011 123',
' 1012 123',
]
lines_actual = msg.rstrip().split('\n')
msg = '\n%s\n\n%s\n' % ('\n'.join(lines_expected), msg)
msg += 'nlines_actual=%i nlines_expected=%i' % (len(lines_actual), len(lines_expected))
self.assertEqual(len(lines_actual), len(lines_expected), msg)
for actual, expected in zip(lines_actual, lines_expected):
self.assertEqual(actual, expected, msg)
dependent_nid_to_components = check_rbe(rbe)
assert dependent_nid_to_components == {1002: '123', 1003: '123', 1004: '123', 1005: '123', 1006: '123', 1008: '123', 1009: '123', 1010: '123', 1011: '123', 1012: '123'}, dependent_nid_to_components
def test_rbe1_03(self):
lines = [
'rbe1,46,3,123456, , , , , ,+rbe46',
'+rbe46,UM,4,123456,5,123456,2.0-6'
]
card = bdf._process_card(lines)
card = BDFCard(card)
rbe = RBE1.add_card(card)
fields = rbe.raw_fields()
msg = print_card_8(fields).rstrip()
lines_expected = [
'RBE1 46 3 123456',
' UM 4 123456 5 123456 .000002'
]
lines_actual = msg.rstrip().split('\n')
msg = '\n%s\n\n%s\n' % ('\n'.join(lines_expected), msg)
msg += 'nlines_actual=%i nlines_expected=%i' % (len(lines_actual), len(lines_expected))
self.assertEqual(len(lines_actual), len(lines_expected), msg)
for actual, expected in zip(lines_actual, lines_expected):
self.assertEqual(actual, expected, msg)
dependent_nid_to_components = check_rbe(rbe)
assert dependent_nid_to_components == {4: '123456', 5: '123456'}, dependent_nid_to_components
def test_rsscon(self):
model = BDF(debug=False)
eid = 100
shell_eid = 1
solid_eid = 2
rsscon = model.add_rsscon(
eid, 'ELEM',
shell_eid=shell_eid, solid_eid=solid_eid,
a_solid_grids=None, b_solid_grids=None, shell_grids=None,
comment='rsscon')
eid = 101
shell_grids = [31]
a_solid_grids = [74]
b_solid_grids = [75]
model.add_rsscon(
eid, 'GRID',
shell_eid=None, solid_eid=None,
a_solid_grids=a_solid_grids, b_solid_grids=b_solid_grids, shell_grids=shell_grids,
comment='rsscon')
eid = 102
shell_grids = [11, 14]
a_solid_grids = [12, 15]
b_solid_grids = [13, 16]
model.add_rsscon(
eid, 'GRID',
shell_eid=None, solid_eid=None,
            a_solid_grids=a_solid_grids, b_solid_grids=b_solid_grids, shell_grids=shell_grids,
comment='rsscon')
dependent_nid_to_components = check_rbe(rsscon)
assert dependent_nid_to_components == {}, dependent_nid_to_components
save_load_deck(model, punch=True, run_save_load_hdf5=True)
def check_rbe(rbe):
"""simple RBE checks"""
model = BDF(debug=None)
model.rigid_elements[rbe.eid] = rbe
node_ids = []
for nid in rbe.independent_nodes + rbe.dependent_nodes:
node_ids.append(nid)
rigid_elements = model.get_rigid_elements_with_node_ids(node_ids)
if rbe.type not in ['RSSCON']:
# supported
assert rbe.eid in rigid_elements, rbe
else:
# not supported
assert rbe.eid not in rigid_elements, rbe
dependent_nid_to_components = model.get_dependent_nid_to_components(mpc_id=None, stop_on_failure=True)
return dependent_nid_to_components
if __name__ == '__main__': # pragma: no cover
unittest.main()
|
py | b4051b79bc023acd484aae2f4b54a0294b686109 | # -*- coding: utf-8 -*-
#! \file ./tests/__init__.py
#! \author Jiří Kučera, <[email protected]>
#! \stamp 2014-04-09 22:45:13 (UTC+01:00, DST+01:00)
#! \project DoIt!: Tools and Libraries for Building DSLs
#! \license MIT
#! \version 0.0.0
#! \fdesc @pyfile.docstr
#
"""\
DoIt! tests package initialization file.\
"""
__license__ = """\
Copyright (c) 2014 - 2017 Jiří Kučera.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
IN THE SOFTWARE.\
"""
import unittest
from . import test_support, test_config, test_text
def suite():
suite = unittest.TestSuite()
suite.addTest(test_support.suite())
suite.addTest(test_config.suite())
suite.addTest(test_text.suite())
return suite
#-def
|
py | b4051c73d144d24c92b0814d2ce8b0901a1a5f8a | # Copyright 2016 - Nokia
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from vitrage.common.constants import DatasourceProperties as DSProps
from vitrage.common.constants import EdgeLabel
from vitrage.common.constants import EntityCategory
from vitrage.common.constants import GraphAction
from vitrage.common.constants import VertexProperties as VProps
from vitrage.datasources.alarm_transformer_base import AlarmTransformerBase
from vitrage.datasources.aodh import AODH_DATASOURCE
from vitrage.datasources.aodh.properties import AodhEventType
from vitrage.datasources.aodh.properties import AodhProperties as AodhProps
from vitrage.datasources.aodh.properties import AodhState
from vitrage.datasources import transformer_base as tbase
from vitrage.datasources.transformer_base import Neighbor
from vitrage.datasources.transformer_base import TransformerBase
from vitrage.evaluator.actions.evaluator_event_transformer \
import VITRAGE_DATASOURCE
import vitrage.graph.utils as graph_utils
from vitrage.utils import datetime as datetime_utils
class AodhTransformer(AlarmTransformerBase):
# Event types which need to refer them differently
GRAPH_ACTION_MAPPING = {
AodhEventType.DELETION: GraphAction.DELETE_ENTITY,
}
def __init__(self, transformers, conf):
super(AodhTransformer, self).__init__(transformers, conf)
def _create_snapshot_entity_vertex(self, entity_event):
if _is_vitrage_alarm(entity_event):
return self._create_merge_alarm_vertex(entity_event)
return self._create_vertex(entity_event)
def _create_update_entity_vertex(self, entity_event):
if _is_vitrage_alarm(entity_event):
return self._create_merge_alarm_vertex(entity_event)
return self._create_vertex(entity_event)
def _create_vertex(self, entity_event):
metadata = {
VProps.NAME: entity_event[AodhProps.NAME],
VProps.SEVERITY: entity_event[AodhProps.SEVERITY],
AodhProps.DESCRIPTION: entity_event[AodhProps.DESCRIPTION],
AodhProps.ENABLED: entity_event[AodhProps.ENABLED],
VProps.PROJECT_ID: entity_event.get(AodhProps.PROJECT_ID, None),
AodhProps.REPEAT_ACTIONS: entity_event[AodhProps.REPEAT_ACTIONS],
VProps.RESOURCE_ID: entity_event[AodhProps.RESOURCE_ID],
'alarm_type': entity_event[AodhProps.TYPE]
}
# TODO(annarez): convert EVENT_TYPE to tuple
if entity_event[AodhProps.TYPE] == AodhProps.EVENT:
metadata[AodhProps.EVENT_TYPE] = entity_event[AodhProps.EVENT_TYPE]
elif entity_event[AodhProps.TYPE] == AodhProps.THRESHOLD:
metadata[AodhProps.STATE_TIMESTAMP] = \
entity_event[AodhProps.STATE_TIMESTAMP]
vitrage_sample_timestamp = entity_event[DSProps.SAMPLE_DATE]
update_timestamp = self._format_update_timestamp(
AodhTransformer._timestamp(entity_event), vitrage_sample_timestamp)
return graph_utils.create_vertex(
self._create_entity_key(entity_event),
vitrage_category=EntityCategory.ALARM,
vitrage_type=entity_event[DSProps.ENTITY_TYPE],
vitrage_sample_timestamp=vitrage_sample_timestamp,
entity_id=entity_event[AodhProps.ALARM_ID],
entity_state=self._get_alarm_state(entity_event),
update_timestamp=update_timestamp,
metadata=metadata)
def _create_snapshot_neighbors(self, entity_event):
return self._create_aodh_neighbors(entity_event)
def _create_update_neighbors(self, entity_event):
return self._create_aodh_neighbors(entity_event)
def _create_aodh_neighbors(self, entity_event):
graph_neighbors = entity_event.get(self.QUERY_RESULT, [])
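        # Link the alarm vertex to every affected resource vertex returned by the
        # enrich query, using an 'on' edge.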
result = []
for vertex in graph_neighbors:
edge = graph_utils.create_edge(
source_id=TransformerBase.uuid_from_deprecated_vitrage_id(
self._create_entity_key(entity_event)),
target_id=vertex.vertex_id,
relationship_type=EdgeLabel.ON)
result.append(Neighbor(vertex, edge))
return result
def _create_merge_alarm_vertex(self, entity_event):
"""Handle an alarm that already has a vitrage_id
This is a deduced alarm created in aodh by vitrage, so it already
exists in the graph.
        This function will update the existing vertex (and not create a new one)
"""
metadata = {
AodhProps.DESCRIPTION: entity_event[AodhProps.DESCRIPTION],
VProps.PROJECT_ID: entity_event[AodhProps.PROJECT_ID],
}
vitrage_sample_timestamp = entity_event[DSProps.SAMPLE_DATE]
update_timestamp = self._format_update_timestamp(
AodhTransformer._timestamp(entity_event), vitrage_sample_timestamp)
return graph_utils.create_vertex(
self._create_entity_key(entity_event),
vitrage_category=EntityCategory.ALARM,
vitrage_type=VITRAGE_DATASOURCE,
vitrage_sample_timestamp=vitrage_sample_timestamp,
entity_id=entity_event.get(AodhProps.ALARM_ID),
update_timestamp=update_timestamp,
metadata=metadata)
def _ok_status(self, entity_event):
return entity_event[AodhProps.STATE] != AodhState.ALARM
def _create_entity_key(self, entity_event):
if _is_vitrage_alarm(entity_event):
return entity_event.get(AodhProps.VITRAGE_ID)
entity_type = entity_event[DSProps.ENTITY_TYPE]
alarm_id = entity_event[AodhProps.ALARM_ID]
return tbase.build_key((EntityCategory.ALARM, entity_type, alarm_id))
@staticmethod
def _timestamp(entity_event):
return datetime_utils.change_time_str_format(
entity_event[AodhProps.TIMESTAMP],
AodhProps.TIME_FORMAT,
tbase.TIMESTAMP_FORMAT)
@staticmethod
def get_enrich_query(event):
affected_resource_id = event.get(AodhProps.RESOURCE_ID, None)
if not affected_resource_id:
return None
return {VProps.ID: affected_resource_id}
def get_vitrage_type(self):
return AODH_DATASOURCE
def _is_vitrage_alarm(entity_event):
return entity_event.get(AodhProps.VITRAGE_ID) is not None
|
py | b4051cadddf4fcc6b3969fcea29c3b8a7241a529 |
'create a phone-loop model'
import argparse
import pickle
import sys
import torch
import beer
def build_categorical(size):
mean = torch.ones(size) / size
return beer.Categorical.create(mean, prior_strength=1)
def build_categorical_2g(size):
mean = torch.ones(size, size) / size
return beer.CategoricalSet.create(mean, prior_strength=1)
def build_sb(size):
return beer.SBCategorical.create(truncation=size, prior_strength=size / 2)
def build_hsb(size):
root_sb = beer.SBCategorical.create(truncation=size,
prior_strength=size / 2)
return beer.SBCategoricalSet.create(size, root_sb,
prior_strength=size / 2)
def build_sbhp(size):
return beer.SBCategoricalHyperPrior.create(truncation=size,
prior_strength=size / 2,
hyper_prior_strength=1.)
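# Prior types whose phone weights are conditioned on the previous phone (bigram phone loop).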
bigram_prior = ['hierarchical_dirichlet_process', 'dirichlet2']
priors = {
'dirichlet': build_categorical,
'dirichlet2': build_categorical_2g,
'dirichlet_process': build_sb,
'gamma_dirichlet_process': build_sbhp,
'hierarchical_dirichlet_process': build_hsb,
}
def setup(parser):
parser.add_argument('--weights-prior', default='gamma_dirichlet_process',
choices=[key for key in priors],
help='type of prior over the phone weights')
parser.add_argument('decode_graph', help='decoding graph')
parser.add_argument('hmms', help='phones\' hmm')
parser.add_argument('out', help='phone loop model')
def main(args, logger):
logger.debug('load the decoding graph...')
with open(args.decode_graph, 'rb') as f:
graph, start_pdf, end_pdf = pickle.load(f)
logger.debug('load the hmms...')
with open(args.hmms, 'rb') as f:
hmms, emissions = pickle.load(f)
logger.debug('compiling the graph...')
cgraph = graph.compile()
categorical = priors[args.weights_prior](len(start_pdf))
logger.debug('create the phone-loop model...')
model_cls = beer.BigramPhoneLoop if args.weights_prior in bigram_prior else beer.PhoneLoop
ploop = model_cls.create(cgraph, start_pdf, end_pdf, emissions, categorical)
logger.debug('saving the model on disk...')
with open(args.out, 'wb') as f:
pickle.dump(ploop, f)
logger.info('successfully created a phone-loop model with ' \
f'{len(start_pdf)} phones')
if __name__ == "__main__":
main()
|
py | b4051cd54c893c12469f684f6e418565abcae243 | # coding: utf-8
"""
NiFi Rest Api
The Rest Api provides programmatic access to command and control a NiFi instance in real time. Start and stop processors, monitor queues, query provenance data, and more. Each endpoint below includes a description, definitions of the expected input and output, potential response codes, and the authorizations required to invoke each service.
OpenAPI spec version: 1.10.0
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class RemoteProcessGroupEntity(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'revision': 'RevisionDTO',
'id': 'str',
'uri': 'str',
'position': 'PositionDTO',
'permissions': 'PermissionsDTO',
'bulletins': 'list[BulletinEntity]',
'disconnected_node_acknowledged': 'bool',
'component': 'RemoteProcessGroupDTO',
'status': 'RemoteProcessGroupStatusDTO',
'input_port_count': 'int',
'output_port_count': 'int',
'operate_permissions': 'PermissionsDTO'
}
attribute_map = {
'revision': 'revision',
'id': 'id',
'uri': 'uri',
'position': 'position',
'permissions': 'permissions',
'bulletins': 'bulletins',
'disconnected_node_acknowledged': 'disconnectedNodeAcknowledged',
'component': 'component',
'status': 'status',
'input_port_count': 'inputPortCount',
'output_port_count': 'outputPortCount',
'operate_permissions': 'operatePermissions'
}
def __init__(self, revision=None, id=None, uri=None, position=None, permissions=None, bulletins=None, disconnected_node_acknowledged=None, component=None, status=None, input_port_count=None, output_port_count=None, operate_permissions=None):
"""
RemoteProcessGroupEntity - a model defined in Swagger
"""
self._revision = None
self._id = None
self._uri = None
self._position = None
self._permissions = None
self._bulletins = None
self._disconnected_node_acknowledged = None
self._component = None
self._status = None
self._input_port_count = None
self._output_port_count = None
self._operate_permissions = None
if revision is not None:
self.revision = revision
if id is not None:
self.id = id
if uri is not None:
self.uri = uri
if position is not None:
self.position = position
if permissions is not None:
self.permissions = permissions
if bulletins is not None:
self.bulletins = bulletins
if disconnected_node_acknowledged is not None:
self.disconnected_node_acknowledged = disconnected_node_acknowledged
if component is not None:
self.component = component
if status is not None:
self.status = status
if input_port_count is not None:
self.input_port_count = input_port_count
if output_port_count is not None:
self.output_port_count = output_port_count
if operate_permissions is not None:
self.operate_permissions = operate_permissions
@property
def revision(self):
"""
Gets the revision of this RemoteProcessGroupEntity.
The revision for this request/response. The revision is required for any mutable flow requests and is included in all responses.
:return: The revision of this RemoteProcessGroupEntity.
:rtype: RevisionDTO
"""
return self._revision
@revision.setter
def revision(self, revision):
"""
Sets the revision of this RemoteProcessGroupEntity.
The revision for this request/response. The revision is required for any mutable flow requests and is included in all responses.
:param revision: The revision of this RemoteProcessGroupEntity.
:type: RevisionDTO
"""
self._revision = revision
@property
def id(self):
"""
Gets the id of this RemoteProcessGroupEntity.
The id of the component.
:return: The id of this RemoteProcessGroupEntity.
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""
Sets the id of this RemoteProcessGroupEntity.
The id of the component.
:param id: The id of this RemoteProcessGroupEntity.
:type: str
"""
self._id = id
@property
def uri(self):
"""
Gets the uri of this RemoteProcessGroupEntity.
        The URI for future requests to the component.
:return: The uri of this RemoteProcessGroupEntity.
:rtype: str
"""
return self._uri
@uri.setter
def uri(self, uri):
"""
Sets the uri of this RemoteProcessGroupEntity.
        The URI for future requests to the component.
:param uri: The uri of this RemoteProcessGroupEntity.
:type: str
"""
self._uri = uri
@property
def position(self):
"""
Gets the position of this RemoteProcessGroupEntity.
The position of this component in the UI if applicable.
:return: The position of this RemoteProcessGroupEntity.
:rtype: PositionDTO
"""
return self._position
@position.setter
def position(self, position):
"""
Sets the position of this RemoteProcessGroupEntity.
The position of this component in the UI if applicable.
:param position: The position of this RemoteProcessGroupEntity.
:type: PositionDTO
"""
self._position = position
@property
def permissions(self):
"""
Gets the permissions of this RemoteProcessGroupEntity.
The permissions for this component.
:return: The permissions of this RemoteProcessGroupEntity.
:rtype: PermissionsDTO
"""
return self._permissions
@permissions.setter
def permissions(self, permissions):
"""
Sets the permissions of this RemoteProcessGroupEntity.
The permissions for this component.
:param permissions: The permissions of this RemoteProcessGroupEntity.
:type: PermissionsDTO
"""
self._permissions = permissions
@property
def bulletins(self):
"""
Gets the bulletins of this RemoteProcessGroupEntity.
The bulletins for this component.
:return: The bulletins of this RemoteProcessGroupEntity.
:rtype: list[BulletinEntity]
"""
return self._bulletins
@bulletins.setter
def bulletins(self, bulletins):
"""
Sets the bulletins of this RemoteProcessGroupEntity.
The bulletins for this component.
:param bulletins: The bulletins of this RemoteProcessGroupEntity.
:type: list[BulletinEntity]
"""
self._bulletins = bulletins
@property
def disconnected_node_acknowledged(self):
"""
Gets the disconnected_node_acknowledged of this RemoteProcessGroupEntity.
Acknowledges that this node is disconnected to allow for mutable requests to proceed.
:return: The disconnected_node_acknowledged of this RemoteProcessGroupEntity.
:rtype: bool
"""
return self._disconnected_node_acknowledged
@disconnected_node_acknowledged.setter
def disconnected_node_acknowledged(self, disconnected_node_acknowledged):
"""
Sets the disconnected_node_acknowledged of this RemoteProcessGroupEntity.
Acknowledges that this node is disconnected to allow for mutable requests to proceed.
:param disconnected_node_acknowledged: The disconnected_node_acknowledged of this RemoteProcessGroupEntity.
:type: bool
"""
self._disconnected_node_acknowledged = disconnected_node_acknowledged
@property
def component(self):
"""
Gets the component of this RemoteProcessGroupEntity.
:return: The component of this RemoteProcessGroupEntity.
:rtype: RemoteProcessGroupDTO
"""
return self._component
@component.setter
def component(self, component):
"""
Sets the component of this RemoteProcessGroupEntity.
:param component: The component of this RemoteProcessGroupEntity.
:type: RemoteProcessGroupDTO
"""
self._component = component
@property
def status(self):
"""
Gets the status of this RemoteProcessGroupEntity.
The status of the remote process group.
:return: The status of this RemoteProcessGroupEntity.
:rtype: RemoteProcessGroupStatusDTO
"""
return self._status
@status.setter
def status(self, status):
"""
Sets the status of this RemoteProcessGroupEntity.
The status of the remote process group.
:param status: The status of this RemoteProcessGroupEntity.
:type: RemoteProcessGroupStatusDTO
"""
self._status = status
@property
def input_port_count(self):
"""
Gets the input_port_count of this RemoteProcessGroupEntity.
The number of remote input ports currently available on the target.
:return: The input_port_count of this RemoteProcessGroupEntity.
:rtype: int
"""
return self._input_port_count
@input_port_count.setter
def input_port_count(self, input_port_count):
"""
Sets the input_port_count of this RemoteProcessGroupEntity.
The number of remote input ports currently available on the target.
:param input_port_count: The input_port_count of this RemoteProcessGroupEntity.
:type: int
"""
self._input_port_count = input_port_count
@property
def output_port_count(self):
"""
Gets the output_port_count of this RemoteProcessGroupEntity.
The number of remote output ports currently available on the target.
:return: The output_port_count of this RemoteProcessGroupEntity.
:rtype: int
"""
return self._output_port_count
@output_port_count.setter
def output_port_count(self, output_port_count):
"""
Sets the output_port_count of this RemoteProcessGroupEntity.
The number of remote output ports currently available on the target.
:param output_port_count: The output_port_count of this RemoteProcessGroupEntity.
:type: int
"""
self._output_port_count = output_port_count
@property
def operate_permissions(self):
"""
Gets the operate_permissions of this RemoteProcessGroupEntity.
The permissions for this component operations.
:return: The operate_permissions of this RemoteProcessGroupEntity.
:rtype: PermissionsDTO
"""
return self._operate_permissions
@operate_permissions.setter
def operate_permissions(self, operate_permissions):
"""
Sets the operate_permissions of this RemoteProcessGroupEntity.
The permissions for this component operations.
:param operate_permissions: The operate_permissions of this RemoteProcessGroupEntity.
:type: PermissionsDTO
"""
self._operate_permissions = operate_permissions
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, RemoteProcessGroupEntity):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
|
py | b4051d6ade5f64e35f7a6b3cb1b7efeed3f48565 | # SPDX-FileCopyrightText: 2021 ladyada for Adafruit Industries
# SPDX-License-Identifier: MIT
"""
Example to utilize the Python Imaging Library (Pillow) and draw bitmapped text
to 8 frames and then run autoplay on those frames.
This example is for use on (Linux) computers that are using CPython with
Adafruit Blinka to support CircuitPython libraries. CircuitPython does
not support PIL/pillow (python imaging library)!
Author(s): Melissa LeBlanc-Williams for Adafruit Industries
"""
import board
from PIL import Image, ImageDraw, ImageFont
# uncomment next line if you are using Adafruit 16x9 Charlieplexed PWM LED Matrix
# from adafruit_is31fl3731.matrix import Matrix as Display
# uncomment next line if you are using Adafruit 16x8 Charlieplexed Bonnet
from adafruit_is31fl3731.charlie_bonnet import CharlieBonnet as Display
# uncomment next line if you are using Pimoroni Scroll Phat HD LED 17 x 7
# from adafruit_is31fl3731.scroll_phat_hd import ScrollPhatHD as Display
BRIGHTNESS = 32 # Brightness can be between 0-255
i2c = board.I2C()
display = Display(i2c)
display.fill(0)
# 256 Color Grayscale Mode
image = Image.new("L", (display.width, display.height))
draw = ImageDraw.Draw(image)
# Load a font in 2 different sizes.
font = ImageFont.truetype("/usr/share/fonts/truetype/dejavu/DejaVuSans.ttf", 10)
# Load the text in each frame
for x in range(8):
draw.rectangle((0, 0, display.width, display.height), outline=0, fill=0)
draw.text((x + 1, -2), str(x + 1), font=font, fill=BRIGHTNESS)
display.image(image, frame=x)
display.autoplay(delay=500)
|
py | b4051f85263d884a21ffd1a0ba7dc800e1d2b5f6 | """tem path subcommand"""
from . import common as cli
from . import dot
def setup_parser(parser):
"""Set up argument parser for this subcommand."""
dot.setup_common_parser(parser)
@cli.subcommand
def cmd(args):
"""Execute this subcommand."""
dot.cmd_common(args, subdir="path")
|
py | b40520b89a7636bfd813513c1eb46bd229144361 | from django.shortcuts import get_object_or_404
from rest_framework.serializers import Serializer
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from .models import Todo
from .serializers import TodoSerializer
# Create your views here.
class TodoList(APIView):
def get(self, request):
"""
Returns the complete list of Todos
"""
todos = Todo.objects.all()
serializer = TodoSerializer(todos, many=True)
return Response(serializer.data)
def post(self, request):
"""
Creates a new Todo
"""
# Data of the new todo coming in the request
todo = request.data
# Using information in request to generate a new todo
new_todo = Todo.objects.create(label=todo["label"], done=todo["done"])
# Saving the new todo to the database
new_todo.save()
# Serializing the data of the new todo for readability in JSON
        serializer = TodoSerializer(new_todo)
# Returning the serialized new todo
return Response(serializer.data)
def put(self, request, *args, **kwargs):
todo_object = Todo.objects.get()
data = request.data
todo_object.label = data["label"]
todo_object.done = data["done"]
todo_object.save()
        serializer = TodoSerializer(todo_object)
return Response(serializer.data)
    def delete(self, request, position):
        """
        Deletes a specific todo
        """
        # Assumes `position` is the todo's primary key; returns 404 if it does not exist
        remove_todo = get_object_or_404(Todo, id=position)
        remove_todo.delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
|
py | b40520e44e3c19a456deb40c0c850936caac36ef | # class generated by DeVIDE::createDeVIDEModuleFromVTKObject
from module_kits.vtk_kit.mixins import SimpleVTKClassModuleBase
import vtk
class vtkXMLPImageDataReader(SimpleVTKClassModuleBase):
def __init__(self, module_manager):
SimpleVTKClassModuleBase.__init__(
self, module_manager,
vtk.vtkXMLPImageDataReader(), 'Reading vtkXMLPImageData.',
(), ('vtkXMLPImageData',),
replaceDoc=True,
inputFunctions=None, outputFunctions=None)
|
py | b40520ff30ec30ec5e4a6198e8048f72a46721ac | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
# Copyright (c) Megvii, Inc. and its affiliates.
import argparse
import os
import shutil
from loguru import logger
import tensorrt as trt
import torch
from torch2trt import torch2trt
from yolox.exp import get_exp
def make_parser():
parser = argparse.ArgumentParser("YOLOX ncnn deploy")
parser.add_argument("-expn", "--experiment-name", type=str, default=None)
parser.add_argument("-n", "--name", type=str, default=None, help="model name")
parser.add_argument(
"-f",
"--exp_file",
default=None,
type=str,
help="pls input your expriment description file",
)
parser.add_argument("-c", "--ckpt", default=None, type=str, help="ckpt path")
parser.add_argument(
"-w", '--workspace', type=int, default=29, help='max workspace size in detect'
)
parser.add_argument("-b", '--batch', type=int, default=5, help='max batch size in detect')
return parser
@logger.catch
@torch.no_grad()
def main():
args = make_parser().parse_args()
exp = get_exp(args.exp_file, args.name)
if not args.experiment_name:
args.experiment_name = exp.exp_name
model = exp.get_model()
file_name = os.path.join(exp.output_dir, args.experiment_name)
os.makedirs(file_name, exist_ok=True)
if args.ckpt is None:
ckpt_file = os.path.join(file_name, "best_ckpt.pth")
else:
ckpt_file = args.ckpt
ckpt = torch.load(ckpt_file, map_location="cpu")
# load the model state dict
model.load_state_dict(ckpt["model"])
logger.info("loaded checkpoint done.")
model.eval()
model.cuda()
model.head.decode_in_inference = False
x = torch.ones(1, 3, exp.test_size[0], exp.test_size[1]).cuda()
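    # Convert the model with torch2trt in FP16, tracing with a dummy input at the
    # experiment's test resolution.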
model_trt = torch2trt(
model,
[x],
fp16_mode=True,
log_level=trt.Logger.INFO,
max_workspace_size=(1 << args.workspace),
max_batch_size=args.batch,
)
torch.save(model_trt.state_dict(), os.path.join(file_name, "model_trt.pth"))
logger.info("Converted TensorRT model done.")
engine_file = os.path.join(file_name, "model_trt.engine")
engine_file_demo = os.path.join("demo", "TensorRT", "cpp", "model_trt.engine")
with open(engine_file, "wb") as f:
f.write(model_trt.engine.serialize())
shutil.copyfile(engine_file, engine_file_demo)
logger.info("Converted TensorRT model engine file is saved for C++ inference.")
if __name__ == "__main__":
main()
|
py | b405213111344c5db0328ef12eb7469c02ea9ad6 | import numpy as np
import os
import torch
import torch.nn.functional as F
import torchvision.transforms as transforms
import curves
import torchvision
import data
from PIL import Image
def l2_regularizer(weight_decay):
def regularizer(model):
l2 = 0.0
for p in model.parameters():
l2 += torch.sqrt(torch.sum(p ** 2))
return 0.5 * weight_decay * l2
return regularizer
def cyclic_learning_rate(epoch, cycle, alpha_1, alpha_2):
def schedule(iter):
t = ((epoch % cycle) + iter) / cycle
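        # Triangular schedule: move linearly from alpha_1 to alpha_2 over the first
        # half of the cycle, then back to alpha_1 over the second half.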
if t < 0.5:
return alpha_1 * (1.0 - 2.0 * t) + alpha_2 * 2.0 * t
else:
return alpha_1 * (2.0 * t - 1.0) + alpha_2 * (2.0 - 2.0 * t)
return schedule
def adjust_learning_rate(optimizer, lr):
for param_group in optimizer.param_groups:
param_group['lr'] = lr
return lr
def save_checkpoint(dir, epoch, name='checkpoint', **kwargs):
state = {
'epoch': epoch,
}
state.update(kwargs)
filepath = os.path.join(dir, '%s-%d.pt' % (name, epoch))
torch.save(state, filepath)
def train(train_loader, model, optimizer, criterion, regularizer=None, lr_schedule=None):
loss_sum = 0.0
correct = 0.0
num_iters = len(train_loader)
model.train()
for iter, (input, target) in enumerate(train_loader):
if lr_schedule is not None:
lr = lr_schedule(iter / num_iters)
adjust_learning_rate(optimizer, lr)
        input = input.cuda(non_blocking=True)
        target = target.cuda(non_blocking=True)
output = model(input)
loss = criterion(output, target)
if regularizer is not None:
loss += regularizer(model)
optimizer.zero_grad()
loss.backward()
optimizer.step()
loss_sum += loss.item() * input.size(0)
pred = output.data.argmax(1, keepdim=True)
correct += pred.eq(target.data.view_as(pred)).sum().item()
aa = len(train_loader.dataset)
bb = loss_sum
return {
'loss': loss_sum / len(train_loader.dataset),
'accuracy': correct * 100.0 / len(train_loader.dataset),
}
def test_poison(testset, model, criterion, regularizer=None, **kwargs):
loss_sum = 0.0
nll_sum = 0.0
correct = 0.0
model.eval()
x_raw = testset.test_data
y_raw = testset.test_labels
y_raw = np.array(y_raw)
perc_poison = 0.5
num_test = np.shape(testset.test_data)[0]
num_poison = round(perc_poison * num_test)
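    # Randomly pick a fraction of the test set and apply the backdoor trigger to the
    # selected samples before evaluation.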
random_selection_indices = np.random.choice(num_test, num_poison, replace=False)
x_raw = x_raw[random_selection_indices]
y_raw = np.array(y_raw)
y_raw = y_raw[random_selection_indices]
(is_poison_train, x_poisoned_raw, y_poisoned_raw) = data.generate_backdoor_untargeted_true(x_raw, y_raw, 1.0)
i0 = Image.fromarray(x_poisoned_raw[0])
# i0.save("11.png")
num_poison = x_poisoned_raw.shape[0]
inputs = torch.from_numpy(x_poisoned_raw).float()
target = torch.from_numpy(y_poisoned_raw).long()
    inputs = inputs.cuda(non_blocking=True)
    target = target.cuda(non_blocking=True)
input = inputs.permute(0, 3, 1, 2)
# aa = input1[0,:,:,1]
# bb = input[0,1,:,:]
input = input / 255
output = model(input, **kwargs)
nll = criterion(output, target)
loss = nll.clone()
if regularizer is not None:
loss += regularizer(model)
nll_sum += nll.item() * input.size(0)
loss_sum += loss.item() * input.size(0)
pred = output.data.argmax(1, keepdim=True)
correct += pred.eq(target.data.view_as(pred)).sum().item()
return {
'nll': nll_sum / num_poison,
'loss': loss_sum / num_poison,
'accuracy': correct * 100.0 / num_poison,
}
def test(test_loader, model, criterion, regularizer=None, **kwargs):
loss_sum = 0.0
nll_sum = 0.0
correct = 0.0
model.eval()
for input, target in test_loader:
        input = input.cuda(non_blocking=True)
        target = target.cuda(non_blocking=True)
output = model(input, **kwargs)
nll = criterion(output, target)
loss = nll.clone()
if regularizer is not None:
loss += regularizer(model)
nll_sum += nll.item() * input.size(0)
loss_sum += loss.item() * input.size(0)
pred = output.data.argmax(1, keepdim=True)
correct += pred.eq(target.data.view_as(pred)).sum().item()
return {
'nll': nll_sum / len(test_loader.dataset),
'loss': loss_sum / len(test_loader.dataset),
'accuracy': correct * 100.0 / len(test_loader.dataset),
}
def predictions(test_loader, model, **kwargs):
model.eval()
preds = []
targets = []
for input, target in test_loader:
        input = input.cuda(non_blocking=True)
output = model(input, **kwargs)
probs = F.softmax(output, dim=1)
preds.append(probs.cpu().data.numpy())
targets.append(target.numpy())
return np.vstack(preds), np.concatenate(targets)
def isbatchnorm(module):
return issubclass(module.__class__, torch.nn.modules.batchnorm._BatchNorm) or \
issubclass(module.__class__, curves._BatchNorm)
def _check_bn(module, flag):
if isbatchnorm(module):
flag[0] = True
def check_bn(model):
flag = [False]
model.apply(lambda module: _check_bn(module, flag))
return flag[0]
def reset_bn(module):
if isbatchnorm(module):
module.reset_running_stats()
def _get_momenta(module, momenta):
if isbatchnorm(module):
momenta[module] = module.momentum
def _set_momenta(module, momenta):
if isbatchnorm(module):
module.momentum = momenta[module]
def update_bn(loader, model, **kwargs):
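    # Recompute BatchNorm running statistics by forwarding the whole loader through
    # the model; the momentum is adjusted per batch so every sample is weighted equally.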
if not check_bn(model):
return
model.train()
momenta = {}
model.apply(reset_bn)
model.apply(lambda module: _get_momenta(module, momenta))
num_samples = 0
for input, _ in loader:
        input = input.cuda(non_blocking=True)
batch_size = input.data.size(0)
momentum = batch_size / (num_samples + batch_size)
for module in momenta.keys():
module.momentum = momentum
model(input, **kwargs)
num_samples += batch_size
model.apply(lambda module: _set_momenta(module, momenta))
|
py | b4052253b23a298f31bcc105fdb153f1ee34eeed | from __future__ import print_function
import argparse
import os
import pandas as pd
import datetime
now = datetime.datetime.now()
## TODO: Import any additional libraries you need to define a model
from sklearn.decomposition import PCA
from sklearn.ensemble import AdaBoostClassifier, BaggingClassifier, RandomForestClassifier
from sklearn.externals import joblib
from sklearn.metrics import accuracy_score, confusion_matrix, fbeta_score
from sklearn.metrics import f1_score, make_scorer, mean_absolute_error, mean_squared_error
from sklearn.metrics import precision_score, recall_score, r2_score
from sklearn.model_selection import GridSearchCV
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import MaxAbsScaler, MinMaxScaler, Normalizer
from sklearn.preprocessing import PowerTransformer, QuantileTransformer
from sklearn.preprocessing import RobustScaler, StandardScaler
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
# Provided model load function
def model_fn(model_dir):
"""Load model from the model_dir. This is the same model that is saved
in the main if statement.
"""
print("Loading model.")
# load using joblib
model = joblib.load(os.path.join(model_dir, "model.joblib"))
print("Done loading model.")
return model
## TODO: Complete the main code
if __name__ == '__main__':
# All of the model parameters and training parameters are sent as arguments
# when this script is executed, during a training job
# Here we set up an argument parser to easily access the parameters
parser = argparse.ArgumentParser()
# SageMaker parameters, like the directories for training data and saving models; set automatically
# Do not need to change
parser.add_argument('--output-data-dir', type=str, default=os.environ['SM_OUTPUT_DATA_DIR'])
parser.add_argument('--model-dir', type=str, default=os.environ['SM_MODEL_DIR'])
parser.add_argument('--data-dir', type=str, default=os.environ['SM_CHANNEL_TRAIN'])
## TODO: Add any additional arguments that you will need to pass into your model
# args holds all passed-in arguments
args = parser.parse_args()
# Read in csv training file
training_dir = args.data_dir
train_data = pd.read_csv(os.path.join(training_dir, "train.csv"), header=None, names=None)
# Labels are in the first column
train_y = train_data.iloc[:,0]
train_x = train_data.iloc[:,1:]
## --- Your code here --- ##
## TODO: Define a model
## TODO: Train the model
random_state = 42
'''
AdaBoostClassifier(algorithm='SAMME.R', base_estimator=None, learning_rate=0.1,
n_estimators=10, random_state=42)
accuracy: 0.9590686274509805
BaggingClassifier(base_estimator=None, bootstrap=True, bootstrap_features=False,
max_features=3, max_samples=1.0, n_estimators=15, n_jobs=None,
oob_score=False, random_state=42, verbose=0,
warm_start=False)
accuracy: 0.9653186274509805
DecisionTreeClassifier(ccp_alpha=0.0, class_weight=None, criterion='gini',
max_depth=None, max_features=None, max_leaf_nodes=None,
min_impurity_decrease=0.0, min_impurity_split=None,
min_samples_leaf=1, min_samples_split=2,
min_weight_fraction_leaf=0.0, presort=False,
random_state=42, splitter='best')
accuracy: 0.9340686274509805
KNeighborsClassifier(algorithm='auto', leaf_size=5, metric='minkowski',
metric_params=None, n_jobs=None, n_neighbors=5, p=2,
weights='uniform')
accuracy: 0.9590686274509805
RandomForestClassifier(bootstrap=True, ccp_alpha=0.0, class_weight=None,
criterion='gini', max_depth=2, max_features='auto',
max_leaf_nodes=None, max_samples=None,
min_impurity_decrease=0.0, min_impurity_split=None,
min_samples_leaf=1, min_samples_split=2,
min_weight_fraction_leaf=0.0, n_estimators=30,
n_jobs=None, oob_score=False, random_state=42, verbose=0,
warm_start=False)
accuracy: 0.9653186274509805
SVC(C=100, break_ties=False, cache_size=200, class_weight=None, coef0=0.0,
decision_function_shape='ovr', degree=3, gamma=0.6, kernel='rbf',
max_iter=-1, probability=False, random_state=42, shrinking=True, tol=0.001,
verbose=False)
accuracy: 0.9757352941176471
'''
svc = SVC(C=1.0, kernel='rbf', degree=3, gamma='auto_deprecated', coef0=0.0, shrinking=True,
probability=False, tol=0.001,cache_size=200, class_weight=None, verbose=False,
max_iter=-1, decision_function_shape='ovr', random_state=random_state)
parameters = {'C': [1,10,100,1000],'gamma': [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]}
scoring = make_scorer(fbeta_score, beta=0.5)
grid_search = GridSearchCV(estimator=svc, param_grid = parameters, scoring = scoring,cv=10)
grid_search = grid_search.fit(train_x, train_y)
best_svc = grid_search.best_estimator_
model = best_svc
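    # Hedged illustrative addition (not in the original script): report the
    # hyper-parameters selected by the grid search and the accuracy on the
    # training data, using only names defined above.
    print("Best parameters found: {}".format(grid_search.best_params_))
    print("Training accuracy: {:.4f}".format(accuracy_score(train_y, model.predict(train_x))))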
## --- End of your code --- ##
# Save the trained model
joblib.dump(model, os.path.join(args.model_dir, "model.joblib")) |
py | b4052256118189307013a372a18bf197588df126 | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from model.update import BasicUpdateBlock
from model.extractor import BasicEncoder
from model.corr import CorrBlock
from model.utils import coords_grid, upflow8
from argparse import Namespace
from utils.image_utils import ImagePadder
from model.gam import Aggregate, Attention
try:
autocast = torch.cuda.amp.autocast
except:
# dummy autocast for PyTorch < 1.6
class autocast:
def __init__(self, enabled):
pass
def __enter__(self):
pass
def __exit__(self, *args):
pass
def get_args():
    # This is an adapter function that converts the arguments given in our config file
    # to the format that ERAFT expects.
args = Namespace(small=False,
dropout=False,
mixed_precision=False,
clip=1.0)
return args
class ERAFT(nn.Module):
def __init__(self, args, n_first_channels):
# args:
super(ERAFT, self).__init__()
# args = get_args()
self.args = args
self.image_padder = ImagePadder(min_size=32)
self.subtype = self.args.type
assert (self.subtype == 'standard' or self.subtype == 'warm_start')
self.hidden_dim = hdim = 128
self.context_dim = cdim = 128
'''
args.corr_levels = 4
args.corr_radius = 4
'''
# feature network, context network, and update block
self.fnet = BasicEncoder(output_dim=256, norm_fn='instance', dropout=0,
n_first_channels=n_first_channels)
self.cnet = BasicEncoder(output_dim=hdim+cdim, norm_fn='batch', dropout=0,
n_first_channels=n_first_channels)
self.update_block = BasicUpdateBlock(self.args, hidden_dim=hdim)
self.att = Attention(args=self.args, dim=cdim, max_pos_size=160, heads=self.args.num_heads, dim_head=cdim)
def freeze_bn(self):
for m in self.modules():
if isinstance(m, nn.BatchNorm2d):
m.eval()
def initialize_flow(self, img):
""" Flow is represented as difference between two coordinate grids flow = coords1 - coords0"""
N, C, H, W = img.shape
coords0 = coords_grid(N, H//8, W//8).to(img.device)
coords1 = coords_grid(N, H//8, W//8).to(img.device)
# optical flow computed as difference: flow = coords1 - coords0
return coords0, coords1
def upsample_flow(self, flow, mask):
""" Upsample flow field [H/8, W/8, 2] -> [H, W, 2] using convex combination """
N, _, H, W = flow.shape
mask = mask.view(N, 1, 9, 8, 8, H, W)
mask = torch.softmax(mask, dim=2)
up_flow = F.unfold(8 * flow, [3,3], padding=1)
up_flow = up_flow.view(N, 2, 9, 1, 1, H, W)
up_flow = torch.sum(mask * up_flow, dim=2)
up_flow = up_flow.permute(0, 1, 4, 2, 5, 3)
return up_flow.reshape(N, 2, 8*H, 8*W)
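    # Shape walkthrough for the convex upsampling above (illustrative comment,
    # not part of the original code): flow enters as (N, 2, H, W) and mask as
    # (N, 9*8*8, H, W); the mask is reshaped into per-pixel weights over a 3x3
    # neighbourhood for each of the 8x8 sub-pixel positions, softmax-normalised,
    # and used to take a convex combination of the unfolded, 8x-scaled coarse
    # flow, yielding an output of shape (N, 2, 8*H, 8*W).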
def forward(self, image1, image2, iters=12, flow_init=None, upsample=True):
"""
Estimate optical flow between a pair of frames
        image1: old voxel grid, image2: new voxel grid (the key frame)
"""
# Pad Image (for flawless up&downsampling)
image1 = self.image_padder.pad(image1)
image2 = self.image_padder.pad(image2)
        image1 = image1.contiguous()  # ensure a contiguous memory layout (copies if needed)
image2 = image2.contiguous()
hdim = self.hidden_dim
cdim = self.context_dim
# run the feature network
with autocast(enabled=self.args.mixed_precision):
fmap1, fmap2 = self.fnet([image1, image2])
fmap1 = fmap1.float()
fmap2 = fmap2.float()
corr_fn = CorrBlock(fmap1, fmap2, radius=self.args.corr_radius)
# run the context network
with autocast(enabled=self.args.mixed_precision):
if (self.subtype == 'standard' or self.subtype == 'warm_start'):
cnet = self.cnet(image2)
else:
raise Exception
net, inp = torch.split(cnet, [hdim, cdim], dim=1)
net = torch.tanh(net)
inp = torch.relu(inp)
attention = self.att(inp)
# Initialize Grids. First channel: x, 2nd channel: y. Image is just used to get the shape
coords0, coords1 = self.initialize_flow(image1)
if flow_init is not None:
coords1 = coords1 + flow_init
flow_predictions = []
for itr in range(iters):
coords1 = coords1.detach()
corr = corr_fn(coords1) # index correlation volume
flow = coords1 - coords0
with autocast(enabled=self.args.mixed_precision):
net, up_mask, delta_flow = self.update_block(net, inp, corr, flow, attention)
# F(t+1) = F(t) + \Delta(t)
coords1 = coords1 + delta_flow
# upsample predictions
if up_mask is None:
flow_up = upflow8(coords1 - coords0)
else:
flow_up = self.upsample_flow(coords1 - coords0, up_mask)
flow_predictions.append(self.image_padder.unpad(flow_up))
return coords1 - coords0, flow_predictions
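# Hedged usage sketch (not part of the original module). The fields below are
# the ones this class actually reads from `args` (type, mixed_precision,
# corr_radius, num_heads); the concrete values and input names are assumptions.
#
#   args = Namespace(type='standard', mixed_precision=False,
#                    corr_radius=4, num_heads=1)
#   model = ERAFT(args, n_first_channels=5).eval()
#   with torch.no_grad():
#       low_res_flow, flow_predictions = model(voxel_old, voxel_new, iters=12)
#   final_flow = flow_predictions[-1]  # full-resolution flow, (N, 2, H, W)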
|
py | b40523b948598df3fd73df59ad569f4dcacc04ca |
import os.path as path
import re
import os
import pickle
import requests
from multiprocessing import Pool
import spacy
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from gensim.models import Word2Vec, KeyedVectors
from ._choose_tokenizer import choose_tokenizer
from ._vocab_tokenizer import VocabTokenizer
from ._single_sequence_dataset import SingleSequenceDataset
class _Normalizer(VocabTokenizer):
def __init__(self):
super().__init__()
self._tokenizer = spacy.load('en_core_web_sm',
disable=['parser', 'tagger', 'ner', 'lemmatizer'])
def normalize(self, sentence):
sentence = re.sub(r'\[\s*\*\s*\*(.*?)\*\s*\*\s*\]', ' <DE> ', sentence)
sentence = re.sub(r'([^a-zA-Z0-9])(\s*\1\s*)+', r'\1 ', sentence)
sentence = re.sub(r'\s+', ' ', sentence.strip())
sentence = [t.text.lower() for t in self._tokenizer(sentence)]
sentence = [
self.digits_token if any(char.isdigit() for char in word) else word
for word in sentence
]
return ' '.join(sentence)
class MimicTokenizer(VocabTokenizer):
def __init__(self):
super().__init__(min_df=5)
def tokenize(self, sentence):
return sentence.split(' ')
class MimicDataset(SingleSequenceDataset):
"""Loads the Diabetes or Anemia dataset from MIMIC (III)
Note that there are special min-max lengths, see:
https://github.com/successar/AttentionExplanation/blob/master/Trainers/DatasetBC.py#L113
"""
def __init__(self, cachedir, mimicdir, model_type, subset='diabetes', batch_size=32, **kwargs):
"""Creates an MIMIC dataset instance
Args:
cachedir (str): Directory to use for caching the compiled dataset.
mimicdir (str): Directory where the mimic source files are found.
            subset ('diabetes', 'anemia'): Which subset to use.
seed (int): Seed used for shuffling the dataset.
batch_size (int, optional): The batch size used in the data loader. Defaults to 32.
num_workers (int, optional): The number of pytorch workers in the data loader. Defaults to 4.
"""
if subset not in ['diabetes', 'anemia']:
raise ValueError('subset must be either "diabetes" or "anemia"')
tokenizer = choose_tokenizer(cachedir, model_type, MimicTokenizer)
super().__init__(cachedir, f'mimic-{subset[0]}', model_type, tokenizer, batch_size=batch_size, **kwargs)
self._mimicdir = path.realpath(mimicdir)
self._subset = subset
self.label_names = ['negative', 'positive']
def embedding(self):
"""Creates word embedding matrix.
Returns:
np.array: shape = (vocabulary, 300)
"""
if self.model_type != 'rnn':
return None
lookup = KeyedVectors.load(f'{self._cachedir}/embeddings/mimic.wv')
embeddings = []
for word in self.tokenizer.ids_to_token:
if word == self.tokenizer.pad_token:
embeddings.append(np.zeros(300))
elif word == self.tokenizer.digits_token:
embeddings.append(lookup[word])
elif word in set(self.tokenizer.special_symbols) or word not in lookup:
embeddings.append(self._np_rng.randn(300))
else:
embeddings.append(lookup[word])
return np.vstack(embeddings)
def prepare_data(self):
"""Compile and cache the dataset.
"""
# Short-circuit the build logic if the minimum-required files exists
if (path.exists(f'{self._cachedir}/embeddings/mimic.wv') and
path.exists(f'{self._cachedir}/vocab/mimic-a.vocab') and
path.exists(f'{self._cachedir}/vocab/mimic-d.vocab') and
path.exists(f'{self._cachedir}/encoded/mimic-a_{self.model_type}.pkl') and
path.exists(f'{self._cachedir}/encoded/mimic-d_{self.model_type}.pkl')):
self.tokenizer.from_file(f'{self._cachedir}/vocab/{self.name}.vocab')
return
# Ensure that confidential files exists
if not path.exists(f'{self._mimicdir}/DIAGNOSES_ICD.csv.gz'):
raise IOError(f'The file "{self._mimicdir}/DIAGNOSES_ICD.csv.gz" is missing')
if not path.exists(f'{self._mimicdir}/NOTEEVENTS.csv.gz'):
raise IOError(f'The file "{self._mimicdir}/NOTEEVENTS.csv.gz" is missing')
# Download splitfiles
os.makedirs(f'{self._cachedir}/mimic-dataset/hadm_ids', exist_ok=True)
for split in ['train', 'dev', 'test']:
if not path.exists(f'{self._cachedir}/mimic-dataset/hadm_ids/{split}_50_hadm_ids.csv'):
with open(f'{self._cachedir}/mimic-dataset/hadm_ids/{split}_50_hadm_ids.csv', 'wb') as fp:
download_url = ('https://raw.githubusercontent.com/successar/AttentionExplanation'
f'/master/preprocess/MIMIC/{split}_50_hadm_ids.csv')
fp.write(requests.get(download_url).content)
# Build merged and normalized datafile
df_merged = None
if not path.exists(f'{self._cachedir}/mimic-dataset/merged.csv.gz'):
# Filter and collect ICD9-codes for each subject+HADM
df_icd9_codes = pd.read_csv(f'{self._mimicdir}/DIAGNOSES_ICD.csv.gz', compression='gzip',
usecols=['SUBJECT_ID', 'HADM_ID', 'ICD9_CODE'])
df_icd9_codes.dropna(inplace=True)
df_icd9_codes = df_icd9_codes.groupby(['SUBJECT_ID', 'HADM_ID'], as_index=False).agg({
'ICD9_CODE': lambda codes: ';'.join(code[:3] + '.' + code[3:] for code in codes)
})
# Filter and collect discharge summaries
print('Reading MIMIC CSV file')
df_notes = pd.read_csv(f'{self._mimicdir}/NOTEEVENTS.csv.gz', compression='gzip',
usecols=['SUBJECT_ID', 'HADM_ID', 'CATEGORY', 'CHARTDATE', 'DESCRIPTION', 'TEXT'])
df_notes.dropna(inplace=True)
df_notes = df_notes[df_notes['CATEGORY'] == 'Discharge summary']
df_notes.replace({'DESCRIPTION': {'Report' : 0, 'Addendum' : 1}}, inplace=True)
df_notes.sort_values(by=['DESCRIPTION', 'CHARTDATE'], inplace=True)
df_notes = df_notes.groupby(['SUBJECT_ID', 'HADM_ID'], as_index=False).agg({
'TEXT': lambda texts: " ".join(texts).strip()
})
# Merge tables
print('Merging MIMIC')
df_merged = df_notes.merge(df_icd9_codes, on=['SUBJECT_ID', 'HADM_ID'])
# Clean data
print('Cleaning MIMIC')
with Pool(processes=max(1, self._num_workers)) as p:
df_merged['TEXT'] = p.map(_Normalizer().normalize, df_merged['TEXT'])
df_merged.to_csv(f'{self._cachedir}/mimic-dataset/merged.csv.gz', compression='gzip', index=False)
# Build embedding model
os.makedirs(f'{self._cachedir}/embeddings', exist_ok=True)
if not path.exists(f'{self._cachedir}/embeddings/mimic.wv'):
print('building embedding model')
if df_merged is None:
df_merged = pd.read_csv(f'{self._cachedir}/mimic-dataset/merged.csv.gz', compression='gzip')
embedding = Word2Vec(map(lambda x: x.split(' '), df_merged['TEXT']),
vector_size=300, window=10, min_count=2,
workers=max(1, self._num_workers))
embedding.wv.save(f'{self._cachedir}/embeddings/mimic.wv')
# Build anemia dataset
os.makedirs(f'{self._cachedir}/encoded', exist_ok=True)
if not path.exists(f'{self._cachedir}/encoded/mimic-a_{self.model_type}.pkl'):
print('building anemia dataset')
if df_merged is None:
df_merged = pd.read_csv(f'{self._cachedir}/mimic-dataset/merged.csv.gz', compression='gzip')
# Filter data and assign target
codes = df_merged['ICD9_CODE'].str.split(';')
has_c1 = codes.apply(lambda x: any(code.startswith('285.1') for code in x))
has_c2 = codes.apply(lambda x: any(code.startswith('285.2') for code in x))
df_anemia = df_merged.loc[has_c1 ^ has_c2, :]
df_anemia = df_anemia.assign(
target = has_c1[has_c1 ^ has_c2].astype('int64')
)
# Split data
all_idx = list(range(len(df_anemia)))
train_idx, test_idx = train_test_split(all_idx, stratify=df_anemia.loc[:, 'target'],
test_size=0.2, random_state=12939)
train_idx, val_idx = train_test_split(train_idx, stratify=df_anemia.loc[:, 'target'].iloc[train_idx],
test_size=0.15, random_state=13448)
# Build vocabulary
os.makedirs(f'{self._cachedir}/vocab', exist_ok=True)
tokenizer_anemia = choose_tokenizer(self._cachedir, self.model_type, MimicTokenizer)
tokenizer_anemia.from_iterable(df_anemia.iloc[train_idx, :].loc[:, 'TEXT'])
tokenizer_anemia.to_file(f'{self._cachedir}/vocab/mimic-a.vocab')
# Encode dataset
data_anemia = {}
for name, split in [('train', train_idx), ('val', val_idx), ('test', test_idx)]:
observations = []
for idx in split:
token_idx = tokenizer_anemia.encode(df_anemia.loc[:, 'TEXT'].iat[idx])
if len(token_idx) - 2 > 4000:
continue
observations.append({
'sentence': token_idx,
'label': df_anemia.loc[:, 'target'].iat[idx],
'index': idx
})
data_anemia[name] = observations
# Save dataset
with open(f'{self._cachedir}/encoded/mimic-a_{self.model_type}.pkl', 'wb') as fp:
pickle.dump(data_anemia, fp)
# Build diabetes dataset
os.makedirs(f'{self._cachedir}/encoded', exist_ok=True)
if not path.exists(f'{self._cachedir}/encoded/mimic-d_{self.model_type}.pkl'):
print('building diabetes dataset')
if df_merged is None:
df_merged = pd.read_csv(f'{self._cachedir}/mimic-dataset/merged.csv.gz', compression='gzip')
            # Load predefined hadm_ids
hadm_ids = {}
for split in ['train', 'dev', 'test']:
hadm_ids_df = pd.read_csv(f'{self._cachedir}/mimic-dataset/hadm_ids/{split}_50_hadm_ids.csv', header=None)
hadm_ids[split] = list(hadm_ids_df[0])
hadm_ids_all = hadm_ids['train'] + hadm_ids['dev'] + hadm_ids['test']
# Filter data and assign target
df_diabetes = df_merged.loc[df_merged['HADM_ID'].isin(hadm_ids_all), :]
codes = df_diabetes['ICD9_CODE'].str.split(';')
has_d1 = codes.apply(lambda x: any(code.startswith('250.00') for code in x))
df_diabetes = df_diabetes.assign(
target = has_d1.astype('int64'),
index = np.arange(len(df_diabetes))
)
# Build vocabulary
os.makedirs(f'{self._cachedir}/vocab', exist_ok=True)
tokenizer_diabetes = choose_tokenizer(self._cachedir, self.model_type, MimicTokenizer)
tokenizer_diabetes.from_iterable(df_diabetes.loc[df_diabetes['HADM_ID'].isin(hadm_ids['train']), 'TEXT'])
tokenizer_diabetes.to_file(f'{self._cachedir}/vocab/mimic-d.vocab')
# Encode dataset
data_diabetes = {}
for name, split in [('train', hadm_ids['train']), ('val', hadm_ids['dev']), ('test', hadm_ids['test'])]:
df_split = df_diabetes.loc[df_diabetes['HADM_ID'].isin(split), :]
observations = []
for idx in range(len(df_split)):
token_idx = tokenizer_diabetes.encode(df_split.loc[:, 'TEXT'].iat[idx])
                    # keep only documents whose length (excluding special tokens) is between 6 and 4000
                    if not (6 <= len(token_idx) - 2 <= 4000):
continue
observations.append({
'sentence': token_idx,
'label': df_split.loc[:, 'target'].iat[idx],
'index': df_split.loc[:, 'index'].iat[idx]
})
data_diabetes[name] = observations
# Save dataset
with open(f'{self._cachedir}/encoded/mimic-d_{self.model_type}.pkl', 'wb') as fp:
pickle.dump(data_diabetes, fp)
# Load relevant vocabulary
self.tokenizer.from_file(f'{self._cachedir}/vocab/{self.name}.vocab')
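# Hedged usage sketch (not part of the original module); paths and keyword
# values are illustrative only.
#
#   dataset = MimicDataset(cachedir='./cache', mimicdir='./mimic-iii',
#                          model_type='rnn', subset='diabetes', batch_size=32)
#   dataset.prepare_data()                  # builds vocab + encoded pickles on first run
#   embedding_matrix = dataset.embedding()  # (vocab_size, 300) ndarray for the 'rnn' model type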
|
py | b4052622db8898af545a5213e6138ffc5e814872 | #!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from __future__ import print_function
import optparse
from proton.handlers import MessagingHandler
from proton.reactor import Container
# helper function
def get_options():
parser = optparse.OptionParser(usage="usage: %prog [options]")
parser.add_option("-u", "--url", default="localhost:5672",
help="amqp message broker host url (default %default)")
parser.add_option("-a", "--address", default="examples",
help="node address from which messages are received (default %default)")
parser.add_option("-m", "--messages", type="int", default=100,
help="number of messages to receive; 0 receives indefinitely (default %default)")
parser.add_option("-o", "--username", default=None,
help="username for authentication (default %default)")
parser.add_option("-p", "--password", default=None,
help="password for authentication (default %default)")
opts, args = parser.parse_args()
return opts
"""
Proton event handler class
Creates an amqp connection using ANONYMOUS or PLAIN authentication.
Then attaches a receiver link to consume messages from the broker.
"""
class Recv(MessagingHandler):
def __init__(self, url, address, count, username, password):
super(Recv, self).__init__()
# amqp broker host url
self.url = url
# amqp node address
self.address = address
# authentication credentials
self.username = username
self.password = password
# messaging counters
self.expected = count
self.received = 0
def on_start(self, event):
# select authentication options for connection
if self.username:
# basic username and password authentication
conn = event.container.connect(url=self.url,
user=self.username,
password=self.password,
allow_insecure_mechs=True)
else:
# Anonymous authentication
conn = event.container.connect(url=self.url)
# create receiver link to consume messages
if conn:
event.container.create_receiver(conn, source=self.address)
def on_message(self, event):
if event.message.id and event.message.id < self.received:
# ignore duplicate message
return
if self.expected == 0 or self.received < self.expected:
print(event.message.body)
self.received += 1
if self.received == self.expected:
print('received all', self.expected, 'messages')
event.receiver.close()
event.connection.close()
# the on_transport_error event catches socket and authentication failures
def on_transport_error(self, event):
print("Transport error:", event.transport.condition)
MessagingHandler.on_transport_error(self, event)
def on_disconnected(self, event):
print("Disconnected")
# parse arguments and get options
opts = get_options()
"""
The amqp address can be a topic or a queue.
Use no prefix, or the 'queue://' prefix, in the amqp receiver source
address to receive messages from a queue.
"""
try:
Container(Recv(opts.url, opts.address, opts.messages, opts.username, opts.password)).run()
except KeyboardInterrupt: pass
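# Hedged usage note (not part of the original example): a typical invocation,
# assuming a broker on localhost and a queue named 'examples'; the script
# filename is a placeholder.
#
#   python recv.py -u localhost:5672 -a queue://examples -m 10 -o user -p secret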
|
py | b40526c6de78083c8b03da7eb614bb3f9953f81d | # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
)
from transformers.utils import logging
logger = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data.dataset import Dataset
from transformers import Trainer
class DummyDataset(Dataset):
def __init__(self, length: int = 101):
self.length = length
def __len__(self):
return self.length
def __getitem__(self, i) -> int:
return i
class DummyDataCollator:
def __call__(self, features):
return {"input_ids": torch.tensor(features), "labels": torch.tensor(features)}
class DummyModel(nn.Module):
def __init__(self):
super().__init__()
# Add some (unused) params otherwise DDP will complain.
self.fc = nn.Linear(120, 80)
def forward(self, input_ids, labels=None):
if labels is not None:
return torch.tensor(0.0, device=input_ids.device), input_ids
else:
return input_ids
class TestTrainerDistributed(TestCasePlus):
@require_torch_multi_gpu
def test_trainer(self):
distributed_args = f"""
-m torch.distributed.launch
--nproc_per_node={torch.cuda.device_count()}
--master_port={get_torch_dist_unique_port()}
{self.test_file_dir}/test_trainer_distributed.py
""".split()
output_dir = self.get_auto_remove_tmp_dir()
args = f"--output_dir {output_dir}".split()
cmd = [sys.executable] + distributed_args + args
execute_subprocess_async(cmd, env=self.get_env())
# successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
# The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
#
# PYTHONPATH="src" python -m torch.distributed.launch --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
parser = HfArgumentParser((TrainingArguments,))
training_args = parser.parse_args_into_dataclasses()[0]
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
f"distributed training: {training_args.local_rank != -1}"
)
# Essentially, what we want to verify in the distributed case is that we get all samples back,
# in the right order. (this is crucial for prediction for instance)
for dataset_length in [101, 40, 7]:
dataset = DummyDataset(dataset_length)
def compute_metrics(p: EvalPrediction) -> Dict:
sequential = list(range(len(dataset)))
success = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
if not success and training_args.local_rank == 0:
logger.warning(
"Predictions and/or labels do not match expected results:\n - predictions: "
f"{p.predictions.tolist()}\n - labels: {p.label_ids.tolist()}\n - expected: {sequential}"
)
return {"success": success}
trainer = Trainer(
model=DummyModel(),
args=training_args,
data_collator=DummyDataCollator(),
eval_dataset=dataset,
compute_metrics=compute_metrics,
)
metrics = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
p = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
trainer.args.eval_accumulation_steps = 2
metrics = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
p = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
trainer.args.eval_accumulation_steps = None
|
py | b40527a73ca9223f7055f957aa3ca588c3ff1e78 | # coding=utf-8 Copyright 2020 Google LLC Licensed under the Apache License,
# Version 2.0 (the "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
'''Command line application to output all POI in an area of the OSM.
Example:
$ bazel-bin/cabby/geo/map_processing/map_processor --region "DC" \
--min_s2_level 18 --directory "./cabby/geo/map_processing/poiTestData/"
'''
from absl import app
from absl import flags
from absl import logging
import osmnx as ox
from shapely.geometry.point import Point
from cabby.geo import regions
from cabby.geo.map_processing import map_structure
FLAGS = flags.FLAGS
flags.DEFINE_enum(
"region", None, regions.SUPPORTED_REGION_NAMES,
regions.REGION_SUPPORT_MESSAGE)
flags.DEFINE_integer("min_s2_level", None, "Minimum S2 level of the map.")
flags.DEFINE_string("directory", None,
"The directory where the files will be saved to")
# Required flags.
flags.mark_flag_as_required("region")
flags.mark_flag_as_required("min_s2_level")
def main(argv):
del argv # Unused.
logging.info(
"Starting to build map of {} at level {}.".format(FLAGS.region, FLAGS.min_s2_level))
map = map_structure.Map(regions.get_region(FLAGS.region), FLAGS.min_s2_level)
logging.info(
"Created map of {} at level {}.".format(FLAGS.region, FLAGS.min_s2_level))
if FLAGS.directory is not None:
# Write to disk.
map.write_map(FLAGS.directory)
logging.info("Map written to => {}".format(FLAGS.directory))
# Load from disk.
map_new = map_structure.Map(
regions.get_region(FLAGS.region), FLAGS.min_s2_level, FLAGS.directory)
logging.info('Number of POI found: {0}'.format(map_new.poi.shape[0]))
if __name__ == '__main__':
app.run(main) |
py | b40529ff27b774c6b4c2aa20fbd4b7f7bc521658 | import os
from Jumpscale import j
from parameterized import parameterized
from unittest import TestCase
locations = []
for root, dirs, files in os.walk(
j.core.tools.text_replace("{DIR_BASE}/code/github/threefoldtech/jumpscaleX_core/JumpscaleCore")
):
for file in files:
if file.endswith(".py"):
file_path = os.path.join(root, file)
with open(file_path, "r") as f:
content = f.read()
if "__jslocation__ =" in content:
jslocation = content.find("__jslocation__")
location = content[content.find("=", jslocation) + 1 : content.find("\n", jslocation)]
locations.append(location.strip().strip("'").strip('"'))
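# Illustrative note (an addition, not part of the original test file): a source
# line matched by the scan above looks roughly like
#
#   __jslocation__ = "j.clients.redis"
#
# and the stripped right-hand side is what ends up in `locations`.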
class FullCoreTests(TestCase):
@parameterized.expand(locations)
def test(self, location):
module = eval(location)
if "test" in dir(module):
test = module.__getattribute__("test")
test()
else:
self.skipTest(f"{location} doesn't has test")
class CoreTests(TestCase):
@parameterized.expand(
[
"j.data.bcdb.test()",
"j.data.capnp.test()",
# "j.data.nacl.test()",
"j.data.schema.test()",
"j.data.types.test()",
"j.clients.sshkey.test()",
"j.clients.sshagent.test()",
"j.clients.ssh.test()",
"j.sal.bcdbfs.test()",
"j.tools.threebot.packages.test()",
"j.tools.syncer.test()",
"j.tools.executor.test()",
"j.tools.time.test()",
"j.tools.formatters.test()",
"j.tools.threegit.test()",
"j.tools.logger.test()",
"j.tools.jinja2.test()",
"j.tools.restic.test()",
"j.clients.redis.test()",
"j.clients.tfgrid_registry.test()",
"j.clients.sqlitedb.test()",
"j.clients.currencylayer.test()",
"j.clients.sonic.test()",
"j.clients.rdb.test()",
"j.clients.redis_config.test()",
"j.clients.threebot.test()",
"j.clients.tcp_router.test()",
"j.clients.zdb.test()",
"j.servers.gedis.test()",
"j.servers.threebot.test()",
"j.servers.openresty.test()",
"j.servers.myjobs.test()",
"j.servers.sonic.test()",
"j.servers.corex.test()",
"j.servers.tmux.test()",
"j.servers.zdb.test()",
]
)
def test(self, cmd):
if cmd == "j.clients.zdb.test()":
self.skipTest("This test shouldn't be run on CI, as it depends on machine speed (performance test)")
else:
eval(cmd)
|
py | b4052bbdd3e307bc0280fc0384672b3b4ead7881 | #encoding=utf-8
import numpy as np
class HyperVolume:
'''
    Compute the hypervolume indicator.
    Objective values are expected to be normalized beforehand.
'''
def __init__(self,object_number):
'''
        :param object_number: int, number of objectives
'''
self.__object_number = object_number
def get_hyperVolume(self,solutions,refer_point = [1.1,1.1]):
'''
        Compute the hypervolume value.
        :param solutions: list, the non-dominated set given as objective-value pairs, e.g. [[object1, object2], [object1, object2], ...]
        :param refer_point: list, reference point that must be dominated by every point in solutions; defaults to [1.1, 1.1]
'''
refer_point = np.array(refer_point)
solutions = np.array(solutions)
        solutions = solutions[solutions[:,0].argsort(kind="mergesort")[::-1]]  # sort by the first objective in descending order
volume = 0
for i in solutions:
volume += abs(i[0] - refer_point[0])*abs(i[1] - refer_point[1])
refer_point[0] -= refer_point[0] - i[0]
return volume |
py | b4052d421e0f863d5e377fed19d4d399ebf3f566 | import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="recipe_parser", # Replace with your own username
version="0.1",
author="Luis Rita",
author_email="[email protected]",
description="Retrieves ingredients, quantities and units from any recipe.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/warcraft12321/HyperFoods",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
install_requires=[],
python_requires='>=3.6',
license='MIT'
)
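# Hedged usage note (not part of the original setup script): from the project
# root the package would typically be installed with
#
#   pip install .
#
# after which `import recipe_parser` becomes available.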
|
py | b4052d4d70e111675157ed8a11549301b8b192e5 | from enum import Enum
import numpy as np
from scipy.optimize import minimize
from oddt import random_seed
from oddt.docking.MCMCUtils import MCMCUtils
from oddt.docking.internal import generate_rotor_vector
class OptimizationMethod(Enum):
"""
method of optimization (one of SIMPLEX/NO_OPTIM, NELDER_MEAD or LBFGSB)
"""
SIMPLEX = 1
NO_OPTIM = 1
NELDER_MEAD = 2
LBFGSB = 3
class MCMCAlgorithm(object):
def __init__(self, engine, optim=OptimizationMethod.NELDER_MEAD, optim_iter=10, mc_steps=50,
mut_steps=100, seed=None, ):
"""
Parameters
----------
engine: CustomEngine
engine with prepared molecules and defined scoring function
optim: OptimizationMethod
method of optimization (or SIMPLEX/NO_OPTIM if none) around locally chosen conformation point,
must be one of the values in OptimizationMethod enumeration
optim_iter: int (default=10)
number of iterations for local optimization, in the scipy.optimize.minimize
mc_steps: int (default=50)
number of steps performed by the MCMC algorithm
        mut_steps: int (default=100)
number of mutation steps (while choosing next random conformation)
seed: int
seed for the pseudonumber generators
"""
self.engine = engine
self.ligand = engine.lig
self.optim = optim
self.optim_iter = optim_iter
self.mc_steps = mc_steps
self.mut_steps = mut_steps
if seed:
random_seed(seed)
self.num_rotors = len(self.engine.rotors)
self.lig_dict = self.engine.lig_dict
self.mcmc_utils = MCMCUtils()
def perform(self):
"""
performs the algorithm
Returns
-------
conformation, score: float[], float
best conformation and best score for this conformation
"""
x1 = generate_rotor_vector(self.num_rotors)
c1 = self.engine.lig.mutate(x1)
e1 = self.engine.score(c1)
out = {'score': e1, 'conformation': c1.copy().tolist()}
for _ in range(self.mc_steps):
c2, x2 = self.generate_conformation(x1)
e2 = self.engine.score(c2)
e3, x3 = self._optimize(e2, x2)
delta = e3 - e1
if delta < 0 or np.exp(-delta) > np.random.uniform(): # Metropolis criterion
x1 = x3
if delta < 0:
e1 = e3
conformation = self.engine.lig.mutate(x1.copy())
out = {'score': e1, 'conformation': conformation.tolist()}
return out['conformation'], out['score']
def _optimize(self, e2, x2):
bounds = ((-1., 1.), (-1., 1.), (-1., 1.), (-np.pi, np.pi), (-np.pi, np.pi), (-np.pi, np.pi))
for i in range(len(self.engine.rotors)):
bounds += ((-np.pi, np.pi),)
bounds = np.array(bounds)
if self.optim == OptimizationMethod.SIMPLEX:
return e2, x2
elif self.optim == OptimizationMethod.NELDER_MEAD:
return self._minimize_nelder_mead(x2)
elif self.optim == OptimizationMethod.LBFGSB:
return self._minimize_lbfgsb(bounds, x2)
return e2, x2
def _minimize_nelder_mead(self, x2):
m = minimize(self.mcmc_utils.score_coords, x2, args=(self.engine, self.engine.score),
method='Nelder-Mead')
e3, x3 = self._extract_from_scipy_minimize(m)
return e3, x3
def _extract_from_scipy_minimize(self, m):
x3 = m.x
x3 = self.mcmc_utils.keep_bound(x3)
e3 = m.fun
return e3, x3
def _minimize_lbfgsb(self, bounds, x2):
m = minimize(self.mcmc_utils.score_coords, x2, method='L-BFGS-B',
jac=self.mcmc_utils.score_coords_jac,
args=(self.engine, self.engine.score), bounds=bounds, options={'maxiter': self.optim_iter})
e3, x3 = self._extract_from_scipy_minimize(m)
return e3, x3
def generate_conformation(self, x1):
for _ in range(self.mut_steps):
x2 = self.mcmc_utils.rand_mutate_big(x1.copy())
c2 = self.engine.lig.mutate(x2)
return c2, x2
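# Hedged usage sketch (not part of the original module): `CustomEngine` is the
# engine type named in the constructor docstring above; its construction is
# assumed and depends on the rest of the oddt.docking package.
#
#   engine = CustomEngine(receptor, ligand, scoring_func)   # assumed signature
#   mcmc = MCMCAlgorithm(engine, optim=OptimizationMethod.NELDER_MEAD,
#                        mc_steps=50, seed=123)
#   conformation, score = mcmc.perform()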
|
py | b4052e86486948853be6d677009426c255c361f6 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# {project.authors}
# {project.affiliations}
# (c) {project.span} all rights reserved
#
"""
Sanity check: verify that the extension from the {{{project.name}}} package is accessible
"""
def test():
# access the {{{project.name}}} extension
from {project.name} import lib{project.name}
# all done
return
# main
if __name__ == "__main__":
# do...
test()
# end of file
|
py | b4052fcd2e996c7f02458b6754dfa6dd52635a94 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for resnet_model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import mock
import tensorflow.compat.v1 as tf
from models import resnet_model
class ResNetModelTest(tf.test.TestCase):
def testGetScaledBaseLearningRateOneGpuLrFromParams(self):
"""Verifies setting params.resnet_base_lr pipes through."""
lr = self._get_scaled_base_learning_rate(1,
'parameter_server',
256,
base_lr=.050)
self.assertEqual(lr, .050)
def testGetScaledBaseLearningRateOneGpu(self):
lr = self._get_scaled_base_learning_rate(1, 'parameter_server', 128)
self.assertEqual(lr, .064)
def testGetScaledBaseLearningRateEightGpuReplicated(self):
lr = self._get_scaled_base_learning_rate(8, 'replicated', 256 * 8)
self.assertEqual(lr, .128)
def testGetScaledBaseLearningRateTwoGpuParameter(self):
lr = self._get_scaled_base_learning_rate(2, 'parameter_server', 256 * 2)
self.assertEqual(lr, .256)
def testGetScaledBaseLearningRateTwoGpuUneven(self):
lr = self._get_scaled_base_learning_rate(2, 'replicated', 13)
self.assertEqual(lr, 0.0032500000000000003)
def _get_scaled_base_learning_rate(self,
num_gpus,
variable_update,
batch_size,
base_lr=None):
"""Simplifies testing different learning rate calculations.
Args:
num_gpus: Number of GPUs to be used.
variable_update: Type of variable update used.
batch_size: Total batch size.
base_lr: Base learning rate before scaling.
Returns:
Base learning rate that would be used to create lr schedule.
"""
params = mock.Mock()
params.num_gpus = num_gpus
params.variable_update = variable_update
if base_lr:
params.resnet_base_lr = base_lr
resnet50_model = resnet_model.ResnetModel('resnet50', 50, params=params)
return resnet50_model.get_scaled_base_learning_rate(batch_size)
if __name__ == '__main__':
tf.disable_v2_behavior()
tf.test.main()
|
py | b405300d19fb463d3e7349fbf24cf402d49a0a75 | from unittest import TestCase
from binascii import unhexlify
from lbry.schema import Claim
class TestOldJSONSchemaCompatibility(TestCase):
def test_old_json_schema_v1(self):
claim = Claim.from_bytes(
b'{"fee": {"LBC": {"amount": 1.0, "address": "bPwGA9h7uijoy5uAvzVPQw9QyLoYZehHJo"}}, "d'
b'escription": "10MB test file to measure download speed on Lbry p2p-network.", "licens'
b'e": "None", "author": "root", "language": "English", "title": "10MB speed test file",'
b' "sources": {"lbry_sd_hash": "bbd1f68374ff9a1044a90d7dd578ce41979211c386caf19e6f49653'
b'6db5f2c96b58fe2c7a6677b331419a117873b539f"}, "content-type": "application/octet-strea'
b'm", "thumbnail": "/home/robert/lbry/speed.jpg"}'
)
stream = claim.stream
self.assertEqual(stream.title, '10MB speed test file')
self.assertEqual(stream.description, '10MB test file to measure download speed on Lbry p2p-network.')
self.assertEqual(stream.license, 'None')
self.assertEqual(stream.author, 'root')
self.assertEqual(stream.langtags, ['en'])
self.assertEqual(stream.source.media_type, 'application/octet-stream')
self.assertEqual(stream.thumbnail.url, '/home/robert/lbry/speed.jpg')
self.assertEqual(
stream.source.sd_hash,
'bbd1f68374ff9a1044a90d7dd578ce41979211c386caf19e'
'6f496536db5f2c96b58fe2c7a6677b331419a117873b539f'
)
self.assertEqual(stream.fee.address, 'bPwGA9h7uijoy5uAvzVPQw9QyLoYZehHJo')
self.assertEqual(stream.fee.lbc, 1)
self.assertEqual(stream.fee.dewies, 100000000)
self.assertEqual(stream.fee.currency, 'LBC')
with self.assertRaisesRegex(ValueError, 'USD can only be returned for USD fees.'):
print(stream.fee.usd)
def test_old_json_schema_v2(self):
claim = Claim.from_bytes(
b'{"license": "Creative Commons Attribution 3.0 United States", "fee": {"LBC": {"amount'
b'": 10, "address": "bFro33qBKxnL1AsjUU9N4AQHp9V62Nhc5L"}}, "ver": "0.0.2", "descriptio'
b'n": "Force P0 State for Nividia Cards! (max mining performance)", "language": "en", "'
b'author": "Mii", "title": "Nividia P0", "sources": {"lbry_sd_hash": "c5ffee0fa5168e166'
b'81b519d9d85446e8d1d818a616bd55540aa7374d2321b51abf2ac3dae1443a03dadcc8f7affaa62"}, "n'
b'sfw": false, "license_url": "https://creativecommons.org/licenses/by/3.0/us/legalcode'
b'", "content-type": "application/x-msdownload"}'
)
stream = claim.stream
self.assertEqual(stream.title, 'Nividia P0')
self.assertEqual(stream.description, 'Force P0 State for Nividia Cards! (max mining performance)')
self.assertEqual(stream.license, 'Creative Commons Attribution 3.0 United States')
self.assertEqual(stream.license_url, 'https://creativecommons.org/licenses/by/3.0/us/legalcode')
self.assertEqual(stream.author, 'Mii')
self.assertEqual(stream.langtags, ['en'])
self.assertEqual(stream.source.media_type, 'application/x-msdownload')
self.assertEqual(
stream.source.sd_hash,
'c5ffee0fa5168e16681b519d9d85446e8d1d818a616bd555'
'40aa7374d2321b51abf2ac3dae1443a03dadcc8f7affaa62'
)
self.assertEqual(stream.fee.address, 'bFro33qBKxnL1AsjUU9N4AQHp9V62Nhc5L')
self.assertEqual(stream.fee.lbc, 10)
self.assertEqual(stream.fee.dewies, 1000000000)
self.assertEqual(stream.fee.currency, 'LBC')
with self.assertRaisesRegex(ValueError, 'USD can only be returned for USD fees.'):
print(stream.fee.usd)
def test_old_json_schema_v3(self):
claim = Claim.from_bytes(
b'{"ver": "0.0.3", "description": "asd", "license": "Creative Commons Attribution 4.0 I'
b'nternational", "author": "sgb", "title": "ads", "language": "en", "sources": {"lbry_s'
b'd_hash": "d83db664c6d7d570aa824300f4869e0bfb560e765efa477aebf566467f8d3a57f4f8c704cab'
b'1308eb75ff8b7e84e3caf"}, "content_type": "video/mp4", "nsfw": false}'
)
stream = claim.stream
self.assertEqual(stream.title, 'ads')
self.assertEqual(stream.description, 'asd')
self.assertEqual(stream.license, 'Creative Commons Attribution 4.0 International')
self.assertEqual(stream.author, 'sgb')
self.assertEqual(stream.langtags, ['en'])
self.assertEqual(stream.source.media_type, 'video/mp4')
self.assertEqual(
stream.source.sd_hash,
'd83db664c6d7d570aa824300f4869e0bfb560e765efa477a'
'ebf566467f8d3a57f4f8c704cab1308eb75ff8b7e84e3caf'
)
class TestTypesV1Compatibility(TestCase):
def test_signed_claim_made_by_ytsync(self):
claim = Claim.from_bytes(unhexlify(
b'080110011aee04080112a604080410011a2b4865726520617265203520526561736f6e73204920e29da4e'
b'fb88f204e657874636c6f7564207c20544c4722920346696e64206f7574206d6f72652061626f7574204e'
b'657874636c6f75643a2068747470733a2f2f6e657874636c6f75642e636f6d2f0a0a596f752063616e206'
b'6696e64206d65206f6e20746865736520736f6369616c733a0a202a20466f72756d733a2068747470733a'
b'2f2f666f72756d2e6865617679656c656d656e742e696f2f0a202a20506f64636173743a2068747470733'
b'a2f2f6f6666746f706963616c2e6e65740a202a2050617472656f6e3a2068747470733a2f2f7061747265'
b'6f6e2e636f6d2f7468656c696e757867616d65720a202a204d657263683a2068747470733a2f2f7465657'
b'37072696e672e636f6d2f73746f7265732f6f6666696369616c2d6c696e75782d67616d65720a202a2054'
b'77697463683a2068747470733a2f2f7477697463682e74762f786f6e64616b0a202a20547769747465723'
b'a2068747470733a2f2f747769747465722e636f6d2f7468656c696e757867616d65720a0a2e2e2e0a6874'
b'7470733a2f2f7777772e796f75747562652e636f6d2f77617463683f763d4672546442434f535f66632a0'
b'f546865204c696e75782047616d6572321c436f7079726967687465642028636f6e746163742061757468'
b'6f722938004a2968747470733a2f2f6265726b2e6e696e6a612f7468756d626e61696c732f46725464424'
b'34f535f666352005a001a41080110011a30040e8ac6e89c061f982528c23ad33829fd7146435bf7a4cc22'
b'f0bff70c4fe0b91fd36da9a375e3e1c171db825bf5d1f32209766964656f2f6d70342a5c080110031a406'
b'2b2dd4c45e364030fbfad1a6fefff695ebf20ea33a5381b947753e2a0ca359989a5cc7d15e5392a0d354c'
b'0b68498382b2701b22c03beb8dcb91089031b871e72214feb61536c007cdf4faeeaab4876cb397feaf6b51'
))
stream = claim.stream
self.assertEqual(stream.title, 'Here are 5 Reasons I ❤️ Nextcloud | TLG')
self.assertEqual(
stream.description,
'Find out more about Nextcloud: https://nextcloud.com/\n\nYou can find me on these soci'
'als:\n * Forums: https://forum.heavyelement.io/\n * Podcast: https://offtopical.net\n '
'* Patreon: https://patreon.com/thelinuxgamer\n * Merch: https://teespring.com/stores/o'
'fficial-linux-gamer\n * Twitch: https://twitch.tv/xondak\n * Twitter: https://twitter.'
'com/thelinuxgamer\n\n...\nhttps://www.youtube.com/watch?v=FrTdBCOS_fc'
)
self.assertEqual(stream.license, 'Copyrighted (contact author)')
self.assertEqual(stream.author, 'The Linux Gamer')
self.assertEqual(stream.langtags, ['en'])
self.assertEqual(stream.source.media_type, 'video/mp4')
self.assertEqual(stream.thumbnail.url, 'https://berk.ninja/thumbnails/FrTdBCOS_fc')
self.assertEqual(
stream.source.sd_hash,
'040e8ac6e89c061f982528c23ad33829fd7146435bf7a4cc'
'22f0bff70c4fe0b91fd36da9a375e3e1c171db825bf5d1f3'
)
# certificate for above channel
cert = Claim.from_bytes(unhexlify(
b'08011002225e0801100322583056301006072a8648ce3d020106052b8104000a034200043878b1edd4a13'
b'73149909ef03f4339f6da9c2bd2214c040fd2e530463ffe66098eca14fc70b50ff3aefd106049a815f595'
b'ed5a13eda7419ad78d9ed7ae473f17'
))
channel = cert.channel
self.assertEqual(
channel.public_key,
'3056301006072a8648ce3d020106052b8104000a034200043878b1edd4a1373149909ef03f4339f6da9c2b'
'd2214c040fd2e530463ffe66098eca14fc70b50ff3aefd106049a815f595ed5a13eda7419ad78d9ed7ae47'
'3f17'
)
def test_unsigned_with_fee(self):
claim = Claim.from_bytes(unhexlify(
b'080110011ad6010801127c080410011a08727067206d69646922046d6964692a08727067206d696469322'
b'e437265617469766520436f6d6d6f6e73204174747269627574696f6e20342e3020496e7465726e617469'
b'6f6e616c38004224080110011a19553f00bc139bbf40de425f94d51fffb34c1bea6d9171cd374c2500007'
b'0414a0052005a001a54080110011a301f41eb0312aa7e8a5ce49349bc77d811da975833719d751523b19f'
b'123fc3d528d6a94e3446ccddb7b9329f27a9cad7e3221c6170706c69636174696f6e2f782d7a69702d636'
b'f6d70726573736564'
))
stream = claim.stream
self.assertEqual(stream.title, 'rpg midi')
self.assertEqual(stream.description, 'midi')
self.assertEqual(stream.license, 'Creative Commons Attribution 4.0 International')
self.assertEqual(stream.author, 'rpg midi')
self.assertEqual(stream.langtags, ['en'])
self.assertEqual(stream.source.media_type, 'application/x-zip-compressed')
self.assertEqual(
stream.source.sd_hash,
'1f41eb0312aa7e8a5ce49349bc77d811da975833719d7515'
'23b19f123fc3d528d6a94e3446ccddb7b9329f27a9cad7e3'
)
self.assertEqual(stream.fee.address, 'bJUQ9MxS9N6M29zsA5GTpVSDzsnPjMBBX9')
self.assertEqual(stream.fee.lbc, 15)
self.assertEqual(stream.fee.dewies, 1500000000)
self.assertEqual(stream.fee.currency, 'LBC')
with self.assertRaisesRegex(ValueError, 'USD can only be returned for USD fees.'):
print(stream.fee.usd)
|
py | b405303da1dd6843497a9e3243d0c7d1a74d56c5 | #!/usr/bin/env python2
""" The design class """
# upconvert.py - A universal hardware design file format converter using
# Format: upverter.com/resources/open-json-format/
# Development: github.com/upverter/schematic-file-converter
#
# Copyright 2011 Upverter, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from core.design_attributes import DesignAttributes
from core.components import Components
from core.shape import Point
class Design:
""" The Design class represents the whole schematic, which is also
the top level of the output format. The internal structure of this
class closely matches the JSON output."""
def __init__(self):
self.nets = list()
self.components = Components()
self.component_instances = list()
self.design_attributes = DesignAttributes()
self.layout = None
self.version = dict()
self.set_version("0.1.0","Upverter converter")
def bounds(self):
""" Return the min and max point of a design """
bounds = [net.bounds() for net in self.nets]
bounds.extend([anno.bounds() for anno in
self.design_attributes.annotations])
offset_bounds = lambda (p1, p2), (xo, yo): [Point(p1.x + xo, p1.y + yo),
Point(p2.x + xo, p2.y + yo)]
for comp in self.component_instances:
offsets = [(att.x, att.y) for att in comp.symbol_attributes]
lib_comp = self.components.components[comp.library_id]
bodybounds = [b.bounds() for b in
lib_comp.symbols[comp.symbol_index].bodies]
# the offsets in symbol_attributes will align and apply to the
# library components bodies
bounds.extend([offset_bounds(b, o) for b, o in zip(bodybounds,
offsets)])
# flatten out bounds to just a list of Points
bounds = sum(bounds, [])
x_values = [pt.x for pt in bounds]
y_values = [pt.y for pt in bounds]
# by convention, an empty design will bound just the origin
if len(x_values) == 0:
x_values = [0]
y_values = [0]
return [Point(min(x_values), min(y_values)),
Point(max(x_values), max(y_values))]
def set_version(self, file_version, exporter):
""" Set the file version and exporter """
self.version['file_version'] = file_version
self.version['exporter'] = exporter
def add_component_instance(self, component_instance):
""" Add an instance """
self.component_instances.append(component_instance)
def add_component(self, library_id, component):
""" Add a library part """
self.components.add_component(library_id, component)
def add_net(self, net):
""" Add a net """
self.nets.append(net)
def set_design_attributes(self, design_attributes):
""" Add design level attributes """
self.design_attributes = design_attributes
def generate_netlist(self):
""" The netlist as generated from the schematic. """
pass
def generate_bom(self):
""" The bill of materials as generated from the schematic. """
pass
def json(self):
""" Return a design as JSON """
return {
"version" : self.version,
"nets" : [n.json() for n in self.nets],
"components" : self.components.json(),
"component_instances" :
[i.json() for i in self.component_instances],
"design_attributes" : self.design_attributes.json(),
"layout" : self.layout.json() if self.layout is not None else None
}
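# Hedged usage sketch (not part of the original module): building an empty
# design and serialising it; nets and components are elided because they
# depend on the rest of the upconvert core package.
#
#   design = Design()
#   design.set_version("0.1.0", "Upverter converter")
#   top_left, bottom_right = design.bounds()   # an empty design bounds just the origin
#   json_dict = design.json()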
|
py | b40531a9d9ffc7e6c486445e54672853ec14b4d8 | # Copyright 2008-2015 Nokia Networks
# Copyright 2016- Robot Framework Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from utest.resources import datafilereader
from robotide.controller.ctrlcommands import ExtractScalar, ExtractList
from nose.tools import assert_equal
class TestExtractVariableCommands(unittest.TestCase):
def setUp(self):
self.project_ctrl = datafilereader.construct_project(datafilereader.LOG_MANY_SUITE)
self.datafile = datafilereader.get_ctrl_by_name('Log Many', self.project_ctrl.datafiles)
self.testcase = self.datafile.tests[0]
def tearDown(self):
self.project_ctrl.close()
def test_scalar_extract(self):
row, col = 0, 1
var_name = '${foo}'
var_value = self.testcase.step(row).as_list()[col]
var_comment = ['Something about the variable']
self.testcase.execute(ExtractScalar(var_name, var_value, var_comment, (row, col)))
assert_equal(self.testcase.step(row).as_list()[col], var_name)
var = [var for var in self.testcase.datafile_controller.variables if var.name == var_name][0]
assert_equal(var.value[0], var_value)
assert_equal(var.comment.as_list(), var_comment)
def test_list_extract(self):
row = 0
cols = [2, 3]
var_name = '@{joo}'
var_value = self.testcase.step(row).as_list()[cols[0]:cols[-1]+1]
value_after_list = self.testcase.step(row).as_list()[cols[-1]+1]
var_comment = ['Comment for my test list']
self.testcase.execute(ExtractList(var_name, var_value, var_comment, [(row, col) for col in cols]))
assert_equal(self.testcase.step(row).as_list()[cols[0]], var_name)
var = [var for var in self.testcase.datafile_controller.variables if var.name == var_name][0]
assert_equal(var.value, var_value)
assert_equal(var.comment.as_list(), var_comment)
assert_equal(self.testcase.step(row).as_list()[cols[0]+1], value_after_list)
if __name__ == "__main__":
unittest.main()
|
py | b40531bd42af85ce4e7bd7387e8f667a95d35bca | from __future__ import annotations
from typing import Any, Optional, Sequence, Type, TypeVar, Union
import numpy as np
from pandas._libs import lib
from pandas._typing import Shape
from pandas.compat.numpy import function as nv
from pandas.errors import AbstractMethodError
from pandas.util._decorators import cache_readonly, doc
from pandas.util._validators import validate_fillna_kwargs
from pandas.core.dtypes.common import is_dtype_equal
from pandas.core.dtypes.inference import is_array_like
from pandas.core.dtypes.missing import array_equivalent
from pandas.core import missing
from pandas.core.algorithms import take, unique
from pandas.core.array_algos.transforms import shift
from pandas.core.arrays.base import ExtensionArray
from pandas.core.construction import extract_array
from pandas.core.indexers import check_array_indexer
NDArrayBackedExtensionArrayT = TypeVar(
"NDArrayBackedExtensionArrayT", bound="NDArrayBackedExtensionArray"
)
class NDArrayBackedExtensionArray(ExtensionArray):
"""
ExtensionArray that is backed by a single NumPy ndarray.
"""
_ndarray: np.ndarray
def _from_backing_data(
self: NDArrayBackedExtensionArrayT, arr: np.ndarray
) -> NDArrayBackedExtensionArrayT:
"""
Construct a new ExtensionArray `new_array` with `arr` as its _ndarray.
This should round-trip:
self == self._from_backing_data(self._ndarray)
"""
raise AbstractMethodError(self)
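    # Illustrative sketch (an addition, not pandas code): a minimal subclass
    # would typically satisfy the round-trip contract with something like
    #
    #     def _from_backing_data(self, arr: np.ndarray) -> "MyArray":
    #         return type(self)(arr, dtype=self.dtype)
    #
    # so that self._from_backing_data(self._ndarray) reconstructs an
    # equivalent array.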
def _box_func(self, x):
"""
Wrap numpy type in our dtype.type if necessary.
"""
return x
def _validate_scalar(self, value):
# used by NDArrayBackedExtensionIndex.insert
raise AbstractMethodError(self)
# ------------------------------------------------------------------------
def take(
self: NDArrayBackedExtensionArrayT,
indices: Sequence[int],
*,
allow_fill: bool = False,
fill_value: Any = None,
axis: int = 0,
) -> NDArrayBackedExtensionArrayT:
if allow_fill:
fill_value = self._validate_fill_value(fill_value)
new_data = take(
self._ndarray,
indices,
allow_fill=allow_fill,
fill_value=fill_value,
axis=axis,
)
return self._from_backing_data(new_data)
def _validate_fill_value(self, fill_value):
"""
If a fill_value is passed to `take` convert it to a representation
suitable for self._ndarray, raising TypeError if this is not possible.
Parameters
----------
fill_value : object
Returns
-------
fill_value : native representation
Raises
------
TypeError
"""
raise AbstractMethodError(self)
# ------------------------------------------------------------------------
# TODO: make this a cache_readonly; for that to work we need to remove
# the _index_data kludge in libreduction
@property
def shape(self) -> Shape:
return self._ndarray.shape
def __len__(self) -> int:
return self.shape[0]
@cache_readonly
def ndim(self) -> int:
return len(self.shape)
@cache_readonly
def size(self) -> int:
return np.prod(self.shape)
@cache_readonly
def nbytes(self) -> int:
return self._ndarray.nbytes
def reshape(
self: NDArrayBackedExtensionArrayT, *args, **kwargs
) -> NDArrayBackedExtensionArrayT:
new_data = self._ndarray.reshape(*args, **kwargs)
return self._from_backing_data(new_data)
def ravel(
self: NDArrayBackedExtensionArrayT, *args, **kwargs
) -> NDArrayBackedExtensionArrayT:
new_data = self._ndarray.ravel(*args, **kwargs)
return self._from_backing_data(new_data)
@property
def T(self: NDArrayBackedExtensionArrayT) -> NDArrayBackedExtensionArrayT:
new_data = self._ndarray.T
return self._from_backing_data(new_data)
# ------------------------------------------------------------------------
def equals(self, other) -> bool:
if type(self) is not type(other):
return False
if not is_dtype_equal(self.dtype, other.dtype):
return False
return bool(array_equivalent(self._ndarray, other._ndarray))
def _values_for_argsort(self):
return self._ndarray
def copy(self: NDArrayBackedExtensionArrayT) -> NDArrayBackedExtensionArrayT:
new_data = self._ndarray.copy()
return self._from_backing_data(new_data)
def repeat(
self: NDArrayBackedExtensionArrayT, repeats, axis=None
) -> NDArrayBackedExtensionArrayT:
"""
Repeat elements of an array.
See Also
--------
numpy.ndarray.repeat
"""
nv.validate_repeat(tuple(), dict(axis=axis))
new_data = self._ndarray.repeat(repeats, axis=axis)
return self._from_backing_data(new_data)
def unique(self: NDArrayBackedExtensionArrayT) -> NDArrayBackedExtensionArrayT:
new_data = unique(self._ndarray)
return self._from_backing_data(new_data)
@classmethod
@doc(ExtensionArray._concat_same_type)
def _concat_same_type(
cls: Type[NDArrayBackedExtensionArrayT],
to_concat: Sequence[NDArrayBackedExtensionArrayT],
axis: int = 0,
) -> NDArrayBackedExtensionArrayT:
dtypes = {str(x.dtype) for x in to_concat}
if len(dtypes) != 1:
raise ValueError("to_concat must have the same dtype (tz)", dtypes)
new_values = [x._ndarray for x in to_concat]
new_values = np.concatenate(new_values, axis=axis)
return to_concat[0]._from_backing_data(new_values)
@doc(ExtensionArray.searchsorted)
def searchsorted(self, value, side="left", sorter=None):
value = self._validate_searchsorted_value(value)
return self._ndarray.searchsorted(value, side=side, sorter=sorter)
def _validate_searchsorted_value(self, value):
return value
@doc(ExtensionArray.shift)
def shift(self, periods=1, fill_value=None, axis=0):
fill_value = self._validate_shift_value(fill_value)
new_values = shift(self._ndarray, periods, axis, fill_value)
return self._from_backing_data(new_values)
def _validate_shift_value(self, fill_value):
# TODO: after deprecation in datetimelikearraymixin is enforced,
        # we can remove this and use validate_fill_value directly
return self._validate_fill_value(fill_value)
def __setitem__(self, key, value):
key = check_array_indexer(self, key)
value = self._validate_setitem_value(value)
self._ndarray[key] = value
def _validate_setitem_value(self, value):
return value
def __getitem__(
self: NDArrayBackedExtensionArrayT, key: Union[int, slice, np.ndarray]
) -> Union[NDArrayBackedExtensionArrayT, Any]:
if lib.is_integer(key):
# fast-path
result = self._ndarray[key]
if self.ndim == 1:
return self._box_func(result)
return self._from_backing_data(result)
key = extract_array(key, extract_numpy=True)
key = check_array_indexer(self, key)
result = self._ndarray[key]
if lib.is_scalar(result):
return self._box_func(result)
result = self._from_backing_data(result)
return result
@doc(ExtensionArray.fillna)
def fillna(
self: NDArrayBackedExtensionArrayT, value=None, method=None, limit=None
) -> NDArrayBackedExtensionArrayT:
value, method = validate_fillna_kwargs(value, method)
mask = self.isna()
# TODO: share this with EA base class implementation
if is_array_like(value):
if len(value) != len(self):
raise ValueError(
f"Length of 'value' does not match. Got ({len(value)}) "
f" expected {len(self)}"
)
value = value[mask]
if mask.any():
if method is not None:
func = missing.get_fill_func(method)
new_values = func(self._ndarray.copy(), limit=limit, mask=mask)
                # TODO: PandasArray didn't copy here previously; need tests for this
new_values = self._from_backing_data(new_values)
else:
# fill with value
new_values = self.copy()
new_values[mask] = value
else:
new_values = self.copy()
return new_values
# ------------------------------------------------------------------------
# Reductions
def _reduce(self, name: str, *, skipna: bool = True, **kwargs):
meth = getattr(self, name, None)
if meth:
return meth(skipna=skipna, **kwargs)
else:
msg = f"'{type(self).__name__}' does not implement reduction '{name}'"
raise TypeError(msg)
def _wrap_reduction_result(self, axis: Optional[int], result):
if axis is None or self.ndim == 1:
return self._box_func(result)
return self._from_backing_data(result)
# ------------------------------------------------------------------------
def __repr__(self) -> str:
if self.ndim == 1:
return super().__repr__()
from pandas.io.formats.printing import format_object_summary
# the short repr has no trailing newline, while the truncated
# repr does. So we include a newline in our template, and strip
# any trailing newlines from format_object_summary
lines = [
format_object_summary(x, self._formatter(), indent_for_name=False).rstrip(
", \n"
)
for x in self
]
data = ",\n".join(lines)
class_name = f"<{type(self).__name__}>"
return f"{class_name}\n[\n{data}\n]\nShape: {self.shape}, dtype: {self.dtype}"
# ------------------------------------------------------------------------
# __array_function__ methods
def putmask(self, mask, value):
"""
Analogue to np.putmask(self, mask, value)
Parameters
----------
mask : np.ndarray[bool]
value : scalar or listlike
Raises
------
TypeError
If value cannot be cast to self.dtype.
"""
value = self._validate_setitem_value(value)
np.putmask(self._ndarray, mask, value)
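# ---------------------------------------------------------------------------
# Illustrative sketch (not part of pandas): a minimal concrete subclass wiring
# up the abstract hooks above. `_ToyBackedArray` is an assumed name used only
# for demonstration, and the enclosing mixin is assumed to be
# NDArrayBackedExtensionArray (as the NDArrayBackedExtensionArrayT TypeVar
# suggests); real subclasses such as Categorical, DatetimeArray or PandasArray
# also provide a proper `dtype`, which is omitted here.
class _ToyBackedArray(NDArrayBackedExtensionArray):
    def __init__(self, values: np.ndarray):
        # Keep a reference to the backing ndarray used by the mixin above.
        self._ndarray = np.asarray(values)
    def _from_backing_data(self, arr: np.ndarray) -> "_ToyBackedArray":
        # Round-trips: self == self._from_backing_data(self._ndarray)
        return type(self)(arr)
    def _validate_scalar(self, value):
        # Coerce scalars to the backing dtype; numpy raises if that fails.
        return self._ndarray.dtype.type(value)
    def _validate_fill_value(self, fill_value):
        return self._validate_scalar(fill_value)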
|
py | b4053227c5871eb74deebb8b3bc27cd05b6f71f6 | import unittest
from translator import english_to_french, french_to_english
class TestTranslator(unittest.TestCase):
def test_en_to_fr(self):
self.assertEqual(english_to_french(''), '')
self.assertEqual(english_to_french('Hello'), 'Bonjour')
self.assertNotEqual(english_to_french('Hello'), 'Bon')
def test_fr_to_en(self):
self.assertEqual(french_to_english(''), '')
self.assertEqual(french_to_english('Bonjour'), 'Hello')
self.assertNotEqual(french_to_english('Bon'), 'Hello')
if __name__ == '__main__':
unittest.main()
|
py | b40532f0d4d319bec4bf38a8876bce00142b31bb | #
# This file is part of LiteX.
#
# Copyright (c) 2018-2019 Florent Kermarrec <[email protected]>
# Copyright (c) 2020 Antmicro <www.antmicro.com>
# SPDX-License-Identifier: BSD-2-Clause
"""AXI4 Full/Lite support for LiteX"""
from migen import *
from migen.genlib import roundrobin
from migen.genlib.misc import split, displacer, chooser, WaitTimer
from litex.soc.interconnect import stream
from litex.build.generic_platform import *
from litex.soc.interconnect import csr_bus
# AXI Definition -----------------------------------------------------------------------------------
BURST_FIXED = 0b00
BURST_INCR = 0b01
BURST_WRAP = 0b10
BURST_RESERVED = 0b11
RESP_OKAY = 0b00
RESP_EXOKAY = 0b01
RESP_SLVERR = 0b10
RESP_DECERR = 0b11
def ax_description(address_width, id_width):
return [
("addr", address_width),
("burst", 2), # Burst type
("len", 8), # Number of data (-1) transfers (up to 256)
("size", 4), # Number of bytes (-1) of each data transfer (up to 1024 bits)
("lock", 2), # *
("prot", 3), # *
("cache", 4), # *
("qos", 4), # *
("id", id_width)
]
# * present for interconnect with other cores but not used by LiteX.
def w_description(data_width, id_width):
return [
("data", data_width),
("strb", data_width//8),
("id", id_width)
]
def b_description(id_width):
return [
("resp", 2),
("id", id_width)
]
def r_description(data_width, id_width):
return [
("resp", 2),
("data", data_width),
("id", id_width)
]
def _connect_axi(master, slave, keep=None, omit=None):
channel_modes = {
"aw": "master",
"w" : "master",
"b" : "slave",
"ar": "master",
"r" : "slave",
}
r = []
for channel, mode in channel_modes.items():
if mode == "master":
m, s = getattr(master, channel), getattr(slave, channel)
else:
s, m = getattr(master, channel), getattr(slave, channel)
r.extend(m.connect(s, keep=keep, omit=omit))
return r
def connect_to_pads(bus, pads, mode="master", axi_full=False):
assert mode in ["slave", "master"]
r = []
def swap_mode(mode): return "master" if mode == "slave" else "slave"
channel_modes = {
"aw": mode,
"w" : mode,
"b" : swap_mode(mode),
"ar": mode,
"r" : swap_mode(mode),
}
for channel, mode in channel_modes.items():
ch = getattr(bus, channel)
sig_list = [("valid", 1)] + ch.description.payload_layout
if ch in ["w", "r"] and axi_full:
sig_list += [("last", 1)]
for name, width in sig_list:
sig = getattr(ch, name)
pad = getattr(pads, channel + name)
if mode == "master":
r.append(pad.eq(sig))
else:
r.append(sig.eq(pad))
for name, width in [("ready", 1)]:
sig = getattr(ch, name)
pad = getattr(pads, channel + name)
if mode == "master":
r.append(sig.eq(pad))
else:
r.append(pad.eq(sig))
return r
def _axi_layout_flat(axi):
# yields tuples (channel, name, direction)
def get_dir(channel, direction):
if channel in ["b", "r"]:
return {DIR_M_TO_S: DIR_S_TO_M, DIR_S_TO_M: DIR_M_TO_S}[direction]
return direction
for ch in ["aw", "w", "b", "ar", "r"]:
channel = getattr(axi, ch)
for group in channel.layout:
if len(group) == 3:
name, _, direction = group
yield ch, name, get_dir(ch, direction)
else:
_, subgroups = group
for subgroup in subgroups:
name, _, direction = subgroup
yield ch, name, get_dir(ch, direction)
class AXIInterface:
def __init__(self, data_width=32, address_width=32, id_width=1, clock_domain="sys"):
self.data_width = data_width
self.address_width = address_width
self.id_width = id_width
self.clock_domain = clock_domain
self.aw = stream.Endpoint(ax_description(address_width, id_width))
self.w = stream.Endpoint(w_description(data_width, id_width))
self.b = stream.Endpoint(b_description(id_width))
self.ar = stream.Endpoint(ax_description(address_width, id_width))
self.r = stream.Endpoint(r_description(data_width, id_width))
def connect_to_pads(self, pads, mode="master"):
return connect_to_pads(self, pads, mode, axi_full=True)
def get_ios(self, bus_name="wb"):
subsignals = []
for channel in ["aw", "w", "b", "ar", "r"]:
for name in ["valid", "ready"] + (["last"] if channel in ["w", "r"] else []):
subsignals.append(Subsignal(channel + name, Pins(1)))
for name, width in getattr(self, channel).description.payload_layout:
subsignals.append(Subsignal(channel + name, Pins(width)))
ios = [(bus_name , 0) + tuple(subsignals)]
return ios
def connect(self, slave, **kwargs):
return _connect_axi(self, slave, **kwargs)
def layout_flat(self):
return list(_axi_layout_flat(self))
# AXI Lite Definition ------------------------------------------------------------------------------
def ax_lite_description(address_width):
return [("addr", address_width)]
def w_lite_description(data_width):
return [
("data", data_width),
("strb", data_width//8)
]
def b_lite_description():
return [("resp", 2)]
def r_lite_description(data_width):
return [
("resp", 2),
("data", data_width)
]
class AXILiteInterface:
def __init__(self, data_width=32, address_width=32, clock_domain="sys", name=None):
self.data_width = data_width
self.address_width = address_width
self.clock_domain = clock_domain
self.aw = stream.Endpoint(ax_lite_description(address_width), name=name)
self.w = stream.Endpoint(w_lite_description(data_width), name=name)
self.b = stream.Endpoint(b_lite_description(), name=name)
self.ar = stream.Endpoint(ax_lite_description(address_width), name=name)
self.r = stream.Endpoint(r_lite_description(data_width), name=name)
def get_ios(self, bus_name="wb"):
subsignals = []
for channel in ["aw", "w", "b", "ar", "r"]:
for name in ["valid", "ready"]:
subsignals.append(Subsignal(channel + name, Pins(1)))
for name, width in getattr(self, channel).description.payload_layout:
subsignals.append(Subsignal(channel + name, Pins(width)))
ios = [(bus_name , 0) + tuple(subsignals)]
return ios
def connect_to_pads(self, pads, mode="master"):
return connect_to_pads(self, pads, mode)
def connect(self, slave, **kwargs):
return _connect_axi(self, slave, **kwargs)
def layout_flat(self):
return list(_axi_layout_flat(self))
def write(self, addr, data, strb=None):
if strb is None:
strb = 2**len(self.w.strb) - 1
# aw + w
yield self.aw.valid.eq(1)
yield self.aw.addr.eq(addr)
yield self.w.data.eq(data)
yield self.w.valid.eq(1)
yield self.w.strb.eq(strb)
yield
while not (yield self.aw.ready):
yield
yield self.aw.valid.eq(0)
yield self.aw.addr.eq(0)
while not (yield self.w.ready):
yield
yield self.w.valid.eq(0)
yield self.w.strb.eq(0)
# b
yield self.b.ready.eq(1)
while not (yield self.b.valid):
yield
resp = (yield self.b.resp)
yield self.b.ready.eq(0)
return resp
def read(self, addr):
# ar
yield self.ar.valid.eq(1)
yield self.ar.addr.eq(addr)
yield
while not (yield self.ar.ready):
yield
yield self.ar.valid.eq(0)
# r
yield self.r.ready.eq(1)
while not (yield self.r.valid):
yield
data = (yield self.r.data)
resp = (yield self.r.resp)
yield self.r.ready.eq(0)
return (data, resp)
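# Illustrative sketch (assumption, not part of LiteX): how the write()/read()
# simulation generators above are typically driven from a migen testbench. The
# function name and the address/data values are made up for demonstration.
def _axi_lite_example_tb(bus):
    # Write one word, then read it back over the same AXI-Lite interface
    # (assuming a memory-like slave behind `bus`, e.g. AXILiteSRAM below).
    resp = yield from bus.write(0x00000000, 0x12345678)
    assert resp == RESP_OKAY
    data, resp = yield from bus.read(0x00000000)
    assert resp == RESP_OKAY
    assert data == 0x12345678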
# AXI Stream Definition ----------------------------------------------------------------------------
class AXIStreamInterface(stream.Endpoint):
def __init__(self, data_width=32, keep_width=0, user_width=0):
self.data_width = data_width
self.keep_width = keep_width
self.user_width = user_width
payload_layout = [("data", data_width)]
if self.keep_width:
payload_layout += [("keep", keep_width)]
param_layout = []
if self.user_width:
param_layout += [("user", user_width)]
stream.Endpoint.__init__(self, stream.EndpointDescription(payload_layout, param_layout))
def get_ios(self, bus_name="axi"):
subsignals = [
Subsignal("tvalid", Pins(1)),
Subsignal("tlast", Pins(1)),
Subsignal("tready", Pins(1)),
Subsignal("tdata", Pins(self.data_width)),
]
if self.keep_width:
subsignals += [Subsignal("tkeep", Pins(self.keep_width))]
if self.user_width:
subsignals += [Subsignal("tuser", Pins(self.user_width))]
ios = [(bus_name , 0) + tuple(subsignals)]
return ios
def connect_to_pads(self, pads, mode="master"):
assert mode in ["slave", "master"]
r = []
if mode == "master":
r.append(pads.tvalid.eq(self.valid))
r.append(self.ready.eq(pads.tready))
r.append(pads.tlast.eq(self.last))
r.append(pads.tdata.eq(self.data))
if self.keep_width:
r.append(pads.tkeep.eq(self.keep))
if self.user_width:
r.append(pads.tuser.eq(self.user))
if mode == "slave":
r.append(self.valid.eq(pads.tvalid))
r.append(pads.tready.eq(self.ready))
r.append(self.last.eq(pads.tlast))
r.append(self.data.eq(pads.tdata))
if self.keep_width:
r.append(self.keep.eq(pads.tkeep))
if self.user_width:
r.append(self.user.eq(pads.tuser))
return r
# AXI Bursts to Beats ------------------------------------------------------------------------------
class AXIBurst2Beat(Module):
def __init__(self, ax_burst, ax_beat, capabilities={BURST_FIXED, BURST_INCR, BURST_WRAP}):
assert BURST_FIXED in capabilities
# # #
beat_count = Signal(8)
beat_size = Signal(8 + 4)
beat_offset = Signal((8 + 4, True))
beat_wrap = Signal(8 + 4)
# Compute parameters
self.comb += beat_size.eq(1 << ax_burst.size)
self.comb += beat_wrap.eq(ax_burst.len << ax_burst.size)
# Combinatorial logic
self.comb += [
ax_beat.valid.eq(ax_burst.valid | ~ax_beat.first),
ax_beat.first.eq(beat_count == 0),
ax_beat.last.eq(beat_count == ax_burst.len),
ax_beat.addr.eq(ax_burst.addr + beat_offset),
ax_beat.id.eq(ax_burst.id),
If(ax_beat.ready,
If(ax_beat.last,
ax_burst.ready.eq(1)
)
)
]
# Synchronous logic
self.sync += [
If(ax_beat.valid & ax_beat.ready,
If(ax_beat.last,
beat_count.eq(0),
beat_offset.eq(0)
).Else(
beat_count.eq(beat_count + 1),
If(((ax_burst.burst == BURST_INCR) & (BURST_INCR in capabilities)) |
((ax_burst.burst == BURST_WRAP) & (BURST_WRAP in capabilities)),
beat_offset.eq(beat_offset + beat_size)
)
),
If((ax_burst.burst == BURST_WRAP) & (BURST_WRAP in capabilities),
If((ax_beat.addr & beat_wrap) == beat_wrap,
beat_offset.eq(beat_offset - beat_wrap)
)
)
)
]
# AXI to AXI Lite ----------------------------------------------------------------------------------
class AXI2AXILite(Module):
    # Note: Since this AXI bridge will mostly be used to target buses that do not support
    # simultaneous writes/reads, the AXIBurst2Beat module is shared between writes/reads to
    # reduce resource usage.
def __init__(self, axi, axi_lite):
assert axi.data_width == axi_lite.data_width
assert axi.address_width == axi_lite.address_width
ax_buffer = stream.Buffer(ax_description(axi.address_width, axi.id_width))
ax_burst = stream.Endpoint(ax_description(axi.address_width, axi.id_width))
ax_beat = stream.Endpoint(ax_description(axi.address_width, axi.id_width))
self.comb += ax_burst.connect(ax_buffer.sink)
ax_burst2beat = AXIBurst2Beat(ax_buffer.source, ax_beat)
self.submodules += ax_buffer, ax_burst2beat
_data = Signal(axi.data_width)
_cmd_done = Signal()
_last_ar_aw_n = Signal()
self.submodules.fsm = fsm = FSM(reset_state="IDLE")
fsm.act("IDLE",
NextValue(_cmd_done, 0),
If(axi.ar.valid & axi.aw.valid,
# If last access was a read, do a write
If(_last_ar_aw_n,
axi.aw.connect(ax_burst),
NextValue(_last_ar_aw_n, 0),
NextState("WRITE")
# If last access was a write, do a read
).Else(
axi.ar.connect(ax_burst),
NextValue(_last_ar_aw_n, 1),
NextState("READ"),
)
).Elif(axi.ar.valid,
axi.ar.connect(ax_burst),
NextValue(_last_ar_aw_n, 1),
NextState("READ"),
).Elif(axi.aw.valid,
axi.aw.connect(ax_burst),
NextValue(_last_ar_aw_n, 0),
NextState("WRITE")
)
)
fsm.act("READ",
# ar (read command)
axi_lite.ar.valid.eq(ax_beat.valid & ~_cmd_done),
axi_lite.ar.addr.eq(ax_beat.addr),
ax_beat.ready.eq(axi_lite.ar.ready & ~_cmd_done),
If(ax_beat.valid & ax_beat.last,
If(axi_lite.ar.ready,
ax_beat.ready.eq(0),
NextValue(_cmd_done, 1)
)
),
# r (read data & response)
axi.r.valid.eq(axi_lite.r.valid),
axi.r.last.eq(_cmd_done),
axi.r.resp.eq(RESP_OKAY),
axi.r.id.eq(ax_beat.id),
axi.r.data.eq(axi_lite.r.data),
axi_lite.r.ready.eq(axi.r.ready),
# Exit
If(axi.r.valid & axi.r.last & axi.r.ready,
ax_beat.ready.eq(1),
NextState("IDLE")
)
)
# Always accept write responses.
self.comb += axi_lite.b.ready.eq(1)
fsm.act("WRITE",
# aw (write command)
axi_lite.aw.valid.eq(ax_beat.valid & ~_cmd_done),
axi_lite.aw.addr.eq(ax_beat.addr),
ax_beat.ready.eq(axi_lite.aw.ready & ~_cmd_done),
If(ax_beat.valid & ax_beat.last,
If(axi_lite.aw.ready,
ax_beat.ready.eq(0),
NextValue(_cmd_done, 1)
)
),
# w (write data)
axi_lite.w.valid.eq(axi.w.valid),
axi_lite.w.data.eq(axi.w.data),
axi_lite.w.strb.eq(axi.w.strb),
axi.w.ready.eq(axi_lite.w.ready),
# Exit
If(axi.w.valid & axi.w.last & axi.w.ready,
NextState("WRITE-RESP")
)
)
fsm.act("WRITE-RESP",
axi.b.valid.eq(1),
axi.b.resp.eq(RESP_OKAY),
axi.b.id.eq(ax_beat.id),
If(axi.b.ready,
ax_beat.ready.eq(1),
NextState("IDLE")
)
)
# AXI Lite to AXI ----------------------------------------------------------------------------------
class AXILite2AXI(Module):
def __init__(self, axi_lite, axi, write_id=0, read_id=0, prot=0, burst_type="INCR"):
assert isinstance(axi_lite, AXILiteInterface)
assert isinstance(axi, AXIInterface)
assert axi_lite.data_width == axi.data_width
assert axi_lite.address_width == axi.address_width
# n bytes, encoded as log2(n)
burst_size = log2_int(axi.data_width // 8)
        # Burst type has no meaning as we use a burst length of 1, but AXI slaves may require a
        # certain type of burst, so it is probably safest to use INCR in general.
burst_type = {
"FIXED": 0b00,
"INCR": 0b01,
"WRAP": 0b10,
}[burst_type]
self.comb += [
# aw (write command)
axi.aw.valid.eq(axi_lite.aw.valid),
axi_lite.aw.ready.eq(axi.aw.ready),
axi.aw.addr.eq(axi_lite.aw.addr),
axi.aw.burst.eq(burst_type),
axi.aw.len.eq(0), # 1 transfer per burst
axi.aw.size.eq(burst_size),
axi.aw.lock.eq(0), # Normal access
axi.aw.prot.eq(prot),
axi.aw.cache.eq(0b0011), # Normal Non-cacheable Bufferable
axi.aw.qos.eq(0),
axi.aw.id.eq(write_id),
# w (write data)
axi.w.valid.eq(axi_lite.w.valid),
axi_lite.w.ready.eq(axi.w.ready),
axi.w.data.eq(axi_lite.w.data),
axi.w.strb.eq(axi_lite.w.strb),
axi.w.last.eq(1),
# b (write response)
axi_lite.b.valid.eq(axi.b.valid),
axi_lite.b.resp.eq(axi.b.resp),
axi.b.ready.eq(axi_lite.b.ready),
# ar (read command)
axi.ar.valid.eq(axi_lite.ar.valid),
axi_lite.ar.ready.eq(axi.ar.ready),
axi.ar.addr.eq(axi_lite.ar.addr),
axi.ar.burst.eq(burst_type),
axi.ar.len.eq(0),
axi.ar.size.eq(burst_size),
axi.ar.lock.eq(0),
axi.ar.prot.eq(prot),
axi.ar.cache.eq(0b0011),
axi.ar.qos.eq(0),
axi.ar.id.eq(read_id),
# r (read response & data)
axi_lite.r.valid.eq(axi.r.valid),
axi_lite.r.resp.eq(axi.r.resp),
axi_lite.r.data.eq(axi.r.data),
axi.r.ready.eq(axi_lite.r.ready),
]
# AXI Lite to Wishbone -----------------------------------------------------------------------------
class AXILite2Wishbone(Module):
def __init__(self, axi_lite, wishbone, base_address=0x00000000):
wishbone_adr_shift = log2_int(axi_lite.data_width//8)
assert axi_lite.data_width == len(wishbone.dat_r)
assert axi_lite.address_width == len(wishbone.adr) + wishbone_adr_shift
_data = Signal(axi_lite.data_width)
_r_addr = Signal(axi_lite.address_width)
_w_addr = Signal(axi_lite.address_width)
_last_ar_aw_n = Signal()
self.comb += _r_addr.eq(axi_lite.ar.addr - base_address)
self.comb += _w_addr.eq(axi_lite.aw.addr - base_address)
self.submodules.fsm = fsm = FSM(reset_state="IDLE")
fsm.act("IDLE",
If(axi_lite.ar.valid & axi_lite.aw.valid,
# If last access was a read, do a write
If(_last_ar_aw_n,
NextValue(_last_ar_aw_n, 0),
NextState("DO-WRITE")
# If last access was a write, do a read
).Else(
NextValue(_last_ar_aw_n, 1),
NextState("DO-READ")
)
).Elif(axi_lite.ar.valid,
NextValue(_last_ar_aw_n, 1),
NextState("DO-READ")
).Elif(axi_lite.aw.valid,
NextValue(_last_ar_aw_n, 0),
NextState("DO-WRITE")
)
)
fsm.act("DO-READ",
wishbone.stb.eq(1),
wishbone.cyc.eq(1),
wishbone.adr.eq(_r_addr[wishbone_adr_shift:]),
wishbone.sel.eq(2**len(wishbone.sel) - 1),
If(wishbone.ack,
axi_lite.ar.ready.eq(1),
NextValue(_data, wishbone.dat_r),
NextState("SEND-READ-RESPONSE")
)
)
fsm.act("SEND-READ-RESPONSE",
axi_lite.r.valid.eq(1),
axi_lite.r.resp.eq(RESP_OKAY),
axi_lite.r.data.eq(_data),
If(axi_lite.r.ready,
NextState("IDLE")
)
)
fsm.act("DO-WRITE",
wishbone.stb.eq(axi_lite.w.valid),
wishbone.cyc.eq(axi_lite.w.valid),
wishbone.we.eq(1),
wishbone.adr.eq(_w_addr[wishbone_adr_shift:]),
wishbone.sel.eq(axi_lite.w.strb),
wishbone.dat_w.eq(axi_lite.w.data),
If(wishbone.ack,
axi_lite.aw.ready.eq(1),
axi_lite.w.ready.eq(1),
NextState("SEND-WRITE-RESPONSE")
)
)
fsm.act("SEND-WRITE-RESPONSE",
axi_lite.b.valid.eq(1),
axi_lite.b.resp.eq(RESP_OKAY),
If(axi_lite.b.ready,
NextState("IDLE")
)
)
# AXI to Wishbone ----------------------------------------------------------------------------------
class AXI2Wishbone(Module):
def __init__(self, axi, wishbone, base_address=0x00000000):
axi_lite = AXILiteInterface(axi.data_width, axi.address_width)
axi2axi_lite = AXI2AXILite(axi, axi_lite)
axi_lite2wishbone = AXILite2Wishbone(axi_lite, wishbone, base_address)
self.submodules += axi2axi_lite, axi_lite2wishbone
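# Illustrative sketch (assumption, not part of LiteX): bridging a full AXI
# master to a Wishbone bus. The `wishbone` import is part of LiteX but is not
# otherwise used in this file; widths and names are made up for demonstration.
def _axi_to_wishbone_example():
    from litex.soc.interconnect import wishbone
    axi = AXIInterface(data_width=32, address_width=32, id_width=4)
    wb  = wishbone.Interface(data_width=32)
    # 32-bit data gives a 30-bit Wishbone word address, which satisfies the
    # address-width assertion inside AXILite2Wishbone.
    return axi, wb, AXI2Wishbone(axi, wb, base_address=0x00000000)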
# Wishbone to AXILite ------------------------------------------------------------------------------
class Wishbone2AXILite(Module):
def __init__(self, wishbone, axi_lite, base_address=0x00000000):
wishbone_adr_shift = log2_int(axi_lite.data_width//8)
assert axi_lite.data_width == len(wishbone.dat_r)
assert axi_lite.address_width == len(wishbone.adr) + wishbone_adr_shift
_cmd_done = Signal()
_data_done = Signal()
_addr = Signal(len(wishbone.adr))
self.comb += _addr.eq(wishbone.adr - base_address//4)
self.submodules.fsm = fsm = FSM(reset_state="IDLE")
fsm.act("IDLE",
NextValue(_cmd_done, 0),
NextValue(_data_done, 0),
If(wishbone.stb & wishbone.cyc,
If(wishbone.we,
NextState("WRITE")
).Else(
NextState("READ")
)
)
)
fsm.act("WRITE",
# aw (write command)
axi_lite.aw.valid.eq(~_cmd_done),
axi_lite.aw.addr[wishbone_adr_shift:].eq(_addr),
If(axi_lite.aw.valid & axi_lite.aw.ready,
NextValue(_cmd_done, 1)
),
# w (write data)
axi_lite.w.valid.eq(~_data_done),
axi_lite.w.data.eq(wishbone.dat_w),
axi_lite.w.strb.eq(wishbone.sel),
If(axi_lite.w.valid & axi_lite.w.ready,
NextValue(_data_done, 1),
),
# b (write response)
axi_lite.b.ready.eq(_cmd_done & _data_done),
If(axi_lite.b.valid & axi_lite.b.ready,
If(axi_lite.b.resp == RESP_OKAY,
wishbone.ack.eq(1),
NextState("IDLE")
).Else(
NextState("ERROR")
)
)
)
fsm.act("READ",
# ar (read command)
axi_lite.ar.valid.eq(~_cmd_done),
axi_lite.ar.addr[wishbone_adr_shift:].eq(_addr),
If(axi_lite.ar.valid & axi_lite.ar.ready,
NextValue(_cmd_done, 1)
),
# r (read data & response)
axi_lite.r.ready.eq(_cmd_done),
If(axi_lite.r.valid & axi_lite.r.ready,
If(axi_lite.r.resp == RESP_OKAY,
wishbone.dat_r.eq(axi_lite.r.data),
wishbone.ack.eq(1),
NextState("IDLE"),
).Else(
NextState("ERROR")
)
)
)
fsm.act("ERROR",
wishbone.ack.eq(1),
wishbone.err.eq(1),
NextState("IDLE")
)
# Wishbone to AXI ----------------------------------------------------------------------------------
class Wishbone2AXI(Module):
def __init__(self, wishbone, axi, base_address=0x00000000):
axi_lite = AXILiteInterface(axi.data_width, axi.address_width)
wishbone2axi_lite = Wishbone2AXILite(wishbone, axi_lite, base_address)
axi_lite2axi = AXILite2AXI(axi_lite, axi)
self.submodules += wishbone2axi_lite, axi_lite2axi
# AXILite to CSR -----------------------------------------------------------------------------------
def axi_lite_to_simple(axi_lite, port_adr, port_dat_r, port_dat_w=None, port_we=None):
"""Connection of AXILite to simple bus with 1-cycle latency, such as CSR bus or Memory port"""
bus_data_width = axi_lite.data_width
adr_shift = log2_int(bus_data_width//8)
do_read = Signal()
do_write = Signal()
last_was_read = Signal()
comb = []
if port_dat_w is not None:
comb.append(port_dat_w.eq(axi_lite.w.data))
if port_we is not None:
if len(port_we) > 1:
for i in range(bus_data_width//8):
comb.append(port_we[i].eq(axi_lite.w.valid & axi_lite.w.ready & axi_lite.w.strb[i]))
else:
comb.append(port_we.eq(axi_lite.w.valid & axi_lite.w.ready & (axi_lite.w.strb != 0)))
fsm = FSM()
fsm.act("START-TRANSACTION",
# If the last access was a read, do a write, and vice versa.
If(axi_lite.aw.valid & axi_lite.ar.valid,
do_write.eq(last_was_read),
do_read.eq(~last_was_read),
).Else(
do_write.eq(axi_lite.aw.valid),
do_read.eq(axi_lite.ar.valid),
),
        # Start reading/writing immediately so as not to waste a cycle.
If(do_write,
port_adr.eq(axi_lite.aw.addr[adr_shift:]),
If(axi_lite.w.valid,
axi_lite.aw.ready.eq(1),
axi_lite.w.ready.eq(1),
NextState("SEND-WRITE-RESPONSE")
)
).Elif(do_read,
port_adr.eq(axi_lite.ar.addr[adr_shift:]),
axi_lite.ar.ready.eq(1),
NextState("SEND-READ-RESPONSE"),
)
)
fsm.act("SEND-READ-RESPONSE",
NextValue(last_was_read, 1),
        # As long as we keep driving the correct address, port.dat_r will be valid.
port_adr.eq(axi_lite.ar.addr[adr_shift:]),
axi_lite.r.data.eq(port_dat_r),
axi_lite.r.resp.eq(RESP_OKAY),
axi_lite.r.valid.eq(1),
If(axi_lite.r.ready,
NextState("START-TRANSACTION")
)
)
fsm.act("SEND-WRITE-RESPONSE",
NextValue(last_was_read, 0),
axi_lite.b.valid.eq(1),
axi_lite.b.resp.eq(RESP_OKAY),
If(axi_lite.b.ready,
NextState("START-TRANSACTION")
)
)
return fsm, comb
class AXILite2CSR(Module):
def __init__(self, axi_lite=None, bus_csr=None, register=False):
# TODO: unused register argument
if axi_lite is None:
axi_lite = AXILiteInterface()
if bus_csr is None:
bus_csr = csr_bus.Interface()
self.axi_lite = axi_lite
self.csr = bus_csr
fsm, comb = axi_lite_to_simple(
axi_lite = self.axi_lite,
port_adr = self.csr.adr,
port_dat_r = self.csr.dat_r,
port_dat_w = self.csr.dat_w,
port_we = self.csr.we)
self.submodules.fsm = fsm
self.comb += comb
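# Illustrative sketch (assumption, not part of LiteX): exposing the CSR bus
# through an AXI-Lite port. The interface parameters are made up; AXILite2CSR
# can also create default interfaces itself when called with no arguments.
def _axi_lite_to_csr_example():
    axil = AXILiteInterface(data_width=32, address_width=32)
    csr  = csr_bus.Interface(data_width=32, address_width=14)
    return AXILite2CSR(axil, csr)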
# AXILite SRAM -------------------------------------------------------------------------------------
class AXILiteSRAM(Module):
def __init__(self, mem_or_size, read_only=None, init=None, bus=None):
if bus is None:
bus = AXILiteInterface()
self.bus = bus
bus_data_width = len(self.bus.r.data)
if isinstance(mem_or_size, Memory):
assert(mem_or_size.width <= bus_data_width)
self.mem = mem_or_size
else:
self.mem = Memory(bus_data_width, mem_or_size//(bus_data_width//8), init=init)
if read_only is None:
if hasattr(self.mem, "bus_read_only"):
read_only = self.mem.bus_read_only
else:
read_only = False
# # #
# Create memory port
port = self.mem.get_port(write_capable=not read_only, we_granularity=8,
mode=READ_FIRST if read_only else WRITE_FIRST)
self.specials += self.mem, port
# Generate write enable signal
if not read_only:
self.comb += port.dat_w.eq(self.bus.w.data),
self.comb += [port.we[i].eq(self.bus.w.valid & self.bus.w.ready & self.bus.w.strb[i])
for i in range(bus_data_width//8)]
# Transaction logic
fsm, comb = axi_lite_to_simple(
axi_lite = self.bus,
port_adr = port.adr,
port_dat_r = port.dat_r,
port_dat_w = port.dat_w if not read_only else None,
port_we = port.we if not read_only else None)
self.submodules.fsm = fsm
self.comb += comb
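# Illustrative sketch (assumption, not part of LiteX): simulating the SRAM
# through its AXI-Lite port with the write()/read() generators defined on
# AXILiteInterface; `run_simulation` is re-exported by `from migen import *`.
def _axi_lite_sram_example():
    dut = AXILiteSRAM(0x100, init=[0xdeadbeef])
    def tb():
        resp = yield from dut.bus.write(0x4, 0xcafe0000)
        assert resp == RESP_OKAY
        data, resp = yield from dut.bus.read(0x0)
        assert resp == RESP_OKAY
        assert data == 0xdeadbeef
    run_simulation(dut, [tb()])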
# AXILite Data Width Converter ---------------------------------------------------------------------
class _AXILiteDownConverterWrite(Module):
def __init__(self, master, slave):
assert isinstance(master, AXILiteInterface) and isinstance(slave, AXILiteInterface)
dw_from = len(master.w.data)
dw_to = len(slave.w.data)
ratio = dw_from//dw_to
skip = Signal()
counter = Signal(max=ratio)
aw_ready = Signal()
w_ready = Signal()
resp = Signal.like(master.b.resp)
# # #
# Data path
self.comb += [
slave.aw.addr.eq(master.aw.addr + counter*(dw_to//8)),
Case(counter, {i: slave.w.data.eq(master.w.data[i*dw_to:]) for i in range(ratio)}),
Case(counter, {i: slave.w.strb.eq(master.w.strb[i*dw_to//8:]) for i in range(ratio)}),
master.b.resp.eq(resp),
]
# Control Path
fsm = FSM(reset_state="IDLE")
fsm = ResetInserter()(fsm)
self.submodules.fsm = fsm
        # Reset the converter state if the master breaks a request; we can do that because
        # aw.valid and w.valid are kept high in CONVERT and RESPOND-SLAVE and are acknowledged
        # only when moving to RESPOND-MASTER, at which point b.valid is 1.
self.comb += fsm.reset.eq(~((master.aw.valid | master.w.valid) | master.b.valid))
fsm.act("IDLE",
NextValue(counter, 0),
NextValue(resp, RESP_OKAY),
If(master.aw.valid & master.w.valid,
NextState("CONVERT")
)
)
fsm.act("CONVERT",
skip.eq(slave.w.strb == 0),
slave.aw.valid.eq(~skip & ~aw_ready),
slave.w.valid.eq(~skip & ~w_ready),
If(slave.aw.ready,
NextValue(aw_ready, 1)
),
If(slave.w.ready,
NextValue(w_ready, 1)
),
# When skipping, we just increment the counter.
If(skip,
NextValue(counter, counter + 1),
# Corner-case: when the last word is being skipped, we must send the response.
If(counter == (ratio - 1),
master.aw.ready.eq(1),
master.w.ready.eq(1),
NextState("RESPOND-MASTER")
)
# Write current word and wait for write response.
).Elif((slave.aw.ready | aw_ready) & (slave.w.ready | w_ready),
NextState("RESPOND-SLAVE")
)
)
fsm.act("RESPOND-SLAVE",
NextValue(aw_ready, 0),
NextValue(w_ready, 0),
If(slave.b.valid,
slave.b.ready.eq(1),
# Errors are sticky, so the first one is always sent.
If((resp == RESP_OKAY) & (slave.b.resp != RESP_OKAY),
NextValue(resp, slave.b.resp)
),
If(counter == (ratio - 1),
master.aw.ready.eq(1),
master.w.ready.eq(1),
NextState("RESPOND-MASTER")
).Else(
NextValue(counter, counter + 1),
NextState("CONVERT")
)
)
)
fsm.act("RESPOND-MASTER",
NextValue(aw_ready, 0),
NextValue(w_ready, 0),
master.b.valid.eq(1),
If(master.b.ready,
NextState("IDLE")
)
)
class _AXILiteDownConverterRead(Module):
def __init__(self, master, slave):
assert isinstance(master, AXILiteInterface) and isinstance(slave, AXILiteInterface)
dw_from = len(master.r.data)
dw_to = len(slave.r.data)
ratio = dw_from//dw_to
skip = Signal()
counter = Signal(max=ratio)
resp = Signal.like(master.r.resp)
# # #
# Data path
# Shift the data word
r_data = Signal(dw_from, reset_less=True)
self.sync += If(slave.r.ready, r_data.eq(master.r.data))
self.comb += master.r.data.eq(Cat(r_data[dw_to:], slave.r.data))
# Connect address, resp
self.comb += [
slave.ar.addr.eq(master.ar.addr + counter*(dw_to//8)),
master.r.resp.eq(resp),
]
# Control Path
fsm = FSM(reset_state="IDLE")
fsm = ResetInserter()(fsm)
self.submodules.fsm = fsm
        # Reset the converter state if the master breaks a request; we can do that because
        # ar.valid is high in CONVERT and RESPOND-SLAVE, and r.valid in RESPOND-MASTER.
self.comb += fsm.reset.eq(~(master.ar.valid | master.r.valid))
fsm.act("IDLE",
NextValue(counter, 0),
NextValue(resp, RESP_OKAY),
If(master.ar.valid,
NextState("CONVERT")
)
)
fsm.act("CONVERT",
slave.ar.valid.eq(1),
If(slave.ar.ready,
NextState("RESPOND-SLAVE")
)
)
fsm.act("RESPOND-SLAVE",
If(slave.r.valid,
# Errors are sticky, so the first one is always sent.
If((resp == RESP_OKAY) & (slave.r.resp != RESP_OKAY),
NextValue(resp, slave.r.resp)
),
# On last word acknowledge ar and hold slave.r.valid until we get master.r.ready.
If(counter == (ratio - 1),
master.ar.ready.eq(1),
NextState("RESPOND-MASTER")
# Acknowledge the response and continue conversion.
).Else(
slave.r.ready.eq(1),
NextValue(counter, counter + 1),
NextState("CONVERT")
)
)
)
fsm.act("RESPOND-MASTER",
master.r.valid.eq(1),
If(master.r.ready,
slave.r.ready.eq(1),
NextState("IDLE")
)
)
class AXILiteDownConverter(Module):
def __init__(self, master, slave):
self.submodules.write = _AXILiteDownConverterWrite(master, slave)
self.submodules.read = _AXILiteDownConverterRead(master, slave)
class AXILiteUpConverter(Module):
    # TODO: we could try joining multiple master accesses into a single slave access; this would
    # require checking whether the address changes and a way to flush on a single access.
def __init__(self, master, slave):
assert isinstance(master, AXILiteInterface) and isinstance(slave, AXILiteInterface)
dw_from = len(master.r.data)
dw_to = len(slave.r.data)
ratio = dw_to//dw_from
master_align = log2_int(master.data_width//8)
slave_align = log2_int(slave.data_width//8)
wr_word = Signal(log2_int(ratio))
rd_word = Signal(log2_int(ratio))
wr_word_r = Signal(log2_int(ratio))
rd_word_r = Signal(log2_int(ratio))
# # #
self.comb += master.connect(slave, omit={"addr", "strb", "data"})
# Address
self.comb += [
slave.aw.addr[slave_align:].eq(master.aw.addr[slave_align:]),
slave.ar.addr[slave_align:].eq(master.ar.addr[slave_align:]),
]
# Data path
wr_cases, rd_cases = {}, {}
for i in range(ratio):
strb_from = i * dw_from//8
strb_to = (i+1) * dw_from//8
data_from = i * dw_from
data_to = (i+1) * dw_from
wr_cases[i] = [
slave.w.strb[strb_from:strb_to].eq(master.w.strb),
slave.w.data[data_from:data_to].eq(master.w.data),
]
rd_cases[i] = [
master.r.data.eq(slave.r.data[data_from:data_to]),
]
# Switch current word based on the last valid master address.
self.sync += If(master.aw.valid, wr_word_r.eq(wr_word))
self.sync += If(master.ar.valid, rd_word_r.eq(rd_word))
self.comb += [
Case(master.aw.valid, {
0: wr_word.eq(wr_word_r),
1: wr_word.eq(master.aw.addr[master_align:slave_align]),
}),
Case(master.ar.valid, {
0: rd_word.eq(rd_word_r),
1: rd_word.eq(master.ar.addr[master_align:slave_align]),
}),
]
self.comb += Case(wr_word, wr_cases)
self.comb += Case(rd_word, rd_cases)
class AXILiteConverter(Module):
"""AXILite data width converter"""
def __init__(self, master, slave):
self.master = master
self.slave = slave
# # #
dw_from = len(master.r.data)
dw_to = len(slave.r.data)
ratio = dw_from/dw_to
if ratio > 1:
self.submodules += AXILiteDownConverter(master, slave)
elif ratio < 1:
self.submodules += AXILiteUpConverter(master, slave)
else:
self.comb += master.connect(slave)
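# Illustrative sketch (assumption, not part of LiteX): the converter picks the
# down-/up-conversion direction from the data-width ratio of the two interfaces.
# Names and widths below are made up for demonstration.
def _axi_lite_width_conversion_example():
    host  = AXILiteInterface(data_width=64, address_width=32)
    local = AXILiteInterface(data_width=32, address_width=32)
    # 64 -> 32 bit: an AXILiteDownConverter is instantiated internally.
    return AXILiteConverter(host, local)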
# AXILite Clock Domain Crossing --------------------------------------------------------------------
class AXILiteClockDomainCrossing(Module):
"""AXILite Clock Domain Crossing"""
def __init__(self, master, slave, cd_from="sys", cd_to="sys"):
# Same Clock Domain, direct connection.
if cd_from == cd_to:
self.comb += [
# Write.
master.aw.connect(slave.aw),
master.w.connect(slave.w),
slave.b.connect(master.b),
# Read.
master.ar.connect(slave.ar),
slave.r.connect(master.r),
]
# Clock Domain Crossing.
else:
# Write.
aw_cdc = stream.ClockDomainCrossing(master.aw.description, cd_from, cd_to)
w_cdc = stream.ClockDomainCrossing(master.w.description, cd_from, cd_to)
b_cdc = stream.ClockDomainCrossing(master.b.description, cd_to, cd_from)
self.submodules += aw_cdc, w_cdc, b_cdc
self.comb += [
master.aw.connect(aw_cdc.sink),
aw_cdc.source.connect(slave.aw),
master.w.connect(w_cdc.sink),
w_cdc.source.connect(slave.w),
slave.b.connect(b_cdc.sink),
b_cdc.source.connect(master.b),
]
# Read.
ar_cdc = stream.ClockDomainCrossing(master.ar.description, cd_from, cd_to)
r_cdc = stream.ClockDomainCrossing(master.r.description, cd_to, cd_from)
self.submodules += ar_cdc, r_cdc
self.comb += [
master.ar.connect(ar_cdc.sink),
ar_cdc.source.connect(slave.ar),
slave.r.connect(r_cdc.sink),
r_cdc.source.connect(master.r),
]
# AXILite Timeout ----------------------------------------------------------------------------------
class AXILiteTimeout(Module):
"""Protect master against slave timeouts (master _has_ to respond correctly)"""
def __init__(self, master, cycles):
self.error = Signal()
wr_error = Signal()
rd_error = Signal()
# # #
self.comb += self.error.eq(wr_error | rd_error)
wr_timer = WaitTimer(int(cycles))
rd_timer = WaitTimer(int(cycles))
self.submodules += wr_timer, rd_timer
def channel_fsm(timer, wait_cond, error, response):
fsm = FSM(reset_state="WAIT")
fsm.act("WAIT",
timer.wait.eq(wait_cond),
# done is updated in `sync`, so we must make sure that `ready` has not been issued
                # by the slave during that single cycle, by checking `timer.wait`.
If(timer.done & timer.wait,
error.eq(1),
NextState("RESPOND")
)
)
fsm.act("RESPOND", *response)
return fsm
self.submodules.wr_fsm = channel_fsm(
timer = wr_timer,
wait_cond = (master.aw.valid & ~master.aw.ready) | (master.w.valid & ~master.w.ready),
error = wr_error,
response = [
master.aw.ready.eq(master.aw.valid),
master.w.ready.eq(master.w.valid),
master.b.valid.eq(~master.aw.valid & ~master.w.valid),
master.b.resp.eq(RESP_SLVERR),
If(master.b.valid & master.b.ready,
NextState("WAIT")
)
])
self.submodules.rd_fsm = channel_fsm(
timer = rd_timer,
wait_cond = master.ar.valid & ~master.ar.ready,
error = rd_error,
response = [
master.ar.ready.eq(master.ar.valid),
master.r.valid.eq(~master.ar.valid),
master.r.resp.eq(RESP_SLVERR),
master.r.data.eq(2**len(master.r.data) - 1),
If(master.r.valid & master.r.ready,
NextState("WAIT")
)
])
# AXILite Interconnect -----------------------------------------------------------------------------
class _AXILiteRequestCounter(Module):
def __init__(self, request, response, max_requests=256):
self.counter = counter = Signal(max=max_requests)
self.full = full = Signal()
self.empty = empty = Signal()
self.stall = stall = Signal()
self.ready = self.empty
self.comb += [
full.eq(counter == max_requests - 1),
empty.eq(counter == 0),
stall.eq(request & full),
]
self.sync += [
If(request & response,
counter.eq(counter)
).Elif(request & ~full,
counter.eq(counter + 1)
).Elif(response & ~empty,
counter.eq(counter - 1)
),
]
class AXILiteInterconnectPointToPoint(Module):
def __init__(self, master, slave):
self.comb += master.connect(slave)
class AXILiteArbiter(Module):
"""AXI Lite arbiter
    Arbitrate between master interfaces and connect one to the target. A new master will not be
    selected until all pending requests have been responded to. Arbitration for write and read channels is
done separately.
"""
def __init__(self, masters, target):
self.submodules.rr_write = roundrobin.RoundRobin(len(masters), roundrobin.SP_CE)
self.submodules.rr_read = roundrobin.RoundRobin(len(masters), roundrobin.SP_CE)
def get_sig(interface, channel, name):
return getattr(getattr(interface, channel), name)
# Mux master->slave signals
for channel, name, direction in target.layout_flat():
rr = self.rr_write if channel in ["aw", "w", "b"] else self.rr_read
if direction == DIR_M_TO_S:
choices = Array(get_sig(m, channel, name) for m in masters)
self.comb += get_sig(target, channel, name).eq(choices[rr.grant])
# Connect slave->master signals
for channel, name, direction in target.layout_flat():
rr = self.rr_write if channel in ["aw", "w", "b"] else self.rr_read
if direction == DIR_S_TO_M:
source = get_sig(target, channel, name)
for i, m in enumerate(masters):
dest = get_sig(m, channel, name)
if name == "ready":
self.comb += dest.eq(source & (rr.grant == i))
else:
self.comb += dest.eq(source)
        # Allow rr.grant to change only after all requests from a master have been responded to.
self.submodules.wr_lock = wr_lock = _AXILiteRequestCounter(
request=target.aw.valid & target.aw.ready, response=target.b.valid & target.b.ready)
self.submodules.rd_lock = rd_lock = _AXILiteRequestCounter(
request=target.ar.valid & target.ar.ready, response=target.r.valid & target.r.ready)
# Switch to next request only if there are no responses pending.
self.comb += [
self.rr_write.ce.eq(~(target.aw.valid | target.w.valid | target.b.valid) & wr_lock.ready),
self.rr_read.ce.eq(~(target.ar.valid | target.r.valid) & rd_lock.ready),
]
# Connect bus requests to round-robin selectors.
self.comb += [
self.rr_write.request.eq(Cat(*[m.aw.valid | m.w.valid | m.b.valid for m in masters])),
self.rr_read.request.eq(Cat(*[m.ar.valid | m.r.valid for m in masters])),
]
class AXILiteDecoder(Module):
"""AXI Lite decoder
    Decode master accesses to a particular slave based on its decoder function.
slaves: [(decoder, slave), ...]
List of slaves with address decoders, where `decoder` is a function:
decoder(Signal(address_width - log2(data_width//8))) -> Signal(1)
that returns 1 when the slave is selected and 0 otherwise.
"""
def __init__(self, master, slaves, register=False):
# TODO: unused register argument
addr_shift = log2_int(master.data_width//8)
channels = {
"write": {"aw", "w", "b"},
"read": {"ar", "r"},
}
# Reverse mapping: directions[channel] -> "write"/"read".
directions = {ch: d for d, chs in channels.items() for ch in chs}
def new_slave_sel():
return {"write": Signal(len(slaves)), "read": Signal(len(slaves))}
slave_sel_dec = new_slave_sel()
slave_sel_reg = new_slave_sel()
slave_sel = new_slave_sel()
# We need to hold the slave selected until all responses come back.
# TODO: we could reuse arbiter counters
locks = {
"write": _AXILiteRequestCounter(
request=master.aw.valid & master.aw.ready,
response=master.b.valid & master.b.ready),
"read": _AXILiteRequestCounter(
request=master.ar.valid & master.ar.ready,
response=master.r.valid & master.r.ready),
}
self.submodules += locks.values()
def get_sig(interface, channel, name):
return getattr(getattr(interface, channel), name)
# # #
# Decode slave addresses.
for i, (decoder, bus) in enumerate(slaves):
self.comb += [
slave_sel_dec["write"][i].eq(decoder(master.aw.addr[addr_shift:])),
slave_sel_dec["read"][i].eq(decoder(master.ar.addr[addr_shift:])),
]
        # Change the current selection only when we've got all responses.
for channel in locks.keys():
self.sync += If(locks[channel].ready, slave_sel_reg[channel].eq(slave_sel_dec[channel]))
        # Use the freshly decoded select when a new request can start, otherwise hold the registered one.
for ch, final in slave_sel.items():
self.comb += If(locks[ch].ready,
final.eq(slave_sel_dec[ch])
).Else(
final.eq(slave_sel_reg[ch])
)
# Connect master->slaves signals except valid/ready.
for i, (_, slave) in enumerate(slaves):
for channel, name, direction in master.layout_flat():
if direction == DIR_M_TO_S:
src = get_sig(master, channel, name)
dst = get_sig(slave, channel, name)
# Mask master control signals depending on slave selection.
if name in ["valid", "ready"]:
src = src & slave_sel[directions[channel]][i]
self.comb += dst.eq(src)
# Connect slave->master signals masking not selected slaves.
for channel, name, direction in master.layout_flat():
if direction == DIR_S_TO_M:
dst = get_sig(master, channel, name)
masked = []
for i, (_, slave) in enumerate(slaves):
src = get_sig(slave, channel, name)
# Mask depending on channel.
mask = Replicate(slave_sel[directions[channel]][i], len(dst))
masked.append(src & mask)
self.comb += dst.eq(reduce(or_, masked))
class AXILiteInterconnectShared(Module):
"""AXI Lite shared interconnect"""
def __init__(self, masters, slaves, register=False, timeout_cycles=1e6):
# TODO: data width
shared = AXILiteInterface()
self.submodules.arbiter = AXILiteArbiter(masters, shared)
self.submodules.decoder = AXILiteDecoder(shared, slaves)
if timeout_cycles is not None:
self.submodules.timeout = AXILiteTimeout(shared, timeout_cycles)
class AXILiteCrossbar(Module):
"""AXI Lite crossbar
MxN crossbar for M masters and N slaves.
"""
def __init__(self, masters, slaves, register=False, timeout_cycles=1e6):
matches, busses = zip(*slaves)
access_m_s = [[AXILiteInterface() for j in slaves] for i in masters] # a[master][slave]
access_s_m = list(zip(*access_m_s)) # a[slave][master]
# Decode each master into its access row.
for slaves, master in zip(access_m_s, masters):
slaves = list(zip(matches, slaves))
self.submodules += AXILiteDecoder(master, slaves, register)
# Arbitrate each access column onto its slave.
for masters, bus in zip(access_s_m, busses):
self.submodules += AXILiteArbiter(masters, bus)
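# Illustrative sketch (assumption, not part of LiteX): wiring two masters and
# two slaves through the crossbar. Each decoder receives the word address
# (byte address shifted right by log2(data_width//8)) and must return a 1-bit
# selection signal; the address map below is made up for demonstration.
def _axi_lite_crossbar_example():
    masters   = [AXILiteInterface() for _ in range(2)]
    sram0_bus = AXILiteInterface()
    sram1_bus = AXILiteInterface()
    slaves = [
        (lambda addr: addr[26:] == 0, sram0_bus),  # byte region 0x0000_0000
        (lambda addr: addr[26:] == 1, sram1_bus),  # byte region 0x1000_0000
    ]
    return AXILiteCrossbar(masters, slaves)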
|
py | b405344adff19308cc3661ea00344c45d725e1cc | import copy
import textwrap
import unittest
from typing import Any, Dict # noqa: F401
from mock import patch, MagicMock
from neo4j.v1 import GraphDatabase
from metadata_service import create_app
from metadata_service.entity.popular_table import PopularTable
from metadata_service.entity.table_detail import (Application, Column, Table, Tag,
Watermark, Source, Statistics, User)
from metadata_service.entity.tag_detail import TagDetail
from metadata_service.exception import NotFoundException
from metadata_service.proxy.neo4j_proxy import Neo4jProxy
from metadata_service.util import UserResourceRel
class TestNeo4jProxy(unittest.TestCase):
def setUp(self) -> None:
self.app = create_app(config_module_class='metadata_service.config.LocalConfig')
self.app_context = self.app.app_context()
self.app_context.push()
table_entry = {'db': {'name': 'hive'},
'clstr': {
'name': 'gold'},
'schema': {
'name': 'foo_schema'},
'tbl': {
'name': 'foo_table'},
'tbl_dscrpt': {
'description': 'foo description'}
}
col1 = copy.deepcopy(table_entry) # type: Dict[Any, Any]
col1['col'] = {'name': 'bar_id_1',
'type': 'varchar',
'sort_order': 0}
col1['col_dscrpt'] = {'description': 'bar col description'}
col1['col_stats'] = [{'stat_name': 'avg', 'start_epoch': 1, 'end_epoch': 1, 'stat_val': '1'}]
col2 = copy.deepcopy(table_entry) # type: Dict[Any, Any]
col2['col'] = {'name': 'bar_id_2',
'type': 'bigint',
'sort_order': 1}
col2['col_dscrpt'] = {'description': 'bar col2 description'}
col2['col_stats'] = [{'stat_name': 'avg', 'start_epoch': 2, 'end_epoch': 2, 'stat_val': '2'}]
table_level_results = MagicMock()
table_level_results.single.return_value = {
'wmk_records': [
{
'key': 'hive://gold.test_schema/test_table/high_watermark/',
'partition_key': 'ds',
'partition_value': 'fake_value',
'create_time': 'fake_time',
},
{
'key': 'hive://gold.test_schema/test_table/low_watermark/',
'partition_key': 'ds',
'partition_value': 'fake_value',
'create_time': 'fake_time',
}
],
'application': {
'application_url': 'airflow_host/admin/airflow/tree?dag_id=test_table',
'description': 'DAG generating a table',
'name': 'Airflow',
'id': 'dag/task_id'
},
'last_updated_timestamp': 1,
'owner_records': [
{
'key': '[email protected]',
'email': '[email protected]'
}
],
'tag_records': [
{
'key': 'test',
'tag_type': 'default'
}
],
'src': {
'source': '/source_file_loc',
'key': 'some key',
'source_type': 'github'
}
}
table_writer = {
'application_url': 'airflow_host/admin/airflow/tree?dag_id=test_table',
'description': 'DAG generating a table',
'name': 'Airflow',
'id': 'dag/task_id'
}
last_updated_timestamp = '01'
self.col_usage_return_value = [
col1,
col2
]
self.table_level_return_value = table_level_results
self.table_writer = table_writer
self.last_updated_timestamp = last_updated_timestamp
def tearDown(self) -> None:
pass
def test_get_table(self) -> None:
with patch.object(GraphDatabase, 'driver'), patch.object(Neo4jProxy, '_execute_cypher_query') as mock_execute:
mock_execute.side_effect = [self.col_usage_return_value, [], self.table_level_return_value]
neo4j_proxy = Neo4jProxy(host='DOES_NOT_MATTER', port=0000)
table = neo4j_proxy.get_table(table_uri='dummy_uri')
expected = Table(database='hive', cluster='gold', schema='foo_schema', name='foo_table',
tags=[Tag(tag_name='test', tag_type='default')],
table_readers=[], description='foo description',
watermarks=[Watermark(watermark_type='high_watermark',
partition_key='ds',
partition_value='fake_value',
create_time='fake_time'),
Watermark(watermark_type='low_watermark',
partition_key='ds',
partition_value='fake_value',
create_time='fake_time')],
columns=[Column(name='bar_id_1', description='bar col description', col_type='varchar',
sort_order=0, stats=[Statistics(start_epoch=1,
end_epoch=1,
stat_type='avg',
stat_val='1')]),
Column(name='bar_id_2', description='bar col2 description', col_type='bigint',
sort_order=1, stats=[Statistics(start_epoch=2,
end_epoch=2,
stat_type='avg',
stat_val='2')])],
owners=[User(email='[email protected]')],
table_writer=Application(application_url=self.table_writer['application_url'],
description=self.table_writer['description'],
name=self.table_writer['name'],
id=self.table_writer['id']),
last_updated_timestamp=1,
source=Source(source='/source_file_loc',
source_type='github'),
is_view=False)
self.assertEqual(str(expected), str(table))
def test_get_table_view_only(self) -> None:
col_usage_return_value = copy.deepcopy(self.col_usage_return_value)
for col in col_usage_return_value:
col['tbl']['is_view'] = True
with patch.object(GraphDatabase, 'driver'), patch.object(Neo4jProxy, '_execute_cypher_query') as mock_execute:
mock_execute.side_effect = [col_usage_return_value, [], self.table_level_return_value]
neo4j_proxy = Neo4jProxy(host='DOES_NOT_MATTER', port=0000)
table = neo4j_proxy.get_table(table_uri='dummy_uri')
expected = Table(database='hive', cluster='gold', schema='foo_schema', name='foo_table',
tags=[Tag(tag_name='test', tag_type='default')],
table_readers=[], description='foo description',
watermarks=[Watermark(watermark_type='high_watermark',
partition_key='ds',
partition_value='fake_value',
create_time='fake_time'),
Watermark(watermark_type='low_watermark',
partition_key='ds',
partition_value='fake_value',
create_time='fake_time')],
columns=[Column(name='bar_id_1', description='bar col description', col_type='varchar',
sort_order=0, stats=[Statistics(start_epoch=1,
end_epoch=1,
stat_type='avg',
stat_val='1')]),
Column(name='bar_id_2', description='bar col2 description', col_type='bigint',
sort_order=1, stats=[Statistics(start_epoch=2,
end_epoch=2,
stat_type='avg',
stat_val='2')])],
owners=[User(email='[email protected]')],
table_writer=Application(application_url=self.table_writer['application_url'],
description=self.table_writer['description'],
name=self.table_writer['name'],
id=self.table_writer['id']),
last_updated_timestamp=1,
source=Source(source='/source_file_loc',
source_type='github'),
is_view=True)
self.assertEqual(str(expected), str(table))
def test_get_table_with_valid_description(self) -> None:
"""
Test description is returned for table
:return:
"""
with patch.object(GraphDatabase, 'driver'), patch.object(Neo4jProxy, '_execute_cypher_query') as mock_execute:
mock_execute.return_value.single.return_value = dict(description='sample description')
neo4j_proxy = Neo4jProxy(host='DOES_NOT_MATTER', port=0000)
table_description = neo4j_proxy.get_table_description(table_uri='test_table')
table_description_query = textwrap.dedent("""
MATCH (tbl:Table {key: $tbl_key})-[:DESCRIPTION]->(d:Description)
RETURN d.description AS description;
""")
mock_execute.assert_called_with(statement=table_description_query,
param_dict={'tbl_key': 'test_table'})
self.assertEquals(table_description, 'sample description')
def test_get_table_with_no_description(self) -> None:
"""
Test None is returned for table with no description
:return:
"""
with patch.object(GraphDatabase, 'driver'), patch.object(Neo4jProxy, '_execute_cypher_query') as mock_execute:
mock_execute.return_value.single.return_value = None
neo4j_proxy = Neo4jProxy(host='DOES_NOT_MATTER', port=0000)
table_description = neo4j_proxy.get_table_description(table_uri='test_table')
table_description_query = textwrap.dedent("""
MATCH (tbl:Table {key: $tbl_key})-[:DESCRIPTION]->(d:Description)
RETURN d.description AS description;
""")
mock_execute.assert_called_with(statement=table_description_query,
param_dict={'tbl_key': 'test_table'})
self.assertIsNone(table_description)
def test_put_table_description(self) -> None:
"""
Test updating table description
:return:
"""
with patch.object(GraphDatabase, 'driver') as mock_driver:
mock_session = MagicMock()
mock_driver.return_value.session.return_value = mock_session
mock_transaction = MagicMock()
mock_session.begin_transaction.return_value = mock_transaction
mock_run = MagicMock()
mock_transaction.run = mock_run
mock_commit = MagicMock()
mock_transaction.commit = mock_commit
neo4j_proxy = Neo4jProxy(host='DOES_NOT_MATTER', port=0000)
neo4j_proxy.put_table_description(table_uri='test_table',
description='test_description')
self.assertEquals(mock_run.call_count, 2)
self.assertEquals(mock_commit.call_count, 1)
def test_get_column_with_valid_description(self) -> None:
"""
Test description is returned for column
:return:
"""
with patch.object(GraphDatabase, 'driver'), patch.object(Neo4jProxy, '_execute_cypher_query') as mock_execute:
mock_execute.return_value.single.return_value = dict(description='sample description')
neo4j_proxy = Neo4jProxy(host='DOES_NOT_MATTER', port=0000)
col_description = neo4j_proxy.get_column_description(table_uri='test_table',
column_name='test_column')
column_description_query = textwrap.dedent("""
MATCH (tbl:Table {key: $tbl_key})-[:COLUMN]->(c:Column {name: $column_name})-[:DESCRIPTION]->(d:Description)
RETURN d.description AS description;
""")
mock_execute.assert_called_with(statement=column_description_query,
param_dict={'tbl_key': 'test_table',
'column_name': 'test_column'})
self.assertEquals(col_description, 'sample description')
def test_get_column_with_no_description(self) -> None:
"""
Test None is returned for column with no description
:return:
"""
with patch.object(GraphDatabase, 'driver'), patch.object(Neo4jProxy, '_execute_cypher_query') as mock_execute:
mock_execute.return_value.single.return_value = None
neo4j_proxy = Neo4jProxy(host='DOES_NOT_MATTER', port=0000)
col_description = neo4j_proxy.get_column_description(table_uri='test_table',
column_name='test_column')
column_description_query = textwrap.dedent("""
MATCH (tbl:Table {key: $tbl_key})-[:COLUMN]->(c:Column {name: $column_name})-[:DESCRIPTION]->(d:Description)
RETURN d.description AS description;
""")
mock_execute.assert_called_with(statement=column_description_query,
param_dict={'tbl_key': 'test_table',
'column_name': 'test_column'})
self.assertIsNone(col_description)
def test_put_column_description(self) -> None:
"""
Test updating column description
:return:
"""
with patch.object(GraphDatabase, 'driver') as mock_driver:
mock_session = MagicMock()
mock_driver.return_value.session.return_value = mock_session
mock_transaction = MagicMock()
mock_session.begin_transaction.return_value = mock_transaction
mock_run = MagicMock()
mock_transaction.run = mock_run
mock_commit = MagicMock()
mock_transaction.commit = mock_commit
neo4j_proxy = Neo4jProxy(host='DOES_NOT_MATTER', port=0000)
neo4j_proxy.put_column_description(table_uri='test_table',
column_name='test_column',
description='test_description')
self.assertEquals(mock_run.call_count, 2)
self.assertEquals(mock_commit.call_count, 1)
def test_add_owner(self) -> None:
with patch.object(GraphDatabase, 'driver') as mock_driver:
mock_session = MagicMock()
mock_driver.return_value.session.return_value = mock_session
mock_transaction = MagicMock()
mock_session.begin_transaction.return_value = mock_transaction
mock_run = MagicMock()
mock_transaction.run = mock_run
mock_commit = MagicMock()
mock_transaction.commit = mock_commit
neo4j_proxy = Neo4jProxy(host='DOES_NOT_MATTER', port=0000)
neo4j_proxy.add_owner(table_uri='dummy_uri',
owner='tester')
# we call neo4j twice in add_owner call
self.assertEquals(mock_run.call_count, 2)
self.assertEquals(mock_commit.call_count, 1)
def test_delete_owner(self) -> None:
with patch.object(GraphDatabase, 'driver') as mock_driver:
mock_session = MagicMock()
mock_driver.return_value.session.return_value = mock_session
mock_transaction = MagicMock()
mock_session.begin_transaction.return_value = mock_transaction
mock_run = MagicMock()
mock_transaction.run = mock_run
mock_commit = MagicMock()
mock_transaction.commit = mock_commit
neo4j_proxy = Neo4jProxy(host='DOES_NOT_MATTER', port=0000)
neo4j_proxy.delete_owner(table_uri='dummy_uri',
owner='tester')
# we only call neo4j once in delete_owner call
self.assertEquals(mock_run.call_count, 1)
self.assertEquals(mock_commit.call_count, 1)
def test_add_tag(self) -> None:
with patch.object(GraphDatabase, 'driver') as mock_driver:
mock_session = MagicMock()
mock_driver.return_value.session.return_value = mock_session
mock_transaction = MagicMock()
mock_session.begin_transaction.return_value = mock_transaction
mock_run = MagicMock()
mock_transaction.run = mock_run
mock_commit = MagicMock()
mock_transaction.commit = mock_commit
neo4j_proxy = Neo4jProxy(host='DOES_NOT_MATTER', port=0000)
neo4j_proxy.add_tag(table_uri='dummy_uri',
tag='hive')
            # we call neo4j three times in add_tag call
self.assertEquals(mock_run.call_count, 3)
self.assertEquals(mock_commit.call_count, 1)
def test_delete_tag(self) -> None:
with patch.object(GraphDatabase, 'driver') as mock_driver:
mock_session = MagicMock()
mock_driver.return_value.session.return_value = mock_session
mock_transaction = MagicMock()
mock_session.begin_transaction.return_value = mock_transaction
mock_run = MagicMock()
mock_transaction.run = mock_run
mock_commit = MagicMock()
mock_transaction.commit = mock_commit
neo4j_proxy = Neo4jProxy(host='DOES_NOT_MATTER', port=0000)
neo4j_proxy.delete_tag(table_uri='dummy_uri',
tag='hive')
# we only call neo4j once in delete_tag call
self.assertEquals(mock_run.call_count, 1)
self.assertEquals(mock_commit.call_count, 1)
def test_get_tags(self) -> None:
with patch.object(GraphDatabase, 'driver'), patch.object(Neo4jProxy, '_execute_cypher_query') as mock_execute:
mock_execute.return_value = [
{'tag_name': {'key': 'tag1'}, 'tag_count': 2},
{'tag_name': {'key': 'tag2'}, 'tag_count': 1}
]
neo4j_proxy = Neo4jProxy(host='DOES_NOT_MATTER', port=0000)
actual = neo4j_proxy.get_tags()
expected = [
TagDetail(tag_name='tag1', tag_count=2),
TagDetail(tag_name='tag2', tag_count=1),
]
self.assertEqual(actual.__repr__(), expected.__repr__())
def test_get_neo4j_latest_updated_ts(self) -> None:
with patch.object(GraphDatabase, 'driver'), patch.object(Neo4jProxy, '_execute_cypher_query') as mock_execute:
mock_execute.return_value.single.return_value = {
'ts': {
'latest_timestmap': '1000'
}
}
neo4j_proxy = Neo4jProxy(host='DOES_NOT_MATTER', port=0000)
neo4j_last_updated_ts = neo4j_proxy.get_latest_updated_ts()
self.assertEquals(neo4j_last_updated_ts, '1000')
mock_execute.return_value.single.return_value = {
'ts': {
}
}
neo4j_proxy = Neo4jProxy(host='DOES_NOT_MATTER', port=0000)
neo4j_last_updated_ts = neo4j_proxy.get_latest_updated_ts()
self.assertEqual(neo4j_last_updated_ts, 0)
mock_execute.return_value.single.return_value = None
neo4j_proxy = Neo4jProxy(host='DOES_NOT_MATTER', port=0000)
neo4j_last_updated_ts = neo4j_proxy.get_latest_updated_ts()
self.assertIsNone(neo4j_last_updated_ts)
def test_get_popular_tables(self) -> None:
# Test cache hit
with patch.object(GraphDatabase, 'driver'), patch.object(Neo4jProxy, '_execute_cypher_query') as mock_execute:
mock_execute.return_value = [{'table_key': 'foo'}, {'table_key': 'bar'}]
neo4j_proxy = Neo4jProxy(host='DOES_NOT_MATTER', port=0000)
self.assertEqual(neo4j_proxy._get_popular_tables_uris(2), ['foo', 'bar'])
self.assertEqual(neo4j_proxy._get_popular_tables_uris(2), ['foo', 'bar'])
self.assertEqual(neo4j_proxy._get_popular_tables_uris(2), ['foo', 'bar'])
self.assertEquals(mock_execute.call_count, 1)
with patch.object(GraphDatabase, 'driver'), patch.object(Neo4jProxy, '_execute_cypher_query') as mock_execute:
mock_execute.return_value = [
{'database_name': 'db', 'cluster_name': 'clstr', 'schema_name': 'sch', 'table_name': 'foo',
'table_description': 'test description'},
{'database_name': 'db', 'cluster_name': 'clstr', 'schema_name': 'sch', 'table_name': 'bar'}
]
neo4j_proxy = Neo4jProxy(host='DOES_NOT_MATTER', port=0000)
actual = neo4j_proxy.get_popular_tables(num_entries=2)
expected = [
PopularTable(database='db', cluster='clstr', schema='sch', name='foo', description='test description'),
PopularTable(database='db', cluster='clstr', schema='sch', name='bar'),
]
self.assertEqual(actual.__repr__(), expected.__repr__())
def test_get_users(self) -> None:
with patch.object(GraphDatabase, 'driver'), patch.object(Neo4jProxy, '_execute_cypher_query') as mock_execute:
mock_execute.return_value.single.return_value = {
'user_record': {
'employee_type': 'teamMember',
'full_name': 'test_full_name',
'is_active': 'True',
'github_username': 'test-github',
'slack_id': 'test_id',
'last_name': 'test_last_name',
'first_name': 'test_first_name',
'team_name': 'test_team',
'email': 'test_email',
},
'manager_record': {
'full_name': 'test_manager_fullname'
}
}
neo4j_proxy = Neo4jProxy(host='DOES_NOT_MATTER', port=0000)
neo4j_user = neo4j_proxy.get_user_detail(user_id='test_email')
self.assertEquals(neo4j_user.email, 'test_email')
def test_get_resources_by_user_relation(self) -> None:
with patch.object(GraphDatabase, 'driver'), patch.object(Neo4jProxy, '_execute_cypher_query') as mock_execute:
mock_execute.return_value = [
{
'tbl': {
'name': 'table_name'
},
'db': {
'name': 'db_name'
},
'clstr': {
'name': 'cluster'
},
'schema': {
'name': 'schema'
},
}
]
neo4j_proxy = Neo4jProxy(host='DOES_NOT_MATTER', port=0000)
result = neo4j_proxy.get_table_by_user_relation(user_email='test_user',
relation_type=UserResourceRel.follow)
self.assertEqual(len(result['table']), 1)
self.assertEqual(result['table'][0].name, 'table_name')
self.assertEqual(result['table'][0].database, 'db_name')
self.assertEqual(result['table'][0].cluster, 'cluster')
self.assertEqual(result['table'][0].schema, 'schema')
def test_add_resource_relation_by_user(self) -> None:
with patch.object(GraphDatabase, 'driver') as mock_driver:
mock_session = MagicMock()
mock_driver.return_value.session.return_value = mock_session
mock_transaction = MagicMock()
mock_session.begin_transaction.return_value = mock_transaction
mock_run = MagicMock()
mock_transaction.run = mock_run
mock_commit = MagicMock()
mock_transaction.commit = mock_commit
neo4j_proxy = Neo4jProxy(host='DOES_NOT_MATTER', port=0000)
neo4j_proxy.add_table_relation_by_user(table_uri='dummy_uri',
user_email='tester',
relation_type=UserResourceRel.follow)
self.assertEquals(mock_run.call_count, 2)
self.assertEquals(mock_commit.call_count, 1)
def test_delete_resource_relation_by_user(self) -> None:
with patch.object(GraphDatabase, 'driver') as mock_driver:
mock_session = MagicMock()
mock_driver.return_value.session.return_value = mock_session
mock_transaction = MagicMock()
mock_session.begin_transaction.return_value = mock_transaction
mock_run = MagicMock()
mock_transaction.run = mock_run
mock_commit = MagicMock()
mock_transaction.commit = mock_commit
neo4j_proxy = Neo4jProxy(host='DOES_NOT_MATTER', port=0000)
neo4j_proxy.delete_table_relation_by_user(table_uri='dummy_uri',
user_email='tester',
relation_type=UserResourceRel.follow)
self.assertEquals(mock_run.call_count, 1)
self.assertEquals(mock_commit.call_count, 1)
def test_get_invalid_user(self) -> None:
with patch.object(GraphDatabase, 'driver'), patch.object(Neo4jProxy, '_execute_cypher_query') as mock_execute:
mock_execute.return_value.single.return_value = None
neo4j_proxy = Neo4jProxy(host='DOES_NOT_MATTER', port=0000)
self.assertRaises(NotFoundException, neo4j_proxy.get_user_detail, user_id='invalid_email')
if __name__ == '__main__':
unittest.main()
|
py | b4053581d274eacf06eacc3d26ee4485eff8288c | __author__ = 'Aaron Yang'
__email__ = '[email protected]'
__date__ = '6/30/2020 10:34 AM'
"""
https://zhuanlan.zhihu.com/p/69858335
"""
def topoSort(graph):
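    """Kahn's algorithm: return a topological ordering of the vertices of `graph`
    (a dict mapping each vertex to an iterable of its successors), or None if the
    graph contains a cycle."""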
    in_degrees = dict((u,0) for u in graph)  # initialize the in-degree of every vertex to 0
    num = len(in_degrees)
    for u in graph:
        for v in graph[u]:
            in_degrees[v] += 1  # count the in-degree of each vertex
    Q = [u for u in in_degrees if in_degrees[u] == 0]  # collect the vertices whose in-degree is 0
    Seq = []
    while Q:
        u = Q.pop()  # pop() removes from the end of the list by default
        Seq.append(u)
        for v in graph[u]:
            in_degrees[v] -= 1  # remove all outgoing edges of u
            if in_degrees[v] == 0:
                Q.append(v)  # collect vertices whose in-degree has just become 0
    if len(Seq) == num:  # does the number of output vertices equal the number of vertices in the graph?
return Seq
else:
return None
G = {
'a':'bf',
'b':'cdf',
'c':'d',
'd':'ef',
'e':'f',
'f':''
}
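# With this graph the call below is expected to print ['a', 'b', 'c', 'd', 'e', 'f']
# (vertices come out in dict insertion order here; other graphs may admit several
# valid topological orders).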
print(topoSort(G)) |
py | b405372da2327e7d0a6ff63445ba6e9bf4304f55 | import setuptools
import os
from setuptools import setup
loc = os.path.dirname(os.path.abspath(__file__))
with open(loc + '/README.md') as readme:
info = readme.read()
with open(loc + '/requirements.txt') as f:
required = f.read().splitlines()
setup(
name='dadvisor',
version='1.0',
description=info,
author='Patrick Vogel',
author_email='[email protected]',
packages=setuptools.find_packages(),
install_requires=required,
url='https://github.com/dadvisor/core',
) |
py | b40537ae34104e2a74449e52203a6b6a602983f9 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from .dnc import DNC
from .ffsam import FFSAM
from .sdnc import SDNC
from .sam import SAM
from .memory import Memory
from .sparse_memory import SparseMemory
from .sparse_temporal_memory import SparseTemporalMemory
|
py | b40538fc0f11b678d5a99f36e81f91f85fc1efc4 | def exchange(array, index):
current_list1 = []
current_list2 = []
current_list = []
if int(index) < 0 or int(index) > (len(array) - 1):
print("Invalid index")
return array
elif int(index) < (len(array) - 1):
current_list1 = array[int(index) + 1:]
current_list2 = array[:int(index) + 1]
current_list = current_list1 + current_list2
return current_list
elif int(index) == (len(array) - 1):
current_list = array
return current_list
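# Example: exchange(['1', '2', '3', '4'], '1') splits after index 1 and returns
# ['3', '4', '1', '2']; an out-of-range index prints "Invalid index" and returns
# the array unchanged.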
def max_min_even_odd(array, number_type, type_):
list_elements = []
max_number = 0
min_number = 1001
is_valid = False
for index, ele in enumerate(array, 0):
if number_type == 'even':
if type_ == "max":
if int(ele) % 2 == 0 and max_number <= int(ele):
max_number = int(ele)
list_elements.append(int(index))
is_valid = True
elif type_ == "min":
if int(ele) % 2 == 0 and min_number >= int(ele):
min_number = int(ele)
list_elements.append(int(index))
is_valid = True
elif number_type == 'odd':
if type_ == "max":
if int(ele) % 2 != 0 and max_number <= int(ele):
max_number = int(ele)
list_elements.append(int(index))
is_valid = True
elif type_ == "min":
if int(ele) % 2 != 0 and min_number >= int(ele):
min_number = int(ele)
list_elements.append(int(index))
is_valid = True
if not is_valid:
return print('No matches')
elif type_ == "max":
max_ = max(list_elements)
return print(max_)
elif type_ == "min":
min_ = max(list_elements)
return print(min_)
def first_last_even_odd(array, type_, count, num_type):
element_list = []
len_arr = len(array)
if int(count) > len(array):
return print('Invalid count')
for ele in array:
if num_type == 'even':
if type_ == "first":
if int(ele) % 2 == 0 and len(element_list) <= int(count) - 1:
element_list.append(int(ele))
elif type_ == "last":
if int(ele) % 2 == 0:
element_list.append(int(ele))
elif num_type == 'odd':
if type_ == "first":
if int(ele) % 2 != 0 and len(element_list) <= int(count) - 1:
element_list.append(int(ele))
elif type_ == "last":
if int(ele) % 2 != 0:
element_list.append(int(ele))
if type_ == 'last' and len(element_list) >= int(count):
last_list = element_list[len(element_list) - int(count):]
return print(last_list)
else:
return print(element_list)
input_array = input().split()
command = input()
current_list = []
while command != 'end':
current_command = command.split()
if current_command[0] == 'exchange':
current_list = exchange(input_array, current_command[1])
input_array = current_list
elif current_command[0] in ['max', 'min']:
max_min_even_odd(input_array, current_command[1], current_command[0])
elif current_command[0] in ['first', 'last']:
first_last_even_odd(input_array, current_command[0], current_command[1], current_command[2])
command = input()
final_list = [int(i) for i in input_array]  # input_array holds the current state even if no exchange ran
print(final_list)
|
py | b4053ad68b98f716bda6da4d34eae1c9ba34293c | import requests
from appcfg import get_config
from gatekeeper.api.auth import adminOnly
from gatekeeper.exceptions import InternalServerError, PluginNotFoundError
config = get_config(__name__)
PLUGINMANAGER_API_URL = config["apis"]["pluginmanager"]
def _getPluginById(id):
response = requests.get(PLUGINMANAGER_API_URL + "/" + id)
if response.status_code == 404:
raise PluginNotFoundError(id)
plugin = response.json()
# Map field names
fieldNameMap = {
"uuid": "id",
"last_heartbeat_at": "lastHeartbeatAt",
"registered_at": "registeredAt",
}
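    # e.g. a raw plugin {"uuid": "p1", "last_heartbeat_at": ..., "registered_at": ...}
    # comes back as {"id": "p1", "lastHeartbeatAt": ..., "registeredAt": ...}
    # (illustrative values; only the key renaming below is taken from this code)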
for originalFieldName, newFieldName in fieldNameMap.items():
plugin[newFieldName] = plugin.pop(originalFieldName)
return plugin
@adminOnly
def getPlugins():
pluginIds = requests.get(PLUGINMANAGER_API_URL).json()
return [_getPluginById(id) for id in pluginIds]
@adminOnly
def getPluginById(id):
return _getPluginById(id)
def _validateRequestStatus(status, pluginId):
"""
Depending on a plugin manager response status code, either raise an appropriate
exception or
return.
"""
if status != 200:
if status == 404:
raise PluginNotFoundError(pluginId)
raise InternalServerError(
detail="The plugin manager replied with status code '{:d}'".format(status)
)
@adminOnly
def shutdownPluginById(id):
status = requests.delete(PLUGINMANAGER_API_URL + "/" + id).status_code
_validateRequestStatus(status, id)
@adminOnly
def changePluginStateById(id, body):
status = requests.put(
"{}/{}/lifecycle".format(PLUGINMANAGER_API_URL, id),
json={"target_state": body["targetState"]},
).status_code
_validateRequestStatus(status, id)
|
py | b4053b7777cfbdc0dd4b8bc93525c9b79ceb4853 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'Tomasz Czaja'
__version__ = '0.0.1'
import sys
import time
from pathlib import Path
import signal
import RPi.GPIO as GPIO
from PIL import Image, ImageDraw, ImageFont
from ST7789 import ST7789
from audioplayer import AudioPlayer
class RfidJukebox(object):
# Hardcoded list of files
FILES = {
'3373707988': "07. Dans Les Jardins de Baya.mp3",
'1': "01. Awaya Baka.mp3",
'2': "02. Braighe Locheil (The Brais of Loch Eil).mp3"
}
SPI_SPEED_MHZ = 80
_st7789 = ST7789(
rotation=90, # Needed to display the right way up on Pirate Audio
port=0, # SPI port
cs=1, # SPI port Chip-select channel
dc=9, # BCM pin used for data/command
backlight=13,
spi_speed_hz=SPI_SPEED_MHZ * 1000 * 1000
)
# The buttons on Pirate Audio are connected to pins 5, 6, 16 and 24
# Boards prior to 23 January 2020 used 5, 6, 16 and 20
# try changing 24 to 20 if your Y button doesn't work.
BUTTONS = [5, 6, 16, 24]
# These correspond to buttons A, B, X and Y respectively
LABELS = ['A', 'B', 'X', 'Y']
# Stuff for drawing on screen
_image = None
_draw = None
_font = None
@property
def font(self):
return self._font
@font.setter
def font(self, new_font):
self._font = new_font
# Player settings
_last_selected_key = None
_min_volume = 0
_max_volume = 100
_volume = 50
@property
def volume(self):
return self._volume
@volume.setter
def volume(self, new_volume):
self._volume = new_volume
_player = None
@property
def player(self):
return self._player
def __init__(self):
"""
Init the _player
:return: void
"""
# Set up RPi.GPIO with the "BCM" numbering scheme
GPIO.setmode(GPIO.BCM)
# Buttons connect to ground when pressed, so we should set them up
# with a "PULL UP", which weakly pulls the input signal to 3.3V.
GPIO.setup(self.BUTTONS, GPIO.IN, pull_up_down=GPIO.PUD_UP)
        # Loop through our buttons and attach the "handle_button" function to each
# We're watching the "FALLING" edge (transition from 3.3V to Ground) and
# picking a generous bouncetime of 100ms to smooth out button presses.
for pin in self.BUTTONS:
GPIO.add_event_detect(pin, GPIO.FALLING, self._handle_button, bouncetime=100)
# Get initial value - first in the dictionary
self._last_selected_key = list(self.FILES.keys())[0]
# Set image and draw objects
self._image = Image.new("RGB", (240, 240), (0, 0, 0))
self._draw = ImageDraw.Draw(self._image)
# Set font type and size
self._font = ImageFont.truetype("/home/pi/Fonts/FreeMono.ttf", 42)
# Draw default background
self._draw_background()
label_length = self._font.getsize('version')[0]
label_x_pos = int(round(240 / 2 - label_length / 2))
self._draw.text((label_x_pos, 100), 'version', font=self.font, fill=(255, 255, 255, 255))
label_length = self._font.getsize(str(__version__))[0]
label_x_pos = int(round(240 / 2 - label_length / 2))
self._draw.text((label_x_pos, 135), __version__, font=self.font, fill=(255, 255, 255, 255))
self._st7789.display(self._image)
def _get_previous_key(self):
temp = list(self.FILES.keys())
try:
key = temp[temp.index(self._last_selected_key) - 1]
except (ValueError, IndexError):
# If no more keys - use last one - this is probably obsolete
key = temp[-1]
return key
def _get_next_key(self):
temp = list(self.FILES.keys())
try:
key = temp[temp.index(self._last_selected_key) + 1]
except (ValueError, IndexError):
# If no more keys - use first one
key = temp[0]
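            # e.g. with the hard-coded FILES above, advancing past key '2' (the last
            # one) raises IndexError and playback wraps around to '3373707988'.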
return key
def _draw_background(self):
self._draw.rectangle((0, 0, 240, 240), (0, 0, 0)) # Draw background
# Draw related _image if exists
if self._player and self._last_selected_key:
picture = Path(f'/home/pi/Pictures/{self._last_selected_key}.jpg')
if picture.is_file():
with Image.open(str(picture)) as im:
im_resized = im.resize((240, 240))
self._image.paste(im_resized)
# Left navigation button
self._draw.polygon([(25, 20), (10, 30), (25, 40)], fill=(0x60, 0x60, 0x60), outline=(255, 255, 255))
self._draw.polygon([(40, 20), (25, 30), (40, 40)], fill=(0x60, 0x60, 0x60), outline=(255, 255, 255))
# Right navigation button
self._draw.polygon([(240 - 25, 20), (240 - 10, 30), (240 - 25, 40)], fill=(0x60, 0x60, 0x60),
outline=(255, 255, 255))
self._draw.polygon([(240 - 40, 20), (240 - 25, 30), (240 - 40, 40)], fill=(0x60, 0x60, 0x60),
outline=(255, 255, 255))
def play_song(self, key):
if key in self.FILES:
audio_file = Path(f'/home/pi/Music/{self.FILES[key]}')
if audio_file.is_file():
# Stop _player if running
if self._player:
self._player.stop()
# Play audio file
print(f"Playing {audio_file.name} ({key})")
self._last_selected_key = key
self._draw_background()
self._draw.text((100, 140), str(key), font=self.font, fill=(255, 255, 255, 255))
self._st7789.display(self._image)
self._player = AudioPlayer(f"Music/{audio_file.name}")
self._player.volume = self._volume
self._player.play()
def play_next_song(self):
next_key = self._get_next_key()
self.play_song(next_key)
def play_previous_song(self):
prev_key = self._get_previous_key()
self.play_song(prev_key)
def _draw_volume_indicators(self, new_volume):
self._draw_background()
label_length = self._font.getsize(str(new_volume))[0]
label_x_pos = int(round(240 / 2 - label_length / 2)) # Volume label start pos
self._draw.text((label_x_pos, 140), str(new_volume), font=self._font,
fill=(255, 255, 255, 255)) # Draw _volume label
volume_bar_x = int(round(10 + (220 * new_volume / self._max_volume)))
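        # e.g. new_volume == 50 with _max_volume == 100 gives volume_bar_x = 10 + 110 = 120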
self._draw.rectangle((10, 200, volume_bar_x, 210), (0x30, 0x30, 0x30)) # Draw _volume bar
self._st7789.display(self._image)
# "handle_button" will be called every time a button is pressed
# It receives one argument: the associated input pin.
def _handle_button(self, pin):
label = self.LABELS[self.BUTTONS.index(pin)]
print("Button press detected on pin: {} label: {}".format(pin, label))
if label == 'B':
# Decrease volume
new_volume = self._volume - 10
if new_volume < self._min_volume:
new_volume = self._min_volume
self._volume = new_volume # Store _volume for new instances of _player
# Draw value and _volume bar
self._draw_volume_indicators(new_volume)
# Set new volume for player
if self._player:
self._player.volume = new_volume
elif label == 'Y':
# Increase volume
new_volume = self._volume + 10
if new_volume > self._max_volume:
new_volume = self._max_volume
self._volume = new_volume # Store _volume for new instances of _player
# Draw value and _volume bar
self._draw_volume_indicators(new_volume)
# Set new volume for player
if self._player:
self._player.volume = new_volume
elif label == 'A':
# Play previous song
self.play_previous_song()
message = "Prev song"
self._draw_background()
self._draw.text((10, 140), message, font=self._font, fill=(255, 255, 255, 255))
self._st7789.display(self._image)
elif label == 'X':
# Play next song
self.play_next_song()
message = "Next song"
self._draw_background()
self._draw.text((10, 140), message, font=self._font, fill=(255, 255, 255, 255))
self._st7789.display(self._image)
# Press the green button in the gutter to run the script.
if __name__ == '__main__':
jukebox = RfidJukebox()
while True:
try:
value = input("Enter song key:\n")
if value.isdigit():
jukebox.play_song(value)
time.sleep(0.3)
except KeyboardInterrupt:
if jukebox.player:
jukebox.player.stop()
print("Bye")
sys.exit()
|
py | b4053c0004601d1fb437338d6cad3236528912e4 | """Test structuring of collections and primitives."""
from typing import Any, Dict, FrozenSet, List, MutableSet, Optional, Set, Tuple, Union
import attr
from hypothesis import assume, given
from hypothesis.strategies import (
binary,
booleans,
data,
floats,
frozensets,
integers,
just,
lists,
one_of,
sampled_from,
sets,
text,
tuples,
)
from pytest import raises
from cattrs import BaseConverter
from cattrs._compat import copy_with, is_bare, is_union_type
from cattrs.errors import IterableValidationError, StructureHandlerNotFoundError
from ._compat import change_type_param
from .untyped import (
dicts_of_primitives,
enums_of_primitives,
lists_of_primitives,
primitive_strategies,
seqs_of_primitives,
)
NoneType = type(None)
ints_and_type = tuples(integers(), just(int))
floats_and_type = tuples(floats(allow_nan=False), just(float))
strs_and_type = tuples(text(), just(str))
bytes_and_type = tuples(binary(), just(bytes))
primitives_and_type = one_of(
ints_and_type, floats_and_type, strs_and_type, bytes_and_type
)
mut_set_types = sampled_from([Set, MutableSet])
set_types = one_of(mut_set_types, just(FrozenSet))
def create_generic_type(generic_types, param_type):
"""Create a strategy for generating parameterized generic types."""
return one_of(
generic_types,
generic_types.map(lambda t: t[Any]),
generic_types.map(lambda t: t[param_type]),
)
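# e.g. create_generic_type(just(FrozenSet), int) draws FrozenSet, FrozenSet[Any],
# or FrozenSet[int] (bare, Any-parameterized, or concretely parameterized).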
mut_sets_of_primitives = primitive_strategies.flatmap(
lambda e: tuples(sets(e[0]), create_generic_type(mut_set_types, e[1]))
)
frozen_sets_of_primitives = primitive_strategies.flatmap(
lambda e: tuples(frozensets(e[0]), create_generic_type(just(FrozenSet), e[1]))
)
sets_of_primitives = one_of(mut_sets_of_primitives, frozen_sets_of_primitives)
@given(primitives_and_type)
def test_structuring_primitives(primitive_and_type):
"""Test just structuring a primitive value."""
converter = BaseConverter()
val, t = primitive_and_type
assert converter.structure(val, t) == val
assert converter.structure(val, Any) == val
@given(seqs_of_primitives)
def test_structuring_seqs(seq_and_type):
"""Test structuring sequence generic types."""
converter = BaseConverter()
iterable, t = seq_and_type
converted = converter.structure(iterable, t)
for x, y in zip(iterable, converted):
assert x == y
@given(sets_of_primitives, set_types)
def test_structuring_sets(set_and_type, set_type):
"""Test structuring generic sets."""
converter = BaseConverter()
set_, input_set_type = set_and_type
if input_set_type not in (Set, FrozenSet, MutableSet):
set_type = set_type[input_set_type.__args__[0]]
converted = converter.structure(set_, set_type)
assert converted == set_
# Set[int] can't be used with isinstance any more.
non_generic = set_type.__origin__ if set_type.__origin__ is not None else set_type
assert isinstance(converted, non_generic)
converted = converter.structure(set_, Any)
assert converted == set_
assert isinstance(converted, type(set_))
@given(sets_of_primitives)
def test_stringifying_sets(set_and_type):
"""Test structuring generic sets and converting the contents to str."""
converter = BaseConverter()
set_, input_set_type = set_and_type
if is_bare(input_set_type):
input_set_type = input_set_type[str]
else:
input_set_type = copy_with(input_set_type, str)
converted = converter.structure(set_, input_set_type)
assert len(converted) == len(set_)
for e in set_:
assert str(e) in converted
@given(lists(primitives_and_type, min_size=1), booleans())
def test_structuring_hetero_tuples(list_of_vals_and_types, detailed_validation):
"""Test structuring heterogenous tuples."""
converter = BaseConverter(detailed_validation=detailed_validation)
types = tuple(e[1] for e in list_of_vals_and_types)
vals = [e[0] for e in list_of_vals_and_types]
t = Tuple[types]
converted = converter.structure(vals, t)
assert isinstance(converted, tuple)
for x, y in zip(vals, converted):
assert x == y
for x, y in zip(types, converted):
assert isinstance(y, x)
@given(lists(primitives_and_type))
def test_stringifying_tuples(list_of_vals_and_types):
"""Stringify all elements of a heterogeneous tuple."""
converter = BaseConverter()
vals = [e[0] for e in list_of_vals_and_types]
t = Tuple[(str,) * len(list_of_vals_and_types)]
converted = converter.structure(vals, t)
assert isinstance(converted, tuple)
for x, y in zip(vals, converted):
assert str(x) == y
for x in converted:
# this should just be unicode, but in python2, '' is not unicode
assert isinstance(x, str)
@given(dicts_of_primitives)
def test_structuring_dicts(dict_and_type):
converter = BaseConverter()
d, t = dict_and_type
converted = converter.structure(d, t)
assert converted == d
assert converted is not d
@given(dicts_of_primitives, data())
def test_structuring_dicts_opts(dict_and_type, data):
"""Structure dicts, but with optional primitives."""
converter = BaseConverter()
d, t = dict_and_type
assume(not is_bare(t))
t.__args__ = (t.__args__[0], Optional[t.__args__[1]])
d = {k: v if data.draw(booleans()) else None for k, v in d.items()}
converted = converter.structure(d, t)
assert converted == d
assert converted is not d
@given(dicts_of_primitives)
def test_stringifying_dicts(dict_and_type):
converter = BaseConverter()
d, t = dict_and_type
converted = converter.structure(d, Dict[str, str])
for k, v in d.items():
assert converted[str(k)] == str(v)
@given(primitives_and_type)
def test_structuring_optional_primitives(primitive_and_type):
"""Test structuring Optional primitive types."""
converter = BaseConverter()
val, type = primitive_and_type
assert converter.structure(val, Optional[type]) == val
assert converter.structure(None, Optional[type]) is None
@given(lists_of_primitives().filter(lambda lp: not is_bare(lp[1])), booleans())
def test_structuring_lists_of_opt(list_and_type, detailed_validation: bool):
"""Test structuring lists of Optional primitive types."""
converter = BaseConverter(detailed_validation=detailed_validation)
l, t = list_and_type
l.append(None)
args = t.__args__
is_optional = args[0] is Optional or (
is_union_type(args[0])
and len(args[0].__args__) == 2
and args[0].__args__[1] is NoneType
)
if not is_bare(t) and (args[0] not in (Any, str) and not is_optional):
with raises(
(TypeError, ValueError)
if not detailed_validation
else IterableValidationError
):
converter.structure(l, t)
optional_t = Optional[args[0]]
# We want to create a generic type annotation with an optional
# type parameter.
t = change_type_param(t, optional_t)
converted = converter.structure(l, t)
for x, y in zip(l, converted):
assert x == y
t.__args__ = args
@given(lists_of_primitives())
def test_stringifying_lists_of_opt(list_and_type):
"""Test structuring Optional primitive types into strings."""
converter = BaseConverter()
l, t = list_and_type
l.append(None)
converted = converter.structure(l, List[Optional[str]])
for x, y in zip(l, converted):
if x is None:
assert x is y
else:
assert str(x) == y
@given(lists(integers()))
def test_structuring_primitive_union_hook(ints):
"""Registering a union loading hook works."""
converter = BaseConverter()
def structure_hook(val, cl):
"""Even ints are passed through, odd are stringified."""
return val if val % 2 == 0 else str(val)
converter.register_structure_hook(Union[str, int], structure_hook)
converted = converter.structure(ints, List[Union[str, int]])
for x, y in zip(ints, converted):
if x % 2 == 0:
assert x == y
else:
assert str(x) == y
def test_structure_hook_func():
"""testing the hook_func method"""
converter = BaseConverter()
def can_handle(cls):
return cls.__name__.startswith("F")
def handle(obj, cls):
return "hi"
class Foo(object):
pass
class Bar(object):
pass
converter.register_structure_hook_func(can_handle, handle)
assert converter.structure(10, Foo) == "hi"
with raises(StructureHandlerNotFoundError) as exc:
converter.structure(10, Bar)
assert exc.value.type_ is Bar
@given(data(), enums_of_primitives())
def test_structuring_enums(data, enum):
"""Test structuring enums by their values."""
converter = BaseConverter()
val = data.draw(sampled_from(list(enum)))
assert converter.structure(val.value, enum) == val
def test_structuring_unsupported():
"""Loading unsupported classes should throw."""
converter = BaseConverter()
with raises(StructureHandlerNotFoundError) as exc:
converter.structure(1, BaseConverter)
assert exc.value.type_ is BaseConverter
with raises(StructureHandlerNotFoundError) as exc:
converter.structure(1, Union[int, str])
assert exc.value.type_ is Union[int, str]
def test_subclass_registration_is_honored():
"""If a subclass is registered after a superclass,
that subclass handler should be dispatched for
structure
"""
converter = BaseConverter()
class Foo(object):
def __init__(self, value):
self.value = value
class Bar(Foo):
pass
converter.register_structure_hook(Foo, lambda obj, cls: cls("foo"))
assert converter.structure(None, Foo).value == "foo"
assert converter.structure(None, Bar).value == "foo"
converter.register_structure_hook(Bar, lambda obj, cls: cls("bar"))
assert converter.structure(None, Foo).value == "foo"
assert converter.structure(None, Bar).value == "bar"
def test_structure_union_edge_case():
converter = BaseConverter()
@attr.s(auto_attribs=True)
class A:
a1: Any
a2: Optional[Any] = None
@attr.s(auto_attribs=True)
class B:
b1: Any
b2: Optional[Any] = None
assert converter.structure([{"a1": "foo"}, {"b1": "bar"}], List[Union[A, B]]) == [
A("foo"),
B("bar"),
]
|
py | b4053c658d7c656b45e676d63152f99a1dd334c4 | ## The path where the dataset is present
DATASET_PATH = './datasets/cell_images/'
## The path to store the train images
TRAIN_PATH = './datasets/cimages_train'
## The path to store the validation images
VAL_PATH = './datasets/cimages_val'
## The path to store the test images
TEST_PATH = './datasets/cimages_test'
## The ratio of data to go into training set
TRAIN_SPLIT = 0.8
## The ratio of data to go into validation set
VAL_SPLIT = 0.1
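## (The remaining fraction, 1 - TRAIN_SPLIT - VAL_SPLIT = 0.1, presumably ends up in
## the test set under TEST_PATH; this is an inference from the two ratios above, not
## a setting read elsewhere in this file.)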
## The number of epochs
EPOCHS = 50
## The learning rate
LR = 1e-1
## The default batch size
BS = 64
|
py | b4053e6a49a2d2dda8ef7eb7bf5c30050de097fd | import torch
import torch.nn as nn
import torch.nn.functional as F
from models.accelerated_shift_net.accelerated_InnerShiftTriple import AcceleratedInnerShiftTriple
from models.shift_net.InnerCos import InnerCos
from models.shift_net.InnerShiftTriple import InnerShiftTriple
from models.soft_shift_net.innerSoftShiftTriple import InnerSoftShiftTriple
from .unet import UnetSkipConnectionBlock
from .modules import *
################################### *************************** #####################################
################################### This the original Shift_net #####################################
################################### *************************** #####################################
# Defines the Unet generator.
# |num_downs|: number of downsamplings in UNet. For example,
# if |num_downs| == 7, image of size 128x128 will become of size 1x1
# at the bottleneck
class UnetGeneratorShiftTriple(nn.Module):
def __init__(self, input_nc, output_nc, num_downs, opt, innerCos_list, shift_list, mask_global, ngf=64,
norm_layer=nn.BatchNorm2d, use_dropout=False):
super(UnetGeneratorShiftTriple, self).__init__()
# construct unet structure
unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=None, norm_layer=norm_layer, innermost=True)
        for i in range(num_downs - 5): # The number of inner layers is 3 (spatial size: 512*512) in the unet_256 case.
unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer, use_dropout=use_dropout)
unet_block = UnetSkipConnectionBlock(ngf * 4, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
unet_shift_block = UnetSkipConnectionShiftTriple(ngf * 2, ngf * 4, opt, innerCos_list, shift_list, mask_global, input_nc=None, \
submodule=unet_block, norm_layer=norm_layer) # passing in unet_shift_block
unet_block = UnetSkipConnectionBlock(ngf, ngf * 2, input_nc=None, submodule=unet_shift_block, norm_layer=norm_layer)
unet_block = UnetSkipConnectionBlock(output_nc, ngf, input_nc=input_nc, submodule=unet_block, outermost=True, norm_layer=norm_layer)
self.model = unet_block
def forward(self, input):
return self.model(input)
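# Small illustrative helper (ours, not part of the original Shift-Net code): it just
# sketches the size bookkeeping described above UnetGeneratorShiftTriple -- each
# UnetSkipConnectionBlock halves H and W, so num_downs == 7 takes a 128x128 input
# down to a 1x1 bottleneck.
def _bottleneck_size(input_size=128, num_downs=7):
    size = input_size
    for _ in range(num_downs):
        size //= 2
    return size  # _bottleneck_size(128, 7) == 1, _bottleneck_size(256, 8) == 1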
# Note: the TripleBlock differs in its `upconv` definition.
# 'cos' means that we add an `innerCos` layer in the block.
class UnetSkipConnectionShiftTriple(nn.Module):
def __init__(self, outer_nc, inner_nc, opt, innerCos_list, shift_list, mask_global, input_nc, \
submodule=None, shift_layer=None, outermost=False, innermost=False, norm_layer=nn.BatchNorm2d, use_dropout=False):
super(UnetSkipConnectionShiftTriple, self).__init__()
self.outermost = outermost
if input_nc is None:
input_nc = outer_nc
downconv = nn.Conv2d(input_nc, inner_nc, kernel_size=4,
stride=2, padding=1)
downrelu = nn.LeakyReLU(0.2, True)
downnorm = norm_layer(inner_nc, affine=True)
uprelu = nn.ReLU(True)
upnorm = norm_layer(outer_nc, affine=True)
        # As the downconv layer takes outer_nc in and produces inner_nc out,
        # the shift is defined like this:
shift = InnerShiftTriple(opt.fixed_mask, opt.shift_sz, opt.stride, opt.mask_thred, opt.triple_weight)
shift.set_mask(mask_global, 3)
shift_list.append(shift)
# Add latent constraint
# Then add the constraint to the constrain layer list!
innerCos = InnerCos(strength=opt.strength, skip=opt.skip)
innerCos.set_mask(mask_global, 3) # Here we need to set mask for innerCos layer too.
innerCos_list.append(innerCos)
# Different position only has differences in `upconv`
# for the outermost, the special is `tanh`
if outermost:
upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,
kernel_size=4, stride=2,
padding=1)
down = [downconv]
up = [uprelu, upconv, nn.Tanh()]
model = down + [submodule] + up
# for the innermost, the special is `inner_nc` instead of `inner_nc*2`
elif innermost:
upconv = nn.ConvTranspose2d(inner_nc, outer_nc,
kernel_size=4, stride=2,
padding=1)
down = [downrelu, downconv] # for the innermost, no submodule, and delete the bn
up = [uprelu, upconv, upnorm]
model = down + up
# else, the normal
else:
# shift triple differs in here. It is `*3` not `*2`.
upconv = nn.ConvTranspose2d(inner_nc * 3, outer_nc,
kernel_size=4, stride=2,
padding=1)
down = [downrelu, downconv, downnorm]
# shift should be placed after uprelu
            # NB: innerCos is placed before shift, so the latent gradient needs to be
            # added to the former part.
up = [uprelu, innerCos, shift, innerCos, upconv, upnorm]
if use_dropout:
model = down + [submodule] + up + [nn.Dropout(0.5)]
else:
model = down + [submodule] + up
self.model = nn.Sequential(*model)
def forward(self, x):
if self.outermost: # if it is the outermost, directly pass the input in.
return self.model(x)
else:
x_latter = self.model(x)
_, _, h, w = x.size()
if h != x_latter.size(2) or w != x_latter.size(3):
x_latter = F.interpolate(x_latter, (h, w), mode='bilinear')
return torch.cat([x_latter, x], 1) # cat in the C channel
################################### *************************** #####################################
################################### This the accelerated Shift_net #####################################
################################### *************************** #####################################
# Defines the Unet generator.
# |num_downs|: number of downsamplings in UNet. For example,
# if |num_downs| == 7, image of size 128x128 will become of size 1x1
# at the bottleneck
class AcceleratedUnetGeneratorShiftTriple(nn.Module):
def __init__(self, input_nc, output_nc, num_downs, opt, innerCos_list, shift_list, mask_global, ngf=64,
norm_layer=nn.BatchNorm2d, use_dropout=False):
super(AcceleratedUnetGeneratorShiftTriple, self).__init__()
# construct unet structure
unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=None, norm_layer=norm_layer,
innermost=True)
print(unet_block)
        for i in range(num_downs - 5): # The number of inner layers is 3 (spatial size: 512*512) in the unet_256 case.
unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=unet_block,
norm_layer=norm_layer, use_dropout=use_dropout)
unet_block = UnetSkipConnectionBlock(ngf * 4, ngf * 8, input_nc=None, submodule=unet_block,
norm_layer=norm_layer)
unet_shift_block = AcceleratedUnetSkipConnectionShiftTriple(ngf * 2, ngf * 4, opt, innerCos_list, shift_list,
mask_global, input_nc=None, \
submodule=unet_block,
norm_layer=norm_layer) # passing in unet_shift_block
unet_block = UnetSkipConnectionBlock(ngf, ngf * 2, input_nc=None, submodule=unet_shift_block,
norm_layer=norm_layer)
unet_block = UnetSkipConnectionBlock(output_nc, ngf, input_nc=input_nc, submodule=unet_block, outermost=True,
norm_layer=norm_layer)
self.model = unet_block
def forward(self, input):
return self.model(input)
# Note: the TripleBlock differs in its `upconv` definition.
# 'cos' means that we add an `innerCos` layer in the block.
class AcceleratedUnetSkipConnectionShiftTriple(nn.Module):
def __init__(self, outer_nc, inner_nc, opt, innerCos_list, shift_list, mask_global, input_nc, \
submodule=None, shift_layer=None, outermost=False, innermost=False, norm_layer=nn.BatchNorm2d,
use_dropout=False):
super(AcceleratedUnetSkipConnectionShiftTriple, self).__init__()
self.outermost = outermost
if input_nc is None:
input_nc = outer_nc
downconv = nn.Conv2d(input_nc, inner_nc, kernel_size=4,
stride=2, padding=1)
downrelu = nn.LeakyReLU(0.2, True)
downnorm = norm_layer(inner_nc, affine=True)
uprelu = nn.ReLU(True)
upnorm = norm_layer(outer_nc, affine=True)
        # As the downconv layer takes outer_nc in and produces inner_nc out,
        # the shift is defined like this:
shift = AcceleratedInnerShiftTriple(opt.fixed_mask, opt.shift_sz, opt.stride, opt.mask_thred,
opt.triple_weight)
shift.set_mask(mask_global, 3)
shift_list.append(shift)
# Add latent constraint
# Then add the constraint to the constrain layer list!
innerCos = InnerCos(strength=opt.strength, skip=opt.skip)
innerCos.set_mask(mask_global, 3) # Here we need to set mask for innerCos layer too.
innerCos_list.append(innerCos)
# Different position only has differences in `upconv`
# for the outermost, the special is `tanh`
if outermost:
upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,
kernel_size=4, stride=2,
padding=1)
down = [downconv]
up = [uprelu, upconv, nn.Tanh()]
model = down + [submodule] + up
# for the innermost, the special is `inner_nc` instead of `inner_nc*2`
elif innermost:
upconv = nn.ConvTranspose2d(inner_nc, outer_nc,
kernel_size=4, stride=2,
padding=1)
down = [downrelu, downconv] # for the innermost, no submodule, and delete the bn
up = [uprelu, upconv, upnorm]
model = down + up
# else, the normal
else:
# shift triple differs in here. It is `*3` not `*2`.
upconv = nn.ConvTranspose2d(inner_nc * 3, outer_nc,
kernel_size=4, stride=2,
padding=1)
down = [downrelu, downconv, downnorm]
# shift should be placed after uprelu
            # NB: innerCos is placed before shift, so the latent gradient needs to be
            # added to the former part.
up = [uprelu, innerCos, shift, upconv, upnorm]
if use_dropout:
model = down + [submodule] + up + [nn.Dropout(0.5)]
else:
model = down + [submodule] + up
self.model = nn.Sequential(*model)
def forward(self, x):
if self.outermost: # if it is the outermost, directly pass the input in.
return self.model(x)
else:
x_latter = self.model(x)
_, _, h, w = x.size()
if h != x_latter.size(2) or w != x_latter.size(3):
x_latter = F.interpolate(x_latter, (h, w), mode='bilinear')
return torch.cat([x_latter, x], 1) # cat in the C channel
class SoftUnetGeneratorShiftTriple(nn.Module):
def __init__(self, input_nc, output_nc, num_downs, opt, innerCos_list, shift_list, mask_global, ngf=64,
norm_layer=nn.BatchNorm2d, use_dropout=False):
super(SoftUnetGeneratorShiftTriple, self).__init__()
# construct unet structure
unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=None, norm_layer=norm_layer,
innermost=True)
print(unet_block)
        for i in range(num_downs - 5): # The number of inner layers is 3 (spatial size: 512*512) in the unet_256 case.
unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=unet_block,
norm_layer=norm_layer, use_dropout=use_dropout)
unet_block = UnetSkipConnectionBlock(ngf * 4, ngf * 8, input_nc=None, submodule=unet_block,
norm_layer=norm_layer)
unet_shift_block = SoftUnetSkipConnectionBlock(ngf * 2, ngf * 4, opt, innerCos_list, shift_list,
mask_global, input_nc=None, \
submodule=unet_block,
norm_layer=norm_layer, shift_layer=True) # passing in unet_shift_block
unet_block = UnetSkipConnectionBlock(ngf, ngf * 2, input_nc=None, submodule=unet_shift_block,
norm_layer=norm_layer)
unet_block = UnetSkipConnectionBlock(output_nc, ngf, input_nc=input_nc, submodule=unet_block, outermost=True,
norm_layer=norm_layer)
self.model = unet_block
def forward(self, input):
return self.model(input)
# construct network from the inside to the outside.
# Defines the submodule with skip connection.
# X -------------------identity---------------------- X
# |-- downsampling -- |submodule| -- upsampling --|
class SoftUnetSkipConnectionBlock(nn.Module):
def __init__(self, outer_nc, inner_nc, opt, innerCos_list, shift_list, mask_global, input_nc, \
submodule=None, shift_layer=None, outermost=False, innermost=False, norm_layer=nn.BatchNorm2d, use_dropout=False):
super(SoftUnetSkipConnectionBlock, self).__init__()
self.outermost = outermost
if input_nc is None:
input_nc = outer_nc
downconv = nn.Conv2d(input_nc, inner_nc, kernel_size=4,
stride=2, padding=1)
downrelu = nn.LeakyReLU(0.2, True)
downnorm = norm_layer(inner_nc, affine=True)
uprelu = nn.ReLU(True)
upnorm = norm_layer(outer_nc, affine=True)
        # As the downconv layer takes outer_nc in and produces inner_nc out,
        # the shift is defined like this:
shift = InnerSoftShiftTriple(opt.fixed_mask, opt.shift_sz, opt.stride, opt.mask_thred, opt.triple_weight)
shift.set_mask(mask_global, 3)
shift_list.append(shift)
# Add latent constraint
# Then add the constraint to the constrain layer list!
innerCosBefore = InnerCos(strength=opt.strength, skip=opt.skip)
innerCosBefore.set_mask(mask_global, 3) # Here we need to set mask for innerCos layer too.
innerCos_list.append(innerCosBefore)
innerCosAfter = InnerCos(strength=opt.strength, skip=opt.skip)
innerCosAfter.set_mask(mask_global, 3) # Here we need to set mask for innerCos layer too.
innerCos_list.append(innerCosAfter)
# Different position only has differences in `upconv`
# for the outermost, the special is `tanh`
if outermost:
upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,
kernel_size=4, stride=2,
padding=1)
down = [downconv]
up = [uprelu, upconv, nn.Tanh()]
model = down + [submodule] + up
# for the innermost, the special is `inner_nc` instead of `inner_nc*2`
elif innermost:
upconv = nn.ConvTranspose2d(inner_nc, outer_nc,
kernel_size=4, stride=2,
padding=1)
down = [downrelu, downconv] # for the innermost, no submodule, and delete the bn
up = [uprelu, upconv, upnorm]
model = down + up
# else, the normal
else:
# shift triple differs in here. It is `*3` not `*2`.
upconv = nn.ConvTranspose2d(inner_nc * 3, outer_nc,
kernel_size=4, stride=2,
padding=1)
down = [downrelu, downconv, downnorm]
# shift should be placed after uprelu
            # NB: innerCos is placed before shift, so the latent gradient needs to be
            # added to the former part.
up = [uprelu, innerCosBefore, shift, innerCosAfter, upconv, upnorm]
if use_dropout:
model = down + [submodule] + up + [nn.Dropout(0.5)]
else:
model = down + [submodule] + up
self.model = nn.Sequential(*model)
def forward(self, x):
if self.outermost: # if it is the outermost, directly pass the input in.
return self.model(x)
else:
x_latter = self.model(x)
_, _, h, w = x.size()
if h != x_latter.size(2) or w != x_latter.size(3):
x_latter = F.interpolate(x_latter, (h, w), mode='bilinear')
return torch.cat([x_latter, x], 1) # cat in the C channel
class InceptionUnetGeneratorShiftTriple(nn.Module):
def __init__(self, input_nc, output_nc, num_downs, opt, innerCos_list, shift_list, mask_global, ngf=64,
norm_layer=nn.BatchNorm2d, use_dropout=False):
super(InceptionUnetGeneratorShiftTriple, self).__init__()
# construct unet structure
unet_block = InceptionUnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=None, norm_layer=norm_layer,
innermost=True)
        for i in range(num_downs - 5): # The number of inner layers is 3 (spatial size: 512*512) in the unet_256 case.
unet_block = InceptionUnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=unet_block,
norm_layer=norm_layer, use_dropout=use_dropout)
unet_block = InceptionUnetSkipConnectionBlock(ngf * 4, ngf * 8, input_nc=None, submodule=unet_block,
norm_layer=norm_layer)
unet_shift_block = InceptionShiftUnetSkipConnectionBlock(ngf * 2, ngf * 4, opt=opt, innerCos_list=innerCos_list, shift_list=shift_list,
mask_global=mask_global, input_nc=None, \
submodule=unet_block,
norm_layer=norm_layer, shift_layer=True) # passing in unet_shift_block # innerCos_list=None, shift_list=None, mask_global=None, input_nc=None, opt=None,\submodule=None, shift_layer=False, outermost=False, innermost=False, norm_layer=nn.BatchNorm2d, use_dropout=False
unet_block = InceptionUnetSkipConnectionBlock(ngf, ngf * 2, input_nc=None, submodule=unet_shift_block,
norm_layer=norm_layer)
unet_block = InceptionUnetSkipConnectionBlock(output_nc, ngf, input_nc=input_nc, submodule=unet_block, outermost=True,
norm_layer=norm_layer)
self.model = unet_block
def forward(self, input):
return self.model(input)
# construct network from the inside to the outside.
# Defines the submodule with skip connection.
# X -------------------identity---------------------- X
# |-- downsampling -- |submodule| -- upsampling --|
class InceptionUnetSkipConnectionBlock(nn.Module):
def __init__(self, outer_nc, inner_nc, input_nc,
submodule=None, outermost=False, innermost=False, norm_layer=nn.BatchNorm2d, use_dropout=False):
super(InceptionUnetSkipConnectionBlock, self).__init__()
self.outermost = outermost
if input_nc is None:
input_nc = outer_nc
downconv = InceptionDown(input_nc, inner_nc) # nn.Conv2d(input_nc, inner_nc, kernel_size=4,stride=2, padding=1)
downrelu = nn.LeakyReLU(0.2, True)
downnorm = norm_layer(inner_nc, affine=True)
uprelu = nn.ReLU(True)
upnorm = norm_layer(outer_nc, affine=True)
# Different position only has differences in `upconv`
# for the outermost, the special is `tanh`
if outermost:
upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,kernel_size=4, stride=2,padding=1)
downconv = nn.Conv2d(input_nc, inner_nc, kernel_size=4,
stride=2, padding=1)
down = [downconv]
up = [uprelu, upconv, nn.Tanh()]
model = down + [submodule] + up
# for the innermost, the special is `inner_nc` instead of `inner_nc*2`
elif innermost:
upconv = InceptionUp(inner_nc, outer_nc) #nn.ConvTranspose2d(inner_nc, outer_nc,kernel_size=4, stride=2,padding=1)
down = [downrelu, downconv] # for the innermost, no submodule, and delete the bn
up = [uprelu, upconv, upnorm]
model = down + up
# else, the normal
else:
upconv = InceptionUp(inner_nc * 2, outer_nc) #nn.ConvTranspose2d(inner_nc * 2, outer_nc,kernel_size=4, stride=2,padding=1)
down = [downrelu, downconv, downnorm]
up = [uprelu, upconv, upnorm]
if use_dropout:
model = down + [submodule] + up + [nn.Dropout(0.5)]
else:
model = down + [submodule] + up
self.model = nn.Sequential(*model)
def forward(self, x):
if self.outermost: # if it is the outermost, directly pass the input in.
return self.model(x)
else:
x_latter = self.model(x)
_, _, h, w = x.size()
if h != x_latter.size(2) or w != x_latter.size(3):
x_latter = F.interpolate(x_latter, (h, w), mode='bilinear')
return torch.cat([x_latter, x], 1) # cat in the C channel
# construct network from the inside to the outside.
# Defines the submodule with skip connection.
# X -------------------identity---------------------- X
# |-- downsampling -- |submodule| -- upsampling --|
class InceptionShiftUnetSkipConnectionBlock(nn.Module):
def __init__(self, outer_nc, inner_nc, innerCos_list=None, shift_list=None, mask_global=None, input_nc=None, opt=None,\
submodule=None, shift_layer=False, outermost=False, innermost=False, norm_layer=nn.BatchNorm2d, use_dropout=False):
super(InceptionShiftUnetSkipConnectionBlock, self).__init__()
self.outermost = outermost
if input_nc is None:
input_nc = outer_nc
if shift_layer:
            # As the downconv layer takes outer_nc in and produces inner_nc out,
            # the shift is defined like this:
shift = AcceleratedInnerShiftTriple(opt.fixed_mask, opt.shift_sz, opt.stride, opt.mask_thred, opt.triple_weight)
shift.set_mask(mask_global, 3)
shift_list.append(shift)
# Add latent constraint
# Then add the constraint to the constrain layer list!
innerCosBefore = InnerCos(strength=opt.strength, skip=opt.skip)
innerCosBefore.set_mask(mask_global, 3) # Here we need to set mask for innerCos layer too.
innerCos_list.append(innerCosBefore)
innerCosAfter = InnerCos(strength=opt.strength, skip=opt.skip)
innerCosAfter.set_mask(mask_global, 3) # Here we need to set mask for innerCos layer too.
innerCos_list.append(innerCosAfter)
downconv = InceptionDown(input_nc, inner_nc) # nn.Conv2d(input_nc, inner_nc, kernel_size=4,stride=2, padding=1)
downrelu = nn.LeakyReLU(0.2, True)
downnorm = norm_layer(inner_nc, affine=True)
uprelu = nn.ReLU(True)
upnorm = norm_layer(outer_nc, affine=True)
# Different position only has differences in `upconv`
# for the outermost, the special is `tanh`
if outermost:
upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,kernel_size=4, stride=2,padding=1)
downconv = nn.Conv2d(input_nc, inner_nc, kernel_size=4,
stride=2, padding=1)
down = [downconv]
up = [uprelu, upconv, nn.Tanh()]
model = down + [submodule] + up
# for the innermost, the special is `inner_nc` instead of `inner_nc*2`
elif innermost:
upconv = InceptionUp(inner_nc, outer_nc) #nn.ConvTranspose2d(inner_nc, outer_nc,kernel_size=4, stride=2,padding=1)
down = [downrelu, downconv] # for the innermost, no submodule, and delete the bn
up = [uprelu, upconv, upnorm]
model = down + up
# else, the normal
else:
upconv = InceptionUp(inner_nc * 3, outer_nc) #nn.ConvTranspose2d(inner_nc * 2, outer_nc,kernel_size=4, stride=2,padding=1)
down = [downrelu, downconv, downnorm]
up = [uprelu, innerCosBefore, shift, innerCosAfter, upconv, upnorm]
if use_dropout:
model = down + [submodule] + up + [nn.Dropout(0.5)]
else:
model = down + [submodule] + up
self.model = nn.Sequential(*model)
def forward(self, x):
if self.outermost: # if it is the outermost, directly pass the input in.
return self.model(x)
else:
x_latter = self.model(x)
_, _, h, w = x.size()
if h != x_latter.size(2) or w != x_latter.size(3):
x_latter = F.interpolate(x_latter, (h, w), mode='bilinear')
return torch.cat([x_latter, x], 1) # cat in the C channel |
py | b4053f17b541d8d8f4696d97b6e74e196af8ad2d | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
from twisted.application.service import ServiceMaker
TwistedFTP = ServiceMaker("Twisted FTP", "twisted.tap.ftp", "An FTP server.", "ftp")
|
py | b4053f5e6d4af990b0d2eeb03a69e68d58a710ed | #from distutils.core import setup
from setuptools import setup
setup(name='MEL',
version= '1.0.0',
description='Master Equation based Lumping code for integrating single PESs into global kinetic schemes',
author='Luna Pratali Maffei',
author_email= '[email protected]',
packages =['MEL'],
entry_points={'console_scripts':['MEL = MEL.run:main']
},
keywords = ['Master Equation', 'Lumping', 'PES', 'Rate Constants'])
'''
$ python setup.py install
''' |
py | b405407cd15a7485b431c5d37445480b8ab75f44 | from .cmdline import CmdUtils
class AzResourceGroupUtils:
@staticmethod
def get_group(sub_id: str, group_name: str):
return CmdUtils.get_command_output(
[
"az",
"group",
"show",
"--name",
group_name,
"--subscription",
sub_id
]
)
@staticmethod
def get_group_locks(sub_id: str, group_name: str) -> list:
"""
{
"id": "/subscriptions/b0844137-4c2f-4091-b7f1-bc64c8b60e9c/resourceGroups/testgp/providers/Microsoft.Authorization/locks/test",
"level": "ReadOnly",
"name": "test",
"notes": "some note",
"owners": null,
"resourceGroup": "testgp",
"type": "Microsoft.Authorization/locks"
}
"""
return CmdUtils.get_command_output(
[
"az",
"group",
"lock",
"list",
"--resource-group",
group_name,
"--subscription",
sub_id
]
)
@staticmethod
def delete_group_lock(lock_name:str, sub_id: str, group_name: str) -> list:
return CmdUtils.get_command_output(
[
"az",
"group",
"lock",
"delete",
"--name",
lock_name,
"--resource-group",
group_name,
"--subscription",
sub_id
]
)
@staticmethod
def update_group_tags(sub_id: str, group_name: str, set_command:str):
return CmdUtils.get_command_output(
[
"az",
"group",
"update",
"--resource-group",
group_name,
"--set",
set_command,
"--subscription",
sub_id
]
)
@staticmethod
def get_tag_content(group:dict, tag:str) -> str:
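        """Return group["tags"][tag] if the tag is present, otherwise None.
        e.g. get_tag_content({"tags": {"owner": "me"}}, "owner") -> "me"
             get_tag_content({"tags": None}, "owner")            -> None
        """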
return_value = None
if "tags" in group and group["tags"]:
if tag in group["tags"]:
return_value = group["tags"][tag]
return return_value
@staticmethod
def group_exists(sub_id: str, group_name: str):
return CmdUtils.get_command_output(
[
"az",
"group",
"exists",
"--name",
group_name,
"--subscription",
sub_id
]
)
@staticmethod
def get_groups(sub_id:str):
return CmdUtils.get_command_output(
[
"az",
"group",
"list",
"--subscription",
sub_id
]
)
@staticmethod
def delete_group(group_name:str, sub_id: str):
print("Delete Resource Group: {} in {}".format(
group_name,
sub_id
))
# Get locks and delete if any
lock_list = AzResourceGroupUtils.get_group_locks(sub_id, group_name)
if len(lock_list):
for lock in lock_list:
print("Deleting group lock -", lock["name"])
AzResourceGroupUtils.delete_group_lock(lock["name"],sub_id, group_name)
CmdUtils.get_command_output(
[
"az",
"group",
"delete",
"--name",
group_name,
"--subscription",
sub_id,
"--no-wait",
"--yes"
]
)
|
py | b40540cb755b57f1a3bce48c72b4a89fc8c5f958 | # coding=utf-8
# Copyright 2021 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test for Forest Fires dataset."""
from tensorflow_datasets import testing
from tensorflow_datasets.structured import forest_fires
class ForestFiresTest(testing.DatasetBuilderTestCase):
DATASET_CLASS = forest_fires.ForestFires
SPLITS = {
"train": 1,
}
DL_EXTRACT_RESULT = "forestfires.csv"
if __name__ == "__main__":
testing.test_main()
|
py | b405410890c176ce17fcdb7224b4802b894b9aae | """
Django settings for musicstore project.
Generated by 'django-admin startproject' using Django 2.1.5.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = ')es48=du_l3)b_gg)jn%m1r*f2k%9(tjz&x#*=m13ks1zz^2yv'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'musicapp',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'musicstore.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'musicstore.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'musicdb',
'USER' : 'postgres',
'PASSWORD' : 'fi5nowtH',
'HOST' : 'localhost',
'PORT' : '',
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
LOGIN_REDIRECT_URL='loginmessage'
LOGOUT_REDIRECT_URL='logoutmessage' |
py | b405414f2a7b43e6b0e131f6e20d834eb33c052e | # Copyright 2021 Collate
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Query parser implementation
"""
import datetime
import traceback
from typing import Optional
from sql_metadata import Parser
from metadata.config.common import ConfigModel
from metadata.generated.schema.entity.services.connections.metadata.openMetadataConnection import (
OpenMetadataConnection,
)
from metadata.generated.schema.type.queryParserData import QueryParserData
from metadata.generated.schema.type.tableQuery import TableQuery
from metadata.ingestion.api.processor import Processor, ProcessorStatus
from metadata.utils.logger import ingestion_logger
logger = ingestion_logger()
class QueryParserProcessor(Processor):
"""
Extension of the `Processor` class
    Args:
        config (ConfigModel):
        metadata_config (OpenMetadataConnection):
    Attributes:
        config (ConfigModel):
        metadata_config (OpenMetadataConnection):
        status (ProcessorStatus):
"""
config: ConfigModel
status: ProcessorStatus
def __init__(
self,
config: ConfigModel,
metadata_config: OpenMetadataConnection,
):
self.config = config
self.metadata_config = metadata_config
self.status = ProcessorStatus()
@classmethod
def create(
cls, config_dict: dict, metadata_config: OpenMetadataConnection, **kwargs
):
config = ConfigModel.parse_obj(config_dict)
return cls(config, metadata_config)
def process(self, record: TableQuery) -> Optional[QueryParserData]:
query_parser_data = None
try:
if not record.query:
return
start_date = record.analysisDate.__root__
if isinstance(record.analysisDate, str):
start_date = datetime.datetime.strptime(
str(record.analysisDate), "%Y-%m-%d %H:%M:%S"
).date()
parser = Parser(record.query)
columns_dict = {} if parser.columns_dict is None else parser.columns_dict
query_parser_data = QueryParserData(
tables=parser.tables,
tableAliases=parser.tables_aliases,
columns=columns_dict,
database=record.database,
databaseSchema=record.databaseSchema,
sql=record.query,
date=start_date.strftime("%Y-%m-%d"),
serviceName=record.serviceName,
)
# pylint: disable=broad-except
except Exception as err:
if hasattr(record, "sql"):
logger.debug(record.sql)
logger.debug(traceback.format_exc())
logger.error(err)
return query_parser_data
def close(self):
pass
def get_status(self) -> ProcessorStatus:
return self.status
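# Illustrative sketch, not part of the original module: a minimal, hedged demo
# of the sql_metadata Parser call that process() relies on. The sample query
# and the commented outputs are assumptions for illustration only.
if __name__ == "__main__":
    _demo = Parser("SELECT u.id, u.name FROM users u JOIN orders o ON o.user_id = u.id")
    print(_demo.tables)          # e.g. ['users', 'orders']
    print(_demo.tables_aliases)  # e.g. {'u': 'users', 'o': 'orders'}
    print(_demo.columns_dict)    # columns grouped by clause (select, join, ...)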
|
py | b40541883122828e743461f4ffd745ff234dae34 | # from django.core.urlresolvers import reverse
# from django.test import override_settings
# from rest_framework import status
# from rest_framework.test import APITestCase
# from tests.python.accounts.test_models import UserFactory
# from tests.python.accounts.test_views import get_basic_auth_header
# @override_settings(CELERY_EAGER_PROPAGATES_EXCEPTIONS=True, CELERY_ALWAYS_EAGER=True, BROKER_BACKEND='memory')
# class BaseTests(APITestCase):
# def setUp(self):
# self.user = UserFactory.create(email='[email protected]',
# first_name='Test',
# last_name='User')
# self.user.set_password('test')
# self.user.save()
# def test_get_protected_page(self):
# # Ensure we can login with given credentials.
# url = reverse('accounts:login')
# self.client.credentials(HTTP_AUTHORIZATION=get_basic_auth_header('[email protected]', 'test'))
# response = self.client.post(url, format='json')
# self.assertTrue('token' in response.data)
# self.assertEqual(response.status_code, status.HTTP_200_OK)
# self.client.credentials(HTTP_AUTHORIZATION='Token {}'.format(response.data['token']))
# # user confirmed account unsuccessfully
# url = reverse('base:protected_data')
# response = self.client.get(url)
# self.assertEqual(response.data['data'], 'THIS IS THE PROTECTED STRING FROM SERVER')
# def test_get_main_page(self):
# response = self.client.get(reverse('index'))
# self.assertEqual(response.status_code, status.HTTP_200_OK) |
py | b4054207d0f1708e9c6fbe575f39cff59be0913f | """Support for Z-Wave fans."""
import logging
import math
from homeassistant.components.fan import (
DOMAIN as FAN_DOMAIN,
SUPPORT_SET_SPEED,
FanEntity,
)
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.util.percentage import (
percentage_to_ranged_value,
ranged_value_to_percentage,
)
from .const import DATA_UNSUBSCRIBE, DOMAIN
from .entity import ZWaveDeviceEntity
_LOGGER = logging.getLogger(__name__)
SUPPORTED_FEATURES = SUPPORT_SET_SPEED
SPEED_RANGE = (1, 99) # off is not included
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up Z-Wave Fan from Config Entry."""
@callback
def async_add_fan(values):
"""Add Z-Wave Fan."""
fan = ZwaveFan(values)
async_add_entities([fan])
hass.data[DOMAIN][config_entry.entry_id][DATA_UNSUBSCRIBE].append(
async_dispatcher_connect(hass, f"{DOMAIN}_new_{FAN_DOMAIN}", async_add_fan)
)
class ZwaveFan(ZWaveDeviceEntity, FanEntity):
"""Representation of a Z-Wave fan."""
async def async_set_percentage(self, percentage):
"""Set the speed percentage of the fan."""
if percentage is None:
# Value 255 tells device to return to previous value
zwave_speed = 255
elif percentage == 0:
zwave_speed = 0
else:
zwave_speed = math.ceil(percentage_to_ranged_value(SPEED_RANGE, percentage))
self.values.primary.send_value(zwave_speed)
async def async_turn_on(
self, speed=None, percentage=None, preset_mode=None, **kwargs
):
"""Turn the device on."""
await self.async_set_percentage(percentage)
async def async_turn_off(self, **kwargs):
"""Turn the device off."""
self.values.primary.send_value(0)
@property
def is_on(self):
"""Return true if device is on (speed above 0)."""
return self.values.primary.value > 0
@property
def percentage(self):
"""Return the current speed.
The Z-Wave speed value is a byte 0-255. 255 means previous value.
The normal range of the speed is 0-99. 0 means off.
"""
return ranged_value_to_percentage(SPEED_RANGE, self.values.primary.value)
@property
def supported_features(self):
"""Flag supported features."""
return SUPPORTED_FEATURES
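# Illustrative sketch, not part of the original integration: a hedged check of
# the percentage to Z-Wave speed mapping used above, assuming the
# homeassistant.util.percentage helpers behave as documented for the 1-99 range.
if __name__ == "__main__":
    for pct in (0, 1, 50, 100):
        speed = 0 if pct == 0 else math.ceil(percentage_to_ranged_value(SPEED_RANGE, pct))
        print(f"{pct}% -> Z-Wave speed {speed}")  # expected: 0, 1, 50, 99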
|
py | b405430e38a7eaec82d5f10655cbc8cfc81ec205 | """Tests related to StripeSubscription model."""
import pytest
from django.contrib.auth import get_user_model
from django.utils import timezone
from datetime import timedelta
from .. import models, factories
User = get_user_model()
@pytest.mark.parametrize(
"status", ["incomplete", "incomplete_expired", "active", "past_due", "canceled"]
)
def test_sync_active(customer, paid_plan, status):
"""Only an active StripeSubscription syncs the Plan and current_period_end to the Customer"""
subscription = factories.StripeSubscriptionFactory(
customer=customer,
status=status,
price_id=paid_plan.price_id,
current_period_end=timezone.now() + timedelta(days=30),
dont_sync_to_customer=True,
)
customer.refresh_from_db()
assert (
customer.plan
== models.Plan.objects.filter(type=models.Plan.Type.FREE_DEFAULT).first()
)
subscription.sync_to_customer()
customer.refresh_from_db()
if status == "active":
assert customer.plan == paid_plan
assert customer.current_period_end == subscription.current_period_end
else:
assert customer.plan != paid_plan
assert customer.current_period_end != subscription.current_period_end
@pytest.mark.parametrize(
"status", ["incomplete", "incomplete_expired", "active", "past_due", "canceled"]
)
def test_sync_canceled(customer, paid_plan, status):
"""Only a canceled or incomplete_expired StripeSubscription downgrades
the Customer to free_default and zeroes out current_period_end"""
subscription = factories.StripeSubscriptionFactory(
customer=customer,
status="active",
price_id=paid_plan.price_id,
current_period_end=timezone.now() + timedelta(days=30),
)
assert customer.plan == paid_plan
subscription.status = status
subscription.save()
subscription.sync_to_customer()
customer.refresh_from_db()
free_default = models.Plan.objects.get(type=models.Plan.Type.FREE_DEFAULT)
if status in (
models.StripeSubscription.Status.CANCELED,
models.StripeSubscription.Status.INCOMPLETE,
models.StripeSubscription.Status.INCOMPLETE_EXPIRED,
):
assert customer.plan == free_default
        assert customer.current_period_end is None
else:
assert customer.plan == paid_plan
assert customer.current_period_end == subscription.current_period_end
|
py | b4054310c012fd14f20f6ceb72c6117475feba8c | """
Generate Python bytecode from an Abstract Syntax Tree.
"""
# NOTE TO READERS: All the ugly and "obvious" isinstance assertions here are to
# help the annotator. To it, unfortunately, everything is not so obvious. If
# you figure out a way to remove them, great, but try a translation first,
# please.
import struct
from rpython.rlib.objectmodel import specialize
from pypy.interpreter.astcompiler import ast, assemble, symtable, consts, misc
from pypy.interpreter.astcompiler import optimize # For side effects
from pypy.interpreter.pyparser.error import SyntaxError
from pypy.tool import stdlib_opcode as ops
C_INT_MAX = (2 ** (struct.calcsize('i') * 8)) / 2 - 1
def compile_ast(space, module, info):
"""Generate a code object from AST."""
symbols = symtable.SymtableBuilder(space, module, info)
return TopLevelCodeGenerator(space, module, symbols, info).assemble()
MAX_STACKDEPTH_CONTAINERS = 100
name_ops_default = misc.dict_to_switch({
ast.Load: ops.LOAD_NAME,
ast.Store: ops.STORE_NAME,
ast.Del: ops.DELETE_NAME
})
name_ops_fast = misc.dict_to_switch({
ast.Load: ops.LOAD_FAST,
ast.Store: ops.STORE_FAST,
ast.Del: ops.DELETE_FAST
})
name_ops_deref = misc.dict_to_switch({
ast.Load: ops.LOAD_DEREF,
ast.Store: ops.STORE_DEREF,
ast.Del: ops.DELETE_DEREF,
})
name_ops_global = misc.dict_to_switch({
ast.Load: ops.LOAD_GLOBAL,
ast.Store: ops.STORE_GLOBAL,
ast.Del: ops.DELETE_GLOBAL
})
unary_operations = misc.dict_to_switch({
ast.Invert: ops.UNARY_INVERT,
ast.Not: ops.UNARY_NOT,
ast.UAdd: ops.UNARY_POSITIVE,
ast.USub: ops.UNARY_NEGATIVE
})
binary_operations = misc.dict_to_switch({
ast.Add: ops.BINARY_ADD,
ast.Sub: ops.BINARY_SUBTRACT,
ast.Mult: ops.BINARY_MULTIPLY,
ast.Div: ops.BINARY_TRUE_DIVIDE,
ast.Mod: ops.BINARY_MODULO,
ast.Pow: ops.BINARY_POWER,
ast.LShift: ops.BINARY_LSHIFT,
ast.RShift: ops.BINARY_RSHIFT,
ast.BitOr: ops.BINARY_OR,
ast.BitAnd: ops.BINARY_AND,
ast.BitXor: ops.BINARY_XOR,
ast.FloorDiv: ops.BINARY_FLOOR_DIVIDE,
ast.MatMult: ops.BINARY_MATRIX_MULTIPLY
})
inplace_operations = misc.dict_to_switch({
ast.Add: ops.INPLACE_ADD,
ast.Sub: ops.INPLACE_SUBTRACT,
ast.Mult: ops.INPLACE_MULTIPLY,
ast.Div: ops.INPLACE_TRUE_DIVIDE,
ast.Mod: ops.INPLACE_MODULO,
ast.Pow: ops.INPLACE_POWER,
ast.LShift: ops.INPLACE_LSHIFT,
ast.RShift: ops.INPLACE_RSHIFT,
ast.BitOr: ops.INPLACE_OR,
ast.BitAnd: ops.INPLACE_AND,
ast.BitXor: ops.INPLACE_XOR,
ast.FloorDiv: ops.INPLACE_FLOOR_DIVIDE,
ast.MatMult: ops.INPLACE_MATRIX_MULTIPLY
})
compare_operations = misc.dict_to_switch({
ast.Eq: (ops.COMPARE_OP, 2),
ast.NotEq: (ops.COMPARE_OP, 3),
ast.Lt: (ops.COMPARE_OP, 0),
ast.LtE: (ops.COMPARE_OP, 1),
ast.Gt: (ops.COMPARE_OP, 4),
ast.GtE: (ops.COMPARE_OP, 5),
ast.In: (ops.CONTAINS_OP, 0),
ast.NotIn: (ops.CONTAINS_OP, 1),
ast.Is: (ops.IS_OP, 0),
ast.IsNot: (ops.IS_OP, 1)
})
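# Hedged illustration (comment only): with the table above, "a < b" is emitted
# as COMPARE_OP with oparg 0, "a in b" as CONTAINS_OP 0, and "a is not b" as
# IS_OP 1, mirroring the CPython 3.9-style split of the comparison opcodes.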
class __extend__(ast.AST):
_literal_type = False
class __extend__(
ast.Constant,
ast.Tuple,
ast.List,
ast.ListComp,
ast.Dict,
ast.DictComp,
ast.Set,
ast.SetComp,
ast.GeneratorExp,
ast.JoinedStr,
ast.FormattedValue
):
_literal_type = True
class __extend__(ast.GeneratorExp):
def build_container_and_load_iter(self, codegen):
codegen.comprehension_load_iter()
def get_generators(self):
return self.generators
def accept_comp_iteration(self, codegen, index):
self.elt.walkabout(codegen)
codegen.emit_op(ops.YIELD_VALUE)
codegen.emit_op(ops.POP_TOP)
class __extend__(ast.ListComp):
def build_container_and_load_iter(self, codegen):
single = False
if len(self.generators) == 1:
gen, = self.generators
assert isinstance(gen, ast.comprehension)
if not gen.ifs:
single = True
if single:
codegen.comprehension_load_iter()
codegen.emit_op(ops.BUILD_LIST_FROM_ARG)
else:
codegen.emit_op_arg(ops.BUILD_LIST, 0)
codegen.comprehension_load_iter()
def get_generators(self):
return self.generators
def accept_comp_iteration(self, codegen, index):
self.elt.walkabout(codegen)
codegen.emit_op_arg(ops.LIST_APPEND, index + 1)
class __extend__(ast.SetComp):
def build_container_and_load_iter(self, codegen):
codegen.emit_op_arg(ops.BUILD_SET, 0)
codegen.comprehension_load_iter()
def get_generators(self):
return self.generators
def accept_comp_iteration(self, codegen, index):
self.elt.walkabout(codegen)
codegen.emit_op_arg(ops.SET_ADD, index + 1)
class __extend__(ast.DictComp):
def build_container_and_load_iter(self, codegen):
codegen.emit_op_arg(ops.BUILD_MAP, 0)
codegen.comprehension_load_iter()
def get_generators(self):
return self.generators
def accept_comp_iteration(self, codegen, index):
self.key.walkabout(codegen)
self.value.walkabout(codegen)
codegen.emit_op_arg(ops.MAP_ADD, index + 1)
# These are frame blocks.
fblock_kind_to_str = []
for i, name in enumerate("F_WHILE_LOOP F_FOR_LOOP F_TRY_EXCEPT F_FINALLY_TRY F_FINALLY_END F_WITH F_ASYNC_WITH F_HANDLER_CLEANUP F_POP_VALUE F_EXCEPTION_HANDLER".split()):
globals()[name] = i
fblock_kind_to_str.append(name)
del name, i
class FrameBlockInfo(object):
def __init__(self, kind, block, end, datum):
self.kind = kind
self.block = block
self.end = end
self.datum = datum # an ast node needed for specific kinds of blocks
def __repr__(self):
# for debugging
return "<FrameBlockInfo kind=%s block=%s end=%s>" % (fblock_kind_to_str[self.kind], self.block, self.end)
def update_pos_expr(func):
def updater(self, expr):
assert isinstance(expr, ast.expr)
if expr.lineno > 1:
new_lineno = expr.lineno
else:
new_lineno = self.lineno
old_lineno = self.lineno
self.lineno = new_lineno
try:
return func(self, expr)
finally:
self.lineno = old_lineno
updater.func_name = func.func_name + "_pos_updater"
return updater
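# Hedged note (comment only): update_pos_expr temporarily points self.lineno at
# the expression's own line number (when it is greater than 1) so that nested
# emits record the right position, and restores the previous value afterwards.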
class PythonCodeGenerator(assemble.PythonCodeMaker):
"""Base code generator.
A subclass of this is created for every scope to be compiled. It walks
across the AST tree generating bytecode as needed.
"""
def __init__(self, space, name, tree, lineno, symbols, compile_info,
qualname):
self.scope = symbols.find_scope(tree)
assemble.PythonCodeMaker.__init__(self, space, name, lineno,
self.scope, compile_info)
self.symbols = symbols
self.frame_blocks = []
self.interactive = False
self.temporary_name_counter = 1
if isinstance(self.scope, symtable.FunctionScope):
self.qualname = qualname + '.<locals>'
else:
self.qualname = qualname
self._allow_top_level_await = compile_info.flags & consts.PyCF_ALLOW_TOP_LEVEL_AWAIT
self._compile(tree)
def _compile(self, tree):
"""Override in subclasses to compile a scope."""
raise NotImplementedError
def sub_scope(self, kind, name, node, lineno):
"""Convenience function for compiling a sub scope."""
if self.scope.lookup(name) == symtable.SCOPE_GLOBAL_EXPLICIT:
qualname = name
elif self.qualname:
qualname = '%s.%s' % (self.qualname, name)
else:
qualname = name
generator = kind(self.space, name, node, lineno, self.symbols,
self.compile_info, qualname)
return generator.assemble(), qualname
def push_frame_block(self, kind, block, end=None, datum=None):
self.frame_blocks.append(FrameBlockInfo(kind, block, end, datum))
def pop_frame_block(self, kind, block):
fblock = self.frame_blocks.pop()
assert fblock.kind == kind and fblock.block is block, \
"mismatched frame blocks"
def unwind_fblock(self, fblock, preserve_tos):
""" Unwind a frame block. If preserve_tos is true, the TOS before
popping the blocks will be restored afterwards, unless another return,
break or continue is found. In which case, the TOS will be popped."""
kind = fblock.kind
if kind == F_FOR_LOOP:
if preserve_tos:
self.emit_op(ops.ROT_TWO)
self.emit_op(ops.POP_TOP) # pop iterator
elif kind == F_WHILE_LOOP or kind == F_EXCEPTION_HANDLER:
pass
elif kind == F_TRY_EXCEPT:
self.emit_op(ops.POP_BLOCK)
elif kind == F_FINALLY_TRY:
self.emit_op(ops.POP_BLOCK)
if preserve_tos:
self.push_frame_block(F_POP_VALUE, None)
# emit the finally block, restoring the line number when done
finallyblock = fblock.datum
assert isinstance(finallyblock, ast.Try)
assert finallyblock.finalbody
saved_lineno = self.lineno
self._visit_body(finallyblock.finalbody)
self.lineno = saved_lineno
if preserve_tos:
self.pop_frame_block(F_POP_VALUE, None)
elif kind == F_FINALLY_END:
if preserve_tos:
self.emit_op(ops.ROT_TWO)
self.emit_op(ops.POP_TOP) # remove SApplicationException
self.emit_op(ops.POP_EXCEPT)
elif kind == F_WITH or kind == F_ASYNC_WITH:
self.emit_op(ops.POP_BLOCK)
if preserve_tos:
self.emit_op(ops.ROT_TWO)
self.call_exit_with_nones()
if kind == F_ASYNC_WITH:
self.emit_op(ops.GET_AWAITABLE)
self.load_const(self.space.w_None)
self.emit_op(ops.YIELD_FROM)
self.emit_op(ops.POP_TOP)
elif kind == F_HANDLER_CLEANUP:
if fblock.datum:
self.emit_op(ops.POP_BLOCK)
self.emit_op(ops.POP_EXCEPT)
if fblock.datum:
self.load_const(self.space.w_None)
excepthandler = fblock.datum
assert isinstance(excepthandler, ast.ExceptHandler)
self.name_op(excepthandler.name, ast.Store, excepthandler)
self.name_op(excepthandler.name, ast.Del, excepthandler)
elif kind == F_POP_VALUE:
if preserve_tos:
self.emit_op(ops.ROT_TWO)
self.emit_op(ops.POP_TOP)
else:
assert 0, "unreachable"
def unwind_fblock_stack(self, preserve_tos, find_loop_block=False):
""" Unwind block stack. If find_loop_block is True, return the first
loop block, otherwise return None. """
# XXX This is a bit ridiculous, but we really need to remove the
# blocks and then re-add them for the benefit of unwinding a try with
# a finally block, which will emit the code of the finally block in
# situ, which might then do more unwinding!
if not self.frame_blocks:
return None
fblock_top = self.frame_blocks[-1]
if find_loop_block and (fblock_top.kind == F_WHILE_LOOP
or fblock_top.kind == F_FOR_LOOP):
return fblock_top
fblock = self.frame_blocks.pop()
self.unwind_fblock(fblock, preserve_tos)
res = self.unwind_fblock_stack(preserve_tos, find_loop_block)
self.frame_blocks.append(fblock)
return res
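    # Hedged illustration (comment only): for source such as
    #     for x in it:
    #         with cm:
    #             break
    # the break must first unwind the F_WITH block (POP_BLOCK plus an
    # __exit__(None, None, None) call) and then the enclosing F_FOR_LOOP block
    # (popping the iterator) before jumping to the loop's end block;
    # unwind_fblock_stack and unwind_fblock above emit exactly that cleanup.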
def error(self, msg, node):
# NB: SyntaxError's offset is 1-based!
raise SyntaxError(msg, node.lineno, node.col_offset + 1,
filename=self.compile_info.filename)
def name_op(self, identifier, ctx, node):
"""Generate an operation appropriate for the scope of the identifier."""
# node is used only for the possible syntax error
self.check_forbidden_name(identifier, node, ctx)
scope = self.scope.lookup(identifier)
op = ops.NOP
container = self.names
if scope == symtable.SCOPE_LOCAL:
if self.scope.can_be_optimized:
container = self.var_names
op = name_ops_fast(ctx)
elif scope == symtable.SCOPE_FREE:
op = name_ops_deref(ctx)
if op == ops.LOAD_DEREF and isinstance(self, ClassCodeGenerator):
op = ops.LOAD_CLASSDEREF
container = self.free_vars
elif scope == symtable.SCOPE_CELL:
op = name_ops_deref(ctx)
container = self.cell_vars
elif scope == symtable.SCOPE_GLOBAL_IMPLICIT:
if self.scope.optimized:
op = name_ops_global(ctx)
elif scope == symtable.SCOPE_GLOBAL_EXPLICIT:
op = name_ops_global(ctx)
if op == ops.NOP:
op = name_ops_default(ctx)
self.emit_op_arg(op, self.add_name(container, identifier))
def possible_docstring(self, node):
if isinstance(node, ast.Expr) and self.compile_info.optimize < 2:
expr_value = node.value
if isinstance(expr_value, ast.Constant) and self.space.isinstance_w(expr_value.value, self.space.w_unicode):
return expr_value
return None
def ensure_docstring_constant(self, body):
# If there's a docstring, store it as the first constant.
if body:
doc_expr = self.possible_docstring(body[0])
else:
doc_expr = None
if doc_expr is not None:
self.add_const(doc_expr.value)
self.scope.doc_removable = True
return True
else:
self.add_const(self.space.w_None)
return False
def _get_code_flags(self):
return 0
def _check_async_function(self):
"""Returns true if 'await' is allowed."""
return False
def _handle_body(self, body):
"""Compile a list of statements, handling doc strings if needed."""
if body:
start = 0
doc_expr = self.possible_docstring(body[0])
if doc_expr is not None:
start = 1
doc_expr.walkabout(self)
if doc_expr.lineno > 0:
self.update_position(doc_expr.lineno)
self.name_op("__doc__", ast.Store, doc_expr)
self.scope.doc_removable = True
self._visit_body(body, start)
return True
else:
return False
def _maybe_setup_annotations(self):
# if the scope contained an annotated variable assignment,
# this will emit the requisite SETUP_ANNOTATIONS
if self.scope.contains_annotated and not isinstance(self, AbstractFunctionCodeGenerator):
return self.emit_op(ops.SETUP_ANNOTATIONS)
def call_exit_with_nones(self):
self.load_const(self.space.w_None)
self.emit_op(ops.DUP_TOP)
self.emit_op(ops.DUP_TOP)
self.emit_op_arg(ops.CALL_FUNCTION, 3)
def visit_Module(self, mod):
if not self._handle_body(mod.body):
self.first_lineno = self.lineno = 1
def visit_Interactive(self, mod):
self.interactive = True
self._visit_body(mod.body)
def visit_Expression(self, mod):
self.add_none_to_final_return = False
mod.body.walkabout(self)
def _visit_body(self, body, start=0):
if body is None:
return
for i in range(start, len(body)):
stmt = body[i]
if stmt is not None:
assert isinstance(stmt, ast.stmt)
if stmt.lineno > 0:
self.update_position(stmt.lineno)
stmt.walkabout(self)
def _make_function(self, code, oparg=0, qualname=None):
"""Emit the opcodes to turn a code object into a function."""
w_qualname = self.space.newtext(qualname or code.co_name)
if code.co_freevars:
oparg = oparg | 0x08
# Load cell and free vars to pass on.
for free in code.co_freevars:
free_scope = self.scope.lookup(free)
if free_scope in (symtable.SCOPE_CELL,
symtable.SCOPE_CELL_CLASS):
index = self.cell_vars[free]
else:
index = self.free_vars[free]
self.emit_op_arg(ops.LOAD_CLOSURE, index)
self.emit_op_arg(ops.BUILD_TUPLE, len(code.co_freevars))
self.load_const(code)
self.load_const(w_qualname)
self.emit_op_arg(ops.MAKE_FUNCTION, oparg)
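    # Hedged note (comment only): the MAKE_FUNCTION oparg is a bit mask; 0x01
    # marks positional defaults, 0x02 keyword-only defaults, 0x04 an
    # annotations dict and 0x08 a closure tuple, matching the flag bits set in
    # _visit_function, visit_Lambda and this helper.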
def _visit_kwonlydefaults(self, args):
defaults = 0
keys_w = []
for i, default in enumerate(args.kw_defaults):
if default:
kwonly = args.kwonlyargs[i]
assert isinstance(kwonly, ast.arg)
mangled = self.scope.mangle(kwonly.arg)
keys_w.append(self.space.newtext(mangled))
default.walkabout(self)
defaults += 1
if keys_w:
w_tup = self.space.newtuple(keys_w)
self.load_const(w_tup)
self.emit_op_arg(ops.BUILD_CONST_KEY_MAP, len(keys_w))
return defaults
def _visit_arg_annotation(self, name, ann, names):
if ann:
ann.walkabout(self)
names.append(self.scope.mangle(name))
def _visit_arg_annotations(self, args, names):
if args:
for arg in args:
assert isinstance(arg, ast.arg)
self._visit_arg_annotation(arg.arg, arg.annotation, names)
@specialize.argtype(1)
def _visit_annotations(self, func, args, returns):
space = self.space
names = []
self._visit_arg_annotations(args.posonlyargs, names)
self._visit_arg_annotations(args.args, names)
vararg = args.vararg
if vararg:
self._visit_arg_annotation(vararg.arg, vararg.annotation,
names)
self._visit_arg_annotations(args.kwonlyargs, names)
kwarg = args.kwarg
if kwarg:
self._visit_arg_annotation(kwarg.arg, kwarg.annotation,
names)
self._visit_arg_annotation("return", returns, names)
l = len(names)
if l:
if l > 65534:
self.error("too many annotations", func)
w_tup = space.newtuple([space.newtext(name) for name in names])
self.load_const(w_tup)
self.emit_op_arg(ops.BUILD_CONST_KEY_MAP, l)
return l
def _visit_defaults(self, defaults):
assert len(defaults) > 0
w_tup = self._tuple_of_consts(defaults)
if w_tup:
self.update_position(defaults[-1].lineno)
self.load_const(w_tup)
else:
self.visit_sequence(defaults)
self.emit_op_arg(ops.BUILD_TUPLE, len(defaults))
@specialize.arg(2)
def _visit_function(self, func, function_code_generator):
# Load decorators first, but apply them after the function is created.
if func.decorator_list:
for dec in func.decorator_list:
if dec.lineno > 0:
self.update_position(dec.lineno)
dec.walkabout(self)
if func.lineno > 0:
self.update_position(func.lineno)
args = func.args
assert isinstance(args, ast.arguments)
oparg = 0
if args.defaults is not None and len(args.defaults):
oparg = oparg | 0x01
self._visit_defaults(args.defaults)
if args.kwonlyargs:
kw_default_count = self._visit_kwonlydefaults(args)
if kw_default_count:
oparg = oparg | 0x02
num_annotations = self._visit_annotations(func, args, func.returns)
if num_annotations:
oparg = oparg | 0x04
code, qualname = self.sub_scope(function_code_generator, func.name,
func, func.lineno)
self._make_function(code, oparg, qualname=qualname)
# Apply decorators.
if func.decorator_list:
for i in range(len(func.decorator_list)):
self.emit_op_arg(ops.CALL_FUNCTION, 1)
self.name_op(func.name, ast.Store, func)
def visit_FunctionDef(self, func):
self._visit_function(func, FunctionCodeGenerator)
def visit_AsyncFunctionDef(self, func):
self._visit_function(func, AsyncFunctionCodeGenerator)
@update_pos_expr
def visit_Lambda(self, lam):
args = lam.args
assert isinstance(args, ast.arguments)
oparg = 0
if args.defaults is not None and len(args.defaults):
oparg = oparg | 0x01
self._visit_defaults(args.defaults)
if args.kwonlyargs:
kw_default_count = self._visit_kwonlydefaults(args)
if kw_default_count:
oparg = oparg | 0x02
code, qualname = self.sub_scope(
LambdaCodeGenerator, "<lambda>", lam, lam.lineno)
self._make_function(code, oparg, qualname=qualname)
def visit_ClassDef(self, cls):
self.visit_sequence(cls.decorator_list)
# 1. compile the class body into a code object
code, qualname = self.sub_scope(
ClassCodeGenerator, cls.name, cls, cls.lineno)
# 2. load the 'build_class' function
self.emit_op(ops.LOAD_BUILD_CLASS)
# 3. load a function (or closure) made from the code object
self._make_function(code, qualname=qualname)
# 4. load class name
self.load_const(self.space.newtext(cls.name))
# 5. generate the rest of the code for the call
self._make_call(2, cls.bases, cls.keywords)
# 6. apply decorators
if cls.decorator_list:
for i in range(len(cls.decorator_list)):
self.emit_op_arg(ops.CALL_FUNCTION, 1)
# 7. store into <name>
self.name_op(cls.name, ast.Store, cls)
def visit_AugAssign(self, assign):
target = assign.target
if isinstance(target, ast.Attribute):
self.check_forbidden_name(target.attr, target)
target.value.walkabout(self)
self.emit_op(ops.DUP_TOP)
self.emit_op_name(ops.LOAD_ATTR, self.names, target.attr)
assign.value.walkabout(self)
self.emit_op(inplace_operations(assign.op))
self.emit_op(ops.ROT_TWO)
self.emit_op_name(ops.STORE_ATTR, self.names, target.attr)
elif isinstance(target, ast.Subscript):
target.value.walkabout(self)
target.slice.walkabout(self)
self.emit_op(ops.DUP_TOP_TWO)
self.emit_op(ops.BINARY_SUBSCR)
assign.value.walkabout(self)
self.emit_op(inplace_operations(assign.op))
self.emit_op(ops.ROT_THREE)
self.emit_op(ops.STORE_SUBSCR)
elif isinstance(target, ast.Name):
self.name_op(target.id, ast.Load, target)
assign.value.walkabout(self)
self.emit_op(inplace_operations(assign.op))
self.name_op(target.id, ast.Store, target)
else:
self.error("illegal expression for augmented assignment", assign)
def visit_Assert(self, asrt):
if self.compile_info.optimize >= 1:
return
assert self.compile_info.optimize == 0
if isinstance(asrt.test, ast.Tuple):
test = asrt.test
assert isinstance(test, ast.Tuple)
if len(test.elts) > 0:
misc.syntax_warning(
self.space,
"assertion is always true, perhaps remove parentheses?",
self.compile_info.filename,
asrt.lineno,
asrt.col_offset
)
end = self.new_block()
asrt.test.accept_jump_if(self, True, end)
self.emit_op(ops.LOAD_ASSERTION_ERROR)
if asrt.msg:
asrt.msg.walkabout(self)
self.emit_op_arg(ops.CALL_FUNCTION, 1)
self.emit_op_arg(ops.RAISE_VARARGS, 1)
self.use_next_block(end)
def _binop(self, op):
return binary_operations(op)
@update_pos_expr
def visit_BinOp(self, binop):
binop.left.walkabout(self)
binop.right.walkabout(self)
self.emit_op(self._binop(binop.op))
def visit_Return(self, ret):
preserve_tos = ret.value is not None and not isinstance(ret.value, ast.Constant)
if preserve_tos:
ret.value.walkabout(self)
self.unwind_fblock_stack(preserve_tos)
if ret.value is None:
self.load_const(self.space.w_None)
elif not preserve_tos:
ret.value.walkabout(self) # Constant
self.emit_op(ops.RETURN_VALUE)
def visit_Delete(self, delete):
self.visit_sequence(delete.targets)
def visit_If(self, if_):
end = self.new_block()
test_constant = if_.test.as_constant_truth(
self.space, self.compile_info)
if test_constant == optimize.CONST_FALSE:
with self.all_dead_code():
self._visit_body(if_.body)
self._visit_body(if_.orelse)
elif test_constant == optimize.CONST_TRUE:
self._visit_body(if_.body)
with self.all_dead_code():
self._visit_body(if_.orelse)
else:
if if_.orelse:
otherwise = self.new_block()
else:
otherwise = end
if_.test.accept_jump_if(self, False, otherwise)
self._visit_body(if_.body)
if if_.orelse:
self.emit_jump(ops.JUMP_FORWARD, end)
self.use_next_block(otherwise)
self._visit_body(if_.orelse)
self.use_next_block(end)
def visit_Break(self, br):
loop_fblock = self.unwind_fblock_stack(False, find_loop_block=True)
if loop_fblock is None:
self.error("'break' not properly in loop", br)
self.unwind_fblock(loop_fblock, False)
assert loop_fblock.end is not None
self.emit_jump(ops.JUMP_ABSOLUTE, loop_fblock.end, True)
def visit_Continue(self, cont):
loop_fblock = self.unwind_fblock_stack(False, find_loop_block=True)
if loop_fblock is None:
self.error("'continue' not properly in loop", cont)
self.emit_jump(ops.JUMP_ABSOLUTE, loop_fblock.block, True)
def visit_For(self, fr):
start = self.new_block()
cleanup = self.new_block()
end = self.new_block()
# self.emit_jump(ops.SETUP_LOOP, end)
self.push_frame_block(F_FOR_LOOP, start, end)
fr.iter.walkabout(self)
self.emit_op(ops.GET_ITER)
self.use_next_block(start)
self.emit_jump(ops.FOR_ITER, cleanup)
fr.target.walkabout(self)
self._visit_body(fr.body)
self.emit_jump(ops.JUMP_ABSOLUTE, start, True)
self.use_next_block(cleanup)
self.pop_frame_block(F_FOR_LOOP, start)
self._visit_body(fr.orelse)
self.use_next_block(end)
def visit_AsyncFor(self, fr):
if not self._check_async_function():
self.error("'async for' outside async function", fr)
b_start = self.new_block()
b_except = self.new_block()
b_end = self.new_block()
fr.iter.walkabout(self)
self.emit_op(ops.GET_AITER)
self.use_next_block(b_start)
self.push_frame_block(F_FOR_LOOP, b_start, b_end)
self.emit_jump(ops.SETUP_EXCEPT, b_except)
self.emit_op(ops.GET_ANEXT)
self.load_const(self.space.w_None)
self.emit_op(ops.YIELD_FROM)
self.emit_op(ops.POP_BLOCK)
fr.target.walkabout(self)
self._visit_body(fr.body)
self.emit_jump(ops.JUMP_ABSOLUTE, b_start, True)
self.pop_frame_block(F_FOR_LOOP, b_start)
# except block for errors from __anext__
self.use_next_block(b_except)
self.emit_op(ops.END_ASYNC_FOR)
self._visit_body(fr.orelse)
self.use_next_block(b_end)
def visit_While(self, wh):
test_constant = wh.test.as_constant_truth(self.space, self.compile_info)
if test_constant == optimize.CONST_FALSE:
with self.all_dead_code():
end = self.new_block()
loop = self.new_block()
self.push_frame_block(F_WHILE_LOOP, loop, end)
self._visit_body(wh.body)
self.pop_frame_block(F_WHILE_LOOP, loop)
self._visit_body(wh.orelse)
else:
end = self.new_block()
anchor = None
if test_constant == optimize.CONST_NOT_CONST:
anchor = self.new_block()
loop = self.new_block()
self.push_frame_block(F_WHILE_LOOP, loop, end)
self.use_next_block(loop)
if test_constant == optimize.CONST_NOT_CONST:
wh.test.accept_jump_if(self, False, anchor)
self._visit_body(wh.body)
self.emit_jump(ops.JUMP_ABSOLUTE, loop, True)
if test_constant == optimize.CONST_NOT_CONST:
self.use_next_block(anchor)
self.pop_frame_block(F_WHILE_LOOP, loop)
self._visit_body(wh.orelse)
self.use_next_block(end)
def _visit_try_except(self, tr):
body = self.new_block()
exc = self.new_block()
otherwise = self.new_block()
end = self.new_block()
# XXX CPython uses SETUP_FINALLY here too
self.emit_jump(ops.SETUP_EXCEPT, exc)
body = self.use_next_block(body)
self.push_frame_block(F_TRY_EXCEPT, body)
self._visit_body(tr.body)
self.emit_op(ops.POP_BLOCK)
self.pop_frame_block(F_TRY_EXCEPT, body)
self.emit_jump(ops.JUMP_FORWARD, otherwise)
self.use_next_block(exc)
self.push_frame_block(F_EXCEPTION_HANDLER, None)
for i, handler in enumerate(tr.handlers):
assert isinstance(handler, ast.ExceptHandler)
self.update_position(handler.lineno)
next_except = self.new_block()
if handler.type:
self.emit_op(ops.DUP_TOP)
handler.type.walkabout(self)
self.emit_jump(ops.JUMP_IF_NOT_EXC_MATCH, next_except, True)
else:
if i != len(tr.handlers) - 1:
self.error(
"bare 'except:' must be the last except block", handler)
self.emit_op(ops.POP_TOP)
if handler.name:
## generate the equivalent of:
##
## try:
## # body
## except type as name:
## try:
## # body
## finally:
## name = None
## del name
#
cleanup_end = self.new_block()
self.name_op(handler.name, ast.Store, handler)
self.emit_op(ops.POP_TOP)
# second try
self.emit_jump(ops.SETUP_FINALLY, cleanup_end)
cleanup_body = self.use_next_block()
self.push_frame_block(F_HANDLER_CLEANUP, cleanup_body, None, handler)
# second # body
self._visit_body(handler.body)
self.pop_frame_block(F_HANDLER_CLEANUP, cleanup_body)
self.emit_op(ops.POP_BLOCK)
self.emit_op(ops.POP_EXCEPT)
# name = None; del name
self.load_const(self.space.w_None)
self.name_op(handler.name, ast.Store, handler)
self.name_op(handler.name, ast.Del, handler)
self.emit_jump(ops.JUMP_FORWARD, end)
# finally
self.use_next_block(cleanup_end)
# this is a hack! we emit a NOP to distinguish this from a
# "regular" finally. the reason for that is that we do not want
# to emit a line trace event if sys.settrace is set for the
                # following instructions, and the interpreter can use the NOP to
# detect this case. CPython has really complicated and broken
# logic for this situation instead. See code in
# FinallyBlock.handle.
self.emit_op(ops.NOP)
# name = None; del name
self.load_const(self.space.w_None)
self.name_op(handler.name, ast.Store, handler)
self.name_op(handler.name, ast.Del, handler)
self.emit_op(ops.RERAISE)
else:
self.emit_op(ops.POP_TOP)
self.emit_op(ops.POP_TOP)
cleanup_body = self.use_next_block()
self.push_frame_block(F_HANDLER_CLEANUP, cleanup_body)
self._visit_body(handler.body)
self.pop_frame_block(F_HANDLER_CLEANUP, cleanup_body)
self.emit_op(ops.POP_EXCEPT)
self.emit_jump(ops.JUMP_FORWARD, end)
#
self.use_next_block(next_except)
self.pop_frame_block(F_EXCEPTION_HANDLER, None)
# pypy difference: get rid of exception
self.emit_op(ops.POP_TOP)
self.emit_op(ops.POP_TOP)
self.emit_op(ops.RERAISE) # reraise uses the SApplicationException
self.use_next_block(otherwise)
self._visit_body(tr.orelse)
self.use_next_block(end)
def _visit_try_finally(self, tr):
body = self.new_block()
end = self.new_block()
exit = self.new_block()
# try block
self.emit_jump(ops.SETUP_FINALLY, end)
self.use_next_block(body)
self.push_frame_block(F_FINALLY_TRY, body, end, tr)
if tr.handlers:
self._visit_try_except(tr)
else:
self._visit_body(tr.body)
self.emit_op(ops.POP_BLOCK)
self.pop_frame_block(F_FINALLY_TRY, body)
self._visit_body(tr.finalbody)
self.emit_jump(ops.JUMP_FORWARD, exit)
# finally block, exceptional case
self.use_next_block(end)
self.push_frame_block(F_FINALLY_END, end)
self._visit_body(tr.finalbody)
self.pop_frame_block(F_FINALLY_END, end)
self.emit_op(ops.RERAISE)
self.use_next_block(exit)
def visit_Try(self, tr):
if tr.finalbody:
return self._visit_try_finally(tr)
else:
return self._visit_try_except(tr)
def _import_as(self, alias, imp):
# in CPython this is roughly compile_import_as
# The IMPORT_NAME opcode was already generated. This function
# merely needs to bind the result to a name.
# If there is a dot in name, we need to split it and emit a
# IMPORT_FROM for each name.
source_name = alias.name
dot = source_name.find(".")
if dot > 0:
# Consume the base module name to get the first attribute
while True:
start = dot + 1
dot = source_name.find(".", start)
if dot < 0:
end = len(source_name)
else:
end = dot
attr = source_name[start:end]
self.emit_op_name(ops.IMPORT_FROM, self.names, attr)
if dot < 0:
break
self.emit_op(ops.ROT_TWO)
self.emit_op(ops.POP_TOP)
self.name_op(alias.asname, ast.Store, imp)
self.emit_op(ops.POP_TOP)
return
self.name_op(alias.asname, ast.Store, imp)
def visit_Import(self, imp):
for alias in imp.names:
assert isinstance(alias, ast.alias)
level = 0
self.load_const(self.space.newint(level))
self.load_const(self.space.w_None)
self.emit_op_name(ops.IMPORT_NAME, self.names, alias.name)
# If there's no asname then we store the root module. If there is
# an asname, _import_as stores the last module of the chain into it.
if alias.asname:
self._import_as(alias, imp)
else:
dot = alias.name.find(".")
if dot < 0:
store_name = alias.name
else:
store_name = alias.name[:dot]
self.name_op(store_name, ast.Store, imp)
def visit_ImportFrom(self, imp):
space = self.space
first = imp.names[0]
assert isinstance(first, ast.alias)
star_import = len(imp.names) == 1 and first.name == "*"
# Various error checking for future imports.
if imp.module == "__future__":
last_line, last_offset = self.compile_info.last_future_import
if imp.lineno > last_line or \
imp.lineno == last_line and imp.col_offset > last_offset:
self.error("__future__ statements must appear at beginning "
"of file", imp)
if star_import:
self.error("* not valid in __future__ imports", imp)
compiler = space.createcompiler()
for alias in imp.names:
assert isinstance(alias, ast.alias)
if alias.name not in compiler.future_flags.compiler_features:
if alias.name == "braces":
self.error("not a chance", imp)
self.error("future feature %s is not defined" %
(alias.name,), imp)
self.load_const(space.newint(imp.level))
names_w = [None]*len(imp.names)
for i in range(len(imp.names)):
alias = imp.names[i]
assert isinstance(alias, ast.alias)
names_w[i] = space.newtext(alias.name)
self.load_const(space.newtuple(names_w))
if imp.module:
mod_name = imp.module
else:
# In the case of a relative import.
mod_name = ""
self.emit_op_name(ops.IMPORT_NAME, self.names, mod_name)
if star_import:
self.emit_op(ops.IMPORT_STAR)
else:
for alias in imp.names:
assert isinstance(alias, ast.alias)
self.emit_op_name(ops.IMPORT_FROM, self.names, alias.name)
if alias.asname:
store_name = alias.asname
else:
store_name = alias.name
self.name_op(store_name, ast.Store, imp)
self.emit_op(ops.POP_TOP)
def visit_Assign(self, assign):
# paranoia assert in this stmt subclass: make sure that the lineno is
# already set, should be done by _visit_body
assert assign.lineno < 1 or self.lineno == assign.lineno
if self._optimize_unpacking(assign):
return
assign.value.walkabout(self)
duplications = len(assign.targets) - 1
for i in range(len(assign.targets)):
if i < duplications:
self.emit_op(ops.DUP_TOP)
assign.targets[i].walkabout(self)
def _optimize_unpacking(self, assign):
"""Try to optimize out BUILD_TUPLE and UNPACK_SEQUENCE opcodes."""
if len(assign.targets) != 1:
return False
targets = assign.targets[0].as_node_list(self.space)
if targets is None:
return False
values = assign.value.as_node_list(self.space)
if values is None:
return False
targets_count = len(targets)
values_count = len(values)
if targets_count != values_count:
return False
for value in values:
if isinstance(value, ast.Starred):
return False # more complicated
for target in targets:
if not isinstance(target, ast.Name):
if isinstance(target, ast.Starred):
# these require extra checks
return False
break
else:
self.visit_sequence(values)
seen_names = {}
for i in range(targets_count - 1, -1, -1):
target = targets[i]
assert isinstance(target, ast.Name)
if target.id not in seen_names:
seen_names[target.id] = True
self.name_op(target.id, ast.Store, target)
else:
self.emit_op(ops.POP_TOP)
return True
if values_count > 3:
return False
self.visit_sequence(values)
if values_count == 2:
self.emit_op(ops.ROT_TWO)
elif values_count == 3:
self.emit_op(ops.ROT_THREE)
self.emit_op(ops.ROT_TWO)
self.visit_sequence(targets)
return True
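    # Hedged illustration (comment only): "a, b = b, a" skips
    # BUILD_TUPLE/UNPACK_SEQUENCE entirely; both values are loaded and then
    # stored in reverse order. When a target is not a plain name, e.g.
    # "x.a, x.b = p, q", the loaded values are reordered with ROT_TWO (or
    # ROT_THREE plus ROT_TWO for three values) before storing left to right.
    # Starred targets or mismatched counts fall back and return False.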
def _annotation_evaluate(self, item):
# PEP 526 requires that some things be evaluated, to avoid bugs
# where a non-assigning variable annotation references invalid items
# this is effectively a NOP, but will fail if e.g. item is an
# Attribute and one of the chained names does not exist
item.walkabout(self)
self.emit_op(ops.POP_TOP)
def _annotation_eval_slice(self, target):
if isinstance(target, ast.Slice):
for val in [target.lower, target.upper, target.step]:
if val:
self._annotation_evaluate(val)
elif isinstance(target, ast.Tuple):
for val in target.elts:
self._annotation_eval_slice(val)
else:
self._annotation_evaluate(target)
def visit_AnnAssign(self, assign):
target = assign.target
# if there's an assignment to be done, do it
if assign.value:
assign.value.walkabout(self)
target.walkabout(self)
# the PEP requires that certain parts of the target be evaluated at runtime
# to avoid silent annotation-related errors
if isinstance(target, ast.Name):
# if it's just a simple name and we're not in a function, store
# the annotation in __annotations__
if assign.simple and not isinstance(self.scope, symtable.FunctionScope):
assign.annotation.walkabout(self)
self.emit_op_arg(ops.LOAD_NAME, self.add_name(self.names, '__annotations__'))
name = target.id
w_name = self.space.newtext(self.scope.mangle(name))
self.load_const(misc.intern_if_common_string(self.space, w_name))
self.emit_op(ops.STORE_SUBSCR)
elif isinstance(target, ast.Attribute):
# the spec requires that `a.b: int` evaluates `a`
# and in a non-function scope, also evaluates `int`
# (N.B.: if the target is of the form `a.b.c`, `a.b` will be evaluated)
if not assign.value:
attr = target.value
self._annotation_evaluate(attr)
elif isinstance(target, ast.Subscript):
if not assign.value:
# similar to the above, `a[0:5]: int` evaluates the name and the slice argument
# and if not in a function, also evaluates the annotation
sl = target.slice
self._annotation_evaluate(target.value)
self._annotation_eval_slice(sl)
else:
self.error("can't handle annotation with %s" % (target,), target)
# if this is not in a function, evaluate the annotation
if not (assign.simple or isinstance(self.scope, symtable.FunctionScope)):
self._annotation_evaluate(assign.annotation)
def visit_With(self, wih):
self.handle_withitem(wih, 0, is_async=False)
@specialize.argtype(1)
def handle_withitem(self, wih, pos, is_async):
body_block = self.new_block()
cleanup = self.new_block()
exit = self.new_block()
witem = wih.items[pos]
assert isinstance(witem, ast.withitem)
witem.context_expr.walkabout(self)
if not is_async:
self.emit_jump(ops.SETUP_WITH, cleanup)
fblock_kind = F_WITH
else:
self.emit_op(ops.BEFORE_ASYNC_WITH)
self.emit_op(ops.GET_AWAITABLE)
self.load_const(self.space.w_None)
self.emit_op(ops.YIELD_FROM)
self.emit_jump(ops.SETUP_ASYNC_WITH, cleanup)
fblock_kind = F_ASYNC_WITH
self.use_next_block(body_block)
self.push_frame_block(fblock_kind, body_block, cleanup)
if witem.optional_vars:
witem.optional_vars.walkabout(self)
else:
self.emit_op(ops.POP_TOP)
if pos == len(wih.items) - 1:
self._visit_body(wih.body)
else:
self.handle_withitem(wih, pos + 1, is_async=is_async)
self.emit_op(ops.POP_BLOCK)
self.pop_frame_block(fblock_kind, body_block)
# end of body, successful outcome, start cleanup
self.call_exit_with_nones()
if is_async:
self.emit_op(ops.GET_AWAITABLE)
self.load_const(self.space.w_None)
self.emit_op(ops.YIELD_FROM)
self.emit_op(ops.POP_TOP)
self.emit_jump(ops.JUMP_ABSOLUTE, exit, True)
# exceptional outcome
self.use_next_block(cleanup)
self.emit_op(ops.WITH_EXCEPT_START)
if is_async:
self.emit_op(ops.GET_AWAITABLE)
self.load_const(self.space.w_None)
self.emit_op(ops.YIELD_FROM)
exit2 = self.new_block()
self.emit_jump(ops.POP_JUMP_IF_TRUE, exit2, True)
self.emit_op(ops.RERAISE)
self.use_next_block(exit2)
self.emit_op(ops.POP_TOP)
self.emit_op(ops.POP_EXCEPT)
self.use_next_block(exit)
def visit_AsyncWith(self, wih):
if not self._check_async_function():
self.error("'async with' outside async function", wih)
self.handle_withitem(wih, 0, is_async=True)
def visit_Raise(self, rais):
arg = 0
if rais.exc:
rais.exc.walkabout(self)
arg += 1
if rais.cause:
rais.cause.walkabout(self)
arg += 1
self.emit_op_arg(ops.RAISE_VARARGS, arg)
def visit_Global(self, glob):
# Handled in symbol table building.
pass
def visit_Nonlocal(self, glob):
# Handled in symbol table building.
pass
def visit_Pass(self, pas):
pass
def visit_Expr(self, expr):
if self.interactive:
expr.value.walkabout(self)
self.emit_op(ops.PRINT_EXPR)
elif not isinstance(expr.value, ast.Constant):
expr.value.walkabout(self)
self.emit_op(ops.POP_TOP)
@update_pos_expr
def visit_Yield(self, yie):
if yie.value:
yie.value.walkabout(self)
else:
self.load_const(self.space.w_None)
self.emit_op(ops.YIELD_VALUE)
@update_pos_expr
def visit_YieldFrom(self, yfr):
yfr.value.walkabout(self)
self.emit_op(ops.GET_YIELD_FROM_ITER)
self.load_const(self.space.w_None)
self.emit_op(ops.YIELD_FROM)
@update_pos_expr
def visit_Await(self, aw):
if not self._check_async_function():
self.error("'await' outside async function", aw)
aw.value.walkabout(self)
self.emit_op(ops.GET_AWAITABLE)
self.load_const(self.space.w_None)
self.emit_op(ops.YIELD_FROM)
@update_pos_expr
def visit_Constant(self, const):
self.load_const(const.value)
@update_pos_expr
def visit_UnaryOp(self, op):
op.operand.walkabout(self)
self.emit_op(unary_operations(op.op))
@update_pos_expr
def visit_BoolOp(self, op):
if op.op == ast.And:
instr = ops.JUMP_IF_FALSE_OR_POP
else:
instr = ops.JUMP_IF_TRUE_OR_POP
end = self.new_block()
we_are_and = op.op == ast.And
last = len(op.values) - 1
for index in range(last):
value = op.values[index]
truth = value.as_constant_truth(
self.space, self.compile_info)
if truth == optimize.CONST_NOT_CONST:
value.walkabout(self)
self.emit_jump(instr, end, True)
continue
if (truth != optimize.CONST_TRUE) == we_are_and:
last = index
with self.all_dead_code(): # error checking
for i in range(index + 1, len(op.values)):
op.values[i].walkabout(self)
break
else:
with self.all_dead_code(): # error checking
value.walkabout(self)
op.values[last].walkabout(self)
self.use_next_block(end)
@update_pos_expr
def visit_Compare(self, comp):
self._check_compare(comp)
comp.left.walkabout(self)
ops_count = len(comp.ops)
cleanup = None
if ops_count > 1:
cleanup = self.new_block()
comp.comparators[0].walkabout(self)
for i in range(1, ops_count):
self.emit_op(ops.DUP_TOP)
self.emit_op(ops.ROT_THREE)
opcode, op_kind = compare_operations(comp.ops[i - 1])
self.emit_op_arg(opcode, op_kind)
self.emit_jump(ops.JUMP_IF_FALSE_OR_POP, cleanup, True)
if i < (ops_count - 1):
comp.comparators[i].walkabout(self)
last_op, last_comparator = comp.ops[-1], comp.comparators[-1]
if not self._optimize_comparator(last_op, last_comparator):
last_comparator.walkabout(self)
opcode, op_kind = compare_operations(last_op)
self.emit_op_arg(opcode, op_kind)
if ops_count > 1:
end = self.new_block()
self.emit_jump(ops.JUMP_FORWARD, end)
self.use_next_block(cleanup)
self.emit_op(ops.ROT_TWO)
self.emit_op(ops.POP_TOP)
self.use_next_block(end)
def _is_literal(self, node):
# to-do(isidentical): maybe include list, dict, sets?
if not isinstance(node, ast.Constant):
return False
for singleton in [
self.space.w_None,
self.space.w_True,
self.space.w_False,
self.space.w_Ellipsis
]:
if self.space.is_w(node.value, singleton):
return False
return True
def _check_compare(self, node):
left = node.left
for i in range(min(len(node.ops), len(node.comparators))):
op = node.ops[i]
right = node.comparators[i]
if op in (ast.Is, ast.IsNot) and (self._is_literal(left) or self._is_literal(right)):
if op is ast.Is:
operator, replacement = "is", "=="
else:
operator, replacement = "is not", "!="
misc.syntax_warning(
self.space,
'"%s" with a literal. Did you mean "%s"?'
% (operator, replacement),
self.compile_info.filename,
node.lineno,
node.col_offset
)
left = right
def _optimize_comparator(self, op, node):
"""Fold lists/sets of constants in the context of "in"/"not in".
lists are folded into tuples, sets into frozensets, otherwise
returns False
"""
if op in (ast.In, ast.NotIn):
is_list = isinstance(node, ast.List)
if is_list or isinstance(node, ast.Set):
w_const = self._tuple_of_consts(node.elts)
if w_const is not None:
if not is_list:
from pypy.objspace.std.setobject import (
W_FrozensetObject)
w_const = W_FrozensetObject(self.space, w_const)
self.load_const(w_const)
return True
return False
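    # Hedged illustration (comment only): "x in [1, 2, 3]" loads the constant
    # tuple (1, 2, 3) instead of building a list at runtime, and
    # "x in {1, 2, 3}" loads a prebuilt frozenset; non-constant elements fall
    # through and are compiled normally.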
def _tuple_of_consts(self, elts):
"""Return a tuple of consts from elts if possible, or None"""
count = len(elts) if elts is not None else 0
consts_w = [None] * count
for i in range(count):
w_value = elts[i].as_constant(self.space, self.compile_info)
if w_value is None:
# Not all constants
return None
consts_w[i] = w_value
return self.space.newtuple(consts_w)
@update_pos_expr
def visit_IfExp(self, ifexp):
end = self.new_block()
otherwise = self.new_block()
ifexp.test.accept_jump_if(self, False, otherwise)
ifexp.body.walkabout(self)
self.emit_jump(ops.JUMP_FORWARD, end)
self.use_next_block(otherwise)
ifexp.orelse.walkabout(self)
self.use_next_block(end)
def _visit_starunpack(self, node, elts, build_op, add_op, extend_op, is_tuple=False):
elt_count = len(elts) if elts else 0
contains_starred = False
for i in range(elt_count):
elt = elts[i]
if isinstance(elt, ast.Starred):
contains_starred = True
break
if not contains_starred:
if elt_count > MAX_STACKDEPTH_CONTAINERS:
if is_tuple:
self.emit_op_arg(ops.BUILD_LIST, 0)
add_op = ops.LIST_APPEND
else:
self.emit_op_arg(build_op, 0)
for elt in elts:
elt.walkabout(self)
self.emit_op_arg(add_op, 1)
if is_tuple:
self.emit_op(ops.LIST_TO_TUPLE)
else:
for i in range(elt_count):
elt = elts[i]
elt.walkabout(self)
if is_tuple:
self.emit_op_arg(ops.BUILD_TUPLE, elt_count)
else:
self.emit_op_arg(build_op, elt_count)
return
seen_star = False
for i in range(elt_count):
elt = elts[i]
if isinstance(elt, ast.Starred):
if not seen_star:
self.emit_op_arg(build_op, i)
seen_star = True
elt.value.walkabout(self)
self.emit_op_arg(extend_op, 1)
else:
elt.walkabout(self)
if seen_star:
self.emit_op_arg(add_op, 1)
assert seen_star
if is_tuple:
self.emit_op(ops.LIST_TO_TUPLE)
def _visit_assignment(self, node, elts, ctx):
elt_count = len(elts) if elts else 0
if ctx == ast.Store:
seen_star = False
for i in range(elt_count):
elt = elts[i]
is_starred = isinstance(elt, ast.Starred)
if is_starred and not seen_star:
if i >= 1 << 8 or elt_count - i - 1 >= (C_INT_MAX >> 8):
self.error("too many expressions in star-unpacking "
"assignment", node)
self.emit_op_arg(ops.UNPACK_EX,
i + ((elt_count - i - 1) << 8))
seen_star = True
elts[i] = elt.value
elif is_starred:
self.error("multiple starred expressions in assignment", elt)
if not seen_star:
self.emit_op_arg(ops.UNPACK_SEQUENCE, elt_count)
self.visit_sequence(elts)
def visit_Starred(self, star):
if star.ctx != ast.Store:
self.error("can't use starred expression here",
star)
self.error("starred assignment target must be in a list or tuple", star)
@update_pos_expr
def visit_Tuple(self, tup):
if tup.ctx == ast.Store:
self._visit_assignment(tup, tup.elts, tup.ctx)
elif tup.ctx == ast.Load:
self._visit_starunpack(tup, tup.elts, ops.BUILD_LIST, ops.LIST_APPEND, ops.LIST_EXTEND, is_tuple=True)
else:
self.visit_sequence(tup.elts)
@update_pos_expr
def visit_List(self, l):
if l.ctx == ast.Store:
self._visit_assignment(l, l.elts, l.ctx)
elif l.ctx == ast.Load:
self._visit_starunpack(l, l.elts, ops.BUILD_LIST, ops.LIST_APPEND, ops.LIST_EXTEND)
else:
self.visit_sequence(l.elts)
@update_pos_expr
def visit_Dict(self, d):
containers = 0
elements = 0
is_unpacking = False
all_constant_keys_w = None
if d.values:
unpacking_anywhere = False
for key in d.keys:
if key is None:
unpacking_anywhere = True
break
if not unpacking_anywhere and len(d.keys) > MAX_STACKDEPTH_CONTAINERS:
# do it in a small amount of stack
self.emit_op_arg(ops.BUILD_MAP, 0)
for i in range(len(d.values)):
key = d.keys[i]
assert key is not None
key.walkabout(self)
d.values[i].walkabout(self)
self.emit_op_arg(ops.MAP_ADD, 1)
return
assert len(d.keys) < 0xffff
all_constant_keys_w = []
for key in d.keys:
if key is None:
constant_key = None
else:
constant_key = key.as_constant(
self.space, self.compile_info)
if constant_key is None:
all_constant_keys_w = None
break
else:
all_constant_keys_w.append(constant_key)
for i in range(len(d.values)):
key = d.keys[i]
is_unpacking = key is None
if elements == 0xFFFF or (elements and is_unpacking):
assert all_constant_keys_w is None
self.emit_op_arg(ops.BUILD_MAP, elements)
if containers > 0:
self.emit_op(ops.DICT_UPDATE)
else:
containers = 1
elements = 0
if is_unpacking:
if containers == 0:
self.emit_op_arg(ops.BUILD_MAP, 0)
containers = 1
assert all_constant_keys_w is None
d.values[i].walkabout(self)
self.emit_op(ops.DICT_UPDATE)
else:
if not all_constant_keys_w:
key.walkabout(self)
d.values[i].walkabout(self)
elements += 1
if elements or containers == 0:
if all_constant_keys_w:
w_tup = self.space.newtuple(all_constant_keys_w)
self.load_const(w_tup)
self.emit_op_arg(ops.BUILD_CONST_KEY_MAP, elements)
containers = 1
else:
self.emit_op_arg(ops.BUILD_MAP, elements)
if containers > 0:
self.emit_op(ops.DICT_UPDATE)
else:
containers = 1
assert containers == 1
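    # Hedged illustration (comment only): a display like {"a": x, "b": y} whose
    # keys are all constants compiles to the two value loads, a LOAD_CONST of
    # the key tuple ("a", "b") and BUILD_CONST_KEY_MAP 2, while **unpacking or
    # non-constant keys take the BUILD_MAP / DICT_UPDATE path instead.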
@update_pos_expr
def visit_Set(self, s):
self._visit_starunpack(s, s.elts, ops.BUILD_SET, ops.SET_ADD, ops.SET_UPDATE)
@update_pos_expr
def visit_Name(self, name):
self.name_op(name.id, name.ctx, name)
def visit_keyword(self, keyword):
if keyword.arg is not None:
self.load_const(self.space.newtext(keyword.arg))
keyword.value.walkabout(self)
def _load_constant_tuple(self, content_w):
self.load_const(self.space.newtuple(content_w[:]))
def _make_call(self, nargs_pushed, args, keywords):
space = self.space
CallCodeGenerator(self, nargs_pushed, args, keywords).emit_call()
@update_pos_expr
def visit_Call(self, call):
if self._optimize_method_call(call):
return
self._check_caller(call.func)
call.func.walkabout(self)
self._make_call(0, call.args, call.keywords)
def _check_caller(self, func):
if func._literal_type:
misc.syntax_warning(
self.space,
"'%s' object is not callable; perhaps you "
"missed a comma?" % func._get_type_name(self.space),
self.compile_info.filename,
func.lineno,
func.col_offset
)
def _call_has_no_star_args(self, call):
if call.args is not None:
for elt in call.args:
if isinstance(elt, ast.Starred):
return False
if call.keywords is not None:
for kw in call.keywords:
assert isinstance(kw, ast.keyword)
if kw.arg is None:
return False
return True
def _call_has_simple_args(self, call):
return self._call_has_no_star_args(call) and not call.keywords
def _optimize_method_call(self, call):
space = self.space
if not self._call_has_no_star_args(call) or \
not isinstance(call.func, ast.Attribute):
return False
attr_lookup = call.func
assert isinstance(attr_lookup, ast.Attribute)
attr_lookup.value.walkabout(self)
self.emit_op_name(ops.LOAD_METHOD, self.names, attr_lookup.attr)
self.visit_sequence(call.args)
arg_count = len(call.args) if call.args is not None else 0
if not call.keywords:
self.emit_op_arg(ops.CALL_METHOD, arg_count)
else:
keyword_names_w = []
for kw in call.keywords:
assert isinstance(kw, ast.keyword)
assert kw.arg # checked by self._call_has_no_star_args
w_name = space.newtext(kw.arg)
keyword_names_w.append(misc.intern_if_common_string(space, w_name))
kw.value.walkabout(self)
self._load_constant_tuple(keyword_names_w)
self.emit_op_arg(ops.CALL_METHOD_KW, len(keyword_names_w) + arg_count)
return True
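    # Hedged illustration (comment only): a call such as obj.method(x, y) is
    # compiled as LOAD_METHOD plus CALL_METHOD 2 rather than LOAD_ATTR followed
    # by CALL_FUNCTION, avoiding a temporary bound-method object; calls using
    # *args or **kwargs keep the generic CallCodeGenerator path.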
@update_pos_expr
def visit_ListComp(self, lc):
self._compile_comprehension(lc, "<listcomp>",
ComprehensionCodeGenerator)
def _comp_generator(self, node, generators, gen_index):
gen = generators[gen_index]
assert isinstance(gen, ast.comprehension)
if gen.is_async:
self._comp_async_generator(node, generators, gen_index)
else:
self._comp_sync_generator(node, generators, gen_index)
def _comp_sync_generator(self, node, generators, gen_index):
start = self.new_block()
if_cleanup = self.new_block()
anchor = self.new_block()
gen = generators[gen_index]
assert isinstance(gen, ast.comprehension)
if gen_index > 0:
gen.iter.walkabout(self)
self.emit_op(ops.GET_ITER)
self.use_next_block(start)
self.emit_jump(ops.FOR_ITER, anchor)
self.use_next_block()
gen.target.walkabout(self)
if gen.ifs:
for if_ in gen.ifs:
if_.accept_jump_if(self, False, if_cleanup)
self.use_next_block()
gen_index += 1
if gen_index < len(generators):
self._comp_generator(node, generators, gen_index)
else:
node.accept_comp_iteration(self, gen_index)
self.use_next_block(if_cleanup)
self.emit_jump(ops.JUMP_ABSOLUTE, start, True)
self.use_next_block(anchor)
def _comp_async_generator(self, node, generators, gen_index):
b_start = self.new_block()
b_except = self.new_block()
b_if_cleanup = self.new_block()
gen = generators[gen_index]
assert isinstance(gen, ast.comprehension)
if gen_index > 0:
gen.iter.walkabout(self)
self.emit_op(ops.GET_AITER)
self.use_next_block(b_start)
self.emit_jump(ops.SETUP_EXCEPT, b_except)
self.emit_op(ops.GET_ANEXT)
self.load_const(self.space.w_None)
self.emit_op(ops.YIELD_FROM)
self.emit_op(ops.POP_BLOCK)
gen.target.walkabout(self)
if gen.ifs:
for if_ in gen.ifs:
if_.accept_jump_if(self, False, b_if_cleanup)
self.use_next_block()
gen_index += 1
if gen_index < len(generators):
self._comp_generator(node, generators, gen_index)
else:
node.accept_comp_iteration(self, gen_index)
self.use_next_block(b_if_cleanup)
self.emit_jump(ops.JUMP_ABSOLUTE, b_start, True)
self.use_next_block(b_except)
self.emit_op(ops.END_ASYNC_FOR)
def _compile_comprehension(self, node, name, sub_scope):
is_async_function = self.scope.is_coroutine
code, qualname = self.sub_scope(sub_scope, name, node, node.lineno)
is_async_comprehension = self.symbols.find_scope(node).is_coroutine
if is_async_comprehension and not is_async_function:
if not isinstance(node, ast.GeneratorExp):
if self.allows_top_level_await():
self.is_async_seen = True
else:
self.error("asynchronous comprehension outside of "
"an asynchronous function", node)
self.update_position(node.lineno)
self._make_function(code, qualname=qualname)
first_comp = node.get_generators()[0]
assert isinstance(first_comp, ast.comprehension)
first_comp.iter.walkabout(self)
if first_comp.is_async:
self.emit_op(ops.GET_AITER)
else:
self.emit_op(ops.GET_ITER)
self.emit_op_arg(ops.CALL_FUNCTION, 1)
if is_async_comprehension and sub_scope is not GenExpCodeGenerator:
self.emit_op(ops.GET_AWAITABLE)
self.load_const(self.space.w_None)
self.emit_op(ops.YIELD_FROM)
@update_pos_expr
def visit_GeneratorExp(self, genexp):
self._compile_comprehension(genexp, "<genexpr>", GenExpCodeGenerator)
@update_pos_expr
def visit_SetComp(self, setcomp):
self._compile_comprehension(setcomp, "<setcomp>",
ComprehensionCodeGenerator)
@update_pos_expr
def visit_DictComp(self, dictcomp):
self._compile_comprehension(dictcomp, "<dictcomp>",
ComprehensionCodeGenerator)
def check_forbidden_name(self, name, node, ctx=ast.Store):
if misc.check_forbidden_name(self.space, name):
if ctx == ast.Store:
self.error("cannot assign to " + name, node)
else:
assert ctx == ast.Del
self.error("cannot delete " + name, node)
@update_pos_expr
def visit_Attribute(self, attr):
names = self.names
ctx = attr.ctx
attr.value.walkabout(self)
if ctx == ast.Load:
self.emit_op_name(ops.LOAD_ATTR, names, attr.attr)
return
self.check_forbidden_name(attr.attr, attr, ctx)
if ctx == ast.Store:
self.emit_op_name(ops.STORE_ATTR, names, attr.attr)
elif ctx == ast.Del:
self.emit_op_name(ops.DELETE_ATTR, names, attr.attr)
else:
raise AssertionError("unknown context")
@update_pos_expr
def visit_Slice(self, slc):
if slc.lower:
slc.lower.walkabout(self)
else:
self.load_const(self.space.w_None)
if slc.upper:
slc.upper.walkabout(self)
else:
self.load_const(self.space.w_None)
arg = 2
if slc.step:
slc.step.walkabout(self)
arg += 1
self.emit_op_arg(ops.BUILD_SLICE, arg)
@update_pos_expr
def visit_Subscript(self, sub):
ctx = sub.ctx
if ctx == ast.Load:
self._check_subscripter(sub.value)
self._check_index(sub, sub.value, sub.slice)
op = ops.BINARY_SUBSCR
elif ctx == ast.Store:
op = ops.STORE_SUBSCR
elif ctx == ast.Del:
op = ops.DELETE_SUBSCR
else:
assert 0
sub.value.walkabout(self)
sub.slice.walkabout(self)
self.emit_op(op)
def _check_subscripter(self, sub):
if (
isinstance(sub, ast.Constant)
and (
self.space.isinstance_w(sub.value, self.space.w_tuple)
or self.space.isinstance_w(sub.value, self.space.w_unicode)
or self.space.isinstance_w(sub.value, self.space.w_bytes)
)
):
return None
elif (type(sub) is not ast.Constant and type(sub) is not ast.Set and
type(sub) is not ast.SetComp and
type(sub) is not ast.GeneratorExp and
type(sub) is not ast.Lambda):
return None
misc.syntax_warning(
self.space,
"'%s' object is not subscriptable; perhaps"
" you missed a comma?" % sub._get_type_name(self.space),
self.compile_info.filename,
sub.lineno,
sub.col_offset
)
def _check_index(self, node, sub, index):
if not index._literal_type:
return None
if isinstance(index, ast.Constant) and self.space.isinstance_w(
index.value, self.space.w_int
):
return None
if not (
isinstance(sub, ast.Constant)
and (
self.space.isinstance_w(sub.value, self.space.w_tuple)
or self.space.isinstance_w(sub.value, self.space.w_unicode)
or self.space.isinstance_w(sub.value, self.space.w_bytes)
)
):
return None
if (
type(sub) is not ast.Constant and
type(sub) is not ast.Tuple and
type(sub) is not ast.List and
type(sub) is not ast.ListComp and
type(sub) is not ast.JoinedStr and
type(sub) is not ast.FormattedValue
):
return None
# not quotes (on purpose to comply with TypeErrors)
misc.syntax_warning(
self.space,
"%s indices must be integers or slices, "
"not %s; perhaps you missed a comma?" % (
sub._get_type_name(self.space),
index._get_type_name(self.space)
),
self.compile_info.filename,
node.lineno,
node.col_offset
)
@update_pos_expr
def visit_JoinedStr(self, joinedstr):
for node in joinedstr.values:
node.walkabout(self)
if len(joinedstr.values) != 1:
self.emit_op_arg(ops.BUILD_STRING, len(joinedstr.values))
@update_pos_expr
def visit_FormattedValue(self, fmt):
fmt.value.walkabout(self)
arg = 0
if fmt.conversion == ord('s'): arg = consts.FVC_STR
if fmt.conversion == ord('r'): arg = consts.FVC_REPR
if fmt.conversion == ord('a'): arg = consts.FVC_ASCII
if fmt.format_spec is not None:
arg |= consts.FVS_HAVE_SPEC
fmt.format_spec.walkabout(self)
self.emit_op_arg(ops.FORMAT_VALUE, arg)
@update_pos_expr
def visit_NamedExpr(self, namedexpr):
namedexpr.value.walkabout(self)
self.emit_op(ops.DUP_TOP)
namedexpr.target.walkabout(self)
def _revdb_metavar(self, node):
# moved in its own function for the import statement
from pypy.interpreter.reverse_debugging import dbstate
if not dbstate.standard_code:
self.emit_op_arg(ops.LOAD_REVDB_VAR, node.metavar)
return True
return False
@update_pos_expr
def visit_RevDBMetaVar(self, node):
if self.space.reverse_debugging and self._revdb_metavar(node):
return
self.error("Unknown character ('$NUM' is only valid in the "
"reverse-debugger)", node)
def allows_top_level_await(self):
return (
self._allow_top_level_await
and isinstance(self.scope, symtable.ModuleScope)
)
class TopLevelCodeGenerator(PythonCodeGenerator):
def __init__(self, space, tree, symbols, compile_info):
self.is_async_seen = False
PythonCodeGenerator.__init__(self, space, "<module>", tree, -1,
symbols, compile_info, qualname=None)
def _compile(self, tree):
if isinstance(tree, ast.Module):
if tree.body:
self.first_lineno = tree.body[0].lineno
else:
self.first_lineno = self.lineno = 1
self._maybe_setup_annotations()
tree.walkabout(self)
def _get_code_flags(self):
flags = 0
if not self.cell_vars and not self.free_vars:
flags |= consts.CO_NOFREE
if self.scope.doc_removable:
flags |= consts.CO_KILL_DOCSTRING
if self.is_async_seen:
flags |= consts.CO_COROUTINE
return flags
def _check_async_function(self):
top_level = self.allows_top_level_await()
if top_level:
self.is_async_seen = True
return top_level
class AbstractFunctionCodeGenerator(PythonCodeGenerator):
def _get_code_flags(self):
scope = self.scope
assert isinstance(scope, symtable.FunctionScope)
flags = consts.CO_NEWLOCALS
if scope.optimized:
flags |= consts.CO_OPTIMIZED
if scope.nested:
flags |= consts.CO_NESTED
if scope.is_generator and not scope.is_coroutine:
flags |= consts.CO_GENERATOR
if not scope.is_generator and scope.is_coroutine:
flags |= consts.CO_COROUTINE
if scope.is_generator and scope.is_coroutine:
flags |= consts.CO_ASYNC_GENERATOR
if scope.has_yield_inside_try:
flags |= consts.CO_YIELD_INSIDE_TRY
if scope.has_variable_arg:
flags |= consts.CO_VARARGS
if scope.has_keywords_arg:
flags |= consts.CO_VARKEYWORDS
if scope.doc_removable:
flags |= consts.CO_KILL_DOCSTRING
if not self.cell_vars and not self.free_vars:
flags |= consts.CO_NOFREE
return PythonCodeGenerator._get_code_flags(self) | flags
def _init_argcounts(self, args):
if args.posonlyargs:
self.argcount += len(args.posonlyargs)
self.posonlyargcount = len(args.posonlyargs)
if args.args:
self.argcount += len(args.args)
if args.kwonlyargs:
self.kwonlyargcount = len(args.kwonlyargs)
class FunctionCodeGenerator(AbstractFunctionCodeGenerator):
def _compile(self, func):
assert isinstance(func, ast.FunctionDef)
self.first_lineno = func.lineno
if func.decorator_list and func.decorator_list[0].lineno > 0:
self.first_lineno = func.decorator_list[0].lineno
has_docstring = self.ensure_docstring_constant(func.body)
args = func.args
assert isinstance(args, ast.arguments)
self._init_argcounts(args)
start = 1 if has_docstring else 0
self._visit_body(func.body, start)
class AsyncFunctionCodeGenerator(AbstractFunctionCodeGenerator):
def _compile(self, func):
assert isinstance(func, ast.AsyncFunctionDef)
self.first_lineno = func.lineno
if func.decorator_list and func.decorator_list[0].lineno > 0:
self.first_lineno = func.decorator_list[0].lineno
has_docstring = self.ensure_docstring_constant(func.body)
args = func.args
assert isinstance(args, ast.arguments)
self._init_argcounts(args)
start = 1 if has_docstring else 0
self._visit_body(func.body, start)
def _check_async_function(self):
return True
class LambdaCodeGenerator(AbstractFunctionCodeGenerator):
def _compile(self, lam):
assert isinstance(lam, ast.Lambda)
args = lam.args
assert isinstance(args, ast.arguments)
self._init_argcounts(args)
# Prevent a string from being the first constant and thus a docstring.
self.add_const(self.space.w_None)
lam.body.walkabout(self)
self.emit_op(ops.RETURN_VALUE)
class ComprehensionCodeGenerator(AbstractFunctionCodeGenerator):
def _compile(self, node):
self.argcount = 1
assert isinstance(node, ast.expr)
self.update_position(node.lineno)
node.build_container_and_load_iter(self)
self._comp_generator(node, node.get_generators(), 0)
self._end_comp()
def comprehension_load_iter(self):
self.emit_op_arg(ops.LOAD_FAST, 0)
def _end_comp(self):
self.emit_op(ops.RETURN_VALUE)
def _check_async_function(self):
return True
class GenExpCodeGenerator(ComprehensionCodeGenerator):
def _end_comp(self):
pass
def _get_code_flags(self):
flags = ComprehensionCodeGenerator._get_code_flags(self)
return flags | consts.CO_GENERATOR
class ClassCodeGenerator(PythonCodeGenerator):
def _compile(self, cls):
assert isinstance(cls, ast.ClassDef)
self.ensure_docstring_constant(cls.body)
self.first_lineno = cls.lineno
if cls.decorator_list and cls.decorator_list[0].lineno > 0:
self.first_lineno = cls.decorator_list[0].lineno
self.lineno = self.first_lineno
self.argcount = 1
# load (global) __name__ ...
self.name_op("__name__", ast.Load, None)
# ... and store it as __module__
self.name_op("__module__", ast.Store, None)
# store the qualname
w_qualname = self.space.newtext(self.qualname)
self.load_const(w_qualname)
self.name_op("__qualname__", ast.Store, None)
self._maybe_setup_annotations()
# compile the body proper
self._handle_body(cls.body)
# return the (empty) __class__ cell
scope = self.scope.lookup("__class__")
if scope == symtable.SCOPE_CELL_CLASS:
# Return the cell where to store __class__
self.emit_op_arg(ops.LOAD_CLOSURE, self.cell_vars["__class__"])
self.emit_op(ops.DUP_TOP)
self.name_op("__classcell__", ast.Store, None)
else:
# This happens when nobody references the cell
self.load_const(self.space.w_None)
self.emit_op(ops.RETURN_VALUE)
def _get_code_flags(self):
flags = 0
if self.scope.doc_removable:
flags |= consts.CO_KILL_DOCSTRING
return PythonCodeGenerator._get_code_flags(self) | flags
class CallCodeGenerator(object):
def __init__(self, codegenerator, nargs_pushed, args, keywords):
self.space = codegenerator.space
self.codegenerator = codegenerator
self.nargs_pushed = nargs_pushed
self.args = args
self.keywords = keywords
self.have_starargs = False
# whether a keyword-arguments dict has been pushed on the stack
self.have_kwargs = False
self.keyword_names_w = []
self.seen_keyword_names = {}
def _make_starargs_list(self):
if not self.have_starargs:
self.codegenerator.emit_op_arg(ops.BUILD_LIST, self.nargs_pushed)
self.have_starargs = True
self.nargs_pushed = 0
else:
assert self.nargs_pushed == 0
def _push_args(self):
if len(self.args) == 1 and not self.nargs_pushed:
arg = self.args[0]
if isinstance(arg, ast.Starred):
arg.value.walkabout(self.codegenerator)
self.have_starargs = True
return
for elt in self.args:
if isinstance(elt, ast.Starred):
# we have a *arg
self._make_starargs_list()
elt.value.walkabout(self.codegenerator)
self.codegenerator.emit_op_arg(ops.LIST_EXTEND, 1)
continue
if self.nargs_pushed >= MAX_STACKDEPTH_CONTAINERS // 2:
# stack depth getting too big
self._make_starargs_list()
elt.walkabout(self.codegenerator)
if self.have_starargs:
self.codegenerator.emit_op_arg(ops.LIST_APPEND, 1)
else:
self.nargs_pushed += 1
if self.have_starargs:
self.codegenerator.emit_op(ops.LIST_TO_TUPLE)
def _pack_kwargs_into_dict(self):
if self.keyword_names_w:
self.codegenerator._load_constant_tuple(self.keyword_names_w)
# XXX use BUILD_MAP for size 1?
self.codegenerator.emit_op_arg(ops.BUILD_CONST_KEY_MAP, len(self.keyword_names_w))
self.keyword_names_w = []
if self.have_kwargs: # we already have a map, merge the new one in
self.codegenerator.emit_op(ops.DICT_MERGE)
self.have_kwargs = True
def _push_kwargs(self):
for kw in self.keywords:
assert isinstance(kw, ast.keyword)
self.codegenerator.check_forbidden_name(kw.arg, kw)
if kw.arg is None:
# if we see **args or if the number of keywords is huge,
# pack up keywords on the stack so far
self._pack_kwargs_into_dict()
if not self.have_kwargs:
# no kwargs, build an empty dict
self.codegenerator.emit_op_arg(ops.BUILD_MAP, 0)
kw.value.walkabout(self.codegenerator)
self.codegenerator.emit_op(ops.DICT_MERGE)
self.have_kwargs = True
continue
if kw.arg in self.seen_keyword_names:
self.codegenerator.error(
"keyword argument repeated: '%s'" % (kw.arg, ), kw)
self.seen_keyword_names[kw.arg] = None
if len(self.keyword_names_w) > MAX_STACKDEPTH_CONTAINERS // 2:
self._pack_kwargs_into_dict()
w_name = self.space.newtext(kw.arg)
self.keyword_names_w.append(misc.intern_if_common_string(self.space, w_name))
kw.value.walkabout(self.codegenerator)
def _make_starargs_at_end(self):
if self.nargs_pushed == 0:
self.codegenerator._load_constant_tuple([])
else:
self.codegenerator.emit_op_arg(ops.BUILD_TUPLE, self.nargs_pushed)
self.have_starargs = True
def _push_tuple_positional_args_if_necessary(self):
if self.have_starargs:
# can't use CALL_FUNCTION_KW anyway, because we already have a
# tuple as the positional args
return
# we might get away with using CALL_FUNCTION_KW if there are no **kwargs
for kw in self.keywords:
assert isinstance(kw, ast.keyword)
if kw.arg is None:
# we found a **kwarg, thus we're using CALL_FUNCTION_EX, we
# need to pack up positional arguments first
self._make_starargs_at_end()
break
if not self.have_starargs and len(self.keywords) > MAX_STACKDEPTH_CONTAINERS // 2:
# we have a huge amount of keyword args, thus we also need to use
# CALL_FUNCTION_EX
self._make_starargs_at_end()
def emit_call(self):
keywords = self.keywords
codegenerator = self.codegenerator
space = self.space
if self.args is not None:
self._push_args()
# Repeat procedure for keyword args
if keywords is None or len(keywords) == 0:
if not self.have_starargs:
# no *args, no keyword args, no **kwargs
codegenerator.emit_op_arg(ops.CALL_FUNCTION, self.nargs_pushed)
return
else:
self._push_tuple_positional_args_if_necessary()
self._push_kwargs()
if not self.have_kwargs and not self.have_starargs:
# can use CALL_FUNCTION_KW
assert len(self.keyword_names_w) > 0 # otherwise we would have used CALL_FUNCTION
codegenerator._load_constant_tuple(self.keyword_names_w)
codegenerator.emit_op_arg(ops.CALL_FUNCTION_KW, self.nargs_pushed + len(self.keyword_names_w))
else:
self._pack_kwargs_into_dict()
codegenerator.emit_op_arg(ops.CALL_FUNCTION_EX, int(self.have_kwargs))
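# Summary of the opcode selection above: plain positional calls use
# CALL_FUNCTION; calls with keyword arguments but no *args/**kwargs (and no
# oversized argument lists) use CALL_FUNCTION_KW with a constant tuple of
# keyword names; everything else (starred arguments, **kwargs, or argument
# lists long enough to threaten the stack depth limit) falls back to
# CALL_FUNCTION_EX with the positionals packed into a tuple and the keywords
# merged into a dict.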
|
py | b40543398073b38678a22de891b38e5b592ef492 | from serial.tools.list_ports import comports
import tkinter as tk
import tkinter.ttk as ttk
import tkinter.scrolledtext as st
#import ddbcore
from . import ddbcore
# def Connect(port, baud):
# print("Connect to", port, "with baud rate", baud)
# if port != "":
# ser = pyserial.Serial(port=port,baudrate=baud,
# parity=pyserial.PARITY_NONE,stopbits=pyserial.STOPBITS_ONE,bytesize=pyserial.EIGHTBITS,
# timeout=0)
# Text_box.insert(tk.END, "*** connected to: " + ser.portstr + "\n")
# return ser
# else:
# return None
#
# def Disconnect(ser):
# ser.close()
# print("Disconnected")
WindowRunning = True
def OnWindowClosed():
global WindowRunning
print("DDWifiBridgeWindow closed!")
WindowRunning = False
Window.destroy()
def ClickedConnect():
port = Port_combo.get()
baud = Baud_combo.get()
try:
wifiPort = int(WifiPort_entry.get())
except:
wifiPort = ddbcore.DefWifiPort
ddbcore.InvokeConnect(port, baud, wifiPort)
def ClickedClear():
Text_box.delete('1.0', tk.END)
def FillPortCombo(port=None):
ports = []
if port != None:
ports.append(port)
for comps in comports():
ports.append(comps[0])
Port_combo['values'] = ports
if len(ports) > 0:
if port != None:
Port_combo.set(port)
else:
Port_combo.current(0)
def FillBaudCombo(baud=None):
global DefBaudRate
bauds = [2400, 4800, 9600, 14400, 19200, 38400, 57600, 115200, 128000]
Baud_combo['values'] = bauds
#Baud_combo.current(7)
Baud_combo.set(baud if baud != None else ddbcore.DefBaudRate)
def OnDisconnected():
Port_combo["state"] = "normal"
Baud_combo["state"] = "normal"
WifiPort_entry["state"] = "normal"
Text_box.insert(tk.END, "*** disconnected\n")
def InitWindow(param_dict=None):
global Window
global Show_state
global Auto_scroll_state
global Connect_button
global Port_combo
global Baud_combo
global WifiPort_entry
global Text_box
port = None
baud = None
wifi_port = None
if param_dict != None:
port = param_dict["port"]
baud = param_dict.get("baud")
wifi_port = param_dict.get("wifiPort")
Window = tk.Tk()
Window.protocol("WM_DELETE_WINDOW", OnWindowClosed)
Window.title("DumbDisplay WIFI Bridge")
Window.geometry("800x600")
Show_state = tk.BooleanVar()
Show_state.set(True)
Auto_scroll_state = tk.BooleanVar()
Auto_scroll_state.set(True)
tool_bar = tk.Frame()
Connect_button = tk.Button(tool_bar, command=ClickedConnect)
Port_combo = ttk.Combobox(tool_bar, postcommand=FillPortCombo)
Baud_combo = ttk.Combobox(tool_bar)
direction_label = tk.Label(tool_bar, text=' <==> ')
wifiHost_label = tk.Label(tool_bar, text=ddbcore.WifiHost+':')
WifiPort_entry = tk.Entry(tool_bar, width=6)
spacer_label = tk.Label(tool_bar, text=' | ')
show_check = tk.Checkbutton(tool_bar, text='Show', var=Show_state)
#clear_button = tk.Button(tool_bar, text='Clear', command=ClickedClear)
auto_scroll_check = tk.Checkbutton(tool_bar, text='Auto Scroll', var=Auto_scroll_state,
command=lambda: Text_box.mark_set("insert", tk.END))
clear_button = tk.Button(tool_bar, text='Clear', command=ClickedClear)
Connect_button.pack(side=tk.LEFT)
Port_combo.pack(side=tk.LEFT)
Baud_combo.pack(side=tk.LEFT)
direction_label.pack(side=tk.LEFT)
wifiHost_label.pack(side=tk.LEFT)
WifiPort_entry.pack(side=tk.LEFT)
spacer_label.pack(side=tk.LEFT)
show_check.pack(side=tk.LEFT)
auto_scroll_check.pack(side=tk.LEFT)
clear_button.pack(side=tk.LEFT)
tool_bar.pack()
Text_box = st.ScrolledText(width=100, height=1000)
Text_box.pack(fill=tk.BOTH)
FillPortCombo(port)
FillBaudCombo(baud)
WifiPort_entry.insert(0, str(wifi_port if wifi_port != None else ddbcore.DefWifiPort))
# def Initialize():
# Window = tk.Tk()
# Window.title("DumbDispaly WIFI Bridge")
# Window.geometry("800x600")
# Auto_scroll_state = tk.BooleanVar()
# Auto_scroll_state.set(True)
def RunDDBridgeWinMain(param_dict = None):
ddui = DDWinUserInterface(param_dict)
ddbcore.RunDDBridgeMain(ddui)
class DDWinUserInterface(ddbcore.DDUserInterface):
def __init__(self, param_dict = None):
self.param_dict = param_dict
def initialize(self):
InitWindow(self.param_dict)
def syncConnectionState(self, connected):
Connect_button.config(text="Disconnect" if connected else "Connect", fg="white" if connected else "green", bg="gray" if connected else "lightgrey")
def onConnected(self):
Port_combo["state"] = "disabled"
Baud_combo["state"] = "disabled"
WifiPort_entry["state"] = "disabled"
#Text_box.insert(tk.END, "*** connected\n")
def onDisconnected(self):
try:
Port_combo["state"] = "normal"
Baud_combo["state"] = "normal"
WifiPort_entry["state"] = "normal"
#Text_box.insert(tk.END, "*** disconnected\n")
except:
pass
def isUIRunning(self):
return WindowRunning
def timeSlice(self):
Window.update()
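# bridge_send below appends each bridged line to the text box; when the
# insertion cursor is not on the last line (i.e. the user scrolled up or
# clicked into the log), Auto Scroll is switched off so the view stays where
# the user left it, and raw bytes payloads are shown as a '......' placeholder.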
def bridge_send(self, transDir, line):
# global Window
# global Auto_scroll_state
if Show_state.get():
if Auto_scroll_state.get():
Text_box.see(tk.END)
if True:
pos = Text_box.index(tk.INSERT)
end_pos = Text_box.index(tk.END)
line_count = int(end_pos.split('.')[0]) - 1
check_pos = str(line_count) + '.0'
if pos != check_pos:
Auto_scroll_state.set(False)
#Window.update()
if isinstance(line, bytes):
Text_box.insert(tk.END, '......\n')
else:
Text_box.insert(tk.END, transDir + ' ' + line + '\n')
def printLogMessage(self, msg):
print(msg)
def printControlMessage(self, msg):
Text_box.insert(tk.END, msg + "\n")
pass
# if __name__ == "__main__":
# print("Please run DDWifiBridge.py instead!!!")
|
py | b4054476b1adbaab3a733cdb52ff288930ffad75 | import webview
HTML_CODE = """
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title>py webview demonstrate</title>
<style>
body, html {
margin: 0;
padding: 0;
width: 100%;
height: 100%;
box-sizing: border-box;
font-family: "Microsoft YaHei UI";
overflow: hidden;
user-select: none;
background-color: #d7d7d7;
}
h1 {
font-size: 16px;
text-align: center;
margin: 15px;
}
div {
margin: 0 auto;
text-align: center;
}
button {
display: inline-block;
font-size: 14px;
width: 600px;
padding: 5px;
margin: 5px;
text-align: left;
color: #2a5aff;
}
button>span {
display: inline-block;
width: 150px;
text-align: right;
color: black;
}
</style>
</head>
<body>
<h1>
当前时间:<span id="random"></span> <br/>
当前窗口ID:<span id="browser_id"></span> <br/>
</h1>
<div>
<button onclick="loadUrl()">
<span>弹出一个新的窗口:</span>
window.__cef__.open(param: JsonObject)
</button>
<br/>
<button onclick="window.__cef__.close()">
<span>主调关闭窗口:</span>
window.__cef__.close()
</button>
<br/>
<button onclick="window.__cef__.closeAll()">
<span>关闭所有窗口:</span>
window.__cef__.closeAll()
</button>
<br/>
<button onclick="window.__cef__.toggleFullScreen()">
<span>切换全屏:</span>
window.__cef__.toggleFullScreen()
</button>
<br/>
</div>
<script>
function loadUrl() {
if (window.__cef__) {
window.__cef__.open({
url: 'http://localhost:8421/pywebview/burgeon/assets/index.html',
title: '伯俊软件',
payload: {
json: { a: 1, b: 2 },
array: [1, 2, 3],
str: 'str',
number: Math.PI,
}
});
}
}
const updateInfo = () => {
document.getElementById('random').innerText = new Date().toLocaleDateString() + ' ' + new Date().toLocaleTimeString()
document.getElementById('browser_id').innerText = window.windowId
};
window.onload = function() {
updateInfo();
setInterval(updateInfo, 1000)
};
const f1 = (e) => {
if (confirm('确定关闭当前窗口')) {
window.__cef__.close();
}
};
setTimeout(() => {
__cef__.addEventListener('windowCloseEvent', f1);
}, 10);
</script>
</body>
</html>
"""
if __name__ == '__main__':
webview.create_window(url=HTML_CODE, context_menu=True, url_type='string')
|
py | b40544a3e63a64d533903cbbd60b1704c44f0ad1 | """
Author(s): Nihal Dhamani ([email protected]),
Carson Schubert ([email protected])
Date Created: 04/10/2019
Helper functions for the TF Bounding Box plugin.
"""
import os
import shutil
import tarfile
import yaml
import click
import urllib.request
from colorama import init, Fore
from pathlib import Path
from ravenml.utils.local_cache import RMLCache
from ravenml.utils.question import user_confirms, user_input, user_selects
from ravenml.utils.plugins import raise_parameter_error
init()
def prepare_for_training(
bbox_cache: RMLCache,
base_dir: Path,
data_path: Path,
arch_path: Path,
model_type: str,
metadata: dict,
config: dict):
""" Prepares the system for training.
Creates artifact directory structure. Prompts user for choice of optimizer and
hyperparameters. Injects hyperparameters into config files. Adds hyperparameters
to given metadata dictionary.
Args:
bbox_cache (RMLCache): cache object for the bbox plugin
base_dir (Path): root of training directory
data_path (Path): path to dataset
arch_path (Path): path to model architecture directory
model_type (str): name of model type (e.g., ssd_inception_v1)
metadata (dict): metadata dictionary to add fields to
config (dict): plugin config from user provided config yaml
Returns:
bool: True if successful, False otherwise
"""
# hyperparameter metadata dictionary
hp_metadata = {}
# create a data folder within our base_directory
os.makedirs(base_dir / 'data')
# copy object-detection.pbtxt from dataset and move into training data folder
pbtxt_file = data_path / 'label_map.pbtxt'
shutil.copy(pbtxt_file, base_dir / 'data')
# calculate number of classes from pbtxt file
with open(pbtxt_file, "r") as f:
ids = [line for line in f if "id:" in line]
num_classes = len(ids)
# get num eval examples from file
num_eval_file = data_path / 'splits/complete/train/test.record.numexamples'
try:
with open(num_eval_file, "r") as f:
lines = f.readlines()
num_eval_examples = int(lines[0])
except:
num_eval_examples = 1
# create models, model, eval, and train folders
model_folder = base_dir / 'models' / 'model'
# model_folder = models_folder / 'model'
# os.makedirs(models_folder)
eval_folder = model_folder / 'eval'
train_folder = model_folder / 'train'
os.makedirs(model_folder)
os.makedirs(eval_folder)
os.makedirs(train_folder)
# load optimizer choices and prompt for selection
defaults = {}
defaults_path = Path(os.path.dirname(__file__)) / 'model_defaults' / f'{model_type}_defaults.yml'
with open(defaults_path, 'r') as stream:
try:
defaults = yaml.safe_load(stream)
except yaml.YAMLError as exc:
print(exc)
optimizer_name = config['optimizer'] if config.get('optimizer') else user_selects('Choose optimizer', defaults.keys())
hp_metadata['optimizer'] = optimizer_name
### PIPELINE CONFIG CREATION ###
# grab default config for the chosen optimizer
default_config = {}
try:
default_config = defaults[optimizer_name]
except KeyError as e:
hint = 'optimizer name, optimizer not supported for this model architecture.'
raise_parameter_error(optimizer_name, hint)
# create custom configuration if necessary
user_config = default_config
if not config.get('use_default_config'):
if config.get('hyperparameters'):
user_config = _process_user_hyperparameters(user_config, config['hyperparameters'])
else:
user_config = _configuration_prompt(user_config)
_print_config('Using configuration:', user_config)
# add to hyperparameter metadata dict
for field, value in user_config.items():
hp_metadata[field] = value
# load template pipeline file
cur_dir = Path(os.path.dirname(os.path.abspath(__file__)))
pipeline_file_name = f'{model_type}_{optimizer_name.lower()}.config'
pipeline_path = cur_dir / 'pipeline_templates' / pipeline_file_name
with open(pipeline_path) as template:
pipeline_contents = template.read()
# insert training directory path into config file
# TODO: figure out what the hell is going on here
if base_dir.name.endswith('/') or base_dir.name.endswith(r"\\"):
pipeline_contents = pipeline_contents.replace('<replace_path>', str(base_dir))
else:
if os.name == 'nt':
pipeline_contents = pipeline_contents.replace('<replace_path>', str(base_dir) + r"\\")
else:
pipeline_contents = pipeline_contents.replace('<replace_path>', str(base_dir) + '/')
# place TF record files into training directory
num_train_records = 0
num_test_records = 0
records_path = data_path / 'splits/complete/train'
for record_file in os.listdir(records_path):
if record_file.startswith('train.record-'):
num_train_records += 1
file_path = records_path / record_file
shutil.copy(file_path, base_dir / 'data')
if record_file.startswith('test.record-'):
num_test_records += 1
file_path = records_path / record_file
shutil.copy(file_path, base_dir / 'data')
# convert int to left zero padded string of length 5
user_config['num_train_records'] = str(num_train_records).zfill(5)
user_config['num_test_records'] = str(num_test_records).zfill(5)
# insert rest of config into config file
for key, value in user_config.items():
formatted = '<replace_' + key + '>'
pipeline_contents = pipeline_contents.replace(formatted, str(value))
# insert num classes into config file
pipeline_contents = pipeline_contents.replace('<replace_num_classes>', str(num_classes))
# insert num eval examples into config file
pipeline_contents = pipeline_contents.replace('<replace_num_eval_examples>', str(num_eval_examples))
# output final configuration file for training
with open(model_folder / 'pipeline.config', 'w') as file:
file.write(pipeline_contents)
# copy model checkpoints to our train folder
checkpoint_folder = arch_path
checkpoint0_folder = cur_dir / 'checkpoint_0'
file1 = checkpoint_folder / 'model.ckpt.data-00000-of-00001'
file2 = checkpoint_folder / 'model.ckpt.index'
file3 = checkpoint_folder / 'model.ckpt.meta'
file4 = checkpoint0_folder / 'model.ckpt-0.data-00000-of-00001'
file5 = checkpoint0_folder / 'model.ckpt-0.index'
file6 = checkpoint0_folder / 'model.ckpt-0.meta'
shutil.copy2(file1, train_folder)
shutil.copy2(file2, train_folder)
shutil.copy2(file3, train_folder)
shutil.copy2(file4, train_folder)
shutil.copy2(file5, train_folder)
shutil.copy2(file6, train_folder)
# load starting checkpoint template and insert training directory path
checkpoint_file = checkpoint0_folder / 'checkpoint'
with open(checkpoint_file) as cf:
checkpoint_contents = cf.read()
checkpoint_contents = checkpoint_contents.replace('<replace>', str(train_folder))
with open(train_folder / 'checkpoint', 'w') as new_cf:
new_cf.write(checkpoint_contents)
# update metadata and return success
metadata['hyperparameters'] = hp_metadata
return True
def download_model_arch(model_name: str, bbox_cache: RMLCache):
"""Downloads the model architecture with the given name.
Args:
model_name (str): model type
bbox_cache (RMLCache): cache object for the bbox plugin
Returns:
Path: path to model architecture
"""
url = 'http://download.tensorflow.org/models/object_detection/%s.tar.gz' %(model_name)
# make paths within bbox cache
bbox_cache.ensure_subpath_exists('bbox_model_archs')
archs_path = bbox_cache.path / 'bbox_model_archs'
untarred_path = archs_path / model_name
# check if download is required
if not bbox_cache.subpath_exists(untarred_path):
click.echo("Model checkpoint not found in cache. Downloading...")
# download tar file
tar_name = url.split('/')[-1]
tar_path = archs_path / tar_name
urllib.request.urlretrieve(url, tar_path)
click.echo("Untarring model checkpoint...")
if (tar_name.endswith("tar.gz")):
tar = tarfile.open(tar_path, "r:gz")
tar.extractall(path=archs_path)
tar.close()
# get rid of tar file
os.remove(tar_path)
else:
click.echo('Model checkpoint found in cache.')
return untarred_path
def _configuration_prompt(current_config: dict):
"""Prompts user to allow editing of current training configuration.
Args:
current_config (dict): current training configuration
Returns:
dict: updated training configuration
"""
_print_config('Current training configuration:', current_config)
if user_confirms('Edit default configuration?'):
for field in current_config:
if user_confirms(f'Edit {field}? (default: {current_config[field]})'):
current_config[field] = user_input(f'{field}:', default=str(current_config[field]))
return current_config
def _print_config(msg: str, config: dict):
"""Prints the given training configuration with colorization.
Args:
msg (str): message to print prior to printing config
config (dict): training configuration to print
"""
click.echo(msg)
for field, value in config.items():
click.echo(Fore.GREEN + f'{field}: ' + Fore.WHITE + f'{value}')
def _process_user_hyperparameters(current_config: dict, hyperparameters: dict):
"""Edits current training configuration based off parameters specified.
Args:
current_config (dict): current training configuration
hyperparameters (dict): training configuration specified by user
Returns:
dict: updated training configuration
"""
for parameter in hyperparameters.keys():
if(parameter not in current_config):
hint = f'hyperparameters, {parameter} is not supported for this model architecture.'
raise_parameter_error(parameter, hint)
current_config[parameter] = hyperparameters[parameter]
return current_config
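# Illustrative call sequence (a sketch only; the cache construction, paths,
# model name and config keys below are hypothetical and must be adapted to
# the actual ravenml setup):
#
#   cache = RMLCache(...)  # construct the plugin cache as ravenml expects
#   arch_path = download_model_arch('ssd_inception_v2_coco_2018_01_28', cache)
#   metadata = {}
#   ok = prepare_for_training(cache, Path('artifacts'), Path('dataset'),
#                             arch_path, 'ssd_inception_v2', metadata,
#                             {'use_default_config': True})
#
# On success the training directory contains data/ with the label map and TF
# records, and models/model/ with the filled-in pipeline.config plus the
# starting checkpoints copied from the downloaded architecture.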
|
py | b40544e5c5793830c212c7cb2b9c2ff902095f51 | """
:Authors: - Wilker Aziz
"""
from dgm4nlp.recipes import smart_ropen
from dgm4nlp.nlputils import Tokenizer
from dgm4nlp.nlputils import Multitext
from dgm4nlp.nlputils import read_naacl_alignments
from dgm4nlp.charutils import Multitext3D
import logging
def prepare_training(x_path, y_path,
# data pre-processing
nb_words=[None, None],
shortest_sequence=[None, None],
longest_sequence=[None, None],
# padding
bos_str=[None, None],
eos_str=[None, None],
# normalisation
lowercase=False,
batch_dtype='int64',
mask_dtype='float32',
name='training') -> [list, Multitext]:
"""
Construct vocabularies/tokenizers and memory-map the training data.
:param x_path:
:param y_path:
:param nb_words:
:param shortest_sequence:
:param longest_sequence:
:param bos_str:
:param eos_str:
:param name:
:return:
"""
training_paths = [x_path, y_path]
# Prepare vocabularies
logging.info('Fitting vocabularies')
tks = []
for i, (path, vs, bos, eos) in enumerate(zip(training_paths, nb_words, bos_str, eos_str)):
logging.info(' stream=%d', i)
# tokenizer with a bounded vocabulary
tks.append(Tokenizer(nb_words=vs, bos_str=bos, eos_str=eos, lowercase=lowercase))
tks[-1].fit_one(smart_ropen(path))
logging.info(' vocab-size=%d', tks[-1].vocab_size())
# Prepare training corpus
logging.info('Memory mapping training data')
training = Multitext(training_paths,
tokenizers=tks,
shortest=shortest_sequence,
longest=longest_sequence,
trim=[True, True],
batch_dtype=batch_dtype,
mask_dtype=mask_dtype,
name=name)
# in case the longest sequence was shorter than we thought
longest_sequence = [training.longest_sequence(0), training.longest_sequence(1)]
logging.info(' training-samples=%d longest=%s tokens=%s', training.nb_samples(),
longest_sequence, [training.nb_tokens(0), training.nb_tokens(1)])
return tks, training
def prepare_training3d(
x_path, y_path,
# data pre-processing
nb_chars=[None, None],
longest_word=[None, None],
shortest_sequence=[None, None],
longest_sequence=[None, None],
# padding
bos_str=[None, None],
eos_str=[None, None],
# normalisation
lowercase=False,
batch_dtype='int32',
mask_dtype='bool',
name='training') -> [list, Multitext3D]:
"""
Construct vocabularies/tokenizers and memory-map the training data.
:param x_path:
:param y_path:
:param nb_chars:
:param longest_word:
:param shortest_sequence:
:param longest_sequence:
:param bos_str:
:param eos_str:
:param name:
:return:
"""
training_paths = [x_path, y_path]
# Prepare vocabularies
logging.info('Fitting (char) vocabularies')
tks = []
for i, (path, vs, bos, eos, longword) in enumerate(zip(training_paths, nb_chars, bos_str, eos_str, longest_word)):
logging.info(' stream=%d', i)
# tokenizer with a bounded vocabulary
tks.append(Tokenizer(nb_words=vs, bos_str=bos, eos_str=eos, lowercase=lowercase, mode='chars', longest_token=longword))
tks[-1].fit_one(smart_ropen(path))
logging.info(' (char) vocab-size=%d', tks[-1].vocab_size())
# Prepare training corpus
logging.info('Memory mapping (char) training data')
training = Multitext3D(
training_paths,
tokenizers=tks,
shortest=shortest_sequence,
longest=longest_sequence,
trim=[True, True],
batch_dtype=batch_dtype,
mask_dtype=mask_dtype,
name=name)
# in case the longest sequence was shorter than we thought
longest_sequence = [training.longest_sequence(0), training.longest_sequence(1)]
deepest_sequence = [training.deepest_sequence(0), training.deepest_sequence(1)]
logging.info(' training-samples=%d longest=%s deepest=%s tokens=%s', training.nb_samples(),
longest_sequence, deepest_sequence, [training.nb_tokens(0), training.nb_tokens(1)])
return tks, training
def prepare_validation(tks, x_path, y_path,
wa_path=None,
shortest_sequence=[None, None],
longest_sequence=[None, None],
reverse_alignments=False,
batch_dtype='int64',
mask_dtype='float32',
name='validation') -> [Multitext, tuple]:
"""
Memory-map validation data.
:param tks:
:param x_path:
:param y_path:
:param wa_path:
:param shortest_sequence:
:param longest_sequence:
:param name:
:return:
"""
# Prepare validation corpus
logging.info('Memory mapping validation data')
validation = Multitext(
[x_path, y_path],
tokenizers=tks,
shortest=shortest_sequence,
longest=longest_sequence,
trim=[True, True],
batch_dtype=batch_dtype,
mask_dtype=mask_dtype,
name=name)
logging.info(' dev-samples=%d', validation.nb_samples())
if wa_path: # we have a NAACL file for alignments
logging.info("Working with gold labels for validation: '%s'", wa_path)
# reads in sets of gold alignments
val_wa = read_naacl_alignments(wa_path, reverse=reverse_alignments)
# discard those associated with sentences that are no longer part of the validation set
# (for example due to length constraints)
val_wa = [a_sets for keep, a_sets in zip(validation.iter_selection_flags(),
val_wa) if keep]
logging.info(' gold-samples=%d', len(val_wa))
else:
val_wa = None
return validation, val_wa
def prepare_validation3d(
tks, x_path, y_path,
wa_path=None,
shortest_sequence=[None, None],
longest_sequence=[None, None],
reverse_alignments=False,
batch_dtype='int32',
mask_dtype='bool',
name='validation') -> [Multitext3D, tuple]:
"""
Memory-map validation data.
:param tks:
:param x_path:
:param y_path:
:param wa_path:
:param shortest_sequence:
:param longest_sequence:
:param name:
:return:
"""
# Prepare validation corpus
logging.info('Memory mapping (char) validation data')
validation = Multitext3D(
[x_path, y_path],
tokenizers=tks,
shortest=shortest_sequence,
longest=longest_sequence,
trim=[True, True],
batch_dtype=batch_dtype,
mask_dtype=mask_dtype,
name=name)
logging.info(' dev-samples=%d', validation.nb_samples())
if wa_path: # we have a NAACL file for alignments
logging.info("Working with gold labels for validation: '%s'", wa_path)
# reads in sets of gold alignments
val_wa = read_naacl_alignments(wa_path, reverse=reverse_alignments)
# discard those associated with sentences that are no longer part of the validation set
# (for example due to length constraints)
val_wa = [a_sets for keep, a_sets in zip(validation.iter_selection_flags(),
val_wa) if keep]
logging.info(' gold-samples=%d', len(val_wa))
else:
val_wa = None
return validation, val_wa
def prepare_test(tks, x_path, y_path, wa_path=None, reverse_alignments=False, name='test') -> [Multitext, tuple]:
"""
Memory-map test data.
:param tks:
:param x_path:
:param y_path:
:param wa_path:
:param name:
:return:
"""
logging.info('Memory mapping test data')
test = Multitext([x_path, y_path],
tokenizers=tks,
shortest=None,
longest=None,
trim=[True, True],
mask_dtype='float32',
name=name)
logging.info(' test-samples=%d', test.nb_samples())
if wa_path: # we have a NAACL file for alignments
logging.info("Working with gold labels for test: '%s'", wa_path)
# reads in sets of gold alignments
test_wa = read_naacl_alignments(wa_path, reverse=reverse_alignments)
logging.info(' test-gold-samples=%d', len(test_wa))
else:
test_wa = None
return test, test_wa
def prepare_test3d(tks, x_path, y_path,
wa_path=None, reverse_alignments=False,
batch_dtype='int32', mask_dtype='bool', name='test') -> [Multitext3D, tuple]:
"""
Memory-map test data.
:param tks:
:param x_path:
:param y_path:
:param wa_path:
:param name:
:return:
"""
logging.info('Memory mapping (char) test data')
test = Multitext3D(
[x_path, y_path],
tokenizers=tks,
shortest=None,
longest=None,
trim=[True, True],
batch_dtype=batch_dtype,
mask_dtype=mask_dtype,
name=name)
logging.info(' test-samples=%d', test.nb_samples())
if wa_path: # we have a NAACL file for alignments
logging.info("Working with gold labels for test: '%s'", wa_path)
# reads in sets of gold alignments
test_wa = read_naacl_alignments(wa_path, reverse=reverse_alignments)
logging.info(' test-gold-samples=%d', len(test_wa))
else:
test_wa = None
return test, test_wa
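if __name__ == '__main__':  # pragma: no cover
    # Minimal usage sketch: the corpus paths below are placeholders and must
    # point at real parallel text files before this will run.
    logging.basicConfig(level=logging.INFO)
    tks, training = prepare_training(
        'train.x', 'train.y',
        nb_words=[30000, 30000],
        shortest_sequence=[1, 1],
        longest_sequence=[50, 50],
        bos_str=['<s>', None],
        eos_str=[None, '</s>'])
    # the tokenizers fitted on the training corpus are reused for validation
    validation, val_wa = prepare_validation(
        tks, 'dev.x', 'dev.y',
        shortest_sequence=[1, 1],
        longest_sequence=[50, 50])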
|
py | b405491df0b1531a93c21f6935f974cb0105adf5 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 13 12:00:28 2018
@author: Khaled Nakhleh
"""
|
py | b4054954cf39d84f29d0c77d9a61692a6b196285 | import os
start_dir = os.getcwd()
# mahimahi
os.system("sudo sysctl -w net.ipv4.ip_forward=1")
os.system("sudo add-apt-repository -y ppa:keithw/mahimahi")
os.system("sudo apt-get -y update")
os.system("sudo apt-get -y install mahimahi")
# apache server
os.system("sudo apt-get -y install apache2")
# selenium
os.system("wget 'https://pypi.python.org/packages/source/s/selenium/selenium-2.39.0.tar.gz'")
os.system("sudo apt-get -y install python-setuptools python-pip xvfb xserver-xephyr tightvncserver unzip")
os.system("tar xvzf selenium-2.39.0.tar.gz")
selenium_dir = start_dir + "/selenium-2.39.0"
os.chdir(selenium_dir)
os.system("sudo python setup.py install")
os.system("sudo sh -c \"echo 'DBUS_SESSION_BUS_ADDRESS=/dev/null' > /etc/init.d/selenium\"")
# py virtual display
os.chdir(start_dir)
os.system("sudo pip install pyvirtualdisplay")
os.system("wget 'https://dl.google.com/linux/direct/google-chrome-stable_current_amd64.deb' ")
os.system("sudo dpkg -i google-chrome-stable_current_amd64.deb")
os.system("sudo apt-get -f -y install")
# matplotlib
os.system("sudo apt-get -y install python-matplotlib")
# copy the webpage files to /var/www/html
os.chdir(start_dir)
os.system("sudo cp video_server/myindex_*.html /var/www/html")
os.system("sudo cp video_server/dash.all.min.js /var/www/html")
os.system("sudo cp -r video_server/video* /var/www/html")
os.system("sudo cp video_server/Manifest.mpd /var/www/html")
# make results directory
os.system("mkdir cooked_traces")
os.system("mkdir rl_server/results")
os.system("mkdir exp/results")
|
py | b40549877f51539fa7ba04ce6aef52b7e006ea98 | # This script contains the required client information,
# in order to connect to the game
HOST = "www.andromeda-serv.com"
# Right now you have to provide this information below,
# in order to connect to the game
# TODO: automate this part!
UID = "41299" #UserID
SID = raw_input("Enter SID\n")#"de3cc29efd9b37138205dd077bad36cd49afc2a5" # SessionID
CV = "4.1" #ClientVersion
# This is the map address, some servers might have different
# IPs for different maps
# This bot is specially made for andromeda
# andromeda has just one IP for all of their maps
# TODO: automate this part too!
ADDR = "54.37.129.54", 8080
|
py | b40549a3d5aee93dd9a4d32d4d7fb8464b4617d3 | # Ivan Carvalho
# Solution to https://www.urionlinejudge.com.br/judge/problems/view/1212
#!/usr/bin/env python2.7
# encoding : utf-8
while True:
a,b = sorted(raw_input().split(" "),key=lambda x: -len(x))
multiply = len(a)-len(b)
b = ("0"*multiply) +b
if a == "0" and b=="0":
break
else :
carry = 0
arraya = [int(i) for i in a[::-1]] + [0]
arrayb = [int(i) for i in b[::-1]] + [0]
for i in xrange(len(a)):
if arraya[i] + arrayb[i] >= 10:
carry += 1
arraya[i+1] += 1
if carry== 0:
print "No carry operation."
elif carry == 1:
print "1 carry operation."
else:
print "%d carry operations." % carry
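# Approach: pad the shorter operand with leading zeros, then add digit by
# digit from the least significant position; whenever a column sum reaches 10
# the next column is incremented and one carry operation is counted.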
|
py | b4054becf091e10928f232caed314602cb2ef535 | """
HyperOne
HyperOne API # noqa: E501
The version of the OpenAPI document: 0.1.0
Generated by: https://openapi-generator.tech
"""
import unittest
import h1
from h1.api.iam_user_api import IamUserApi # noqa: E501
class TestIamUserApi(unittest.TestCase):
"""IamUserApi unit test stubs"""
def setUp(self):
self.api = IamUserApi() # noqa: E501
def tearDown(self):
pass
def test_iam_user_credential_authtoken_delete(self):
"""Test case for iam_user_credential_authtoken_delete
Delete iam/user.credential # noqa: E501
"""
pass
def test_iam_user_credential_authtoken_get(self):
"""Test case for iam_user_credential_authtoken_get
Get iam/user.credential # noqa: E501
"""
pass
def test_iam_user_credential_authtoken_list(self):
"""Test case for iam_user_credential_authtoken_list
List iam/user.credential # noqa: E501
"""
pass
def test_iam_user_credential_create(self):
"""Test case for iam_user_credential_create
Create iam/user.credential # noqa: E501
"""
pass
def test_iam_user_credential_delete(self):
"""Test case for iam_user_credential_delete
Delete iam/user.credential # noqa: E501
"""
pass
def test_iam_user_credential_get(self):
"""Test case for iam_user_credential_get
Get iam/user.credential # noqa: E501
"""
pass
def test_iam_user_credential_list(self):
"""Test case for iam_user_credential_list
List iam/user.credential # noqa: E501
"""
pass
def test_iam_user_credential_patch(self):
"""Test case for iam_user_credential_patch
Update iam/user.credential # noqa: E501
"""
pass
def test_iam_user_get(self):
"""Test case for iam_user_get
Get iam/user # noqa: E501
"""
pass
def test_iam_user_service_get(self):
"""Test case for iam_user_service_get
Get iam/user.service # noqa: E501
"""
pass
def test_iam_user_service_list(self):
"""Test case for iam_user_service_list
List iam/user.service # noqa: E501
"""
pass
def test_iam_user_update(self):
"""Test case for iam_user_update
Update iam/user # noqa: E501
"""
pass
if __name__ == '__main__':
unittest.main()
|
py | b4054c59bfd18a349812965696d378011f7c78a5 | import pandas as pd
import yaml
import os
import dill
import cv2
import numpy as np
from tqdm import tqdm
from datetime import datetime
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from lime.wrappers.scikit_image import SegmentationAlgorithm
from src.data.preprocess import remove_text
from src.visualization.visualize import visualize_explanation
def predict_instance(x, model):
'''
Runs model prediction on 1 or more input images.
:param x: Image(s) to predict
:param model: A Keras model
:return: A numpy array comprising a list of class probabilities for each prediction
'''
y = model.predict(x) # Run prediction on the perturbations
if y.shape[1] == 1:
probs = np.concatenate([1.0 - y, y], axis=1) # Compute class probabilities from the output of the model
else:
probs = y
return probs
def predict_and_explain(x, model, exp, num_features, num_samples):
'''
Use the model to predict a single example and apply LIME to generate an explanation.
:param x: Preprocessed image to predict
:param model: The trained neural network model
:param exp: A LimeImageExplainer object
:param num_features: # of features to use in explanation
:param num_samples: # of times to perturb the example to be explained
:return: The LIME explainer for the instance
'''
def predict(x):
'''
Helper function for LIME explainer. Runs model prediction on perturbations of the example.
:param x: List of perturbed examples from an example
:return: A numpy array constituting a list of class probabilities for each predicted perturbation
'''
probs = predict_instance(x, model)
return probs
# Algorithm for superpixel segmentation. Parameters set to limit size of superpixels and promote border smoothness
segmentation_fn = SegmentationAlgorithm('quickshift', kernel_size=2.25, max_dist=50, ratio=0.1, sigma=0.15)
# Generate explanation for the example
explanation = exp.explain_instance(x, predict, num_features=num_features, num_samples=num_samples, segmentation_fn=segmentation_fn)
probs = predict_instance(np.expand_dims(x, axis=0), model)
return explanation, probs
def predict_and_explain_set(raw_img_dir=None, preds_dir=None, save_results=True, give_explanations=True):
'''
Preprocess a raw dataset. Then get model predictions and corresponding explanations.
:param raw_img_dir: Directory in which to look for raw images
:param preds_dir: Path at which to save results of this prediction
:param save_results: Flag specifying whether to save the prediction results to disk
:param give_explanations: Flag specifying whether to provide LIME explanations with predictions spreadsheet
:return: Dataframe of prediction results, optionally including explanations.
'''
# Load project config data
cfg = yaml.full_load(open(os.getcwd() + "/config.yml", 'r'))
cur_date = datetime.now().strftime('%Y%m%d-%H%M%S')
# Restore the model, LIME explainer, and model class indices from their respective serializations
model = load_model(cfg['PATHS']['MODEL_TO_LOAD'], compile=False)
explainer = dill.load(open(cfg['PATHS']['LIME_EXPLAINER'], 'rb'))
class_indices = dill.load(open(cfg['PATHS']['OUTPUT_CLASS_INDICES'], 'rb'))
# Load LIME and prediction constants from config
NUM_SAMPLES = cfg['LIME']['NUM_SAMPLES']
NUM_FEATURES = cfg['LIME']['NUM_FEATURES']
CLASS_NAMES = cfg['DATA']['CLASSES']
# Define column names of the DataFrame representing the prediction results
col_names = ['Image Filename', 'Predicted Class']
for c in cfg['DATA']['CLASSES']:
col_names.append('p(' + c + ')')
# Add columns for client explanation
if give_explanations:
col_names.append('Explanation Filename')
# Set raw image directory based on project config, if not specified
if raw_img_dir is None:
raw_img_dir = cfg['PATHS']['BATCH_PRED_IMGS']
# If no path is specified, create new directory for predictions
if preds_dir is None:
preds_dir = cfg['PATHS']['BATCH_PREDS'] + '\\' + cur_date + '\\'
if save_results and not os.path.exists(cfg['PATHS']['BATCH_PREDS'] + '\\' + cur_date):
os.mkdir(preds_dir)
# Create DataFrame for raw image file names
raw_img_df = pd.DataFrame({'filename': os.listdir(raw_img_dir)})
raw_img_df = raw_img_df[raw_img_df['filename'].str.contains('jpg|png|jpeg', na=False)] # Enforce image files
# Create generator for the image files
img_gen = ImageDataGenerator(preprocessing_function=remove_text, samplewise_std_normalization=True,
samplewise_center=True)
img_iter = img_gen.flow_from_dataframe(dataframe=raw_img_df, directory=raw_img_dir, x_col="filename",
target_size=cfg['DATA']['IMG_DIM'], batch_size=1, class_mode=None,
shuffle=False)
# Predict (and optionally explain) all images in the specified directory
rows = []
print('Predicting and explaining examples.')
for filename in raw_img_df['filename'].tolist():
# Get preprocessed image and make a prediction.
try:
x = img_iter.next()
except StopIteration:
break
y = np.squeeze(predict_instance(x, model))
# Rearrange prediction probability vector to reflect original ordering of classes in project config
p = [y[CLASS_NAMES.index(c)] for c in class_indices]
predicted_class = CLASS_NAMES[np.argmax(p)]
row = [filename, predicted_class]
row.extend(list(p))
# Explain this prediction
if give_explanations:
explanation, _ = predict_and_explain(np.squeeze(x, axis=0), model, explainer, NUM_FEATURES, NUM_SAMPLES)
if cfg['LIME']['COVID_ONLY'] == True:
label_to_see = class_indices['COVID-19']
else:
label_to_see = 'top'
# Load and resize the corresponding original image (no preprocessing)
orig_img = cv2.imread(raw_img_dir + filename)
orig_img = cv2.resize(orig_img, tuple(cfg['DATA']['IMG_DIM']), interpolation=cv2.INTER_NEAREST)
# Generate visual for explanation
exp_filename = visualize_explanation(orig_img, explanation, filename, None, p, CLASS_NAMES,
label_to_see=label_to_see, file_path=preds_dir)
row.append(exp_filename.split('\\')[-1])
rows.append(row)
# Convert results to a Pandas DataFrame and save
results_df = pd.DataFrame(rows, columns=col_names)
if save_results:
results_path = preds_dir + 'predictions.csv'
results_df.to_csv(results_path, columns=col_names, index_label=False, index=False)
return results_df
if __name__ == '__main__':
results = predict_and_explain_set(preds_dir=None, save_results=True, give_explanations=True) |
py | b4054cccf88d2c503a4d42eb6918bcdf9d94914f | from postman.models import Message
def inbox(request):
"""Provide the count of unread messages for an authenticated user."""
if request.user.is_authenticated():
return {'postman_unread_count': Message.objects.inbox_unread_count(request.user)}
else:
return {}
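# To expose postman_unread_count in templates, register this function in the
# template engine's context_processors setting (assuming this module lives at
# postman/context_processors.py, the dotted path would be
# 'postman.context_processors.inbox').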
|
py | b4054ee6b41f62fa4aa1edcdc2c84bd2c742ec9b | from docusign_esign import EnvelopesApi, EnvelopeDefinition, TemplateRole, AccountsApi, TemplatesApi
from docusign_esign.client.api_exception import ApiException
from flask import session, request
from ....consts import pattern
from ....docusign import create_api_client
from ....error_handlers import process_error
class Eg030Controller:
@staticmethod
def get_args():
"""Get required session and request arguments"""
# More data validation would be a good idea here
# Strip anything other than the characters listed
signer_email = pattern.sub("", request.form.get("signer_email"))
signer_name = pattern.sub("", request.form.get("signer_name"))
cc_email = request.form.get("cc_email")
cc_name = request.form.get("cc_name")
brand_id = request.form.get("brand")
template_id = request.form.get("envelope_template")
if cc_email and cc_name:
cc_email = pattern.sub("", cc_email)
cc_name = pattern.sub("", cc_name)
args = {
"account_id": session["ds_account_id"], # represent your {ACCOUNT_ID}
"base_path": session["ds_base_path"],
"access_token": session["ds_access_token"], # represent your {ACCESS_TOKEN}
"envelope_args": {
"signer_name": signer_name,
"signer_email": signer_email,
"cc_name": cc_name,
"cc_email": cc_email,
"brand_id": brand_id,
"template_id": template_id
}
}
return args
@classmethod
def worker(cls, args):
"""
1. Create an api client
2. Create an envelope definition object
3. Apply the brand to the envelope using the SDK
"""
# Step 2. Construct your API headers
api_client = create_api_client(base_path=args["base_path"], access_token=args["access_token"])
# Step 3. Construct your request body
envelope_api = EnvelopesApi(api_client)
envelope_definition = cls.make_envelope(args["envelope_args"])
# Step 4. Call the eSignature REST API
response = envelope_api.create_envelope(account_id=args["account_id"], envelope_definition=envelope_definition)
return response
@classmethod
def make_envelope(cls, args):
"""
Creates the envelope definition object
"""
# Create the envelope definition
envelope_definition = EnvelopeDefinition(
status="sent",
template_id=args["template_id"],
brand_id=args["brand_id"]
)
signer = TemplateRole(
email=args["signer_email"],
name=args["signer_name"],
role_name="signer"
)
# In case, we have cc we add him to envelope definition
if args["cc_email"] and args["cc_name"]:
cc = TemplateRole(
email=args["cc_email"],
name=args["cc_name"],
role_name="cc"
)
envelope_definition.template_roles = [signer, cc]
else:
envelope_definition.template_roles = [signer]
return envelope_definition
@staticmethod
def get_data(args):
"""Retrieve brands and envelope templates"""
api_client = create_api_client(base_path=args["base_path"], access_token=args["access_token"])
try:
"""Retrieve all brands using the AccountBrands::List"""
account_api = AccountsApi(api_client)
brands = account_api.list_brands(account_id=args["account_id"]).brands
"""Retrieve all templates using the Templates::List"""
template_api = TemplatesApi(api_client)
envelope_templates = template_api.list_templates(account_id=args["account_id"]).envelope_templates
return brands, envelope_templates
except ApiException as err:
return process_error(err)
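# Typical flow for this controller: get_args() collects the signer/cc details
# plus the chosen brand and template ids from the form and session, worker()
# builds the envelope definition via make_envelope() and sends it with
# EnvelopesApi.create_envelope, and get_data() retrieves the available brands
# and envelope templates (e.g. to populate selection lists in the calling view).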
|
py | b4055001ffbce84bdbc7b636993dec381ed1ea66 | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
#
# Copyright 2021 The NiPreps Developers <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# We support and encourage derived works from this project, please read
# about our expectations at
#
# https://www.nipreps.org/community/licensing/
#
"""
Compares pandas dataframes by columns.
"""
import sys
from argparse import ArgumentParser, RawTextHelpFormatter
from pathlib import Path
import numpy as np
import pandas as pd
from mriqc.bin import messages
from mriqc.utils.misc import BIDS_COMP
def read_iqms(feat_file):
"""Read in a features table."""
feat_file = Path(feat_file)
if feat_file.suffix == ".csv":
bids_comps = list(BIDS_COMP.keys())
x_df = pd.read_csv(
feat_file, index_col=False, dtype={col: str for col in bids_comps}
)
# Find present bids bits and sort by them
bids_comps_present = list(set(x_df.columns.ravel().tolist()) & set(bids_comps))
bids_comps_present = [bit for bit in bids_comps if bit in bids_comps_present]
x_df = x_df.sort_values(by=bids_comps_present)
# Remove sub- prefix in subject_id
x_df.subject_id = x_df.subject_id.str.lstrip("sub-")
# Remove columns that are not IQMs
feat_names = list(x_df._get_numeric_data().columns.ravel())
for col in bids_comps:
try:
feat_names.remove(col)
except ValueError:
pass
else:
bids_comps_present = ["subject_id"]
x_df = pd.read_csv(
feat_file, index_col=False, sep="\t", dtype={"bids_name": str}
)
x_df = x_df.sort_values(by=["bids_name"])
x_df["subject_id"] = x_df.bids_name.str.lstrip("sub-")
x_df = x_df.drop(columns=["bids_name"])
x_df.subject_id = ["_".join(v.split("_")[:-1]) for v in x_df.subject_id.ravel()]
feat_names = list(x_df._get_numeric_data().columns.ravel())
for col in feat_names:
if col.startswith(("size_", "spacing_", "Unnamed")):
feat_names.remove(col)
return x_df, feat_names, bids_comps_present
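# read_iqms accepts either the CSV layout (one column per BIDS entity) or the
# TSV layout keyed by a single bids_name column; in both cases it returns the
# sorted dataframe, the list of numeric IQM column names, and the BIDS entity
# columns that are present.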
def main():
"""Entry point."""
parser = ArgumentParser(
description="Compare two pandas dataframes.",
formatter_class=RawTextHelpFormatter,
)
g_input = parser.add_argument_group("Inputs")
g_input.add_argument(
"-i",
"--input-csv",
action="store",
type=Path,
required=True,
help="input data frame",
)
g_input.add_argument(
"-r",
"--reference-csv",
action="store",
type=Path,
required=True,
help="reference dataframe",
)
g_input.add_argument(
"--tolerance",
type=float,
default=1.0e-5,
help="relative tolerance for comparison",
)
opts = parser.parse_args()
ref_df, ref_names, ref_bids = read_iqms(opts.reference_csv)
tst_df, tst_names, tst_bids = read_iqms(opts.input_csv)
ref_df.set_index(ref_bids)
tst_df.set_index(tst_bids)
if sorted(ref_bids) != sorted(tst_bids):
sys.exit(messages.DFCHECK_DIFFERENT_BITS)
if sorted(ref_names) != sorted(tst_names):
sys.exit(messages.DFCHECK_CSV_COLUMNS)
ref_df = ref_df.sort_values(by=ref_bids)
tst_df = tst_df.sort_values(by=tst_bids)
if len(ref_df) != len(tst_df):
different_length_message = messages.DFCHECK_DIFFERENT_LENGTH.format(
            len_input=len(tst_df), len_reference=len(ref_df)
)
print(different_length_message)
tst_rows = tst_df[tst_bids]
ref_rows = ref_df[ref_bids]
print(tst_rows.shape, ref_rows.shape)
tst_keep = np.sum(tst_rows.isin(ref_rows).values.ravel().tolist())
print(tst_keep)
diff = ~np.isclose(
ref_df[ref_names].values, tst_df[tst_names].values, rtol=opts.tolerance
)
if np.any(diff):
# ne_stacked = pd.DataFrame(data=diff, columns=ref_names).stack()
# ne_stacked = np.isclose(ref_df[ref_names], tst_df[ref_names]).stack()
# changed = ne_stacked[ne_stacked]
# changed.set_index(ref_bids)
difference_locations = np.where(diff)
changed_from = ref_df[ref_names].values[difference_locations]
changed_to = tst_df[ref_names].values[difference_locations]
cols = [ref_names[v] for v in difference_locations[1]]
bids_df = ref_df.loc[difference_locations[0], ref_bids].reset_index()
chng_df = pd.DataFrame({"iqm": cols, "from": changed_from, "to": changed_to})
table = pd.concat([bids_df, chng_df], axis=1)
print(table[ref_bids + ["iqm", "from", "to"]].to_string(index=False))
corr = pd.DataFrame()
corr["iqms"] = ref_names
corr["cc"] = [
float(
np.corrcoef(
ref_df[[var]].values.ravel(),
tst_df[[var]].values.ravel(),
rowvar=False,
)[0, 1]
)
for var in ref_names
]
if np.any(corr.cc < 0.95):
iqms = corr[corr.cc < 0.95]
iqms_message = messages.DFCHECK_IQMS_UNDER_095.format(iqms=iqms)
print(iqms_message)
sys.exit(messages.DFCHECK_CSV_CHANGED)
else:
print(messages.DFCHECK_IQMS_CORRELATED)
sys.exit(0)
if __name__ == "__main__":
main()
|
py | b4055061be9d2e12d19a1e7831f90af598e0531d | from abc import abstractmethod
from aioworkers.core.base import AbstractEntity
class AbstractSender(AbstractEntity):
@abstractmethod # pragma: no cover
async def send_message(self, msg):
raise NotImplementedError()
async def send(self, *args, **kwargs):
if args:
for msg in args:
if kwargs:
msg = msg.copy()
msg.update(kwargs)
await self.send_message(msg)
elif kwargs:
await self.send_message(kwargs)
else:
raise ValueError('Empty args')
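
# Illustrative usage (not part of the original module): a minimal concrete
# sender and the two ways ``send`` fans messages out. ``PrintSender`` is
# hypothetical; real instantiation goes through aioworkers' config/context
# machinery inherited from AbstractEntity.
#
#   class PrintSender(AbstractSender):
#       async def send_message(self, msg):
#           print(msg)
#
#   await sender.send({'to': 'a'}, {'to': 'b'}, priority=1)
#       -> delivers two messages, each copied and updated with priority=1
#   await sender.send(to='a', body='hi')
#       -> delivers a single message built from the keyword arguments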
|
py | b405525be192a91ba918eab527473c80ab5f21f2 | from typing import Optional
from homeassistant.core import State
from .common import SourceEntity
class PowerCalculationStrategyInterface:
async def calculate(self, entity_state: State) -> Optional[float]:
"""Calculate power consumption based on entity state"""
pass
async def validate_config(self, source_entity: SourceEntity):
"""Validate correct setup of the strategy"""
pass
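
# Illustrative sketch (not part of the original module): a minimal concrete
# strategy that always reports a fixed wattage. ``FixedStrategy`` and its
# constructor argument are hypothetical; real strategies derive the value
# from the entity state.
#
#   class FixedStrategy(PowerCalculationStrategyInterface):
#       def __init__(self, watts: float):
#           self._watts = watts
#
#       async def calculate(self, entity_state: State) -> Optional[float]:
#           return self._watts
#
#       async def validate_config(self, source_entity: SourceEntity):
#           pass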
|
py | b4055287de9a1df3b677c232c48c3cce640165f7 | fname = input('Enter the file name')
try:
f = open(fname)
except:
print('The file could not be opened')
quit()
total = 0
count = 0
for line in f:
if 'X-DSPAM-Confidence' in line:
print(line)
        i = line.find(':') + 1
        total += float(line[i:])
count += 1
print('Average spam confidence:', total / count)
|
py | b40552c000e4c9bbcef66cba1f32f89b64037456 | #!/usr/bin/python
import logging
import subprocess
# --------------------------------------------------------------------------------------
# Save this code in file "process_wrapper.py" and adapt as indicated in inline comments.
#
# Notes:
# - This is a Python 3 script.
# - The inputs will be given values by name, thus their order has no importance ...
# - ... except that the inputs with a default value must be listed last.
# - Parameter names are automatically converted into valid Python variable names.
# - Any empty line or line starting with a '#' character will be ignored.
# --------------------------------------------------------------------------------------
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
def execute(out_dir, command, connector_in):
"""
Inputs:
command -- command -- 45/User String
connector_in -- connector_in -- 45/User String
Outputs:
cmdoutput -- cmdoutput -- 45/User String
connector_out -- connector_out -- 45/User String
Main Dependency:
mep-wps/uc-bundle-1
Software Dependencies:
pywps-4
Processing Resources:
ram -- 4
disk -- 10
cpu -- 1
gpu -- 0
"""
cmdoutput = ""
# ----------------------------------------------------------------------------------
# Insert your own code below.
# The files generated by your code must be stored in the "out_dir" folder.
# Only the content of that folder is persisted in the datastore.
# Give appropriate values to the output parameters. These will be passed to the next
# process(es) following the workflow connections.
# ----------------------------------------------------------------------------------
logger.info("Starting...")
try:
lcommand=command.replace("@OUT_DIR@",str(out_dir))
cmdoutput+="Executing: "+lcommand+'\n'
result=subprocess.run(lcommand,stdout=subprocess.PIPE,stderr=subprocess.STDOUT,shell=True)
cmdoutput+=result.stdout.decode('utf-8')
logger.info(cmdoutput)
except Exception as e:
cmdoutput+=str(e)
logger.info(cmdoutput)
# ----------------------------------------------------------------------------------
# The wrapper must return a dictionary that contains the output parameter values.
# ----------------------------------------------------------------------------------
return {
"cmdoutput": cmdoutput,
"connector_out": connector_in
} |
py | b40553cacc79e9e929dbd276d53c6b395d1d8e92 | import tensorflow as tf
from tensorpack.models import Conv2D
from region_norm_ops import get_rn
def conv_with_rn(gradient):
out = Conv2D('conv', gradient, gradient.get_shape()[3], 1, strides=1, activation=get_rn(),
kernel_initializer=tf.contrib.layers.variance_scaling_initializer(2.0))
gradient = gradient + out
return gradient
|
py | b40554445779a766a1e83bcfa8f6a5e1b61df556 | from datetime import date
from enum import Enum, unique
import attr
@unique
class Priority(Enum):
must = 'Must'
should = 'Should'
may = 'May'
@attr.s
class Req:
subjects = attr.ib()
priority = attr.ib()
due_date = attr.ib()
action = attr.ib()
req_id = attr.ib()
req_text = attr.ib()
citations = attr.ib(default=attr.Factory(list))
data = [
Req(['All agencies'], Priority.should, date(2014, 1, 31),
"send SAO report to [email protected]", "73.01", """
Due to the recent government shutdown, the beginning of the
reporting period was delayed. In consultation with the Office of
Management and Budget (OMB), we have extended the reporting
        deadline from December 31, 2013 to January 31, 2014 for the
following: 1. Senior Agency Official (SAO) annual report: Progress
towards specific goals or requirements in NARA/OMB M-12-18,
Managing Government Records Directive. You should have received
instructions and the template for the report on August 29, 2013.
(SEE AC 29 2013: Senior Agency Official Annual Report Template).
Reports should be sent to [email protected] by Friday, January 31,
2014.
""", ["SAO report"]),
Req(['All agencies'], Priority.must, date(2014, 1, 31),
"send RMSA spreadsheet to [email protected]", "73.02", """
Due to the recent government shutdown, the beginning of the
reporting period was delayed. In consultation with the Office of
Management and Budget (OMB), we have extended the reporting
        deadline from December 31, 2013 to January 31, 2014 for the
following: 2. Records Management Self Assessment (RMSA): Directive
Requirements - 2.2 and 2.5: As part of this year's RMSA, you must
submit your agency's responses to two requirements established in
the Managing Government Records Directive: a) Part I, Section 2,
Requirement 2.2 requires your agency to identify for transfer its
permanent records that have been in existence for more than 30
years and report this information to NARA. b) Part I, Section 2,
Requirement 2.5 requires your agency's Records Officer to identify
all unscheduled records in your agency. This requirement also
covers all records stored at NARA and in other storage facilities
that have not been properly scheduled.
Your agency's responses to these requirements must be submitted
using the spreadsheets provided by NARA earlier this year. (See
AC 23.2013: NARA/OMB M-12-18, Goals Due on or Before December 31,
2013.) All spreadsheets must be sent to [email protected]
by January 31, 2014.
""", ["RMSA spreadsheet"]),
Req(['All agencies'], Priority.should, date(2013, 11, 15),
"""reaffirm or designate a new SAO by sending the name, title, agency,
office, email, phone number, and address information of the agency SAO
to [email protected]""", "73.03", """
Due to the recent government shutdown, the beginning of the
reporting period was delayed. In consultation with the Office of
Management and Budget (OMB), we have extended the reporting
deadline from�December 31, 2013 to January 31, 2014 for the
following:
Annual SAO Designation: please reaffirm or designate a new SAO by
sending the name, title, agency, office, email, phone number, and
address information of the agency SAO to [email protected] by
November 15, 2013.
""", ["SAO"]),
Req(['CFO ACT agency heads'], Priority.must, date(2013, 1, 15),
"designate a SSAO", "74.03", """
Therefore, the head of each of the 24 Chief Financial Officer
(CFO) Act agencies shall designate a Strategic Sourcing
Accountable Official (SSAO), who will have the authority to
coordinate the agency's internal strategic sourcing activities and
its participation in government wide efforts, such as those
described below. Each agency shall send the proposed SSAO's name
and contact information to OMB by January 15, 2013.
""", ['SSAO']),
Req(['CFO ACT agency heads'], Priority.must, date(2013, 1, 15),
"send proposed SSAO's name and contact information to OMB", "74.03",
"""
Therefore, the head of each of the 24 Chief Financial Officer
(CFO) Act agencies shall designate a Strategic Sourcing
Accountable Official (SSAO), who will have the authority to
coordinate the agency's internal strategic sourcing activities and
its participation in government wide efforts, such as those
described below. Each agency shall send the proposed SSAO's name
and contact information to OMB by January 15, 2013.
""", ['SSAO']),
Req(['SSLC'], Priority.must, None, 'meet regularly', '74.05',
"""
The SSLC will meet regularly to provide long-term leadership of
the government's strategic sourcing efforts as well as to take
actions that save taxpayer dollars now.
"""),
Req(['SSLC'], Priority.must, date(2013, 3, 1),
'submit to OMB a set of recommendations', '74.06', """
To that end, by March 2013, the SSLC shall submit to OMB a set of
recommendations for management strategies for specific goods and
        services -- including several IT commodities identified through the
PortfolioStat process
- that would ensure that the Federal government receives the most
favorable offer possible.
""", ['recommendations']),
Req(['SSLC'], Priority.must, None,
'promote sound strategic sourcing practices within their agencies',
'74.14', """
Each of the SSLC agencies shall promote, to the maximum extent
practicable, sound strategic sourcing practices within their
agencies.
"""),
Req(['SSLC'], Priority.must, None, """
establish an internal cross-functional strategic sourcing council
to oversee the agency's related activities""", '74.15', """
Each SSLC agency shall establish an internal cross-functional
strategic sourcing council to oversee the agency's related
activities.
"""),
Req(['GSA'], Priority.must, date(2013, 10, 1), """
implement, in consultation with the SSLC, at least five new
government-wide strategic sourcing solutions""", '74.17', """
        The Administrator of General Services shall: implement, in
consultation with the SSLC, at least five new government-wide
strategic sourcing solutions in each of FY 2013 and FY 2014;
""", ['SSLC']),
Req(['GSA'], Priority.must, date(2014, 10, 1), """
implement, in consultation with the SSLC, at least five new
government-wide strategic sourcing solutions""", '74.17', """
        The Administrator of General Services shall: implement, in
consultation with the SSLC, at least five new government-wide
strategic sourcing solutions in each of FY 2013 and FY 2014;
""", ['SSLC']),
Req(['GSA'], Priority.must, None, """
increase the transparency of prices paid for common goods and
services for use by agency officials in market research and
negotiations""", '74.18', """
        The Administrator of General Services shall: increase the
transparency of prices paid for common goods and services for use
by agency officials in market research and negotiations; and
"""),
Req(['GSA'], Priority.must, None, """
as needed, promulgate requirements, regulations, and best
practices for acquiring, using, and, where applicable, disposing
of the commodities managed through strategic sourcing
initiatives.""", '74.19', """
        The Administrator of General Services shall: as needed,
promulgate requirements, regulations, and best practices for
acquiring, using, and, where applicable, disposing of the
commodities managed through strategic sourcing initiatives.
"""),
Req(["CFO Act agencies' government-wide vehicles"], Priority.must, None,
"""
        reflect input from a large number of potential agency users --
        especially the largest likely users -- regarding customer demand
for the goods and services being considered, the acquisition
strategy (including contract pricing, delivery and other terms and
conditions, and performance requirements), and the commodity
management approach""", '74.20', """
        However, at a minimum, government-wide vehicles shall: reflect
        input from a large number of potential agency users -- especially
        the largest likely users -- regarding customer demand for the goods
and services being considered, the acquisition strategy (including
contract pricing, delivery and other terms and conditions, and
performance requirements), and the commodity management approach;
"""),
Req(["CFO Act agencies' government-wide vehicles"], Priority.must, None,
"""
ensure that the Federal government gets credit for all sales
provided under that vehicle, regardless of payment method, unless
the sales are identified with other government contracts, so that
volume-based pricing discounts can be applied""", '74.21', """
        However, at a minimum, government-wide vehicles shall: ensure
that the Federal government gets credit for all sales provided
under that vehicle, regardless of payment method, unless the sales
are identified with other government contracts, so that
volume-based pricing discounts can be applied;
"""),
Req(["CFO Act agencies' government-wide vehicles"], Priority.must, None,
"""
include tiered pricing, or other appropriate strategies, to reduce
prices as cumulative sales volume increases""", '74.22', """
        However, at a minimum, government-wide vehicles shall:
include tiered pricing, or other appropriate strategies, to reduce
prices as cumulative sales volume increases;
"""),
Req(["CFO Act agencies' government-wide vehicles"], Priority.must, None,
"""
require vendors to provide sufficient pricing, usage, and
performance data to enable the government to improve their
commodity management practices on an ongoing basis""", '74.23', """
        However, at a minimum, government-wide vehicles shall:
require vendors to provide sufficient pricing, usage, and
performance data to enable the government to improve their
commodity management practices on an ongoing basis; and
"""),
Req(["CFO Act agencies' government-wide vehicles"], Priority.must, None,
"""
be supported by a contract administration plan that demonstrates
commitment by the executive agent to perform active commodity
management and monitor vendor performance and pricing changes
throughout the life of the contract to ensure the benefits of
strategic sourcing are maintained.""", '74.24', """
        However, at a minimum, government-wide vehicles shall: be
supported by a contract administration plan that demonstrates
commitment by the executive agent to perform active commodity
management and monitor vendor performance and pricing changes
throughout the life of the contract to ensure the benefits of
strategic sourcing are maintained.
"""),
Req(['CFO Act agencies'], Priority.must, None, """
seek to increase participation by small business""", '74.25', """
To the maximum extent practicable, all strategic sourcing
opportunities shall seek to increase participation by small
businesses.
"""),
Req(["CFO Act agencies' strategic sourcing agreements"], Priority.must,
None, "baseline small business use under current strategies", '74.26',
"""
All proposed strategic sourcing agreements must baseline small
business use under current strategies and set goals to meet or
exceed that baseline participation under the new strategic
sourcing vehicles.
"""),
Req(["CFO Act agencies' strategic sourcing agreements"], Priority.must,
None, """
set goals to meet or exceed baseline participation under the new
strategic sourcing vehicles""", '74.27', """
All proposed strategic sourcing agreements must baseline small
business use under current strategies and set goals to meet or
exceed that baseline participation under the new strategic
sourcing vehicles.
"""),
]
data = list(sorted(data, key=lambda r: r.due_date or date(2222, 2, 2)))
if __name__ == '__main__':
print("""
<html>
<head>
<style type="text/css">
.due {
color: green;
}
.actor {
color: purple;
}
.priority {
color: red;
font-weight: bold;
}
</style>
</head>
<body>
""")
for req in data:
title = "{0}: {1}".format(req.req_id, req.req_text.replace('"', "'"))
text = req.action
for cit in req.citations:
text = text.replace(
cit, '<a href="#not-implemented">{0}</a>'.format(cit))
print('<p title="{0}">'.format(title))
if req.due_date:
print('<span class="due">By {0}</span>'.format(
req.due_date.isoformat()))
print("""
<span class="actor">{0}</span>
<span class="priority">{1}</span>
{2}
</p>
""".format(req.subjects[0], req.priority.value, text))
print("""
</body>
</html>
""")
|
py | b40554e1ea8d8eb9b4eada4f5b204dcbfe01336e | from abc import ABC, abstractmethod
from skbandit.bandits import Bandit
from skbandit.environments import Environment, EnvironmentNoMoreAcceptingInputsException, FullInformationEnvironment, \
SemiBanditFeedbackEnvironment, BanditFeedbackEnvironment
class Experiment(ABC):
"""Performs an experiment with a bandit algorithm.
An experiment takes two parameters: a `bandit`, which acts on an `environment`.
The constructor is supposed to set the `best_arm` field to the best possible action on the environment (i.e.
the one that generates the highest reward -- or one such combination of arms, depending on the setting).
This is the main task for subclassing an environment. The `best_arm` is however not necessarily set, if it
does not make sense for the specific environment.
"""
def __init__(self, environment: Environment, bandit: Bandit):
self._environment = environment
self._bandit = bandit
self._best_arm = None
@property
def best_arm(self):
return self._best_arm
def regret(self, reward: float) -> float:
"""Determines the regret when getting a given reward."""
return self._environment.regret(reward)
@abstractmethod
def round(self) -> float:
"""Performs one round of experiment, yielding the regret for this round."""
pass
def rounds(self, n: int) -> float:
"""Performs several rounds of experiment, yielding the total regret.
If the environment stops accepting inputs within the `n` rounds, execution automatically stops.
"""
if not self._environment.may_stop_accepting_inputs:
return sum(self.round() for _ in range(n))
else:
            total_regret = 0.0
            for _ in range(n):
                if not self._environment.will_accept_input():
                    break
                total_regret += self.round()
            return total_regret
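
# Illustrative flow (not part of the original module): wiring a concrete
# environment/bandit pair into an experiment. ``my_environment`` and
# ``my_bandit`` are hypothetical instances of the abstract types above.
#
#   experiment = BanditFeedbackExperiment(my_environment, my_bandit)
#   total_regret = experiment.rounds(1000)  # stops early if inputs run out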
class FullInformationExperiment(Experiment):
"""Performs an experiment with full information, i.e. one reward is known per arm and per round"""
def __init__(self, environment: FullInformationEnvironment, bandit: Bandit):
super().__init__(environment, bandit)
def round(self) -> float:
if self._environment.may_stop_accepting_inputs and not self._environment.will_accept_input():
raise EnvironmentNoMoreAcceptingInputsException
arm = self._bandit.pull()
rewards, reward = self._environment.rewards(arm) # List and float.
self._bandit.rewards(rewards)
return self.regret(reward)
class SemiBanditFeedbackExperiment(Experiment):
"""Performs an experiment with semi-bandit information, i.e. rewards are known per played arm and per round"""
def __init__(self, environment: SemiBanditFeedbackEnvironment, bandit: Bandit):
super().__init__(environment, bandit)
def round(self) -> float:
if self._environment.may_stop_accepting_inputs and not self._environment.will_accept_input():
raise EnvironmentNoMoreAcceptingInputsException
arm = self._bandit.pull()
rewards, reward = self._environment.rewards(arm) # Dictionary and float.
self._bandit.rewards(rewards)
return self.regret(reward)
class BanditFeedbackExperiment(Experiment):
"""Performs an experiment with full-bandit information, i.e. only one reward is known per round"""
def __init__(self, environment: BanditFeedbackEnvironment, bandit: Bandit):
super().__init__(environment, bandit)
def round(self) -> float:
if self._environment.may_stop_accepting_inputs and not self._environment.will_accept_input():
raise EnvironmentNoMoreAcceptingInputsException
arm = self._bandit.pull()
reward = self._environment.reward(arm)
self._bandit.reward(arm, reward)
return self.regret(reward)
|
py | b40554f371a70028244e8fe9af2fcda84dbea865 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.10 on 2016-10-06 20:12
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [("events", "0012_event_image")]
operations = [
migrations.AlterField(
model_name="event",
name="author",
field=models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="oppretter",
to=settings.AUTH_USER_MODEL,
),
)
]
|
py | b405561d2337dfda810c5bc4bc66e4c4f951211a | import unittest
import numpy as np
import ase
from chemiscope import create_input
TEST_FRAMES = [ase.Atoms("CO2")]
class TestCreateInputMeta(unittest.TestCase):
def test_meta(self):
meta = {}
data = create_input(frames=TEST_FRAMES, meta=meta)
self.assertEqual(data["meta"]["name"], "<unknown>")
self.assertEqual(len(data["meta"].keys()), 1)
meta = {"name": ""}
data = create_input(frames=TEST_FRAMES, meta=meta)
self.assertEqual(data["meta"]["name"], "<unknown>")
self.assertEqual(len(data["meta"].keys()), 1)
meta = {"name": "foo"}
data = create_input(frames=TEST_FRAMES, meta=meta)
self.assertEqual(data["meta"]["name"], "foo")
self.assertEqual(len(data["meta"].keys()), 1)
meta = {"name": "foo", "description": "bar"}
data = create_input(frames=TEST_FRAMES, meta=meta)
self.assertEqual(data["meta"]["name"], "foo")
self.assertEqual(data["meta"]["description"], "bar")
self.assertEqual(len(data["meta"].keys()), 2)
meta = {"name": "foo", "references": ["bar"]}
data = create_input(frames=TEST_FRAMES, meta=meta)
self.assertEqual(data["meta"]["name"], "foo")
self.assertEqual(len(data["meta"]["references"]), 1)
self.assertEqual(data["meta"]["references"][0], "bar")
self.assertEqual(len(data["meta"].keys()), 2)
meta = {"name": "foo", "authors": ["bar"]}
data = create_input(frames=TEST_FRAMES, meta=meta)
self.assertEqual(data["meta"]["name"], "foo")
self.assertEqual(len(data["meta"]["authors"]), 1)
self.assertEqual(data["meta"]["authors"][0], "bar")
self.assertEqual(len(data["meta"].keys()), 2)
def test_meta_unknown_keys_warning(self):
meta = {"name": "foo", "what_is_this": "I don't know"}
with self.assertWarns(UserWarning) as cm:
data = create_input(frames=TEST_FRAMES, meta=meta)
self.assertEqual(data["meta"]["name"], "foo")
self.assertEqual(len(data["meta"].keys()), 1)
self.assertEqual(
cm.warning.args, ("ignoring unexpected metadata: what_is_this",)
)
def test_meta_conversions(self):
meta = {"name": 33}
data = create_input(frames=TEST_FRAMES, meta=meta)
self.assertEqual(data["meta"]["name"], "33")
self.assertEqual(len(data["meta"].keys()), 1)
meta = {"name": ["foo", "bar"], "description": False}
data = create_input(frames=TEST_FRAMES, meta=meta)
self.assertEqual(data["meta"]["name"], "['foo', 'bar']")
self.assertEqual(data["meta"]["description"], "False")
self.assertEqual(len(data["meta"].keys()), 2)
meta = {"name": "foo", "references": (3, False)}
data = create_input(frames=TEST_FRAMES, meta=meta)
self.assertEqual(data["meta"]["name"], "foo")
self.assertEqual(len(data["meta"]["references"]), 2)
self.assertEqual(data["meta"]["references"][0], "3")
self.assertEqual(data["meta"]["references"][1], "False")
self.assertEqual(len(data["meta"].keys()), 2)
meta = {"name": "foo", "authors": (3, False)}
data = create_input(frames=TEST_FRAMES, meta=meta)
self.assertEqual(data["meta"]["name"], "foo")
self.assertEqual(len(data["meta"]["authors"]), 2)
self.assertEqual(data["meta"]["authors"][0], "3")
self.assertEqual(data["meta"]["authors"][1], "False")
self.assertEqual(len(data["meta"].keys()), 2)
class TestCreateInputProperties(unittest.TestCase):
def test_properties(self):
properties = {"name": {"target": "atom", "values": [2, 3, 4]}}
data = create_input(frames=TEST_FRAMES, properties=properties)
self.assertEqual(data["properties"]["name"]["target"], "atom")
self.assertEqual(data["properties"]["name"]["values"], [2, 3, 4])
self.assertEqual(len(data["properties"]["name"].keys()), 2)
properties = {"name": {"target": "atom", "values": ["2", "3", "4"]}}
data = create_input(frames=TEST_FRAMES, properties=properties)
self.assertEqual(data["properties"]["name"]["target"], "atom")
self.assertEqual(data["properties"]["name"]["values"], ["2", "3", "4"])
self.assertEqual(len(data["properties"]["name"].keys()), 2)
properties = {
"name": {
"target": "atom",
"values": [2, 3, 4],
"description": "foo",
},
}
data = create_input(frames=TEST_FRAMES, properties=properties)
self.assertEqual(data["properties"]["name"]["target"], "atom")
self.assertEqual(data["properties"]["name"]["description"], "foo")
self.assertEqual(data["properties"]["name"]["values"], [2, 3, 4])
self.assertEqual(len(data["properties"]["name"].keys()), 3)
properties = {
"name": {
"target": "atom",
"values": [2, 3, 4],
"units": "foo",
},
}
data = create_input(frames=TEST_FRAMES, properties=properties)
self.assertEqual(data["properties"]["name"]["target"], "atom")
self.assertEqual(data["properties"]["name"]["units"], "foo")
self.assertEqual(data["properties"]["name"]["values"], [2, 3, 4])
self.assertEqual(len(data["properties"]["name"].keys()), 3)
def test_ndarray_properties(self):
# shape N
properties = {"name": {"target": "atom", "values": np.array([2, 3, 4])}}
data = create_input(frames=TEST_FRAMES, properties=properties)
self.assertEqual(data["properties"]["name"]["target"], "atom")
self.assertEqual(data["properties"]["name"]["values"], [2, 3, 4])
self.assertEqual(len(data["properties"].keys()), 1)
# shape N
properties = {"name": {"target": "atom", "values": np.array(["2", "3", "4"])}}
data = create_input(frames=TEST_FRAMES, properties=properties)
self.assertEqual(data["properties"]["name"]["target"], "atom")
self.assertEqual(data["properties"]["name"]["values"], ["2", "3", "4"])
self.assertEqual(len(data["properties"].keys()), 1)
# shape N x 1
properties = {"name": {"target": "atom", "values": np.array([[2], [3], [4]])}}
data = create_input(frames=TEST_FRAMES, properties=properties)
self.assertEqual(data["properties"]["name"]["target"], "atom")
self.assertEqual(data["properties"]["name"]["values"], [2, 3, 4])
self.assertEqual(len(data["properties"].keys()), 1)
# shape N x 3
properties = {
"name": {
"target": "atom",
"values": np.array([[1, 2, 4], [1, 2, 4], [1, 2, 4]]),
}
}
data = create_input(frames=TEST_FRAMES, properties=properties)
self.assertEqual(data["properties"]["name[1]"]["target"], "atom")
self.assertEqual(data["properties"]["name[1]"]["values"], [1, 1, 1])
self.assertEqual(data["properties"]["name[2]"]["target"], "atom")
self.assertEqual(data["properties"]["name[2]"]["values"], [2, 2, 2])
self.assertEqual(data["properties"]["name[3]"]["target"], "atom")
self.assertEqual(data["properties"]["name[3]"]["values"], [4, 4, 4])
self.assertEqual(len(data["properties"].keys()), 3)
def test_invalid_name(self):
properties = {"": {"target": "atom", "values": [2, 3, 4]}}
with self.assertRaises(Exception) as cm:
create_input(frames=TEST_FRAMES, properties=properties)
self.assertEqual(
str(cm.exception), "the name of a property can not be the empty string"
)
properties = {False: {"target": "atom", "values": [2, 3, 4]}}
with self.assertRaises(Exception) as cm:
create_input(frames=TEST_FRAMES, properties=properties)
self.assertEqual(
str(cm.exception),
"the name of a property name must be a string, "
+ "got 'False' of type <class 'bool'>",
)
def test_invalid_target(self):
properties = {"name": {"values": [2, 3, 4]}}
with self.assertRaises(Exception) as cm:
create_input(frames=TEST_FRAMES, properties=properties)
self.assertEqual(str(cm.exception), "missing 'target' for the 'name' property")
properties = {"name": {"target": "atoms", "values": [2, 3, 4]}}
with self.assertRaises(Exception) as cm:
create_input(frames=TEST_FRAMES, properties=properties)
self.assertEqual(
str(cm.exception),
"the target must be 'atom' or 'structure' for the 'name' property",
)
def test_invalid_types_metadata(self):
properties = {"name": {"target": "atom", "values": [2, 3, 4], "units": False}}
data = create_input(frames=TEST_FRAMES, properties=properties)
self.assertEqual(data["properties"]["name"]["units"], "False")
properties = {
"name": {"target": "atom", "values": [2, 3, 4], "description": False}
}
data = create_input(frames=TEST_FRAMES, properties=properties)
self.assertEqual(data["properties"]["name"]["description"], "False")
def test_property_unknown_keys_warning(self):
properties = {"name": {"target": "atom", "values": [2, 3, 4], "what": False}}
with self.assertWarns(UserWarning) as cm:
create_input(frames=TEST_FRAMES, properties=properties)
self.assertEqual(cm.warning.args, ("ignoring unexpected property key: what",))
def test_invalid_values_types(self):
properties = {"name": {"target": "atom", "values": 3}}
with self.assertRaises(Exception) as cm:
create_input(frames=TEST_FRAMES, properties=properties)
self.assertEqual(
str(cm.exception), "unknown type (<class 'int'>) for property 'name'"
)
properties = {"name": {"target": "atom", "values": {"test": "bad"}}}
with self.assertRaises(Exception) as cm:
create_input(frames=TEST_FRAMES, properties=properties)
self.assertEqual(
str(cm.exception), "unknown type (<class 'dict'>) for property 'name'"
)
properties = {"name": {"target": "atom", "values": [{}, {}, {}]}}
with self.assertRaises(Exception) as cm:
create_input(frames=TEST_FRAMES, properties=properties)
self.assertEqual(
str(cm.exception),
"unsupported type in property 'name' values: should be string or number",
)
def test_wrong_number_of_values(self):
properties = {"name": {"target": "atom", "values": [2, 3]}}
with self.assertRaises(Exception) as cm:
create_input(frames=TEST_FRAMES, properties=properties)
self.assertEqual(
str(cm.exception),
"wrong size for the property 'name' with target=='atom': "
+ "expected 3 values, got 2",
)
properties = {"name": {"target": "structure", "values": [2, 3, 5]}}
with self.assertRaises(Exception) as cm:
create_input(frames=TEST_FRAMES, properties=properties)
self.assertEqual(
str(cm.exception),
"wrong size for the property 'name' with target=='structure': "
+ "expected 1 values, got 3",
)
class TestCreateInputEnvironments(unittest.TestCase):
def test_environment(self):
data = create_input(frames=TEST_FRAMES + TEST_FRAMES, cutoff=3.5)
self.assertEqual(len(data["environments"]), 6)
for i, env in enumerate(data["environments"]):
self.assertEqual(env["structure"], i // 3)
self.assertEqual(env["center"], i % 3)
self.assertEqual(env["cutoff"], 3.5)
def test_environment_wrong_type(self):
with self.assertRaises(Exception) as cm:
create_input(frames=TEST_FRAMES, cutoff="3.5")
self.assertEqual(
str(cm.exception), "cutoff must be a float, got '3.5' of type <class 'str'>"
)
with self.assertRaises(Exception) as cm:
create_input(frames=TEST_FRAMES, cutoff=False)
self.assertEqual(
str(cm.exception),
"cutoff must be a float, got 'False' of type <class 'bool'>",
)
if __name__ == "__main__":
unittest.main()
|
py | b40556804897f4792e6c5a6a39b99917df1f1ee4 | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from django.utils.translation import ugettext_lazy as _
from rest_framework import serializers
from backend.dashboard.examples.constants import RES_KIND_WITH_DEMO_MANIFEST
class FetchResourceDemoManifestSLZ(serializers.Serializer):
""" 获取指定资源配置模版 """
kind = serializers.ChoiceField(label=_('资源类型'), choices=RES_KIND_WITH_DEMO_MANIFEST)
|
py | b405581bc39cb5f04113d1fa8aac6c219aa476d7 | # -*- coding: utf-8 -*-
"""Console script for ssptools."""
import sys
import click
@click.command()
def main(args=None):
"""Console script for ssptools."""
click.echo("Replace this message by putting your code into "
"ssptools.cli.main")
click.echo("See click documentation at http://click.pocoo.org/")
return 0
if __name__ == "__main__":
sys.exit(main()) # pragma: no cover
|
py | b40558544ebd43f4968ac4ce970f66e9fcf12ca8 | import datetime
import os
import re
import time
from Cookie import BaseCookie, SimpleCookie, CookieError
from pprint import pformat
from urllib import urlencode
from urlparse import urljoin
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
try:
# The mod_python version is more efficient, so try importing it first.
from mod_python.util import parse_qsl
except ImportError:
try:
# Python 2.6 and greater
from urlparse import parse_qsl
except ImportError:
# Python 2.5, 2.4. Works on Python 2.6 but raises
# PendingDeprecationWarning
from cgi import parse_qsl
from django.utils.datastructures import MultiValueDict, ImmutableList
from django.utils.encoding import smart_str, iri_to_uri, force_unicode
from django.utils.http import cookie_date
from django.http.multipartparser import MultiPartParser
from django.conf import settings
from django.core.files import uploadhandler
from utils import *
RESERVED_CHARS="!*'();:@&=+$,/?%#[]"
absolute_http_url_re = re.compile(r"^https?://", re.I)
class Http404(Exception):
pass
class HttpRequest(object):
"""A basic HTTP request."""
# The encoding used in GET/POST dicts. None means use default setting.
_encoding = None
_upload_handlers = []
def __init__(self):
self.GET, self.POST, self.COOKIES, self.META, self.FILES = {}, {}, {}, {}, {}
self.path = ''
self.path_info = ''
self.method = None
def __repr__(self):
return '<HttpRequest\nGET:%s,\nPOST:%s,\nCOOKIES:%s,\nMETA:%s>' % \
(pformat(self.GET), pformat(self.POST), pformat(self.COOKIES),
pformat(self.META))
def get_host(self):
"""Returns the HTTP host using the environment or request headers."""
# We try three options, in order of decreasing preference.
if 'HTTP_X_FORWARDED_HOST' in self.META:
host = self.META['HTTP_X_FORWARDED_HOST']
elif 'HTTP_HOST' in self.META:
host = self.META['HTTP_HOST']
else:
# Reconstruct the host using the algorithm from PEP 333.
host = self.META['SERVER_NAME']
server_port = str(self.META['SERVER_PORT'])
if server_port != (self.is_secure() and '443' or '80'):
host = '%s:%s' % (host, server_port)
return host
def get_full_path(self):
return ''
def build_absolute_uri(self, location=None):
"""
Builds an absolute URI from the location and the variables available in
this request. If no location is specified, the absolute URI is built on
``request.get_full_path()``.
"""
if not location:
location = self.get_full_path()
if not absolute_http_url_re.match(location):
current_uri = '%s://%s%s' % (self.is_secure() and 'https' or 'http',
self.get_host(), self.path)
location = urljoin(current_uri, location)
return iri_to_uri(location)
def is_secure(self):
return os.environ.get("HTTPS") == "on"
def is_ajax(self):
return self.META.get('HTTP_X_REQUESTED_WITH') == 'XMLHttpRequest'
def _set_encoding(self, val):
"""
Sets the encoding used for GET/POST accesses. If the GET or POST
dictionary has already been created, it is removed and recreated on the
next access (so that it is decoded correctly).
"""
self._encoding = val
if hasattr(self, '_get'):
del self._get
if hasattr(self, '_post'):
del self._post
def _get_encoding(self):
return self._encoding
encoding = property(_get_encoding, _set_encoding)
def _initialize_handlers(self):
self._upload_handlers = [uploadhandler.load_handler(handler, self)
for handler in settings.FILE_UPLOAD_HANDLERS]
def _set_upload_handlers(self, upload_handlers):
if hasattr(self, '_files'):
raise AttributeError("You cannot set the upload handlers after the upload has been processed.")
self._upload_handlers = upload_handlers
def _get_upload_handlers(self):
if not self._upload_handlers:
            # If there are no upload handlers defined, initialize them from settings.
self._initialize_handlers()
return self._upload_handlers
upload_handlers = property(_get_upload_handlers, _set_upload_handlers)
def parse_file_upload(self, META, post_data):
"""Returns a tuple of (POST QueryDict, FILES MultiValueDict)."""
self.upload_handlers = ImmutableList(
self.upload_handlers,
warning = "You cannot alter upload handlers after the upload has been processed."
)
parser = MultiPartParser(META, post_data, self.upload_handlers, self.encoding)
return parser.parse()
def _get_raw_post_data(self):
if not hasattr(self, '_raw_post_data'):
if self._read_started:
raise Exception("You cannot access raw_post_data after reading from request's data stream")
try:
content_length = int(self.META.get('CONTENT_LENGTH', 0))
except (ValueError, TypeError):
# If CONTENT_LENGTH was empty string or not an integer, don't
# error out. We've also seen None passed in here (against all
# specs, but see ticket #8259), so we handle TypeError as well.
content_length = 0
if content_length:
self._raw_post_data = self.read(content_length)
else:
self._raw_post_data = self.read()
self._stream = StringIO(self._raw_post_data)
return self._raw_post_data
raw_post_data = property(_get_raw_post_data)
def _mark_post_parse_error(self):
self._post = QueryDict('')
self._files = MultiValueDict()
self._post_parse_error = True
def _load_post_and_files(self):
# Populates self._post and self._files
if self.method != 'POST':
self._post, self._files = QueryDict('', encoding=self._encoding), MultiValueDict()
return
if self._read_started:
self._mark_post_parse_error()
return
if self.META.get('CONTENT_TYPE', '').startswith('multipart'):
self._raw_post_data = ''
try:
self._post, self._files = self.parse_file_upload(self.META, self)
except:
                # An error occurred while parsing POST data. Since when
                # formatting the error the request handler might access
                # self.POST, set self._post and self._files to prevent
                # attempts to parse POST data again.
                # Mark that an error occurred. This allows self.__repr__ to
                # be explicit about it instead of simply representing an
                # empty POST.
self._mark_post_parse_error()
raise
else:
self._post, self._files = QueryDict(self.raw_post_data, encoding=self._encoding), MultiValueDict()
## File-like and iterator interface.
##
## Expects self._stream to be set to an appropriate source of bytes by
## a corresponding request subclass (WSGIRequest or ModPythonRequest).
## Also when request data has already been read by request.POST or
## request.raw_post_data, self._stream points to a StringIO instance
## containing that data.
def read(self, *args, **kwargs):
self._read_started = True
return self._stream.read(*args, **kwargs)
def readline(self, *args, **kwargs):
self._read_started = True
return self._stream.readline(*args, **kwargs)
def xreadlines(self):
while True:
buf = self.readline()
if not buf:
break
yield buf
__iter__ = xreadlines
def readlines(self):
return list(iter(self))
class QueryDict(MultiValueDict):
"""
A specialized MultiValueDict that takes a query string when initialized.
This is immutable unless you create a copy of it.
Values retrieved from this class are converted from the given encoding
(DEFAULT_CHARSET by default) to unicode.
"""
    # These are both reset in __init__, but are specified here at the class
    # level so that unpickling will have valid values
_mutable = True
_encoding = None
def __init__(self, query_string, mutable=False, encoding=None):
MultiValueDict.__init__(self)
if not encoding:
# *Important*: do not import settings any earlier because of note
# in core.handlers.modpython.
from django.conf import settings
encoding = settings.DEFAULT_CHARSET
self.encoding = encoding
for key, value in parse_qsl((query_string or ''), True): # keep_blank_values=True
self.appendlist(force_unicode(key, encoding, errors='replace'),
force_unicode(value, encoding, errors='replace'))
self._mutable = mutable
def _get_encoding(self):
if self._encoding is None:
# *Important*: do not import settings at the module level because
# of the note in core.handlers.modpython.
from django.conf import settings
self._encoding = settings.DEFAULT_CHARSET
return self._encoding
def _set_encoding(self, value):
self._encoding = value
encoding = property(_get_encoding, _set_encoding)
def _assert_mutable(self):
if not self._mutable:
raise AttributeError("This QueryDict instance is immutable")
def __setitem__(self, key, value):
self._assert_mutable()
key = str_to_unicode(key, self.encoding)
value = str_to_unicode(value, self.encoding)
MultiValueDict.__setitem__(self, key, value)
def __delitem__(self, key):
self._assert_mutable()
super(QueryDict, self).__delitem__(key)
def __copy__(self):
result = self.__class__('', mutable=True, encoding=self.encoding)
for key, value in dict.items(self):
dict.__setitem__(result, key, value)
return result
def __deepcopy__(self, memo):
import django.utils.copycompat as copy
result = self.__class__('', mutable=True, encoding=self.encoding)
memo[id(self)] = result
for key, value in dict.items(self):
dict.__setitem__(result, copy.deepcopy(key, memo), copy.deepcopy(value, memo))
return result
def setlist(self, key, list_):
self._assert_mutable()
key = str_to_unicode(key, self.encoding)
list_ = [str_to_unicode(elt, self.encoding) for elt in list_]
MultiValueDict.setlist(self, key, list_)
def setlistdefault(self, key, default_list=()):
self._assert_mutable()
if key not in self:
self.setlist(key, default_list)
return MultiValueDict.getlist(self, key)
def appendlist(self, key, value):
self._assert_mutable()
key = str_to_unicode(key, self.encoding)
value = str_to_unicode(value, self.encoding)
MultiValueDict.appendlist(self, key, value)
def update(self, other_dict):
self._assert_mutable()
f = lambda s: str_to_unicode(s, self.encoding)
if hasattr(other_dict, 'lists'):
for key, valuelist in other_dict.lists():
for value in valuelist:
MultiValueDict.update(self, {f(key): f(value)})
else:
d = dict([(f(k), f(v)) for k, v in other_dict.items()])
MultiValueDict.update(self, d)
def pop(self, key, *args):
self._assert_mutable()
return MultiValueDict.pop(self, key, *args)
def popitem(self):
self._assert_mutable()
return MultiValueDict.popitem(self)
def clear(self):
self._assert_mutable()
MultiValueDict.clear(self)
def setdefault(self, key, default=None):
self._assert_mutable()
key = str_to_unicode(key, self.encoding)
default = str_to_unicode(default, self.encoding)
return MultiValueDict.setdefault(self, key, default)
def copy(self):
"""Returns a mutable copy of this object."""
return self.__deepcopy__({})
def urlencode(self):
output = []
for k, list_ in self.lists():
k = smart_str(k, self.encoding)
output.extend([urlencode({k: smart_str(v, self.encoding)}) for v in list_])
return '&'.join(output)
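
# Illustrative usage (not part of the original module; assumes Django settings
# are configured so that DEFAULT_CHARSET is available):
#
#   q = QueryDict('a=1&a=2&b=3')
#   q['a']            ->  u'2'          (item access returns the last value)
#   q.getlist('a')    ->  [u'1', u'2']
#   q['a'] = 'x'      ->  AttributeError (instances are immutable by default)
#   q.copy()['a'] = 'x'                  (copies are mutable)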
class CompatCookie(SimpleCookie):
"""
Cookie class that handles some issues with browser compatibility.
"""
def value_encode(self, val):
# Some browsers do not support quoted-string from RFC 2109,
# including some versions of Safari and Internet Explorer.
# These browsers split on ';', and some versions of Safari
# are known to split on ', '. Therefore, we encode ';' and ','
# SimpleCookie already does the hard work of encoding and decoding.
# It uses octal sequences like '\\012' for newline etc.
# and non-ASCII chars. We just make use of this mechanism, to
# avoid introducing two encoding schemes which would be confusing
# and especially awkward for javascript.
# NB, contrary to Python docs, value_encode returns a tuple containing
# (real val, encoded_val)
val, encoded = super(CompatCookie, self).value_encode(val)
encoded = encoded.replace(";", "\\073").replace(",","\\054")
# If encoded now contains any quoted chars, we need double quotes
# around the whole string.
if "\\" in encoded and not encoded.startswith('"'):
encoded = '"' + encoded + '"'
return val, encoded
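
# Illustrative behaviour (not part of the original module; the exact escaping
# comes from SimpleCookie's quoting rules): a value such as 'a,b;c' leaves
# value_encode() with ',' and ';' rewritten to the octal escapes \054 and \073
# and the whole value wrapped in double quotes, which keeps browsers that split
# on ';' or ', ' from truncating the cookie.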
def parse_cookie(cookie):
if cookie == '':
return {}
if not isinstance(cookie, BaseCookie):
try:
c = CompatCookie()
c.load(cookie)
except CookieError:
# Invalid cookie
return {}
else:
c = cookie
cookiedict = {}
for key in c.keys():
cookiedict[key] = c.get(key).value
return cookiedict
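
# Illustrative usage (not part of the original module):
#
#   parse_cookie('sessionid=abc123; csrftoken=xyz')
#       ->  {'sessionid': 'abc123', 'csrftoken': 'xyz'}
#
# Malformed input is swallowed: a CookieError during parsing yields {}.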
class BadHeaderError(ValueError):
pass
class HttpResponse(object):
"""A basic HTTP response, with content and dictionary-accessed headers."""
status_code = 200
def __init__(self, content='', mimetype=None, status=None,
content_type=None):
# _headers is a mapping of the lower-case name to the original case of
# the header (required for working with legacy systems) and the header
# value. Both the name of the header and its value are ASCII strings.
self._headers = {}
self._charset = settings.DEFAULT_CHARSET
if mimetype:
content_type = mimetype # For backwards compatibility
if not content_type:
content_type = "%s; charset=%s" % (settings.DEFAULT_CONTENT_TYPE,
self._charset)
if not isinstance(content, basestring) and hasattr(content, '__iter__'):
self._container = content
self._is_string = False
else:
self._container = [content]
self._is_string = True
self.cookies = CompatCookie()
if status:
self.status_code = status
self['Content-Type'] = content_type
def __str__(self):
"""Full HTTP message, including headers."""
return '\n'.join(['%s: %s' % (key, value)
for key, value in self._headers.values()]) \
+ '\n\n' + self.content
def _convert_to_ascii(self, *values):
"""Converts all values to ascii strings."""
for value in values:
if isinstance(value, unicode):
try:
value = value.encode('us-ascii')
except UnicodeError, e:
e.reason += ', HTTP response headers must be in US-ASCII format'
raise
else:
value = str(value)
if '\n' in value or '\r' in value:
raise BadHeaderError("Header values can't contain newlines (got %r)" % (value))
yield value
def __setitem__(self, header, value):
header, value = self._convert_to_ascii(header, value)
self._headers[header.lower()] = (header, value)
def __delitem__(self, header):
try:
del self._headers[header.lower()]
except KeyError:
pass
def __getitem__(self, header):
return self._headers[header.lower()][1]
def has_header(self, header):
"""Case-insensitive check for a header."""
return self._headers.has_key(header.lower())
__contains__ = has_header
def items(self):
return self._headers.values()
def get(self, header, alternate):
return self._headers.get(header.lower(), (None, alternate))[1]
def set_cookie(self, key, value='', max_age=None, expires=None, path='/',
domain=None, secure=False):
"""
Sets a cookie.
``expires`` can be a string in the correct format or a
``datetime.datetime`` object in UTC. If ``expires`` is a datetime
object then ``max_age`` will be calculated.
"""
self.cookies[key] = value
if expires is not None:
if isinstance(expires, datetime.datetime):
delta = expires - expires.utcnow()
# Add one second so the date matches exactly (a fraction of
# time gets lost between converting to a timedelta and
# then the date string).
delta = delta + datetime.timedelta(seconds=1)
# Just set max_age - the max_age logic will set expires.
expires = None
max_age = max(0, delta.days * 86400 + delta.seconds)
else:
self.cookies[key]['expires'] = expires
if max_age is not None:
self.cookies[key]['max-age'] = max_age
            # IE requires expires, so set it if it hasn't been set already.
if not expires:
self.cookies[key]['expires'] = cookie_date(time.time() +
max_age)
if path is not None:
self.cookies[key]['path'] = path
if domain is not None:
self.cookies[key]['domain'] = domain
if secure:
self.cookies[key]['secure'] = True
def delete_cookie(self, key, path='/', domain=None):
self.set_cookie(key, max_age=0, path=path, domain=domain,
expires='Thu, 01-Jan-1970 00:00:00 GMT')
def _get_content(self):
if self.has_header('Content-Encoding'):
return ''.join(self._container)
return smart_str(''.join(self._container), self._charset)
def _set_content(self, value):
self._container = [value]
self._is_string = True
content = property(_get_content, _set_content)
def __iter__(self):
self._iterator = iter(self._container)
return self
def next(self):
chunk = self._iterator.next()
if isinstance(chunk, unicode):
chunk = chunk.encode(self._charset)
return str(chunk)
def close(self):
if hasattr(self._container, 'close'):
self._container.close()
# The remaining methods partially implement the file-like object interface.
# See http://docs.python.org/lib/bltin-file-objects.html
def write(self, content):
if not self._is_string:
raise Exception("This %s instance is not writable" % self.__class__)
self._container.append(content)
def flush(self):
pass
def tell(self):
if not self._is_string:
raise Exception("This %s instance cannot tell its position" % self.__class__)
return sum([len(chunk) for chunk in self._container])
class HttpResponseRedirect(HttpResponse):
status_code = 302
def __init__(self, redirect_to):
HttpResponse.__init__(self)
self['Location'] = iri_to_uri(redirect_to)
class HttpResponsePermanentRedirect(HttpResponse):
status_code = 301
def __init__(self, redirect_to):
HttpResponse.__init__(self)
self['Location'] = iri_to_uri(redirect_to)
class HttpResponseNotModified(HttpResponse):
status_code = 304
class HttpResponseBadRequest(HttpResponse):
status_code = 400
class HttpResponseNotFound(HttpResponse):
status_code = 404
class HttpResponseForbidden(HttpResponse):
status_code = 403
class HttpResponseNotAllowed(HttpResponse):
status_code = 405
def __init__(self, permitted_methods):
HttpResponse.__init__(self)
self['Allow'] = ', '.join(permitted_methods)
class HttpResponseGone(HttpResponse):
status_code = 410
def __init__(self, *args, **kwargs):
HttpResponse.__init__(self, *args, **kwargs)
class HttpResponseServerError(HttpResponse):
status_code = 500
def __init__(self, *args, **kwargs):
HttpResponse.__init__(self, *args, **kwargs)
# A backwards compatible alias for HttpRequest.get_host.
def get_host(request):
return request.get_host()
# It's neither necessary nor appropriate to use
# django.utils.encoding.smart_unicode for parsing URLs and form inputs. Thus,
# this slightly more restricted function.
def str_to_unicode(s, encoding):
"""
Converts basestring objects to unicode, using the given encoding. Illegally
encoded input characters are replaced with Unicode "unknown" codepoint
(\ufffd).
Returns any non-basestring objects without change.
"""
if isinstance(s, str):
return unicode(s, encoding, 'replace')
else:
return s
|
py | b405599759906e1a078fb953db0d6f776feafcbb | # -*- coding: utf-8 -*-
# Copyright (c) 2020, Jigar Tarpara and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class UpdateNamingSeries(Document):
pass
@frappe.whitelist()
def get_series():
	query = frappe.db.sql("select name from `tabSeries`")
	return {
		"series": query,
	}
@frappe.whitelist()
def get_count(prefix):
count = frappe.db.get_value("Series",
prefix, "current", order_by = "name")
return count
@frappe.whitelist()
def update_count(prefix, count):
	if prefix:
		# Parameterized query avoids SQL injection via prefix/count.
		frappe.db.sql("update `tabSeries` set current = %s where name = %s", (count, prefix))
		frappe.msgprint("Success")
	return count
@frappe.whitelist()
def insert_series(series):
"""insert series if missing"""
if frappe.db.get_value('Series', series, 'name', order_by="name") == None:
frappe.db.sql("insert into tabSeries (name, current) values (%s, 0)", (series))
return "Series Added"
else:
return "Series Already There" |
py | b40559ecd310a4e91f18c0cb288080ddcd43b4f4 | import logging.config
import os
from rec_to_nwb.processing.nwb.components.device.probe.fl_probe_manager import FlProbeManager
path = os.path.dirname(os.path.abspath(__file__))
logging.config.fileConfig(fname=str(path) + '/../../../logging.conf', disable_existing_loggers=False)
logger = logging.getLogger(__name__)
class ProbeOriginator:
def __init__(self, device_factory, device_injector, probes_metadata):
self.device_factory = device_factory
self.device_injector = device_injector
self.fl_probe_manager = FlProbeManager(probes_metadata)
    def make(self, nwb_content, shanks_dict, probes_valid_map):
logger.info('Probes: Building')
fl_probes = self.fl_probe_manager.get_fl_probes(shanks_dict, probes_valid_map)
logger.info('Probes: Creating probes')
probes = [self.device_factory.create_probe(fl_probe) for fl_probe in fl_probes]
logger.info('Probes: Injecting probes into NWB')
self.device_injector.inject_all_devices(nwb_content, probes)
return probes
|
py | b4055a67eaa895f051c6fa3918535e51f8c2b664 | import urllib
import sqlite3
import datetime
import tweepy
from xml.dom.minidom import parseString
from settings import *
class Item:
title = ""
link = ""
class CustomUrlOpener(urllib.FancyURLopener):
version = "Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:25.0) Gecko/20100101 Firefox/25.0"
def getFeedData():
urllib._urlopener = CustomUrlOpener()
feedRequest = urllib.urlopen(HACKER_NEWS_RSS_URL)
feedString = feedRequest.read()
feedRequest.close()
return parseString(feedString)
def iterateItems(feedData):
results = []
for element in feedData.getElementsByTagName("item"):
item = Item()
item.title = element.getElementsByTagName("title")[0].childNodes[0].data
item.link = element.getElementsByTagName("link")[0].childNodes[0].data
item.commentLink = element.getElementsByTagName("comments")[0].childNodes[0].data
results.append(item)
return results
def isSchemaThere(conn):
cur = conn.cursor()
cur.execute("SELECT name FROM sqlite_master WHERE name='links'")
if len(cur.fetchall()) > 0:
return True
return False
def createSchema(conn):
cur = conn.cursor()
cur.execute("CREATE TABLE links (title text, url text, first_seen datetime)")
def isNewLink(item):
conn = sqlite3.connect(LOCAL_LINK_DB)
if isSchemaThere(conn) == False:
createSchema(conn)
cur = conn.cursor()
cur.execute("SELECT url FROM links WHERE url=?", (item.link,))
isNew = len(cur.fetchall()) == 0
conn.commit()
conn.close()
return isNew
def insertLink(item):
conn = sqlite3.connect(LOCAL_LINK_DB)
cur = conn.cursor()
cur.execute("INSERT INTO links VALUES (?, ?, datetime('now'))", (item.title, item.link,))
conn.commit()
conn.close()
def fixCommentLink(link):
protocolEnd = 0
if "//" in link:
protocolEnd = link.index("//") + len("//")
return "https://" + link[protocolEnd:]
else:
return "https://" + link.lstrip("/")
def getTweetText(item):
    maxTitleTextLength = TWITTER_MAX - (TCO_SHORT_URL_LENGTH + len(DIVIDER_TEXT))
    if item.link != item.commentLink:
        maxTitleTextLength -= (len(COMMENT_TEXT) + TCO_SHORT_URL_LENGTH)
    tweetText = item.title.strip(" .,:;!?")[:maxTitleTextLength] + DIVIDER_TEXT + item.link
    if item.link != item.commentLink:
        tweetText += COMMENT_TEXT + fixCommentLink(item.commentLink)
    return tweetText
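
# Worked example of the length budget above (all values are assumptions -- the
# real numbers live in settings.py, which is not shown here): with
# TWITTER_MAX = 140, TCO_SHORT_URL_LENGTH = 23, DIVIDER_TEXT = ' - ' and
# COMMENT_TEXT = ' | HN: ', a story whose link differs from its comment link
# leaves 140 - (23 + 3) - (7 + 23) = 84 characters of title before truncation.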
def submitTweet(tweetText):
if DEBUG_MODE == False:
auth = tweepy.OAuthHandler(TWITTER_CUSTOMER_KEY, TWITTER_CUSTOMER_SECRET)
auth.set_access_token(TWITTER_OAUTH_KEY, TWITTER_OAUTH_SECRET)
api = tweepy.API(auth)
api.update_status(tweetText)
else:
print tweetText
for item in iterateItems(getFeedData()):
if isNewLink(item):
insertLink(item)
tweetText = getTweetText(item)
submitTweet(tweetText)
|
py | b4055ad17bbccf79c13a6e288f491b771c4296f3 | # @Time : 2020/11/22
# @Author : Kun Zhou
# @Email : [email protected]
# UPDATE:
# @Time : 2020/11/24
# @Author : Kun Zhou
# @Email : [email protected]
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
"""Near infinity, useful as a large penalty for scoring when inf is bad."""
NEAR_INF = 1e20
NEAR_INF_FP16 = 65504
def neginf(dtype):
"""Returns a representable finite number near -inf for a dtype."""
if dtype is torch.float16:
return -NEAR_INF_FP16
else:
return -NEAR_INF
def _create_selfattn_mask(x):
# figure out how many timestamps we need
bsz = x.size(0)
time = x.size(1)
# make sure that we don't look into the future
mask = torch.tril(x.new(time, time).fill_(1))
# broadcast across batch
mask = mask.unsqueeze(0).expand(bsz, -1, -1)
return mask
def create_position_codes(n_pos, dim, out):
position_enc = np.array([
[pos / np.power(10000, 2 * j / dim) for j in range(dim // 2)]
for pos in range(n_pos)
])
out[:, 0::2] = torch.tensor(np.sin(position_enc)).type_as(out)
out[:, 1::2] = torch.tensor(np.cos(position_enc)).type_as(out)
out.detach_()
out.requires_grad = False
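# Worked example (illustrative): for n_pos = 2 and dim = 4, position_enc[pos, j]
# is pos / 10000 ** (2j / 4), so the interleaved rows written into `out` are
#   pos 0 -> [sin(0), cos(0), sin(0),    cos(0)]    = [0, 1, 0, 1]
#   pos 1 -> [sin(1), cos(1), sin(0.01), cos(0.01)]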
def _normalize(tensor, norm_layer):
"""Broadcast layer norm"""
size = tensor.size()
return norm_layer(tensor.view(-1, size[-1])).view(size)
class MultiHeadAttention(nn.Module):
def __init__(self, n_heads, dim, dropout=.0):
super(MultiHeadAttention, self).__init__()
self.n_heads = n_heads
self.dim = dim
self.attn_dropout = nn.Dropout(p=dropout) # --attention-dropout
self.q_lin = nn.Linear(dim, dim)
self.k_lin = nn.Linear(dim, dim)
self.v_lin = nn.Linear(dim, dim)
# TODO: merge for the initialization step
nn.init.xavier_normal_(self.q_lin.weight)
nn.init.xavier_normal_(self.k_lin.weight)
nn.init.xavier_normal_(self.v_lin.weight)
# and set biases to 0
self.out_lin = nn.Linear(dim, dim)
nn.init.xavier_normal_(self.out_lin.weight)
def forward(self, query, key=None, value=None, mask=None):
# Input is [B, query_len, dim]
# Mask is [B, key_len] (selfattn) or [B, key_len, key_len] (enc attn)
batch_size, query_len, dim = query.size()
assert dim == self.dim, \
f'Dimensions do not match: {dim} query vs {self.dim} configured'
assert mask is not None, 'Mask is None, please specify a mask'
n_heads = self.n_heads
dim_per_head = dim // n_heads
scale = math.sqrt(dim_per_head)
def prepare_head(tensor):
# input is [batch_size, seq_len, n_heads * dim_per_head]
# output is [batch_size * n_heads, seq_len, dim_per_head]
bsz, seq_len, _ = tensor.size()
tensor = tensor.view(batch_size, tensor.size(1), n_heads, dim_per_head)
tensor = tensor.transpose(1, 2).contiguous().view(
batch_size * n_heads,
seq_len,
dim_per_head
)
return tensor
# q, k, v are the transformed values
if key is None and value is None:
# self attention
key = value = query
elif value is None:
# key and value are the same, but query differs
# self attention
value = key
_, key_len, dim = key.size()
q = prepare_head(self.q_lin(query))
k = prepare_head(self.k_lin(key))
v = prepare_head(self.v_lin(value))
dot_prod = q.div_(scale).bmm(k.transpose(1, 2))
# [B * n_heads, query_len, key_len]
attn_mask = (
(mask == 0)
.view(batch_size, 1, -1, key_len)
.repeat(1, n_heads, 1, 1)
.expand(batch_size, n_heads, query_len, key_len)
.view(batch_size * n_heads, query_len, key_len)
)
assert attn_mask.shape == dot_prod.shape
dot_prod.masked_fill_(attn_mask, neginf(dot_prod.dtype))
attn_weights = F.softmax(dot_prod, dim=-1).type_as(query)
attn_weights = self.attn_dropout(attn_weights) # --attention-dropout
attentioned = attn_weights.bmm(v)
attentioned = (
attentioned.type_as(query)
.view(batch_size, n_heads, query_len, dim_per_head)
.transpose(1, 2).contiguous()
.view(batch_size, query_len, dim)
)
out = self.out_lin(attentioned)
return out
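# Shape sketch for the attention above (illustrative numbers): with batch = 2,
# n_heads = 4, dim = 32 and query_len = key_len = 6, prepare_head reshapes q/k/v
# to [2 * 4, 6, 8], dot_prod and attn_weights are [8, 6, 6], and the attended
# values are folded back to [2, 6, 32] before the final out_lin projection.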
class TransformerFFN(nn.Module):
def __init__(self, dim, dim_hidden, relu_dropout=.0):
super(TransformerFFN, self).__init__()
self.relu_dropout = nn.Dropout(p=relu_dropout)
self.lin1 = nn.Linear(dim, dim_hidden)
self.lin2 = nn.Linear(dim_hidden, dim)
nn.init.xavier_uniform_(self.lin1.weight)
nn.init.xavier_uniform_(self.lin2.weight)
# TODO: initialize biases to 0
def forward(self, x):
x = F.relu(self.lin1(x))
x = self.relu_dropout(x) # --relu-dropout
x = self.lin2(x)
return x
class TransformerEncoderLayer(nn.Module):
def __init__(
self,
n_heads,
embedding_size,
ffn_size,
attention_dropout=0.0,
relu_dropout=0.0,
dropout=0.0,
):
super().__init__()
self.dim = embedding_size
self.ffn_dim = ffn_size
self.attention = MultiHeadAttention(
n_heads, embedding_size,
dropout=attention_dropout, # --attention-dropout
)
self.norm1 = nn.LayerNorm(embedding_size)
self.ffn = TransformerFFN(embedding_size, ffn_size, relu_dropout=relu_dropout)
self.norm2 = nn.LayerNorm(embedding_size)
self.dropout = nn.Dropout(p=dropout)
def forward(self, tensor, mask):
tensor = tensor + self.dropout(self.attention(tensor, mask=mask))
tensor = _normalize(tensor, self.norm1)
tensor = tensor + self.dropout(self.ffn(tensor))
tensor = _normalize(tensor, self.norm2)
tensor *= mask.unsqueeze(-1).type_as(tensor)
return tensor
class TransformerEncoder(nn.Module):
"""
Transformer encoder module.
:param int n_heads: the number of multihead attention heads.
:param int n_layers: number of transformer layers.
    :param int embedding_size: the embedding size. Must be a multiple of n_heads.
    :param int ffn_size: the size of the hidden layer in the FFN
    :param embedding: an embedding matrix for the bottom layer of the transformer.
        If none, one is created for this encoder.
    :param float dropout: Dropout used around embeddings and before layer
        normalizations. This is used in Vaswani 2017 and works well on
        large datasets.
    :param float attention_dropout: Dropout performed after the multihead attention
softmax. This is not used in Vaswani 2017.
:param float relu_dropout: Dropout used after the ReLU in the FFN. Not used
in Vaswani 2017, but used in Tensor2Tensor.
:param int padding_idx: Reserved padding index in the embeddings matrix.
:param bool learn_positional_embeddings: If off, sinusoidal embeddings are
used. If on, position embeddings are learned from scratch.
:param bool embeddings_scale: Scale embeddings relative to their dimensionality.
Found useful in fairseq.
:param bool reduction: If true, returns the mean vector for the entire encoding
sequence.
:param int n_positions: Size of the position embeddings matrix.
"""
def __init__(
self,
n_heads,
n_layers,
embedding_size,
ffn_size,
vocabulary_size,
embedding=None,
dropout=0.0,
attention_dropout=0.0,
relu_dropout=0.0,
padding_idx=0,
learn_positional_embeddings=False,
embeddings_scale=False,
reduction=True,
n_positions=1024
):
super(TransformerEncoder, self).__init__()
self.embedding_size = embedding_size
self.ffn_size = ffn_size
self.n_layers = n_layers
self.n_heads = n_heads
self.dim = embedding_size
self.embeddings_scale = embeddings_scale
self.reduction = reduction
self.padding_idx = padding_idx
# this is --dropout, not --relu-dropout or --attention-dropout
self.dropout = nn.Dropout(dropout)
self.out_dim = embedding_size
assert embedding_size % n_heads == 0, \
'Transformer embedding size must be a multiple of n_heads'
# check input formats:
if embedding is not None:
assert (
embedding_size is None or embedding_size == embedding.weight.shape[1]
), "Embedding dim must match the embedding size."
if embedding is not None:
self.embeddings = embedding
else:
assert False
assert padding_idx is not None
self.embeddings = nn.Embedding(
vocabulary_size, embedding_size, padding_idx=padding_idx
)
nn.init.normal_(self.embeddings.weight, 0, embedding_size ** -0.5)
# create the positional embeddings
self.position_embeddings = nn.Embedding(n_positions, embedding_size)
if not learn_positional_embeddings:
create_position_codes(
n_positions, embedding_size, out=self.position_embeddings.weight
)
else:
nn.init.normal_(self.position_embeddings.weight, 0, embedding_size ** -0.5)
# build the model
self.layers = nn.ModuleList()
for _ in range(self.n_layers):
self.layers.append(TransformerEncoderLayer(
n_heads, embedding_size, ffn_size,
attention_dropout=attention_dropout,
relu_dropout=relu_dropout,
dropout=dropout,
))
def forward(self, input):
"""
        input is a LongTensor of token indices with shape [batch, seq_len];
        the padding mask of shape [batch, seq_len] (1 inside the sequence,
        0 on padding positions) is derived internally from self.padding_idx.
"""
mask = input != self.padding_idx
positions = (mask.cumsum(dim=1, dtype=torch.int64) - 1).clamp_(min=0)
tensor = self.embeddings(input)
if self.embeddings_scale:
tensor = tensor * np.sqrt(self.dim)
tensor = tensor + self.position_embeddings(positions).expand_as(tensor)
# --dropout on the embeddings
tensor = self.dropout(tensor)
tensor *= mask.unsqueeze(-1).type_as(tensor)
for i in range(self.n_layers):
tensor = self.layers[i](tensor, mask)
if self.reduction:
divisor = mask.type_as(tensor).sum(dim=1).unsqueeze(-1).clamp(min=1e-7)
output = tensor.sum(dim=1) / divisor
return output
else:
output = tensor
return output, mask
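# Output contract of the encoder above: with reduction=True it returns a single
# [batch, dim] vector (mean over non-padding positions); with reduction=False it
# returns the full [batch, seq_len, dim] states plus the [batch, seq_len] mask.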
class TransformerDecoderLayer(nn.Module):
def __init__(
self,
n_heads,
embedding_size,
ffn_size,
attention_dropout=0.0,
relu_dropout=0.0,
dropout=0.0,
):
super().__init__()
self.dim = embedding_size
self.ffn_dim = ffn_size
self.dropout = nn.Dropout(p=dropout)
self.self_attention = MultiHeadAttention(
n_heads, embedding_size, dropout=attention_dropout
)
self.norm1 = nn.LayerNorm(embedding_size)
self.encoder_attention = MultiHeadAttention(
n_heads, embedding_size, dropout=attention_dropout
)
self.norm2 = nn.LayerNorm(embedding_size)
self.ffn = TransformerFFN(embedding_size, ffn_size, relu_dropout=relu_dropout)
self.norm3 = nn.LayerNorm(embedding_size)
def forward(self, x, encoder_output, encoder_mask):
decoder_mask = self._create_selfattn_mask(x)
# first self attn
residual = x
        # don't peek into the future!
x = self.self_attention(query=x, mask=decoder_mask)
x = self.dropout(x) # --dropout
x = x + residual
x = _normalize(x, self.norm1)
residual = x
x = self.encoder_attention(
query=x,
key=encoder_output,
value=encoder_output,
mask=encoder_mask
)
x = self.dropout(x) # --dropout
x = residual + x
x = _normalize(x, self.norm2)
# finally the ffn
residual = x
x = self.ffn(x)
x = self.dropout(x) # --dropout
x = residual + x
x = _normalize(x, self.norm3)
return x
def _create_selfattn_mask(self, x):
# figure out how many timestamps we need
bsz = x.size(0)
time = x.size(1)
# make sure that we don't look into the future
mask = torch.tril(x.new(time, time).fill_(1))
# broadcast across batch
mask = mask.unsqueeze(0).expand(bsz, -1, -1)
return mask
class TransformerDecoder(nn.Module):
"""
Transformer Decoder layer.
:param int n_heads: the number of multihead attention heads.
:param int n_layers: number of transformer layers.
    :param int embedding_size: the embedding size. Must be a multiple of n_heads.
    :param int ffn_size: the size of the hidden layer in the FFN
    :param embedding: an embedding matrix for the bottom layer of the transformer.
        If none, one is created for this encoder.
    :param float dropout: Dropout used around embeddings and before layer
        normalizations. This is used in Vaswani 2017 and works well on
        large datasets.
    :param float attention_dropout: Dropout performed after the multihead attention
softmax. This is not used in Vaswani 2017.
:param int padding_idx: Reserved padding index in the embeddings matrix.
:param bool learn_positional_embeddings: If off, sinusoidal embeddings are
used. If on, position embeddings are learned from scratch.
:param bool embeddings_scale: Scale embeddings relative to their dimensionality.
Found useful in fairseq.
:param int n_positions: Size of the position embeddings matrix.
"""
def __init__(
self,
n_heads,
n_layers,
embedding_size,
ffn_size,
vocabulary_size,
embedding=None,
dropout=0.0,
attention_dropout=0.0,
relu_dropout=0.0,
embeddings_scale=True,
learn_positional_embeddings=False,
padding_idx=None,
n_positions=1024,
):
super().__init__()
self.embedding_size = embedding_size
self.ffn_size = ffn_size
self.n_layers = n_layers
self.n_heads = n_heads
self.dim = embedding_size
self.embeddings_scale = embeddings_scale
self.dropout = nn.Dropout(p=dropout) # --dropout
self.out_dim = embedding_size
assert embedding_size % n_heads == 0, \
'Transformer embedding size must be a multiple of n_heads'
self.embeddings = embedding
# create the positional embeddings
self.position_embeddings = nn.Embedding(n_positions, embedding_size)
if not learn_positional_embeddings:
create_position_codes(
n_positions, embedding_size, out=self.position_embeddings.weight
)
else:
nn.init.normal_(self.position_embeddings.weight, 0, embedding_size ** -0.5)
# build the model
self.layers = nn.ModuleList()
for _ in range(self.n_layers):
self.layers.append(TransformerDecoderLayer(
n_heads, embedding_size, ffn_size,
attention_dropout=attention_dropout,
relu_dropout=relu_dropout,
dropout=dropout,
))
def forward(self, input, encoder_state, incr_state=None):
encoder_output, encoder_mask = encoder_state
seq_len = input.shape[1]
positions = input.new_empty(seq_len).long()
        positions = torch.arange(seq_len, out=positions).unsqueeze(0)  # (1, seq_len)
tensor = self.embeddings(input)
if self.embeddings_scale:
tensor = tensor * np.sqrt(self.dim)
tensor = tensor + self.position_embeddings(positions).expand_as(tensor)
tensor = self.dropout(tensor) # --dropout
for layer in self.layers:
tensor = layer(tensor, encoder_output, encoder_mask)
return tensor, None
class TransformerDecoderLayerKG(nn.Module):
def __init__(
self,
n_heads,
embedding_size,
ffn_size,
attention_dropout=0.0,
relu_dropout=0.0,
dropout=0.0,
):
super().__init__()
self.dim = embedding_size
self.ffn_dim = ffn_size
self.dropout = nn.Dropout(p=dropout)
self.self_attention = MultiHeadAttention(
n_heads, embedding_size, dropout=attention_dropout
)
self.norm1 = nn.LayerNorm(embedding_size)
self.encoder_attention = MultiHeadAttention(
n_heads, embedding_size, dropout=attention_dropout
)
self.norm2 = nn.LayerNorm(embedding_size)
self.encoder_db_attention = MultiHeadAttention(
n_heads, embedding_size, dropout=attention_dropout
)
self.norm2_db = nn.LayerNorm(embedding_size)
self.encoder_kg_attention = MultiHeadAttention(
n_heads, embedding_size, dropout=attention_dropout
)
self.norm2_kg = nn.LayerNorm(embedding_size)
self.ffn = TransformerFFN(embedding_size, ffn_size, relu_dropout=relu_dropout)
self.norm3 = nn.LayerNorm(embedding_size)
def forward(self, x, encoder_output, encoder_mask, kg_encoder_output, kg_encoder_mask, db_encoder_output, db_encoder_mask):
decoder_mask = _create_selfattn_mask(x)
# first self attn
residual = x
        # don't peek into the future!
x = self.self_attention(query=x, mask=decoder_mask)
x = self.dropout(x) # --dropout
x = x + residual
x = _normalize(x, self.norm1)
residual = x
x = self.encoder_db_attention(
query=x,
key=db_encoder_output,
value=db_encoder_output,
mask=db_encoder_mask
)
x = self.dropout(x) # --dropout
x = residual + x
x = _normalize(x, self.norm2_db)
residual = x
x = self.encoder_kg_attention(
query=x,
key=kg_encoder_output,
value=kg_encoder_output,
mask=kg_encoder_mask
)
x = self.dropout(x) # --dropout
x = residual + x
x = _normalize(x, self.norm2_kg)
residual = x
x = self.encoder_attention(
query=x,
key=encoder_output,
value=encoder_output,
mask=encoder_mask
)
x = self.dropout(x) # --dropout
x = residual + x
x = _normalize(x, self.norm2)
# finally the ffn
residual = x
x = self.ffn(x)
x = self.dropout(x) # --dropout
x = residual + x
x = _normalize(x, self.norm3)
return x
class TransformerDecoderKG(nn.Module):
"""
Transformer Decoder layer.
:param int n_heads: the number of multihead attention heads.
:param int n_layers: number of transformer layers.
    :param int embedding_size: the embedding size. Must be a multiple of n_heads.
    :param int ffn_size: the size of the hidden layer in the FFN
    :param embedding: an embedding matrix for the bottom layer of the transformer.
        If none, one is created for this encoder.
    :param float dropout: Dropout used around embeddings and before layer
        normalizations. This is used in Vaswani 2017 and works well on
        large datasets.
    :param float attention_dropout: Dropout performed after the multihead attention
softmax. This is not used in Vaswani 2017.
:param float relu_dropout: Dropout used after the ReLU in the FFN. Not used
in Vaswani 2017, but used in Tensor2Tensor.
:param int padding_idx: Reserved padding index in the embeddings matrix.
:param bool learn_positional_embeddings: If off, sinusoidal embeddings are
used. If on, position embeddings are learned from scratch.
:param bool embeddings_scale: Scale embeddings relative to their dimensionality.
Found useful in fairseq.
:param int n_positions: Size of the position embeddings matrix.
"""
def __init__(
self,
n_heads,
n_layers,
embedding_size,
ffn_size,
vocabulary_size,
embedding,
dropout=0.0,
attention_dropout=0.0,
relu_dropout=0.0,
embeddings_scale=True,
learn_positional_embeddings=False,
padding_idx=None,
n_positions=1024,
):
super().__init__()
self.embedding_size = embedding_size
self.ffn_size = ffn_size
self.n_layers = n_layers
self.n_heads = n_heads
self.dim = embedding_size
self.embeddings_scale = embeddings_scale
self.dropout = nn.Dropout(dropout) # --dropout
self.out_dim = embedding_size
assert embedding_size % n_heads == 0, \
'Transformer embedding size must be a multiple of n_heads'
self.embeddings = embedding
# create the positional embeddings
self.position_embeddings = nn.Embedding(n_positions, embedding_size)
if not learn_positional_embeddings:
create_position_codes(
n_positions, embedding_size, out=self.position_embeddings.weight
)
else:
nn.init.normal_(self.position_embeddings.weight, 0, embedding_size ** -0.5)
# build the model
self.layers = nn.ModuleList()
for _ in range(self.n_layers):
self.layers.append(TransformerDecoderLayerKG(
n_heads, embedding_size, ffn_size,
attention_dropout=attention_dropout,
relu_dropout=relu_dropout,
dropout=dropout,
))
def forward(self, input, encoder_state, kg_encoder_output, kg_encoder_mask,
db_encoder_output, db_encoder_mask, incr_state=None):
encoder_output, encoder_mask = encoder_state
seq_len = input.size(1)
positions = input.new(seq_len).long() # (seq_len)
positions = torch.arange(seq_len, out=positions).unsqueeze(0) # (1, seq_len)
tensor = self.embeddings(input) # (bs, seq_len, embed_dim)
if self.embeddings_scale:
tensor = tensor * np.sqrt(self.dim)
tensor = tensor + self.position_embeddings(positions).expand_as(tensor)
tensor = self.dropout(tensor) # --dropout
for layer in self.layers:
tensor = layer(tensor, encoder_output, encoder_mask, kg_encoder_output, kg_encoder_mask, db_encoder_output, db_encoder_mask)
return tensor, None
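if __name__ == '__main__':
    # Minimal smoke test for the plain encoder/decoder pair above. The sizes, the
    # shared nn.Embedding and the token ids are arbitrary illustrative values, not
    # anything used by the surrounding project.
    vocab, dim, pad = 100, 32, 0
    token_emb = nn.Embedding(vocab, dim, padding_idx=pad)
    encoder = TransformerEncoder(
        n_heads=4, n_layers=2, embedding_size=dim, ffn_size=64,
        vocabulary_size=vocab, embedding=token_emb, padding_idx=pad, reduction=False,
    )
    decoder = TransformerDecoder(
        n_heads=4, n_layers=2, embedding_size=dim, ffn_size=64,
        vocabulary_size=vocab, embedding=token_emb,
    )
    src = torch.tensor([[5, 6, 7, pad], [8, 9, pad, pad]])  # [batch=2, seq_len=4]
    enc_out, enc_mask = encoder(src)                         # [2, 4, 32], [2, 4]
    dec_out, _ = decoder(src, (enc_out, enc_mask))           # [2, 4, 32]
    print(enc_out.shape, enc_mask.shape, dec_out.shape)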
|
py | b4055b82c7be87ca49733598e2e396203aba744c | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 OpenStack, LLC
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
A simple filesystem-backed store
"""
import errno
import hashlib
import os
import urlparse
from glance.common import exception
from glance.common import utils
from glance.openstack.common import cfg
import glance.openstack.common.log as logging
import glance.store
import glance.store.base
import glance.store.location
LOG = logging.getLogger(__name__)
datadir_opt = cfg.StrOpt('filesystem_store_datadir')
CONF = cfg.CONF
CONF.register_opt(datadir_opt)
class StoreLocation(glance.store.location.StoreLocation):
"""Class describing a Filesystem URI"""
def process_specs(self):
self.scheme = self.specs.get('scheme', 'file')
self.path = self.specs.get('path')
def get_uri(self):
return "file://%s" % self.path
def parse_uri(self, uri):
"""
Parse URLs. This method fixes an issue where credentials specified
in the URL are interpreted differently in Python 2.6.1+ than prior
versions of Python.
"""
pieces = urlparse.urlparse(uri)
assert pieces.scheme in ('file', 'filesystem')
self.scheme = pieces.scheme
path = (pieces.netloc + pieces.path).strip()
if path == '':
reason = _("No path specified in URI: %s") % uri
LOG.error(reason)
raise exception.BadStoreUri('No path specified')
self.path = path
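# Example (illustrative path): parse_uri('file:///var/lib/glance/images/1234')
# leaves scheme = 'file' and path = '/var/lib/glance/images/1234', and get_uri()
# round-trips it back to 'file:///var/lib/glance/images/1234'.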
class ChunkedFile(object):
"""
We send this back to the Glance API server as
something that can iterate over a large file
"""
CHUNKSIZE = 65536
def __init__(self, filepath):
self.filepath = filepath
self.fp = open(self.filepath, 'rb')
def __iter__(self):
"""Return an iterator over the image file"""
try:
while True:
chunk = self.fp.read(ChunkedFile.CHUNKSIZE)
if chunk:
yield chunk
else:
break
finally:
self.close()
def close(self):
"""Close the internal file pointer"""
if self.fp:
self.fp.close()
self.fp = None
class Store(glance.store.base.Store):
def get_schemes(self):
return ('file', 'filesystem')
def configure_add(self):
"""
Configure the Store to use the stored configuration options
Any store that needs special configuration should implement
this method. If the store was not able to successfully configure
itself, it should raise `exception.BadStoreConfiguration`
"""
self.datadir = CONF.filesystem_store_datadir
if self.datadir is None:
reason = (_("Could not find %s in configuration options.") %
'filesystem_store_datadir')
LOG.error(reason)
raise exception.BadStoreConfiguration(store_name="filesystem",
reason=reason)
if not os.path.exists(self.datadir):
msg = _("Directory to write image files does not exist "
"(%s). Creating.") % self.datadir
LOG.info(msg)
try:
os.makedirs(self.datadir)
except IOError:
reason = _("Unable to create datadir: %s") % self.datadir
LOG.error(reason)
raise exception.BadStoreConfiguration(store_name="filesystem",
reason=reason)
def get(self, location):
"""
Takes a `glance.store.location.Location` object that indicates
where to find the image file, and returns a tuple of generator
(for reading the image file) and image_size
:param location `glance.store.location.Location` object, supplied
from glance.store.location.get_location_from_uri()
:raises `glance.exception.NotFound` if image does not exist
"""
loc = location.store_location
filepath = loc.path
if not os.path.exists(filepath):
raise exception.NotFound(_("Image file %s not found") % filepath)
else:
msg = _("Found image at %s. Returning in ChunkedFile.") % filepath
LOG.debug(msg)
return (ChunkedFile(filepath), None)
def delete(self, location):
"""
Takes a `glance.store.location.Location` object that indicates
where to find the image file to delete
:location `glance.store.location.Location` object, supplied
from glance.store.location.get_location_from_uri()
:raises NotFound if image does not exist
:raises Forbidden if cannot delete because of permissions
"""
loc = location.store_location
fn = loc.path
if os.path.exists(fn):
try:
LOG.debug(_("Deleting image at %(fn)s") % locals())
os.unlink(fn)
except OSError:
raise exception.Forbidden(_("You cannot delete file %s") % fn)
else:
raise exception.NotFound(_("Image file %s does not exist") % fn)
def add(self, image_id, image_file, image_size):
"""
Stores an image file with supplied identifier to the backend
storage system and returns an `glance.store.ImageAddResult` object
containing information about the stored image.
:param image_id: The opaque image identifier
:param image_file: The image data to write, as a file-like object
:param image_size: The size of the image data to write, in bytes
:retval `glance.store.ImageAddResult` object
:raises `glance.common.exception.Duplicate` if the image already
existed
:note By default, the backend writes the image data to a file
`/<DATADIR>/<ID>`, where <DATADIR> is the value of
the filesystem_store_datadir configuration option and <ID>
is the supplied image ID.
"""
filepath = os.path.join(self.datadir, str(image_id))
if os.path.exists(filepath):
raise exception.Duplicate(_("Image file %s already exists!")
% filepath)
checksum = hashlib.md5()
bytes_written = 0
try:
with open(filepath, 'wb') as f:
for buf in utils.chunkreadable(image_file,
ChunkedFile.CHUNKSIZE):
bytes_written += len(buf)
checksum.update(buf)
f.write(buf)
except IOError as e:
if e.errno in [errno.EFBIG, errno.ENOSPC]:
try:
os.unlink(filepath)
except Exception:
msg = _('Unable to remove partial image data for image %s')
LOG.error(msg % image_id)
raise exception.StorageFull()
elif e.errno == errno.EACCES:
raise exception.StorageWriteDenied()
else:
raise
checksum_hex = checksum.hexdigest()
LOG.debug(_("Wrote %(bytes_written)d bytes to %(filepath)s with "
"checksum %(checksum_hex)s") % locals())
return ('file://%s' % filepath, bytes_written, checksum_hex)
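# On-disk layout sketch: add() streams the image bytes into
# <filesystem_store_datadir>/<image_id> and returns
# ('file://<that path>', <bytes written>, <md5 hex digest>); get() later serves the
# same file back as a ChunkedFile iterator of 64 KiB chunks.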
|
py | b4055c79ddf8de16a9ab78fd1f25b33d90654745 | #!/usr/bin/python
import os
read_file_name = 'submit.sb'
read_file = open(read_file_name,'r')
read_file_contents = read_file.readlines()
read_file.close()
# Illustrative defaults -- set these to match the numbering of your ns_<n>
# directories (first number, one past the last number, and the step between them).
start = 0
end = 100
span = 10
for i in range(start,end,span):
write_file_name = ('ns_' + str(i) + '/submit' + '_ns_' + str(i) + '.sb')
cmd = ('cd ns_' + str(i))
os.system(cmd)
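    # Note: os.system('cd ...') runs in a child shell and does not change this
    # script's working directory; the explicit 'ns_<i>/' prefix in write_file_name
    # above is what actually places the output in the right directory.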
write_file = open(write_file_name, 'a')
inc = 0
for line in read_file_contents:
if inc == 0:
new_line = line
elif inc == 1:
new_line = line[:-4] + str(i) + '"\n'
elif inc <= 3:
new_line = line[:-14] + str(i) + line[-12:]
elif inc < 17:
new_line = line
elif inc == 17:
new_line = line[:9] + str(i) + line[11:33] + str(i) + line[35:]
write_file.write(new_line)
inc += 1
write_file.close()
|
py | b4055ca459b3116d8d06321f76cf57a94b3d77ad |
from .product import Product, ProductStock
from .product_entry import ProductEntry
from .types import Type, Unit
|
py | b4055db5737ed6abe2f3ea9315b3abd48b4bb8fe | # Python - 3.6.0
fusc = lambda n: n if n <= 1 else (fusc(n >> 1) + ((n & 1) and fusc((n >> 1) + 1)))
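# Illustrative values (Stern's diatomic sequence): fusc(0..7) == 0, 1, 1, 2, 1, 3, 2, 3;
# e.g. fusc(5) = fusc(2) + fusc(3) = 1 + 2 = 3, since 5 is odd.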
|