ext | sha | content |
---|---|---|
py | 7dfdaaeb52d843f85d556a05ca33056d5b0f052c | class Version: pass |
py | 7dfdab6e5607477a23a8985e42489c5d6b6f3bbf | # model settings
model = dict(
type='CascadeRCNN',
num_stages=3,
pretrained='open-mmlab://msra/hrnetv2_w18',
backbone=dict(
type='HRNet',
extra=dict(
stage1=dict(
num_modules=1,
num_branches=1,
block='BOTTLENECK',
num_blocks=(4, ),
num_channels=(64, )),
stage2=dict(
num_modules=1,
num_branches=2,
block='BASIC',
num_blocks=(4, 4),
num_channels=(18, 36)),
stage3=dict(
num_modules=4,
num_branches=3,
block='BASIC',
num_blocks=(4, 4, 4),
num_channels=(18, 36, 72)),
stage4=dict(
num_modules=3,
num_branches=4,
block='BASIC',
num_blocks=(4, 4, 4, 4),
num_channels=(18, 36, 72, 144)))),
neck=dict(type='HRFPN', in_channels=[18, 36, 72, 144], out_channels=256),
rpn_head=dict(
type='RPNHead',
in_channels=256,
feat_channels=256,
anchor_scales=[8],
anchor_ratios=[0.5, 1.0, 2.0],
anchor_strides=[4, 8, 16, 32, 64],
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0],
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)),
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
bbox_head=[
dict(
type='SharedFCBBoxHead',
num_fcs=2,
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=11, ###
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2],
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
dict(
type='SharedFCBBoxHead',
num_fcs=2,
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=11, ###
target_means=[0., 0., 0., 0.],
target_stds=[0.05, 0.05, 0.1, 0.1],
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
dict(
type='SharedFCBBoxHead',
num_fcs=2,
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=11, ###
target_means=[0., 0., 0., 0.],
target_stds=[0.033, 0.033, 0.067, 0.067],
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
])
# model training and testing settings
train_cfg = dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=0,
pos_weight=-1,
debug=False),
rpn_proposal=dict(
nms_across_levels=False,
nms_pre=2000,
nms_post=2000,
max_num=2000,
nms_thr=0.7,
min_bbox_size=0),
rcnn=[
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
pos_weight=-1,
debug=False),
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.6,
neg_iou_thr=0.6,
min_pos_iou=0.6,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
pos_weight=-1,
debug=False),
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.7,
min_pos_iou=0.7,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
pos_weight=-1,
debug=False)
],
stage_loss_weights=[1, 0.5, 0.25])
test_cfg = dict(
rpn=dict(
nms_across_levels=False,
nms_pre=1000,
nms_post=1000,
max_num=1000,
nms_thr=0.7,
min_bbox_size=0),
rcnn=dict(
score_thr=0.05, nms=dict(type='nms', iou_thr=0.5), max_per_img=100),
keep_all_stages=False)
# dataset settings
dataset_type = 'CocoDataset'
data_root = '/root/docker_mounts_ssd/cq/chongqing1_round1_train1_20191223/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(1888, 1888), keep_ratio=True), ###
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1888, 1888), ###
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
imgs_per_gpu=1,
workers_per_gpu=0,
train=dict(
type=dataset_type,
ann_file=data_root + 'non_cat0_annotations.json',
img_prefix=data_root + 'images/',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline))
# optimizer
optimizer = dict(type='SGD', lr=0.0025, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=1.0 / 3,
step=[16, 19])
checkpoint_config = dict(interval=1)
# yapf:disable
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook')
])
# yapf:enable
# runtime settings
total_epochs = 20
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/cascade_rcnn_hrnetv2p_w18_1x_cq_1888_s21'
load_from = None
resume_from = None
workflow = [('train', 1)]
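# --- Usage sketch (not part of the original config) ---------------------------
# mmdetection-style configs like this one are normally consumed by the training
# entry point of the framework; the config path below is hypothetical and
# depends on where this file is saved:
#   python tools/train.py configs/cascade_rcnn_hrnetv2p_w18_1x_cq_1888_s21.py
# With imgs_per_gpu=1 and lr=0.0025 above, the settings follow the usual
# "learning rate scales linearly with total batch size" convention.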
|
py | 7dfdac2a78e8832e49865a6a660a3b2c39df1357 | # coding: utf-8
"""
Jamf Pro API
## Overview This is a sample Jamf Pro server which allows for usage without any authentication. The Jamf Pro environment which supports the Try it Out functionality does not run the current beta version of Jamf Pro, thus any newly added endpoints will result in an error and should be used solely for documentation purposes. # noqa: E501
The version of the OpenAPI document: 10.25.0
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import jamf
from jamf.api.time_zones_preview_api import TimeZonesPreviewApi # noqa: E501
from jamf.rest import ApiException
class TestTimeZonesPreviewApi(unittest.TestCase):
"""TimeZonesPreviewApi unit test stubs"""
def setUp(self):
self.api = jamf.api.time_zones_preview_api.TimeZonesPreviewApi() # noqa: E501
def tearDown(self):
pass
def test_v1_time_zones_get(self):
"""Test case for v1_time_zones_get
Return information about the currently supported Time Zones # noqa: E501
"""
pass
if __name__ == '__main__':
unittest.main()
|
py | 7dfdacdc90d9e02e5857aa90e592b67dfbad96bc | # -*- coding: utf-8 -*-
"""
genres manager module.
"""
import pyrin.configuration.services as config_services
import pyrin.validator.services as validator_services
from pyrin.core.globals import _
from pyrin.core.structs import Manager
from pyrin.database.services import get_current_store
from charma.genres import GenresPackage
from charma.genres.models import GenreEntity
from charma.genres.exceptions import GenreDoesNotExistError
class GenresManager(Manager):
"""
genres manager class.
"""
package_class = GenresPackage
def __init__(self):
"""
initializes an instance of GenresManager.
"""
super().__init__()
self._main_genres = self._get_main_genres()
def _get_main_genres(self):
"""
gets list of main genres from config store.
:rtype: list[str]
"""
genres = config_services.get('genres', 'general', 'main_genres', default=[])
result = []
for item in genres:
result.append(item.lower())
return result
def _is_main(self, name):
"""
gets a value indicating that given genre is a main genre.
:param str name: genre name.
:rtype: bool
"""
return name.lower() in self._main_genres
def _make_find_expressions(self, expressions, **filters):
"""
makes find expressions with given filters.
:param list expressions: list of expressions to add
new expressions into it.
:keyword str name: genre name.
:keyword bool is_main: is main genre.
:raises ValidationError: validation error.
:rtype: list
"""
validator_services.validate_for_find(GenreEntity, filters)
name = filters.get('name')
is_main = filters.get('is_main')
if name is not None:
expressions.append(GenreEntity.name.icontains(name))
if is_main is not None:
expressions.append(GenreEntity.is_main == is_main)
def _get(self, id):
"""
gets genre with given id.
it returns None if genre does not exist.
:param uuid.UUID id: genre id.
:rtype: GenreEntity
"""
store = get_current_store()
return store.query(GenreEntity).get(id)
def _get_all(self, *expressions):
"""
gets all genres using provided expressions.
:param object expressions: expressions to be applied by filter.
:rtype: list[GenreEntity]
"""
store = get_current_store()
return store.query(GenreEntity).filter(*expressions)\
.order_by(GenreEntity.name).all()
def get(self, id):
"""
gets genre with given id.
it raises an error if genre does not exist.
:param uuid.UUID id: genre id.
:raises GenreDoesNotExistError: genre does not exist error.
:rtype: GenreEntity
"""
entity = self._get(id)
if entity is None:
raise GenreDoesNotExistError(_('Genre with id [{id}] does not exist.')
.format(id=id))
return entity
def create(self, name, **options):
"""
creates a new genre.
:param str name: genre name.
:raises ValidationError: validation error.
:returns: created genre id.
:rtype: uuid.UUID
"""
options.update(name=name)
validator_services.validate_dict(GenreEntity, options)
entity = GenreEntity(**options)
entity.is_main = self._is_main(name)
entity.save()
return entity.id
def find(self, **filters):
"""
finds genres with given filters.
:keyword str name: genre name.
:keyword bool is_main: is main genre.
:raises ValidationError: validation error.
:rtype: list[GenreEntity]
"""
expressions = []
self._make_find_expressions(expressions, **filters)
return self._get_all(*expressions)
def exists(self, name):
"""
gets a value indicating that a genre with given name exists.
:param str name: genre name.
:rtype: bool
"""
store = get_current_store()
return store.query(GenreEntity.id).filter(GenreEntity.name.ilike(name)).existed()
def get_all(self):
"""
gets all genres.
:rtype: list[GenreEntity]
"""
return self._get_all()
def delete(self, id):
"""
deletes a genre with given id.
:param uuid.UUID id: genre id.
:returns: count of deleted items.
:rtype: int
"""
store = get_current_store()
return store.query(GenreEntity).filter(GenreEntity.id == id).delete()
def get_by_name(self, name):
"""
gets a genre by name.
it returns None if genre does not exist.
:param str name: genre name.
:rtype: GenreEntity
"""
store = get_current_store()
return store.query(GenreEntity).filter(GenreEntity.name.ilike(name)).one_or_none()
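# --- Illustrative usage (a sketch, not part of the original module) -----------
# Assumes a configured pyrin/charma application context so that the config,
# validator and database services used above are available; names below are
# hypothetical.
#
#   manager = GenresManager()
#   genre_id = manager.create('Drama')          # is_main decided from config's main_genres
#   drama = manager.get(genre_id)               # raises GenreDoesNotExistError if missing
#   mains = manager.find(is_main=True)          # filtered list, ordered by name
#   assert manager.exists('drama')              # case-insensitive name check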
|
py | 7dfdae2e19e8aa6418f29c4220f982578a09b699 | # (https://github.com/sandorex/extract-browser-data.py)
# extract-browser-data
#
# Copyright 2020 Aleksandar Radivojevic
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
import subprocess
import time
from pathlib import Path
def _get_window_id(pid):
ids = subprocess.check_output(
['xdotool', 'search', '--onlyvisible', '--pid',
str(pid)]).decode('utf-8').strip().split(' ')
wid = None
if len(ids) == 1:
wid = ids[0]
else:
for window_id in ids:
try:
subprocess.check_call(['xdotool', 'getwindowname', window_id])
except subprocess.CalledProcessError:
continue
wid = window_id
break
if wid is None:
raise Exception('could not find the window id for Firefox')
return wid
class Wrapper:
DEFAULT_EXECUTABLE_NAME: str
def __init__(self, default_executable, executable, args):
self.executable = executable if executable is not None else default_executable
self.args = args
self.process = None
self.wid = None
def __enter__(self):
return self.start()
def __exit__(self, *args):
self.stop()
def start(self):
self.process = subprocess.Popen([self.executable] + self.args)
# delay for it to start up properly
time.sleep(4)
self.wid = _get_window_id(self.process.pid)
return self
def _stop(self):
raise NotImplementedError()
def stop(self):
self._stop()
self.process.wait(100)
self.wid = None
self.process = None
def kill(self):
self.process.kill()
self.process.wait(100)
self.process = None
self.wid = None
@classmethod
def read_version(cls, executable=None):
if executable is None:
executable = cls.DEFAULT_EXECUTABLE_NAME
try:
version = subprocess.check_output(
[executable, '--version'],
stderr=subprocess.DEVNULL).decode('utf-8')
except FileNotFoundError:
return None
match = re.search(r'([0-9.]+)', version)
if match is None:
return None
version = match.group(1)
# raw string version and a integer version
return version, int(version.replace('.', ''))
class FirefoxWrapper(Wrapper):
DEFAULT_EXECUTABLE_NAME: str = 'firefox'
def __init__(self, profile, executable=None):
self.profile_path = Path(profile).resolve().absolute()
args = ['-profile', self.profile_path]
firefox_args = os.environ.get('FIREFOX_ARGS')
if firefox_args:
args += firefox_args.split(' ')
super().__init__(self.DEFAULT_EXECUTABLE_NAME, executable, args)
def _stop(self):
subprocess.check_call(['xdotool', 'windowactivate', '--sync', self.wid])
time.sleep(0.5)
subprocess.check_call(['wmctrl', '-ic', self.wid])
time.sleep(0.5)
subprocess.check_call(['xdotool', 'key', 'Return'])
# TODO pass which keystore to use as an enum
class ChromiumWrapper(Wrapper):
DEFAULT_EXECUTABLE_NAME: str = 'chromium-browser'
def __init__(self,
user_data_dir,
executable=None,
additional_args=None,
use_basic_password_store=True):
self.user_data_dir = Path(user_data_dir).resolve().absolute()
args = [f'--user-data-dir={self.user_data_dir}']
if use_basic_password_store:
# NOTE any other password store will ask for a password
args.append('--password-store=basic')
if additional_args is not None:
args += additional_args
chromium_args = os.environ.get('CHROMIUM_ARGS')
if chromium_args:
args += chromium_args.split(' ')
super().__init__(self.DEFAULT_EXECUTABLE_NAME, executable, args)
def _stop(self):
subprocess.check_call(['xdotool', 'windowactivate', '--sync', self.wid])
time.sleep(0.5)
subprocess.check_call(['wmctrl', '-ic', self.wid])
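# --- Minimal usage sketch (assumptions: an X display plus xdotool/wmctrl and
# the browsers installed; the profile/user-data paths are hypothetical) --------
#
#   with FirefoxWrapper('/tmp/ff-profile') as firefox:
#       print(firefox.wid)                      # window id located via xdotool
#
#   with ChromiumWrapper('/tmp/chromium-data') as chromium:
#       pass                                    # closed gracefully on __exit__
#
#   ChromiumWrapper.read_version()              # e.g. ('96.0.4664.45', 960466445) or None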
|
py | 7dfdae8365dbd1f948a6b1a8075ca11478ae9122 | #!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Generates build.ninja that will build GN."""
import contextlib
import errno
import optparse
import os
import platform
import re
import shutil
import subprocess
import sys
import tempfile
import urllib2
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
REPO_ROOT = os.path.dirname(SCRIPT_DIR)
GN_ROOT = os.path.join(REPO_ROOT, 'tools', 'gn')
is_win = sys.platform.startswith('win')
is_linux = sys.platform.startswith('linux')
is_mac = sys.platform.startswith('darwin')
is_posix = is_linux or is_mac
def main(argv):
parser = optparse.OptionParser(description=sys.modules[__name__].__doc__)
parser.add_option('-d', '--debug', action='store_true',
help='Do a debug build. Defaults to release build.')
parser.add_option('--use-lto', action='store_true',
help='Enable the use of LTO')
parser.add_option('--no-sysroot', action='store_true',
help='(Linux only) Do not build with the Debian sysroot.')
parser.add_option('--no-last-commit-position', action='store_true',
help='Do not generate last_commit_position.h.')
parser.add_option('--out-path',
help='The path to generate the build files in.')
options, args = parser.parse_args(argv)
if args:
parser.error('Unrecognized command line arguments: %s.' % ', '.join(args))
linux_sysroot = None
if is_linux and not options.no_sysroot:
linux_sysroot = UpdateLinuxSysroot()
out_dir = options.out_path or os.path.join(REPO_ROOT, 'out')
if not os.path.isdir(out_dir):
os.makedirs(out_dir)
if not options.no_last_commit_position:
GenerateLastCommitPosition(os.path.join(out_dir, 'last_commit_position.h'))
WriteGNNinja(os.path.join(out_dir, 'build.ninja'), options, linux_sysroot)
return 0
def GenerateLastCommitPosition(header):
ROOT_TAG = 'initial-commit'
describe_output = subprocess.check_output(
['git', 'describe', 'HEAD', '--match', ROOT_TAG], shell=is_win,
cwd=REPO_ROOT)
mo = re.match(ROOT_TAG + '-(\d+)-g([0-9a-f]+)', describe_output)
if not mo:
raise ValueError(
'Unexpected output from git describe when generating version header')
contents = '''// Generated by build/gen.py.
#ifndef OUT_LAST_COMMIT_POSITION_H_
#define OUT_LAST_COMMIT_POSITION_H_
#define LAST_COMMIT_POSITION "%s (%s)"
#endif // OUT_LAST_COMMIT_POSITION_H_
''' % (mo.group(1), mo.group(2))
# Only write/touch this file if the commit position has changed.
old_contents = ''
if os.path.isfile(header):
with open(header, 'rb') as f:
old_contents = f.read()
if old_contents != contents:
with open(header, 'wb') as f:
f.write(contents)
def UpdateLinuxSysroot():
# Sysroot revision from:
# https://cs.chromium.org/chromium/src/build/linux/sysroot_scripts/sysroots.json
server = 'https://commondatastorage.googleapis.com'
path = 'chrome-linux-sysroot/toolchain'
revision = '1015a998c2adf188813cca60b558b0ea1a0b6ced'
filename = 'debian_sid_amd64_sysroot.tar.xz'
url = '%s/%s/%s/%s' % (server, path, revision, filename)
sysroot = os.path.join(SCRIPT_DIR, os.pardir, '.linux-sysroot')
stamp = os.path.join(sysroot, '.stamp')
if os.path.exists(stamp):
with open(stamp) as s:
if s.read() == url:
return sysroot
print 'Installing Debian root image from %s' % url
if os.path.isdir(sysroot):
shutil.rmtree(sysroot)
os.mkdir(sysroot)
tarball = os.path.join(sysroot, filename)
print 'Downloading %s' % url
for _ in range(3):
response = urllib2.urlopen(url)
with open(tarball, 'wb') as f:
f.write(response.read())
break
else:
raise Exception('Failed to download %s' % url)
subprocess.check_call(['tar', 'xf', tarball, '-C', sysroot])
os.remove(tarball)
with open(stamp, 'w') as s:
s.write(url)
return sysroot
def WriteGenericNinja(path, static_libraries, executables,
cc, cxx, ar, ld, options,
cflags=[], cflags_cc=[], ldflags=[], libflags=[],
include_dirs=[], solibs=[]):
ninja_header_lines = [
'cc = ' + cc,
'cxx = ' + cxx,
'ar = ' + ar,
'ld = ' + ld,
'',
'rule regen',
' command = %s ../build/gen.py%s' % (
sys.executable, ' -d' if options.debug else ''),
' description = Regenerating ninja files',
'',
'build build.ninja: regen',
' generator = 1',
' depfile = build.ninja.d',
'',
]
template_filename = os.path.join(SCRIPT_DIR, {
'win32': 'build_win.ninja.template',
'darwin': 'build_mac.ninja.template',
'linux2': 'build_linux.ninja.template'
}[sys.platform])
with open(template_filename) as f:
ninja_template = f.read()
if is_win:
executable_ext = '.exe'
library_ext = '.lib'
object_ext = '.obj'
else:
executable_ext = ''
library_ext = '.a'
object_ext = '.o'
def escape_path_ninja(path):
return path.replace('$ ', '$$ ').replace(' ', '$ ').replace(':', '$:')
def src_to_obj(path):
return escape_path_ninja('%s' % os.path.splitext(path)[0] + object_ext)
def library_to_a(library):
return '%s%s' % (library, library_ext)
ninja_lines = []
def build_source(src_file, settings):
ninja_lines.extend([
'build %s: %s %s' % (src_to_obj(src_file),
settings['tool'],
escape_path_ninja(
os.path.join(REPO_ROOT, src_file))),
' includes = %s' % ' '.join(
['-I' + escape_path_ninja(dirname) for dirname in
include_dirs + settings.get('include_dirs', [])]),
' cflags = %s' % ' '.join(cflags + settings.get('cflags', [])),
' cflags_cc = %s' %
' '.join(cflags_cc + settings.get('cflags_cc', [])),
])
for library, settings in static_libraries.iteritems():
for src_file in settings['sources']:
build_source(src_file, settings)
ninja_lines.append('build %s: alink_thin %s' % (
library_to_a(library),
' '.join([src_to_obj(src_file) for src_file in settings['sources']])))
ninja_lines.append(' libflags = %s' % ' '.join(libflags))
for executable, settings in executables.iteritems():
for src_file in settings['sources']:
build_source(src_file, settings)
ninja_lines.extend([
'build %s%s: link %s | %s' % (
executable, executable_ext,
' '.join([src_to_obj(src_file) for src_file in settings['sources']]),
' '.join([library_to_a(library) for library in settings['libs']])),
' ldflags = %s' % ' '.join(ldflags),
' solibs = %s' % ' '.join(solibs),
' libs = %s' % ' '.join(
[library_to_a(library) for library in settings['libs']]),
])
ninja_lines.append('') # Make sure the file ends with a newline.
with open(path, 'w') as f:
f.write('\n'.join(ninja_header_lines))
f.write(ninja_template)
f.write('\n'.join(ninja_lines))
with open(path + '.d', 'w') as f:
f.write('build.ninja: ' +
os.path.relpath(os.path.join(SCRIPT_DIR, 'gen.py'),
os.path.dirname(path)) + ' ' +
os.path.relpath(template_filename, os.path.dirname(path)) + '\n')
def WriteGNNinja(path, options, linux_sysroot):
if is_win:
cc = os.environ.get('CC', 'cl.exe')
cxx = os.environ.get('CXX', 'cl.exe')
ld = os.environ.get('LD', 'link.exe')
ar = os.environ.get('AR', 'lib.exe')
else:
cc = os.environ.get('CC', 'clang')
cxx = os.environ.get('CXX', 'clang++')
ld = cxx
ar = os.environ.get('AR', 'llvm-ar')
cflags = os.environ.get('CFLAGS', '').split()
cflags_cc = os.environ.get('CXXFLAGS', '').split()
ldflags = os.environ.get('LDFLAGS', '').split()
libflags = os.environ.get('LIBFLAGS', '').split()
include_dirs = [REPO_ROOT, os.path.dirname(path)]
libs = []
if is_posix:
if options.debug:
cflags.extend(['-O0', '-g'])
else:
cflags.append('-DNDEBUG')
cflags.append('-O3')
ldflags.append('-O3')
# Use -fdata-sections and -ffunction-sections to place each function
# or data item into its own section so --gc-sections can eliminate any
# unused functions and data items.
cflags.extend(['-fdata-sections', '-ffunction-sections'])
ldflags.extend(['-fdata-sections', '-ffunction-sections'])
ldflags.append('-Wl,-dead_strip' if is_mac else '-Wl,--gc-sections')
# Omit all symbol information from the output file.
ldflags.append('-Wl,-S' if is_mac else '-Wl,-strip-all')
# Enable identical code-folding.
if is_linux:
ldflags.append('-Wl,--icf=all')
cflags.extend([
'-D_FILE_OFFSET_BITS=64',
'-D__STDC_CONSTANT_MACROS', '-D__STDC_FORMAT_MACROS',
'-pthread',
'-pipe',
'-fno-exceptions',
'-fno-rtti',
])
cflags_cc.extend(['-std=c++14', '-Wno-c++11-narrowing'])
if is_linux:
if linux_sysroot:
# Use the sid sysroot that UpdateLinuxSysroot() downloads. We need to
# force the use of libstdc++ for now because libc++ is not in that
# sysroot and we don't currently have a local build of that. We should
# probably resolve this and (re-)add a way to build against libc++.
cflags.append('--sysroot=' + linux_sysroot)
ldflags.append('--sysroot=' + linux_sysroot)
cflags.append('-stdlib=libstdc++')
ldflags.extend(['-static-libstdc++',
'-stdlib=libstdc++',
'-Wl,--as-needed',
])
libs.extend([
'-lgcc_s',
'-lpthread',
])
elif is_mac:
min_mac_version_flag = '-mmacosx-version-min=10.9'
cflags.append(min_mac_version_flag)
ldflags.append(min_mac_version_flag)
if options.use_lto:
cflags.extend(['-flto', '-fwhole-program-vtables'])
ldflags.extend(['-flto', '-fwhole-program-vtables'])
elif is_win:
if not options.debug:
cflags.extend(['/Ox', '/DNDEBUG', '/GL'])
libflags.extend(['/LTCG'])
ldflags.extend(['/LTCG', '/OPT:REF', '/OPT:ICF'])
cflags.extend([
'/DNOMINMAX',
'/DUNICODE',
'/DWIN32_LEAN_AND_MEAN',
'/DWINVER=0x0A00',
'/D_CRT_SECURE_NO_DEPRECATE',
'/D_SCL_SECURE_NO_DEPRECATE',
'/D_UNICODE',
'/D_WIN32_WINNT=0x0A00',
'/FS',
'/Gy',
'/W4',
'/WX',
'/Zi',
'/wd4099',
'/wd4100',
'/wd4127',
'/wd4244',
'/wd4267',
'/wd4838',
'/wd4996',
])
cflags_cc.extend([
'/GR-',
'/D_HAS_EXCEPTIONS=0',
])
ldflags.extend(['/DEBUG', '/MACHINE:x64'])
static_libraries = {
'base': {'sources': [
'base/callback_internal.cc',
'base/command_line.cc',
'base/environment.cc',
'base/files/file.cc',
'base/files/file_enumerator.cc',
'base/files/file_path.cc',
'base/files/file_path_constants.cc',
'base/files/file_util.cc',
'base/files/scoped_file.cc',
'base/files/scoped_temp_dir.cc',
'base/json/json_parser.cc',
'base/json/json_reader.cc',
'base/json/json_writer.cc',
'base/json/string_escape.cc',
'base/logging.cc',
'base/md5.cc',
'base/memory/ref_counted.cc',
'base/memory/weak_ptr.cc',
'base/sha1.cc',
'base/strings/string_number_conversions.cc',
'base/strings/string_piece.cc',
'base/strings/string_split.cc',
'base/strings/string_util.cc',
'base/strings/string_util_constants.cc',
'base/strings/stringprintf.cc',
'base/strings/utf_string_conversion_utils.cc',
'base/strings/utf_string_conversions.cc',
'base/third_party/icu/icu_utf.cc',
'base/timer/elapsed_timer.cc',
'base/value_iterators.cc',
'base/values.cc',
], 'tool': 'cxx', 'include_dirs': []},
'gn_lib': {'sources': [
'tools/gn/action_target_generator.cc',
'tools/gn/action_values.cc',
'tools/gn/analyzer.cc',
'tools/gn/args.cc',
'tools/gn/binary_target_generator.cc',
'tools/gn/builder.cc',
'tools/gn/builder_record.cc',
'tools/gn/build_settings.cc',
'tools/gn/bundle_data.cc',
'tools/gn/bundle_data_target_generator.cc',
'tools/gn/bundle_file_rule.cc',
'tools/gn/c_include_iterator.cc',
'tools/gn/command_analyze.cc',
'tools/gn/command_args.cc',
'tools/gn/command_check.cc',
'tools/gn/command_clean.cc',
'tools/gn/command_desc.cc',
'tools/gn/command_format.cc',
'tools/gn/command_gen.cc',
'tools/gn/command_help.cc',
'tools/gn/command_ls.cc',
'tools/gn/command_path.cc',
'tools/gn/command_refs.cc',
'tools/gn/commands.cc',
'tools/gn/compile_commands_writer.cc',
'tools/gn/config.cc',
'tools/gn/config_values.cc',
'tools/gn/config_values_extractors.cc',
'tools/gn/config_values_generator.cc',
'tools/gn/copy_target_generator.cc',
'tools/gn/create_bundle_target_generator.cc',
'tools/gn/deps_iterator.cc',
'tools/gn/desc_builder.cc',
'tools/gn/eclipse_writer.cc',
'tools/gn/err.cc',
'tools/gn/escape.cc',
'tools/gn/exec_process.cc',
'tools/gn/filesystem_utils.cc',
'tools/gn/function_exec_script.cc',
'tools/gn/function_foreach.cc',
'tools/gn/function_forward_variables_from.cc',
'tools/gn/function_get_label_info.cc',
'tools/gn/function_get_path_info.cc',
'tools/gn/function_get_target_outputs.cc',
'tools/gn/function_process_file_template.cc',
'tools/gn/function_read_file.cc',
'tools/gn/function_rebase_path.cc',
'tools/gn/functions.cc',
'tools/gn/function_set_defaults.cc',
'tools/gn/function_set_default_toolchain.cc',
'tools/gn/functions_target.cc',
'tools/gn/function_template.cc',
'tools/gn/function_toolchain.cc',
'tools/gn/function_write_file.cc',
'tools/gn/group_target_generator.cc',
'tools/gn/header_checker.cc',
'tools/gn/import_manager.cc',
'tools/gn/inherited_libraries.cc',
'tools/gn/input_conversion.cc',
'tools/gn/input_file.cc',
'tools/gn/input_file_manager.cc',
'tools/gn/item.cc',
'tools/gn/json_project_writer.cc',
'tools/gn/label.cc',
'tools/gn/label_pattern.cc',
'tools/gn/lib_file.cc',
'tools/gn/loader.cc',
'tools/gn/location.cc',
'tools/gn/ninja_action_target_writer.cc',
'tools/gn/ninja_binary_target_writer.cc',
'tools/gn/ninja_build_writer.cc',
'tools/gn/ninja_bundle_data_target_writer.cc',
'tools/gn/ninja_copy_target_writer.cc',
'tools/gn/ninja_create_bundle_target_writer.cc',
'tools/gn/ninja_group_target_writer.cc',
'tools/gn/ninja_target_command_util.cc',
'tools/gn/ninja_target_writer.cc',
'tools/gn/ninja_toolchain_writer.cc',
'tools/gn/ninja_utils.cc',
'tools/gn/ninja_writer.cc',
'tools/gn/operators.cc',
'tools/gn/output_file.cc',
'tools/gn/parse_node_value_adapter.cc',
'tools/gn/parser.cc',
'tools/gn/parse_tree.cc',
'tools/gn/path_output.cc',
'tools/gn/pattern.cc',
'tools/gn/pool.cc',
'tools/gn/qt_creator_writer.cc',
'tools/gn/runtime_deps.cc',
'tools/gn/scheduler.cc',
'tools/gn/scope.cc',
'tools/gn/scope_per_file_provider.cc',
'tools/gn/settings.cc',
'tools/gn/setup.cc',
'tools/gn/source_dir.cc',
'tools/gn/source_file.cc',
'tools/gn/source_file_type.cc',
'tools/gn/standard_out.cc',
'tools/gn/string_utils.cc',
'tools/gn/substitution_list.cc',
'tools/gn/substitution_pattern.cc',
'tools/gn/substitution_type.cc',
'tools/gn/substitution_writer.cc',
'tools/gn/switches.cc',
'tools/gn/target.cc',
'tools/gn/target_generator.cc',
'tools/gn/template.cc',
'tools/gn/token.cc',
'tools/gn/tokenizer.cc',
'tools/gn/tool.cc',
'tools/gn/toolchain.cc',
'tools/gn/trace.cc',
'tools/gn/value.cc',
'tools/gn/value_extractors.cc',
'tools/gn/variables.cc',
'tools/gn/visibility.cc',
'tools/gn/visual_studio_utils.cc',
'tools/gn/visual_studio_writer.cc',
'tools/gn/xcode_object.cc',
'tools/gn/xcode_writer.cc',
'tools/gn/xml_element_writer.cc',
'util/exe_path.cc',
'util/msg_loop.cc',
'util/semaphore.cc',
'util/sys_info.cc',
'util/ticks.cc',
'util/worker_pool.cc',
], 'tool': 'cxx', 'include_dirs': []},
}
executables = {
'gn': {'sources': [ 'tools/gn/gn_main.cc' ],
'tool': 'cxx', 'include_dirs': [], 'libs': []},
'gn_unittests': { 'sources': [
'tools/gn/action_target_generator_unittest.cc',
'tools/gn/analyzer_unittest.cc',
'tools/gn/args_unittest.cc',
'tools/gn/builder_unittest.cc',
'tools/gn/c_include_iterator_unittest.cc',
'tools/gn/command_format_unittest.cc',
'tools/gn/config_unittest.cc',
'tools/gn/config_values_extractors_unittest.cc',
'tools/gn/escape_unittest.cc',
'tools/gn/exec_process_unittest.cc',
'tools/gn/filesystem_utils_unittest.cc',
'tools/gn/function_foreach_unittest.cc',
'tools/gn/function_forward_variables_from_unittest.cc',
'tools/gn/function_get_label_info_unittest.cc',
'tools/gn/function_get_path_info_unittest.cc',
'tools/gn/function_get_target_outputs_unittest.cc',
'tools/gn/function_process_file_template_unittest.cc',
'tools/gn/function_rebase_path_unittest.cc',
'tools/gn/function_template_unittest.cc',
'tools/gn/function_toolchain_unittest.cc',
'tools/gn/function_write_file_unittest.cc',
'tools/gn/functions_target_unittest.cc',
'tools/gn/functions_unittest.cc',
'tools/gn/header_checker_unittest.cc',
'tools/gn/inherited_libraries_unittest.cc',
'tools/gn/input_conversion_unittest.cc',
'tools/gn/label_pattern_unittest.cc',
'tools/gn/label_unittest.cc',
'tools/gn/loader_unittest.cc',
'tools/gn/ninja_action_target_writer_unittest.cc',
'tools/gn/ninja_binary_target_writer_unittest.cc',
'tools/gn/ninja_build_writer_unittest.cc',
'tools/gn/ninja_bundle_data_target_writer_unittest.cc',
'tools/gn/ninja_copy_target_writer_unittest.cc',
'tools/gn/ninja_create_bundle_target_writer_unittest.cc',
'tools/gn/ninja_group_target_writer_unittest.cc',
'tools/gn/ninja_target_writer_unittest.cc',
'tools/gn/ninja_toolchain_writer_unittest.cc',
'tools/gn/operators_unittest.cc',
'tools/gn/parse_tree_unittest.cc',
'tools/gn/parser_unittest.cc',
'tools/gn/path_output_unittest.cc',
'tools/gn/pattern_unittest.cc',
'tools/gn/runtime_deps_unittest.cc',
'tools/gn/scope_per_file_provider_unittest.cc',
'tools/gn/scope_unittest.cc',
'tools/gn/source_dir_unittest.cc',
'tools/gn/source_file_unittest.cc',
'tools/gn/string_utils_unittest.cc',
'tools/gn/substitution_pattern_unittest.cc',
'tools/gn/substitution_writer_unittest.cc',
'tools/gn/target_unittest.cc',
'tools/gn/template_unittest.cc',
'tools/gn/test_with_scheduler.cc',
'tools/gn/test_with_scope.cc',
'tools/gn/tokenizer_unittest.cc',
'tools/gn/unique_vector_unittest.cc',
'tools/gn/value_unittest.cc',
'tools/gn/visibility_unittest.cc',
'tools/gn/visual_studio_utils_unittest.cc',
'tools/gn/visual_studio_writer_unittest.cc',
'tools/gn/xcode_object_unittest.cc',
'tools/gn/xml_element_writer_unittest.cc',
'util/test/gn_test.cc',
], 'tool': 'cxx', 'include_dirs': [], 'libs': []},
}
if is_posix:
static_libraries['base']['sources'].extend([
'base/files/file_enumerator_posix.cc',
'base/files/file_posix.cc',
'base/files/file_util_posix.cc',
'base/posix/file_descriptor_shuffle.cc',
'base/posix/safe_strerror.cc',
'base/strings/string16.cc',
])
if is_linux:
static_libraries['base']['sources'].extend([
'base/strings/sys_string_conversions_posix.cc',
])
if is_mac:
static_libraries['base']['sources'].extend([
'base/files/file_util_mac.mm',
'base/mac/bundle_locations.mm',
'base/mac/foundation_util.mm',
'base/strings/sys_string_conversions_mac.mm',
])
libs.extend([
'-framework', 'AppKit',
'-framework', 'CoreFoundation',
'-framework', 'Foundation',
'-framework', 'Security',
])
if is_win:
static_libraries['base']['sources'].extend([
'base/files/file_enumerator_win.cc',
'base/files/file_util_win.cc',
'base/files/file_win.cc',
'base/strings/sys_string_conversions_win.cc',
'base/win/registry.cc',
'base/win/scoped_handle.cc',
'base/win/scoped_process_information.cc',
])
libs.extend([
'advapi32.lib',
'dbghelp.lib',
'kernel32.lib',
'ole32.lib',
'shell32.lib',
'user32.lib',
'userenv.lib',
'version.lib',
'winmm.lib',
'ws2_32.lib',
'Shlwapi.lib',
])
# we just build static libraries that GN needs
executables['gn']['libs'].extend(static_libraries.keys())
executables['gn_unittests']['libs'].extend(static_libraries.keys())
WriteGenericNinja(path, static_libraries, executables, cc, cxx, ar, ld,
options, cflags, cflags_cc, ldflags, libflags, include_dirs,
libs)
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
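# Example invocations, derived from the option parser defined in main() above
# (paths assume this script lives in build/ inside a GN checkout):
#   python build/gen.py                                   # release build files in out/
#   python build/gen.py -d                                # debug build
#   python build/gen.py --out-path=/tmp/gn-out --no-last-commit-position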
|
py | 7dfdaf0eeaa112ed78da6d7f83f53d359390503a | # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
from spack.package import *
class Gams(Package):
"""The General Algebraic Modeling System is a high-level modeling system
for mathematical optimization. GAMS is designed for modeling and solving
linear, nonlinear, and mixed-integer optimization problems."""
homepage = "https://www.gams.com/"
manual_download = True
version('27.2', '4f3f3484a4389661e0522a4cfe0289fd', expand=False)
def url_for_version(self, version):
return "file://{0}/linux_x64_64_sfx.exe".format(os.getcwd())
def setup_run_environment(self, env):
env.prepend_path("PATH", join_path(
self.prefix, 'gams{0}_linux_x64_64_sfx'.format(self.version)))
def install(self, spec, prefix):
os.chmod(join_path(self.stage.source_path,
"linux_x64_64_sfx.exe"), 0o755)
os.system("./linux_x64_64_sfx.exe")
install_tree(join_path(self.stage.source_path,
'gams{0}_linux_x64_64_sfx'
.format(self.version)),
join_path(self.prefix, 'gams{0}_linux_x64_64_sfx'
.format(self.version)))
install('{0}/gamslice.txt'.format(os.getcwd()),
join_path(self.prefix, 'gams{0}_linux_x64_64_sfx'
.format(self.version), 'gamslice.txt'))
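# Usage note (a sketch; the exact workflow may differ between Spack versions):
# this is a manual_download package, so url_for_version() resolves to a file://
# URL in the current working directory. The installer and the license file must
# already be present there when Spack is run, e.g.
#   ls .
#   #   linux_x64_64_sfx.exe  gamslice.txt
#   spack install gams@27.2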
|
py | 7dfdaf384321632bfb21f0a17ba4dbbb896a3921 | # Copyright 2013, Couchbase, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This file contains the twisted-specific bits for the Couchbase client.
"""
from typing import *
from twisted.internet import reactor
from twisted.internet.defer import Deferred
from couchbase_core.asynchronous.analytics import AsyncAnalyticsRequest
from couchbase.asynchronous import AsyncViewResult, AsyncQueryResultBase, AsyncAnalyticsResultBase, AsyncSearchResult
from couchbase.cluster import Cluster as V3SyncCluster, AsyncCluster as V3AsyncCluster
from couchbase.collection import AsyncCBCollection as BaseAsyncCBCollection
from couchbase_core.asynchronous.events import EventQueue
from couchbase.asynchronous.search import AsyncSearchRequest
from couchbase_core.asynchronous.n1ql import AsyncN1QLRequest
from couchbase_core.asynchronous.view import AsyncViewBase
from couchbase_core.client import Client as CoreClient
from couchbase.exceptions import CouchbaseException
from couchbase_core.supportability import internal
from txcouchbase.iops import v0Iops
from couchbase.bucket import AsyncBucket as V3AsyncBucket
class BatchedRowMixin(object):
def __init__(self, *args, **kwargs):
"""
Iterator/Container object for single-call, row-based results.
This functions as an iterator over all results of the query, once the
query has been completed.
Additional metadata may be obtained by examining the object. See
:class:`~couchbase_core.views.iterator.Views` for more details.
You will normally not need to construct this object manually.
"""
self._d = Deferred()
self.__rows = [] # likely a superclass might have this?
def _getDeferred(self):
return self._d
def start(self):
super(BatchedRowMixin, self).start()
self.raw.rows_per_call = -1
return self
def on_rows(self, rowiter):
"""
Reimplemented from :meth:`~AsyncViewBase.on_rows`
"""
self.__rows = rowiter
self._d.callback(self)
self._d = None
def on_error(self, ex):
"""
Reimplemented from :meth:`~AsyncViewBase.on_error`
"""
if self._d:
self._d.errback()
self._d = None
def on_done(self):
"""
Reimplemented from :meth:`~AsyncViewBase.on_done`
"""
if self._d:
self._d.callback(self)
self._d = None
def __iter__(self):
"""
Iterate over the rows in this resultset
"""
return iter(self.__rows)
class BatchedView(BatchedRowMixin, AsyncViewBase):
def __init__(self, *args, **kwargs):
AsyncViewBase.__init__(self, *args, **kwargs)
BatchedRowMixin.__init__(self, *args, **kwargs)
class BatchedViewResult(BatchedRowMixin, AsyncViewResult):
def __init__(self, *args, **kwargs):
AsyncViewResult.__init__(self, *args, **kwargs)
BatchedRowMixin.__init__(self, *args, **kwargs)
class BatchedN1QLRequest(BatchedRowMixin, AsyncN1QLRequest):
def __init__(self, *args, **kwargs):
AsyncN1QLRequest.__init__(self, *args, **kwargs)
BatchedRowMixin.__init__(self, *args, **kwargs)
class BatchedQueryResult(BatchedRowMixin, AsyncQueryResultBase):
def __init__(self, *args, **kwargs):
AsyncQueryResultBase.__init__(self, *args, **kwargs)
BatchedRowMixin.__init__(self, *args, **kwargs)
class BatchedAnalyticsRequest(BatchedRowMixin, AsyncAnalyticsRequest):
def __init__(self, *args, **kwargs):
AsyncAnalyticsRequest.__init__(self, *args, **kwargs)
BatchedRowMixin.__init__(self, *args, **kwargs)
class BatchedAnalyticsResult(BatchedRowMixin, AsyncAnalyticsResultBase):
def __init__(self, *args, **kwargs):
AsyncAnalyticsResultBase.__init__(self, *args, **kwargs)
BatchedRowMixin.__init__(self, *args, **kwargs)
class BatchedSearchRequest(BatchedRowMixin, AsyncSearchRequest):
def __init__(self, *args, **kwargs):
AsyncSearchRequest.__init__(self, *args, **kwargs)
BatchedRowMixin.__init__(self, *args, **kwargs)
class BatchedSearchResult(BatchedRowMixin, AsyncSearchResult):
def __init__(self, *args, **kwargs):
AsyncSearchResult.__init__(self, *args, **kwargs)
BatchedRowMixin.__init__(self, *args, **kwargs)
class TxEventQueue(EventQueue):
"""
Subclass of EventQueue. This implements the relevant firing methods,
treating an 'Event' as a 'Deferred'
"""
def fire_async(self, event):
reactor.callLater(0, event.callback, None)
def call_single_success(self, event, *args, **kwargs):
event.callback(None)
def call_single_failure(self, event, *args, **kwargs):
event.errback(None)
class ConnectionEventQueue(TxEventQueue):
"""
For events fired upon connect
"""
def maybe_raise(self, err, *args, **kwargs):
if not err:
return
raise err
T = TypeVar('T', bound=CoreClient)
class TxRawClientMixin(object):
@internal
def __init__(self, connstr=None, **kwargs):
"""
Client mixin for Twisted. This inherits from an 'AsyncClient' class,
but also adds some twisted-specific logic for hooking on a connection.
"""
if connstr and 'connstr' not in kwargs:
kwargs['connstr'] = connstr
iops = v0Iops(reactor)
super(TxRawClientMixin, self).__init__(iops=iops, **kwargs)
self._evq = {
'connect': ConnectionEventQueue(),
'_dtor': TxEventQueue()
}
self._conncb = self._evq['connect']
self._dtorcb = self._evq['_dtor']
def registerDeferred(self, event, d):
"""
Register a defer to be fired at the firing of a specific event.
:param string event: Currently supported values are `connect`. Another
value may be `_dtor` which will register an event to fire when this
object has been completely destroyed.
:param d: The deferred to fire when the event succeeds or fails
:type d: :class:`Deferred`
If this event has already fired, the deferred will be triggered
asynchronously.
Example::
def on_connect(*args):
print("I'm connected")
def on_connect_err(*args):
print("Connection failed")
d = Deferred()
cb.registerDeferred('connect', d)
d.addCallback(on_connect)
d.addErrback(on_connect_err)
:raise: :exc:`ValueError` if the event name is unrecognized
"""
try:
self._evq[event].schedule(d)
except KeyError:
raise ValueError("No such event type", event)
def on_connect(self):
"""
Short-hand for the following idiom::
d = Deferred()
cb.registerDeferred('connect', d)
return d
:return: A :class:`Deferred`
"""
d = Deferred()
self.registerDeferred('connect', d)
return d
def defer(self, opres):
"""
Converts a raw :class:`couchbase_core.results.AsyncResult` object
into a :class:`Deferred`.
This is shorthand for the following "non-idiom"::
d = Deferred()
opres = cb.upsert("foo", "bar")
opres.callback = d.callback
def d_err(res, ex_type, ex_val, ex_tb):
d.errback(opres, ex_type, ex_val, ex_tb)
opres.errback = d_err
return d
:param opres: The operation to wrap
:type opres: :class:`couchbase_core.results.AsyncResult`
:return: a :class:`Deferred` object.
Example::
opres = cb.upsert("foo", "bar")
d = cb.defer(opres)
def on_ok(res):
print("Result OK. Cas: {0}".format(res.cas))
d.addCallback(on_ok)
"""
d = Deferred()
def _on_err(mres, ex_type, ex_val, ex_tb):
try:
raise ex_type(ex_val)
except CouchbaseException:
d.errback()
opres.set_callbacks(d.callback, _on_err)
return d
def deferred_verb(self, itercls, raw_verb, cooked_verb, *args, **kwargs):
if not self.connected:
cb = lambda x: cooked_verb(*args, **kwargs)
return self.on_connect().addCallback(cb)
kwargs['itercls'] = itercls
o = raw_verb(*args, **kwargs)
o.start()
return o._getDeferred()
connected = CoreClient.connected
class TxDeferredClientMixin(TxRawClientMixin):
def __new__(cls, *args, **kwargs):
if not hasattr(cls, "TxDeferred_Wrapped"):
for k, v in cls._gen_memd_wrappers(TxDeferredClientMixin._meth_factory).items():
setattr(cls, k, v)
cls.TxDeferred_Wrapped = True
return super(TxDeferredClientMixin, cls).__new__(cls, *args, **kwargs)
@internal
def __init__(self, *args, **kwargs):
"""
This mixin inherits from :class:`TxRawClientMixin`.
In addition to the connection methods, this class' data access methods
return :class:`Deferreds` instead of :class:`Result` objects.
Operations such as :meth:`get` or :meth:`set` will invoke the
:attr:`Deferred.callback` with the result object when the result is
complete, or they will invoke the :attr:`Deferred.errback` with an
exception (or :class:`Failure`) in case of an error. The rules of the
:attr:`~couchbase_core.client.Client.quiet` attribute for raising
exceptions apply to the invocation of the ``errback``. This means that
in the case where the synchronous client would raise an exception,
the Deferred API will have its ``errback`` invoked. Otherwise, the
result's :attr:`~couchbase_v2.result.Result.success` field should be
inspected.
Likewise multi operations will be invoked with a
:class:`~couchbase.result.MultiResultBase` compatible object.
Some examples:
Using single items::
d_set = cb.upsert("foo", "bar")
d_get = cb.get("foo")
def on_err_common(*args):
print("Got an error: {0}".format(args)),
def on_set_ok(res):
print("Successfuly set key with CAS {0}".format(res.cas))
def on_get_ok(res):
print("Successfuly got key with value {0}".format(res.value))
d_set.addCallback(on_set_ok).addErrback(on_err_common)
d_get.addCallback(on_get_ok).addErrback(on_get_common)
# Note that it is safe to do this as operations performed on the
# same key are *always* performed in the order they were scheduled.
Using multiple items::
d_get = cb.get_multi(("Foo", "bar", "baz"))
def on_mres(mres):
for k, v in mres.items():
print("Got result for key {0}: {1}".format(k, v.value))
d_get.addCallback(on_mres)
"""
super(TxDeferredClientMixin, self).__init__(*args, **kwargs)
def _connectSchedule(self, f, meth, *args, **kwargs):
qop = Deferred()
qop.addCallback(lambda x: f(meth, *args, **kwargs))
self._evq['connect'].schedule(qop)
return qop
def _wrap(self, # type: TxDeferredClient
meth, *args, **kwargs):
"""
Calls a given method with the appropriate arguments, or defers such
a call until the instance has been connected
"""
if not self.connected:
return self._connectSchedule(self._wrap, meth, *args, **kwargs)
opres = meth(self, *args, **kwargs)
return self.defer(opres)
### Generate the methods
@staticmethod
def _meth_factory(meth, _):
def ret(self, *args, **kwargs):
return self._wrap(meth, *args, **kwargs)
return ret
class TxRawCollection(TxRawClientMixin, BaseAsyncCBCollection):
pass
class TxCollection(TxDeferredClientMixin, TxRawCollection):
pass
class TxRawBucket(TxRawClientMixin, V3AsyncBucket):
@internal
def __init__(self, *args, **kwargs):
super(TxRawBucket, self).__init__(collection_factory=kwargs.pop('collection_factory', TxRawCollection), *args, **kwargs)
def view_query_ex(self, viewcls, *args, **kwargs):
"""
Query a view, with the ``viewcls`` instance receiving events
of the query as they arrive.
:param type viewcls: A class (derived from :class:`AsyncViewBase`)
to instantiate
Other arguments are passed to the standard `query` method.
This functions exactly like the :meth:`~couchbase.asynchronous.AsyncClient.query`
method, except it automatically schedules operations if the connection
has not yet been negotiated.
"""
kwargs['itercls'] = viewcls
o = super(TxRawBucket, self).view_query(*args, **kwargs)
if not self.connected:
self.on_connect().addCallback(lambda x: o.start())
else:
o.start()
return o
def view_query(self, *args, **kwargs):
"""
Returns a :class:`Deferred` object which will have its callback invoked
with a :class:`BatchedView` when the results are complete.
Parameters follow conventions of
:meth:`~couchbase_v2.bucket.Bucket.query`.
Example::
d = cb.queryAll("beer", "brewery_beers")
def on_all_rows(rows):
for row in rows:
print("Got row {0}".format(row))
d.addCallback(on_all_rows)
"""
if not self.connected:
cb = lambda x: self.view_query(*args, **kwargs)
return self.on_connect().addCallback(cb)
kwargs['itercls'] = BatchedViewResult
o = super(TxRawBucket, self).view_query(*args, **kwargs)
try:
o.start()
except Exception as e:
raise
return o._getDeferred()
class TxBucket(TxDeferredClientMixin, TxRawBucket):
@internal
def __init__(self, *args, **kwargs):
super(TxBucket,self).__init__(collection_factory=TxCollection, *args, **kwargs)
class TxBaseCluster(TxRawClientMixin, V3AsyncCluster):
def bucket(self, *args, **kwargs):
return super(TxBaseCluster, self).bucket(*args, **kwargs)
class TxRawCluster(TxBaseCluster):
def __init__(self, *args, **kwargs):
super(TxRawCluster, self).__init__(*args, bucket_factory=kwargs.pop('bucket_factory', TxRawBucket), **kwargs)
def query_ex(self, cls, *args, **kwargs):
"""
Execute a N1QL statement providing a custom handler for rows.
This method allows you to define your own subclass (of
:class:`~AsyncN1QLRequest`) which can handle rows as they are
received from the network.
:param cls: The subclass (not instance) to use
:param args: Positional arguments for the class constructor
:param kwargs: Keyword arguments for the class constructor
.. seealso:: :meth:`queryEx`, around which this method wraps
"""
kwargs['itercls'] = cls
o = super(TxRawCluster, self).query(*args, **kwargs)
if not self.connected:
self.on_connect().addCallback(lambda x: o.start())
else:
o.start()
return o
def query(self, *args, **kwargs):
"""
Execute a N1QL query, retrieving all rows.
This method returns a :class:`Deferred` object which is executed
with a :class:`~.N1QLRequest` object. The object may be iterated
over to yield the rows in the result set.
This method is similar to :meth:`~couchbase_v2.bucket.Bucket.n1ql_query`
in its arguments.
Example::
def handler(req):
for row in req:
# ... handle row
d = cb.n1qlQueryAll('SELECT * from `travel-sample` WHERE city=$1',
'Reno')
d.addCallback(handler)
:return: A :class:`Deferred`
.. seealso:: :meth:`~couchbase_v2.bucket.Bucket.n1ql_query`
"""
return self.deferred_verb(BatchedQueryResult, super(TxRawCluster,self).query, self.query, *args, **kwargs)
def analytics_query(self, *args, **kwargs):
return self.deferred_verb(BatchedAnalyticsResult, super(TxRawCluster, self).analytics_query, self.analytics_query, *args,
**kwargs)
def search(self, cls, *args, **kwargs):
"""
Experimental Method
Execute a Search query providing a custom handler for rows.
This method allows you to define your own subclass (of
:class:`~AsyncSearchRequest`) which can handle rows as they are
received from the network.
:param cls: The subclass (not instance) to use
:param args: Positional arguments for the class constructor
:param kwargs: Keyword arguments for the class constructor
.. seealso:: :meth:`search`, around which this method wraps
"""
kwargs['itercls'] = cls
o = super(TxRawCluster, self).search_query(*args, **kwargs)
if not self.connected:
self.on_connect().addCallback(lambda x: o.start())
else:
o.start()
return o
def search_query(self, *args, **kwargs):
"""
Experimental Method
Execute a Search query, retrieving all rows.
This method returns a :class:`Deferred` object which is executed
with a :class:`~.SearchRequest` object. The object may be iterated
over to yield the rows in the result set.
This method is similar to :meth:`~couchbase_v2.bucket.Bucket.search`
in its arguments.
Example::
def handler(req):
for row in req:
# ... handle row
d = cb.search('name', ft.MatchQuery('nosql'), limit=10)
d.addCallback(handler)
:return: A :class:`Deferred`
.. seealso:: :meth:`~couchbase_v2.bucket.Bucket.search`
"""
if not self.connected:
cb = lambda x: self.search_query(*args, **kwargs)
return self.on_connect().addCallback(cb)
kwargs['itercls'] = BatchedSearchResult
o = super(TxRawCluster, self).search_query(*args, **kwargs)
o.start()
return o._getDeferred()
class TxCluster(TxDeferredClientMixin, TxRawCluster):
def __init__(self, *args, **kwargs):
super(TxCluster, self).__init__(*args, bucket_factory=kwargs.pop('bucket_factory', TxBucket), **kwargs)
class TxSyncCluster(V3SyncCluster):
def __init__(self, *args, **kwargs):
super(TxSyncCluster, self).__init__(*args, bucket_factory=kwargs.pop('bucket_factory', TxBucket), **kwargs)
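# --- Deferred-style usage sketch (not from the original module; assumes a
# reachable cluster with credentials handled elsewhere, names are hypothetical)
#
#   cluster = TxCluster('couchbase://localhost')
#
#   def on_rows(result):
#       for row in result:
#           print(row)
#
#   d = cluster.query("SELECT RAW name FROM `travel-sample` LIMIT 5")
#   d.addCallback(on_rows)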
|
py | 7dfdaf642692f1fe3610092d7b7a9b218bce6837 | # -*- coding: utf-8 -*-
"""Tests for threading tools."""
#
# (C) Pywikibot team, 2014
#
# Distributed under the terms of the MIT license.
#
from __future__ import unicode_literals
__version__ = '$Id: 7dfdaf642692f1fe3610092d7b7a9b218bce6837 $'
from tests.aspects import unittest, TestCase
from pywikibot.tools import ThreadedGenerator, intersect_generators
class BasicThreadedGeneratorTestCase(TestCase):
"""ThreadedGenerator test cases."""
net = False
def test_run_from_iterable(self):
"""Test thread running with iterable target."""
iterable = 'abcd'
thd_gen = ThreadedGenerator(target=iterable)
thd_gen.start()
self.assertEqual(list(thd_gen), list(iterable))
def gen_func(self):
"""Helper method for generator test."""
iterable = 'abcd'
for i in iterable:
yield i
def test_run_from_gen_function(self):
"""Test thread running with generator as target."""
iterable = 'abcd'
thd_gen = ThreadedGenerator(target=self.gen_func)
thd_gen.start()
self.assertEqual(list(thd_gen), list(iterable))
class GeneratorIntersectTestCase(TestCase):
"""Base class for intersect_generators test cases."""
def assertEqualItertools(self, gens):
"""Assert intersect_generators result is same as set intersection."""
# If they are a generator, we need to convert to a list
# first otherwise the generator is empty the second time.
datasets = [list(gen) for gen in gens]
set_result = set(datasets[0]).intersection(*datasets[1:])
result = list(intersect_generators(datasets))
self.assertCountEqual(set(result), result)
self.assertCountEqual(result, set_result)
class BasicGeneratorIntersectTestCase(GeneratorIntersectTestCase):
"""Disconnected intersect_generators test cases."""
net = False
def test_intersect_basic(self):
"""Test basic interset without duplicates."""
self.assertEqualItertools(['abc', 'db', 'ba'])
def test_intersect_with_dups(self):
"""Test basic interset with duplicates."""
self.assertEqualItertools(['aabc', 'dddb', 'baa'])
if __name__ == '__main__':
try:
unittest.main()
except SystemExit:
pass
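# Tiny illustrative example of the helper under test (a sketch; run inside a
# pywikibot checkout):
#   from pywikibot.tools import intersect_generators
#   list(intersect_generators(['abc', 'db', 'ba']))   # -> ['b'], the common item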
|
py | 7dfdaf8b9a07a2d56036aeeec7753ead5efe0315 | # -*- coding: utf-8 -*-
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import glob
import json
import os
from collections import defaultdict
from urllib.error import HTTPError
from urllib.request import urlopen
import numpy as np
import pandas as pd
import xlrd
from tqdm.auto import tqdm
from eevalue.definitions import ROOT_DIR
############## Data download ##############################################################
###########################################################################################
def fetch_data(period, filetype, destination, suffix):
"""General data downloader from ADMIE (Greek TSO)
Parameters
----------
period: List of datetime dates or pandas DatetimeIndex with freq='D'
filetype: String. See https://www.admie.gr/getFiletypeInfoEN for valid values
destination: String. The path to the folder for storing the downloaded data
suffix: String. The name to add at each stored file (after the date)
Example:
period = pd.date_range(start=datetime.date(2019, 12, 14),
end=datetime.date(2020, 12, 31),
freq='D')
fetch_data(period, 'DayAheadSchedulingUnitAvailabilities',
'availabilities', 'Availability.xls')
Returns
-------
list of dates for which downloading failed
"""
path = os.path.join(ROOT_DIR, "RawData/EL/{}".format(destination))
if not os.path.exists(path):
os.makedirs(path)
missing_days = []
pbar = tqdm(total=len(period))
for day in period:
file_path = None
sday = day.strftime("%Y-%m-%d")
try:
response = urlopen(
"https://www.admie.gr/getOperationMarketFile?"
f"dateStart={sday}&dateEnd={sday}&FileCategory={filetype}"
)
except HTTPError:
continue
else:
response = json.loads(response.read().decode("utf-8"))
if len(response) > 0:
file_path = response[0]["file_path"]
if file_path is not None:
try:
f = urlopen(file_path)
except HTTPError:
missing_days.append(day)
continue
else:
missing_days.append(day)
continue
sday = day.strftime("%Y%m%d")
with open(os.path.join(path, sday + suffix), "wb") as stream:
stream.write(f.read())
pbar.update(1)
pbar.close()
return missing_days
############## Data aggregation into datasets #############################################
###########################################################################################
def aggregate_load(period):
path = os.path.join(ROOT_DIR, "RawData/EL/day_ahead_results")
pbar = tqdm(total=len(period))
data = {}
for day in period:
sday = day.strftime("%Y%m%d")
xlfile = glob.glob(os.path.join(path, sday) + "*")
if len(xlfile) == 0:
continue
else:
try:
book = xlrd.open_workbook(xlfile[0], formatting_info=True)
except xlrd.XLRDError:
continue
sheet = book.sheet_by_name(f"{sday}_DAS")
idx = sheet.col_values(0).index("LOAD DECLARATIONS + LOSSES")
data[day] = sheet.row_values(idx)[1:25]
pbar.update(1)
pbar.close()
result = pd.DataFrame.from_dict(data, orient="index", columns=range(24))
result = result.stack()
result.index = result.index.map(lambda x: x[0].replace(hour=int(x[1])))
result = result.to_frame("Total load [MW]")
path = os.path.join(ROOT_DIR, "DataSets/Load/EL")
result.to_csv(os.path.join(path, "load.csv"))
def aggregate_imports(period):
path = os.path.join(ROOT_DIR, "RawData/EL/day_ahead_results")
pbar = tqdm(total=len(period))
data = {}
for day in period:
sday = day.strftime("%Y%m%d")
xlfile = glob.glob(os.path.join(path, sday) + "*")
if len(xlfile) == 0:
continue
else:
try:
book = xlrd.open_workbook(xlfile[0], formatting_info=True)
except xlrd.XLRDError:
continue
sheet = book.sheet_by_name(f"{sday}_DAS")
names = sheet.col_values(0)
start = names.index("NET BORDER SCHEDULES") + 1
end = names.index("BORDER IMPORTS")
res = 0
for i in range(start, end):
try:
res = res + np.array(sheet.row_values(i)[1:25])
except np.core._exceptions.UFuncTypeError:
res = res + np.pad(
np.array(sheet.row_values(i)[1:24]), (0, 1), "constant"
)
data[day] = (
-1
) * res # ADMIE uses negative sign for imports and positive for exports
pbar.update(1)
pbar.close()
result = pd.DataFrame.from_dict(data, orient="index", columns=range(24))
result = result.stack()
result.index = result.index.map(lambda x: x[0].replace(hour=int(x[1])))
result = result.to_frame("Net imports [MW]")
path = os.path.join(ROOT_DIR, "DataSets/NetImports/EL")
result.to_csv(os.path.join(path, "imports.csv"))
def aggregate_secondary_reserves(period):
path = os.path.join(ROOT_DIR, "RawData/EL/day_ahead_results")
pbar = tqdm(total=len(period))
data_2u = {}
data_2d = {}
for day in period:
sday = day.strftime("%Y%m%d")
xlfile = glob.glob(os.path.join(path, sday) + "*")
if len(xlfile) == 0:
continue
else:
try:
book = xlrd.open_workbook(xlfile[0], formatting_info=True)
except xlrd.XLRDError:
continue
sheet = book.sheet_by_name(f"{sday}_SecondaryReserve")
names = sheet.col_values(0)
data_2u[day] = sheet.row_values(names.index("Up - Requirement"))[2:26]
data_2d[day] = sheet.row_values(names.index("Dn - Requirement"))[2:26]
pbar.update(1)
pbar.close()
data_2u = (
pd.DataFrame.from_dict(data_2u, orient="index", columns=range(24))
.stack()
.to_frame("2U")
)
data_2u.index = data_2u.index.map(lambda x: x[0].replace(hour=int(x[1])))
data_2d = (
pd.DataFrame.from_dict(data_2d, orient="index", columns=range(24))
.stack()
.to_frame("2D")
)
data_2d.index = data_2d.index.map(lambda x: x[0].replace(hour=int(x[1])))
result = pd.concat([data_2u, data_2d], axis=1)
path = os.path.join(ROOT_DIR, "DataSets/Load/EL")
result.to_csv(os.path.join(path, "reserves.csv"))
def aggregate_committed_capacity(period):
plants = pd.read_csv(os.path.join(ROOT_DIR, "DataSets/PowerPlants/EL/plants.csv"))
units = (
plants[["Technology", "Fuel", "Unit"]]
.groupby(["Technology", "Fuel"])["Unit"]
.apply(list)
.to_dict()
)
for key, unit_names in units.items():
if "THESAVROS" in unit_names:
units[key].remove("THESAVROS")
units[key].extend(["THESAVROS1", "THESAVROS2", "THESAVROS3"])
break
path = os.path.join(ROOT_DIR, "RawData/EL/day_ahead_results")
pbar = tqdm(total=len(period))
data = defaultdict(dict)
for day in period:
sday = day.strftime("%Y%m%d")
xlfile = glob.glob(os.path.join(path, sday) + "*")
if len(xlfile) == 0:
continue
else:
try:
book = xlrd.open_workbook(xlfile[0], formatting_info=True)
except xlrd.XLRDError:
continue
sheet = book.sheet_by_name(f"{sday}_DAS")
all_names = sheet.col_values(0)
for key, unit_names in units.items():
both = set(unit_names).intersection(all_names)
idx = [all_names.index(x) for x in both]
result = 0
for i in idx:
try:
result += np.array(sheet.row_values(i)[1:25])
except np.core._exceptions.UFuncTypeError:
result += np.pad(
np.array(sheet.row_values(i)[1:24]), (0, 1), "constant"
)
data[key][day] = result
pbar.update(1)
pbar.close()
path = os.path.join(ROOT_DIR, "DataSets/Generation/EL")
for item in units:
tech, fuel = item
result = pd.DataFrame.from_dict(data[item], "index")
result = result.stack()
result.index = result.index.map(lambda x: x[0].replace(hour=int(x[1])))
result = result.to_frame("Capacity committed [MW]")
result.to_csv(os.path.join(path, f"{tech}_{fuel}.csv"))
def aggregate_available_capacity(period):
plants = pd.read_csv(os.path.join(ROOT_DIR, "DataSets/PowerPlants/EL/plants.csv"))
combinations = set(zip(plants["Technology"], plants["Fuel"]))
total_capacity = dict()
for item in combinations:
tech, fuel = item
subset = plants[(plants["Technology"] == tech) & (plants["Fuel"] == fuel)]
total_capacity[item] = subset["PowerCapacity"].sum()
units = (
plants[["Technology", "Fuel", "Unit"]]
.groupby(["Technology", "Fuel"])["Unit"]
.apply(list)
.to_dict()
)
path = os.path.join(ROOT_DIR, "RawData/EL/availabilities")
pbar = tqdm(total=len(period))
data = defaultdict(dict)
for day in period:
sday = day.strftime("%Y%m%d")
xlfile = glob.glob(os.path.join(path, sday) + "*")
if len(xlfile) == 0:
continue
else:
try:
book = xlrd.open_workbook(xlfile[0], formatting_info=True)
except xlrd.XLRDError:
continue
sheet = book.sheet_by_name("Unit_MaxAvail_Publish")
for item in combinations:
subset = units[item]
subset_idx = [
i for i, plant in enumerate(sheet.col_values(1)) if plant in subset
]
data[item][day] = {
"Available capacity [MW]": sum(
[float(sheet.cell(i, 3).value) for i in subset_idx]
)
}
pbar.update(1)
pbar.close()
path = os.path.join(ROOT_DIR, "DataSets/Capacity/EL")
for item in combinations:
tech, fuel = item
result = pd.DataFrame.from_dict(data[item], orient="index")
result["Nominal capacity [MW]"] = total_capacity[item]
result.to_csv(os.path.join(path, f"{tech}_{fuel}.csv"))
def aggregate_available_hydro(period):
plants = pd.read_csv(os.path.join(ROOT_DIR, "DataSets/PowerPlants/EL/plants.csv"))
hydro_plants = plants[plants["Technology"] == "HDR"]["Unit"].tolist()
path = os.path.join(ROOT_DIR, "RawData/EL/availabilities")
pbar = tqdm(total=len(period))
data = defaultdict(dict)
for day in period:
sday = day.strftime("%Y%m%d")
xlfile = glob.glob(os.path.join(path, sday) + "*")
if len(xlfile) == 0:
continue
else:
try:
book = xlrd.open_workbook(xlfile[0], formatting_info=True)
except xlrd.XLRDError:
continue
sheet = book.sheet_by_name("Unit_MaxAvail_Publish")
subset_idx = [
i for i, plant in enumerate(sheet.col_values(1)) if plant in hydro_plants
]
values = {}
for i in subset_idx:
unit = sheet.cell(i, 1).value
values[unit] = float(sheet.cell(i, 3).value)
data[day] = values
pbar.update(1)
pbar.close()
result = pd.DataFrame.from_dict(data, orient="index")
path = os.path.join(ROOT_DIR, "DataSets/Hydro/EL")
result.to_csv(os.path.join(path, "availability.csv"))
def aggregate_res_generation(period):
path = os.path.join(ROOT_DIR, "RawData/EL/res_forecast")
pbar = tqdm(total=len(period))
data = {}
for day in period:
sday = day.strftime("%Y%m%d")
xlfile = glob.glob(os.path.join(path, sday) + "*")
if len(xlfile) == 0:
continue
else:
try:
book = xlrd.open_workbook(xlfile[0], formatting_info=True)
except xlrd.XLRDError:
continue
sheet = book.sheet_by_name(f"{sday}_RES_Forecast")
idx = sheet.col_values(0).index("Total System")
data[day] = sheet.row_values(idx)[1:25]
pbar.update(1)
pbar.close()
result = pd.DataFrame.from_dict(data, orient="index", columns=range(24))
result = result.stack()
result.index = result.index.map(lambda x: x[0].replace(hour=int(x[1])))
result = result.to_frame("RES generation [MW]")
path = os.path.join(ROOT_DIR, "DataSets/Generation/EL")
result.to_csv(os.path.join(path, "RES.csv"))
def aggregate_reservoir_levels(period):
plants = pd.read_csv(os.path.join(ROOT_DIR, "DataSets/PowerPlants/EL/plants.csv"))
hydro_plants = plants[plants["Technology"] == "HDR"]["Unit"].tolist()
hydro_plants.remove("THESAVROS")
hydro_plants.extend(["THESAVROS1", "THESAVROS2", "THESAVROS3"])
path = os.path.join(ROOT_DIR, "RawData/EL/reservoirs")
pbar = tqdm(total=len(period))
data = {}
data_avg = {}
for day in period:
sday = day.strftime("%Y%m%d")
xlfile = glob.glob(os.path.join(path, sday) + "*")
if len(xlfile) == 0:
continue
else:
try:
book = xlrd.open_workbook(xlfile[0], formatting_info=True)
except xlrd.XLRDError:
continue
sheet = book.sheet_by_name(f"{sday}ReservoirFillingRate")
idx = [
i for i, plant in enumerate(sheet.col_values(1)) if plant in hydro_plants
]
values = {}
for i in idx:
unit = sheet.cell(i, 1).value
try:
rate = float(sheet.cell(i, 2).value)
except ValueError:
rate = np.nan
finally:
values[unit] = rate
data[day] = values
data_avg[day] = sheet.cell(2, 10).value
pbar.update(1)
pbar.close()
result = pd.DataFrame.from_dict(data, orient="index")
result["THESAVROS"] = result[["THESAVROS1", "THESAVROS2", "THESAVROS3"]].mean(
axis=1
)
result = result.drop(["THESAVROS1", "THESAVROS2", "THESAVROS3"], axis=1)
path = os.path.join(ROOT_DIR, "DataSets/Hydro/EL")
result.to_csv(os.path.join(path, "reservoirs.csv"))
result = pd.DataFrame.from_dict(
data_avg, orient="index", columns=["Filling Rate [%]"]
)
result.to_csv(os.path.join(path, "reservoirs_avg.csv"))
|
py | 7dfdafbc6e6efb2fb1704b8bbc10239ac4c9b162 | # Copyright (c) 2021 AccelByte Inc. All Rights Reserved.
# This is licensed software from AccelByte Inc, for limitations
# and restrictions contact your company contract manager.
#
# Code generated. DO NOT EDIT!
# template file: justice_py_sdk_codegen/__main__.py
# justice-achievement-service ()
# pylint: disable=duplicate-code
# pylint: disable=line-too-long
# pylint: disable=missing-function-docstring
# pylint: disable=missing-module-docstring
# pylint: disable=too-many-arguments
# pylint: disable=too-many-branches
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-lines
# pylint: disable=too-many-locals
# pylint: disable=too-many-public-methods
# pylint: disable=too-many-return-statements
# pylint: disable=too-many-statements
# pylint: disable=unused-import
from __future__ import annotations
from typing import Any, Dict, List, Optional, Tuple, Union
from ....core import Model
class ResponseError(Model):
"""Response error (response.Error)
Properties:
error_code: (errorCode) REQUIRED int
error_message: (errorMessage) REQUIRED str
"""
# region fields
error_code: int # REQUIRED
error_message: str # REQUIRED
# endregion fields
# region with_x methods
def with_error_code(self, value: int) -> ResponseError:
self.error_code = value
return self
def with_error_message(self, value: str) -> ResponseError:
self.error_message = value
return self
# endregion with_x methods
# region to methods
def to_dict(self, include_empty: bool = False) -> dict:
result: dict = {}
if hasattr(self, "error_code"):
result["errorCode"] = int(self.error_code)
elif include_empty:
result["errorCode"] = 0
if hasattr(self, "error_message"):
result["errorMessage"] = str(self.error_message)
elif include_empty:
result["errorMessage"] = ""
return result
# endregion to methods
# region static methods
@classmethod
def create(
cls,
error_code: int,
error_message: str,
) -> ResponseError:
instance = cls()
instance.error_code = error_code
instance.error_message = error_message
return instance
@classmethod
def create_from_dict(cls, dict_: dict, include_empty: bool = False) -> ResponseError:
instance = cls()
if not dict_:
return instance
if "errorCode" in dict_ and dict_["errorCode"] is not None:
instance.error_code = int(dict_["errorCode"])
elif include_empty:
instance.error_code = 0
if "errorMessage" in dict_ and dict_["errorMessage"] is not None:
instance.error_message = str(dict_["errorMessage"])
elif include_empty:
instance.error_message = ""
return instance
@classmethod
def create_many_from_dict(cls, dict_: dict, include_empty: bool = False) -> Dict[str, ResponseError]:
        return {k: cls.create_from_dict(v, include_empty=include_empty) for k, v in dict_.items()} if dict_ else {}
@classmethod
def create_many_from_list(cls, list_: list, include_empty: bool = False) -> List[ResponseError]:
return [cls.create_from_dict(i, include_empty=include_empty) for i in list_] if list_ else []
@classmethod
def create_from_any(cls, any_: any, include_empty: bool = False, many: bool = False) -> Union[ResponseError, List[ResponseError], Dict[Any, ResponseError]]:
if many:
if isinstance(any_, dict):
return cls.create_many_from_dict(any_, include_empty=include_empty)
elif isinstance(any_, list):
return cls.create_many_from_list(any_, include_empty=include_empty)
else:
raise ValueError()
else:
return cls.create_from_dict(any_, include_empty=include_empty)
@staticmethod
def get_field_info() -> Dict[str, str]:
return {
"errorCode": "error_code",
"errorMessage": "error_message",
}
@staticmethod
def get_required_map() -> Dict[str, bool]:
return {
"errorCode": True,
"errorMessage": True,
}
# endregion static methods
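# Illustrative usage sketch (not part of the generated file): round-trips a value
# through the builders and dict (de)serializers defined above; the error code and
# message text are arbitrary example values.
if __name__ == "__main__":
    err = ResponseError.create(error_code=404, error_message="not found")
    as_dict = err.to_dict()
    assert as_dict == {"errorCode": 404, "errorMessage": "not found"}
    clone = ResponseError.create_from_dict(as_dict)
    assert clone.error_code == 404 and clone.error_message == "not found"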
|
py | 7dfdafe036f582ad49c389f781b40659c843ca93 | #coding: utf-8
import re
import json
from urllib.parse import urlparse
import scrapy
from scrapy.selector import Selector
try:
from scrapy.spiders import Spider
except:
from scrapy.spiders import BaseSpider as Spider
from scrapy.utils.response import get_base_url
from scrapy.spiders import CrawlSpider, Rule
from scrapy.linkextractors import LinkExtractor as sle
from antspi.items import *
from misc.log import *
'''
1. By default sel.css()[0] is taken; otherwise '__unique': false is required.
2. By default every dict is parsed as CSS rules; otherwise '__use': 'dump' marks a dict that is used for dumping data.
'''
class ZhihuSpider(CrawlSpider):
name = "zhihu"
allowed_domains = ["zhihu.com"]
start_urls = [
"http://www.zhihu.com/",
"https://www.zhihu.com/people/edonym/activities",
]
rules = [
Rule(sle(allow=("/people/[^/]+/followees$")), callback='parse_followees'),
Rule(sle(allow=("/people/[^/]+/followers$", )), callback='parse_followers'),
Rule(sle(allow=("/people/[^/]+$", )), callback='parse_people_with_rules', follow=True),
]
# need dfs/bfs
all_css_rules = {
'.zm-profile-header': {
'.zm-profile-header-main': {
'__use':'dump',
'name':'.title-section .name::text',
'sign':'.title-section .bio::text',
'location':'.location.item::text',
'business':'.business.item::text',
'employment':'.employment.item::text',
'position':'.position.item::text',
'education':'.education.item::text',
'education_extra':'.education-extra.item::text',
}, '.zm-profile-header-operation': {
'__use':'dump',
'agree':'.zm-profile-header-user-agree strong::text',
'thanks':'.zm-profile-header-user-thanks strong::text',
}, '.profile-navbar': {
'__use':'dump',
'asks':'a[href*=asks] .num::text',
'answers':'a[href*=answers] .num::text',
'posts':'a[href*=posts] .num::text',
'collections':'a[href*=collections] .num::text',
'logs':'a[href*=logs] .num::text',
},
}, '.zm-profile-side-following': {
'__use':'dump',
'followees':'a.item[href*=followees] strong::text',
'followers':'a.item[href*=followers] strong::text',
}
}
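    # Minimal sketch of how the rule dict above is read (based on traversal() below; the
    # selector and field names here are a subset copied purely for illustration): an outer
    # CSS selector scopes the search, and a nested dict carrying '__use': 'dump' maps item
    # fields to CSS expressions, e.g.
    #   {'.zm-profile-side-following': {'__use': 'dump',
    #                                   'followers': 'a.item[href*=followers] strong::text'}}
    # fills item['followers'] with the text nodes extracted under that selector.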
def traversal(self, sel, rules, item):
# print 'traversal:', sel, rules.keys()
if '__use' in rules:
for nk, nv in rules.items():
if nk == '__use':
continue
if nk not in item:
item[nk] = []
if sel.css(nv):
item[nk] += [i.extract() for i in sel.css(nv)]
else:
item[nk] = []
else:
for nk, nv in rules.items():
for i in sel.css(nk):
self.traversal(i, nv, item)
def dfs(self, sel, rules, item_class):
if sel is None:
return []
item = item_class()
self.traversal(sel, rules, item)
return item
def parse(self, response):
# do something
cookies = dict(d_c0="AGAA7VC1SAqPTplBGfV6WtVRNLv2uElx4qM=|1469458060",
_za="5909eb78-6167-47ef-ac04-6adf2b9f97c1",
_zap="6171804f-4acc-4c4f-8605-d622a016fa6c",
_xsrf="d0b2766e263c8639aefea98d0664245f",
__utma="155987696.1214355691.1484310193.1484310193.1484310193.1",
__utmv="155987696.100-1|2=registration_date=20140516=1^3=entry_date=20140516=1",
__utmz="155987696.1484310193.1.1.utmcsr=google|utmccn=(organic)|utmcmd=organic|utmctr=(not%20provided)")
yield scrapy.Request(url= "https://www.zhihu.com/people/edonym/activities",
cookies=cookies,
callback= self.parse
)
def parse_with_rules(self, response, rules, item_class):
return self.dfs(Selector(response), rules, item_class)
def parse_people_with_rules(self, response):
item = self.parse_with_rules(response, self.all_css_rules, ZhihuPeopleItem)
item['id'] = urlparse(response.url).path.split('/')[-1]
info('Parsed '+response.url) # +' to '+str(item))
return item
def parse_followers(self, response):
return self.parse_people_with_rules(response)
def parse_followees(self, response):
return self.parse_people_with_rules(response)
|
py | 7dfdafec6d42ce78f0b9a45879f5db180da474c6 | # -*- coding: utf-8 -*-
# @Time : 2022/3/19 8:55 AM
# @Author : Kevin
# @File : __init__.py.py
# @Software: PyCharm
|
py | 7dfdb0732e636b4aed593b142127412554430a76 | # -*- coding: utf-8 -*-
# Copyright 2017-2019 ControlScan, Inc.
#
# This file is part of Cyphon Engine.
#
# Cyphon Engine is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# Cyphon Engine is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Cyphon Engine. If not, see <http://www.gnu.org/licenses/>.
"""
Tests functions in the shapes module.
"""
# third party
from django.test import TestCase
from django.contrib.gis.geos import Point, Polygon, MultiPolygon
from geopy.distance import vincenty
# local
from target.locations.models import Location
from tests.fixture_manager import get_fixtures
from utils.geometry import shapes, units
from utils.geometry.bounds import Bounds
class ReverseCoordinateOrderTestCase(TestCase):
"""
Test class for function reverse_coordinate_order.
"""
def test_reverse_coordinates_2tuple(self):
"""
Tests the function reverse_coordinate_order for a 2-tuple.
"""
reversed_order = shapes.reverse_coordinate_order((0, 1))
self.assertEqual(reversed_order[0], 1)
self.assertEqual(reversed_order[1], 0)
def test_reverse_coordinates_4tuple(self):
"""
Tests the function reverse_coordinate_order for a 4-tuple.
"""
with self.assertRaises(ValueError):
shapes.reverse_coordinate_order((1, 2, 3, 1))
class TestIsRectangleTestCase(TestCase):
"""
Test class for function is_rectangle.
"""
def test_for_clockwise_rectangle(self):
"""
Tests the function is_rectangle for a clockwise rectangle.
"""
rectangle = ((
(0.0, 0.0),
(0.0, 50.0),
(50.0, 50.0),
(50.0, 0.0),
(0.0, 0.0)
))
self.assertEqual(shapes.is_rectangle(rectangle), True)
def test_for_counterclockwise(self):
"""
Tests the function is_rectangle for a counterclockwise rectangle.
"""
rectangle = ((
(0.0, 0.0),
(50.0, 0.0),
(50.0, 50.0),
(0.0, 50.0),
(0.0, 0.0)
))
self.assertEqual(shapes.is_rectangle(rectangle), True)
def test_less_than_four_segments(self):
"""
Tests the function is_rectangle for fewer than four line segments.
"""
# a triangle
coords = (
(0.0, 0.0),
(50.0, 50.0),
(50.0, 0.0),
(0.0, 0.0)
)
self.assertEqual(shapes.is_rectangle(coords), False)
def test_more_than_four_segments(self):
"""
Tests the function is_rectangle for more than four line segments.
"""
# a pentagon
coords = (
(0.0, 0.0),
(0.0, 50.0),
(25.0, 75.0),
(50.0, 50.0),
(50.0, 0.0),
(0.0, 0.0)
)
self.assertEqual(shapes.is_rectangle(coords), False)
def test_not_closed(self):
"""
Tests the function is_rectangle for a series of coordinates that are
not closed.
"""
coords = (
(0.0, 0.0),
(0.0, 50.0),
(50.0, 50.0),
(50.0, 0.0),
(0.0, 1.0) # doesn't match first point
)
self.assertEqual(shapes.is_rectangle(coords), False)
def test_top_bottom_not_parallel(self):
"""
Tests the function is_rectangle for a polygon whose top and bottom sides
are not parallel.
"""
# top crooked
polygon = (
(0.0, 0.0),
(0.0, 25.0),
(50.0, 50.0),
(50.0, 0.0),
(0.0, 0.0)
)
self.assertEqual(shapes.is_rectangle(polygon), False)
        # bottom crooked
polygon = (
(0.0, 0.0),
(0.0, 50.0),
(50.0, 50.0),
(50.0, 25.0),
(0.0, 0.0)
)
self.assertEqual(shapes.is_rectangle(polygon), False)
def test_left_right_not_parallel(self):
"""
Tests the function is_rectangle for a polygon whose left and right sides
are not parallel.
"""
# left side crooked
polygon = (
(0.0, 0.0),
(25.0, 50.0),
(50.0, 50.0),
(50.0, 0.0),
(0.0, 0.0)
)
self.assertEqual(shapes.is_rectangle(polygon), False)
# right side crooked
polygon = (
(0.0, 0.0),
(0.0, 50.0),
(50.0, 50.0),
(25.0, 0.0),
(0.0, 0.0)
)
self.assertEqual(shapes.is_rectangle(polygon), False)
def test_no_width(self):
"""
Tests the function is_rectangle for a polygon whose bottom or top is
a point.
"""
# top is point
polygon = (
(0.0, 0.0),
(0.0, 50.0),
(0.0, 50.0),
(50.0, 0.0),
(0.0, 0.0)
)
self.assertEqual(shapes.is_rectangle(polygon), False)
# bottom is point
polygon = (
(0.0, 0.0),
(0.0, 50.0),
(50.0, 50.0),
(0.0, 0.0),
(0.0, 0.0)
)
self.assertEqual(shapes.is_rectangle(polygon), False)
def test_no_height(self):
"""
Tests the function is_rectangle for a polygon whose side is a point.
"""
# left side is point
polygon = (
(0.0, 0.0),
(0.0, 0.0),
(50.0, 50.0),
(50.0, 0.0),
(0.0, 0.0)
)
self.assertEqual(shapes.is_rectangle(polygon), False)
# right side is point
polygon = (
(0.0, 0.0),
(0.0, 50.0),
(50.0, 50.0),
(50.0, 50.0),
(0.0, 0.0)
)
self.assertEqual(shapes.is_rectangle(polygon), False)
class CircleToRectangleTestCase(TestCase):
"""
Test class for the function convert_circle_to_rectangle.
"""
def test_at_equator(self):
"""
Tests the function convert_circle_to_rectangle at the Equator.
This makes it easier to calculate expected values.
"""
coord = (-78.0, 0)
radius_m = 10000
square = shapes.convert_circle_to_rectangle(coord, radius_m)
# check that the shape is correct
self.assertEqual(shapes.is_rectangle(square[0]), True,
'Shape is not a rectangle')
# check that the rectangle center is at the original point
bounds = Bounds(*square.extent)
self.assertAlmostEqual(square.centroid[0], coord[0], places=6,
msg='The center longitude is incorrect')
self.assertAlmostEqual(square.centroid[1], coord[1], places=6,
msg='The center latitude is incorrect')
# check that rectangle has the correct area
area_in_sq_m = bounds.center_width_m * bounds.height_m
actual_sq_km = units.sq_meters_to_sq_km(area_in_sq_m)
expected_sq_km = (2 * radius_m / 1000)**2
self.assertAlmostEqual(actual_sq_km, expected_sq_km, delta=0.001)
# check that the rectangle contains the circle
radius_in_degrees = units.convert_meters_to_degrees(radius_m, 'down')
point = Point(coord)
circle = point.buffer(radius_in_degrees)
self.assertTrue(square.contains(circle))
class CalculateFarthestDistanceTestCase(TestCase):
"""
Test class for the function calculate_farthest_dist_km.
"""
def test_dist_for_triangle(self):
"""
Tests the function calculate_farthest_dist_km for a triangle.
"""
points = ((0, 10), (0, 20), (20, 15))
target = (0, 15)
actual = shapes.calculate_farthest_dist_km(points, target)
expected = vincenty((15, 20), (15, 0)).kilometers
self.assertEqual(actual, expected)
def test_dist_for_empty_tuple(self):
"""
Tests the function calculate_farthest_dist_km for an empty tuple.
"""
points = ()
target = (0, 15)
actual = shapes.calculate_farthest_dist_km(points, target)
expected = 0
self.assertEqual(actual, expected)
class CalculateRadiusTestCase(TestCase):
"""
    Base test class for polygon and multipolygon radius calculators.
"""
fixtures = get_fixtures(['locations'])
class CalculatePolygonRadiusTestCase(CalculateRadiusTestCase):
"""
Test class for the function calculate_polygon_radius_km.
"""
def test_radius_for_rectangle(self):
"""
Tests the function calculate_polygon_radius_km for a rectangle.
"""
rectangle = Location.objects.get(pk=3)
actual = shapes.calculate_polygon_radius_km(rectangle.geom)
point = shapes.reverse_coordinate_order(rectangle.geom.centroid)
expected = vincenty(point, (0, 1)).kilometers
self.assertEqual(actual, expected)
def test_radius_for_polygon(self):
"""
Tests the function calculate_polygon_radius_km for a nonrectangular
polygon.
"""
polygon = Location.objects.get(pk=7)
actual = shapes.calculate_polygon_radius_km(polygon.geom)
point = shapes.reverse_coordinate_order(polygon.geom.centroid)
expected = vincenty(point, (8, 0)).kilometers
self.assertEqual(actual, expected)
class CalculateMultiPolygonRadiusTestCase(CalculateRadiusTestCase):
"""
Test class for the calculate_multipoly_radius_km function.
"""
def test_radius_for_multipolygon(self):
"""
Test case for a rectangle.
"""
multipolygon = Location.objects.get(pk=8)
actual = shapes.calculate_multipoly_radius_km(multipolygon.geom)
point = shapes.reverse_coordinate_order(multipolygon.geom.centroid)
expected = vincenty(point, (20, 50)).kilometers
self.assertEqual(actual, expected)
class FactorPolygonTestCase(TestCase):
"""
Tests factor_polygon_into_circles function.
"""
def _process_parameters(self, coord, radius_m):
"""
Helper function to process test parameters through
the factor_polygon_into_circles function.
"""
radius_km = units.meters_to_km(radius_m)
polygon = Polygon(coord)
points = shapes.factor_polygon_into_circles(polygon, radius_km)
# take the generated points and turn them into "circles" (polygons)
radius_in_deg = units.convert_meters_to_degrees(radius_m)
circles = [Point(point).buffer(radius_in_deg) for point in points]
# convert the list of circles into a multipolyon and merge them
merged_circles = MultiPolygon(circles).cascaded_union
# make sure the merged circles have no holes and completely cover
# the original polygon
self.assertTrue(merged_circles.num_interior_rings == 0,
'The merged circles had %s holes but should have none'
% merged_circles.num_interior_rings)
self.assertTrue(merged_circles.prepared.covers(polygon),
'The merged circles do not cover the polygon')
def test_around_equator_1km_radius(self):
"""
Tests the function factor_polygon_into_circles for a location around the
Equator and a radius of 1 km.
"""
coord = ((0.0, -0.01), (0.0, 0.01),
(0.025, 0.01), (0.05, -0.01), (0.0, -0.01))
radius_m = 1000
self._process_parameters(coord, radius_m)
def test_nothern_lat(self):
"""
Tests the function factor_polygon_into_circles for a location at a
northern latitude and a radius of 1 km.
"""
coord = ((-78.0, 0.23), (-78.0, 0.25),
(-79.5, 0.25), (-79.5, 0.23), (-78.0, 0.23))
radius_m = 1000
self._process_parameters(coord, radius_m)
def test_nothern_lat_5km_radius(self):
"""
Tests the function factor_polygon_into_circles for a location at a
northern latitude and a radius of 5 km.
"""
coord = ((-78.0, 0.23), (-78.0, 0.25),
(-79.5, 0.25), (-79.5, 0.23), (-78.0, 0.23))
radius_m = 5000
self._process_parameters(coord, radius_m)
def test_far_nothern_lat_5km_radius(self):
"""
Tests the function factor_polygon_into_circles for a location at a far
northern latitude and a radius of 5 km.
"""
coord = ((-78.0, 0.43), (-78.0, 0.45),
(-79.5, 0.45), (-79.5, 0.43), (-78.0, 0.43))
radius_m = 5000
self._process_parameters(coord, radius_m)
def test_far_south_1000km_radius(self):
"""
Tests the function factor_polygon_into_circles for a location at a far
southern latitude and a radius of 1000 km.
"""
coord = ((0.0, -0.43), (0.0, -0.45),
(1.0, -0.45), (1.0, -0.43), (0.0, -0.43))
radius_m = 1000000
self._process_parameters(coord, radius_m)
def test_radius_too_small(self):
"""
Tests the function factor_polygon_into_circles for a radius of 10 m.
"""
coord = ((0.0, -0.01), (0.0, 0.01),
(0.025, 0.01), (0.05, -0.01), (0.0, -0.01))
radius_km = 0.01
with self.assertRaises(ValueError):
polygon = Polygon(coord)
shapes.factor_polygon_into_circles(polygon, radius_km)
class ConvertToPointTestCase(TestCase):
"""
Test class for the function convert_to_point function.
"""
def test_convert_dict_to_point(self):
"""
Tests the convert_to_point function with a dictionary.
"""
point = shapes.convert_to_point({'lat': 40, 'lon': 100}, 'lon')
self.assertEqual(point[0], 100)
self.assertEqual(point[1], 40)
def test_convert_lat_list_to_point(self):
"""
Tests the convert_to_point function with a list that starts with
latitude.
"""
point = shapes.convert_to_point([40, 100], 'lat')
self.assertEqual(point[0], 100)
self.assertEqual(point[1], 40)
def test_convert_lon_list_to_point(self):
"""
Tests the convert_to_point function with a list that starts with
longitude.
"""
point = shapes.convert_to_point([100, 40], 'lon')
self.assertEqual(point[0], 100)
self.assertEqual(point[1], 40)
|
py | 7dfdb1c60f7f45bac405372c90a2fb23a63cc3a7 | # Copyright (c) 2017 LSD - UFCG.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from controller.utils.locator.instance import InstanceLocator
from controller.utils.ssh import SSHUtils
from mock.mock import MagicMock
from controller.exceptions.kvm import InstanceNotFoundException
class TestInstanceLocator(unittest.TestCase):
def setUp(self):
self.compute_1 = "compute1"
self.compute_2 = "compute2"
self.user = "root"
self.vm_id = "vm-id"
self.ssh_utils = SSHUtils({})
self.compute_nodes = [self.compute_1, self.compute_2]
self.compute_nodes_key = "key"
self.instance_locator = InstanceLocator(
self.ssh_utils, self.compute_nodes, self.compute_nodes_key)
def tearDown(self):
pass
def located(self, command, user, host, key):
return {self.compute_1: "0\n", self.compute_2: "1\n"}[host]
def impossible_to_locate(self, command, user, host, key):
return {self.compute_1: "1\n", self.compute_2: "1\n"}[host]
def test_locate(self):
self.ssh_utils.run_and_get_result = MagicMock()
self.ssh_utils.run_and_get_result.side_effect = self.located
result = self.instance_locator.locate(self.vm_id)
self.ssh_utils.run_and_get_result.assert_any_call("virsh schedinfo %s > /dev/null 2> /dev/null ; echo $?" %
(self.vm_id),
self.user, self.compute_1, self.compute_nodes_key)
self.assertEquals(result, self.compute_1)
def test_locate_impossible_to_find_instance(self):
self.ssh_utils.run_and_get_result = MagicMock()
self.ssh_utils.run_and_get_result.side_effect = self.impossible_to_locate
self.assertRaises(InstanceNotFoundException,
self.instance_locator.locate, self.vm_id)
self.ssh_utils.run_and_get_result.assert_any_call("virsh schedinfo %s > /dev/null 2> /dev/null ; echo $?" %
(self.vm_id), self.user, self.compute_1,
self.compute_nodes_key)
self.ssh_utils.run_and_get_result.assert_any_call("virsh schedinfo %s > /dev/null 2> /dev/null ; echo $?" %
(self.vm_id), self.user,
self.compute_2, self.compute_nodes_key)
if __name__ == "__main__":
unittest.main()
|
py | 7dfdb2810d16717c8eba164e7d23b3642ede8220 | from django.db.models import CharField
# from django_fake_model import models as f
from global_id.models.mixins import GUIDMixin
class FakeChat(GUIDMixin):
name = CharField(max_length=255, blank=True,
null=True)
|
py | 7dfdb2975b06b468c2100536c223c2f70f64ce35 | from .decorators import get_request
class Comments(object):
"""
    Comment reading API.
"""
_SHOW = 'https://api.weibo.com/2/comments/show.json'
_BY_ME = 'https://api.weibo.com/2/comments/by_me.json'
_TO_ME = 'https://api.weibo.com/2/comments/to_me.json'
_TIMELINE = 'https://api.weibo.com/2/comments/timeline.json'
_MENTIONS = 'https://api.weibo.com/2/comments/mentions.json'
_SHOW_BATCH = 'https://api.weibo.com/2/comments/show_batch.json'
def __init__(self, oauth):
self.oauth = oauth
@get_request
def show(self,
id: int,
since_id: int=0,
max_id: int=0,
count: int=20,
page: int=1,
filter_by_author: int=0):
"""
        Return the list of comments on a Weibo post, looked up by post ID.
        :param id: the ID of the Weibo post to query.
        :param since_id: if given, only return comments with an ID greater than since_id (i.e. posted later than since_id); defaults to 0.
        :param max_id: if given, only return comments with an ID less than or equal to max_id; defaults to 0.
        :param count: number of records per page, at most 200; defaults to 20.
        :param page: page number of the returned results; defaults to 1.
        :param filter_by_author: author filter, 0: all, 1: people I follow, 2: strangers; defaults to 0.
        Only comments from authorized users are returned; comments from non-authorized users are omitted.
:return: content, status_code
"""
...
@get_request
def by_me(self,
since_id: int=0,
max_id: int=0,
count: int=50,
page: int=1,
filter_by_source: int=0):
"""
        Get the list of comments posted by the currently logged-in user.
        :param since_id: if given, only return comments with an ID greater than since_id (i.e. posted later than since_id); defaults to 0.
        :param max_id: if given, only return comments with an ID less than or equal to max_id; defaults to 0.
        :param count: number of records per page; defaults to 50.
        :param page: page number of the returned results; defaults to 1.
        :param filter_by_source: source filter, 0: all, 1: comments from Weibo, 2: comments from Weiqun (micro-groups); defaults to 0.
:return: content, status_code
"""
...
@get_request
def to_me(self,
since_id: int=0,
max_id: int=0,
count: int=50,
page: int=1,
filter_by_author: int=0,
filter_by_source: int=0):
"""
        Get the list of comments received by the currently logged-in user.
        :param since_id: if given, only return comments with an ID greater than since_id (i.e. posted later than since_id); defaults to 0.
        :param max_id: if given, only return comments with an ID less than or equal to max_id; defaults to 0.
        :param count: number of records per page; defaults to 50.
        :param page: page number of the returned results; defaults to 1.
        :param filter_by_author: author filter, 0: all, 1: people I follow, 2: strangers; defaults to 0.
        :param filter_by_source: source filter, 0: all, 1: comments from Weibo, 2: comments from Weiqun (micro-groups); defaults to 0.
        Only comments from authorized users are returned; comments from non-authorized users are omitted.
:return: content, status_code
"""
...
@get_request
def timeline(self,
since_id: int=0,
max_id: int=0,
count: int=20,
page: int=1,
trim_user: int=0):
"""
        Get the latest comments of the currently logged-in user, both received and posted.
        :param since_id: if given, only return comments with an ID greater than since_id (i.e. posted later than since_id); defaults to 0.
        :param max_id: if given, only return comments with an ID less than or equal to max_id; defaults to 0.
        :param count: number of records per page; defaults to 20.
        :param page: page number of the returned results; defaults to 1.
        :param trim_user: switch for the user field in the response, 0: return the full user field, 1: return only user_id in the user field; defaults to 0.
        Only comments from authorized users are returned; comments from non-authorized users are omitted.
:return: content, status_code
"""
...
@get_request
def mentions(self,
since_id: int=0,
max_id: int=0,
count: int=50,
page: int=1,
filter_by_author: int=0,
filter_by_source: int=0):
"""
        Get the list of comments that mention (@) the currently logged-in user.
        :param since_id: if given, only return comments with an ID greater than since_id (i.e. posted later than since_id); defaults to 0.
        :param max_id: if given, only return comments with an ID less than or equal to max_id; defaults to 0.
        :param count: number of records per page; defaults to 50.
        :param page: page number of the returned results; defaults to 1.
        :param filter_by_author: author filter, 0: all, 1: people I follow, 2: strangers; defaults to 0.
        :param filter_by_source: source filter, 0: all, 1: comments from Weibo, 2: comments from Weiqun (micro-groups); defaults to 0.
        Only comments from authorized users are returned; comments from non-authorized users are omitted.
:return: content, status_code
"""
...
@get_request
def show_batch(self, cids: int):
"""
        Return comment details in batch, looked up by comment IDs.
        :param cids: comma-separated (half-width commas) list of comment IDs to query, at most 50.
:return: content, status_code
"""
...
|
py | 7dfdb2fc73c907fa82eda628257e0706186f355c | """prodai URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.conf.urls.static import static
from django.conf import settings
from src.accounts import views
urlpatterns = [
# Webpages for authenticated Users
path('dashboard/', include('src.dashboard.urls')),
path('timetracking/', include('src.timetracking.urls')),
path('analytics/', include('src.analytics.urls')),
path('projectmanagment/', include('src.projectmanagment.urls')),
path('team/', include('src.team.urls')),
path('profile/', views.profile_view, name='profile'),
path('settings/', include('src.settings.urls')),
# API Interface
path('api/v1/', include('src.api.urls')),
# Webpages both for authenticated and not authenticated Users
path('accounts/', include('src.accounts.urls')),
# Webpages for not authenticated users
path('', include('src.homepage.urls')),
# Webpages for admins
path('admin/', admin.site.urls),
]
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
urlpatterns += staticfiles_urlpatterns()
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
py | 7dfdb37cd7e0d70d8bb1051d14156dc9bbaafb87 | """
Contains classes corresponding to the Maya type hierarchy, including `DependNode`, `Transform`, `Mesh`, and `Camera`.
"""
import sys
import os
import re
import inspect
import itertools
import math
import pymel.util as _util
import pymel.internal.pmcmds as cmds # @UnresolvedImport
import pymel.internal.factories as _factories
import pymel.api as _api # @UnresolvedImport
import pymel.internal.apicache as _apicache
import pymel.internal.pwarnings as _warnings
from pymel.internal import getLogger as _getLogger
import datatypes
_logger = _getLogger(__name__)
# to make sure Maya is up
import pymel.internal as internal
import pymel.versions as versions
from maya.cmds import about as _about
import maya.mel as mm
#from general import *
import general
import other
from animation import listAnimatable as _listAnimatable
from system import namespaceInfo as _namespaceInfo, FileReference as _FileReference
_thisModule = sys.modules[__name__]
#__all__ = ['Component', 'MeshEdge', 'MeshVertex', 'MeshFace', 'Attribute', 'DependNode' ]
# Mesh Components
# If we're reloading, clear the pynode types out
_factories.clearPyNodeTypes()
# Dictionary mapping from maya node type names (ie, surfaceShape) to pymel
# class names, in this module - ie, SurfaceShape
mayaTypeNameToPymelTypeName = {}
pymelTypeNameToMayaTypeName = {}
class DependNode(general.PyNode):
__apicls__ = _api.MFnDependencyNode
__metaclass__ = _factories.MetaMayaNodeWrapper
#-------------------------------
# Name Info and Manipulation
#-------------------------------
# def __new__(cls,name,create=False):
# """
# Provides the ability to create the object when creating a class
#
# >>> n = pm.Transform("persp",create=True)
# >>> n.__repr__()
# # Result: nt.Transform(u'persp1')
# """
# if create:
# ntype = cls.__melnode__
# name = createNode(ntype,n=name,ss=1)
# return general.PyNode.__new__(cls,name)
# def __init__(self, *args, **kwargs ):
# self.apicls.__init__(self, self._apiobject.object() )
@_util.universalmethod
def __melobject__(self):
"""Special method for returning a mel-friendly representation."""
if isinstance(self, DependNode):
# For instance, return the node's name...
return self.name()
else:
# For the class itself, return the mel node name
return self.__melnode__
def __repr__(self):
"""
:rtype: `unicode`
"""
return u"nt.%s(%r)" % (self.__class__.__name__, self.name())
def _updateName(self):
# test validity
self.__apimobject__()
self._name = self.__apimfn__().name()
return self._name
# TODO: unify handling of name parsing (perhaps around the name parser
# classes?
def name(self, update=True, stripNamespace=False, levels=0, long=False,
stripUnderWorld=False):
'''The name of the node
Returns
-------
unicode
Parameters
----------
update : bool
            if True, will always query the underlying maya object to get its
current name (and will therefore detect renames, re-parenting, etc);
if False, it will use a cached value if available (which is slightly
faster, but may be out of date)
stripNamespace : bool
            if True, all nodes will have their namespaces stripped off of them
(or a certain number of them, if levels is also used)
levels : int
            if stripNamespace is True, then this number will determine how
many namespaces will be removed; if 0 (the default), then all
leading namespaces will be removed; otherwise, this value gives the
number of left-most levels to strip
long : bool
ignored; included simply to unify the interface between DependNode
and DagNode, to make it easier to loop over lists of them
stripUnderWorld : bool
ignored; included simply to unify the interface between DependNode
and DagNode, to make it easier to loop over lists of them
Examples
--------
>>> import pymel.core as pm
>>> pm.newFile(f=1)
''
>>> node = pm.createNode('blinn')
>>> pm.namespace(add='foo')
u'foo'
>>> pm.namespace(add='bar', parent='foo')
u'foo:bar'
>>> pm.namespace(add='stuff', parent='foo:bar')
u'foo:bar:stuff'
>>> node.rename(':foo:bar:stuff:blinn1')
nt.Blinn(u'foo:bar:stuff:blinn1')
>>> node.name()
u'foo:bar:stuff:blinn1'
>>> node.name(stripNamespace=True)
u'blinn1'
>>> node.name(stripNamespace=True, levels=1)
u'bar:stuff:blinn1'
>>> node.name(stripNamespace=True, levels=2)
u'stuff:blinn1'
'''
if update or self._name is None:
try:
self._updateName()
except general.MayaObjectError:
_logger.warn("object %s no longer exists" % self._name)
name = self._name
if stripNamespace:
if levels:
spaceSplit = name.split(':')
name = ':'.join(spaceSplit[min(len(spaceSplit) - 1, levels):])
else:
name = name.rsplit(':', 1)[-1]
return name
def namespace(self, root=False):
"""Returns the namespace of the object with trailing colon included.
See `DependNode.parentNamespace` for a variant which does not include
the trailing colon.
By default, if the object is in the root namespace, an empty string is
returned; if root is True, ':' is returned in this case.
Returns
-------
unicode
"""
ns = self.parentNamespace()
if ns or root:
ns += ':'
return ns
def shortName(self, **kwargs):
"""
This produces the same results as `DependNode.name` and is included to simplify looping over lists
of nodes that include both Dag and Depend nodes.
Returns
-------
unicode
"""
return self.name(**kwargs)
def longName(self, **kwargs):
"""
This produces the same results as `DependNode.name` and is included to simplify looping over lists
of nodes that include both Dag and Depend nodes.
:rtype: `unicode`
"""
return self.name(**kwargs)
def nodeName(self, **kwargs):
"""
This produces the same results as `DependNode.name` and is included to simplify looping over lists
of nodes that include both Dag and Depend nodes.
:rtype: `unicode`
"""
return self.name(**kwargs)
#rename = rename
def rename(self, name, **kwargs):
"""
:rtype: `DependNode`
"""
# self.setName( name ) # no undo support
# check for preserveNamespace a pymel unique flag
if kwargs.pop('preserveNamespace', False):
name = self.namespace(root=True) + name
# ensure shortname
if '|' in name:
name = name.split('|')[-1]
return general.rename(self, name, **kwargs)
def __apiobject__(self):
"get the default API object (MObject) for this node if it is valid"
return self.__apimobject__()
def __apimobject__(self):
"get the ``maya.OpenMaya.MObject`` for this node if it is valid"
handle = self.__apihandle__()
if _api.isValidMObjectHandle(handle):
return handle.object()
raise general.MayaNodeError(self._name)
def __apihandle__(self):
"get the ``maya.OpenMaya.MObjectHandle`` for this node if it is valid"
return self.__apiobjects__['MObjectHandle']
def __str__(self):
return "%s" % self.name()
def __unicode__(self):
return u"%s" % self.name()
if versions.current() >= versions.v2009:
def __hash__(self):
return self.__apihandle__().hashCode()
def node(self):
"""for compatibility with Attribute class
:rtype: `DependNode`
"""
return self
#--------------------------
# Modification
#--------------------------
def lock(self, **kwargs):
'lockNode -lock 1'
#kwargs['lock'] = True
# kwargs.pop('l',None)
# return cmds.lockNode( self, **kwargs)
return self.setLocked(True)
def unlock(self, **kwargs):
'lockNode -lock 0'
#kwargs['lock'] = False
# kwargs.pop('l',None)
# return cmds.lockNode( self, **kwargs)
return self.setLocked(False)
def cast(self, swapNode, **kwargs):
"""nodeCast"""
return cmds.nodeCast(self, swapNode, *kwargs)
duplicate = general.duplicate
#--------------------------
# xxx{ Presets
#--------------------------
def savePreset(self, presetName, custom=None, attributes=[]):
kwargs = {'save': True}
if attributes:
kwargs['attributes'] = ' '.join(attributes)
if custom:
kwargs['custom'] = custom
return cmds.nodePreset(presetName, **kwargs)
def loadPreset(self, presetName):
kwargs = {'load': True}
return cmds.nodePreset(presetName, **kwargs)
def deletePreset(self, presetName):
kwargs = {'delete': True}
return cmds.nodePreset(presetName, **kwargs)
def listPresets(self):
kwargs = {'list': True}
return cmds.nodePreset(**kwargs)
#}
#--------------------------
# xxx{ Info
#--------------------------
type = general.nodeType
def referenceFile(self):
"""referenceQuery -file
Return the reference file to which this object belongs. None if object is not referenced
:rtype: `FileReference`
"""
try:
return _FileReference(cmds.referenceQuery(self, f=1))
except RuntimeError:
None
isReadOnly = _factories.wrapApiMethod(_api.MFnDependencyNode, 'isFromReferencedFile', 'isReadOnly')
def classification(self, **kwargs):
'getClassification'
return general.getClassification(self.type(), **kwargs)
# return self.__apimfn__().classification( self.type() )
#}
#--------------------------
# xxx{ Connections
#--------------------------
def inputs(self, **kwargs):
"""listConnections -source 1 -destination 0
:rtype: `PyNode` list
"""
kwargs['source'] = True
kwargs.pop('s', None)
kwargs['destination'] = False
kwargs.pop('d', None)
return general.listConnections(self, **kwargs)
def outputs(self, **kwargs):
"""listConnections -source 0 -destination 1
:rtype: `PyNode` list
"""
kwargs['source'] = False
kwargs.pop('s', None)
kwargs['destination'] = True
kwargs.pop('d', None)
return general.listConnections(self, **kwargs)
def sources(self, **kwargs):
"""listConnections -source 1 -destination 0
:rtype: `PyNode` list
"""
kwargs['source'] = True
kwargs.pop('s', None)
kwargs['destination'] = False
kwargs.pop('d', None)
return general.listConnections(self, **kwargs)
def destinations(self, **kwargs):
"""listConnections -source 0 -destination 1
:rtype: `PyNode` list
"""
kwargs['source'] = False
kwargs.pop('s', None)
kwargs['destination'] = True
kwargs.pop('d', None)
return general.listConnections(self, **kwargs)
def shadingGroups(self):
"""list any shading groups in the future of this object - works for
shading nodes, transforms, and shapes
Also see listSets(type=1) - which returns which 'rendering sets' the
object is a member of (and 'rendering sets' seem to consist only of
shading groups), whereas this method searches the object's future for
any nodes of type 'shadingEngine'.
:rtype: `DependNode` list
"""
return self.future(type='shadingEngine')
#}
#--------------------------
# xxx{ Attributes
#--------------------------
def __getattr__(self, attr):
try:
return getattr(super(general.PyNode, self), attr)
except AttributeError:
try:
return DependNode.attr(self, attr)
except general.MayaAttributeError, e:
# since we're being called via __getattr__ we don't know whether the user was intending
# to get a class method or a maya attribute, so we raise a more generic AttributeError
raise AttributeError, "%r has no attribute or method named '%s'" % (self, attr)
@_util.universalmethod
def attrDefaults(obj, attr): # @NoSelf
"""
Access to an attribute of a node. This does not require an instance:
>>> nt.Transform.attrDefaults('tx').isKeyable()
True
but it can use one if needed ( for example, for dynamically created attributes )
>>> nt.Transform(u'persp').attrDefaults('tx').isKeyable()
Note: this is still experimental.
"""
if inspect.isclass(obj):
self = None
cls = obj # keep things familiar
else:
self = obj # keep things familiar
cls = type(obj)
attributes = cls.__apiobjects__.setdefault('MFnAttributes', {})
attrObj = attributes.get(attr, None)
if not _api.isValidMObject(attrObj):
def toAttrObj(apiObj):
try:
attrObj = apiObj.attribute(attr)
if attrObj.isNull():
raise RuntimeError
except RuntimeError:
# just try it first, then check if it has the attribute if
# we errored (as opposed to always check first if the node
# has the attribute), on the assumption that this will be
# "faster" for most cases, where the node actually DOES have
# the attribute...
if not apiObj.hasAttribute(attr):
raise general.MayaAttributeError('%s.%s' % (cls.__melnode__, attr))
else:
# don't know why we got this error, so just reraise
raise
return attrObj
if self is None:
if hasattr(_api, 'MNodeClass'):
# Yay, we have MNodeClass, use it!
nodeCls = _api.MNodeClass(cls.__melnode__)
attrObj = toAttrObj(nodeCls)
else:
# We don't have an instance of the node, we need
# to make a ghost one...
with _apicache._GhostObjMaker(cls.__melnode__) as nodeObj:
if nodeObj is None:
# for instance, we get this if we have an abstract class...
raise RuntimeError("Unable to get attribute defaults for abstract node class %s, in versions prior to 2012" % cls.__melnode__)
nodeMfn = cls.__apicls__(nodeObj)
attrObj = toAttrObj(nodeMfn)
else:
nodeMfn = self.__apimfn__()
attrObj = toAttrObj(nodeMfn)
attributes[attr] = attrObj
return general.AttributeDefaults(attrObj)
def attr(self, attr):
"""
access to attribute plug of a node. returns an instance of the Attribute class for the
given attribute name.
:rtype: `Attribute`
"""
return self._attr(attr, False)
# Just have this alias because it will sometimes return attributes for an
# underlying shape, which we may want for DagNode.attr, but don't want for
# DependNode.attr (and using the on-shape result, instead of throwing it
# away and then finding it again on the shape, saves time for the DagNode
# case)
def _attr(self, attr, allowOtherNode):
# return Attribute( '%s.%s' % (self, attr) )
try:
if '.' in attr or '[' in attr:
# Compound or Multi Attribute
# there are a couple of different ways we can proceed:
# Option 1: back out to _api.toApiObject (via general.PyNode)
# return Attribute( self.__apiobject__(), self.name() + '.' + attr )
# Option 2: nameparse.
# this avoids calling self.name(), which can be slow
import pymel.util.nameparse as nameparse
nameTokens = nameparse.getBasicPartList('dummy.' + attr)
result = self.__apiobject__()
for token in nameTokens[1:]: # skip the first, bc it's the node, which we already have
if isinstance(token, nameparse.MayaName):
if isinstance(result, _api.MPlug):
# you can't get a child plug from a multi/array plug.
# if result is currently 'defaultLightList1.lightDataArray' (an array)
# and we're trying to get the next plug, 'lightDirection', then we need a dummy index.
                            # the following line will result in 'defaultLightList1.lightDataArray[-1].lightDirection'
if result.isArray():
result = self.__apimfn__().findPlug(unicode(token))
else:
result = result.child(self.__apimfn__().attribute(unicode(token)))
else: # Node
result = self.__apimfn__().findPlug(unicode(token))
# # search children for the attribute to simulate cam.focalLength --> perspShape.focalLength
# except TypeError:
# for i in range(fn.childCount()):
# try: result = _api.MFnDagNode( fn.child(i) ).findPlug( unicode(token) )
# except TypeError: pass
# else:break
if isinstance(token, nameparse.NameIndex):
if token.value != -1:
result = result.elementByLogicalIndex(token.value)
plug = result
else:
try:
plug = self.__apimfn__().findPlug(attr, False)
except RuntimeError:
# Don't use .findAlias, as it always returns the 'base'
# attribute - ie, if the alias is to foo[0].bar, it will
# just point to foo
# aliases
#obj = _api.MObject()
#self.__apimfn__().findAlias( attr, obj )
#plug = self.__apimfn__().findPlug( obj, False )
# the following technique gets aliased attributes as well. turning dagPlugs to off saves time because we already
# know the dagNode. however, certain attributes, such as rotatePivot, are detected as components,
# despite the fact that findPlug finds them as MPlugs. need to look into this
# TODO: test speed versus above method
try:
plug = _api.toApiObject(self.name() + '.' + attr, dagPlugs=False)
except RuntimeError:
raise
if not isinstance(plug, _api.MPlug):
raise RuntimeError
if not (allowOtherNode or plug.node() == self.__apimobject__()):
# we could have gotten an attribute on a shape object,
# which we don't want
raise RuntimeError
return general.Attribute(self.__apiobject__(), plug)
except RuntimeError:
# raise our own MayaAttributeError, which subclasses AttributeError and MayaObjectError
raise general.MayaAttributeError('%s.%s' % (self, attr))
hasAttr = general.hasAttr
@_factories.addMelDocs('setAttr')
def setAttr(self, attr, *args, **kwargs):
# for now, using strings is better, because there is no MPlug support
return general.setAttr("%s.%s" % (self, attr), *args, **kwargs)
@_factories.addMelDocs('setAttr')
def setDynamicAttr(self, attr, *args, **kwargs):
"""
same as `DependNode.setAttr` with the force flag set to True. This causes
the attribute to be created based on the passed input value.
"""
# for now, using strings is better, because there is no MPlug support
kwargs['force'] = True
return general.setAttr("%s.%s" % (self, attr), *args, **kwargs)
@_factories.addMelDocs('getAttr')
def getAttr(self, attr, *args, **kwargs):
# for now, using strings is better, because there is no MPlug support
return general.getAttr("%s.%s" % (self, attr), *args, **kwargs)
@_factories.addMelDocs('addAttr')
def addAttr(self, attr, **kwargs):
# for now, using strings is better, because there is no MPlug support
assert 'longName' not in kwargs and 'ln' not in kwargs
kwargs['longName'] = attr
return general.addAttr(unicode(self), **kwargs)
@_factories.addMelDocs('deleteAttr')
def deleteAttr(self, attr, *args, **kwargs):
# for now, using strings is better, because there is no MPlug support
return general.deleteAttr("%s.%s" % (self, attr), *args, **kwargs)
@_factories.addMelDocs('connectAttr')
def connectAttr(self, attr, destination, **kwargs):
# for now, using strings is better, because there is no MPlug support
return general.connectAttr("%s.%s" % (self, attr), destination, **kwargs)
@_factories.addMelDocs('disconnectAttr')
def disconnectAttr(self, attr, destination=None, **kwargs):
# for now, using strings is better, because there is no MPlug support
return general.disconnectAttr("%s.%s" % (self, attr), destination, **kwargs)
listAnimatable = _listAnimatable
def listAttr(self, **kwargs):
"""
listAttr
Modifications:
- returns an empty list when the result is None
- added 'alias' keyword to list attributes that have aliases
- added 'topLevel' keyword to only return attributes that are not
compound children; may not be used in combination with
'descendants'
- added 'descendants' keyword to return all top-level attributes
and all their descendants; note that the standard call may return
some attributes that 'descendants' will not, if there are compound
multi attributes with no existing indices; ie, the standard call
might return "node.parentAttr[-1].childAttr", but the 'descendants'
version would only return childAttr if an index exists for
parentAttr, ie "node.parentAttr[0].childAttr"; may not be used in
combination with 'topLevel'
:rtype: `Attribute` list
"""
topLevel = kwargs.pop('topLevel', False)
descendants = kwargs.pop('descendants', False)
if descendants:
if topLevel:
raise ValueError("may not specify both topLevel and descendants")
# get the topLevel ones, then aggregate all the descendants...
topChildren = self.listAttr(topLevel=True, **kwargs)
res = list(topChildren)
for child in topChildren:
res.extend(child.iterDescendants())
return res
alias = kwargs.pop('alias', False)
# stringify fix
res = [self.attr(x) for x in _util.listForNone(cmds.listAttr(self.name(), **kwargs))]
if alias:
# need to make sure that our alias wasn't filtered out by one of
# the other kwargs (keyable, etc)...
# HOWEVER, we can't just do a straight up check to see if the
# results of listAlias() are in res - because the attributes in
            # res are index-less (ie, .myAttr[-1]), while the results returned
# by listAliases() have indices (ie, .myAttr[25])... so instead we
# just do a comparison of the names (which are easily hashable)
res = set(x.attrName() for x in res)
res = [x[1] for x in self.listAliases() if x[1].attrName() in res]
if topLevel:
res = [x for x in res if x.getParent() is None]
return res
def listAliases(self):
"""
aliasAttr
Modifications:
- returns an empty list when the result is None
- when queried, returns a list of (alias, `Attribute`) pairs.
:rtype: (`str`, `Attribute`) list
"""
#tmp = _util.listForNone(cmds.aliasAttr(self.name(),query=True))
tmp = []
self.__apimfn__().getAliasList(tmp)
res = []
for i in range(0, len(tmp), 2):
res.append((tmp[i], general.Attribute(self.node() + '.' + tmp[i + 1])))
return res
def attrInfo(self, **kwargs):
"""attributeInfo
:rtype: `Attribute` list
"""
# stringify fix
return map(lambda x: self.attr(x), _util.listForNone(cmds.attributeInfo(self.name(), **kwargs)))
#}
#-----------------------------------------
# xxx{ Name Info and Manipulation
#-----------------------------------------
# Now just wraps NameParser functions
def stripNum(self):
"""Return the name of the node with trailing numbers stripped off. If no trailing numbers are found
the name will be returned unchanged.
>>> from pymel.core import *
>>> SCENE.lambert1.stripNum()
u'lambert'
:rtype: `unicode`
"""
return other.NameParser(self).stripNum()
def extractNum(self):
"""Return the trailing numbers of the node name. If no trailing numbers are found
an error will be raised.
>>> from pymel.core import *
>>> SCENE.lambert1.extractNum()
u'1'
:rtype: `unicode`
"""
return other.NameParser(self).extractNum()
def nextUniqueName(self):
"""Increment the trailing number of the object until a unique name is found
If there is no trailing number, appends '1' to the name.
:rtype: `unicode`
"""
return other.NameParser(self).nextUniqueName()
def nextName(self):
"""Increment the trailing number of the object by 1
Raises an error if the name has no trailing number.
>>> from pymel.core import *
>>> SCENE.lambert1.nextName()
DependNodeName(u'lambert2')
:rtype: `unicode`
"""
return other.NameParser(self).nextName()
def prevName(self):
"""Decrement the trailing number of the object by 1
Raises an error if the name has no trailing number.
:rtype: `unicode`
"""
return other.NameParser(self).prevName()
@classmethod
def registerVirtualSubClass(cls, nameRequired=False):
"""
Deprecated
"""
_factories.registerVirtualClass(cls, nameRequired)
#}
if versions.current() >= versions.v2011:
class ContainerBase(DependNode):
__metaclass__ = _factories.MetaMayaNodeWrapper
pass
class Entity(ContainerBase):
__metaclass__ = _factories.MetaMayaNodeWrapper
pass
else:
class Entity(DependNode):
__metaclass__ = _factories.MetaMayaNodeWrapper
pass
class DagNode(Entity):
#:group Path Info and Modification: ``*parent*``, ``*Parent*``, ``*child*``, ``*Child*``
"""
"""
__apicls__ = _api.MFnDagNode
__metaclass__ = _factories.MetaMayaNodeWrapper
# def __init__(self, *args, **kwargs ):
# self.apicls.__init__(self, self.__apimdagpath__() )
_componentAttributes = {}
def comp(self, compName):
"""
Will retrieve a Component object for this node; similar to
DependNode.attr(), but for components.
:rtype: `Component`
"""
if compName in self._componentAttributes:
compClass = self._componentAttributes[compName]
if isinstance(compClass, tuple):
# We have something like:
# 'uIsoparm' : (NurbsSurfaceIsoparm, 'u')
# need to specify what 'flavor' of the basic
# component we need...
return compClass[0](self, {compClass[1]: general.ComponentIndex(label=compClass[1])})
else:
return compClass(self)
# if we do self.getShape(), and this is a shape node, we will
# enter a recursive loop if compName isn't actually a comp:
# since shape doesn't have 'getShape', it will call __getattr__
        # for 'getShape', which in turn calls comp to check if it's a comp,
# which will call __getattr__, etc
# ..soo... check if we have a 'getShape'!
# ...also, don't use 'hasattr', as this will also call __getattr__!
try:
object.__getattribute__(self, 'getShape')
except AttributeError:
raise general.MayaComponentError('%s.%s' % (self, compName))
else:
shape = self.getShape()
if shape:
return shape.comp(compName)
def listComp(self, names=False):
"""Will return a list of all component objects for this object
Is to .comp() what .listAttr() is to .attr(); will NOT check the shape
node.
Parameters
----------
names : bool
            By default, will return a list of actual usable pymel Component
objects; if you just want a list of string names which would
be compatible with .comp(), set names to True
"""
keys = sorted(self._componentAttributes.keys())
if names:
return keys
compTypes = set()
comps = []
# use the sorted keys, so the order matches that returned by names,
# minus duplicate entries for aliases
for name in keys:
compType = self._componentAttributes[name]
if compType not in compTypes:
compTypes.add(compType)
comps.append(self.comp(name))
return comps
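    # Illustrative sketch (hypothetical mesh shape): comp()/listComp() are the
    # component analogues of attr()/listAttr(), e.g.:
    #     shape = general.PyNode('pCubeShape1')
    #     shape.listComp(names=True)   # short component names accepted by comp()
    #     shape.comp('vtx')            # same component object you'd get from shape.vtx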
def _updateName(self, long=False):
# if _api.isValidMObjectHandle(self._apiobject) :
#obj = self._apiobject.object()
#dagFn = _api.MFnDagNode(obj)
#dagPath = _api.MDagPath()
# dagFn.getPath(dagPath)
dag = self.__apimdagpath__()
if dag:
name = dag.partialPathName()
if not name:
raise general.MayaNodeError
self._name = name
if long:
return dag.fullPathName()
return self._name
# TODO: unify handling of name parsing (perhaps around the name parser
# classes?
# TODO: support for underworld nodes
def name(self, update=True, long=False, stripNamespace=False, levels=0,
stripUnderWorld=False):
'''The name of the node
Parameters
----------
update : bool
            if True, will always query the underlying maya object to get its
current name (and will therefore detect renames, re-parenting, etc);
if False, it will use a cached value if available (which is slightly
faster, but may be out of date)
long : {True, False, None}
            if True, will always include the full dag path, starting
from the world root, including leading pipe ( | ); if False, will
return the shortest-unique path; if None, node names will always be
            returned without any parents, even if they are not unique
stripNamespace : bool
            if True, all nodes will have their namespaces stripped off of them
(or a certain number of them, if levels is also used)
levels : int
            if stripNamespace is True, then this number will determine how
many namespaces will be removed; if 0 (the default), then all
leading namespaces will be removed; otherwise, this value gives the
number of left-most levels to strip
stripUnderWorld : bool
if stripUnderWorld is True, and the name has underworld components
(ie, topNode|topNodeShape->underWorld|underWorldShape), then only
the portion in the "deepest underworld" is returned (ie,
underWorld|underWorldShape)
Returns
-------
unicode
Examples
--------
>>> import pymel.core as pm
>>> pm.newFile(f=1)
''
>>> cube1 = pm.polyCube()[0]
>>> cube2 = pm.polyCube()[0]
>>> cube3 = pm.polyCube()[0]
>>> cube3Shape = cube3.getShape()
>>> cube2.setParent(cube1)
nt.Transform(u'pCube2')
>>> cube3.setParent(cube2)
nt.Transform(u'pCube3')
>>> pm.namespace(add='foo')
u'foo'
>>> pm.namespace(add='bar', parent='foo')
u'foo:bar'
>>> pm.namespace(add='stuff', parent='foo:bar')
u'foo:bar:stuff'
>>> cube2.rename(':foo:pCube2')
nt.Transform(u'foo:pCube2')
>>> cube3.rename(':foo:bar:pCube3')
nt.Transform(u'foo:bar:pCube3')
>>> cube3Shape.rename(':foo:bar:stuff:pCubeShape3')
nt.Mesh(u'foo:bar:stuff:pCubeShape3')
>>> cube3Shape.name()
u'foo:bar:stuff:pCubeShape3'
>>> cube3Shape.name(stripNamespace=True)
u'pCubeShape3'
>>> cube3Shape.name(long=True)
u'|pCube1|foo:pCube2|foo:bar:pCube3|foo:bar:stuff:pCubeShape3'
>>> cube3Shape.name(long=True, stripNamespace=True)
u'|pCube1|pCube2|pCube3|pCubeShape3'
>>> cube3Shape.name(long=True, stripNamespace=True, levels=1)
u'|pCube1|pCube2|bar:pCube3|bar:stuff:pCubeShape3'
>>> cube3Shape.name(long=True, stripNamespace=True, levels=2)
u'|pCube1|pCube2|pCube3|stuff:pCubeShape3'
>>> cam = pm.camera()[0]
>>> cam.setParent(cube2)
nt.Transform(u'camera1')
>>> imagePlane = pm.imagePlane(camera=cam.getShape())[1]
>>> imagePlane.rename('foo:bar:stuff:imagePlaneShape1')
nt.ImagePlane(u'cameraShape1->foo:bar:stuff:imagePlaneShape1')
>>> imagePlane.name()
u'cameraShape1->foo:bar:stuff:imagePlaneShape1'
>>> imagePlane.name(stripUnderWorld=True)
u'foo:bar:stuff:imagePlaneShape1'
>>> imagePlane.name(stripNamespace=True, levels=1)
u'cameraShape1->bar:stuff:imagePlaneShape1'
>>> imagePlane.name(stripUnderWorld=True, long=True)
u'|imagePlane1|foo:bar:stuff:imagePlaneShape1'
>>> imagePlane.name(stripUnderWorld=True, stripNamespace=True, long=True)
u'|imagePlane1|imagePlaneShape1'
'''
if update or long or self._name is None:
try:
name = self._updateName(long)
except general.MayaObjectError:
# if we have an error, but we're only looking for the nodeName,
# use the non-dag version
if long is None:
# don't use DependNode._updateName, as that can still
# raise MayaInstanceError - want this to work, so people
# have a way to get the correct instance, assuming they know
# what the parent should be
name = _api.MFnDependencyNode(self.__apimobject__()).name()
else:
_logger.warn("object %s no longer exists" % self._name)
name = self._name
else:
name = self._name
if stripNamespace or stripUnderWorld or long is None:
worlds = []
underworldSplit = name.split('->')
if stripUnderWorld:
underworldSplit = [underworldSplit[-1]]
for worldName in underworldSplit:
if stripNamespace or long is None:
parentSplit = worldName.split('|')
if long is None:
parentSplit = [parentSplit[-1]]
if stripNamespace:
nodes = []
for node in parentSplit:
                            # not sure what dag node has a "." in its name, but keeping
# this code here just in case...
dotSplit = node.split('.')
spaceSplit = dotSplit[0].split(':')
if levels:
dotSplit[0] = ':'.join(spaceSplit[min(len(spaceSplit) - 1,
levels):])
else:
dotSplit[0] = spaceSplit[-1]
nodes.append('.'.join(dotSplit))
else:
nodes = parentSplit
worldName = '|'.join(nodes)
worlds.append(worldName)
name = '->'.join(worlds)
return name
def longName(self, **kwargs):
"""
The full dag path to the object, including leading pipe ( | )
Returns
-------
unicode
Examples
--------
>>> import pymel.core as pm
>>> pm.newFile(f=1)
''
>>> cube1 = pm.polyCube()[0]
>>> cube2 = pm.polyCube()[0]
>>> cube3 = pm.polyCube()[0]
>>> cube3Shape = cube3.getShape()
>>> cube2.setParent(cube1)
nt.Transform(u'pCube2')
>>> cube3.setParent(cube2)
nt.Transform(u'pCube3')
>>> pm.namespace(add='foo')
u'foo'
>>> pm.namespace(add='bar', parent='foo')
u'foo:bar'
>>> pm.namespace(add='stuff', parent='foo:bar')
u'foo:bar:stuff'
>>> cube2.rename(':foo:pCube2')
nt.Transform(u'foo:pCube2')
>>> cube3.rename(':foo:bar:pCube3')
nt.Transform(u'foo:bar:pCube3')
>>> cube3Shape.rename(':foo:bar:stuff:pCubeShape3')
nt.Mesh(u'foo:bar:stuff:pCubeShape3')
>>> cube3Shape.longName()
u'|pCube1|foo:pCube2|foo:bar:pCube3|foo:bar:stuff:pCubeShape3'
>>> cube3Shape.longName(stripNamespace=True)
u'|pCube1|pCube2|pCube3|pCubeShape3'
>>> cube3Shape.longName(stripNamespace=True, levels=1)
u'|pCube1|pCube2|bar:pCube3|bar:stuff:pCubeShape3'
>>> cube3Shape.longName(stripNamespace=True, levels=2)
u'|pCube1|pCube2|pCube3|stuff:pCubeShape3'
>>> cam = pm.camera()[0]
>>> cam.setParent(cube2)
nt.Transform(u'camera1')
>>> imagePlane = pm.imagePlane(camera=cam.getShape())[1]
>>> imagePlane.rename('foo:bar:stuff:imagePlaneShape1')
nt.ImagePlane(u'cameraShape1->foo:bar:stuff:imagePlaneShape1')
>>> imagePlane.longName()
u'|pCube1|foo:pCube2|camera1|cameraShape1->|imagePlane1|foo:bar:stuff:imagePlaneShape1'
>>> imagePlane.longName(stripUnderWorld=True)
u'|imagePlane1|foo:bar:stuff:imagePlaneShape1'
>>> imagePlane.longName(stripNamespace=True, levels=1)
u'|pCube1|pCube2|camera1|cameraShape1->|imagePlane1|bar:stuff:imagePlaneShape1'
>>> imagePlane.longName(stripUnderWorld=True, stripNamespace=True)
u'|imagePlane1|imagePlaneShape1'
"""
return self.name(long=True, **kwargs)
fullPath = longName
def shortName(self, **kwargs):
"""
The shortest unique name.
Returns
-------
unicode
Examples
--------
>>> import pymel.core as pm
>>> pm.newFile(f=1)
''
>>> cube1 = pm.polyCube()[0]
>>> cube2 = pm.polyCube()[0]
>>> cube3 = pm.polyCube()[0]
>>> cube3Shape = cube3.getShape()
>>> cube2.setParent(cube1)
nt.Transform(u'pCube2')
>>> cube3.setParent(cube2)
nt.Transform(u'pCube3')
>>> pm.namespace(add='foo')
u'foo'
>>> pm.namespace(add='bar', parent='foo')
u'foo:bar'
>>> pm.namespace(add='stuff', parent='foo:bar')
u'foo:bar:stuff'
>>> cube2.rename(':foo:pCube2')
nt.Transform(u'foo:pCube2')
>>> cube3.rename(':foo:bar:pCube3')
nt.Transform(u'foo:bar:pCube3')
>>> cube3Shape.rename(':foo:bar:stuff:pCubeShape3')
nt.Mesh(u'foo:bar:stuff:pCubeShape3')
>>> cube3Shape.shortName()
u'foo:bar:stuff:pCubeShape3'
>>> cube3Shape.shortName(stripNamespace=True)
u'pCubeShape3'
>>> cube3Shape.shortName(stripNamespace=True, levels=1)
u'bar:stuff:pCubeShape3'
>>> cube3Shape.shortName(stripNamespace=True, levels=2)
u'stuff:pCubeShape3'
>>> cam = pm.camera()[0]
>>> cam.setParent(cube2)
nt.Transform(u'camera1')
>>> imagePlane = pm.imagePlane(camera=cam.getShape())[1]
>>> imagePlane.rename('foo:bar:stuff:imagePlaneShape1')
nt.ImagePlane(u'cameraShape1->foo:bar:stuff:imagePlaneShape1')
>>> imagePlane.shortName()
u'cameraShape1->foo:bar:stuff:imagePlaneShape1'
>>> imagePlane.shortName(stripUnderWorld=True)
u'foo:bar:stuff:imagePlaneShape1'
>>> imagePlane.shortName(stripNamespace=True, levels=1)
u'cameraShape1->bar:stuff:imagePlaneShape1'
>>> imagePlane.shortName(stripUnderWorld=True)
u'foo:bar:stuff:imagePlaneShape1'
>>> imagePlane.shortName(stripUnderWorld=True, stripNamespace=True)
u'imagePlaneShape1'
"""
return self.name(long=False, **kwargs)
# TODO: better support for underworld nodes (ie, in conjunction with
# stripNamespace)
def nodeName(self, stripUnderWorld=True, **kwargs):
"""
Just the name of the node, without any dag path
Returns
-------
unicode
Examples
--------
>>> import pymel.core as pm
>>> pm.newFile(f=1)
''
>>> cube1 = pm.polyCube()[0]
>>> cube2 = pm.polyCube()[0]
>>> cube3 = pm.polyCube()[0]
>>> cube3Shape = cube3.getShape()
>>> cube2.setParent(cube1)
nt.Transform(u'pCube2')
>>> cube3.setParent(cube2)
nt.Transform(u'pCube3')
>>> pm.namespace(add='foo')
u'foo'
>>> pm.namespace(add='bar', parent='foo')
u'foo:bar'
>>> pm.namespace(add='stuff', parent='foo:bar')
u'foo:bar:stuff'
>>> cube2.rename(':foo:pCube2')
nt.Transform(u'foo:pCube2')
>>> cube3.rename(':foo:bar:pCube3')
nt.Transform(u'foo:bar:pCube3')
>>> cube3Shape.rename(':foo:bar:stuff:pCubeShape3')
nt.Mesh(u'foo:bar:stuff:pCubeShape3')
        >>> # create an object with the same name as pCube3 / pCubeShape3
>>> cube3Twin = pm.polyCube()[0]
>>> cube3Twin.rename('foo:bar:pCube3')
nt.Transform(u'|foo:bar:pCube3')
>>> cube3ShapeTwin = cube3Twin.getShape()
>>> cube3ShapeTwin.rename('foo:bar:stuff:pCubeShape3')
nt.Mesh(u'|foo:bar:pCube3|foo:bar:stuff:pCubeShape3')
>>> cube3Shape.shortName()
u'foo:pCube2|foo:bar:pCube3|foo:bar:stuff:pCubeShape3'
>>> cube3Shape.nodeName()
u'foo:bar:stuff:pCubeShape3'
>>> cube3Shape.nodeName(stripNamespace=True)
u'pCubeShape3'
>>> cube3Shape.nodeName(stripNamespace=True, levels=1)
u'bar:stuff:pCubeShape3'
>>> cube3Shape.nodeName(stripNamespace=True, levels=2)
u'stuff:pCubeShape3'
>>> cam = pm.camera()[0]
>>> cam.setParent(cube2)
nt.Transform(u'camera1')
>>> imagePlaneTrans, imagePlane = pm.imagePlane(camera=cam.getShape())
>>> imagePlane.rename('foo:bar:stuff:imagePlaneShape1')
nt.ImagePlane(u'cameraShape1->foo:bar:stuff:imagePlaneShape1')
>>> # create an object with the same name as cam
>>> pm.camera()[0].setParent(cube3Twin).rename('camera1')
nt.Transform(u'|foo:bar:pCube3|camera1')
>>> # create an object with the same name as imagePlane
>>> imagePlaneTwinTrans, imagePlaneTwin = pm.imagePlane(camera=cam.getShape())
>>> imagePlaneTwin.rename('foo:bar:stuff:imagePlaneShape1')
nt.ImagePlane(u'foo:pCube2|camera1|cameraShape1->imagePlane2|foo:bar:stuff:imagePlaneShape1')
>>> imagePlane.shortName()
u'foo:pCube2|camera1|cameraShape1->imagePlane1|foo:bar:stuff:imagePlaneShape1'
>>> imagePlane.nodeName()
u'foo:bar:stuff:imagePlaneShape1'
>>> imagePlane.nodeName(stripUnderWorld=False)
u'cameraShape1->foo:bar:stuff:imagePlaneShape1'
>>> imagePlane.nodeName(stripNamespace=True)
u'imagePlaneShape1'
>>> imagePlane.nodeName(stripNamespace=True, levels=1)
u'bar:stuff:imagePlaneShape1'
"""
return self.name(long=None, stripUnderWorld=stripUnderWorld, **kwargs)
def __apiobject__(self):
"get the ``maya.OpenMaya.MDagPath`` for this object if it is valid"
return self.__apimdagpath__()
def __apimdagpath__(self):
"get the ``maya.OpenMaya.MDagPath`` for this object if it is valid"
try:
dag = self.__apiobjects__['MDagPath']
# If we have a cached mobject, test for validity: if the object is
# not valid an error will be raised
# Check if MObjectHandle in self.__apiobjects__ to avoid recursive
# loop...
if 'MObjectHandle' in self.__apiobjects__:
self.__apimobject__()
if not dag.isValid():
# Usually this only happens if the object was reparented, with
# instancing.
#
# Most of the time, this makes sense - there's no way to know
                # which of the new instances we "want". However, occasionally,
# when the object was reparented, there were multiple instances,
# and the MDagPath was invalidated; however, subsequently, other
# instances were removed, so it's no longer instanced. Check for
# this...
# in some cases, doing dag.node() will crash maya if the dag
# isn't valid... so try using MObjectHandle
handle = self.__apiobjects__.get('MObjectHandle')
if handle is not None and handle.isValid():
mfnDag = _api.MFnDagNode(handle.object())
if not mfnDag.isInstanced():
                        # throw a KeyError, this will cause us to regen from
# first MDagPath
raise KeyError
raise general.MayaInstanceError(mfnDag.name())
else:
name = getattr(self, '_name', '<unknown>')
raise general.MayaInstanceError(name)
return dag
except KeyError:
# class was instantiated from an MObject, but we can still retrieve the first MDagPath
#assert argObj.hasFn( _api.MFn.kDagNode )
dag = _api.MDagPath()
            # we can't use self.__apimfn__() because the mfn is instantiated from an MDagPath
# which we are in the process of finding out
mfn = _api.MFnDagNode(self.__apimobject__())
mfn.getPath(dag)
self.__apiobjects__['MDagPath'] = dag
return dag
# if dag.isValid():
# #argObj = dag
# if dag.fullPathName():
# argObj = dag
# else:
# print 'produced valid MDagPath with no name: %s(%s)' % ( argObj.apiTypeStr(), _api.MFnDependencyNode(argObj).name() )
def __apihandle__(self):
"get the ``maya.OpenMaya.MObjectHandle`` for this node if it is valid"
try:
handle = self.__apiobjects__['MObjectHandle']
except KeyError:
try:
handle = _api.MObjectHandle(self.__apimdagpath__().node())
except general.MayaInstanceError:
if 'MDagPath' in self.__apiobjects__:
handle = _api.MObjectHandle(self.__apiobjects__['MDagPath'].node())
else:
raise general.MayaNodeError(self._name)
except RuntimeError:
raise general.MayaNodeError(self._name)
self.__apiobjects__['MObjectHandle'] = handle
return handle
# def __apimfn__(self):
# if self._apimfn:
# return self._apimfn
# elif self.__apicls__:
# obj = self._apiobject
# if _api.isValidMDagPath(obj):
# try:
# self._apimfn = self.__apicls__(obj)
# return self._apimfn
# except KeyError:
# pass
# def __init__(self, *args, **kwargs):
# if self._apiobject:
# if isinstance(self._apiobject, _api.MObjectHandle):
# dagPath = _api.MDagPath()
# _api.MDagPath.getAPathTo( self._apiobject.object(), dagPath )
# self._apiobject = dagPath
#
# assert _api.isValidMDagPath( self._apiobject )
# def __init__(self, *args, **kwargs) :
# if args :
# arg = args[0]
# if len(args) > 1 :
# comp = args[1]
# if isinstance(arg, DagNode) :
# self._name = unicode(arg.name())
# self._apiobject = _api.MObjectHandle(arg.object())
# elif _api.isValidMObject(arg) or _api.isValidMObjectHandle(arg) :
# objHandle = _api.MObjectHandle(arg)
# obj = objHandle.object()
# if _api.isValidMDagNode(obj) :
# self._apiobject = objHandle
# self._updateName()
# else :
# raise TypeError, "%r might be a dependencyNode, but not a dagNode" % arg
# elif isinstance(arg, basestring) :
# obj = _api.toMObject (arg)
# if obj :
# # creation for existing object
# if _api.isValidMDagNode (obj):
# self._apiobject = _api.MObjectHandle(obj)
# self._updateName()
# else :
# raise TypeError, "%r might be a dependencyNode, but not a dagNode" % arg
# else :
# # creation for inexistent object
# self._name = arg
# else :
# raise TypeError, "don't know how to make a DagNode out of a %s : %r" % (type(arg), arg)
#--------------------------------
# xxx{ Path Info and Modification
#--------------------------------
def root(self):
"""rootOf
:rtype: `unicode`
"""
return DagNode('|' + self.longName()[1:].split('|')[0])
# For some reason, this wasn't defined on Transform...?
# maya seems to have a bug right now (2016.53) that causes crashes when
# accessing MDagPaths after creating an instance, so not enabling this
# at the moment...
# def getAllPaths(self):
# dagPaths = _api.MDagPathArray()
# self.__apimfn__().getAllPaths(dagPaths)
# return [DagNode(dagPaths[i]) for i in xrange(dagPaths.length())]
def hasParent(self, parent):
'''
Modifications:
- handles underworld nodes correctly
'''
toMObj = _factories.ApiTypeRegister.inCast['MObject']
# This will error if parent is not a dag, or not a node, like default
# wrapped implementation
parentMObj = toMObj(parent)
thisMFn = self.__apimfn__()
if thisMFn.hasParent(parentMObj):
return True
# quick out... MFnDagNode handles things right if all instances aren't
# underworld nodes
if not thisMFn.isInstanced() and not thisMFn.inUnderWorld():
return False
        # See if its underworld parent is the given parent...
# Note that MFnDagPath implementation considers all instances, so we
# should too...
allPaths = _api.MDagPathArray()
thisMFn.getAllPaths(allPaths)
for i in xrange(allPaths.length()):
path = allPaths[i]
pathCount = path.pathCount()
if pathCount <= 1:
continue
# if there's an underworld, there should be at least 3 nodes -
# the top parent, the underworld root, and the node in the
# underworld
assert path.length() >= 3
# if they are in the same underworld, MFnDagNode.hasParent would
# work - only care about the case where the "immediate" parent is
# outside of this node's underworld
# Unfortunately, MDagPath.pop() has some strange behavior - ie, if
# path = |camera1|cameraShape1->|imagePlane1
# Then popping it once gives:
# path = |camera1|cameraShape1->|
# ...and again gives:
# path = |camera1|cameraShape1
# So, we check that popping once has the same pathCount, but twice
# has a different path count
path.pop()
if path.pathCount() != pathCount:
continue
path.pop()
            if path.pathCount() != pathCount - 1:
continue
if path.node() == parentMObj:
return True
return False
def hasChild(self, child):
'''
Modifications:
- handles underworld nodes correctly
'''
toMObj = _factories.ApiTypeRegister.inCast['MObject']
        # This will error if child is not a dag, or not a node, like default
# wrapped implementation
childMObj = toMObj(child)
thisMFn = self.__apimfn__()
if self.__apimfn__().hasChild(childMObj):
return True
# because hasChild / hasParent consider all instances,
# self.hasChild(child) is equivalent to child.hasParent(self)...
if not isinstance(child, general.PyNode):
child = DagNode(childMObj)
return child.hasParent(self)
def isChildOf(self, parent):
'''
Modifications:
- handles underworld nodes correctly
'''
toMObj = _factories.ApiTypeRegister.inCast['MObject']
# This will error if parent is not a dag, or not a node, like default
# wrapped implementation
parentMObj = toMObj(parent)
thisMFn = self.__apimfn__()
if thisMFn.isChildOf(parentMObj):
return True
# quick out... MFnDagNode handles things right if all instances aren't
# underworld nodes
if not thisMFn.isInstanced() and not thisMFn.inUnderWorld():
return False
# For each instance path, if it's in the underworld, check to see
        # if the ancestor at the same "underworld" level as the parent is the
        # parent itself, or a child of it
dagArray = _api.MDagPathArray()
_api.MDagPath.getAllPathsTo(parentMObj, dagArray)
allParentLevels = set()
for i in xrange(dagArray.length()):
parentMDag = dagArray[i]
allParentLevels.add(parentMDag.pathCount())
# we only get one parentMFn, but this should be fine as (aside from
# not dealing with underworld correctly), MFnDagNode.isParentOf works
# correctly for all instances...
parentMFn = _api.MFnDagNode(parentMObj)
dagArray.clear()
thisMFn.getAllPaths(dagArray)
for i in xrange(dagArray.length()):
childMDag = dagArray[i]
childPathLevels = childMDag.pathCount()
if childPathLevels <= 1:
continue
for parentUnderworldLevel in allParentLevels:
if childMDag.pathCount() <= parentUnderworldLevel:
# standard MFnDagNode.isChildOf would have handled this...
continue
sameLevelMDag = _api.MDagPath()
childMDag.getPath(sameLevelMDag, parentUnderworldLevel - 1)
sameLevelMObj = sameLevelMDag.node()
if sameLevelMObj == parentMObj:
return True
if parentMFn.isParentOf(sameLevelMObj):
return True
return False
def isParentOf(self, child):
'''
Modifications:
- handles underworld nodes correctly
'''
toMObj = _factories.ApiTypeRegister.inCast['MObject']
        # This will error if child is not a dag, or not a node, like default
# wrapped implementation
childMObj = toMObj(child)
thisMFn = self.__apimfn__()
if thisMFn.isParentOf(childMObj):
return True
# because isChildOf / isParentOf consider all instances,
# self.isParentOf(child) is equivalent to child.isChildOf(self)...
if not isinstance(child, general.PyNode):
child = DagNode(childMObj)
return child.isChildOf(self)
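    # Illustrative sketch (reusing the camera / imagePlane setup from the name()
    # docstring above): these overrides also see across the underworld boundary,
    # so imagePlane.isChildOf(cam.getShape()) and
    # cam.getShape().isParentOf(imagePlane) should both be True - relationships
    # the unmodified MFnDagNode checks would miss.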
def isInstanceOf(self, other):
"""
:rtype: `bool`
"""
if isinstance(other, general.PyNode):
return self.__apimobject__() == other.__apimobject__()
else:
try:
return self.__apimobject__() == general.PyNode(other).__apimobject__()
except:
return False
def instanceNumber(self):
"""
returns the instance number that this path represents in the DAG. The instance number can be used to determine which
element of the world space array attributes of a DAG node to connect to get information regarding this instance.
:rtype: `int`
"""
return self.__apimdagpath__().instanceNumber()
def getInstances(self, includeSelf=True):
"""
:rtype: `DagNode` list
>>> from pymel.core import *
>>> f=newFile(f=1) #start clean
>>>
>>> s = polyPlane()[0]
>>> instance(s)
[nt.Transform(u'pPlane2')]
>>> instance(s)
[nt.Transform(u'pPlane3')]
>>> s.getShape().getInstances()
[nt.Mesh(u'pPlane1|pPlaneShape1'), nt.Mesh(u'pPlane2|pPlaneShape1'), nt.Mesh(u'pPlane3|pPlaneShape1')]
>>> s.getShape().getInstances(includeSelf=False)
[nt.Mesh(u'pPlane2|pPlaneShape1'), nt.Mesh(u'pPlane3|pPlaneShape1')]
"""
d = _api.MDagPathArray()
self.__apimfn__().getAllPaths(d)
thisDagPath = self.__apimdagpath__()
result = [general.PyNode(_api.MDagPath(d[i])) for i in range(d.length()) if includeSelf or not d[i] == thisDagPath]
return result
def getOtherInstances(self):
"""
same as `DagNode.getInstances` with includeSelf=False.
:rtype: `DagNode` list
"""
return self.getInstances(includeSelf=False)
def firstParent(self):
"""firstParentOf
:rtype: `DagNode`
"""
try:
return DagNode('|'.join(self.longName().split('|')[:-1]))
except TypeError:
return DagNode('|'.join(self.split('|')[:-1]))
# def numChildren(self):
# """
# see also `childCount`
#
# :rtype: `int`
# """
# return self.__apimdagpath__().childCount()
# def getParent(self, **kwargs):
# # TODO : print warning regarding removal of kwargs, test speed difference
# parent = _api.MDagPath( self.__apiobject__() )
# try:
# parent.pop()
# return general.PyNode(parent)
# except RuntimeError:
# pass
#
# def getChildren(self, **kwargs):
# # TODO : print warning regarding removal of kwargs
# children = []
# thisDag = self.__apiobject__()
# for i in range( thisDag.childCount() ):
# child = _api.MDagPath( thisDag )
# child.push( thisDag.child(i) )
# children.append( general.PyNode(child) )
# return children
def firstParent2(self, **kwargs):
"""unlike the firstParent command which determines the parent via string formatting, this
command uses the listRelatives command
"""
kwargs['parent'] = True
kwargs.pop('p', None)
# if longNames:
kwargs['fullPath'] = True
kwargs.pop('f', None)
try:
res = cmds.listRelatives(self, **kwargs)[0]
except TypeError:
return None
res = general.PyNode(res)
return res
@staticmethod
def _getDagParent(dag):
if dag.length() <= 1:
return None
# Need a copy as we'll be modifying it...
parentDag = _api.MDagPath(dag)
parentDag.pop()
# do a check for underworld paths - if we have a path:
        # |rootTrans|rootShape -> |underworldTrans|underworldShape
# then two parents up, we will get:
# |rootTrans|rootShape ->
# ...whose .node() will be unusable. check for this scenario, and if
# we get it, skip this dag path, so we go straight to:
# |rootTrans|rootShape
pathCount = parentDag.pathCount()
if pathCount > 1:
# get just the last "path piece" - if it is "empty", do an extra
# pop, to get out of the underworld
underworldPath = _api.MDagPath()
parentDag.getPath(underworldPath, pathCount - 1)
if underworldPath.length() == 0:
parentDag.pop()
return parentDag
def getParent(self, generations=1):
"""
Modifications:
- added optional generations flag, which gives the number of levels up that you wish to go for the parent;
ie:
>>> from pymel.core import *
>>> select(cl=1)
>>> bottom = group(n='bottom')
>>> group(n='almostThere')
nt.Transform(u'almostThere')
>>> group(n='nextLevel')
nt.Transform(u'nextLevel')
>>> group(n='topLevel')
nt.Transform(u'topLevel')
>>> bottom.longName()
u'|topLevel|nextLevel|almostThere|bottom'
>>> bottom.getParent(2)
nt.Transform(u'nextLevel')
Negative values will traverse from the top:
>>> bottom.getParent(generations=-3)
nt.Transform(u'almostThere')
A value of 0 will return the same node.
The default value is 1.
If generations is None, it will be interpreted as 'return all
parents', and a list will be returned.
Since the original command returned None if there is no parent, to sync with this behavior, None will
be returned if generations is out of bounds (no IndexError will be thrown).
:rtype: `DagNode`
"""
# Get the parent through the api - listRelatives doesn't handle instances correctly,
# and string processing seems unreliable...
res = general._getParent(self._getDagParent, self.__apimdagpath__(), generations)
if generations is None:
if res is None:
return []
return [general.PyNode(x) for x in res]
elif res is not None:
return general.PyNode(res)
def getAllParents(self):
"""
Return a list of all parents above this.
Starts from the parent immediately above, going up.
:rtype: `DagNode` list
"""
return self.getParent(generations=None)
def getChildren(self, **kwargs):
"""
see also `childAtIndex`
for flags, see pymel.core.general.listRelatives
:rtype: `DagNode` list
"""
kwargs['children'] = True
kwargs.pop('c', None)
return general.listRelatives(self, **kwargs)
def getSiblings(self, **kwargs):
"""
for flags, see pymel.core.general.listRelatives
:rtype: `DagNode` list
"""
# pass
try:
return [x for x in self.getParent().getChildren(**kwargs) if x != self]
except:
return []
def listRelatives(self, **kwargs):
"""
for flags, see pymel.core.general.listRelatives
:rtype: `PyNode` list
"""
return general.listRelatives(self, **kwargs)
def setParent(self, *args, **kwargs):
"""
parent
Modifications:
- if parent is 'None', world=True is automatically set
- if the given parent is the current parent, don't error
"""
result = general.parent(self, *args, **kwargs)
if result:
result = result[0]
return result
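    # Illustrative sketch (hypothetical child / parentXform nodes):
    #     child.setParent(parentXform)   # wraps general.parent(child, parentXform)
    #     child.setParent(None)          # world=True is filled in automatically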
def addChild(self, child, **kwargs):
"""parent (reversed)
:rtype: `DagNode`
"""
cmds.parent(child, self, **kwargs)
if not isinstance(child, general.PyNode):
child = general.PyNode(child)
return child
def __or__(self, child, **kwargs):
"""
operator for `addChild`. Use to easily daisy-chain together parenting operations.
The operation order visually mimics the resulting dag path:
>>> from pymel.core import *
>>> s = polySphere(name='sphere')[0]
>>> c = polyCube(name='cube')[0]
>>> t = polyTorus(name='torus')[0]
>>> s | c | t
nt.Transform(u'torus')
>>> print t.fullPath()
|sphere|cube|torus
:rtype: `DagNode`
"""
return self.addChild(child, **kwargs)
#}
#instance = instance
#--------------------------
# Shading
#--------------------------
def isDisplaced(self):
"""Returns whether any of this object's shading groups have a displacement shader input
:rtype: `bool`
"""
for sg in self.shadingGroups():
if len(sg.attr('displacementShader').inputs()):
return True
return False
def hide(self):
self.visibility.set(0)
def show(self):
self.visibility.set(1)
def isVisible(self, checkOverride=True):
if not self.attr('visibility').get():
return False
if (checkOverride and self.attr('overrideEnabled').get()
and not self.attr('overrideVisibility').get()):
return False
parent = self.getParent()
if not parent:
return True
else:
return parent.isVisible(checkOverride=checkOverride)
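    # Illustrative sketch: visibility is checked up the dag, so hiding an
    # ancestor hides the whole branch, e.g.:
    #     cube = general.PyNode('pCube1')   # hypothetical transform
    #     cube.hide()
    #     cube.getShape().isVisible()       # False - the parent is hidden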
def setObjectColor(self, color=None):
"""This command sets the dormant wireframe color of the specified objects to an integer
representing one of the user defined colors, or, if set to None, to the default class color"""
kwargs = {}
if color:
kwargs['userDefined'] = color
cmds.color(self, **kwargs)
def makeLive(self, state=True):
if not state:
cmds.makeLive(none=True)
else:
cmds.makeLive(self)
class Shape(DagNode):
__metaclass__ = _factories.MetaMayaNodeWrapper
def getTransform(self):
return self.getParent(generations=1)
def setParent(self, *args, **kwargs):
if 'shape' not in kwargs and 's' not in kwargs:
kwargs['s'] = True
super(Shape, self).setParent(*args, **kwargs)
# class Joint(Transform):
# pass
class Camera(Shape):
__metaclass__ = _factories.MetaMayaNodeWrapper
def applyBookmark(self, bookmark):
kwargs = {}
kwargs['camera'] = self
kwargs['edit'] = True
kwargs['setCamera'] = True
cmds.cameraView(bookmark, **kwargs)
def addBookmark(self, bookmark=None):
kwargs = {}
kwargs['camera'] = self
kwargs['addBookmark'] = True
if bookmark:
kwargs['name'] = bookmark
cmds.cameraView(**kwargs)
def removeBookmark(self, bookmark):
kwargs = {}
kwargs['camera'] = self
kwargs['removeBookmark'] = True
kwargs['name'] = bookmark
cmds.cameraView(**kwargs)
def updateBookmark(self, bookmark):
kwargs = {}
kwargs['camera'] = self
kwargs['edit'] = True
kwargs['setView'] = True
cmds.cameraView(bookmark, **kwargs)
def listBookmarks(self):
return self.bookmarks.inputs()
@_factories.addMelDocs('dolly')
def dolly(self, distance, relative=True):
kwargs = {}
kwargs['distance'] = distance
if relative:
kwargs['relative'] = True
else:
kwargs['absolute'] = True
cmds.dolly(self, **kwargs)
@_factories.addMelDocs('roll')
def roll(self, degree, relative=True):
kwargs = {}
kwargs['degree'] = degree
if relative:
kwargs['relative'] = True
else:
kwargs['absolute'] = True
cmds.roll(self, **kwargs)
# TODO: the functionFactory is causing these methods to have their docs doubled-up, in both pymel.track, and pymel.Camera.track
#dolly = _factories.functionFactory( cmds.dolly )
#roll = _factories.functionFactory( cmds.roll )
orbit = _factories.functionFactory(cmds.orbit)
track = _factories.functionFactory(cmds.track)
tumble = _factories.functionFactory(cmds.tumble)
class Transform(DagNode):
__metaclass__ = _factories.MetaMayaNodeWrapper
_componentAttributes = {'rotatePivot': (general.Pivot, 'rotatePivot'),
'scalePivot': (general.Pivot, 'scalePivot')}
# def __getattr__(self, attr):
# try :
# return super(general.PyNode, self).__getattr__(attr)
# except AttributeError, msg:
# try:
# return self.getShape().attr(attr)
# except AttributeError:
# pass
#
# # it doesn't exist on the class
# try:
# return self.attr(attr)
# except MayaAttributeError, msg:
# # try the shape
# try: return self.getShape().attr(attr)
# except AttributeError: pass
# # since we're being called via __getattr__ we don't know whether the user was trying
# # to get a class method or a maya attribute, so we raise a more generic AttributeError
# raise AttributeError, msg
def __getattr__(self, attr):
"""
Checks in the following order:
1. Functions on this node class
2. Attributes on this node class
3. Functions on this node class's shape
4. Attributes on this node class's shape
"""
try:
# print "Transform.__getattr__(%r)" % attr
# Functions through normal inheritance
res = DependNode.__getattr__(self, attr)
except AttributeError, e:
# Functions via shape inheritance , and then, implicitly, Attributes
for shape in self.getShapes():
try:
return getattr(shape, attr)
except AttributeError:
pass
raise e
return res
def __setattr__(self, attr, val):
"""
Checks in the following order:
1. Functions on this node class
2. Attributes on this node class
3. Functions on this node class's shape
4. Attributes on this node class's shape
"""
try:
# print "Transform.__setattr__", attr, val
# Functions through normal inheritance
return DependNode.__setattr__(self, attr, val)
except AttributeError, e:
# Functions via shape inheritance , and then, implicitly, Attributes
# print "Trying shape"
shape = self.getShape()
if shape:
try:
return setattr(shape, attr, val)
except AttributeError:
pass
raise e
def attr(self, attr, checkShape=True):
"""
        when checkShape is enabled, if the attribute does not exist on the transform but does on the shape, then the shape's attribute will
be returned.
:rtype: `Attribute`
"""
# print "ATTR: Transform"
try:
res = self._attr(attr, checkShape)
except general.MayaAttributeError, e:
if checkShape:
try:
res = self.getShape().attr(attr)
except AttributeError:
                    raise e
                return res
            raise e
return res
# def __getattr__(self, attr):
# if attr.startswith('__') and attr.endswith('__'):
# return super(general.PyNode, self).__getattr__(attr)
#
# at = Attribute( '%s.%s' % (self, attr) )
#
# # if the attribute does not exist on this node try the shape node
# if not at.exists():
# try:
# childAttr = getattr( self.getShape(), attr)
# try:
# if childAttr.exists():
# return childAttr
# except AttributeError:
# return childAttr
# except (AttributeError,TypeError):
# pass
#
# return at
#
# def __setattr__(self, attr,val):
# if attr.startswith('_'):
# attr = attr[1:]
#
# at = Attribute( '%s.%s' % (self, attr) )
#
# # if the attribute does not exist on this node try the shape node
# if not at.exists():
# try:
# childAttr = getattr( self.getShape(), attr )
# try:
# if childAttr.exists():
# return childAttr.set(val)
# except AttributeError:
# return childAttr.set(val)
# except (AttributeError,TypeError):
# pass
#
# return at.set(val)
"""
def move( self, *args, **kwargs ):
return move( self, *args, **kwargs )
def scale( self, *args, **kwargs ):
return scale( self, *args, **kwargs )
def rotate( self, *args, **kwargs ):
return rotate( self, *args, **kwargs )
def align( self, *args, **kwargs):
args = (self,) + args
cmds.align(self, *args, **kwargs)
"""
# NOTE : removed this via proxyClass
# # workaround for conflict with translate method on basestring
# def _getTranslate(self):
# return self.__getattr__("translate")
# def _setTranslate(self, val):
# return self.__setattr__("translate", val)
# translate = property( _getTranslate , _setTranslate )
def getShape(self, **kwargs):
"""
:rtype: `DagNode`
"""
kwargs['shapes'] = True
try:
return self.getChildren(**kwargs)[0]
except IndexError:
pass
def getShapes(self, **kwargs):
"""
:rtype: `DagNode`
"""
kwargs['shapes'] = True
return self.getChildren(**kwargs)
def ungroup(self, **kwargs):
return cmds.ungroup(self, **kwargs)
# @_factories.editflag('xform','scale')
# def setScale( self, val, **kwargs ):
# cmds.xform( self, **kwargs )
# @_factories.editflag('xform','rotation')
# def setRotationOld( self, val, **kwargs ):
# cmds.xform( self, **kwargs )
#
# @_factories.editflag('xform','translation')
# def setTranslationOld( self, val, **kwargs ):
# cmds.xform( self, **kwargs )
#
# @_factories.editflag('xform','scalePivot')
# def setScalePivotOld( self, val, **kwargs ):
# cmds.xform( self, **kwargs )
#
# @_factories.editflag('xform','rotatePivot')
# def setRotatePivotOld( self, val, **kwargs ):
# cmds.xform( self, **kwargs )
# @_factories.editflag('xform','pivots')
# def setPivots( self, val, **kwargs ):
# cmds.xform( self, **kwargs )
# @_factories.editflag('xform','rotateAxis')
# def setRotateAxisOld( self, val, **kwargs ):
# cmds.xform( self, **kwargs )
#
# @_factories.editflag('xform','shear')
# def setShearingOld( self, val, **kwargs ):
# cmds.xform( self, **kwargs )
@_factories.addMelDocs('xform', 'rotateAxis')
def setMatrix(self, val, **kwargs):
"""xform -scale"""
kwargs['matrix'] = val
cmds.xform(self, **kwargs)
# @_factories.queryflag('xform','scale')
# def getScaleOld( self, **kwargs ):
# return datatypes.Vector( cmds.xform( self, **kwargs ) )
def _getSpaceArg(self, space, kwargs):
"for internal use only"
if kwargs.pop('worldSpace', kwargs.pop('ws', False)):
space = 'world'
elif kwargs.pop('objectSpace', kwargs.pop('os', False)):
space = 'object'
return space
def _isRelativeArg(self, kwargs):
isRelative = kwargs.pop('relative', kwargs.pop('r', None))
if isRelative is None:
isRelative = not kwargs.pop('absolute', kwargs.pop('a', True))
return isRelative
# @_factories.queryflag('xform','translation')
# def getTranslationOld( self, **kwargs ):
# return datatypes.Vector( cmds.xform( self, **kwargs ) )
@_factories.addApiDocs(_api.MFnTransform, 'setTranslation')
def setTranslation(self, vector, space='object', **kwargs):
if self._isRelativeArg(kwargs):
return self.translateBy(vector, space, **kwargs)
space = self._getSpaceArg(space, kwargs)
return self._setTranslation(vector, space=space)
@_factories.addApiDocs(_api.MFnTransform, 'getTranslation')
def getTranslation(self, space='object', **kwargs):
space = self._getSpaceArg(space, kwargs)
return self._getTranslation(space=space)
@_factories.addApiDocs(_api.MFnTransform, 'translateBy')
def translateBy(self, vector, space='object', **kwargs):
space = self._getSpaceArg(space, kwargs)
curr = self._getTranslation(space)
self._translateBy(vector, space)
new = self._getTranslation(space)
undoItem = _factories.ApiUndoItem(Transform.setTranslation, (self, new, space), (self, curr, space))
_factories.apiUndo.append(undoItem)
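    # Illustrative sketch: because an ApiUndoItem is recorded, the api-side move
    # can be undone like any other edit, e.g.:
    #     t = general.PyNode('pCube1')      # hypothetical transform
    #     t.translateBy((1, 0, 0), space='world')
    #     # a subsequent undo restores the previous translation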
@_factories.addApiDocs(_api.MFnTransform, 'setScale')
def setScale(self, scale, **kwargs):
if self._isRelativeArg(kwargs):
return self.scaleBy(scale, **kwargs)
return self._setScale(scale)
@_factories.addApiDocs(_api.MFnTransform, 'scaleBy')
def scaleBy(self, scale, **kwargs):
curr = self.getScale()
self._scaleBy(scale)
new = self.getScale()
undoItem = _factories.ApiUndoItem(Transform.setScale, (self, new), (self, curr))
_factories.apiUndo.append(undoItem)
@_factories.addApiDocs(_api.MFnTransform, 'setShear')
def setShear(self, shear, **kwargs):
if self._isRelativeArg(kwargs):
return self.shearBy(shear, **kwargs)
return self._setShear(shear)
@_factories.addApiDocs(_api.MFnTransform, 'shearBy')
def shearBy(self, shear, **kwargs):
curr = self.getShear()
self._shearBy(shear)
new = self.getShear()
undoItem = _factories.ApiUndoItem(Transform.setShear, (self, new), (self, curr))
_factories.apiUndo.append(undoItem)
# @_factories.queryflag('xform','rotatePivot')
# def getRotatePivotOld( self, **kwargs ):
# return datatypes.Vector( cmds.xform( self, **kwargs ) )
@_factories.addApiDocs(_api.MFnTransform, 'setRotatePivot')
def setRotatePivot(self, point, space='object', balance=True, **kwargs):
space = self._getSpaceArg(space, kwargs)
return self._setRotatePivot(point, space=space, balance=balance)
@_factories.addApiDocs(_api.MFnTransform, 'rotatePivot')
def getRotatePivot(self, space='object', **kwargs):
space = self._getSpaceArg(space, kwargs)
return self._getRotatePivot(space=space)
@_factories.addApiDocs(_api.MFnTransform, 'setRotatePivotTranslation')
def setRotatePivotTranslation(self, vector, space='object', **kwargs):
space = self._getSpaceArg(space, kwargs)
return self._setRotatePivotTranslation(vector, space=space)
@_factories.addApiDocs(_api.MFnTransform, 'rotatePivotTranslation')
def getRotatePivotTranslation(self, space='object', **kwargs):
space = self._getSpaceArg(space, kwargs)
return self._getRotatePivotTranslation(space=space)
# @_factories.queryflag('xform','rotation')
# def getRotationOld( self, **kwargs ):
# return datatypes.Vector( cmds.xform( self, **kwargs ) )
@_factories.addApiDocs(_api.MFnTransform, 'setRotation')
def setRotation(self, rotation, space='object', **kwargs):
'''
Modifications:
- rotation may be given as an EulerRotation, Quaternion, or iterable of 3
or 4 components (to specify an euler/quaternion, respectively)
'''
# quaternions are the only method that support a space parameter
if self._isRelativeArg(kwargs):
return self.rotateBy(rotation, space, **kwargs)
spaceIndex = datatypes.Spaces.getIndex(self._getSpaceArg(space, kwargs))
if not isinstance(rotation, (_api.MQuaternion, _api.MEulerRotation)):
rotation = list(rotation)
if len(rotation) == 3:
# using datatypes.Angle(x) means current angle-unit should be
# respected
rotation = [datatypes.Angle(x).asRadians() for x in rotation]
rotation = _api.MEulerRotation(*rotation)
elif len(rotation) == 4:
rotation = _api.MQuaternion(*rotation)
else:
raise ValueError("rotation given to setRotation must have either 3 or 4 elements (for euler or quaternion, respectively)")
if isinstance(rotation, _api.MEulerRotation):
# MFnTransform.setRotation doesn't have a (non-deprecated) override
# which takes euler angles AND a transform space... this sort of
# makes sense, since the "unique" information that euler angles can
            # potentially carry - ie, rotation > 360 degrees - only really makes
# sense within the "transform" space. So, only use EulerRotation if
# we're using transform space...
if datatypes.equivalentSpace(spaceIndex, _api.MSpace.kTransform,
rotationOnly=True):
self.__apimfn__().setRotation(rotation)
return
else:
rotation = rotation.asQuaternion()
self.__apimfn__().setRotation(rotation, spaceIndex)
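    # Illustrative sketch: setRotation accepts 3 values (euler, converted via
    # datatypes.Angle so the current angle unit is respected) or 4 values
    # (quaternion), e.g. for a hypothetical transform t:
    #     t.setRotation([90, 0, 0])      # euler, in the current angle unit
    #     t.setRotation([0, 0, 0, 1])    # identity quaternion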
# @_factories.addApiDocs( _api.MFnTransform, 'getRotation' )
# def getRotation(self, space='object', **kwargs):
# # quaternions are the only method that support a space parameter
# space = self._getSpaceArg(space, kwargs )
# quat = _api.MQuaternion()
# _api.MFnTransform(self.__apimfn__()).getRotation(quat, datatypes.Spaces.getIndex(space) )
# return datatypes.EulerRotation( quat.asEulerRotation() )
@_factories.addApiDocs(_api.MFnTransform, 'getRotation', overloadIndex=1)
def getRotation(self, space='object', quaternion=False, **kwargs):
'''
Modifications:
        - added 'quaternion' keyword arg, to specify whether the result should
          be returned as a Quaternion object, as opposed to the default
EulerRotation object
- added 'space' keyword arg, which defaults to 'object'
'''
# quaternions are the only method that support a space parameter
space = self._getSpaceArg(space, kwargs)
if space.lower() in ('object', 'pretransform', 'transform') and not quaternion:
# In this case, we can just go straight to the EulerRotation,
# without having to go through Quaternion - this means we will
# get information like angles > 360 degrees
euler = _api.MEulerRotation()
self.__apimfn__().getRotation(euler)
rot = datatypes.EulerRotation(euler)
else:
rot = self._getRotation(space=space)
if not quaternion:
rot = rot.asEulerRotation()
if isinstance(rot, datatypes.EulerRotation):
rot.setDisplayUnit(datatypes.Angle.getUIUnit())
return rot
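    # Illustrative sketch (hypothetical transform t):
    #     t.getRotation()                  # EulerRotation, in the UI display unit
    #     t.getRotation(quaternion=True)   # Quaternion instead
    #     t.getRotation(space='world')     # world-space rotation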
@_factories.addApiDocs(_api.MFnTransform, 'rotateBy')
def rotateBy(self, rotation, space='object', **kwargs):
space = self._getSpaceArg(space, kwargs)
curr = self.getRotation(space)
self._rotateBy(rotation, space)
new = self.getRotation(space)
undoItem = _factories.ApiUndoItem(Transform.setRotation, (self, new, space), (self, curr, space))
_factories.apiUndo.append(undoItem)
# @_factories.queryflag('xform','scalePivot')
# def getScalePivotOld( self, **kwargs ):
# return datatypes.Vector( cmds.xform( self, **kwargs ) )
@_factories.addApiDocs(_api.MFnTransform, 'setScalePivot')
def setScalePivot(self, point, space='object', balance=True, **kwargs):
space = self._getSpaceArg(space, kwargs)
return self._setScalePivot(point, space=space, balance=balance)
@_factories.addApiDocs(_api.MFnTransform, 'scalePivot')
def getScalePivot(self, space='object', **kwargs):
space = self._getSpaceArg(space, kwargs)
return self._getScalePivot(space=space)
@_factories.addApiDocs(_api.MFnTransform, 'setScalePivotTranslation')
def setScalePivotTranslation(self, vector, space='object', **kwargs):
space = self._getSpaceArg(space, kwargs)
return self._setScalePivotTranslation(vector, space=space)
@_factories.addApiDocs(_api.MFnTransform, 'scalePivotTranslation')
def getScalePivotTranslation(self, space='object', **kwargs):
space = self._getSpaceArg(space, kwargs)
return self._getScalePivotTranslation(space=space)
@_factories.queryflag('xform', 'pivots')
def getPivots(self, **kwargs):
res = cmds.xform(self, **kwargs)
return (datatypes.Vector(res[:3]), datatypes.Vector(res[3:]))
@_factories.queryflag('xform', 'rotateAxis')
def getRotateAxis(self, **kwargs):
return datatypes.Vector(cmds.xform(self, **kwargs))
# @_factories.queryflag('xform','shear')
# def getShearOld( self, **kwargs ):
# return datatypes.Vector( cmds.xform( self, **kwargs ) )
@_factories.queryflag('xform', 'matrix')
def getMatrix(self, **kwargs):
return datatypes.Matrix(cmds.xform(self, **kwargs))
# TODO: create API equivalent of `xform -boundingBoxInvisible` so we can replace this with _api.
def getBoundingBox(self, invisible=False, space='object'):
"""xform -boundingBox and xform -boundingBoxInvisible
:rtype: `BoundingBox`
"""
kwargs = {'query': True}
if invisible:
kwargs['boundingBoxInvisible'] = True
else:
kwargs['boundingBox'] = True
if space == 'object':
kwargs['objectSpace'] = True
elif space == 'world':
kwargs['worldSpace'] = True
else:
raise ValueError('unknown space %r' % space)
res = cmds.xform(self, **kwargs)
# return ( datatypes.Vector(res[:3]), datatypes.Vector(res[3:]) )
return datatypes.BoundingBox(res[:3], res[3:])
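    # Illustrative sketch (hypothetical transform t):
    #     bb = t.getBoundingBox(space='world')   # datatypes.BoundingBox
    #     bb.min(), bb.max(), bb.center()        # usual bounding-box queries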
def getBoundingBoxMin(self, invisible=False, space='object'):
"""
:rtype: `Vector`
"""
return self.getBoundingBox(invisible, space)[0]
# return self.getBoundingBox(invisible).min()
def getBoundingBoxMax(self, invisible=False, space='object'):
"""
:rtype: `Vector`
"""
return self.getBoundingBox(invisible, space)[1]
# return self.getBoundingBox(invisible).max()
# def centerPivots(self, **kwargs):
# """xform -centerPivots"""
# kwargs['centerPivots'] = True
# cmds.xform( self, **kwargs )
#
# def zeroTransformPivots(self, **kwargs):
# """xform -zeroTransformPivots"""
# kwargs['zeroTransformPivots'] = True
# cmds.xform( self, **kwargs )
class Joint(Transform):
__metaclass__ = _factories.MetaMayaNodeWrapper
connect = _factories.functionFactory(cmds.connectJoint, rename='connect')
disconnect = _factories.functionFactory(cmds.disconnectJoint, rename='disconnect')
insert = _factories.functionFactory(cmds.insertJoint, rename='insert')
if versions.isUnlimited():
class FluidEmitter(Transform):
__metaclass__ = _factories.MetaMayaNodeWrapper
fluidVoxelInfo = _factories.functionFactory(cmds.fluidVoxelInfo, rename='fluidVoxelInfo')
loadFluid = _factories.functionFactory(cmds.loadFluid, rename='loadFluid')
resampleFluid = _factories.functionFactory(cmds.resampleFluid, rename='resampleFluid')
saveFluid = _factories.functionFactory(cmds.saveFluid, rename='saveFluid')
setFluidAttr = _factories.functionFactory(cmds.setFluidAttr, rename='setFluidAttr')
getFluidAttr = _factories.functionFactory(cmds.getFluidAttr, rename='getFluidAttr')
class RenderLayer(DependNode):
def listMembers(self, fullNames=True):
if fullNames:
return map(general.PyNode, _util.listForNone(cmds.editRenderLayerMembers(self, q=1, fullNames=True)))
else:
return _util.listForNone(cmds.editRenderLayerMembers(self, q=1, fullNames=False))
def addMembers(self, members, noRecurse=True):
cmds.editRenderLayerMembers(self, members, noRecurse=noRecurse)
def removeMembers(self, members):
cmds.editRenderLayerMembers(self, members, remove=True)
def listAdjustments(self):
return map(general.PyNode, _util.listForNone(cmds.editRenderLayerAdjustment(self, layer=1, q=1)))
def addAdjustments(self, members):
return cmds.editRenderLayerAdjustment(members, layer=self)
def removeAdjustments(self, members):
return cmds.editRenderLayerAdjustment(members, layer=self, remove=True)
def setCurrent(self):
cmds.editRenderLayerGlobals(currentRenderLayer=self)
class DisplayLayer(DependNode):
def listMembers(self, fullNames=True):
if fullNames:
return map(general.PyNode, _util.listForNone(cmds.editDisplayLayerMembers(self, q=1, fullNames=True)))
else:
return _util.listForNone(cmds.editDisplayLayerMembers(self, q=1, fullNames=False))
def addMembers(self, members, noRecurse=True):
cmds.editDisplayLayerMembers(self, members, noRecurse=noRecurse)
def removeMembers(self, members):
cmds.editDisplayLayerMembers(self, members, remove=True)
def setCurrent(self):
        cmds.editDisplayLayerGlobals(currentDisplayLayer=self)
class Constraint(Transform):
def setWeight(self, weight, *targetObjects):
inFunc = getattr(cmds, self.type())
if not targetObjects:
targetObjects = self.getTargetList()
constraintObj = self.constraintParentInverseMatrix.inputs()[0]
args = list(targetObjects) + [constraintObj]
return inFunc(*args, **{'edit': True, 'weight': weight})
def getWeight(self, *targetObjects):
inFunc = getattr(cmds, self.type())
if not targetObjects:
targetObjects = self.getTargetList()
constraintObj = self.constraintParentInverseMatrix.inputs()[0]
args = list(targetObjects) + [constraintObj]
return inFunc(*args, **{'query': True, 'weight': True})
class GeometryShape(Shape):
def __getattr__(self, attr):
# print "Mesh.__getattr__", attr
try:
return self.comp(attr)
except general.MayaComponentError:
# print "getting super", attr
return super(GeometryShape, self).__getattr__(attr)
class DeformableShape(GeometryShape):
@classmethod
def _numCVsFunc_generator(cls, formFunc, spansPlusDegreeFunc, spansFunc,
name=None, doc=None):
"""
Intended to be used by NurbsCurve / NurbsSurface to generate
functions which give the 'true' number of editable CVs,
as opposed to just numSpans + degree.
(The two values will differ if we have a periodic curve).
Note that this will usually need to be called outside/after the
class definition, as formFunc/spansFunc/etc will not be defined
until then, as they are added by the metaclass.
"""
def _numCvs_generatedFunc(self, editableOnly=True):
if editableOnly and formFunc(self) == self.Form.periodic:
return spansFunc(self)
else:
return spansPlusDegreeFunc(self)
if name:
_numCvs_generatedFunc.__name__ = name
if doc:
_numCvs_generatedFunc.__doc__ = doc
return _numCvs_generatedFunc
@classmethod
def _numEPsFunc_generator(cls, formFunc, spansFunc,
name=None, doc=None):
"""
Intended to be used by NurbsCurve / NurbsSurface to generate
functions which give the 'true' number of editable EPs,
as opposed to just numSpans.
(The two values will differ if we have a periodic curve).
Note that this will usually need to be called outside/after the
class definition, as formFunc/spansFunc will not be defined
until then, as they are added by the metaclass.
"""
def _numEPs_generatedFunc(self, editableOnly=True):
if editableOnly and formFunc(self) == self.Form.periodic:
return spansFunc(self)
else:
return spansFunc(self) + 1
if name:
_numEPs_generatedFunc.__name__ = name
if doc:
_numEPs_generatedFunc.__doc__ = doc
return _numEPs_generatedFunc
class ControlPoint(DeformableShape):
pass
class CurveShape(DeformableShape):
pass
class NurbsCurve(CurveShape):
__metaclass__ = _factories.MetaMayaNodeWrapper
_componentAttributes = {'u': general.NurbsCurveParameter,
'cv': general.NurbsCurveCV,
'controlVerts': general.NurbsCurveCV,
'ep': general.NurbsCurveEP,
'editPoints': general.NurbsCurveEP,
'knot': general.NurbsCurveKnot,
'knots': general.NurbsCurveKnot}
# apiToMelBridge maps MFnNurbsCurve.numCVs => NurbsCurve._numCVsApi
NurbsCurve.numCVs = \
NurbsCurve._numCVsFunc_generator(NurbsCurve.form,
NurbsCurve._numCVsApi,
NurbsCurve.numSpans,
name='numCVs',
doc="""
Returns the number of CVs.
:Parameters:
editableOnly : `bool`
If editableOnly evaluates to True (default), then this will return
the number of cvs that can be actually edited (and also the highest
index that may be used for cv's - ie, if
myCurve.numCVs(editableOnly=True) == 4
then allowable cv indices go from
        myCurve.cv[0] to myCurve.cv[3]
        If editableOnly is False, then this will return the underlying
number of cvs used to define the mathematical curve -
degree + numSpans.
These will only differ if the form is 'periodic', in which
case the editable number will be numSpans (as the last 'degree'
cv's are 'locked' to be the same as the first 'degree' cvs).
In all other cases, the number of cvs will be degree + numSpans.
:Examples:
>>> from pymel.core import *
>>> # a periodic curve
>>> myCurve = curve(name='periodicCurve1', d=3, periodic=True, k=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12), pw=[(4, -4, 0, 1), (5.5, 0, 0, 1), (4, 4, 0, 1), (0, 5.5, 0, 1), (-4, 4, 0, 1), (-5.5, 0, 0, 1), (-4, -4, 0, 1), (0, -5.5, 0, 1), (4, -4, 0, 1), (5.5, 0, 0, 1), (4, 4, 0, 1)] )
>>> myCurve.cv
NurbsCurveCV(u'periodicCurveShape1.cv[0:7]')
>>> myCurve.numCVs()
8
>>> myCurve.numCVs(editableOnly=False)
11
>>>
>>> # an open curve
>>> myCurve = curve(name='openCurve1', d=3, periodic=False, k=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12), pw=[(4, -4, 0, 1), (5.5, 0, 0, 1), (4, 4, 0, 1), (0, 5.5, 0, 1), (-4, 4, 0, 1), (-5.5, 0, 0, 1), (-4, -4, 0, 1), (0, -5.5, 0, 1), (4, -4, 0, 1), (5.5, 0, 0, 1), (4, 4, 0, 1)] )
>>> myCurve.cv
NurbsCurveCV(u'openCurveShape1.cv[0:10]')
>>> myCurve.numCVs()
11
>>> myCurve.numCVs(editableOnly=False)
11
:rtype: `int`
""")
NurbsCurve.numEPs = \
NurbsCurve._numEPsFunc_generator(NurbsCurve.form,
NurbsCurve.numSpans,
name='numEPs',
doc="""
Returns the number of EPs.
:Examples:
>>> from pymel.core import *
>>> # a periodic curve
>>> myCurve = curve(name='periodicCurve2', d=3, periodic=True, k=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12), pw=[(4, -4, 0, 1), (5.5, 0, 0, 1), (4, 4, 0, 1), (0, 5.5, 0, 1), (-4, 4, 0, 1), (-5.5, 0, 0, 1), (-4, -4, 0, 1), (0, -5.5, 0, 1), (4, -4, 0, 1), (5.5, 0, 0, 1), (4, 4, 0, 1)] )
>>> myCurve.ep
NurbsCurveEP(u'periodicCurveShape2.ep[0:7]')
>>> myCurve.numEPs()
8
>>>
>>> # an open curve
>>> myCurve = curve(name='openCurve2', d=3, periodic=False, k=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12), pw=[(4, -4, 0, 1), (5.5, 0, 0, 1), (4, 4, 0, 1), (0, 5.5, 0, 1), (-4, 4, 0, 1), (-5.5, 0, 0, 1), (-4, -4, 0, 1), (0, -5.5, 0, 1), (4, -4, 0, 1), (5.5, 0, 0, 1), (4, 4, 0, 1)] )
>>> myCurve.ep
NurbsCurveEP(u'openCurveShape2.ep[0:8]')
>>> myCurve.numEPs()
9
:rtype: `int`
""")
class SurfaceShape(ControlPoint):
pass
class NurbsSurface(SurfaceShape):
__metaclass__ = _factories.MetaMayaNodeWrapper
_componentAttributes = {'u': (general.NurbsSurfaceRange, 'u'),
'uIsoparm': (general.NurbsSurfaceRange, 'u'),
'v': (general.NurbsSurfaceRange, 'v'),
'vIsoparm': (general.NurbsSurfaceRange, 'v'),
'uv': (general.NurbsSurfaceRange, 'uv'),
'cv': general.NurbsSurfaceCV,
'controlVerts': general.NurbsSurfaceCV,
'ep': general.NurbsSurfaceEP,
'editPoints': general.NurbsSurfaceEP,
'knot': general.NurbsSurfaceKnot,
'knots': general.NurbsSurfaceKnot,
'sf': general.NurbsSurfaceFace,
'faces': general.NurbsSurfaceFace}
# apiToMelBridge maps MFnNurbsCurve._numCVsInU => NurbsCurve._numCVsInUApi
NurbsSurface.numCVsInU = \
NurbsSurface._numCVsFunc_generator(NurbsSurface.formInU,
NurbsSurface._numCVsInUApi,
NurbsSurface.numSpansInU,
name='numCVsInU',
doc="""
Returns the number of CVs in the U direction.
:Parameters:
editableOnly : `bool`
If editableOnly evaluates to True (default), then this will return
the number of cvs that can be actually edited (and also the highest
index that may be used for u - ie, if
mySurf.numCVsInU(editableOnly=True) == 4
then allowable u indices go from
mySurf.cv[0][*] to mySurf.cv[3][*]
        If editableOnly is False, then this will return the underlying
number of cvs used to define the mathematical curve in u -
degreeU + numSpansInU.
These will only differ if the form in u is 'periodic', in which
case the editable number will be numSpansInU (as the last 'degree'
cv's are 'locked' to be the same as the first 'degree' cvs).
In all other cases, the number of cvs will be degreeU + numSpansInU.
:Examples:
>>> from pymel.core import *
>>> # a periodic surface
>>> mySurf = surface(name='periodicSurf1', du=3, dv=1, fu='periodic', fv='open', ku=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12), kv=(0, 1), pw=[(4, -4, 0, 1), (4, -4, -2.5, 1), (5.5, 0, 0, 1), (5.5, 0, -2.5, 1), (4, 4, 0, 1), (4, 4, -2.5, 1), (0, 5.5, 0, 1), (0, 5.5, -2.5, 1), (-4, 4, 0, 1), (-4, 4, -2.5, 1), (-5.5, 0, 0, 1), (-5.5, 0, -2.5, 1), (-4, -4, 0, 1), (-4, -4, -2.5, 1), (0, -5.5, 0, 1), (0, -5.5, -2.5, 1), (4, -4, 0, 1), (4, -4, -2.5, 1), (5.5, 0, 0, 1), (5.5, 0, -2.5, 1), (4, 4, 0, 1), (4, 4, -2.5, 1)] )
>>> sorted(mySurf.cv[:][0].indices()) # doctest: +ELLIPSIS
[ComponentIndex((0, 0), ... ComponentIndex((7, 0), label=None)]
>>> mySurf.numCVsInU()
8
>>> mySurf.numCVsInU(editableOnly=False)
11
>>>
>>> # an open surface
>>> mySurf = surface(name='openSurf1', du=3, dv=1, fu='open', fv='open', ku=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12), kv=(0, 1), pw=((4, -4, 0, 1), (4, -4, -2.5, 1), (5.5, 0, 0, 1), (5.5, 0, -2.5, 1), (4, 4, 0, 1), (4, 4, -2.5, 1), (0, 5.5, 0, 1), (0, 5.5, -2.5, 1), (-4, 4, 0, 1), (-4, 4, -2.5, 1), (-5.5, 0, 0, 1), (-5.5, 0, -2.5, 1), (-4, -4, 0, 1), (-4, -4, -2.5, 1), (0, -5.5, 0, 1), (0, -5.5, -2.5, 1), (4, -4, 0, 1), (4, -4, -2.5, 1), (5.5, 0, 0, 1), (5.5, 0, -2.5, 1), (4, 4, 0, 1), (4, 4, -2.5, 1)) )
>>> sorted(mySurf.cv[:][0].indices()) # doctest: +ELLIPSIS
[ComponentIndex((0, 0), ... ComponentIndex((10, 0), label=None)]
>>> mySurf.numCVsInU()
11
>>> mySurf.numCVsInU(editableOnly=False)
11
:rtype: `int`
""")
# apiToMelBridge maps MFnNurbsCurve._numCVsInV => NurbsCurve._numCVsInVApi
NurbsSurface.numCVsInV = \
NurbsSurface._numCVsFunc_generator(NurbsSurface.formInV,
NurbsSurface._numCVsInVApi,
NurbsSurface.numSpansInV,
name='numCVsInV',
doc="""
Returns the number of CVs in the V direction.
:Parameters:
editableOnly : `bool`
If editableOnly evaluates to True (default), then this will return
the number of cvs that can be actually edited (and also the highest
index that may be used for v - ie, if
mySurf.numCVsInV(editableOnly=True) == 4
then allowable v indices go from
mySurf.cv[*][0] to mySurf.cv[*][3]
            If editableOnly is False, then this will return the underlying
number of cvs used to define the mathematical curve in v -
degreeV + numSpansInV.
These will only differ if the form in v is 'periodic', in which
case the editable number will be numSpansInV (as the last 'degree'
cv's are 'locked' to be the same as the first 'degree' cvs).
In all other cases, the number of cvs will be degreeV + numSpansInV.
:Examples:
>>> from pymel.core import *
>>> # a periodic surface
>>> mySurf = surface(name='periodicSurf2', du=1, dv=3, fu='open', fv='periodic', ku=(0, 1), kv=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12), pw=[(4, -4, 0, 1), (5.5, 0, 0, 1), (4, 4, 0, 1), (0, 5.5, 0, 1), (-4, 4, 0, 1), (-5.5, 0, 0, 1), (-4, -4, 0, 1), (0, -5.5, 0, 1), (4, -4, 0, 1), (5.5, 0, 0, 1), (4, 4, 0, 1), (4, -4, -2.5, 1), (5.5, 0, -2.5, 1), (4, 4, -2.5, 1), (0, 5.5, -2.5, 1), (-4, 4, -2.5, 1), (-5.5, 0, -2.5, 1), (-4, -4, -2.5, 1), (0, -5.5, -2.5, 1), (4, -4, -2.5, 1), (5.5, 0, -2.5, 1), (4, 4, -2.5, 1)] )
>>> sorted(mySurf.cv[0].indices()) # doctest: +ELLIPSIS
[ComponentIndex((0, 0), ... ComponentIndex((0, 7), label='cv')]
>>> mySurf.numCVsInV()
8
>>> mySurf.numCVsInV(editableOnly=False)
11
>>>
>>> # an open surface
>>> mySurf = surface(name='openSurf2', du=1, dv=3, fu='open', fv='open', ku=(0, 1), kv=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12), pw=[(4, -4, 0, 1), (5.5, 0, 0, 1), (4, 4, 0, 1), (0, 5.5, 0, 1), (-4, 4, 0, 1), (-5.5, 0, 0, 1), (-4, -4, 0, 1), (0, -5.5, 0, 1), (4, -4, 0, 1), (5.5, 0, 0, 1), (4, 4, 0, 1), (4, -4, -2.5, 1), (5.5, 0, -2.5, 1), (4, 4, -2.5, 1), (0, 5.5, -2.5, 1), (-4, 4, -2.5, 1), (-5.5, 0, -2.5, 1), (-4, -4, -2.5, 1), (0, -5.5, -2.5, 1), (4, -4, -2.5, 1), (5.5, 0, -2.5, 1), (4, 4, -2.5, 1)] )
>>> sorted(mySurf.cv[0].indices()) # doctest: +ELLIPSIS
[ComponentIndex((0, 0), ... ComponentIndex((0, 10), label='cv')]
>>> mySurf.numCVsInV()
11
>>> mySurf.numCVsInV(editableOnly=False)
11
:rtype: `int`
""")
NurbsSurface.numEPsInU = \
NurbsSurface._numEPsFunc_generator(NurbsSurface.formInU,
NurbsSurface.numSpansInU,
name='numEPsInU',
doc="""
Returns the number of EPs in the U direction.
:Examples:
>>> from pymel.core import *
>>> # a periodic surface
>>> mySurf = surface(name='periodicSurf3', du=3, dv=1, fu='periodic', fv='open', ku=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12), kv=(0, 1), pw=[(4, -4, 0, 1), (4, -4, -2.5, 1), (5.5, 0, 0, 1), (5.5, 0, -2.5, 1), (4, 4, 0, 1), (4, 4, -2.5, 1), (0, 5.5, 0, 1), (0, 5.5, -2.5, 1), (-4, 4, 0, 1), (-4, 4, -2.5, 1), (-5.5, 0, 0, 1), (-5.5, 0, -2.5, 1), (-4, -4, 0, 1), (-4, -4, -2.5, 1), (0, -5.5, 0, 1), (0, -5.5, -2.5, 1), (4, -4, 0, 1), (4, -4, -2.5, 1), (5.5, 0, 0, 1), (5.5, 0, -2.5, 1), (4, 4, 0, 1), (4, 4, -2.5, 1)] )
>>> sorted(mySurf.ep[:][0].indices()) # doctest: +ELLIPSIS
[ComponentIndex((0, 0), ... ComponentIndex((7, 0), label=None)]
>>> mySurf.numEPsInU()
8
>>>
>>> # an open surface
>>> mySurf = surface(name='openSurf3', du=3, dv=1, fu='open', fv='open', ku=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12), kv=(0, 1), pw=[(4, -4, 0, 1), (4, -4, -2.5, 1), (5.5, 0, 0, 1), (5.5, 0, -2.5, 1), (4, 4, 0, 1), (4, 4, -2.5, 1), (0, 5.5, 0, 1), (0, 5.5, -2.5, 1), (-4, 4, 0, 1), (-4, 4, -2.5, 1), (-5.5, 0, 0, 1), (-5.5, 0, -2.5, 1), (-4, -4, 0, 1), (-4, -4, -2.5, 1), (0, -5.5, 0, 1), (0, -5.5, -2.5, 1), (4, -4, 0, 1), (4, -4, -2.5, 1), (5.5, 0, 0, 1), (5.5, 0, -2.5, 1), (4, 4, 0, 1), (4, 4, -2.5, 1)] )
>>> sorted(mySurf.ep[:][0].indices()) # doctest: +ELLIPSIS
[ComponentIndex((0, 0), ... ComponentIndex((8, 0), label=None)]
>>> mySurf.numEPsInU()
9
:rtype: `int`
""")
NurbsSurface.numEPsInV = \
NurbsSurface._numEPsFunc_generator(NurbsSurface.formInV,
NurbsSurface.numSpansInV,
name='numEPsInV',
doc="""
Returns the number of EPs in the V direction.
:Examples:
>>> from pymel.core import *
>>> # a periodic surface
>>> mySurf = surface(name='periodicSurf4', du=1, dv=3, fu='open', fv='periodic', ku=(0, 1), kv=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12), pw=[(4, -4, 0, 1), (5.5, 0, 0, 1), (4, 4, 0, 1), (0, 5.5, 0, 1), (-4, 4, 0, 1), (-5.5, 0, 0, 1), (-4, -4, 0, 1), (0, -5.5, 0, 1), (4, -4, 0, 1), (5.5, 0, 0, 1), (4, 4, 0, 1), (4, -4, -2.5, 1), (5.5, 0, -2.5, 1), (4, 4, -2.5, 1), (0, 5.5, -2.5, 1), (-4, 4, -2.5, 1), (-5.5, 0, -2.5, 1), (-4, -4, -2.5, 1), (0, -5.5, -2.5, 1), (4, -4, -2.5, 1), (5.5, 0, -2.5, 1), (4, 4, -2.5, 1)] )
>>> sorted(mySurf.ep[0][:].indices()) # doctest: +ELLIPSIS
[ComponentIndex((0, 0), ... ComponentIndex((0, 7), label=None)]
>>> mySurf.numEPsInV()
8
>>>
>>> # an open surface
>>> mySurf = surface(name='openSurf4', du=1, dv=3, fu='open', fv='open', ku=(0, 1), kv=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12), pw=[(4, -4, 0, 1), (5.5, 0, 0, 1), (4, 4, 0, 1), (0, 5.5, 0, 1), (-4, 4, 0, 1), (-5.5, 0, 0, 1), (-4, -4, 0, 1), (0, -5.5, 0, 1), (4, -4, 0, 1), (5.5, 0, 0, 1), (4, 4, 0, 1), (4, -4, -2.5, 1), (5.5, 0, -2.5, 1), (4, 4, -2.5, 1), (0, 5.5, -2.5, 1), (-4, 4, -2.5, 1), (-5.5, 0, -2.5, 1), (-4, -4, -2.5, 1), (0, -5.5, -2.5, 1), (4, -4, -2.5, 1), (5.5, 0, -2.5, 1), (4, 4, -2.5, 1)] )
>>> sorted(mySurf.ep[0][:].indices()) # doctest: +ELLIPSIS
[ComponentIndex((0, 0), ... ComponentIndex((0, 8), label=None)]
>>> mySurf.numEPsInV()
9
:rtype: `int`
""")
class Mesh(SurfaceShape):
"""
The Mesh class provides wrapped access to many API methods for querying and modifying meshes. Be aware that
modifying meshes using API commands outside of the context of a plugin is still somewhat uncharted territory,
    so proceed at your own risk.
    The component types can be accessed from the `Mesh` type (or its transform) using the names you are
familiar with from MEL:
>>> from pymel.core import *
>>> p = polySphere( name='theMoon', sa=7, sh=7 )[0]
>>> p.vtx
MeshVertex(u'theMoonShape.vtx[0:43]')
>>> p.e
MeshEdge(u'theMoonShape.e[0:90]')
>>> p.f
MeshFace(u'theMoonShape.f[0:48]')
They are also accessible from their more descriptive alternatives:
>>> p.verts
MeshVertex(u'theMoonShape.vtx[0:43]')
>>> p.edges
MeshEdge(u'theMoonShape.e[0:90]')
>>> p.faces
MeshFace(u'theMoonShape.f[0:48]')
    As you'd expect, these components are all indexable:
>>> p.vtx[0]
MeshVertex(u'theMoonShape.vtx[0]')
The classes themselves contain methods for getting information about the component.
>>> p.vtx[0].connectedEdges()
MeshEdge(u'theMoonShape.e[0,6,42,77]')
This class provides support for python's extended slice notation. Typical maya ranges express a start and stop value separated
by a colon. Extended slices add a step parameter and can also represent multiple ranges separated by commas.
    Thus, a single component object can represent any collection of indices,
    including ones built from start, stop, and step values.
>>> # do every other edge between 0 and 10
>>> for edge in p.e[0:10:2]:
... print edge
...
theMoonShape.e[0]
theMoonShape.e[2]
theMoonShape.e[4]
theMoonShape.e[6]
theMoonShape.e[8]
theMoonShape.e[10]
Negative indices can be used for getting indices relative to the end:
>>> p.edges # the full range
MeshEdge(u'theMoonShape.e[0:90]')
    >>> p.edges[5:-10] # index 5 through the 10th-from-the-end
MeshEdge(u'theMoonShape.e[5:80]')
Just like with python ranges, you can leave an index out, and the logical result will follow:
>>> p.edges[:-10] # from the beginning
MeshEdge(u'theMoonShape.e[0:80]')
>>> p.edges[20:]
MeshEdge(u'theMoonShape.e[20:90]')
Or maybe you want the position of every tenth vert:
>>> for x in p.vtx[::10]:
... print x, x.getPosition()
...
theMoonShape.vtx[0] [0.270522117615, -0.900968849659, -0.339223951101]
theMoonShape.vtx[10] [-0.704405844212, -0.623489797115, 0.339223951101]
theMoonShape.vtx[20] [0.974927902222, -0.222520858049, 0.0]
theMoonShape.vtx[30] [-0.704405784607, 0.623489797115, -0.339224010706]
theMoonShape.vtx[40] [0.270522087812, 0.900968849659, 0.339223980904]
To be compatible with Maya's range notation, these slices are inclusive of the stop index.
>>> # face at index 8 will be included in the sequence
>>> for f in p.f[4:8]: print f
...
theMoonShape.f[4]
theMoonShape.f[5]
theMoonShape.f[6]
theMoonShape.f[7]
theMoonShape.f[8]
>>> from pymel.core import *
>>> obj = polyTorus()[0]
>>> colors = []
>>> for i, vtx in enumerate(obj.vtx): # doctest: +SKIP
... edgs=vtx.toEdges() # doctest: +SKIP
... totalLen=0 # doctest: +SKIP
... edgCnt=0 # doctest: +SKIP
... for edg in edgs: # doctest: +SKIP
... edgCnt += 1 # doctest: +SKIP
... l = edg.getLength() # doctest: +SKIP
... totalLen += l # doctest: +SKIP
... avgLen=totalLen / edgCnt # doctest: +SKIP
... #print avgLen # doctest: +SKIP
... currColor = vtx.getColor(0) # doctest: +SKIP
... color = datatypes.Color.black # doctest: +SKIP
... # only set blue if it has not been set before
... if currColor.b<=0.0: # doctest: +SKIP
... color.b = avgLen # doctest: +SKIP
... color.r = avgLen # doctest: +SKIP
... colors.append(color) # doctest: +SKIP
"""
__metaclass__ = _factories.MetaMayaNodeWrapper
# def __init__(self, *args, **kwargs ):
# SurfaceShape.__init__(self, self._apiobject )
# self.vtx = MeshEdge(self.__apimobject__() )
_componentAttributes = {'vtx': general.MeshVertex,
'verts': general.MeshVertex,
'e': general.MeshEdge,
'edges': general.MeshEdge,
'f': general.MeshFace,
'faces': general.MeshFace,
'map': general.MeshUV,
'uvs': general.MeshUV,
'vtxFace': general.MeshVertexFace,
'faceVerts': general.MeshVertexFace}
# Unfortunately, objects that don't yet have any mesh data - ie, if you do
# createNode('mesh') - can't be fed into MFnMesh (even though it is a mesh
# node). This means that all the methods wrapped from MFnMesh won't be
# usable in this case. While it might make sense for some methods - ie,
# editing methods like collapseEdges - to fail in this situation, some
# basic methods like numVertices should still be usable. Therefore,
# we override some of these with the mel versions (which still work...)
numVertices = _factories.makeCreateFlagMethod(cmds.polyEvaluate, 'vertex', 'numVertices')
numEdges = _factories.makeCreateFlagMethod(cmds.polyEvaluate, 'edge', 'numEdges')
numFaces = _factories.makeCreateFlagMethod(cmds.polyEvaluate, 'face', 'numFaces')
numTriangles = _factories.makeCreateFlagMethod(cmds.polyEvaluate, 'triangle', 'numTriangles')
numSelectedTriangles = _factories.makeCreateFlagMethod(cmds.polyEvaluate, 'triangleComponent', 'numSelectedTriangles')
numSelectedFaces = _factories.makeCreateFlagMethod(cmds.polyEvaluate, 'faceComponent', 'numSelectedFaces')
numSelectedEdges = _factories.makeCreateFlagMethod(cmds.polyEvaluate, 'edgeComponent', 'numSelectedEdges')
numSelectedVertices = _factories.makeCreateFlagMethod(cmds.polyEvaluate, 'vertexComponent', 'numSelectedVertices')
area = _factories.makeCreateFlagMethod(cmds.polyEvaluate, 'area')
worldArea = _factories.makeCreateFlagMethod(cmds.polyEvaluate, 'worldArea')
if versions.current() >= versions.v2016:
@_factories.addApiDocs(_api.MFnMesh, 'getUVAtPoint')
def getUVAtPoint(self, uvPoint, space=_api.MSpace.kObject, uvSet=None, returnClosestPolygon=False):
result = self._getUVAtPoint(uvPoint, space, uvSet)
if returnClosestPolygon:
return result
return result[0]
if versions.current() >= versions.v2009:
@_factories.addApiDocs(_api.MFnMesh, 'currentUVSetName')
def getCurrentUVSetName(self):
return self.__apimfn__().currentUVSetName(self.instanceNumber())
@_factories.addApiDocs(_api.MFnMesh, 'currentColorSetName')
def getCurrentColorSetName(self):
return self.__apimfn__().currentColorSetName(self.instanceNumber())
else:
@_factories.addApiDocs(_api.MFnMesh, 'currentUVSetName')
def getCurrentUVSetName(self):
return self.__apimfn__().currentUVSetName()
@_factories.addApiDocs(_api.MFnMesh, 'currentColorSetName')
def getCurrentColorSetName(self):
return self.__apimfn__().currentColorSetName()
@_factories.addApiDocs(_api.MFnMesh, 'numColors')
def numColors(self, colorSet=None):
mfn = self.__apimfn__()
# If we have an empty mesh, we will get an MFnDagNode...
if not isinstance(mfn, _api.MFnMesh):
return 0
args = []
if colorSet:
args.append(colorSet)
return mfn.numColors(*args)
# Unfortunately, objects that don't yet have any mesh data - ie, if you do
# createNode('mesh') - can't be fed into MFnMesh (even though it is a mesh
# node). This means that all the methods wrapped from MFnMesh won't be
# usable in this case. While it might make sense for some methods - ie,
# editing methods like collapseEdges - to fail in this situation, some
# basic methods like numVertices should still be usable. Therefore,
# we override some of these with the mel versions (which still work...)
def _makeApiMethodWrapForEmptyMesh(apiMethodName, baseMethodName=None,
resultName=None, defaultVal=0):
if baseMethodName is None:
baseMethodName = '_' + apiMethodName
if resultName is None:
resultName = apiMethodName
baseMethod = getattr(Mesh, baseMethodName)
@_factories.addApiDocs(_api.MFnMesh, apiMethodName)
def methodWrapForEmptyMesh(self, *args, **kwargs):
# If we have an empty mesh, we will get an MFnDagNode...
mfn = self.__apimfn__()
if not isinstance(mfn, _api.MFnMesh):
return defaultVal
return baseMethod(self, *args, **kwargs)
methodWrapForEmptyMesh.__name__ = resultName
return methodWrapForEmptyMesh
for _apiMethodName in '''numColorSets
numFaceVertices
numNormals
numUVSets
numUVs'''.split():
_wrappedFunc = _makeApiMethodWrapForEmptyMesh(_apiMethodName)
setattr(Mesh, _wrappedFunc.__name__, _wrappedFunc)
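# The mel-backed overrides and _makeApiMethodWrapForEmptyMesh wrappers above
# exist so that basic queries still work on a mesh node that has no geometry.
# A rough, illustrative sketch of the expected behavior (not a doctest, and it
# assumes a clean scene):
#
#     >>> import pymel.core as pm
#     >>> empty = pm.createNode('mesh')       # mesh node with no data yet
#     >>> empty.numVertices()                 # mel-backed override still works
#     0
#     >>> empty.numColors()                   # api wrapper falls back to the default
#     0
#     >>> cube = pm.polyCube()[0].getShape()  # a real mesh goes through MFnMesh as usual
#     >>> cube.numVertices()
#     8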
class Subdiv(SurfaceShape):
__metaclass__ = _factories.MetaMayaNodeWrapper
_componentAttributes = {'smp': general.SubdVertex,
'verts': general.SubdVertex,
'sme': general.SubdEdge,
'edges': general.SubdEdge,
'smf': general.SubdFace,
'faces': general.SubdFace,
'smm': general.SubdUV,
'uvs': general.SubdUV}
def getTweakedVerts(self, **kwargs):
return cmds.querySubdiv(action=1, **kwargs)
def getSharpenedVerts(self, **kwargs):
return cmds.querySubdiv(action=2, **kwargs)
def getSharpenedEdges(self, **kwargs):
return cmds.querySubdiv(action=3, **kwargs)
def getEdges(self, **kwargs):
return cmds.querySubdiv(action=4, **kwargs)
def cleanTopology(self):
cmds.subdCleanTopology(self)
class Lattice(ControlPoint):
__metaclass__ = _factories.MetaMayaNodeWrapper
_componentAttributes = {'pt': general.LatticePoint,
'points': general.LatticePoint}
class Particle(DeformableShape):
__apicls__ = _api.MFnParticleSystem
__metaclass__ = _factories.MetaMayaNodeWrapper
_componentAttributes = {'pt': general.ParticleComponent,
'points': general.ParticleComponent}
# for backwards compatibility
Point = general.ParticleComponent
# for backwards compatibility, keep these two, even though the api wrap
# will also provide 'count'
def pointCount(self):
return cmds.particle(self, q=1, count=1)
num = pointCount
class SelectionSet(_api.MSelectionList):
apicls = _api.MSelectionList
__metaclass__ = _factories.MetaMayaTypeWrapper
def __init__(self, objs):
""" can be initialized from a list of objects, another SelectionSet, an MSelectionList, or an ObjectSet"""
if isinstance(objs, _api.MSelectionList):
_api.MSelectionList.__init__(self, objs)
elif isinstance(objs, ObjectSet):
_api.MSelectionList.__init__(self, objs.asSelectionSet())
else:
_api.MSelectionList.__init__(self)
for obj in objs:
if isinstance(obj, (DependNode, DagNode)):
self.apicls.add(self, obj.__apiobject__())
elif isinstance(obj, general.Attribute):
self.apicls.add(self, obj.__apiobject__(), True)
# elif isinstance(obj, Component):
# sel.add( obj.__apiobject__(), True )
elif isinstance(obj, basestring):
self.apicls.add(self, obj)
else:
raise TypeError
def __melobject__(self):
# If the list contains components, THEIR __melobject__ is a list -
# so need to iterate through, and flatten if needed
melList = []
for selItem in self:
selItem = selItem.__melobject__()
if _util.isIterable(selItem):
melList.extend(selItem)
else:
melList.append(selItem)
return melList
def __len__(self):
""":rtype: `int` """
return self.apicls.length(self)
def __contains__(self, item):
""":rtype: `bool` """
if isinstance(item, (DependNode, DagNode, general.Attribute)):
return self.apicls.hasItem(self, item.__apiobject__())
elif isinstance(item, general.Component):
raise NotImplementedError, 'Components not yet supported'
else:
return self.apicls.hasItem(self, general.PyNode(item).__apiobject__())
def __repr__(self):
""":rtype: `str` """
names = []
self.apicls.getSelectionStrings(self, names)
return 'nt.%s(%s)' % (self.__class__.__name__, names)
def __getitem__(self, index):
""":rtype: `PyNode` """
if index >= len(self):
raise IndexError, "index out of range"
plug = _api.MPlug()
obj = _api.MObject()
dag = _api.MDagPath()
comp = _api.MObject()
# Go from most specific to least - plug, dagPath, dependNode
try:
self.apicls.getPlug(self, index, plug)
assert not plug.isNull()
except (RuntimeError, AssertionError):
try:
self.apicls.getDagPath(self, index, dag, comp)
except RuntimeError:
try:
self.apicls.getDependNode(self, index, obj)
return general.PyNode(obj)
except:
pass
else:
if comp.isNull():
return general.PyNode(dag)
else:
return general.PyNode(dag, comp)
else:
return general.PyNode(plug)
def __setitem__(self, index, item):
if isinstance(item, (DependNode, DagNode, general.Attribute)):
return self.apicls.replace(self, index, item.__apiobject__())
elif isinstance(item, general.Component):
raise NotImplementedError, 'Components not yet supported'
else:
            return self.apicls.replace(self, index, general.PyNode(item).__apiobject__())
def __and__(self, s):
"operator for `SelectionSet.getIntersection`"
return self.getIntersection(s)
def __iand__(self, s):
"operator for `SelectionSet.intersection`"
return self.intersection(s)
def __or__(self, s):
"operator for `SelectionSet.getUnion`"
return self.getUnion(s)
def __ior__(self, s):
"operator for `SelectionSet.union`"
return self.union(s)
def __lt__(self, s):
"operator for `SelectionSet.isSubSet`"
return self.isSubSet(s)
def __gt__(self, s):
"operator for `SelectionSet.isSuperSet`"
return self.isSuperSet(s)
def __sub__(self, s):
"operator for `SelectionSet.getDifference`"
return self.getDifference(s)
def __isub__(self, s):
"operator for `SelectionSet.difference`"
return self.difference(s)
def __xor__(self, s):
"operator for `SelectionSet.symmetricDifference`"
return self.getSymmetricDifference(s)
def __ixor__(self, s):
"operator for `SelectionSet.symmetricDifference`"
return self.symmetricDifference(s)
def add(self, item):
if isinstance(item, (DependNode, DagNode, general.Attribute)):
return self.apicls.add(self, item.__apiobject__())
elif isinstance(item, general.Component):
raise NotImplementedError, 'Components not yet supported'
else:
return self.apicls.add(self, general.PyNode(item).__apiobject__())
def pop(self, index):
""":rtype: `PyNode` """
if index >= len(self):
raise IndexError, "index out of range"
        item = self[index]
        self.apicls.remove(self, index)
        return item
def isSubSet(self, other):
""":rtype: `bool`"""
if isinstance(other, ObjectSet):
other = other.asSelectionSet()
return set(self).issubset(other)
def isSuperSet(self, other, flatten=True):
""":rtype: `bool`"""
if isinstance(other, ObjectSet):
other = other.asSelectionSet()
return set(self).issuperset(other)
def getIntersection(self, other):
""":rtype: `SelectionSet`"""
# diff = self-other
# intersect = self-diff
diff = self.getDifference(other)
return self.getDifference(diff)
def intersection(self, other):
diff = self.getDifference(other)
self.difference(diff)
def getDifference(self, other):
""":rtype: `SelectionSet`"""
# create a new SelectionSet so that we don't modify our current one
newSet = SelectionSet(self)
newSet.difference(other)
return newSet
def difference(self, other):
if not isinstance(other, _api.MSelectionList):
other = SelectionSet(other)
self.apicls.merge(self, other, _api.MSelectionList.kRemoveFromList)
def getUnion(self, other):
""":rtype: `SelectionSet`"""
newSet = SelectionSet(self)
newSet.union(other)
return newSet
def union(self, other):
if not isinstance(other, _api.MSelectionList):
other = SelectionSet(other)
self.apicls.merge(self, other, _api.MSelectionList.kMergeNormal)
def getSymmetricDifference(self, other):
"""
Also known as XOR
:rtype: `SelectionSet`
"""
# create a new SelectionSet so that we don't modify our current one
newSet = SelectionSet(self)
newSet.symmetricDifference(other)
return newSet
def symmetricDifference(self, other):
if not isinstance(other, _api.MSelectionList):
other = SelectionSet(other)
# FIXME: does kXOR exist? completion says only kXORWithList exists
self.apicls.merge(self, other, _api.MSelectionList.kXOR)
def asObjectSet(self):
return general.sets(self)
# def intersect(self, other):
# self.apicls.merge( other, _api.MSelectionList.kXORWithList )
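# An illustrative sketch (not a doctest) of how the operator overloads above map
# onto the merge-based set methods; the objects used are assumptions for example
# purposes only:
#
#     >>> import pymel.core as pm
#     >>> a = pm.nt.SelectionSet(pm.ls(type='camera'))
#     >>> b = pm.nt.SelectionSet(['perspShape'])
#     >>> a & b    # a.getIntersection(b), computed as a - (a - b)
#     >>> a | b    # a.getUnion(b), a kMergeNormal merge
#     >>> a - b    # a.getDifference(b), a kRemoveFromList merge
#     >>> a ^ b    # a.getSymmetricDifference(b)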
class ObjectSet(Entity):
"""
The ObjectSet class and `SelectionSet` class work together. Both classes have a very similar interface,
the primary difference is that the ObjectSet class represents connections to an objectSet node, while the
`SelectionSet` class is a generic set, akin to pythons built-in `set`.
create some sets:
>>> from pymel.core import *
>>> f=newFile(f=1) #start clean
>>>
>>> s = sets() # create an empty set
>>> s.union( ls( type='camera') ) # add some cameras to it
>>> s.members() # doctest: +SKIP
[nt.Camera(u'sideShape'), nt.Camera(u'frontShape'), nt.Camera(u'topShape'), nt.Camera(u'perspShape')]
>>> sel = s.asSelectionSet() # or as a SelectionSet
>>> sel # doctest: +SKIP
nt.SelectionSet([u'sideShape', u'frontShape', u'topShape', u'perspShape'])
>>> sorted(sel) # as a sorted list
[nt.Camera(u'frontShape'), nt.Camera(u'perspShape'), nt.Camera(u'sideShape'), nt.Camera(u'topShape')]
Operations between sets result in `SelectionSet` objects:
>>> t = sets() # create another set
>>> t.add( 'perspShape' ) # add the persp camera shape to it
>>> s.getIntersection(t)
nt.SelectionSet([u'perspShape'])
>>> diff = s.getDifference(t)
>>> diff #doctest: +SKIP
nt.SelectionSet([u'sideShape', u'frontShape', u'topShape'])
>>> sorted(diff)
[nt.Camera(u'frontShape'), nt.Camera(u'sideShape'), nt.Camera(u'topShape')]
>>> s.isSuperSet(t)
True
"""
# >>> u = sets( s&t ) # intersection
# >>> print u.elements(), s.elements()
# >>> if u < s: print "%s is a sub-set of %s" % (u, s)
#
# place a set inside another, take1
#
# >>> # like python's built-in set, the add command expects a single element
# >>> s.add( t )
#
# place a set inside another, take2
#
# >>> # like python's built-in set, the update command expects a set or a list
# >>> t.update([u])
#
# >>> # put the sets back where they were
# >>> s.remove(t)
# >>> t.remove(u)
#
# now put the **contents** of a set into another set
#
# >>> t.update(u)
#
# mixed operation between pymel.core.ObjectSet and built-in set
#
# >>> v = set(['polyCube3', 'pSphere3'])
# >>> print s.intersection(v)
# >>> print v.intersection(s) # not supported yet
# >>> u.clear()
#
# >>> delete( s )
# >>> delete( t )
# >>> delete( u )
#
#
# these will return the results of the operation as python sets containing lists of pymel node classes::
#
# s&t # s.intersection(t)
# s|t # s.union(t)
# s^t # s.symmetric_difference(t)
# s-t # s.difference(t)
#
# the following will alter the contents of the maya set::
#
# s&=t # s.intersection_update(t)
# s|=t # s.update(t)
# s^=t # s.symmetric_difference_update(t)
# s-=t # s.difference_update(t)
#
# def _elements(self):
# """ used internally to get a list of elements without casting to node classes"""
# return sets( self, q=True)
# #-----------------------
# # Maya Methods
# #-----------------------
__metaclass__ = _factories.MetaMayaNodeWrapper
#-----------------------
# Python ObjectSet Methods
#-----------------------
@classmethod
def _getApiObjs(cls, item, tryCast=True):
"""
Returns a tuple of api objects suitable (after unpacking) for
feeding to most of the MFnSet methods (ie, remove, isMember, etc)
"""
if isinstance(item, DagNode):
return (item.__apimdagpath__(), _api.MObject())
elif isinstance(item, (DependNode, general.Attribute)):
return (item.__apiobject__(), )
elif isinstance(item, general.Component):
return (item.__apimdagpath__(), item.__apimobject__())
elif tryCast:
return cls._getApiObjs(general.PyNode(item), tryCast=False)
else:
raise TypeError(item)
def __contains__(self, item):
""":rtype: `bool` """
return self.__apimfn__().isMember(*self._getApiObjs(item))
def __getitem__(self, index):
return self.asSelectionSet()[index]
def __len__(self):
""":rtype: `int`"""
return cmds.sets(self, q=1, size=1)
# def __eq__(self, s):
# return s == self._elements()
# def __ne__(self, s):
# return s != self._elements()
def __and__(self, s):
"operator for `ObjectSet.getIntersection`"
return self.getIntersection(s)
def __iand__(self, s):
"operator for `ObjectSet.intersection`"
return self.intersection(s)
def __or__(self, s):
"operator for `ObjectSet.getUnion`"
return self.getUnion(s)
def __ior__(self, s):
"operator for `ObjectSet.union`"
return self.union(s)
# def __lt__(self, s):
# "operator for `ObjectSet.isSubSet`"
# return self.isSubSet(s)
#
# def __gt__(self, s):
# "operator for `ObjectSet.isSuperSet`"
# return self.isSuperSet(s)
def __sub__(self, s):
"operator for `ObjectSet.getDifference`"
return self.getDifference(s)
def __isub__(self, s):
"operator for `ObjectSet.difference`"
return self.difference(s)
def __xor__(self, s):
"operator for `ObjectSet.symmetricDifference`"
return self.getSymmetricDifference(s)
def __ixor__(self, s):
"operator for `ObjectSet.symmetricDifference`"
return self.symmetricDifference(s)
#
# def subtract(self, set2):
# return sets( self, subtract=set2 )
#
# def add(self, element):
# return sets( self, add=[element] )
#
# def clear(self):
# return sets( self, clear=True )
#
# def copy(self ):
# return sets( self, copy=True )
#
# def difference(self, elements):
# if isinstance(elements,basestring):
# elements = cmds.sets( elements, q=True)
# return list(set(self.elements()).difference(elements))
#
# '''
# if isinstance(s, ObjectSet) or isinstance(s, str):
# return sets( s, subtract=self )
#
# s = sets( s )
# res = sets( s, subtract=self )
# cmds.delete(s)
# return res'''
#
# def difference_update(self, elements ):
# return sets( self, remove=elements)
#
# def discard( self, element ):
# try:
# return self.remove(element)
# except TypeError:
# pass
#
# def intersection(self, elements):
# if isinstance(elements,basestring):
# elements = cmds.sets( elements, q=True)
# return set(self.elements()).intersection(elements)
#
# def intersection_update(self, elements):
# self.clear()
# sets( self, add=self.intersections(elements) )
#
#
# def remove( self, element ):
# return sets( self, remove=[element])
#
# def symmetric_difference(self, elements):
# if isinstance(elements,basestring):
# elements = cmds.sets( elements, q=True)
# return set(self.elements()).symmetric_difference(elements)
#
# def union( self, elements ):
# if isinstance(elements,basestring):
# elements = cmds.sets( elements, q=True)
# return set(self.elements()).union(elements)
#
# def update( self, set2 ):
# sets( self, forceElement=set2 )
def forceElement(self, member):
"""Forces addition of the items to the set. If the items are in
another set which is in the same partition as the given set,
the items will be removed from the other set in order to keep the
sets in the partition mutually exclusive with respect to membership."""
cmds.sets(member, forceElement=self)
def members(self, flatten=False):
"""return members as a list
:rtype: `list`
"""
return list(self.asSelectionSet(flatten))
@_warnings.deprecated('Use ObjectSet.members instead', 'ObjectSet')
def elements(self, flatten=False):
"""return members as a list
:rtype: `list`
"""
return list(self.asSelectionSet(flatten))
def flattened(self):
"""return a flattened list of members. equivalent to `ObjectSet.members(flatten=True)`
:rtype: `list`
"""
return self.members(flatten=True)
def resetTo(self, newContents):
"""clear and set the members to the passed list/set"""
self.clear()
self.addMembers(newContents)
def add(self, item):
return self.__apimfn__().addMember(*self._getApiObjs(item))
def remove(self, item):
try:
return self.__apimfn__().removeMember(*self._getApiObjs(item))
except RuntimeError:
# Provide a more informative error if object is not in set
if item not in self:
try:
itemStr = repr(item)
except Exception:
itemStr = 'item'
raise ValueError("%s not in set %r" % (itemStr, self))
else:
raise
def isSubSet(self, other):
""":rtype: `bool`"""
return self.asSelectionSet().isSubSet(other)
def isSuperSet(self, other):
""":rtype: `bool`"""
return self.asSelectionSet().isSuperSet(other)
def isEqual(self, other):
"""
do not use __eq__ to test equality of set contents. __eq__ will only tell you if
the passed object is the same node, not if this set and the passed set
have the same contents.
:rtype: `bool`
"""
return self.asSelectionSet() == SelectionSet(other)
def getDifference(self, other):
""":rtype: `SelectionSet`"""
sel = self.asSelectionSet()
sel.difference(other)
return sel
def difference(self, other):
sel = self.getDifference(other)
self.resetTo(sel)
def getSymmetricDifference(self, other):
"""also known as XOR
:rtype: `SelectionSet`
"""
        sel = self.asSelectionSet()
        sel.symmetricDifference(other)
return sel
def symmetricDifference(self, other):
        sel = self.getSymmetricDifference(other)
self.resetTo(sel)
def getIntersection(self, other):
""":rtype: `SelectionSet`"""
if isinstance(other, ObjectSet):
return self._getIntersection(other)
# elif isinstance(other, SelectionSet) or hasattr(other, '__iter__'):
selSet = self.asSelectionSet()
selSet.intersection(other)
return selSet
#raise TypeError, 'Cannot perform intersection with non-iterable type %s' % type(other)
def intersection(self, other):
sel = self.getIntersection(other)
self.resetTo(sel)
def getUnion(self, other):
""":rtype: `SelectionSet`"""
if isinstance(other, ObjectSet):
return self._getUnion(other)
selSet = self.asSelectionSet()
selSet.union(other)
return selSet
def union(self, other):
self.addMembers(other)
def isRenderable(self):
'''Mimics cmds.sets(self, q=True, renderable=True).
        Alternatively, you can use isinstance(someset, pm.nt.ShadingEngine),
        since shadingEngine is currently the only renderable set type in maya.
'''
return bool(cmds.sets(self, q=True, r=True))
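# An illustrative sketch (not a doctest) of the isEqual note above: == compares
# node identity, while isEqual compares membership. It assumes a clean scene and
# follows the same sets()/union() pattern used in the class docstring:
#
#     >>> import pymel.core as pm
#     >>> s = pm.sets(); s.union(pm.ls(type='camera'))
#     >>> t = pm.sets(); t.union(pm.ls(type='camera'))
#     >>> s == t          # two different objectSet nodes
#     False
#     >>> s.isEqual(t)    # but with identical contents
#     True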
class ShadingEngine(ObjectSet):
@classmethod
def _getApiObjs(cls, item, tryCast=True):
# Since shading groups can't contain transforms, as a convenience,
# use getShape on any transforms
if isinstance(item, Transform):
shape = item.getShape()
if shape:
return cls._getApiObjs(shape)
else:
try:
itemStr = repr(item)
except Exception:
itemStr = 'item'
raise TypeError("%s has no shape, and %s objects cannot contain Transforms" % (itemStr, cls.__name__))
else:
return super(ShadingEngine, cls)._getApiObjs(item, tryCast=tryCast)
class AnimLayer(ObjectSet):
__metaclass__ = _factories.MetaMayaNodeWrapper
def getAttribute(self):
'''Retrieve the attributes animated on this AnimLayer
'''
# Unfortunately, cmds.animLayer('MyAnimLayer', q=1, attribute=1)
        # returns non-unique attribute names, ie,
# MyNode.myAttr
# even if there are foo|MyNode and bar|MyNode in the scene, and there
# doesn't seem to be a flag to tell it to give unique / full paths.
        # Therefore, query it ourselves, by getting inputs to dagSetMembers.
# Testing has shown that animLayers only use dagSetMembers, and never
# dnSetMembers - if you add a non-dag node to an animLayer, it makes
# a connection to dagSetMembers; and even if you manually make a connection
# to dnSetMembers, those connections don't seem to show up in
# animLayer(q=1, attribute=1)
return self.attr('dagSetMembers').inputs(plugs=1)
getAttributes = getAttribute
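# An illustrative sketch (not a doctest) of the dagSetMembers-based query above,
# next to the ambiguous mel form it works around; the layer and node names are
# assumptions for example purposes:
#
#     >>> import pymel.core as pm
#     >>> layer = pm.PyNode('MyAnimLayer')
#     >>> layer.getAttribute()                            # unambiguous Attribute objects
#     [Attribute(u'|foo|MyNode.myAttr'), ...]
#     >>> pm.animLayer('MyAnimLayer', q=1, attribute=1)   # short, possibly ambiguous names
#     [u'MyNode.myAttr', ...]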
class AnimCurve(DependNode):
__metaclass__ = _factories.MetaMayaNodeWrapper
def addKeys(self, time, values, tangentInType='linear', tangentOutType='linear', unit=None):
if not unit:
unit = _api.MTime.uiUnit()
times = _api.MTimeArray()
for frame in time:
times.append(_api.MTime(frame, unit))
keys = _api.MDoubleArray()
for value in values:
keys.append(value)
return self.__apimfn__().addKeys(times, keys,
_factories.apiClassInfo['MFnAnimCurve']['enums']['TangentType']['values'].getIndex('kTangent' + tangentInType.capitalize()),
_factories.apiClassInfo['MFnAnimCurve']['enums']['TangentType']['values'].getIndex('kTangent' + tangentOutType.capitalize()))
def numKeyframes(self):
# just because MFnAnimCurve.numKeyframes is deprecated...
return self.numKeys()
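# An illustrative sketch (not a doctest) of AnimCurve.addKeys; the node name is
# an assumption for example purposes, and the key count assumes the curve was
# empty beforehand:
#
#     >>> import pymel.core as pm
#     >>> curve = pm.PyNode('pCube1_translateX')      # an animCurve node
#     >>> curve.addKeys([1, 5, 10], [0.0, 2.5, 1.0])  # ui time unit, linear tangents
#     >>> curve.numKeys()
#     3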
class GeometryFilter(DependNode):
pass
class SkinCluster(GeometryFilter):
__metaclass__ = _factories.MetaMayaNodeWrapper
def getWeights(self, geometry, influenceIndex=None):
if not isinstance(geometry, general.PyNode):
geometry = general.PyNode(geometry)
if isinstance(geometry, Transform):
try:
geometry = geometry.getShape()
except:
raise TypeError, "%s is a transform with no shape" % geometry
if isinstance(geometry, GeometryShape):
components = _api.toComponentMObject(geometry.__apimdagpath__())
elif isinstance(geometry, general.Component):
components = geometry.__apiobject__()
else:
raise TypeError
if influenceIndex is not None:
weights = _api.MDoubleArray()
self.__apimfn__().getWeights(geometry.__apimdagpath__(), components, influenceIndex, weights)
return iter(weights)
else:
weights = _api.MDoubleArray()
index = _api.SafeApiPtr('uint')
self.__apimfn__().getWeights(geometry.__apimdagpath__(), components, weights, index())
index = index.get()
args = [iter(weights)] * index
return itertools.izip(*args)
    def setWeights(self, geometry, influences, weights, normalize=True):
if not isinstance(geometry, general.PyNode):
geometry = general.PyNode(geometry)
if isinstance(geometry, Transform):
try:
geometry = geometry.getShape()
except:
raise TypeError, "%s is a transform with no shape" % geometry
if isinstance(geometry, GeometryShape):
components = _api.toComponentMObject(geometry.__apimdagpath__())
elif isinstance(geometry, general.Component):
components = geometry.__apiobject__()
else:
raise TypeError
        if not isinstance(influences, _api.MIntArray):
            api_influences = _api.MIntArray()
            for influence in influences:
                api_influences.append(influence)
            influences = api_influences
if not isinstance(weights, _api.MDoubleArray):
api_weights = _api.MDoubleArray()
for weight in weights:
api_weights.append(weight)
weights = api_weights
old_weights = _api.MDoubleArray()
su = _api.MScriptUtil()
su.createFromInt(0)
index = su.asUintPtr()
self.__apimfn__().getWeights(geometry.__apimdagpath__(), components, old_weights, index)
        return self.__apimfn__().setWeights(geometry.__apimdagpath__(), components, influences, weights, normalize, old_weights)
@_factories.addApiDocs(_api.MFnSkinCluster, 'influenceObjects')
def influenceObjects(self):
return self._influenceObjects()[1]
def numInfluenceObjects(self):
return self._influenceObjects()[0]
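# An illustrative sketch (not a doctest) of the weight accessors above. With no
# influenceIndex, getWeights regroups the flat MDoubleArray into one tuple per
# component (one weight per influence) via the izip trick; with an influenceIndex
# it yields a flat iterator. The node names are assumptions for example purposes:
#
#     >>> import pymel.core as pm
#     >>> skin = pm.PyNode('skinCluster1')
#     >>> for vertWeights in skin.getWeights('pCylinderShape1'):
#     ...     pass    # vertWeights is a tuple, len == number of influences
#     >>> oneInfluence = list(skin.getWeights('pCylinderShape1', influenceIndex=0))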
# TODO: if nucleus/symmetryConstraint bug ever fixed:
# - remove entry in apiCache.ApiCache.API_TO_MFN_OVERRIDES
# - remove hard-code setting of Nucleus's parent to DependNode
# - remove 2 checks in allapi.toApiObject for objects which can have an MDagPath
# but can't use MFnDagNode
if _apicache.NUCLEUS_MFNDAG_BUG:
# nucleus has a weird bug where, even though it inherits from transform, and
# can be parented in the dag, etc, you can't create an MFnTransform or
    # MFnDagNode for it... therefore, hardcode its PyNode to inherit from
# DependNode
class Nucleus(DependNode):
__metaclass__ = _factories.MetaMayaNodeWrapper
if _apicache.SYMMETRY_CONSTRAINT_MFNDAG_BUG:
class SymmetryConstraint(DependNode):
__metaclass__ = _factories.MetaMayaNodeWrapper
# TODO: if hikHandle bug ever fixed:
# - remove entry in apiCache.ApiCache.API_TO_MFN_OVERRIDES
# - remove hard-code setting of HikHandle's parent to Transform
class HikHandle(Transform):
__metaclass__ = _factories.MetaMayaNodeWrapper
class JointFfd(DependNode):
__metaclass__ = _factories.MetaMayaNodeWrapper
class TransferAttributes(DependNode):
__metaclass__ = _factories.MetaMayaNodeWrapper
_factories.ApiTypeRegister.register('MSelectionList', SelectionSet)
class NodetypesLazyLoadModule(_util.LazyLoadModule):
'''Like a standard lazy load module, but with dynamic PyNode class creation
'''
_checkedForNewReservedTypes = False
@classmethod
def _unwrappedNodeTypes(cls):
# get node types, but avoid checking inheritance for all nodes for
# speed. Note that this means we're potentially missing some abstract
        # edge cases - TadskAssetInstanceNode_TdependNode type stuff that only
        # shows up in inheritance hierarchies - but I can't see people wanting
        # to access those directly from nodetypes anyway, so I'm judging that
        # to be an acceptable risk
allNodes = _apicache._getAllMayaTypes(addAncestors=False,
noManips='fast')
unknownNodes = allNodes - set(mayaTypeNameToPymelTypeName)
if unknownNodes:
# first, check for any new abstract node types - this can happen
# if, for instance, we have an "extension" release of maya,
# which introduces new nodes (and abstract nodes), but the caches
# were built with the "base" release
# we do these first because they can't be queried by creating nodes,
# and for derived nodes, we may be able to get by just using the
# type for the abstract node...
if not cls._checkedForNewReservedTypes:
cls._checkedForNewReservedTypes = True
# this should build mayaTypesToApiTypes and mayaTypesToApiEnums
# for all reserved types...
cache = _apicache.ApiCache()
cache._buildMayaToApiInfo(reservedOnly=True)
# and update the cache in use with these results...
# ...now update with any that were missing...
for mayaType, apiType in cache.mayaTypesToApiTypes.iteritems():
if mayaType not in _factories._apiCacheInst.mayaTypesToApiTypes:
_factories._apiCacheInst.mayaTypesToApiTypes[mayaType] = apiType
_factories._apiCacheInst.mayaTypesToApiEnums[mayaType] = cache.mayaTypesToApiEnums[mayaType]
return unknownNodes - set(mayaTypeNameToPymelTypeName)
def __getattr__(self, name):
'''Check to see if the name corresponds to a PyNode that hasn't been
added yet'''
# In the normal course of operation, this __getattr__ shouldn't be
# needed - PyNodes corresponding to maya node types should be created
# when pymel starts up, or when a plugin loads.
        # However, there are some unusual situations that can arise where new
        # node types are missed... because a plugin can actually register new
        # nodes at any time, not just during its initialization!
#
# This happened with mtoa - if you have an mtoa extension, which adds
# some maya nodes, but requires another maya plugin... those nodes
# will not be added until that other maya plugin is loaded... but
# the nodes will be added to mtoa, NOT the plugin that triggered
# the plugin-loaded callback. Also, the node adding happens within
# ANOTHER plugin-loaded callback, which generally runs AFTER pymel's
# plugin-loaded callback!
uncapName = _util.uncapitalize(name)
if uncapName in self._unwrappedNodeTypes():
# it's a maya node we haven't wrapped yet! Wrap it and return!
import pymel.core
mayaType = uncapName
# See if it's a plugin node...
nodeClass = _api.MNodeClass(mayaType)
try:
pluginPath = nodeClass.pluginName()
plugin = cmds.pluginInfo(pluginPath, q=1, name=1)
except RuntimeError:
# if we can't find a plugin
                pyNodeName = _factories.addCustomPyNode(self, mayaType,
immediate=True)
else:
pyNodeName = pymel.core._addPluginNode(plugin, mayaType,
immediate=True)
if pyNodeName != name:
_logger.raiseLog(_logger.WARNING,
"dynamically added node when %r requested, but"
" returned PyNode had name %r" % (
name, pyNodeName))
return self.__dict__[pyNodeName]
raise AttributeError(name)
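# An illustrative sketch (not a doctest) of the late-registration fallback above;
# the plugin node name is an assumption for example purposes:
#
#     >>> import pymel.core.nodetypes as nt
#     >>> # a plugin registers a 'myLateNode' type well after it was loaded...
#     >>> nt.MyLateNode    # first access wraps it on the fly instead of raising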
def _createPyNodes():
dynModule = NodetypesLazyLoadModule(__name__, globals())
for mayaType, parents, children in _factories.nodeHierarchy:
if mayaType == 'dependNode':
# This seems like the more 'correct' way of doing it - only node types
# that are currently available have PyNodes created for them - but
# changing it so some PyNodes are no longer available until their
# plugin is loaded may create backwards incompatibility issues...
# if (mayaType == 'dependNode'
# or mayaType not in _factories.mayaTypesToApiTypes):
continue
parentMayaType = parents[0]
# print "superNodeType: ", superNodeType, type(superNodeType)
if parentMayaType is None:
_logger.warning("could not find parent node: %s", mayaType)
continue
#className = _util.capitalize(mayaType)
#if className not in __all__: __all__.append( className )
if _factories.isMayaType(mayaType):
_factories.addPyNode(dynModule, mayaType, parentMayaType)
sys.modules[__name__] = dynModule
# Initialize Pymel classes to API types lookup
#_startTime = time.time()
_createPyNodes()
#_logger.debug( "Initialized Pymel PyNodes types list in %.2f sec" % time.time() - _startTime )
dynModule = sys.modules[__name__]
# def listToMSelection( objs ):
# sel = _api.MSelectionList()
# for obj in objs:
# if isinstance(obj, DependNode):
# sel.add( obj.__apiobject__() )
# elif isinstance(obj, Attribute):
# sel.add( obj.__apiobject__(), True )
# elif isinstance(obj, Component):
# pass
# #sel.add( obj.__apiobject__(), True )
# else:
# raise TypeError
|
py | 7dfdb3c2a24037edc0302d6f886a3751cc80c771 | """
Fingerbank tools.
Usage:
>>> import fingerbank
>>> oses, groups = fingerbank.read('data/dhcp_fingerprints.conf')
then implement your own filtering and selection on top of those nifty
values. More tools are forthcoming in other modules.
"""
from .parser import create_systems_and_groups, parse_config_with_heredocs
from .database import System, Group
def read(fn):
"""
return a list of the operating systems and a list of the groups in
the given fingerbank config file
"""
cfg = parse_config_with_heredocs(fn)
return create_systems_and_groups(cfg)
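# An illustrative sketch of the "implement your own filtering" suggestion in the
# module docstring. It only assumes that read() returns (systems, groups) and
# that System and Group objects are printable; any richer attribute access would
# need to be checked against fingerbank.database.
#
#     from fingerbank import read
#
#     systems, groups = read('data/dhcp_fingerprints.conf')
#     for system in systems:
#         print(system)
#     for group in groups:
#         print(group)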
|
py | 7dfdb3ca24baf00c3ea937e641ceed2b589aa5a7 | """
PRACTICE Exam 1, problem 0.
These problems illustrate concepts that previous problems have not emphasized:
-- determining whether a number is odd or even (Problem 0a)
-- returning True or False (Problem 0a)
-- is_prime (Problem 0b)
-- animation (Problem 0c)
Authors: David Mutchler, Vibha Alangar, Matt Boutell, Dave Fisher,
Valerie Galluzzi, Mark Hays, Amanda Stouder, Aaron Wilkin,
their colleagues, and Shengjun Guan.
""" # DONE: 1. PUT YOUR NAME IN THE ABOVE LINE.
import rosegraphics as rg
import testing_helper
import time
def main():
""" Calls the TEST functions in this module. """
run_test_problem0a()
run_test_problem0b()
run_test_problem0c()
###############################################################################
# DONE: 2. READ the green doc-string for the:
# - is_prime
# - sum_of_digits
# functions defined below. You do NOT need to understand their
# implementations, just their specification (per the doc-string).
# You should ** CALL ** those functions as needed in implementing the
# other functions. After you have READ this, change its _TODO_ to DONE.
###############################################################################
def is_prime(n):
"""
What comes in: An integer n >= 2.
What goes out:
-- Returns True if the given integer is prime,
else returns False.
Side effects: None.
Examples:
-- is_prime(11) returns True
-- is_prime(12) returns False
-- is_prime(2) returns True
Note: The algorithm used here is simple and clear but slow.
"""
for k in range(2, (n // 2) + 1):
if n % k == 0:
return False
return True
# -------------------------------------------------------------------------
# Students:
# Do NOT touch the above is_prime function - it has no _TODO_.
# Do NOT copy code from this function.
#
# Instead, ** CALL ** this function as needed in the problems below.
# -------------------------------------------------------------------------
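# For reference only - the docstring above notes that this trial division is
# slow; a common variant stops at sqrt(n). This sketch is NOT part of the
# exercises and is not used by any test:
#
#     import math
#     def is_prime_faster(n):
#         for k in range(2, int(math.sqrt(n)) + 1):
#             if n % k == 0:
#                 return False
#         return True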
def sum_of_digits(number):
"""
What comes in: An integer.
What goes out: Returns the sum of the digits in the given integer.
Side effects: None.
Example:
If the integer is 83135,
this function returns (8 + 3 + 1 + 3 + 5), which is 20.
"""
# -------------------------------------------------------------------------
# Students:
# Do NOT touch the above sum_of_digits function - it has no _TODO_.
# Do NOT copy code from this function.
#
# Instead, ** CALL ** this function as needed in the problems below.
# -------------------------------------------------------------------------
if number < 0:
number = -number
digit_sum = 0
while True:
if number == 0:
break
digit_sum = digit_sum + (number % 10)
number = number // 10
return digit_sum
def run_test_problem0a():
""" Tests the problem0a function. """
print()
print('--------------------------------------------------')
print('Testing the problem0a function:')
print('--------------------------------------------------')
format_string = ' problem0a( {} )'
test_results = [0, 0] # Number of tests passed, failed.
# Test 1:
expected = False
print_expected_result_of_test([83135], expected, test_results,
format_string)
actual = problem0a(83135) # Run the code to test
print_actual_result_of_test(expected, actual, test_results)
if actual == 'False':
print('Your function returned the STRING "False",')
print('which is WRONG. It should have returned')
print('the built-in constant False.')
print('Ask for help as needed.')
# Test 2:
expected = True
print_expected_result_of_test([306], expected, test_results, format_string)
actual = problem0a(306) # Run the code to test
print_actual_result_of_test(expected, actual, test_results)
if actual == 'True':
print('Your function returned the STRING "True",')
print('which is WRONG. It should have returned')
print('the built-in constant True.')
print('Ask for help as needed.')
# Test 3:
expected = False
print_expected_result_of_test([246], expected, test_results, format_string)
actual = problem0a(246) # Run the code to test
print_actual_result_of_test(expected, actual, test_results)
# Test 4:
expected = False
print_expected_result_of_test([830931], expected, test_results,
format_string)
actual = problem0a(830931) # Run the code to test
print_actual_result_of_test(expected, actual, test_results)
# Test 5:
expected = True
print_expected_result_of_test([730931], expected, test_results,
format_string)
actual = problem0a(730931) # Run the code to test
print_actual_result_of_test(expected, actual, test_results)
# Test 6:
expected = False
print_expected_result_of_test([200], expected, test_results, format_string)
actual = problem0a(200) # Run the code to test
print_actual_result_of_test(expected, actual, test_results)
# Test 7:
expected = True
print_expected_result_of_test([562], expected, test_results,
format_string)
actual = problem0a(562) # Run the code to test
print_actual_result_of_test(expected, actual, test_results)
# Test 8:
expected = True
print_expected_result_of_test([555], expected, test_results,
format_string)
actual = problem0a(555) # Run the code to test
print_actual_result_of_test(expected, actual, test_results)
# Test 9:
expected = False
print_expected_result_of_test([13], expected, test_results,
format_string)
actual = problem0a(13) # Run the code to test
print_actual_result_of_test(expected, actual, test_results)
print_summary_of_test_results(test_results)
def problem0a(n):
"""
What comes in: An integer.
What goes out:
-- Returns True if the sum of the digits in the given integer
is odd, else returns False.
Side effects: None.
Examples:
-- If the given integer is 83135, this function returns False,
since (8 + 3 + 1 + 3 + 5) is 20, which is NOT odd.
-- If the given integer is 306, this function returns True,
since (3 + 0 + 6) is 9, which IS odd.
-- If the given integer is 246, this function returns False,
since (2 + 4 + 6) is 12, which is NOT odd.
"""
# -------------------------------------------------------------------------
# DONE: 3. Implement and test this function.
# Tests have been written for you (above).
#
###########################################################################
# IMPORTANT:
# ** For full credit you must appropriately
# ** use (call) the sum_of_digits function
# ** that is DEFINED ABOVE.
###########################################################################
#
# HINT: To test whether a number m is even or odd,
# compute m % 2, i.e., the REMAINDER from m // 2.
# If that remainder is 0, the number is even.
# If that remainder is 1, the number is odd.
# Simply try a few examples to convince yourself of this.
# ASK FOR HELP if you do not understand this hint.
# if sum_of_digits(n) % 2 ==1:
# return True
# else:
# return False
# or
# -------------------------------------------------------------------------
    if sum_of_digits(n) % 2 == 0:
return False
else:
return True
def run_test_problem0b():
""" Tests the problem0b function. """
print()
print('--------------------------------------------------')
print('Testing the problem0b function:')
print('--------------------------------------------------')
format_string = ' problem0b( {} )'
test_results = [0, 0] # Number of tests passed, failed.
# Test 1:
expected = 6
print_expected_result_of_test([13], expected, test_results, format_string)
actual = problem0b(13) # Run the code to test
print_actual_result_of_test(expected, actual, test_results)
# Test 2:
expected = 1
print_expected_result_of_test([2], expected, test_results, format_string)
actual = problem0b(2) # Run the code to test
print_actual_result_of_test(expected, actual, test_results)
# Test 3:
expected = 46
print_expected_result_of_test([200], expected, test_results, format_string)
actual = problem0b(200) # Run the code to test
print_actual_result_of_test(expected, actual, test_results)
# Test 4:
expected = 168
print_expected_result_of_test([997], expected, test_results, format_string)
actual = problem0b(997) # Run the code to test
print_actual_result_of_test(expected, actual, test_results)
print_summary_of_test_results(test_results)
def problem0b(n):
"""
What comes in: An integer n >= 2.
What goes out:
-- Returns the number of integers from 2 to n, inclusive,
that are prime.
Side effects: None.
Examples:
-- If n is 13, this function returns 6,
since there are 6 primes -- namely, 2, 3, 5, 7, 11, and 13 --
between 2 and 13.
-- If n is 2, this function returns 1,
since there is one prime (namely, 2) between 2 and 2.
-- If n is 200, the correct answer is 46,
since there are 46 primes between 2 and 200.
"""
# -------------------------------------------------------------------------
# DONE: 4. Implement and test this function.
# Tests have been written for you (above).
#
###########################################################################
# IMPORTANT:
# ** For full credit you must appropriately
# ** use (call) the is_prime function that is DEFINED ABOVE.
###########################################################################
# ------------------------------------------------------------------
total = 0
    for i in range(2, n + 1):
        if is_prime(i):
            total = total + 1
return total
def run_test_problem0c():
""" Tests the problem0c function. """
print()
print('--------------------------------------------------')
print('Testing the problem0c function:')
print(' See the graphics windows that pop up.')
print('--------------------------------------------------')
# TWO tests on ONE window.
title = 'Tests 1 & 2 of problem0c: blue circle + 6 circles;'
title += ' then green circle + 3 circles'
window1 = rg.RoseWindow(650, 300, title)
circle1 = rg.Circle(rg.Point(100, 50), 30)
circle1.fill_color = 'blue'
problem0c(circle1, 6, window1)
window1.continue_on_mouse_click()
circle2 = rg.Circle(rg.Point(75, 200), 75)
circle2.fill_color = 'green'
problem0c(circle2, 3, window1)
window1.close_on_mouse_click()
# A third test on ANOTHER window.
title = 'Test 3 of problem0c: red circle + 10 circles'
window2 = rg.RoseWindow(600, 200, title)
circle3 = rg.Circle(rg.Point(50, 50), 20)
circle3.fill_color = 'red'
problem0c(circle3, 10, window2)
window2.close_on_mouse_click()
def problem0c(circle, n, window):
"""
See problem0c_picture.pdf in this project for pictures
that may help you better understand the following specification:
What comes in:
-- An rg.Circle.
-- A positive integer n.
-- An rg.RoseWindow.
What goes out: Nothing (i.e., None).
Side effects:
Draws the given rg.Circle and n additional rg.Circles
on the given rg.RoseWindow such that:
-- The circles form a row of touching rg.Circles with the
leftmost circle being the given rg.Circle.
-- There is a 0.5 second pause after each rg.Circle is drawn.
Must ** NOT close ** the window.
Type hints:
:type circle: rg.Circle
:type n: int
:type window: rg.RoseWindow
"""
# -------------------------------------------------------------------------
# DONE: 5. Implement and test this function.
# Tests have been written for you (above).
#
###########################################################################
# HINT: render(0.5)
# renders with a half-second pause after rendering.
###########################################################################
# -------------------------------------------------------------------------
circle.attach_to(window)
x = circle.center.x
y = circle.center.y
for i in range(n):
        x = x + 2 * circle.radius
        circlen = rg.Circle(rg.Point(x, y), circle.radius)
circlen.attach_to(window)
window.render(0.5)
###############################################################################
# Our tests use the following to print error messages in red.
# Do NOT change it. You do NOT have to do anything with it.
###############################################################################
def print_expected_result_of_test(arguments, expected,
test_results, format_string):
testing_helper.print_expected_result_of_test(arguments, expected,
test_results, format_string)
def print_actual_result_of_test(expected, actual, test_results,
precision=None):
testing_helper.print_actual_result_of_test(expected, actual,
test_results, precision)
def print_summary_of_test_results(test_results):
testing_helper.print_summary_of_test_results(test_results)
# To allow color-coding the output to the console:
USE_COLORING = True # Change to False to revert to OLD style coloring
testing_helper.USE_COLORING = USE_COLORING
if USE_COLORING:
# noinspection PyShadowingBuiltins
print = testing_helper.print_colored
else:
# noinspection PyShadowingBuiltins
print = testing_helper.print_uncolored
# -----------------------------------------------------------------------------
# Calls main to start the ball rolling.
# The try .. except prevents error messages on the console from being
# intermingled with ordinary output to the console.
# -----------------------------------------------------------------------------
try:
main()
except Exception:
print('ERROR - While running this test,', color='red')
print('your code raised the following exception:', color='red')
print()
time.sleep(1)
raise
|
py | 7dfdb437ff0b4b082d85c53e19548aa86112dc5b | """
Base implementation for data objects exposed through a provider or service
"""
import inspect
import itertools
import json
import logging
import shutil
import time
import six
from cloudbridge.cloud.interfaces.resources \
import InvalidConfigurationException
from cloudbridge.cloud.interfaces.resources import AttachmentInfo
from cloudbridge.cloud.interfaces.resources import Bucket
from cloudbridge.cloud.interfaces.resources import BucketObject
from cloudbridge.cloud.interfaces.resources import CloudResource
from cloudbridge.cloud.interfaces.resources import Instance
from cloudbridge.cloud.interfaces.resources import InstanceState
from cloudbridge.cloud.interfaces.resources import InstanceType
from cloudbridge.cloud.interfaces.resources import KeyPair
from cloudbridge.cloud.interfaces.resources import LaunchConfig
from cloudbridge.cloud.interfaces.resources import MachineImage
from cloudbridge.cloud.interfaces.resources import MachineImageState
from cloudbridge.cloud.interfaces.resources import Network
from cloudbridge.cloud.interfaces.resources import NetworkState
from cloudbridge.cloud.interfaces.resources import ObjectLifeCycleMixin
from cloudbridge.cloud.interfaces.resources import PageableObjectMixin
from cloudbridge.cloud.interfaces.resources import PlacementZone
from cloudbridge.cloud.interfaces.resources import Region
from cloudbridge.cloud.interfaces.resources import Router
from cloudbridge.cloud.interfaces.resources import ResultList
from cloudbridge.cloud.interfaces.resources import SecurityGroup
from cloudbridge.cloud.interfaces.resources import SecurityGroupRule
from cloudbridge.cloud.interfaces.resources import Snapshot
from cloudbridge.cloud.interfaces.resources import SnapshotState
from cloudbridge.cloud.interfaces.resources import Subnet
from cloudbridge.cloud.interfaces.resources import FloatingIP
from cloudbridge.cloud.interfaces.resources import Volume
from cloudbridge.cloud.interfaces.resources import VolumeState
from cloudbridge.cloud.interfaces.resources import WaitStateException
log = logging.getLogger(__name__)
class BaseCloudResource(CloudResource):
def __init__(self, provider):
self.__provider = provider
@property
def _provider(self):
return self.__provider
def to_json(self):
# Get all attributes but filter methods and private/magic ones
attr = inspect.getmembers(self, lambda a: not(inspect.isroutine(a)))
js = {k: v for(k, v) in attr if not k.startswith('_')}
return json.dumps(js, sort_keys=True)
class BaseObjectLifeCycleMixin(ObjectLifeCycleMixin):
"""
A base implementation of an ObjectLifeCycleMixin.
    This base implementation provides a wait_for implementation that
    refreshes the object's state until one of the desired ready states
    is reached. Subclasses must still implement the wait_till_ready
method, since the desired ready states are object specific.
"""
def wait_for(self, target_states, terminal_states=None, timeout=None,
interval=None):
if timeout is None:
timeout = self._provider.config.default_wait_timeout
if interval is None:
interval = self._provider.config.default_wait_interval
assert timeout >= 0
assert interval >= 0
assert timeout >= interval
end_time = time.time() + timeout
while self.state not in target_states:
if self.state in (terminal_states or []):
raise WaitStateException(
"Object: {0} is in state: {1} which is a terminal state"
" and cannot be waited on.".format(self, self.state))
else:
log.debug(
"Object %s is in state: %s. Waiting another %s"
" seconds to reach target state(s): %s...",
self,
self.state,
int(end_time - time.time()),
target_states)
time.sleep(interval)
if time.time() > end_time:
raise WaitStateException(
"Waited too long for object: {0} to become ready. It's"
" still in state: {1}".format(self, self.state))
self.refresh()
log.debug("Object: %s successfully reached target state: %s",
self, self.state)
return True
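# The class below is an illustrative sketch, not part of cloudbridge: it shows
# the subclass contract described in the docstring above, where a concrete
# resource supplies its object-specific ready states and delegates the polling
# loop to wait_for. Explicit timeout/interval values are used so that no
# provider configuration object is required.
class _SketchLifeCycleResource(BaseObjectLifeCycleMixin):
    def __init__(self):
        # Placeholder initial state that is not among the ready states below.
        self._state = 'creating'
    @property
    def state(self):
        return self._state
    def refresh(self):
        # A real resource re-queries its provider here; the sketch simply
        # reports the resource as available so the wait loop terminates.
        self._state = VolumeState.AVAILABLE
    def wait_till_ready(self, timeout=60, interval=1):
        return self.wait_for(
            [VolumeState.AVAILABLE],
            terminal_states=[VolumeState.ERROR],
            timeout=timeout,
            interval=interval)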
class BaseResultList(ResultList):
def __init__(
self, is_truncated, marker, supports_total, total=None, data=None):
# call list constructor
super(BaseResultList, self).__init__(data or [])
self._marker = marker
self._is_truncated = is_truncated
self._supports_total = True if supports_total else False
self._total = total
@property
def marker(self):
return self._marker
@property
def is_truncated(self):
return self._is_truncated
@property
def supports_total(self):
return self._supports_total
@property
def total_results(self):
return self._total
class ServerPagedResultList(BaseResultList):
"""
This is a convenience class that extends the :class:`BaseResultList` class
and provides a server side implementation of paging. It is meant for use by
provider developers and is not meant for direct use by end-users.
This class can be used to wrap a partial result list when an operation
supports server side paging.
"""
@property
def supports_server_paging(self):
return True
@property
def data(self):
raise NotImplementedError(
"ServerPagedResultLists do not support the data property")
class ClientPagedResultList(BaseResultList):
"""
This is a convenience class that extends the :class:`BaseResultList` class
and provides a client side implementation of paging. It is meant for use by
provider developers and is not meant for direct use by end-users.
This class can be used to wrap a full result list when an operation does
not support server side paging. This class will then provide a paged view
of the full result set entirely on the client side.
"""
def __init__(self, provider, objects, limit=None, marker=None):
self._objects = objects
limit = limit or provider.config.default_result_limit
total_size = len(objects)
if marker:
from_marker = itertools.dropwhile(
lambda obj: not obj.id == marker, objects)
# skip one past the marker
next(from_marker, None)
objects = list(from_marker)
is_truncated = len(objects) > limit
results = list(itertools.islice(objects, limit))
super(ClientPagedResultList, self).__init__(
is_truncated,
results[-1].id if is_truncated else None,
True, total=total_size,
data=results)
@property
def supports_server_paging(self):
return False
@property
def data(self):
return self._objects
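def _client_paging_sketch():
    """Illustrative sketch, not part of cloudbridge: demonstrates the
    client-side paging described above using a stand-in record type. Any
    object exposing an ``id`` attribute works, and passing an explicit limit
    means no provider configuration needs to be read.
    """
    from collections import namedtuple
    _Record = namedtuple('_Record', 'id')
    records = [_Record('a'), _Record('b'), _Record('c')]
    first_page = ClientPagedResultList(None, records, limit=2)
    # first_page iterates over the first two records, keeps the full set in
    # .data and exposes the id of the last returned record as the marker.
    second_page = ClientPagedResultList(None, records, limit=2,
                                        marker=first_page.marker)
    # second_page iterates over the remaining record and is not truncated.
    return first_page, second_page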
class BasePageableObjectMixin(PageableObjectMixin):
"""
A mixin to provide iteration capability for a class
    that supports a list(limit, marker) method.
"""
def __iter__(self):
marker = None
result_list = self.list(marker=marker)
if result_list.supports_server_paging:
for result in result_list:
yield result
            while result_list.is_truncated:
                # Advance the marker to the next server-side page before
                # fetching it; otherwise the first page would repeat forever.
                marker = result_list.marker
                result_list = self.list(marker=marker)
                for result in result_list:
                    yield result
else:
for result in result_list.data:
yield result
class BaseInstanceType(InstanceType, BaseCloudResource):
def __init__(self, provider):
super(BaseInstanceType, self).__init__(provider)
def __eq__(self, other):
return (isinstance(other, InstanceType) and
# pylint:disable=protected-access
self._provider == other._provider and
self.id == other.id)
@property
def size_total_disk(self):
return self.size_root_disk + self.size_ephemeral_disks
def __repr__(self):
return "<CB-{0}: {1} ({2})>".format(self.__class__.__name__,
self.name, self.id)
class BaseInstance(BaseCloudResource, BaseObjectLifeCycleMixin, Instance):
def __init__(self, provider):
super(BaseInstance, self).__init__(provider)
def __eq__(self, other):
return (isinstance(other, Instance) and
# pylint:disable=protected-access
self._provider == other._provider and
self.id == other.id and
# check from most to least likely mutables
self.state == other.state and
self.name == other.name and
self.security_groups == other.security_groups and
self.public_ips == other.public_ips and
self.private_ips == other.private_ips and
self.image_id == other.image_id)
def wait_till_ready(self, timeout=None, interval=None):
self.wait_for(
[InstanceState.RUNNING],
terminal_states=[InstanceState.TERMINATED, InstanceState.ERROR],
timeout=timeout,
interval=interval)
def __repr__(self):
return "<CB-{0}: {1} ({2})>".format(self.__class__.__name__,
self.name, self.id)
class BaseLaunchConfig(LaunchConfig):
def __init__(self, provider):
self.provider = provider
self.block_devices = []
self.network_interfaces = []
class BlockDeviceMapping(object):
"""
Represents a block device mapping
"""
def __init__(self, is_volume=False, source=None, is_root=None,
size=None, delete_on_terminate=None):
self.is_volume = is_volume
self.source = source
self.is_root = is_root
self.size = size
self.delete_on_terminate = delete_on_terminate
def add_ephemeral_device(self):
block_device = BaseLaunchConfig.BlockDeviceMapping()
self.block_devices.append(block_device)
def add_volume_device(self, source=None, is_root=None, size=None,
delete_on_terminate=None):
block_device = self._validate_volume_device(
source=source, is_root=is_root, size=size,
delete_on_terminate=delete_on_terminate)
self.block_devices.append(block_device)
def _validate_volume_device(self, source=None, is_root=None,
size=None, delete_on_terminate=None):
"""
Validates a volume based device and throws an
InvalidConfigurationException if the configuration is incorrect.
"""
if source is None and not size:
raise InvalidConfigurationException(
"A size must be specified for a blank new volume")
if source and \
not isinstance(source, (Snapshot, Volume, MachineImage)):
raise InvalidConfigurationException(
"Source must be a Snapshot, Volume, MachineImage or None")
if size:
if not isinstance(size, six.integer_types) or not size > 0:
raise InvalidConfigurationException(
"The size must be None or a number greater than 0")
if is_root:
for bd in self.block_devices:
if bd.is_root:
raise InvalidConfigurationException(
"An existing block device: {0} has already been"
" marked as root. There can only be one root device.")
return BaseLaunchConfig.BlockDeviceMapping(
is_volume=True, source=source, is_root=is_root, size=size,
delete_on_terminate=delete_on_terminate)
def add_network_interface(self, net_id):
self.network_interfaces.append(net_id)
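def _launch_config_sketch(provider=None):
    """Illustrative sketch, not part of cloudbridge: combines the
    BaseLaunchConfig helpers above. The provider argument is not used by
    these helpers and the network id is a placeholder, not a real network.
    """
    cfg = BaseLaunchConfig(provider)
    cfg.add_volume_device(size=20, is_root=True, delete_on_terminate=True)
    cfg.add_ephemeral_device()
    cfg.add_network_interface('net-placeholder')
    # A blank volume without a size, or a second device marked as root, would
    # raise InvalidConfigurationException via _validate_volume_device.
    return cfg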
class BaseMachineImage(
BaseCloudResource, BaseObjectLifeCycleMixin, MachineImage):
def __init__(self, provider):
super(BaseMachineImage, self).__init__(provider)
def __eq__(self, other):
return (isinstance(other, MachineImage) and
# pylint:disable=protected-access
self._provider == other._provider and
self.id == other.id and
# check from most to least likely mutables
self.state == other.state and
self.name == other.name and
self.description == other.description)
def wait_till_ready(self, timeout=None, interval=None):
self.wait_for(
[MachineImageState.AVAILABLE],
terminal_states=[MachineImageState.ERROR],
timeout=timeout,
interval=interval)
def __repr__(self):
return "<CB-{0}: {1} ({2})>".format(self.__class__.__name__,
self.name, self.id)
class BaseAttachmentInfo(AttachmentInfo):
def __init__(self, volume, instance_id, device):
self._volume = volume
self._instance_id = instance_id
self._device = device
@property
def volume(self):
return self._volume
@property
def instance_id(self):
return self._instance_id
@property
def device(self):
return self._device
class BaseVolume(BaseCloudResource, BaseObjectLifeCycleMixin, Volume):
def __init__(self, provider):
super(BaseVolume, self).__init__(provider)
def __eq__(self, other):
return (isinstance(other, Volume) and
# pylint:disable=protected-access
self._provider == other._provider and
self.id == other.id and
# check from most to least likely mutables
self.state == other.state and
self.name == other.name)
def wait_till_ready(self, timeout=None, interval=None):
self.wait_for(
[VolumeState.AVAILABLE],
terminal_states=[VolumeState.ERROR, VolumeState.DELETED],
timeout=timeout,
interval=interval)
def __repr__(self):
return "<CB-{0}: {1} ({2})>".format(self.__class__.__name__,
self.name, self.id)
class BaseSnapshot(BaseCloudResource, BaseObjectLifeCycleMixin, Snapshot):
def __init__(self, provider):
super(BaseSnapshot, self).__init__(provider)
def __eq__(self, other):
return (isinstance(other, Snapshot) and
# pylint:disable=protected-access
self._provider == other._provider and
self.id == other.id and
# check from most to least likely mutables
self.state == other.state and
self.name == other.name)
def wait_till_ready(self, timeout=None, interval=None):
self.wait_for(
[SnapshotState.AVAILABLE],
terminal_states=[SnapshotState.ERROR],
timeout=timeout,
interval=interval)
def __repr__(self):
return "<CB-{0}: {1} ({2})>".format(self.__class__.__name__,
self.name, self.id)
class BaseKeyPair(KeyPair, BaseCloudResource):
def __init__(self, provider, key_pair):
super(BaseKeyPair, self).__init__(provider)
self._key_pair = key_pair
def __eq__(self, other):
return (isinstance(other, KeyPair) and
# pylint:disable=protected-access
self._provider == other._provider and
self.name == other.name)
@property
def id(self):
"""
Return the id of this key pair.
"""
return self._key_pair.name
@property
def name(self):
"""
Return the name of this key pair.
"""
return self._key_pair.name
def delete(self):
"""
Delete this KeyPair.
:rtype: bool
:return: True if successful, otherwise False.
"""
# This implementation assumes the `delete` method exists across
# multiple providers.
        return self._key_pair.delete()
def __repr__(self):
return "<CBKeyPair: {0}>".format(self.name)
class BaseSecurityGroup(SecurityGroup, BaseCloudResource):
def __init__(self, provider, security_group):
super(BaseSecurityGroup, self).__init__(provider)
self._security_group = security_group
def __eq__(self, other):
"""
Check if all the defined rules match across both security groups.
"""
return (isinstance(other, SecurityGroup) and
# pylint:disable=protected-access
self._provider == other._provider and
len(self.rules) == len(other.rules) and # Shortcut
set(self.rules) == set(other.rules))
def __ne__(self, other):
return not self.__eq__(other)
@property
def id(self):
"""
Get the ID of this security group.
:rtype: str
:return: Security group ID
"""
return self._security_group.id
@property
def name(self):
"""
Return the name of this security group.
"""
return self._security_group.name
@property
def description(self):
"""
Return the description of this security group.
"""
return self._security_group.description
def delete(self):
"""
Delete this security group.
"""
return self._security_group.delete()
def __repr__(self):
return "<CB-{0}: {1}>".format(self.__class__.__name__,
self.id)
class BaseSecurityGroupRule(SecurityGroupRule, BaseCloudResource):
def __init__(self, provider, rule, parent):
super(BaseSecurityGroupRule, self).__init__(provider)
self._rule = rule
self.parent = parent
def __repr__(self):
return ("<CBSecurityGroupRule: IP: {0}; from: {1}; to: {2}; grp: {3}>"
.format(self.ip_protocol, self.from_port, self.to_port,
self.group))
def __eq__(self, other):
return self.ip_protocol == other.ip_protocol and \
self.from_port == other.from_port and \
self.to_port == other.to_port and \
self.cidr_ip == other.cidr_ip
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
"""
Return a hash-based interpretation of all of the object's field values.
        This is required for operations on hashed collections including
``set``, ``frozenset``, and ``dict``.
"""
return hash("{0}{1}{2}{3}{4}".format(self.ip_protocol, self.from_port,
self.to_port, self.cidr_ip,
self.group))
class BasePlacementZone(PlacementZone, BaseCloudResource):
def __init__(self, provider):
super(BasePlacementZone, self).__init__(provider)
def __repr__(self):
return "<CB-{0}: {1}>".format(self.__class__.__name__,
self.id)
def __eq__(self, other):
return (isinstance(other, PlacementZone) and
# pylint:disable=protected-access
self._provider == other._provider and
self.id == other.id)
class BaseRegion(Region, BaseCloudResource):
def __init__(self, provider):
super(BaseRegion, self).__init__(provider)
def __repr__(self):
return "<CB-{0}: {1}>".format(self.__class__.__name__,
self.id)
def __eq__(self, other):
return (isinstance(other, Region) and
# pylint:disable=protected-access
self._provider == other._provider and
self.id == other.id)
def to_json(self):
attr = inspect.getmembers(self, lambda a: not(inspect.isroutine(a)))
js = {k: v for(k, v) in attr if not k.startswith('_')}
js['zones'] = [z.name for z in self.zones]
return json.dumps(js, sort_keys=True)
class BaseBucketObject(BucketObject, BaseCloudResource):
def __init__(self, provider):
super(BaseBucketObject, self).__init__(provider)
def save_content(self, target_stream):
"""
Download this object and write its
contents to the target_stream.
"""
shutil.copyfileobj(self.iter_content(), target_stream)
def __eq__(self, other):
return (isinstance(other, BucketObject) and
# pylint:disable=protected-access
self._provider == other._provider and
self.id == other.id and
# check from most to least likely mutables
self.name == other.name)
def __repr__(self):
return "<CB-{0}: {1}>".format(self.__class__.__name__,
self.name)
class BaseBucket(BasePageableObjectMixin, Bucket, BaseCloudResource):
def __init__(self, provider):
super(BaseBucket, self).__init__(provider)
def __eq__(self, other):
return (isinstance(other, Bucket) and
# pylint:disable=protected-access
self._provider == other._provider and
self.id == other.id and
# check from most to least likely mutables
self.name == other.name)
def __repr__(self):
return "<CB-{0}: {1}>".format(self.__class__.__name__,
self.name)
class BaseNetwork(BaseCloudResource, Network, BaseObjectLifeCycleMixin):
def __init__(self, provider):
super(BaseNetwork, self).__init__(provider)
def __repr__(self):
return "<CB-{0}: {1} ({2})>".format(self.__class__.__name__,
self.id, self.name)
def wait_till_ready(self, timeout=None, interval=None):
self.wait_for(
[NetworkState.AVAILABLE],
terminal_states=[NetworkState.ERROR],
timeout=timeout,
interval=interval)
def __eq__(self, other):
return (isinstance(other, Network) and
# pylint:disable=protected-access
self._provider == other._provider and
self.id == other.id)
class BaseSubnet(Subnet, BaseCloudResource):
def __init__(self, provider):
super(BaseSubnet, self).__init__(provider)
def __repr__(self):
return "<CB-{0}: {1} ({2})>".format(self.__class__.__name__,
self.id, self.name)
def __eq__(self, other):
return (isinstance(other, Subnet) and
# pylint:disable=protected-access
self._provider == other._provider and
self.id == other.id)
class BaseFloatingIP(FloatingIP, BaseCloudResource):
def __init__(self, provider):
super(BaseFloatingIP, self).__init__(provider)
def __repr__(self):
return "<CB-{0}: {1} ({2})>".format(self.__class__.__name__,
self.id, self.public_ip)
def __eq__(self, other):
return (isinstance(other, FloatingIP) and
# pylint:disable=protected-access
self._provider == other._provider and
self.id == other.id)
class BaseRouter(Router, BaseCloudResource):
def __init__(self, provider):
super(BaseRouter, self).__init__(provider)
def __repr__(self):
return "<CB-{0}: {1} ({2})>".format(self.__class__.__name__, self.id,
self.name)
def __eq__(self, other):
return (isinstance(other, Router) and
# pylint:disable=protected-access
self._provider == other._provider and
self.id == other.id)
|
py | 7dfdb545ce4984b94046a8a4e9434e8f02effa4e | """
HSIC independence test
"""
# Author: Jose A. R. Fonollosa <[email protected]>
#
# License: Apache, Version 2.0
import numpy as np
def rbf_dot2(p1, p2, deg):
if p1.ndim == 1:
p1 = p1[:, np.newaxis]
p2 = p2[:, np.newaxis]
size1 = p1.shape
size2 = p2.shape
G = np.sum(p1*p1, axis=1)[:, np.newaxis]
H = np.sum(p2*p2, axis=1)[:, np.newaxis]
Q = np.tile(G, (1,size2[0]))
R = np.tile(H.T, (size1[0],1))
H = Q + R - 2.0*np.dot(p1,p2.T)
H = np.exp(-H/2.0/(deg**2))
return H
def rbf_dot(X, deg):
#Set kernel size to median distance between points, if no kernel specified
if X.ndim == 1:
X = X[:, np.newaxis]
m = X.shape[0]
G = np.sum(X*X, axis=1)[:, np.newaxis]
Q = np.tile(G, (1, m))
H = Q + Q.T - 2.0*np.dot(X, X.T)
if deg == -1:
dists = (H - np.tril(H)).flatten()
deg = np.sqrt(0.5*np.median(dists[dists>0]))
H = np.exp(-H/2.0/(deg**2))
return H
def FastHsicTestGamma(X, Y, sig=[-1, -1], maxpnt=200):
#This function implements the HSIC independence test using a Gamma approximation
#to the test threshold
#Inputs:
# X contains dx columns, m rows. Each row is an i.i.d sample
# Y contains dy columns, m rows. Each row is an i.i.d sample
# sig[0] is kernel size for x (set to median distance if -1)
# sig[1] is kernel size for y (set to median distance if -1)
#Outputs:
# testStat: test statistic
#
#Use at most maxpnt points to save time.
m = X.shape[0]
if m>maxpnt:
        indx = np.floor(np.r_[0:m:float(m-1)/(maxpnt-1)]).astype(int)
# indx = np.r_[0:maxpnt]
Xm = X[indx].astype(float)
Ym = Y[indx].astype(float)
m = Xm.shape[0]
else:
Xm = X.astype(float)
Ym = Y.astype(float)
H = np.eye(m) - 1.0/m*np.ones((m,m))
    K = rbf_dot(Xm, sig[0])
    L = rbf_dot(Ym, sig[1])
    Kc = np.dot(H, np.dot(K, H))
    Lc = np.dot(H, np.dot(L, H))
    testStat = (1.0 / m) * (Kc.T * Lc).sum()
if ~np.isfinite(testStat):
testStat = 0
return testStat
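def _hsic_usage_sketch(n=300, seed=0):
    """Illustrative usage sketch: the HSIC statistic above is noticeably
    larger for a dependent pair than for an independent one. The sample size
    and seed are arbitrary choices, not values prescribed by this module.
    """
    rng = np.random.RandomState(seed)
    x = rng.randn(n)
    stat_independent = FastHsicTestGamma(x, rng.randn(n))
    stat_dependent = FastHsicTestGamma(x, x ** 2 + 0.1 * rng.randn(n))
    return stat_independent, stat_dependent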
|
py | 7dfdb59ac7fe34db8ccb4f66283746555b1eded8 | import os
from avgamah.core import Bot
bot = Bot()
if os.name != "nt":
import uvloop
uvloop.install()
if __name__ == "__main__":
bot.run()
|
py | 7dfdb5a859f9aba5ba57d6ccff9c91a4218c96ad | # Generated by Django 3.0.4 on 2020-03-15 12:08
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('courses', '0010_remove_mesimi_lenda'),
]
operations = [
migrations.CreateModel(
name='Lesson',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('slug', models.SlugField()),
('title', models.CharField(max_length=30)),
('video_url', models.FileField(null=True, upload_to='videos/')),
('thumbnail', models.ImageField(upload_to='')),
('position', models.IntegerField()),
('course', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='courses.Lendet')),
],
),
migrations.DeleteModel(
name='Mesimi',
),
]
|
py | 7dfdb5f977665942e820c1c3d6fe148ffeef9ba7 | a = {}
b = 123
c = ''
d = {}
print(a)
e = 123.456
f = [1, "2", "3.14", False, {}, [4, "5", {}, True]]
g = (1, "2", True, None) |
py | 7dfdb64b94816c61361359d9f603aeef6f6da9b3 | import os
from m2cgen import assemblers, interpreters
from tests import utils
from tests.e2e.executors import base
EXECUTOR_CODE_TPL = """
input_array = ARGV.map(&:to_f)
{model_code}
res = score(input_array)
{print_code}
"""
PRINT_SCALAR = """
puts res
"""
PRINT_VECTOR = """
puts res.join(" ")
"""
class RubyExecutor(base.BaseExecutor):
model_name = "score"
def __init__(self, model):
self.model = model
self.interpreter = interpreters.RubyInterpreter()
assembler_cls = assemblers.get_assembler_cls(model)
self.model_ast = assembler_cls(model).assemble()
self._ruby = "ruby"
def predict(self, X):
file_name = os.path.join(self._resource_tmp_dir,
f"{self.model_name}.rb")
exec_args = [self._ruby,
file_name,
*map(utils.format_arg, X)]
return utils.predict_from_commandline(exec_args)
def prepare(self):
if self.model_ast.output_size > 1:
print_code = PRINT_VECTOR
else:
print_code = PRINT_SCALAR
executor_code = EXECUTOR_CODE_TPL.format(
model_code=self.interpreter.interpret(self.model_ast),
print_code=print_code)
file_name = os.path.join(
self._resource_tmp_dir, f"{self.model_name}.rb")
with open(file_name, "w") as f:
f.write(executor_code)
|
py | 7dfdb64e140d3b1ef2528ebbc937606d795b4cb6 | #!/usr/bin/env python
def replaced(phenny, input):
command = input.group(1)
responses = {
'cp': '.cp has been replaced by .u',
'pc': '.pc has been replaced by .u',
'unicode': '.unicode has been replaced by .u',
'compare': '.compare has been replaced by .gcs (googlecounts)',
# 'map': 'the .map command has been removed; ask sbp for details',
'acronym': 'the .acronym command has been removed; ask sbp for details',
# 'img': 'the .img command has been removed; ask sbp for details',
'v': '.v has been replaced by .val',
        'validate': '.validate has been replaced by .val',
# 'rates': "moon wanter. moOOoon wanter!",
'web': 'the .web command has been removed; ask sbp for details',
'origin': ".origin hasn't been ported to my new codebase yet"
# 'gs': 'sorry, .gs no longer works'
}
try: response = responses[command]
except KeyError: return
else: phenny.reply(response)
replaced.commands = [
'cp', 'pc', 'unicode', 'compare', 'map', 'acronym',
'v', 'validate', 'thesaurus', 'web', 'mangle', 'origin',
'swhack'
]
replaced.priority = 'low'
if __name__ == '__main__':
print __doc__.strip()
|
py | 7dfdb6baeb783c3c24a064876b9b5c9b1632a6dd | #!/usr/bin/env python
"""
Check FASTA and GFF files for compatibility.
Usage::
python -m riboviz.tools.check_fasta_gff.py [-h] -f FASTA -g GFF
-h, --help show this help message and exit
-f FASTA, --fasta FASTA
fasta file input
-g GFF, --gff GFF gff3 file input
See :py:func:`riboviz.check_fasta_gff.check_fasta_gff`.
"""
import argparse
from riboviz import check_fasta_gff
from riboviz import provenance
def parse_command_line_options():
"""
Parse command-line options.
:returns: command-line options
:rtype: argparse.Namespace
"""
parser = argparse.ArgumentParser(
description="Check FASTA and GFF files for compatibility")
parser.add_argument("-f",
"--fasta",
dest="fasta",
required=True,
help="fasta file input")
parser.add_argument("-g",
"--gff",
dest="gff",
required=True,
help="gff3 file input")
options = parser.parse_args()
return options
def invoke_check_fasta_gff():
"""
Parse command-line options then invoke
:py:func:`riboviz.check_fasta_gff.check_fasta_gff`.
"""
print(provenance.write_provenance_to_str(__file__))
options = parse_command_line_options()
fasta = options.fasta
gff = options.gff
check_fasta_gff.check_fasta_gff(fasta, gff)
if __name__ == "__main__":
invoke_check_fasta_gff()
|
py | 7dfdb70fc1d323fabd4a7a811495a1ac56a111bf | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrest.exceptions import DeserializationError
from msrestazure.azure_operation import AzureOperationPoller
from .. import models
class ConfigurationsOperations(object):
"""ConfigurationsOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: The API version to use for the request. Constant value: "2017-12-01".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2017-12-01"
self.config = config
def _create_or_update_initial(
self, resource_group_name, server_name, configuration_name, value=None, source=None, custom_headers=None, raw=False, **operation_config):
parameters = models.Configuration(value=value, source=source)
# Construct URL
url = self.create_or_update.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serverName': self._serialize.url("server_name", server_name, 'str'),
'configurationName': self._serialize.url("configuration_name", configuration_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'Configuration')
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('Configuration', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def create_or_update(
self, resource_group_name, server_name, configuration_name, value=None, source=None, custom_headers=None, raw=False, **operation_config):
"""Updates a configuration of a server.
:param resource_group_name: The name of the resource group that
contains the resource. You can obtain this value from the Azure
Resource Manager API or the portal.
:type resource_group_name: str
:param server_name: The name of the server.
:type server_name: str
:param configuration_name: The name of the server configuration.
:type configuration_name: str
:param value: Value of the configuration.
:type value: str
:param source: Source of the configuration.
:type source: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:return: An instance of AzureOperationPoller that returns
Configuration or ClientRawResponse if raw=true
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.rdbms.mysql.models.Configuration]
or ~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
server_name=server_name,
configuration_name=configuration_name,
value=value,
source=source,
custom_headers=custom_headers,
raw=True,
**operation_config
)
if raw:
return raw_result
# Construct and send request
def long_running_send():
return raw_result.response
def get_long_running_status(status_link, headers=None):
request = self._client.get(status_link)
if headers:
request.headers.update(headers)
header_parameters = {}
header_parameters['x-ms-client-request-id'] = raw_result.response.request.headers['x-ms-client-request-id']
return self._client.send(
request, header_parameters, stream=False, **operation_config)
def get_long_running_output(response):
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = self._deserialize('Configuration', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
long_running_operation_timeout = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
return AzureOperationPoller(
long_running_send, get_long_running_output,
get_long_running_status, long_running_operation_timeout)
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforMySQL/servers/{serverName}/configurations/{configurationName}'}
def get(
self, resource_group_name, server_name, configuration_name, custom_headers=None, raw=False, **operation_config):
"""Gets information about a configuration of server.
:param resource_group_name: The name of the resource group that
contains the resource. You can obtain this value from the Azure
Resource Manager API or the portal.
:type resource_group_name: str
:param server_name: The name of the server.
:type server_name: str
:param configuration_name: The name of the server configuration.
:type configuration_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: Configuration or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.rdbms.mysql.models.Configuration or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = self.get.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serverName': self._serialize.url("server_name", server_name, 'str'),
'configurationName': self._serialize.url("configuration_name", configuration_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('Configuration', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforMySQL/servers/{serverName}/configurations/{configurationName}'}
def list_by_server(
self, resource_group_name, server_name, custom_headers=None, raw=False, **operation_config):
"""List all the configurations in a given server.
:param resource_group_name: The name of the resource group that
contains the resource. You can obtain this value from the Azure
Resource Manager API or the portal.
:type resource_group_name: str
:param server_name: The name of the server.
:type server_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of Configuration
:rtype:
~azure.mgmt.rdbms.mysql.models.ConfigurationPaged[~azure.mgmt.rdbms.mysql.models.Configuration]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = self.list_by_server.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serverName': self._serialize.url("server_name", server_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.ConfigurationPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.ConfigurationPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
list_by_server.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforMySQL/servers/{serverName}/configurations'}
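def _configurations_usage_sketch(client, resource_group_name, server_name):
    """Illustrative sketch of typical calls to the operations defined above.
    ``client`` is assumed to be an already-authenticated MySQL management
    client exposing this operations group as ``client.configurations``; the
    configuration name used here is a placeholder.
    """
    poller = client.configurations.create_or_update(
        resource_group_name, server_name, 'wait_timeout',
        value='120', source='user-override')
    updated = poller.result()  # AzureOperationPoller blocks until completion
    current = client.configurations.get(
        resource_group_name, server_name, 'wait_timeout')
    names = [c.name for c in client.configurations.list_by_server(
        resource_group_name, server_name)]
    return updated, current, names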
|
py | 7dfdb7a5224160e3ad809e1e99c7e82ee392a8b8 | # Copyright 2020 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import importlib
from scipy.ndimage import zoom as zoom_scipy
from parameterized import parameterized
from monai.transforms import RandZoom
from tests.utils import NumpyImageTestCase2D
VALID_CASES = [(0.9, 1.1, 3, 'constant', 0, True, False, False)]
class TestRandZoom(NumpyImageTestCase2D):
@parameterized.expand(VALID_CASES)
def test_correct_results(self, min_zoom, max_zoom, order, mode,
cval, prefilter, use_gpu, keep_size):
random_zoom = RandZoom(prob=1.0, min_zoom=min_zoom, max_zoom=max_zoom, order=order,
mode=mode, cval=cval, prefilter=prefilter, use_gpu=use_gpu,
keep_size=keep_size)
random_zoom.set_random_state(234)
zoomed = random_zoom(self.imt[0])
expected = list()
for channel in self.imt[0]:
expected.append(zoom_scipy(channel, zoom=random_zoom._zoom, mode=mode, order=order,
cval=cval, prefilter=prefilter))
expected = np.stack(expected).astype(np.float32)
self.assertTrue(np.allclose(expected, zoomed))
@parameterized.expand([
(0.8, 1.2, 1, 'constant', 0, True)
])
def test_gpu_zoom(self, min_zoom, max_zoom, order, mode, cval, prefilter):
if importlib.util.find_spec('cupy'):
random_zoom = RandZoom(
prob=1.0, min_zoom=min_zoom, max_zoom=max_zoom, order=order,
mode=mode, cval=cval, prefilter=prefilter, use_gpu=True,
keep_size=False)
random_zoom.set_random_state(234)
zoomed = random_zoom(self.imt[0])
expected = list()
for channel in self.imt[0]:
expected.append(zoom_scipy(channel, zoom=random_zoom._zoom, mode=mode, order=order,
cval=cval, prefilter=prefilter))
expected = np.stack(expected).astype(np.float32)
self.assertTrue(np.allclose(expected, zoomed))
def test_keep_size(self):
random_zoom = RandZoom(prob=1.0, min_zoom=0.6,
max_zoom=0.7, keep_size=True)
zoomed = random_zoom(self.imt[0])
self.assertTrue(np.array_equal(zoomed.shape, self.imt.shape[1:]))
@parameterized.expand([
("no_min_zoom", None, 1.1, 1, TypeError),
("invalid_order", 0.9, 1.1 , 's', AssertionError)
])
def test_invalid_inputs(self, _, min_zoom, max_zoom, order, raises):
with self.assertRaises(raises):
random_zoom = RandZoom(prob=1.0, min_zoom=min_zoom, max_zoom=max_zoom, order=order)
zoomed = random_zoom(self.imt[0])
if __name__ == '__main__':
unittest.main()
|
py | 7dfdb87f6f7d80d65182a10c8fad194c1fee9c40 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
"""
This module contains a Google Cloud Storage hook.
"""
import gzip as gz
import os
import shutil
import warnings
from io import BytesIO
from os import path
from typing import Optional, Set, Tuple, Union
from urllib.parse import urlparse
from google.api_core.exceptions import NotFound
from google.cloud import storage
from airflow.exceptions import AirflowException
from airflow.providers.google.common.hooks.base_google import GoogleBaseHook
from airflow.version import version
class GCSHook(GoogleBaseHook):
"""
Interact with Google Cloud Storage. This hook uses the Google Cloud Platform
connection.
"""
_conn = None # type: Optional[storage.Client]
def __init__(
self,
gcp_conn_id: str = 'google_cloud_default',
delegate_to: Optional[str] = None,
google_cloud_storage_conn_id: Optional[str] = None
) -> None:
# To preserve backward compatibility
# TODO: remove one day
if google_cloud_storage_conn_id:
warnings.warn(
"The google_cloud_storage_conn_id parameter has been deprecated. You should pass "
"the gcp_conn_id parameter.", DeprecationWarning, stacklevel=2)
gcp_conn_id = google_cloud_storage_conn_id
super().__init__(gcp_conn_id=gcp_conn_id, delegate_to=delegate_to)
def get_conn(self):
"""
Returns a Google Cloud Storage service object.
"""
if not self._conn:
self._conn = storage.Client(credentials=self._get_credentials(),
client_info=self.client_info,
project=self.project_id)
return self._conn
def copy(self, source_bucket, source_object, destination_bucket=None,
destination_object=None):
"""
Copies an object from a bucket to another, with renaming if requested.
destination_bucket or destination_object can be omitted, in which case
source bucket/object is used, but not both.
:param source_bucket: The bucket of the object to copy from.
:type source_bucket: str
:param source_object: The object to copy.
:type source_object: str
        :param destination_bucket: The bucket to copy the object to.
Can be omitted; then the same bucket is used.
:type destination_bucket: str
:param destination_object: The (renamed) path of the object if given.
Can be omitted; then the same name is used.
:type destination_object: str
"""
destination_bucket = destination_bucket or source_bucket
destination_object = destination_object or source_object
if source_bucket == destination_bucket and \
source_object == destination_object:
raise ValueError(
'Either source/destination bucket or source/destination object '
'must be different, not both the same: bucket=%s, object=%s' %
(source_bucket, source_object))
if not source_bucket or not source_object:
raise ValueError('source_bucket and source_object cannot be empty.')
client = self.get_conn()
source_bucket = client.bucket(source_bucket)
source_object = source_bucket.blob(source_object)
destination_bucket = client.bucket(destination_bucket)
destination_object = source_bucket.copy_blob(
blob=source_object,
destination_bucket=destination_bucket,
new_name=destination_object)
self.log.info('Object %s in bucket %s copied to object %s in bucket %s',
source_object.name, source_bucket.name,
destination_object.name, destination_bucket.name)
def rewrite(self, source_bucket, source_object, destination_bucket,
destination_object=None):
"""
        Has the same functionality as copy, except that it will work on files
over 5 TB, as well as when copying between locations and/or storage
classes.
destination_object can be omitted, in which case source_object is used.
:param source_bucket: The bucket of the object to copy from.
:type source_bucket: str
:param source_object: The object to copy.
:type source_object: str
        :param destination_bucket: The bucket to copy the object to.
:type destination_bucket: str
:param destination_object: The (renamed) path of the object if given.
Can be omitted; then the same name is used.
:type destination_object: str
"""
destination_object = destination_object or source_object
if (source_bucket == destination_bucket and
source_object == destination_object):
raise ValueError(
'Either source/destination bucket or source/destination object '
'must be different, not both the same: bucket=%s, object=%s' %
(source_bucket, source_object))
if not source_bucket or not source_object:
raise ValueError('source_bucket and source_object cannot be empty.')
client = self.get_conn()
source_bucket = client.bucket(source_bucket)
source_object = source_bucket.blob(blob_name=source_object)
destination_bucket = client.bucket(destination_bucket)
token, bytes_rewritten, total_bytes = destination_bucket.blob(
blob_name=destination_object).rewrite(
source=source_object
)
self.log.info('Total Bytes: %s | Bytes Written: %s',
total_bytes, bytes_rewritten)
while token is not None:
token, bytes_rewritten, total_bytes = destination_bucket.blob(
blob_name=destination_object).rewrite(
source=source_object, token=token
)
self.log.info('Total Bytes: %s | Bytes Written: %s',
total_bytes, bytes_rewritten)
self.log.info('Object %s in bucket %s rewritten to object %s in bucket %s',
source_object.name, source_bucket.name,
destination_object, destination_bucket.name)
def download(self, bucket_name, object_name, filename=None):
"""
Downloads a file from Google Cloud Storage.
        When no filename is supplied, this method loads the file into memory and returns its
content. When a filename is supplied, it writes the file to the specified location and
returns the location. For file sizes that exceed the available memory it is recommended
to write to a file.
:param bucket_name: The bucket to fetch from.
:type bucket_name: str
:param object_name: The object to fetch.
:type object_name: str
:param filename: If set, a local file path where the file should be written to.
:type filename: str
"""
# TODO: future improvement check file size before downloading,
# to check for local space availability
client = self.get_conn()
bucket = client.bucket(bucket_name)
blob = bucket.blob(blob_name=object_name)
if filename:
blob.download_to_filename(filename)
self.log.info('File downloaded to %s', filename)
return filename
else:
return blob.download_as_string()
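    # Illustrative sketch (not part of the hook) of the two download modes
    # described above, assuming a configured default GCP connection and
    # placeholder bucket/object names:
    #
    #   hook = GCSHook()
    #   content = hook.download('my-bucket', 'data/file.csv')      # bytes
    #   local_path = hook.download('my-bucket', 'data/file.csv',
    #                              filename='/tmp/file.csv')       # file path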
def upload(self, bucket_name: str, object_name: str, filename: Optional[str] = None,
data: Optional[Union[str, bytes]] = None, mime_type: Optional[str] = None, gzip: bool = False,
encoding: str = 'utf-8') -> None:
"""
Uploads a local file or file data as string or bytes to Google Cloud Storage.
:param bucket_name: The bucket to upload to.
:type bucket_name: str
:param object_name: The object name to set when uploading the file.
:type object_name: str
:param filename: The local file path to the file to be uploaded.
:type filename: str
:param data: The file's data as a string or bytes to be uploaded.
:type data: str
:param mime_type: The file's mime type set when uploading the file.
:type mime_type: str
:param gzip: Option to compress local file or file data for upload
:type gzip: bool
:param encoding: bytes encoding for file data if provided as string
:type encoding: str
"""
client = self.get_conn()
bucket = client.bucket(bucket_name)
blob = bucket.blob(blob_name=object_name)
if filename and data:
raise ValueError("'filename' and 'data' parameter provided. Please "
"specify a single parameter, either 'filename' for "
"local file uploads or 'data' for file content uploads.")
elif filename:
if not mime_type:
mime_type = 'application/octet-stream'
if gzip:
filename_gz = filename + '.gz'
with open(filename, 'rb') as f_in:
with gz.open(filename_gz, 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
filename = filename_gz
blob.upload_from_filename(filename=filename,
content_type=mime_type)
if gzip:
os.remove(filename)
self.log.info('File %s uploaded to %s in %s bucket', filename, object_name, bucket_name)
elif data:
if not mime_type:
mime_type = 'text/plain'
if gzip:
if isinstance(data, str):
data = bytes(data, encoding)
out = BytesIO()
with gz.GzipFile(fileobj=out, mode="w") as f:
f.write(data)
data = out.getvalue()
blob.upload_from_string(data,
content_type=mime_type)
self.log.info('Data stream uploaded to %s in %s bucket', object_name, bucket_name)
else:
raise ValueError("'filename' and 'data' parameter missing. "
"One is required to upload to gcs.")
def exists(self, bucket_name, object_name):
"""
Checks for the existence of a file in Google Cloud Storage.
:param bucket_name: The Google Cloud Storage bucket where the object is.
:type bucket_name: str
        :param object_name: The name of the blob to check in the Google cloud
storage bucket.
:type object_name: str
"""
client = self.get_conn()
bucket = client.bucket(bucket_name)
blob = bucket.blob(blob_name=object_name)
return blob.exists()
def get_blob_update_time(self, bucket_name, object_name):
"""
Get the update time of a file in Google Cloud Storage
:param bucket_name: The Google Cloud Storage bucket where the object is.
:type bucket_name: str
:param object_name: The name of the blob to get updated time from the Google cloud
storage bucket.
:type object_name: str
"""
client = self.get_conn()
bucket = client.bucket(bucket_name)
blob = bucket.get_blob(blob_name=object_name)
if blob is None:
raise ValueError("Object ({}) not found in Bucket ({})".format(
object_name, bucket_name))
return blob.updated
def is_updated_after(self, bucket_name, object_name, ts):
"""
        Checks if a blob_name was updated after the given timestamp
        in Google Cloud Storage.
:param bucket_name: The Google Cloud Storage bucket where the object is.
:type bucket_name: str
:param object_name: The name of the object to check in the Google cloud
storage bucket.
:type object_name: str
:param ts: The timestamp to check against.
:type ts: datetime.datetime
"""
blob_update_time = self.get_blob_update_time(bucket_name, object_name)
if blob_update_time is not None:
import dateutil.tz
if not ts.tzinfo:
ts = ts.replace(tzinfo=dateutil.tz.tzutc())
self.log.info("Verify object date: %s > %s", blob_update_time, ts)
if blob_update_time > ts:
return True
return False
def is_updated_between(self, bucket_name, object_name, min_ts, max_ts):
"""
        Checks if a blob_name was updated between the given timestamps
        in Google Cloud Storage.
:param bucket_name: The Google Cloud Storage bucket where the object is.
:type bucket_name: str
:param object_name: The name of the object to check in the Google cloud
storage bucket.
:type object_name: str
:param min_ts: The minimum timestamp to check against.
:type min_ts: datetime.datetime
:param max_ts: The maximum timestamp to check against.
:type max_ts: datetime.datetime
"""
blob_update_time = self.get_blob_update_time(bucket_name, object_name)
if blob_update_time is not None:
import dateutil.tz
if not min_ts.tzinfo:
min_ts = min_ts.replace(tzinfo=dateutil.tz.tzutc())
if not max_ts.tzinfo:
max_ts = max_ts.replace(tzinfo=dateutil.tz.tzutc())
self.log.info("Verify object date: %s is between %s and %s", blob_update_time, min_ts, max_ts)
if min_ts <= blob_update_time < max_ts:
return True
return False
def is_updated_before(self, bucket_name, object_name, ts):
"""
        Checks if a blob_name was updated before the given timestamp
        in Google Cloud Storage.
:param bucket_name: The Google Cloud Storage bucket where the object is.
:type bucket_name: str
:param object_name: The name of the object to check in the Google cloud
storage bucket.
:type object_name: str
:param ts: The timestamp to check against.
:type ts: datetime.datetime
"""
blob_update_time = self.get_blob_update_time(bucket_name, object_name)
if blob_update_time is not None:
import dateutil.tz
if not ts.tzinfo:
ts = ts.replace(tzinfo=dateutil.tz.tzutc())
self.log.info("Verify object date: %s < %s", blob_update_time, ts)
if blob_update_time < ts:
return True
return False
def is_older_than(self, bucket_name, object_name, seconds):
"""
        Checks if an object is older than the given time (in seconds).
:param bucket_name: The Google Cloud Storage bucket where the object is.
:type bucket_name: str
:param object_name: The name of the object to check in the Google cloud
storage bucket.
:type object_name: str
:param seconds: The time in seconds to check against
:type seconds: int
"""
blob_update_time = self.get_blob_update_time(bucket_name, object_name)
if blob_update_time is not None:
from airflow.utils import timezone
from datetime import timedelta
current_time = timezone.utcnow()
given_time = current_time - timedelta(seconds=seconds)
self.log.info("Verify object date: %s is older than %s", blob_update_time, given_time)
if blob_update_time < given_time:
return True
return False
def delete(self, bucket_name, object_name):
"""
Deletes an object from the bucket.
:param bucket_name: name of the bucket, where the object resides
:type bucket_name: str
:param object_name: name of the object to delete
:type object_name: str
"""
client = self.get_conn()
bucket = client.bucket(bucket_name)
blob = bucket.blob(blob_name=object_name)
blob.delete()
self.log.info('Blob %s deleted.', object_name)
def delete_bucket(self, bucket_name: str, force: bool = False):
"""
Delete a bucket object from the Google Cloud Storage.
:param bucket_name: name of the bucket which will be deleted
:type bucket_name: str
        :param force: If False (the default), deletion fails when the bucket is
            not empty; set force=True to delete the bucket together with its
            contents.
        :type force: bool
"""
client = self.get_conn()
bucket = client.bucket(bucket_name)
self.log.info("Deleting %s bucket", bucket_name)
try:
bucket.delete(force=force)
self.log.info("Bucket %s has been deleted", bucket_name)
except NotFound:
self.log.info("Bucket %s not exists", bucket_name)
def list(self, bucket_name, versions=None, max_results=None, prefix=None, delimiter=None):
"""
        List all objects from the bucket with the given string prefix in the name
:param bucket_name: bucket name
:type bucket_name: str
:param versions: if true, list all versions of the objects
:type versions: bool
:param max_results: max count of items to return in a single page of responses
:type max_results: int
        :param prefix: prefix string which filters objects whose names begin with
this prefix
:type prefix: str
        :param delimiter: filters objects based on the delimiter (e.g. '.csv')
:type delimiter: str
        :return: a list of object names matching the filtering criteria
"""
client = self.get_conn()
bucket = client.bucket(bucket_name)
ids = []
page_token = None
while True:
blobs = bucket.list_blobs(
max_results=max_results,
page_token=page_token,
prefix=prefix,
delimiter=delimiter,
versions=versions
)
blob_names = []
for blob in blobs:
blob_names.append(blob.name)
prefixes = blobs.prefixes
if prefixes:
ids += list(prefixes)
else:
ids += blob_names
page_token = blobs.next_page_token
if page_token is None:
# empty next page token
break
return ids
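    # Illustrative sketch (not part of the hook) of typical calls to the
    # paging loop above; names are placeholders. With a delimiter such as
    # '.csv' the matching name prefixes collected from blobs.prefixes are
    # returned instead of the individual blob names.
    #
    #   hook = GCSHook()
    #   all_exports = hook.list('my-bucket', prefix='exports/')
    #   csv_exports = hook.list('my-bucket', prefix='exports/',
    #                           delimiter='.csv')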
def get_size(self, bucket_name, object_name):
"""
Gets the size of a file in Google Cloud Storage.
:param bucket_name: The Google Cloud Storage bucket where the blob_name is.
:type bucket_name: str
:param object_name: The name of the object to check in the Google
cloud storage bucket_name.
:type object_name: str
"""
self.log.info('Checking the file size of object: %s in bucket_name: %s',
object_name,
bucket_name)
client = self.get_conn()
bucket = client.bucket(bucket_name)
blob = bucket.get_blob(blob_name=object_name)
blob_size = blob.size
self.log.info('The file size of %s is %s bytes.', object_name, blob_size)
return blob_size
def get_crc32c(self, bucket_name, object_name):
"""
Gets the CRC32c checksum of an object in Google Cloud Storage.
:param bucket_name: The Google Cloud Storage bucket where the blob_name is.
:type bucket_name: str
:param object_name: The name of the object to check in the Google cloud
storage bucket_name.
:type object_name: str
"""
self.log.info('Retrieving the crc32c checksum of '
'object_name: %s in bucket_name: %s', object_name, bucket_name)
client = self.get_conn()
bucket = client.bucket(bucket_name)
blob = bucket.get_blob(blob_name=object_name)
blob_crc32c = blob.crc32c
self.log.info('The crc32c checksum of %s is %s', object_name, blob_crc32c)
return blob_crc32c
def get_md5hash(self, bucket_name, object_name):
"""
Gets the MD5 hash of an object in Google Cloud Storage.
:param bucket_name: The Google Cloud Storage bucket where the blob_name is.
:type bucket_name: str
:param object_name: The name of the object to check in the Google cloud
storage bucket_name.
:type object_name: str
"""
self.log.info('Retrieving the MD5 hash of '
'object: %s in bucket: %s', object_name, bucket_name)
client = self.get_conn()
bucket = client.bucket(bucket_name)
blob = bucket.get_blob(blob_name=object_name)
blob_md5hash = blob.md5_hash
self.log.info('The md5Hash of %s is %s', object_name, blob_md5hash)
return blob_md5hash
@GoogleBaseHook.fallback_to_default_project_id
def create_bucket(self,
bucket_name,
resource=None,
storage_class='MULTI_REGIONAL',
location='US',
project_id=None,
labels=None
):
"""
Creates a new bucket. Google Cloud Storage uses a flat namespace, so
you can't create a bucket with a name that is already in use.
.. seealso::
For more information, see Bucket Naming Guidelines:
https://cloud.google.com/storage/docs/bucketnaming.html#requirements
:param bucket_name: The name of the bucket.
:type bucket_name: str
:param resource: An optional dict with parameters for creating the bucket.
For information on available parameters, see Cloud Storage API doc:
https://cloud.google.com/storage/docs/json_api/v1/buckets/insert
:type resource: dict
:param storage_class: This defines how objects in the bucket are stored
and determines the SLA and the cost of storage. Values include
- ``MULTI_REGIONAL``
- ``REGIONAL``
- ``STANDARD``
- ``NEARLINE``
- ``COLDLINE``.
If this value is not specified when the bucket is
created, it will default to STANDARD.
:type storage_class: str
:param location: The location of the bucket.
Object data for objects in the bucket resides in physical storage
within this region. Defaults to US.
.. seealso::
https://developers.google.com/storage/docs/bucket-locations
:type location: str
:param project_id: The ID of the GCP Project.
:type project_id: str
:param labels: User-provided labels, in key/value pairs.
:type labels: dict
:return: If successful, it returns the ``id`` of the bucket.
"""
self.log.info('Creating Bucket: %s; Location: %s; Storage Class: %s',
bucket_name, location, storage_class)
# Add airflow-version label to the bucket
labels = labels or {}
labels['airflow-version'] = 'v' + version.replace('.', '-').replace('+', '-')
client = self.get_conn()
bucket = client.bucket(bucket_name=bucket_name)
bucket_resource = resource or {}
for item in bucket_resource:
if item != "name":
bucket._patch_property(name=item, value=resource[item]) # pylint: disable=protected-access
bucket.storage_class = storage_class
bucket.labels = labels
bucket.create(project=project_id, location=location)
return bucket.id
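    # Illustrative sketch (not part of the original module): creating a bucket with a
    # storage class, location and labels via the plain google-cloud-storage client.
    # The project and bucket names are placeholders.
    #
    #     from google.cloud import storage
    #     client = storage.Client()
    #     bucket = client.bucket("my-new-bucket")
    #     bucket.storage_class = "STANDARD"
    #     bucket.labels = {"team": "data-eng"}
    #     bucket.create(project="my-project", location="US")
    #     print(bucket.id)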
def insert_bucket_acl(self, bucket_name, entity, role, user_project=None):
"""
Creates a new ACL entry on the specified bucket_name.
See: https://cloud.google.com/storage/docs/json_api/v1/bucketAccessControls/insert
        :param bucket_name: Name of a bucket.
:type bucket_name: str
:param entity: The entity holding the permission, in one of the following forms:
user-userId, user-email, group-groupId, group-email, domain-domain,
project-team-projectId, allUsers, allAuthenticatedUsers.
See: https://cloud.google.com/storage/docs/access-control/lists#scopes
:type entity: str
:param role: The access permission for the entity.
Acceptable values are: "OWNER", "READER", "WRITER".
:type role: str
:param user_project: (Optional) The project to be billed for this request.
Required for Requester Pays buckets.
:type user_project: str
"""
self.log.info('Creating a new ACL entry in bucket: %s', bucket_name)
client = self.get_conn()
bucket = client.bucket(bucket_name=bucket_name)
bucket.acl.reload()
bucket.acl.entity_from_dict(entity_dict={"entity": entity, "role": role})
if user_project:
bucket.acl.user_project = user_project
bucket.acl.save()
self.log.info('A new ACL entry created in bucket: %s', bucket_name)
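    # Illustrative sketch (not part of the original module): granting read access on a
    # bucket to all users with the plain google-cloud-storage ACL API; the bucket name
    # is a placeholder.
    #
    #     from google.cloud import storage
    #     bucket = storage.Client().bucket("my-bucket")
    #     bucket.acl.reload()
    #     bucket.acl.entity_from_dict({"entity": "allUsers", "role": "READER"})
    #     bucket.acl.save()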
def insert_object_acl(self, bucket_name, object_name, entity, role, generation=None, user_project=None):
"""
Creates a new ACL entry on the specified object.
See: https://cloud.google.com/storage/docs/json_api/v1/objectAccessControls/insert
        :param bucket_name: Name of a bucket.
:type bucket_name: str
:param object_name: Name of the object. For information about how to URL encode
object names to be path safe, see:
https://cloud.google.com/storage/docs/json_api/#encoding
:type object_name: str
:param entity: The entity holding the permission, in one of the following forms:
user-userId, user-email, group-groupId, group-email, domain-domain,
project-team-projectId, allUsers, allAuthenticatedUsers
See: https://cloud.google.com/storage/docs/access-control/lists#scopes
:type entity: str
:param role: The access permission for the entity.
Acceptable values are: "OWNER", "READER".
:type role: str
:param generation: Optional. If present, selects a specific revision of this object.
:type generation: long
:param user_project: (Optional) The project to be billed for this request.
Required for Requester Pays buckets.
:type user_project: str
"""
self.log.info('Creating a new ACL entry for object: %s in bucket: %s',
object_name, bucket_name)
client = self.get_conn()
bucket = client.bucket(bucket_name=bucket_name)
blob = bucket.blob(blob_name=object_name, generation=generation)
# Reload fetches the current ACL from Cloud Storage.
blob.acl.reload()
blob.acl.entity_from_dict(entity_dict={"entity": entity, "role": role})
if user_project:
blob.acl.user_project = user_project
blob.acl.save()
self.log.info('A new ACL entry created for object: %s in bucket: %s',
object_name, bucket_name)
def compose(self, bucket_name, source_objects, destination_object):
"""
        Composes a list of existing objects into a new object in the same storage bucket.
        Currently it only supports up to 32 objects that can be concatenated
        in a single operation.
https://cloud.google.com/storage/docs/json_api/v1/objects/compose
:param bucket_name: The name of the bucket containing the source objects.
This is also the same bucket to store the composed destination object.
:type bucket_name: str
:param source_objects: The list of source objects that will be composed
into a single object.
:type source_objects: list
        :param destination_object: The path of the object that will hold the composed result.
:type destination_object: str
"""
if not source_objects:
raise ValueError('source_objects cannot be empty.')
if not bucket_name or not destination_object:
raise ValueError('bucket_name and destination_object cannot be empty.')
self.log.info("Composing %s to %s in the bucket %s",
source_objects, destination_object, bucket_name)
client = self.get_conn()
bucket = client.bucket(bucket_name)
destination_blob = bucket.blob(destination_object)
destination_blob.compose(
sources=[
bucket.blob(blob_name=source_object) for source_object in source_objects
])
self.log.info("Completed successfully.")
def sync(
self,
source_bucket: str,
destination_bucket: str,
source_object: Optional[str] = None,
destination_object: Optional[str] = None,
recursive: bool = True,
allow_overwrite: bool = False,
delete_extra_files: bool = False
):
"""
Synchronizes the contents of the buckets.
Parameters ``source_object`` and ``destination_object`` describe the root sync directories. If they
are not passed, the entire bucket will be synchronized. If they are passed, they should point
to directories.
.. note::
The synchronization of individual files is not supported. Only entire directories can be
synchronized.
:param source_bucket: The name of the bucket containing the source objects.
:type source_bucket: str
:param destination_bucket: The name of the bucket containing the destination objects.
:type destination_bucket: str
:param source_object: The root sync directory in the source bucket.
:type source_object: Optional[str]
:param destination_object: The root sync directory in the destination bucket.
:type destination_object: Optional[str]
        :param recursive: If True, subdirectories will be considered
        :type recursive: bool
:param allow_overwrite: if True, the files will be overwritten if a mismatched file is found.
By default, overwriting files is not allowed
:type allow_overwrite: bool
        :param delete_extra_files: if True, deletes additional files from the destination that are
            not found in the source. By default, extra files are not deleted.
.. note::
This option can delete data quickly if you specify the wrong source/destination combination.
:type delete_extra_files: bool
:return: none
"""
client = self.get_conn()
# Create bucket object
source_bucket_obj = client.bucket(source_bucket)
destination_bucket_obj = client.bucket(destination_bucket)
# Normalize parameters when they are passed
source_object = self._normalize_directory_path(source_object)
destination_object = self._normalize_directory_path(destination_object)
        # Calculate the number of characters to remove from the blob name, because they contain
        # information about the parent's path
source_object_prefix_len = len(source_object) if source_object else 0
# Prepare synchronization plan
to_copy_blobs, to_delete_blobs, to_rewrite_blobs = self._prepare_sync_plan(
source_bucket=source_bucket_obj,
destination_bucket=destination_bucket_obj,
source_object=source_object,
destination_object=destination_object,
recursive=recursive
)
self.log.info(
"Planned synchronization. To delete blobs count: %s, to upload blobs count: %s, "
"to rewrite blobs count: %s",
len(to_delete_blobs),
len(to_copy_blobs),
len(to_rewrite_blobs),
)
        # Copy missing objects to the destination bucket
if not to_copy_blobs:
self.log.info("Skipped blobs copying.")
else:
for blob in to_copy_blobs:
dst_object = self._calculate_sync_destination_path(
blob, destination_object, source_object_prefix_len
)
self.copy(
source_bucket=source_bucket_obj.name,
source_object=blob.name,
destination_bucket=destination_bucket_obj.name,
destination_object=dst_object,
)
self.log.info("Blobs copied.")
# Delete redundant files
if not to_delete_blobs:
self.log.info("Skipped blobs deleting.")
elif delete_extra_files:
# TODO: Add batch. I tried to do it, but the Google library is not stable at the moment.
for blob in to_delete_blobs:
self.delete(blob.bucket.name, blob.name)
self.log.info("Blobs deleted.")
# Overwrite files that are different
if not to_rewrite_blobs:
self.log.info("Skipped blobs overwriting.")
elif allow_overwrite:
for blob in to_rewrite_blobs:
dst_object = self._calculate_sync_destination_path(blob, destination_object,
source_object_prefix_len)
self.rewrite(
source_bucket=source_bucket_obj.name,
source_object=blob.name,
destination_bucket=destination_bucket_obj.name,
destination_object=dst_object,
)
self.log.info("Blobs rewritten.")
self.log.info("Synchronization finished.")
def _calculate_sync_destination_path(
self,
blob: storage.Blob,
destination_object: Optional[str],
source_object_prefix_len: int
) -> str:
return (
path.join(destination_object, blob.name[source_object_prefix_len:])
if destination_object
else blob.name[source_object_prefix_len:]
)
def _normalize_directory_path(self, source_object: Optional[str]) -> Optional[str]:
return (
source_object + "/" if source_object and not source_object.endswith("/") else source_object
)
@staticmethod
def _prepare_sync_plan(
source_bucket: storage.Bucket,
destination_bucket: storage.Bucket,
source_object: Optional[str],
destination_object: Optional[str],
recursive: bool,
) -> Tuple[Set[storage.Blob], Set[storage.Blob], Set[storage.Blob]]:
        # Calculate the number of characters to remove from the blob name, because they contain
        # information about the parent's path
source_object_prefix_len = len(source_object) if source_object else 0
destination_object_prefix_len = len(destination_object) if destination_object else 0
delimiter = "/" if not recursive else None
# Fetch blobs list
source_blobs = list(source_bucket.list_blobs(prefix=source_object, delimiter=delimiter))
destination_blobs = list(
destination_bucket.list_blobs(prefix=destination_object, delimiter=delimiter))
# Create indexes that allow you to identify blobs based on their name
source_names_index = {a.name[source_object_prefix_len:]: a for a in source_blobs}
destination_names_index = {a.name[destination_object_prefix_len:]: a for a in destination_blobs}
# Create sets with names without parent object name
source_names = set(source_names_index.keys())
destination_names = set(destination_names_index.keys())
# Determine objects to copy and delete
to_copy = source_names - destination_names
to_delete = destination_names - source_names
to_copy_blobs = {source_names_index[a] for a in to_copy} # type: Set[storage.Blob]
to_delete_blobs = {destination_names_index[a] for a in to_delete} # type: Set[storage.Blob]
# Find names that are in both buckets
names_to_check = source_names.intersection(destination_names)
to_rewrite_blobs = set() # type: Set[storage.Blob]
        # Compare objects based on their crc32c checksum
for current_name in names_to_check:
source_blob = source_names_index[current_name]
destination_blob = destination_names_index[current_name]
            # if the objects differ, schedule the source blob for rewrite
if source_blob.crc32c != destination_blob.crc32c:
to_rewrite_blobs.add(source_blob)
return to_copy_blobs, to_delete_blobs, to_rewrite_blobs
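    # Illustrative sketch (not part of the original module) of the set arithmetic behind
    # the sync plan, using hypothetical relative object names:
    #
    #     source_names = {"a.csv", "b.csv", "c.csv"}
    #     destination_names = {"b.csv", "c.csv", "d.csv"}
    #     to_copy = source_names - destination_names     # {"a.csv"} -> upload
    #     to_delete = destination_names - source_names   # {"d.csv"} -> remove extras
    #     to_check = source_names & destination_names    # {"b.csv", "c.csv"} -> compare crc32c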
def _parse_gcs_url(gsurl):
"""
Given a Google Cloud Storage URL (gs://<bucket>/<blob>), returns a
tuple containing the corresponding bucket and blob.
"""
parsed_url = urlparse(gsurl)
if not parsed_url.netloc:
raise AirflowException('Please provide a bucket name')
else:
bucket = parsed_url.netloc
# Remove leading '/' but NOT trailing one
blob = parsed_url.path.lstrip('/')
return bucket, blob
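# Illustrative example (not part of the original module) of what _parse_gcs_url returns:
#
#     _parse_gcs_url("gs://my-bucket/path/to/file.csv")
#     # -> ("my-bucket", "path/to/file.csv")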
|
py | 7dfdb9a6a0851b166e8381a95692fb2f79abc1c7 | from scoring_engine.models.team import Team
from scoring_engine.models.service import Service
from scoring_engine.models.account import Account
from scoring_engine.models.check import Check
from scoring_engine.models.environment import Environment
from scoring_engine.models.round import Round
from tests.scoring_engine.helpers import generate_sample_model_tree
from tests.scoring_engine.unit_test import UnitTest
class TestService(UnitTest):
def test_init_service(self):
service = Service(name="Example Service", check_name="ICMP IPv4 Check", host='127.0.0.1')
assert service.id is None
assert service.name == "Example Service"
        assert service.team is None
assert service.check_name == "ICMP IPv4 Check"
assert service.points is None
def test_basic_service(self):
team = generate_sample_model_tree('Team', self.session)
service = Service(name="Example Service", team=team, check_name="ICMP IPv4 Check", host='127.0.0.1')
self.session.add(service)
self.session.commit()
assert service.id is not None
assert service.name == "Example Service"
assert service.team == team
assert service.team_id == team.id
assert service.check_name == "ICMP IPv4 Check"
assert service.port == 0
assert service.points == 100
assert service.worker_queue == 'main'
def test_basic_service_with_worker_queue(self):
team = generate_sample_model_tree('Team', self.session)
service = Service(name="Example Service", team=team, check_name="ICMP IPv4 Check", host='127.0.0.1', worker_queue='somequeue')
self.session.add(service)
self.session.commit()
assert service.id is not None
assert service.name == "Example Service"
assert service.team == team
assert service.team_id == team.id
assert service.check_name == "ICMP IPv4 Check"
assert service.port == 0
assert service.points == 100
assert service.worker_queue == 'somequeue'
def test_basic_service_with_points(self):
team = generate_sample_model_tree('Team', self.session)
service = Service(name="Example Service", team=team, check_name="ICMP IPv4 Check", points=500, host='127.0.0.1', port=100)
self.session.add(service)
self.session.commit()
assert service.id is not None
assert service.name == "Example Service"
assert service.team == team
assert service.team_id == team.id
assert service.check_name == "ICMP IPv4 Check"
assert service.port == 100
assert service.points == 500
assert service.score_earned == 0
assert service.max_score == 0
assert service.percent_earned == 0
def test_last_check_result_false(self):
team = generate_sample_model_tree('Team', self.session)
service = Service(name="Example Service", team=team, check_name="ICMP IPv4 Check", host='127.0.0.1')
self.session.add(service)
round_obj = generate_sample_model_tree('Round', self.session)
check_1 = Check(round=round_obj, service=service, result=True, output='Good output')
self.session.add(check_1)
check_2 = Check(round=round_obj, service=service, result=True, output='Good output')
self.session.add(check_2)
check_3 = Check(round=round_obj, service=service, result=False, output='Check exceeded time')
self.session.add(check_3)
self.session.commit()
assert service.last_check_result() is False
def test_last_check_result_true(self):
team = generate_sample_model_tree('Team', self.session)
service = Service(name="Example Service", team=team, check_name="ICMP IPv4 Check", host='127.0.0.1')
self.session.add(service)
round_obj = generate_sample_model_tree('Round', self.session)
check_1 = Check(round=round_obj, service=service, result=False, output='Check exceeded time')
self.session.add(check_1)
check_2 = Check(round=round_obj, service=service, result=False, output='Check exceeded time')
self.session.add(check_2)
check_3 = Check(round=round_obj, service=service, result=True, output='Good output')
self.session.add(check_3)
self.session.commit()
assert service.last_check_result() is True
def test_last_check_result_not_found(self):
team = generate_sample_model_tree('Team', self.session)
service = Service(name="Example Service", team=team, check_name="ICMP IPv4 Check", host='127.0.0.1')
self.session.add(service)
self.session.commit()
assert service.last_check_result() is None
def test_checks(self):
service = generate_sample_model_tree('Service', self.session)
round_obj = generate_sample_model_tree('Round', self.session)
check_1 = Check(round=round_obj, service=service)
self.session.add(check_1)
check_2 = Check(round=round_obj, service=service)
self.session.add(check_2)
check_3 = Check(round=round_obj, service=service)
self.session.add(check_3)
self.session.commit()
assert service.checks == [check_1, check_2, check_3]
def test_checks_reversed(self):
service = generate_sample_model_tree('Service', self.session)
round_obj_1 = Round(number=1)
round_obj_2 = Round(number=2)
round_obj_3 = Round(number=3)
self.session.add(round_obj_1)
self.session.add(round_obj_2)
self.session.add(round_obj_3)
check_1 = Check(round=round_obj_1, service=service)
self.session.add(check_1)
check_2 = Check(round=round_obj_2, service=service)
self.session.add(check_2)
check_3 = Check(round=round_obj_3, service=service)
self.session.add(check_3)
self.session.commit()
assert service.checks_reversed == [check_3, check_2, check_1]
def test_environments(self):
service = generate_sample_model_tree('Service', self.session)
environment_1 = Environment(service=service, matching_content='*')
self.session.add(environment_1)
environment_2 = Environment(service=service, matching_content='*')
self.session.add(environment_2)
environment_3 = Environment(service=service, matching_content='*')
self.session.add(environment_3)
self.session.commit()
assert service.environments == [environment_1, environment_2, environment_3]
def test_accounts(self):
service = generate_sample_model_tree('Service', self.session)
account_1 = Account(username="testname", password="testpass", service=service)
self.session.add(account_1)
account_2 = Account(username="testname123", password="testpass", service=service)
self.session.add(account_2)
account_3 = Account(username="testusername", password="testpass", service=service)
self.session.add(account_3)
self.session.commit()
assert service.accounts == [account_1, account_2, account_3]
def test_score_earned(self):
service = generate_sample_model_tree('Service', self.session)
check_1 = Check(service=service, result=True, output='Good output')
check_2 = Check(service=service, result=True, output='Good output')
check_3 = Check(service=service, result=True, output='Good output')
check_4 = Check(service=service, result=True, output='Good output')
check_5 = Check(service=service, result=False, output='bad output')
self.session.add(check_1)
self.session.add(check_2)
self.session.add(check_3)
self.session.add(check_4)
self.session.add(check_5)
self.session.commit()
assert service.score_earned == 400
def test_max_score(self):
service = generate_sample_model_tree('Service', self.session)
check_1 = Check(service=service, result=True, output='Good output')
check_2 = Check(service=service, result=True, output='Good output')
check_3 = Check(service=service, result=True, output='Good output')
check_4 = Check(service=service, result=True, output='Good output')
check_5 = Check(service=service, result=False, output='bad output')
self.session.add(check_1)
self.session.add(check_2)
self.session.add(check_3)
self.session.add(check_4)
self.session.add(check_5)
self.session.commit()
assert service.max_score == 500
def test_percent_earned(self):
        service = generate_sample_model_tree('Service', self.session)
check_1 = Check(service=service, result=True, output='Good output')
check_2 = Check(service=service, result=True, output='Good output')
check_3 = Check(service=service, result=True, output='Good output')
check_4 = Check(service=service, result=True, output='Good output')
check_5 = Check(service=service, result=False, output='bad output')
self.session.add(check_1)
self.session.add(check_2)
self.session.add(check_3)
self.session.add(check_4)
self.session.add(check_5)
self.session.commit()
assert service.percent_earned == 80
def test_last_ten_checks_4_checks(self):
service = generate_sample_model_tree('Service', self.session)
check_1 = Check(service=service, result=True, output='Good output')
check_2 = Check(service=service, result=True, output='Good output')
check_3 = Check(service=service, result=True, output='Good output')
check_4 = Check(service=service, result=True, output='Good output')
self.session.add(check_1)
self.session.add(check_2)
self.session.add(check_3)
self.session.add(check_4)
self.session.commit()
assert service.last_ten_checks == [check_4, check_3, check_2, check_1]
def test_last_ten_checks_15_checks(self):
service = generate_sample_model_tree('Service', self.session)
check_1 = Check(service=service, result=True, output='Good output')
check_2 = Check(service=service, result=True, output='Good output')
check_3 = Check(service=service, result=True, output='Good output')
check_4 = Check(service=service, result=True, output='Good output')
check_5 = Check(service=service, result=True, output='Good output')
check_6 = Check(service=service, result=True, output='Good output')
check_7 = Check(service=service, result=True, output='Good output')
check_8 = Check(service=service, result=True, output='Good output')
check_9 = Check(service=service, result=True, output='Good output')
check_10 = Check(service=service, result=True, output='Good output')
check_11 = Check(service=service, result=True, output='Good output')
check_12 = Check(service=service, result=True, output='Good output')
check_13 = Check(service=service, result=True, output='Good output')
check_14 = Check(service=service, result=True, output='Good output')
check_15 = Check(service=service, result=True, output='Good output')
self.session.add(check_1)
self.session.add(check_2)
self.session.add(check_3)
self.session.add(check_4)
self.session.add(check_5)
self.session.add(check_6)
self.session.add(check_7)
self.session.add(check_8)
self.session.add(check_9)
self.session.add(check_10)
self.session.add(check_11)
self.session.add(check_12)
self.session.add(check_13)
self.session.add(check_14)
self.session.add(check_15)
self.session.commit()
assert service.last_ten_checks == [
check_15,
check_14,
check_13,
check_12,
check_11,
check_10,
check_9,
check_8,
check_7,
check_6
]
def test_check_result_for_round_no_rounds(self):
service = Service(name="Example Service", check_name="ICMP IPv4 Check", host='127.0.0.1')
assert service.check_result_for_round(1) is False
def test_check_result_for_round_3_rounds(self):
service = generate_sample_model_tree('Service', self.session)
round_1 = Round(number=1)
self.session.add(round_1)
check_1 = Check(round=round_1, result=True, service=service)
self.session.add(check_1)
round_2 = Round(number=2)
self.session.add(round_2)
check_2 = Check(round=round_2, result=True, service=service)
self.session.add(check_2)
round_3 = Round(number=3)
self.session.add(round_3)
check_3 = Check(round=round_3, result=False, service=service)
self.session.add(check_3)
self.session.commit()
assert service.check_result_for_round(1) is True
assert service.check_result_for_round(2) is True
assert service.check_result_for_round(3) is False
def test_rank(self):
team_1 = Team(name="Blue Team 1", color="Blue")
self.session.add(team_1)
service_1 = Service(name="Example Service 1", team=team_1, check_name="ICMP IPv4 Check", host='127.0.0.1')
self.session.add(service_1)
check_1 = Check(service=service_1, result=True, output='Good output')
check_2 = Check(service=service_1, result=True, output='Good output')
self.session.add(check_1)
self.session.add(check_2)
self.session.commit()
team_2 = Team(name="Blue Team 2", color="Blue")
self.session.add(team_2)
service_1 = Service(name="Example Service 1", team=team_2, check_name="ICMP IPv4 Check", host='127.0.0.1')
self.session.add(service_1)
check_1 = Check(service=service_1, result=True, output='Good output')
check_2 = Check(service=service_1, result=True, output='Good output')
self.session.add(check_1)
self.session.add(check_2)
self.session.commit()
team_3 = Team(name="Blue Team 3", color="Blue")
self.session.add(team_3)
service_1 = Service(name="Example Service 1", team=team_3, check_name="ICMP IPv4 Check", host='127.0.0.1')
self.session.add(service_1)
check_1 = Check(service=service_1, result=True, output='Good output')
check_2 = Check(service=service_1, result=False, output='Good output')
self.session.add(check_1)
self.session.add(check_2)
self.session.commit()
assert team_1.services[0].rank == 1
assert team_2.services[0].rank == 1
assert team_3.services[0].rank == 3
|
py | 7dfdba0086688ddfd7dfc763bdc52d0ad97787fc | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = [
'GetSqlPoolSensitivityLabelResult',
'AwaitableGetSqlPoolSensitivityLabelResult',
'get_sql_pool_sensitivity_label',
]
warnings.warn("""The 'latest' version is deprecated. Please migrate to the function in the top-level module: 'azure-nextgen:synapse:getSqlPoolSensitivityLabel'.""", DeprecationWarning)
@pulumi.output_type
class GetSqlPoolSensitivityLabelResult:
"""
A sensitivity label.
"""
def __init__(__self__, column_name=None, id=None, information_type=None, information_type_id=None, is_disabled=None, label_id=None, label_name=None, managed_by=None, name=None, rank=None, schema_name=None, table_name=None, type=None):
if column_name and not isinstance(column_name, str):
raise TypeError("Expected argument 'column_name' to be a str")
pulumi.set(__self__, "column_name", column_name)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if information_type and not isinstance(information_type, str):
raise TypeError("Expected argument 'information_type' to be a str")
pulumi.set(__self__, "information_type", information_type)
if information_type_id and not isinstance(information_type_id, str):
raise TypeError("Expected argument 'information_type_id' to be a str")
pulumi.set(__self__, "information_type_id", information_type_id)
if is_disabled and not isinstance(is_disabled, bool):
raise TypeError("Expected argument 'is_disabled' to be a bool")
pulumi.set(__self__, "is_disabled", is_disabled)
if label_id and not isinstance(label_id, str):
raise TypeError("Expected argument 'label_id' to be a str")
pulumi.set(__self__, "label_id", label_id)
if label_name and not isinstance(label_name, str):
raise TypeError("Expected argument 'label_name' to be a str")
pulumi.set(__self__, "label_name", label_name)
if managed_by and not isinstance(managed_by, str):
raise TypeError("Expected argument 'managed_by' to be a str")
pulumi.set(__self__, "managed_by", managed_by)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if rank and not isinstance(rank, str):
raise TypeError("Expected argument 'rank' to be a str")
pulumi.set(__self__, "rank", rank)
if schema_name and not isinstance(schema_name, str):
raise TypeError("Expected argument 'schema_name' to be a str")
pulumi.set(__self__, "schema_name", schema_name)
if table_name and not isinstance(table_name, str):
raise TypeError("Expected argument 'table_name' to be a str")
pulumi.set(__self__, "table_name", table_name)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="columnName")
def column_name(self) -> str:
"""
The column name.
"""
return pulumi.get(self, "column_name")
@property
@pulumi.getter
def id(self) -> str:
"""
Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="informationType")
def information_type(self) -> Optional[str]:
"""
The information type.
"""
return pulumi.get(self, "information_type")
@property
@pulumi.getter(name="informationTypeId")
def information_type_id(self) -> Optional[str]:
"""
The information type ID.
"""
return pulumi.get(self, "information_type_id")
@property
@pulumi.getter(name="isDisabled")
def is_disabled(self) -> bool:
"""
Is sensitivity recommendation disabled. Applicable for recommended sensitivity label only. Specifies whether the sensitivity recommendation on this column is disabled (dismissed) or not.
"""
return pulumi.get(self, "is_disabled")
@property
@pulumi.getter(name="labelId")
def label_id(self) -> Optional[str]:
"""
The label ID.
"""
return pulumi.get(self, "label_id")
@property
@pulumi.getter(name="labelName")
def label_name(self) -> Optional[str]:
"""
The label name.
"""
return pulumi.get(self, "label_name")
@property
@pulumi.getter(name="managedBy")
def managed_by(self) -> str:
"""
managed by
"""
return pulumi.get(self, "managed_by")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def rank(self) -> Optional[str]:
return pulumi.get(self, "rank")
@property
@pulumi.getter(name="schemaName")
def schema_name(self) -> str:
"""
The schema name.
"""
return pulumi.get(self, "schema_name")
@property
@pulumi.getter(name="tableName")
def table_name(self) -> str:
"""
The table name.
"""
return pulumi.get(self, "table_name")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
"""
return pulumi.get(self, "type")
class AwaitableGetSqlPoolSensitivityLabelResult(GetSqlPoolSensitivityLabelResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetSqlPoolSensitivityLabelResult(
column_name=self.column_name,
id=self.id,
information_type=self.information_type,
information_type_id=self.information_type_id,
is_disabled=self.is_disabled,
label_id=self.label_id,
label_name=self.label_name,
managed_by=self.managed_by,
name=self.name,
rank=self.rank,
schema_name=self.schema_name,
table_name=self.table_name,
type=self.type)
def get_sql_pool_sensitivity_label(column_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
schema_name: Optional[str] = None,
sensitivity_label_source: Optional[str] = None,
sql_pool_name: Optional[str] = None,
table_name: Optional[str] = None,
workspace_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetSqlPoolSensitivityLabelResult:
"""
A sensitivity label.
Latest API Version: 2020-12-01.
:param str column_name: The name of the column.
:param str resource_group_name: The name of the resource group. The name is case insensitive.
:param str schema_name: The name of the schema.
:param str sensitivity_label_source: The source of the sensitivity label.
:param str sql_pool_name: SQL pool name
:param str table_name: The name of the table.
:param str workspace_name: The name of the workspace
"""
pulumi.log.warn("get_sql_pool_sensitivity_label is deprecated: The 'latest' version is deprecated. Please migrate to the function in the top-level module: 'azure-nextgen:synapse:getSqlPoolSensitivityLabel'.")
__args__ = dict()
__args__['columnName'] = column_name
__args__['resourceGroupName'] = resource_group_name
__args__['schemaName'] = schema_name
__args__['sensitivityLabelSource'] = sensitivity_label_source
__args__['sqlPoolName'] = sql_pool_name
__args__['tableName'] = table_name
__args__['workspaceName'] = workspace_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:synapse/latest:getSqlPoolSensitivityLabel', __args__, opts=opts, typ=GetSqlPoolSensitivityLabelResult).value
return AwaitableGetSqlPoolSensitivityLabelResult(
column_name=__ret__.column_name,
id=__ret__.id,
information_type=__ret__.information_type,
information_type_id=__ret__.information_type_id,
is_disabled=__ret__.is_disabled,
label_id=__ret__.label_id,
label_name=__ret__.label_name,
managed_by=__ret__.managed_by,
name=__ret__.name,
rank=__ret__.rank,
schema_name=__ret__.schema_name,
table_name=__ret__.table_name,
type=__ret__.type)
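# Illustrative usage sketch (not part of the generated module); every resource name and
# the label source value below are placeholders.
#
#     result = get_sql_pool_sensitivity_label(
#         column_name="my_column",
#         resource_group_name="my-resource-group",
#         schema_name="dbo",
#         sensitivity_label_source="current",
#         sql_pool_name="my-sql-pool",
#         table_name="my_table",
#         workspace_name="my-workspace",
#     )
#     print(result.label_name, result.rank)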
|
py | 7dfdba811eecb300a71268219c9a21fcab0f040e | import io
import logging
import socket
import sys
import unittest
import time
import warnings
import pytest
import mock
from .. import TARPIT_HOST, VALID_SOURCE_ADDRESSES, INVALID_SOURCE_ADDRESSES
from ..port_helpers import find_unused_port
from urllib3 import encode_multipart_formdata, HTTPConnectionPool
from urllib3.exceptions import (
ConnectTimeoutError,
EmptyPoolError,
DecodeError,
MaxRetryError,
ReadTimeoutError,
NewConnectionError,
UnrewindableBodyError,
)
from urllib3.packages.six import b, u
from urllib3.packages.six.moves.urllib.parse import urlencode
from urllib3.util.retry import Retry, RequestHistory
from urllib3.util.timeout import Timeout
from dummyserver.testcase import HTTPDummyServerTestCase, SocketDummyServerTestCase
from dummyserver.server import NoIPv6Warning, HAS_IPV6_AND_DNS
from threading import Event
log = logging.getLogger("urllib3.connectionpool")
log.setLevel(logging.NOTSET)
log.addHandler(logging.StreamHandler(sys.stdout))
SHORT_TIMEOUT = 0.001
LONG_TIMEOUT = 0.03
def wait_for_socket(ready_event):
ready_event.wait()
ready_event.clear()
class TestConnectionPoolTimeouts(SocketDummyServerTestCase):
def test_timeout_float(self):
block_event = Event()
ready_event = self.start_basic_handler(block_send=block_event, num=2)
# Pool-global timeout
with HTTPConnectionPool(
self.host, self.port, timeout=SHORT_TIMEOUT, retries=False
) as pool:
wait_for_socket(ready_event)
with pytest.raises(ReadTimeoutError):
pool.request("GET", "/")
block_event.set() # Release block
# Shouldn't raise this time
wait_for_socket(ready_event)
block_event.set() # Pre-release block
pool.request("GET", "/")
def test_conn_closed(self):
block_event = Event()
self.start_basic_handler(block_send=block_event, num=1)
with HTTPConnectionPool(
self.host, self.port, timeout=SHORT_TIMEOUT, retries=False
) as pool:
conn = pool._get_conn()
pool._put_conn(conn)
try:
pool.urlopen("GET", "/")
self.fail("The request should fail with a timeout error.")
except ReadTimeoutError:
if conn.sock:
with pytest.raises(socket.error):
conn.sock.recv(1024)
finally:
pool._put_conn(conn)
block_event.set()
def test_timeout(self):
# Requests should time out when expected
block_event = Event()
ready_event = self.start_basic_handler(block_send=block_event, num=6)
# Pool-global timeout
timeout = Timeout(read=SHORT_TIMEOUT)
with HTTPConnectionPool(
self.host, self.port, timeout=timeout, retries=False
) as pool:
wait_for_socket(ready_event)
conn = pool._get_conn()
with pytest.raises(ReadTimeoutError):
pool._make_request(conn, "GET", "/")
pool._put_conn(conn)
block_event.set() # Release request
wait_for_socket(ready_event)
block_event.clear()
with pytest.raises(ReadTimeoutError):
pool.request("GET", "/")
block_event.set() # Release request
# Request-specific timeouts should raise errors
with HTTPConnectionPool(
self.host, self.port, timeout=LONG_TIMEOUT, retries=False
) as pool:
conn = pool._get_conn()
wait_for_socket(ready_event)
now = time.time()
with pytest.raises(ReadTimeoutError):
pool._make_request(conn, "GET", "/", timeout=timeout)
delta = time.time() - now
block_event.set() # Release request
message = "timeout was pool-level LONG_TIMEOUT rather than request-level SHORT_TIMEOUT"
assert delta < LONG_TIMEOUT, message
pool._put_conn(conn)
wait_for_socket(ready_event)
now = time.time()
with pytest.raises(ReadTimeoutError):
pool.request("GET", "/", timeout=timeout)
delta = time.time() - now
message = "timeout was pool-level LONG_TIMEOUT rather than request-level SHORT_TIMEOUT"
assert delta < LONG_TIMEOUT, message
block_event.set() # Release request
# Timeout int/float passed directly to request and _make_request should
# raise a request timeout
wait_for_socket(ready_event)
with pytest.raises(ReadTimeoutError):
pool.request("GET", "/", timeout=SHORT_TIMEOUT)
block_event.set() # Release request
wait_for_socket(ready_event)
conn = pool._new_conn()
# FIXME: This assert flakes sometimes. Not sure why.
with pytest.raises(ReadTimeoutError):
pool._make_request(conn, "GET", "/", timeout=SHORT_TIMEOUT)
block_event.set() # Release request
def test_connect_timeout(self):
url = "/"
host, port = TARPIT_HOST, 80
timeout = Timeout(connect=SHORT_TIMEOUT)
# Pool-global timeout
with HTTPConnectionPool(host, port, timeout=timeout) as pool:
conn = pool._get_conn()
with pytest.raises(ConnectTimeoutError):
pool._make_request(conn, "GET", url)
# Retries
retries = Retry(connect=0)
with pytest.raises(MaxRetryError):
pool.request("GET", url, retries=retries)
# Request-specific connection timeouts
big_timeout = Timeout(read=LONG_TIMEOUT, connect=LONG_TIMEOUT)
with HTTPConnectionPool(host, port, timeout=big_timeout, retries=False) as pool:
conn = pool._get_conn()
with pytest.raises(ConnectTimeoutError):
pool._make_request(conn, "GET", url, timeout=timeout)
pool._put_conn(conn)
with pytest.raises(ConnectTimeoutError):
pool.request("GET", url, timeout=timeout)
def test_total_applies_connect(self):
host, port = TARPIT_HOST, 80
timeout = Timeout(total=None, connect=SHORT_TIMEOUT)
with HTTPConnectionPool(host, port, timeout=timeout) as pool:
conn = pool._get_conn()
try:
with pytest.raises(ConnectTimeoutError):
pool._make_request(conn, "GET", "/")
finally:
conn.close()
timeout = Timeout(connect=3, read=5, total=SHORT_TIMEOUT)
with HTTPConnectionPool(host, port, timeout=timeout) as pool:
conn = pool._get_conn()
try:
with pytest.raises(ConnectTimeoutError):
pool._make_request(conn, "GET", "/")
finally:
conn.close()
def test_total_timeout(self):
block_event = Event()
ready_event = self.start_basic_handler(block_send=block_event, num=2)
wait_for_socket(ready_event)
# This will get the socket to raise an EAGAIN on the read
timeout = Timeout(connect=3, read=SHORT_TIMEOUT)
with HTTPConnectionPool(
self.host, self.port, timeout=timeout, retries=False
) as pool:
with pytest.raises(ReadTimeoutError):
pool.request("GET", "/")
block_event.set()
wait_for_socket(ready_event)
block_event.clear()
# The connect should succeed and this should hit the read timeout
timeout = Timeout(connect=3, read=5, total=SHORT_TIMEOUT)
with HTTPConnectionPool(
self.host, self.port, timeout=timeout, retries=False
) as pool:
with pytest.raises(ReadTimeoutError):
pool.request("GET", "/")
def test_create_connection_timeout(self):
self.start_basic_handler(block_send=Event(), num=0) # needed for self.port
timeout = Timeout(connect=SHORT_TIMEOUT, total=LONG_TIMEOUT)
with HTTPConnectionPool(
TARPIT_HOST, self.port, timeout=timeout, retries=False
) as pool:
conn = pool._new_conn()
with pytest.raises(ConnectTimeoutError):
conn.connect()
class TestConnectionPool(HTTPDummyServerTestCase):
def test_get(self):
with HTTPConnectionPool(self.host, self.port) as pool:
r = pool.request("GET", "/specific_method", fields={"method": "GET"})
assert r.status == 200, r.data
def test_post_url(self):
with HTTPConnectionPool(self.host, self.port) as pool:
r = pool.request("POST", "/specific_method", fields={"method": "POST"})
assert r.status == 200, r.data
def test_urlopen_put(self):
with HTTPConnectionPool(self.host, self.port) as pool:
r = pool.urlopen("PUT", "/specific_method?method=PUT")
assert r.status == 200, r.data
def test_wrong_specific_method(self):
# To make sure the dummy server is actually returning failed responses
with HTTPConnectionPool(self.host, self.port) as pool:
r = pool.request("GET", "/specific_method", fields={"method": "POST"})
assert r.status == 400, r.data
with HTTPConnectionPool(self.host, self.port) as pool:
r = pool.request("POST", "/specific_method", fields={"method": "GET"})
assert r.status == 400, r.data
def test_upload(self):
data = "I'm in ur multipart form-data, hazing a cheezburgr"
fields = {
"upload_param": "filefield",
"upload_filename": "lolcat.txt",
"upload_size": len(data),
"filefield": ("lolcat.txt", data),
}
with HTTPConnectionPool(self.host, self.port) as pool:
r = pool.request("POST", "/upload", fields=fields)
assert r.status == 200, r.data
def test_one_name_multiple_values(self):
fields = [("foo", "a"), ("foo", "b")]
with HTTPConnectionPool(self.host, self.port) as pool:
# urlencode
r = pool.request("GET", "/echo", fields=fields)
assert r.data == b"foo=a&foo=b"
# multipart
r = pool.request("POST", "/echo", fields=fields)
assert r.data.count(b'name="foo"') == 2
def test_request_method_body(self):
with HTTPConnectionPool(self.host, self.port) as pool:
body = b"hi"
r = pool.request("POST", "/echo", body=body)
assert r.data == body
fields = [("hi", "hello")]
with pytest.raises(TypeError):
pool.request("POST", "/echo", body=body, fields=fields)
def test_unicode_upload(self):
fieldname = u("myfile")
filename = u("\xe2\x99\xa5.txt")
data = u("\xe2\x99\xa5").encode("utf8")
size = len(data)
fields = {
u("upload_param"): fieldname,
u("upload_filename"): filename,
u("upload_size"): size,
fieldname: (filename, data),
}
with HTTPConnectionPool(self.host, self.port) as pool:
r = pool.request("POST", "/upload", fields=fields)
assert r.status == 200, r.data
def test_nagle(self):
""" Test that connections have TCP_NODELAY turned on """
# This test needs to be here in order to be run. socket.create_connection actually tries
# to connect to the host provided so we need a dummyserver to be running.
with HTTPConnectionPool(self.host, self.port) as pool:
conn = pool._get_conn()
try:
pool._make_request(conn, "GET", "/")
tcp_nodelay_setting = conn.sock.getsockopt(
socket.IPPROTO_TCP, socket.TCP_NODELAY
)
assert tcp_nodelay_setting
finally:
conn.close()
def test_socket_options(self):
"""Test that connections accept socket options."""
# This test needs to be here in order to be run. socket.create_connection actually tries to
# connect to the host provided so we need a dummyserver to be running.
with HTTPConnectionPool(
self.host,
self.port,
socket_options=[(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)],
) as pool:
s = pool._new_conn()._new_conn() # Get the socket
try:
using_keepalive = (
s.getsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE) > 0
)
assert using_keepalive
finally:
s.close()
def test_disable_default_socket_options(self):
"""Test that passing None disables all socket options."""
# This test needs to be here in order to be run. socket.create_connection actually tries
# to connect to the host provided so we need a dummyserver to be running.
with HTTPConnectionPool(self.host, self.port, socket_options=None) as pool:
s = pool._new_conn()._new_conn()
try:
using_nagle = s.getsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY) == 0
assert using_nagle
finally:
s.close()
def test_defaults_are_applied(self):
"""Test that modifying the default socket options works."""
# This test needs to be here in order to be run. socket.create_connection actually tries
# to connect to the host provided so we need a dummyserver to be running.
with HTTPConnectionPool(self.host, self.port) as pool:
# Get the HTTPConnection instance
conn = pool._new_conn()
try:
# Update the default socket options
conn.default_socket_options += [
(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
]
s = conn._new_conn()
nagle_disabled = (
s.getsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY) > 0
)
using_keepalive = (
s.getsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE) > 0
)
assert nagle_disabled
assert using_keepalive
finally:
conn.close()
s.close()
def test_connection_error_retries(self):
""" ECONNREFUSED error should raise a connection error, with retries """
port = find_unused_port()
with HTTPConnectionPool(self.host, port) as pool:
try:
pool.request("GET", "/", retries=Retry(connect=3))
self.fail("Should have failed with a connection error.")
except MaxRetryError as e:
assert type(e.reason) == NewConnectionError
def test_timeout_success(self):
timeout = Timeout(connect=3, read=5, total=None)
with HTTPConnectionPool(self.host, self.port, timeout=timeout) as pool:
pool.request("GET", "/")
# This should not raise a "Timeout already started" error
pool.request("GET", "/")
with HTTPConnectionPool(self.host, self.port, timeout=timeout) as pool:
# This should also not raise a "Timeout already started" error
pool.request("GET", "/")
timeout = Timeout(total=None)
with HTTPConnectionPool(self.host, self.port, timeout=timeout) as pool:
pool.request("GET", "/")
def test_tunnel(self):
# note the actual httplib.py has no tests for this functionality
timeout = Timeout(total=None)
with HTTPConnectionPool(self.host, self.port, timeout=timeout) as pool:
conn = pool._get_conn()
try:
conn.set_tunnel(self.host, self.port)
conn._tunnel = mock.Mock(return_value=None)
pool._make_request(conn, "GET", "/")
conn._tunnel.assert_called_once_with()
finally:
conn.close()
# test that it's not called when tunnel is not set
timeout = Timeout(total=None)
with HTTPConnectionPool(self.host, self.port, timeout=timeout) as pool:
conn = pool._get_conn()
try:
conn._tunnel = mock.Mock(return_value=None)
pool._make_request(conn, "GET", "/")
assert not conn._tunnel.called
finally:
conn.close()
def test_redirect(self):
with HTTPConnectionPool(self.host, self.port) as pool:
r = pool.request("GET", "/redirect", fields={"target": "/"}, redirect=False)
assert r.status == 303
r = pool.request("GET", "/redirect", fields={"target": "/"})
assert r.status == 200
assert r.data == b"Dummy server!"
def test_bad_connect(self):
with HTTPConnectionPool("badhost.invalid", self.port) as pool:
try:
pool.request("GET", "/", retries=5)
self.fail("should raise timeout exception here")
except MaxRetryError as e:
assert type(e.reason) == NewConnectionError
def test_keepalive(self):
with HTTPConnectionPool(self.host, self.port, block=True, maxsize=1) as pool:
r = pool.request("GET", "/keepalive?close=0")
r = pool.request("GET", "/keepalive?close=0")
assert r.status == 200
assert pool.num_connections == 1
assert pool.num_requests == 2
def test_keepalive_close(self):
with HTTPConnectionPool(
self.host, self.port, block=True, maxsize=1, timeout=2
) as pool:
r = pool.request(
"GET", "/keepalive?close=1", retries=0, headers={"Connection": "close"}
)
assert pool.num_connections == 1
# The dummyserver will have responded with Connection:close,
# and httplib will properly cleanup the socket.
# We grab the HTTPConnection object straight from the Queue,
# because _get_conn() is where the check & reset occurs
# pylint: disable-msg=W0212
conn = pool.pool.get()
assert conn.sock is None
pool._put_conn(conn)
# Now with keep-alive
r = pool.request(
"GET",
"/keepalive?close=0",
retries=0,
headers={"Connection": "keep-alive"},
)
# The dummyserver responded with Connection:keep-alive, the connection
# persists.
conn = pool.pool.get()
assert conn.sock is not None
pool._put_conn(conn)
# Another request asking the server to close the connection. This one
# should get cleaned up for the next request.
r = pool.request(
"GET", "/keepalive?close=1", retries=0, headers={"Connection": "close"}
)
assert r.status == 200
conn = pool.pool.get()
assert conn.sock is None
pool._put_conn(conn)
# Next request
r = pool.request("GET", "/keepalive?close=0")
def test_post_with_urlencode(self):
with HTTPConnectionPool(self.host, self.port) as pool:
data = {"banana": "hammock", "lol": "cat"}
r = pool.request("POST", "/echo", fields=data, encode_multipart=False)
assert r.data.decode("utf-8") == urlencode(data)
def test_post_with_multipart(self):
with HTTPConnectionPool(self.host, self.port) as pool:
data = {"banana": "hammock", "lol": "cat"}
r = pool.request("POST", "/echo", fields=data, encode_multipart=True)
body = r.data.split(b"\r\n")
encoded_data = encode_multipart_formdata(data)[0]
expected_body = encoded_data.split(b"\r\n")
# TODO: Get rid of extra parsing stuff when you can specify
# a custom boundary to encode_multipart_formdata
"""
We need to loop the return lines because a timestamp is attached
from within encode_multipart_formdata. When the server echos back
the data, it has the timestamp from when the data was encoded, which
is not equivalent to when we run encode_multipart_formdata on
the data again.
"""
for i, line in enumerate(body):
if line.startswith(b"--"):
continue
assert body[i] == expected_body[i]
def test_post_with_multipart__iter__(self):
with HTTPConnectionPool(self.host, self.port) as pool:
data = {"hello": "world"}
r = pool.request(
"POST",
"/echo",
fields=data,
preload_content=False,
multipart_boundary="boundary",
encode_multipart=True,
)
chunks = [chunk for chunk in r]
assert chunks == [
b"--boundary\r\n",
b'Content-Disposition: form-data; name="hello"\r\n',
b"\r\n",
b"world\r\n",
b"--boundary--\r\n",
]
def test_check_gzip(self):
with HTTPConnectionPool(self.host, self.port) as pool:
r = pool.request(
"GET", "/encodingrequest", headers={"accept-encoding": "gzip"}
)
assert r.headers.get("content-encoding") == "gzip"
assert r.data == b"hello, world!"
def test_check_deflate(self):
with HTTPConnectionPool(self.host, self.port) as pool:
r = pool.request(
"GET", "/encodingrequest", headers={"accept-encoding": "deflate"}
)
assert r.headers.get("content-encoding") == "deflate"
assert r.data == b"hello, world!"
def test_bad_decode(self):
with HTTPConnectionPool(self.host, self.port) as pool:
with pytest.raises(DecodeError):
pool.request(
"GET",
"/encodingrequest",
headers={"accept-encoding": "garbage-deflate"},
)
with pytest.raises(DecodeError):
pool.request(
"GET",
"/encodingrequest",
headers={"accept-encoding": "garbage-gzip"},
)
def test_connection_count(self):
with HTTPConnectionPool(self.host, self.port, maxsize=1) as pool:
pool.request("GET", "/")
pool.request("GET", "/")
pool.request("GET", "/")
assert pool.num_connections == 1
assert pool.num_requests == 3
def test_connection_count_bigpool(self):
with HTTPConnectionPool(self.host, self.port, maxsize=16) as http_pool:
http_pool.request("GET", "/")
http_pool.request("GET", "/")
http_pool.request("GET", "/")
assert http_pool.num_connections == 1
assert http_pool.num_requests == 3
def test_partial_response(self):
with HTTPConnectionPool(self.host, self.port, maxsize=1) as pool:
req_data = {"lol": "cat"}
resp_data = urlencode(req_data).encode("utf-8")
r = pool.request("GET", "/echo", fields=req_data, preload_content=False)
assert r.read(5) == resp_data[:5]
assert r.read() == resp_data[5:]
def test_lazy_load_twice(self):
# This test is sad and confusing. Need to figure out what's
# going on with partial reads and socket reuse.
with HTTPConnectionPool(
self.host, self.port, block=True, maxsize=1, timeout=2
) as pool:
payload_size = 1024 * 2
first_chunk = 512
boundary = "foo"
req_data = {"count": "a" * payload_size}
resp_data = encode_multipart_formdata(req_data, boundary=boundary)[0]
req2_data = {"count": "b" * payload_size}
resp2_data = encode_multipart_formdata(req2_data, boundary=boundary)[0]
r1 = pool.request(
"POST",
"/echo",
fields=req_data,
multipart_boundary=boundary,
preload_content=False,
)
assert r1.read(first_chunk) == resp_data[:first_chunk]
try:
r2 = pool.request(
"POST",
"/echo",
fields=req2_data,
multipart_boundary=boundary,
preload_content=False,
pool_timeout=0.001,
)
# This branch should generally bail here, but maybe someday it will
# work? Perhaps by some sort of magic. Consider it a TODO.
assert r2.read(first_chunk) == resp2_data[:first_chunk]
assert r1.read() == resp_data[first_chunk:]
assert r2.read() == resp2_data[first_chunk:]
assert pool.num_requests == 2
except EmptyPoolError:
assert r1.read() == resp_data[first_chunk:]
assert pool.num_requests == 1
assert pool.num_connections == 1
def test_for_double_release(self):
MAXSIZE = 5
# Check default state
with HTTPConnectionPool(self.host, self.port, maxsize=MAXSIZE) as pool:
assert pool.num_connections == 0
assert pool.pool.qsize() == MAXSIZE
# Make an empty slot for testing
pool.pool.get()
assert pool.pool.qsize() == MAXSIZE - 1
# Check state after simple request
pool.urlopen("GET", "/")
assert pool.pool.qsize() == MAXSIZE - 1
# Check state without release
pool.urlopen("GET", "/", preload_content=False)
assert pool.pool.qsize() == MAXSIZE - 2
pool.urlopen("GET", "/")
assert pool.pool.qsize() == MAXSIZE - 2
# Check state after read
pool.urlopen("GET", "/").data
assert pool.pool.qsize() == MAXSIZE - 2
pool.urlopen("GET", "/")
assert pool.pool.qsize() == MAXSIZE - 2
def test_release_conn_parameter(self):
MAXSIZE = 5
with HTTPConnectionPool(self.host, self.port, maxsize=MAXSIZE) as pool:
assert pool.pool.qsize() == MAXSIZE
# Make request without releasing connection
pool.request("GET", "/", release_conn=False, preload_content=False)
assert pool.pool.qsize() == MAXSIZE - 1
def test_dns_error(self):
with HTTPConnectionPool(
"thishostdoesnotexist.invalid", self.port, timeout=0.001
) as pool:
with pytest.raises(MaxRetryError):
pool.request("GET", "/test", retries=2)
def test_source_address(self):
for addr, is_ipv6 in VALID_SOURCE_ADDRESSES:
if is_ipv6 and not HAS_IPV6_AND_DNS:
warnings.warn("No IPv6 support: skipping.", NoIPv6Warning)
continue
with HTTPConnectionPool(
self.host, self.port, source_address=addr, retries=False
) as pool:
r = pool.request("GET", "/source_address")
assert r.data == b(addr[0])
def test_source_address_error(self):
for addr in INVALID_SOURCE_ADDRESSES:
with HTTPConnectionPool(
self.host, self.port, source_address=addr, retries=False
) as pool:
# FIXME: This assert flakes sometimes. Not sure why.
with pytest.raises(NewConnectionError):
pool.request("GET", "/source_address?{0}".format(addr))
def test_stream_keepalive(self):
x = 2
with HTTPConnectionPool(self.host, self.port) as pool:
for _ in range(x):
response = pool.request(
"GET",
"/chunked",
headers={"Connection": "keep-alive"},
preload_content=False,
retries=False,
)
for chunk in response.stream():
assert chunk == b"123"
assert pool.num_connections == 1
assert pool.num_requests == x
def test_read_chunked_short_circuit(self):
with HTTPConnectionPool(self.host, self.port) as pool:
response = pool.request("GET", "/chunked", preload_content=False)
response.read()
with pytest.raises(StopIteration):
next(response.read_chunked())
def test_read_chunked_on_closed_response(self):
with HTTPConnectionPool(self.host, self.port) as pool:
response = pool.request("GET", "/chunked", preload_content=False)
response.close()
with pytest.raises(StopIteration):
next(response.read_chunked())
def test_chunked_gzip(self):
with HTTPConnectionPool(self.host, self.port) as pool:
response = pool.request(
"GET", "/chunked_gzip", preload_content=False, decode_content=True
)
assert b"123" * 4 == response.read()
def test_cleanup_on_connection_error(self):
"""
Test that connections are recycled to the pool on
connection errors where no http response is received.
"""
poolsize = 3
with HTTPConnectionPool(
self.host, self.port, maxsize=poolsize, block=True
) as http:
assert http.pool.qsize() == poolsize
# force a connection error by supplying a non-existent
# url. We won't get a response for this and so the
# conn won't be implicitly returned to the pool.
with pytest.raises(MaxRetryError):
http.request(
"GET",
"/redirect",
fields={"target": "/"},
release_conn=False,
retries=0,
)
r = http.request(
"GET",
"/redirect",
fields={"target": "/"},
release_conn=False,
retries=1,
)
r.release_conn()
# the pool should still contain poolsize elements
assert http.pool.qsize() == http.pool.maxsize
def test_mixed_case_hostname(self):
with HTTPConnectionPool("LoCaLhOsT", self.port) as pool:
response = pool.request("GET", "http://LoCaLhOsT:%d/" % self.port)
assert response.status == 200
class TestRetry(HTTPDummyServerTestCase):
def test_max_retry(self):
with HTTPConnectionPool(self.host, self.port) as pool:
try:
r = pool.request("GET", "/redirect", fields={"target": "/"}, retries=0)
self.fail(
"Failed to raise MaxRetryError exception, returned %r" % r.status
)
except MaxRetryError:
pass
def test_disabled_retry(self):
""" Disabled retries should disable redirect handling. """
with HTTPConnectionPool(self.host, self.port) as pool:
r = pool.request("GET", "/redirect", fields={"target": "/"}, retries=False)
assert r.status == 303
r = pool.request(
"GET",
"/redirect",
fields={"target": "/"},
retries=Retry(redirect=False),
)
assert r.status == 303
with HTTPConnectionPool(
"thishostdoesnotexist.invalid", self.port, timeout=0.001
) as pool:
with pytest.raises(NewConnectionError):
pool.request("GET", "/test", retries=False)
def test_read_retries(self):
""" Should retry for status codes in the whitelist """
with HTTPConnectionPool(self.host, self.port) as pool:
retry = Retry(read=1, status_forcelist=[418])
resp = pool.request(
"GET",
"/successful_retry",
headers={"test-name": "test_read_retries"},
retries=retry,
)
assert resp.status == 200
def test_read_total_retries(self):
""" HTTP response w/ status code in the whitelist should be retried """
with HTTPConnectionPool(self.host, self.port) as pool:
headers = {"test-name": "test_read_total_retries"}
retry = Retry(total=1, status_forcelist=[418])
resp = pool.request(
"GET", "/successful_retry", headers=headers, retries=retry
)
assert resp.status == 200
def test_retries_wrong_whitelist(self):
"""HTTP response w/ status code not in whitelist shouldn't be retried"""
with HTTPConnectionPool(self.host, self.port) as pool:
retry = Retry(total=1, status_forcelist=[202])
resp = pool.request(
"GET",
"/successful_retry",
headers={"test-name": "test_wrong_whitelist"},
retries=retry,
)
assert resp.status == 418
def test_default_method_whitelist_retried(self):
""" urllib3 should retry methods in the default method whitelist """
with HTTPConnectionPool(self.host, self.port) as pool:
retry = Retry(total=1, status_forcelist=[418])
resp = pool.request(
"OPTIONS",
"/successful_retry",
headers={"test-name": "test_default_whitelist"},
retries=retry,
)
assert resp.status == 200
def test_retries_wrong_method_list(self):
"""Method not in our whitelist should not be retried, even if code matches"""
with HTTPConnectionPool(self.host, self.port) as pool:
headers = {"test-name": "test_wrong_method_whitelist"}
retry = Retry(total=1, status_forcelist=[418], method_whitelist=["POST"])
resp = pool.request(
"GET", "/successful_retry", headers=headers, retries=retry
)
assert resp.status == 418
def test_read_retries_unsuccessful(self):
with HTTPConnectionPool(self.host, self.port) as pool:
headers = {"test-name": "test_read_retries_unsuccessful"}
resp = pool.request("GET", "/successful_retry", headers=headers, retries=1)
assert resp.status == 418
def test_retry_reuse_safe(self):
""" It should be possible to reuse a Retry object across requests """
with HTTPConnectionPool(self.host, self.port) as pool:
headers = {"test-name": "test_retry_safe"}
retry = Retry(total=1, status_forcelist=[418])
resp = pool.request(
"GET", "/successful_retry", headers=headers, retries=retry
)
assert resp.status == 200
with HTTPConnectionPool(self.host, self.port) as pool:
resp = pool.request(
"GET", "/successful_retry", headers=headers, retries=retry
)
assert resp.status == 200
def test_retry_return_in_response(self):
with HTTPConnectionPool(self.host, self.port) as pool:
headers = {"test-name": "test_retry_return_in_response"}
retry = Retry(total=2, status_forcelist=[418])
resp = pool.request(
"GET", "/successful_retry", headers=headers, retries=retry
)
assert resp.status == 200
assert resp.retries.total == 1
assert resp.retries.history == (
RequestHistory("GET", "/successful_retry", None, 418, None),
)
def test_retry_redirect_history(self):
with HTTPConnectionPool(self.host, self.port) as pool:
resp = pool.request("GET", "/redirect", fields={"target": "/"})
assert resp.status == 200
assert resp.retries.history == (
RequestHistory("GET", "/redirect?target=%2F", None, 303, "/"),
)
def test_multi_redirect_history(self):
with HTTPConnectionPool(self.host, self.port) as pool:
r = pool.request(
"GET",
"/multi_redirect",
fields={"redirect_codes": "303,302,200"},
redirect=False,
)
assert r.status == 303
assert r.retries.history == tuple()
with HTTPConnectionPool(self.host, self.port) as pool:
r = pool.request(
"GET",
"/multi_redirect",
retries=10,
fields={"redirect_codes": "303,302,301,307,302,200"},
)
assert r.status == 200
assert r.data == b"Done redirecting"
expected = [
(303, "/multi_redirect?redirect_codes=302,301,307,302,200"),
(302, "/multi_redirect?redirect_codes=301,307,302,200"),
(301, "/multi_redirect?redirect_codes=307,302,200"),
(307, "/multi_redirect?redirect_codes=302,200"),
(302, "/multi_redirect?redirect_codes=200"),
]
actual = [
(history.status, history.redirect_location)
for history in r.retries.history
]
assert actual == expected
class TestRetryAfter(HTTPDummyServerTestCase):
def test_retry_after(self):
# Request twice in a second to get a 429 response.
with HTTPConnectionPool(self.host, self.port) as pool:
r = pool.request(
"GET",
"/retry_after",
fields={"status": "429 Too Many Requests"},
retries=False,
)
r = pool.request(
"GET",
"/retry_after",
fields={"status": "429 Too Many Requests"},
retries=False,
)
assert r.status == 429
r = pool.request(
"GET",
"/retry_after",
fields={"status": "429 Too Many Requests"},
retries=True,
)
assert r.status == 200
# Request twice in a second to get a 503 response.
r = pool.request(
"GET",
"/retry_after",
fields={"status": "503 Service Unavailable"},
retries=False,
)
r = pool.request(
"GET",
"/retry_after",
fields={"status": "503 Service Unavailable"},
retries=False,
)
assert r.status == 503
r = pool.request(
"GET",
"/retry_after",
fields={"status": "503 Service Unavailable"},
retries=True,
)
assert r.status == 200
# Ignore Retry-After header on status which is not defined in
# Retry.RETRY_AFTER_STATUS_CODES.
r = pool.request(
"GET",
"/retry_after",
fields={"status": "418 I'm a teapot"},
retries=True,
)
assert r.status == 418
def test_redirect_after(self):
with HTTPConnectionPool(self.host, self.port) as pool:
r = pool.request("GET", "/redirect_after", retries=False)
assert r.status == 303
t = time.time()
r = pool.request("GET", "/redirect_after")
assert r.status == 200
delta = time.time() - t
assert delta >= 1
t = time.time()
timestamp = t + 2
r = pool.request("GET", "/redirect_after?date=" + str(timestamp))
assert r.status == 200
delta = time.time() - t
assert delta >= 1
# Retry-After is past
t = time.time()
timestamp = t - 1
r = pool.request("GET", "/redirect_after?date=" + str(timestamp))
delta = time.time() - t
assert r.status == 200
assert delta < 1
class TestFileBodiesOnRetryOrRedirect(HTTPDummyServerTestCase):
def test_retries_put_filehandle(self):
"""HTTP PUT retry with a file-like object should not timeout"""
with HTTPConnectionPool(self.host, self.port, timeout=0.1) as pool:
retry = Retry(total=3, status_forcelist=[418])
# httplib reads in 8k chunks; use a larger content length
content_length = 65535
data = b"A" * content_length
uploaded_file = io.BytesIO(data)
headers = {
"test-name": "test_retries_put_filehandle",
"Content-Length": str(content_length),
}
resp = pool.urlopen(
"PUT",
"/successful_retry",
headers=headers,
retries=retry,
body=uploaded_file,
assert_same_host=False,
redirect=False,
)
assert resp.status == 200
def test_redirect_put_file(self):
"""PUT with file object should work with a redirection response"""
with HTTPConnectionPool(self.host, self.port, timeout=0.1) as pool:
retry = Retry(total=3, status_forcelist=[418])
# httplib reads in 8k chunks; use a larger content length
content_length = 65535
data = b"A" * content_length
uploaded_file = io.BytesIO(data)
headers = {
"test-name": "test_redirect_put_file",
"Content-Length": str(content_length),
}
url = "/redirect?target=/echo&status=307"
resp = pool.urlopen(
"PUT",
url,
headers=headers,
retries=retry,
body=uploaded_file,
assert_same_host=False,
redirect=True,
)
assert resp.status == 200
assert resp.data == data
def test_redirect_with_failed_tell(self):
"""Abort request if failed to get a position from tell()"""
class BadTellObject(io.BytesIO):
def tell(self):
raise IOError
body = BadTellObject(b"the data")
url = "/redirect?target=/successful_retry"
# httplib uses fileno if Content-Length isn't supplied,
# which is unsupported by BytesIO.
headers = {"Content-Length": "8"}
with HTTPConnectionPool(self.host, self.port, timeout=0.1) as pool:
try:
pool.urlopen("PUT", url, headers=headers, body=body)
self.fail("PUT successful despite failed rewind.")
except UnrewindableBodyError as e:
assert "Unable to record file position for" in str(e)
class TestRetryPoolSize(HTTPDummyServerTestCase):
def test_pool_size_retry(self):
retries = Retry(total=1, raise_on_status=False, status_forcelist=[404])
with HTTPConnectionPool(
self.host, self.port, maxsize=10, retries=retries, block=True
) as pool:
pool.urlopen("GET", "/not_found", preload_content=False)
assert pool.num_connections == 1
class TestRedirectPoolSize(HTTPDummyServerTestCase):
def test_pool_size_redirect(self):
retries = Retry(
total=1, raise_on_status=False, status_forcelist=[404], redirect=True
)
with HTTPConnectionPool(
self.host, self.port, maxsize=10, retries=retries, block=True
) as pool:
pool.urlopen("GET", "/redirect", preload_content=False)
assert pool.num_connections == 1
if __name__ == "__main__":
unittest.main()
|
py | 7dfdbaadd8bba9a06d151092811cd78739631179 | #!/usr/bin/env python3
import pathfinder as pf
import math
if __name__ == '__main__':
points = [
pf.Waypoint(-4, -1, math.radians(-45.0)),
pf.Waypoint(-2, -2, 0),
pf.Waypoint(0, 0, 0),
]
info, trajectory = pf.generate(points, pf.FIT_HERMITE_CUBIC, pf.SAMPLES_HIGH,
dt=0.05, # 50ms
max_velocity=1.7,
max_acceleration=2.0,
max_jerk=60.0)
# Wheelbase Width = 0.5m
modifier = pf.modifiers.TankModifier(trajectory).modify(0.5)
# Do something with the new Trajectories...
left = modifier.getLeftTrajectory()
right = modifier.getRightTrajectory()
pf.serialize_csv("left.csv", left)
pf.serialize_csv("right.csv", right)
|
py | 7dfdbb5f7a5d306f3c9cba3afeb8840dab331553 | from prestans.http import STATUS
from prestans.rest import RequestHandler
import pytest
import unittest
class NoContentHandler(RequestHandler):
def get(self):
self.response.status = STATUS.NO_CONTENT
def test_app():
from webtest import TestApp
from prestans.rest import RequestRouter
api = RequestRouter([
('/no-content', NoContentHandler)
], application_name="api", debug=True)
return TestApp(app=api)
class Issue154(unittest.TestCase):
def test_204_header_omitted(self):
"""
Request should return no content with header omitted
"""
app = test_app()
resp = app.get('/no-content')
self.assertEqual(resp.status_int, STATUS.NO_CONTENT)
self.assertIsNone(resp.content_type)
|
py | 7dfdbb7c4101cfb4b3fb03fe7c1dcf32c7c2db3a | from django.db.models.signals import post_save, pre_delete
from django.db.models import Q
import peeringdb_server.rest
from peeringdb_server.models import (
UTC,
InternetExchange,
Network,
Facility,
Organization,
)
import re
import time
import datetime
import unidecode
def unaccent(v):
return unidecode.unidecode(v).lower()
# SEARCH INDEX IS STORED HERE
SEARCH_CACHE = {"search_index": {}, "time": 0}
# We want to hook searchable objects into save and delete signals
# so we can update the search index as the data changes without having
# to reload the entire thing all the time
def hook_save(sender, **kwargs):
obj = kwargs.get("instance")
tag = obj._handleref.tag
idx = SEARCH_CACHE.get("search_index")
if obj.status == "ok":
if tag not in idx:
idx[tag] = {}
idx.get(tag)[obj.id] = obj
# print "%d %s refreshed in search index" % (obj.id, tag)
else:
try:
del idx[tag][obj.id]
except KeyError:
pass
# print "%d %s delete from search index" % (obj.id, tag)
def hook_delete(sender, **kwargs):
obj = kwargs.get("instance")
tag = obj._handleref.tag
try:
        del SEARCH_CACHE["search_index"][tag][obj.id]
except TypeError:
pass
except KeyError:
pass
# print "%d %s deleted from search index " % (obj.id, tag)
searchable_models = [InternetExchange, Network, Facility, Organization]
for model in searchable_models:
post_save.connect(hook_save, sender=model)
pre_delete.connect(hook_delete, sender=model)
def search(term):
"""
Search searchable objects (ixp, network, facility ...) by term
Returns result dict
"""
search_tags = ("fac", "ix", "net", "org")
ref_dict = peeringdb_server.rest.ref_dict()
t = time.time()
if not SEARCH_CACHE.get("search_index"):
# whole db takes 5ish seconds, too slow to cache inline here
search_index = {
tag: {obj.id: obj for obj in model.objects.filter(status__in=["ok"])}
for tag, model in list(ref_dict.items())
if tag in search_tags
}
for typ, stor in list(search_index.items()):
print("CACHED: %d items in %s" % (len(stor), typ))
tag_id_re = re.compile(r"(" + r"|".join(search_tags) + r"|asn|as)(\d+)")
    # FIXME: for now let's force a flush every 120 seconds, might want to look
# at an event based update solution instead
SEARCH_CACHE.update(
search_index=search_index, time=t, update_t=t, tag_id_re=tag_id_re
)
else:
search_index = SEARCH_CACHE.get("search_index")
tag_id_re = SEARCH_CACHE.get("tag_id_re")
# while we are using signals to make sure that the search index gets updated whenever
# a model is saved, right now we still have updates from external sources
# to which those signals cannot be easily connected (importer, fac_merge command etc.)
#
# in order to reflect search index changes made by external sources
    # we need to find new / updated objects regularly and update the
# search index from that
#
# FIXME: this can be taken out when we turn the importer off - or just leave it
# in as a fail-safe as it is fairly unobtrusive
ut = SEARCH_CACHE.get("update_t", 0)
if t - ut > 600:
dut = datetime.datetime.fromtimestamp(ut).replace(tzinfo=UTC())
print("Updating search index with newly created/updates objects")
search_index_update = {
tag: {
obj.id: obj
for obj in model.objects.filter(
Q(created__gte=dut) | Q(updated__gte=dut)
).filter(status="ok")
}
for tag, model in list(ref_dict.items())
if tag in search_tags
}
for tag, objects in list(search_index_update.items()):
if tag not in SEARCH_CACHE["search_index"]:
SEARCH_CACHE["search_index"][tag] = {
obj.id: obj for obj in ref_dict[tag].objects.filter(status="ok")
}
SEARCH_CACHE["search_index"][tag].update(objects)
SEARCH_CACHE["update_t"] = t
# FIXME: for some reason this gets unset sometimes - need to figure out
    # why - for now just recreate when it's missing
if not tag_id_re:
tag_id_re = re.compile(r"(" + r"|".join(search_tags) + r"|asn|as)(\d+)")
SEARCH_CACHE["tag_id_re"] = tag_id_re
print("Search index retrieval took %.5f seconds" % (time.time() - t))
result = {tag: [] for tag, model in list(ref_dict.items())}
term = unaccent(term)
# try to convert to int for numeric search matching
typed_q = {}
try:
typed_q["int"] = int(term)
except ValueError:
pass
# check for ref tags
try:
match = tag_id_re.match(term)
if match:
typed_q[match.group(1)] = match.group(2)
except ValueError:
pass
# FIXME model should have a search_fields attr on it
# this whole thing should be replaced with something more modular to get
# rid of all the ifs
for tag, index in list(search_index.items()):
for id, data in list(index.items()):
if tag == "org":
data.org_id = data.id
if unaccent(data.name).find(term) > -1:
result[tag].append(
{"id": id, "name": data.search_result_name, "org_id": data.org_id}
)
continue
if hasattr(data, "name_long") and unaccent(data.name_long).find(term) > -1:
result[tag].append(
{"id": id, "name": data.search_result_name, "org_id": data.org_id}
)
continue
if hasattr(data, "aka") and unaccent(data.aka).find(term) > -1:
result[tag].append(
{"id": id, "name": data.search_result_name, "org_id": data.org_id}
)
continue
if typed_q:
if tag in typed_q:
if str(data.id).startswith(typed_q[tag]):
result[tag].append(
{
"id": id,
"name": data.search_result_name,
"org_id": data.org_id,
}
)
continue
                # search asn on everything? probably just if asn in search
# fields
if hasattr(data, "asn"):
asn = typed_q.get(
"as", typed_q.get("asn", str(typed_q.get("int", "")))
)
if asn and str(data.asn).startswith(asn):
result[tag].append(
{
"id": id,
"name": data.search_result_name,
"org_id": data.org_id,
}
)
for k, items in list(result.items()):
result[k] = sorted(items, key=lambda row: row.get("name"))
return result
|
py | 7dfdbc660d101f81ad00c5685d7b1510b76ab941 | import os
import re
from copy import copy
# Imports for interactive wizard:
import ipywidgets as ipyw
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from IPython.display import display
from cycler import cycler
from gdsii import types
from gdsii.record import Record
from qube.layout.config import LayoutConfig
default_rc_string = {
'color': 'black',
'weight': 'normal',
'size': 8,
'horizontalalignment': 'center',
'verticalalignment': 'center',
}
default_rc_shape = {
'facecolor': 'grey',
'alpha': 0.8,
'edgecolor': 'grey',
}
default_rc_figure = {
'figsize_x': 8,
'figsize_y': 4,
'lim_factor': 1.0,
'extra_left': 0.0,
'extra_right': 0.0,
'extra_top': 0.0,
'extra_bottom': 0.0,
}
class LayoutGDS(object):
elements = {
'id': [],
'xy': [],
'name': [],
'rc': [],
'type': [], # string or shape
'label': [],
}
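    # NOTE: `elements` is a class-level attribute, so it is shared across LayoutGDS instances;
    # read_elements() below re-populates its lists in place for the loaded file.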
def __init__(self, fullpath):
self.fullpath = fullpath
self.load_GDS()
self.read_elements()
self.read_strings()
self.read_shapes()
self.set_default_rc()
def load_GDS(self):
self.structure = structurefromGDS(self.fullpath)
def set_default_rc(self):
global default_rc_figure
global default_rc_string
global default_rc_shape
self.rc_figure = default_rc_figure
self.rc_string = default_rc_string
self.rc_shape = default_rc_shape
def load_layout_config(self, fullpath):
self.load_shapes_config(fullpath)
self.load_strings_config(fullpath)
# self.load_rc_string_config(fullpath)
# self.load_rc_shape_config(fullpath)
# self.load_rc_figure_config(fullpath)
def load_shapes_config(self, fullpath):
config = LayoutConfig(fullpath)
shapes_config = config.get_shapes_config()
for name, ids in shapes_config.items():
for id in ids:
self.set_elements_property_value(id, 'name', name)
self.set_elements_property_value(id, 'label', name)
def load_strings_config(self, fullpath):
config = LayoutConfig(fullpath)
strings_config = config.get_strings_config()
for name, ids in strings_config.items():
for id in ids:
self.set_elements_property_value(id, 'name', name)
self.set_elements_property_value(id, 'label', name)
def load_rc_string_config(self, fullpath):
config = LayoutConfig(fullpath)
section = 'RC_STRING'
if section in config.sections:
rc_string = config.get_section_config(section)
rc_string['size'] = float(rc_string['size'])
self.rc_string.update(rc_string)
def load_rc_shape_config(self, fullpath):
config = LayoutConfig(fullpath)
section = 'RC_SHAPE'
if section in config.sections:
rc_shape = config.get_section_config(section)
rc_shape['alpha'] = float(rc_shape['alpha'])
self.rc_shape.update(rc_shape)
def load_rc_figure_config(self, fullpath):
config = LayoutConfig(fullpath)
section = 'RC_FIGURE'
if section in config.sections:
rc_figure = config.get_section_config(section)
for key, value in rc_figure.items():
rc_figure[key] = float(rc_figure[key])
self.rc_figure.update(rc_figure)
def read_elements(self):
elements_list = self.structure()
self.elements['id'] = []
self.elements['xy'] = []
self.elements['name'] = []
self.elements['label'] = []
self.elements['rc'] = []
self.elements['type'] = []
for i, element in enumerate(elements_list):
self.elements['id'].append(i)
self.elements['xy'].append(element.transpose()) # For x,y = self.elements
self.elements['name'].append(None)
self.elements['label'].append(None)
self.elements['rc'].append({})
self.elements['type'].append('Undefined')
self.elements_size = i + 1
def read_strings(self):
self.strings_ids = []
for key, value in self.structure.string_infos.items():
index = self.get_elements_index('id', key)
self.strings_ids.append(key)
self.elements['type'][index] = 'string'
self.elements['name'][index] = value
self.elements['label'][index] = value
self.elements['rc'][index] = copy(default_rc_string)
self.strings_size = len(self.strings_ids)
def read_shapes(self):
self.shapes_ids = []
for i in range(self.elements_size):
if self.elements['type'][i] != 'string':
self.shapes_ids.append(self.elements['id'][i])
self.elements['type'][i] = 'shape'
self.elements['rc'][i] = copy(default_rc_shape)
self.shapes_size = len(self.shapes_ids)
def get_elements_index(self, key, target):
index = self.elements[key].index(target)
return index
def set_elements_property_value(self, id, property, value):
index = self.get_elements_index('id', id)
self.elements[property][index] = value
def get_elements_property_value(self, id, property):
index = self.get_elements_index('id', id)
value = self.elements[property][index]
return value
def get_ids_with_property_value(self, property, target_value):
ids = []
for i in range(self.elements_size):
cur_value = self.elements[property][i]
if cur_value == target_value:
id = self.elements['id'][i]
ids.append(id)
        return ids
def layout_limits(self, extra_factor=1.2):
xlim = np.array([0, 0])
ylim = np.array([0, 0])
for xy in self.elements['xy']:
x, y = xy
xlim[0] = min(xlim[0], np.amin(x))
xlim[1] = max(xlim[1], np.amax(x))
ylim[0] = min(ylim[0], np.amin(y))
ylim[1] = max(ylim[1], np.amax(y))
xlim = xlim * extra_factor
ylim = ylim * extra_factor
return xlim, ylim
def plot_elements(self):
nb = self.elements_size
fig, ax = plt.subplots()
colormap = plt.cm.hsv
colors = [colormap(i) for i in np.linspace(0, 1, nb)]
ax.set_prop_cycle(cycler('color', colors))
for i in range(nb):
x, y = self.elements['xy'][i]
ax.fill(x, y)
ax.text(x[0], y[0], i)
return fig, ax
def plot_layout(self):
rc_fig = self.rc_figure
figsize = (rc_fig['figsize_x'], rc_fig['figsize_y'])
fig, ax = plt.subplots(1, figsize=figsize)
ax.axis('off')
for id in self.shapes_ids:
x, y = self.get_elements_property_value(id, 'xy')
rc = self.get_elements_property_value(id, 'rc')
rc_shape = copy(self.rc_shape)
rc_shape.update(rc)
# rc.update(self.rc_shape)
ax.fill(x, y, **rc_shape)
for id in self.strings_ids:
x, y = self.get_elements_property_value(id, 'xy')
label = self.get_elements_property_value(id, 'label')
rc = self.get_elements_property_value(id, 'rc')
rc_string = copy(self.rc_string)
rc_string.update(rc)
# rc.update(self.rc_string)
# rc['horizontalalignment']='center'
ax.text(x, y, label, **rc_string)
lim_factor = rc_fig['lim_factor']
xlim, ylim = self.layout_limits(extra_factor=lim_factor)
xlim = (xlim[0] + rc_fig['extra_left'], xlim[1] + rc_fig['extra_right'])
ylim = (ylim[0] + rc_fig['extra_bottom'], ylim[1] + rc_fig['extra_top'])
ax.set_xlim(xlim)
ax.set_ylim(ylim)
return fig, ax
def set_dummy_label(self, label):
elems = self.elements
n = len(elems['id'])
for i in range(n):
elems['label'][i] = label
def config_wizard(self,
names: list, # names of controls to allocate to the strings and shapes
export_file=None, # location of the file to export the GDS configuration
default_file=None, # location of reference file to import footer for GDS config
):
if not os.path.exists(default_file):
raise Exception('File with format-defaults \"{:s}\" not found!'.format(default_file))
shapes_data = list()
strings_data = list()
N_elements = len(self.elements['id'])
shape_id = ipyw.IntSlider(
value=0,
min=0,
max=N_elements,
step=1,
)
shape_id.layout.visibility = 'hidden'
textbox = ipyw.Label(value='Nothing')
nothing = 'Nothing'
temp_names = names + [nothing]
dropdown_controls = ipyw.Dropdown(
options=temp_names,
description='Control:',
)
move_button = ipyw.Button(description='Choose')
fig, ax = self.plot_layout()
shapes = ax.get_children()[0:N_elements]
def is_polygon(element_id):
condition1 = element_id in self.shapes_ids
condition2 = isinstance(shapes[element_id], matplotlib.patches.Polygon)
return condition1 and condition2
def is_text(element_id):
condition1 = element_id in self.strings_ids
condition2 = isinstance(shapes[element_id], matplotlib.text.Text)
return condition1 and condition2
def highlight_shape(element_id):
if is_polygon(element_id):
shapes[element_id].set_facecolor('red')
shapes[element_id].set_edgecolor('black')
textbox.value = 'Element #{:d}: Polygon'.format(element_id)
if is_text(element_id):
shapes[element_id].set_color('red')
shapes[element_id].set_weight('bold')
textbox.value = 'Element #{:d}: Text'.format(element_id)
def hide_shape(element_id):
if is_polygon(element_id):
shapes[element_id].set_facecolor('grey')
shapes[element_id].set_edgecolor('grey')
shapes[element_id].set_alpha(0.3)
if is_text(element_id):
shapes[element_id].set_color('grey')
shapes[element_id].set_weight('normal')
shapes[element_id].set_alpha(0.8)
def note_selection(element_id):
if dropdown_controls.value != nothing:
temp_str = '{:s} = {:d}'.format(dropdown_controls.value, element_id)
if is_polygon(element_id):
shapes_data.append(temp_str)
if is_text(element_id):
strings_data.append(temp_str)
temp_options = list(dropdown_controls.options)
temp_options.remove(dropdown_controls.value)
dropdown_controls.options = temp_options
# Display all options when string allocation starts:
if element_id == self.shapes_ids[-1]:
dropdown_controls.options = names + [nothing]
dropdown_controls.value = dropdown_controls.options[0]
# if element_id == self.shapes_ids[-1]:
# temp_names = names + [None]
# dropdown_controls.options = temp_names
def write_to_file(filename='../configurations/gds_config.ini',
default_file=None):
with open(filename, 'a') as gds_config:
gds_config.write('[SHAPES]\n')
for shapes_entry in shapes_data:
gds_config.write(shapes_entry + "\n")
gds_config.write('\n')
gds_config.write('[STRINGS]\n')
for strings_entry in strings_data:
gds_config.write(strings_entry + "\n")
gds_config.write('\n')
if default_file != None:
with open(default_file) as rc_default:
for rc_line in rc_default:
gds_config.write(rc_line)
print('Wrote GDS-configuration file: \"{:s}\"'.format(filename))
def close_widgets():
textbox.close()
dropdown_controls.close()
move_button.close()
def advance_wizard(button):
note_selection(shape_id.value) # NOTE SELECTION
hide_shape(shape_id.value) # HIDE CURRENT SHAPE
shape_id.value = shape_id.value + 1 # GO TO NEXT SHAPE
if shape_id.value < N_elements:
highlight_shape(shape_id.value) # HIGHLIGHT NEXT SHAPE
fig.canvas.draw() # UPDATE PLOT
elif shape_id.value == N_elements: # IF FINISH
                close_widgets()
hide_shape(shape_id.value - 1) # CLOSE WIDGETS AND
write_to_file(export_file, default_file) # SAVE SELECTIONS TO FILE
move_button.on_click(advance_wizard)
highlight_shape(shape_id.value)
dropdown_controls.value = dropdown_controls.options[0]
display(shape_id, textbox, dropdown_controls, move_button)
# if __name__ == '__main__':
# # sample_gds = sv.PATH_Sample_gds
# sample_gds = './GDS/'
# layout_1 = LayoutGDS(sample_gds)
# # layout_1.plot_elements()
# # layout_1.plot_layout()
# # plt.show()
# configfile = sv.PATH_Sample_layout_config
# layout_1.load_layout_config(configfile)
# datafile = 'DC1_SD_TR_3.h5'
# # datafile = 'pinch_4K_LD2_LV2_1.h5'
# exp_path = sv.PATH_experiments_and_data
# datapath = os.path.join(exp_path,datafile)
# gates = LayoutContent(datapath,FastRampMode=False)
# gates.set_to_layout(layout=layout_1)
# layout_1.plot_layout()
# plt.show()
class structurefromGDS(object):
"""
Interface to convert the polygons from GDS files into point lists that
can be used to calculate the potential landscapes.
Reads gds file
outputs pointlist when called
"""
def __init__(self, fname):
self.fname = fname
self.units = []
self.pointlists = []
self.string_infos = {}
def show_data(self, rec):
"""Shows data in a human-readable format."""
if rec.tag_type == types.ASCII:
return '"%s"' % rec.data.decode() # TODO escape
elif rec.tag_type == types.BITARRAY:
return str(rec.data)
return ', '.join('{0}'.format(i) for i in rec.data)
def main(self):
"""
open filename (if exists)
read units
get list of polygons
"""
# test = []
no_of_Structures = 0
string_position = []
strings = []
with open(self.fname, 'rb') as a_file:
for rec in Record.iterate(a_file):
# test.append([rec.tag_name, rec.data, rec.tag_type])
if rec.tag_type == types.NODATA:
pass
else:
# print('%s: %s' % (rec.tag_name, show_data(rec)))
# print('%s:' % (rec.tag_name))
if rec.tag_name == 'UNITS':
"""
get units
"""
unitstring = self.show_data(rec)
self.units = np.array(re.split(',', unitstring)).astype(float)
elif rec.tag_name == 'XY':
no_of_Structures += 1
"""
get pointlist
"""
# get data
datastring = self.show_data(rec)
# split string at , and convert to float
data = np.array(re.split(',', datastring)).astype(float)
# reshape into [[x1,y1],[x2,y2],...]
# print((len(data)/2, 2))
if len(data) > 2:
data = np.reshape(data, (int(len(data) / 2), 2))[:-1]
else:
data = np.reshape(data, (int(len(data) / 2), 2))
self.pointlists.append(data)
elif rec.tag_name == 'STRING':
string_position.append(no_of_Structures - 1)
strings.append(rec.data)
self.string_infos = dict(zip(string_position, strings))
def __call__(self):
"""
execute main
        return list of polygons in GDS database units (the SI scaling by self.units is currently commented out below)
"""
self.main()
# return np.array(self.pointlists) * self.units[1]
# return np.multiply(np.array(self.pointlists), self.units[1])
return np.array(self.pointlists, dtype=list)
|
py | 7dfdbc7a32a49b4e527ee25d00a951250b0f3ff3 | import os.path
import jsonpickle
def test_get_product_list(app):
products = app.get_list_of_products()
file = os.path.join(os.path.dirname(os.path.abspath(__file__)), "products.json")
with open(file, "w") as out:
jsonpickle.set_encoder_options("json", indent=2)
out.write(jsonpickle.encode(products))
out.close() |
py | 7dfdbc8b7fb1fafbe73b3e1818a79298b6bbfc37 | ##############################################################################
#
# Copyright (c) 2002 Zope Foundation and Contributors.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
import unittest
from zope.sequencesort.ssort import SortEx
from .results import res2
from .results import res3
from .results import res4
from .results import res5
from .results import res6
from .results import res7
from .ztestlib import wordlist
class TestCase(unittest.TestCase):
def test2(self):
self.assertEqual(res2, SortEx(wordlist, (("key",),), mapping=1))
def test3(self):
self.assertEqual(res3, SortEx(wordlist, (("key", "cmp"),), mapping=1))
def test4(self):
self.assertEqual(
res4, SortEx(wordlist, (("key", "cmp", "desc"),), mapping=1))
def test5(self):
self.assertEqual(
res5, SortEx(wordlist, (("weight",), ("key",)), mapping=1))
def test6(self):
self.assertEqual(
res6, SortEx(wordlist, (("weight",), ("key", "nocase", "desc")),
mapping=1))
def test7(self):
def myCmp(s1, s2):
if s1 > s2:
return -1
if s1 < s2:
return 1
return 0
# Create namespace...
from DocumentTemplate.DT_Util import TemplateDict
md = TemplateDict()
# ... and push out function onto the namespace
md._push({"myCmp": myCmp})
self.assertEqual(
res7,
SortEx(wordlist, (("weight",), ("key", "myCmp", "desc")), md,
mapping=1))
|
py | 7dfdbd0208c3f49bce4c0e80c17af4f06bc2341c | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Trains and Evaluates the MNIST network using a feed dictionary."""
import os
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
from tensorflow.examples.tutorials.mnist import mnist
INPUT_DATA_DIR = '/tmp/tensorflow/mnist/input_data/'
MAX_STEPS = 2000
BATCH_SIZE = 100
LEARNING_RATE = 0.3
HIDDEN_1 = 128
HIDDEN_2 = 32
# HACK: Ideally we would want to have a unique subpath for each instance of the job, but since we can't
# we are instead appending HOSTNAME to the logdir
LOG_DIR = os.path.join(os.getenv('TEST_TMPDIR', '/tmp'),
'tensorflow/mnist/logs/fully_connected_feed/', os.getenv('HOSTNAME', ''))
class MyModel(object):
def train(self):
self.data_sets = input_data.read_data_sets(INPUT_DATA_DIR)
self.images_placeholder = tf.placeholder(
tf.float32, shape=(BATCH_SIZE, mnist.IMAGE_PIXELS))
self.labels_placeholder = tf.placeholder(tf.int32, shape=(BATCH_SIZE))
logits = mnist.inference(self.images_placeholder,
HIDDEN_1,
HIDDEN_2)
self.loss = mnist.loss(logits, self.labels_placeholder)
self.train_op = mnist.training(self.loss, LEARNING_RATE)
self.summary = tf.summary.merge_all()
init = tf.global_variables_initializer()
self.sess = tf.Session()
self.summary_writer = tf.summary.FileWriter(LOG_DIR, self.sess.graph)
self.sess.run(init)
data_set = self.data_sets.train
for step in xrange(MAX_STEPS):
images_feed, labels_feed = data_set.next_batch(BATCH_SIZE, False)
feed_dict = {
self.images_placeholder: images_feed,
self.labels_placeholder: labels_feed,
}
_, loss_value = self.sess.run([self.train_op, self.loss],
feed_dict=feed_dict)
if step % 100 == 0:
print("At step {}, loss = {}".format(step, loss_value))
summary_str = self.sess.run(self.summary, feed_dict=feed_dict)
self.summary_writer.add_summary(summary_str, step)
self.summary_writer.flush()
if __name__ == '__main__':
if os.getenv('FAIRING_RUNTIME', None) is None:
import fairing
fairing.config.set_preprocessor('python', input_files=[__file__])
fairing.config.set_builder(name='cluster', registry='<your-registry-here>')
fairing.config.run()
else:
        MyModel().train()
|
py | 7dfdbd1db16915ab3889c4125a6cf13cb57d7871 | rows = 'ABCDEFGHI'
cols = '123456789'
def cross(A, B):
"Cross product of elements in A and elements in B."
return [s + t for s in A for t in B]
boxes = cross(rows, cols)
row_units = [cross(r, cols) for r in rows]
column_units = [cross(rows, c) for c in cols]
square_units = [cross(rs, cs) for rs in ('ABC', 'DEF', 'GHI') for cs in ('123', '456', '789')]
unitlist = row_units + column_units + square_units
units = dict((s, [u for u in unitlist if s in u]) for s in boxes)
peers = dict((s, set(sum(units[s], [])) - set([s])) for s in boxes)
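# units[s]: the three units (row, column, 3x3 square) containing box s
# peers[s]: the 20 other boxes that share at least one unit with box s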
def grid_values(grid):
"""
Convert grid into a dict of {square: char} with '123456789' for empties.
Args:
grid(string) - A grid in string form.
Returns:
A grid in dictionary form
Keys: The boxes, e.g., 'A1'
Values: The value in each box, e.g., '8'. If the box has no value, then the value will be '123456789'.
"""
values = []
all_digits = '123456789'
for c in grid:
if c == '.':
values.append(all_digits)
elif c in all_digits:
values.append(c)
assert len(values) == 81
return dict(zip(boxes, values))
def display(values):
"""
Display the values as a 2-D grid.
Args:
values(dict): The sudoku in dictionary form
"""
width = 1 + max(len(values[s]) for s in boxes)
line = '+'.join(['-' * (width * 3)] * 3)
for r in rows:
print(''.join(values[r + c].center(width) + ('|' if c in '36' else '')
for c in cols))
if r in 'CF':
print(line)
return
grid = '5.9.2..1.4...56...8..9.3..5.87..25..654....82..15684971.82.5...7..68...3.4..7.8..'
print(len(grid))
values = grid_values(grid)
values['D1'] = '39'
values['F1'] = '23'
values['F2'] = '23'
display(values)
twins_dict = {}
def any_in(a, b):
return any(i in b for i in a)
for key, value in values.items():
if len(value) == 2:
twins_dict.setdefault(value, []).append(key)
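# Naked-twins elimination: when the same two-digit candidate pair occupies two boxes that are
# peers of each other, those two digits can be removed from every box that is a peer of both.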
for no, _boxes in twins_dict.items():
if len(_boxes) >= 2:
twins = [box for box in _boxes if any_in(peers[box], _boxes)]
if len(twins) == 2:
for peer in set(peers[twins[0]]).intersection(peers[twins[1]]):
for digit in no:
values[peer] = values[peer].replace(digit, '')
display(values)
|
py | 7dfdbf792b3b8c2fcdecd5ab41120c42b50c845d | r"""CMB filtering utilities module.
This module collects some convenience wrapper libraries.
"""
import healpy as hp
from plancklens import utils
import numpy as np
class library_ftl:
""" Library of a-posteriori re-scaled filtered CMB maps, for separate temperature and polarization filtering
Args:
ivfs : inverse filtering library instance (e.g. one of those in plancklens.filt.filt_simple).
lmax (int) : defines the new healpy alm array shape (identical for temperature and polarization)
lfilt_t (1d array): filtered temperature alms are rescaled by lfilt_t
lfilt_e (1d array): filtered E-polarization alms are rescaled by lfilt_e
lfilt_b (1d array): filtered B-polarization alms are rescaled by lfilt_b
Wraps the input filtering instance *(ivfs)* methods to keep the same interface.
Note:
ftl fel fbl should eventually be taken off to be replaced by fal in all cases
"""
def __init__(self, ivfs, lmax, lfilt_t, lfilt_e, lfilt_b):
assert len(lfilt_t) > lmax and len(lfilt_e) > lmax and len(lfilt_b) > lmax
self.ivfs = ivfs
self.lmax = lmax
self.lfilt_t = lfilt_t
self.lfilt_e = lfilt_e
self.lfilt_b = lfilt_b
self.lib_dir = ivfs.lib_dir
def hashdict(self):
return {'ivfs': self.ivfs.hashdict(),
'filt_t': utils.clhash(self.lfilt_t[:self.lmax + 1]),
'filt_e': utils.clhash(self.lfilt_e[:self.lmax + 1]),
'filt_b': utils.clhash(self.lfilt_b[:self.lmax + 1])}
def get_fmask(self):
return self.ivfs.get_fmask()
def get_tal(self, a):
return self.ivfs.get_tal(a)
def get_ftl(self):
return self.ivfs.get_ftl()[:self.lmax + 1] * self.lfilt_t[:self.lmax + 1]
def get_fel(self):
return self.ivfs.get_fel()[:self.lmax + 1] * self.lfilt_e[:self.lmax + 1]
def get_fbl(self):
return self.ivfs.get_fbl()[:self.lmax + 1] * self.lfilt_b[:self.lmax + 1]
def get_sim_tlm(self, idx):
return hp.almxfl(utils.alm_copy(self.ivfs.get_sim_tlm(idx), lmax=self.lmax), self.lfilt_t)
def get_sim_elm(self, idx):
return hp.almxfl(utils.alm_copy(self.ivfs.get_sim_elm(idx), lmax=self.lmax), self.lfilt_e)
def get_sim_blm(self, idx):
return hp.almxfl(utils.alm_copy(self.ivfs.get_sim_blm(idx), lmax=self.lmax), self.lfilt_b)
def get_sim_tmliklm(self, idx):
return hp.almxfl(utils.alm_copy(self.ivfs.get_sim_tmliklm(idx), lmax=self.lmax), self.lfilt_t)
def get_sim_emliklm(self, idx):
return hp.almxfl(utils.alm_copy(self.ivfs.get_sim_emliklm(idx), lmax=self.lmax), self.lfilt_e)
def get_sim_bmliklm(self, idx):
return hp.almxfl(utils.alm_copy(self.ivfs.get_sim_bmliklm(idx), lmax=self.lmax), self.lfilt_b)
class library_fml:
def __init__(self, ivfs, lmax, mfilt_t, mfilt_e, mfilt_b):
"""Library of a-posteriori re-scaled filtered CMB maps
This rescales the maps according to 'm' values, alm -> fm alm
Args:
ivfs : inverse filtering library instance (e.g. one of those in plancklens.filt.filt_simple).
lmax (int) : defines the new healpy alm array shape (identical for temperature and polarization)
mfilt_t (1d array): filtered temperature alms are rescaled by mfilt_t
mfilt_e (1d array): filtered E-polarization alms are rescaled by mfilt_e
mfilt_b (1d array): filtered B-polarization alms are rescaled by mfilt_b
Wraps the input filtering instance *(ivfs)* methods to keep the same interface.
"""
assert len(mfilt_t) > lmax and len(mfilt_e) > lmax and len(mfilt_b) > lmax
self.ivfs = ivfs
self.lmax = lmax
self.mfilt_t = mfilt_t
self.mfilt_e = mfilt_e
self.mfilt_b = mfilt_b
self.lib_dir = ivfs.lib_dir
def hashdict(self):
return {'ivfs': self.ivfs.hashdict(),
'filt_t': utils.clhash(self.mfilt_t[:self.lmax + 1]),
'filt_e': utils.clhash(self.mfilt_e[:self.lmax + 1]),
'filt_b': utils.clhash(self.mfilt_b[:self.lmax + 1])}
def get_fmask(self):
return self.ivfs.get_fmask()
@staticmethod
def almxfm(alm, fm, lmax):
ret = utils.alm_copy(alm, lmax=lmax)
for m in range(lmax + 1):
ret[hp.Alm.getidx(lmax, np.arange(m, lmax + 1, dtype=int), m)] *= fm[m]
return ret
def get_tal(self, a):
return self.ivfs.get_tal(a)
def get_ftl(self):
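        # m_rescal[l] is the average of the m-filter over the (2l + 1) modes with |m| <= l
        # (each m > 0 counted twice for +/- m, m = 0 counted once)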
m_rescal = 2 * np.cumsum(self.mfilt_t[:self.lmax + 1]) - self.mfilt_t[0]
m_rescal /= (2 * np.arange(self.lmax + 1) + 1)
return self.ivfs.get_ftl()[:self.lmax + 1] * np.sqrt(m_rescal) # root has better chance to work at the spectrum level
def get_fel(self):
m_rescal = 2 * np.cumsum(self.mfilt_e[:self.lmax + 1]) - self.mfilt_e[0]
m_rescal /= (2 * np.arange(self.lmax + 1) + 1)
return self.ivfs.get_fel()[:self.lmax + 1] * np.sqrt(m_rescal)
def get_fbl(self):
m_rescal = 2 * np.cumsum(self.mfilt_b[:self.lmax + 1]) - self.mfilt_b[0]
m_rescal /= (2 * np.arange(self.lmax + 1) + 1)
return self.ivfs.get_fbl()[:self.lmax + 1] * np.sqrt(m_rescal)
def get_sim_tlm(self, idx):
return self.almxfm(self.ivfs.get_sim_tlm(idx), self.mfilt_t, self.lmax)
def get_sim_elm(self, idx):
return self.almxfm(self.ivfs.get_sim_elm(idx), self.mfilt_t, self.lmax)
def get_sim_blm(self, idx):
return self.almxfm(self.ivfs.get_sim_blm(idx), self.mfilt_t, self.lmax)
def get_sim_tmliklm(self, idx):
return self.almxfm(self.ivfs.get_sim_tmliklm(idx), self.mfilt_t, self.lmax)
def get_sim_emliklm(self, idx):
return self.almxfm(self.ivfs.get_sim_emliklm(idx), self.mfilt_e, self.lmax)
def get_sim_bmliklm(self, idx):
return self.almxfm(self.ivfs.get_sim_bmliklm(idx), self.mfilt_b, self.lmax)
class library_shuffle:
r"""A library of filtered sims with remapped indices.
This is useful for lensing biases calculations, such as :math:`\hat N^{(0)}_L.`
Args:
ivfs : inverse-variance filtering library instance.
idxs : index idx of this new instance points to idxs[idx] of the input *ivfs* instance.
Wraps the input filtering instance *(ivfs)* methods to keep the same interface.
"""
def __init__(self, ivfs, idxs):
self.ivfs = ivfs
self.idxs = idxs
def hashdict(self):
return {'ivfs': self.ivfs.hashdict(), 'idxs': self.idxs}
def get_fmask(self):
return self.ivfs.get_fmask()
def get_tal(self, a):
return self.ivfs.get_tal(a)
def get_ftl(self):
return self.ivfs.get_ftl()
def get_fel(self):
return self.ivfs.get_fel()
def get_fbl(self):
return self.ivfs.get_fbl()
def get_sim_tlm(self, idx):
return self.ivfs.get_sim_tlm(self.idxs[idx])
def get_sim_elm(self, idx):
return self.ivfs.get_sim_elm(self.idxs[idx])
def get_sim_blm(self, idx):
return self.ivfs.get_sim_blm(self.idxs[idx])
def get_sim_tmliklm(self, idx):
return self.ivfs.get_sim_tmliklm(self.idxs[idx])
def get_sim_emliklm(self, idx):
return self.ivfs.get_sim_emliklm(self.idxs[idx])
def get_sim_bmliklm(self, idx):
return self.ivfs.get_sim_bmliklm(self.idxs[idx])
|
py | 7dfdc07736006ca0e55991dd88433116784cd546 | #!/usr/bin/env python3
# Copyright (c) 2014-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test wallet import RPCs.
Test rescan behavior of importaddress, importpubkey, importprivkey, and
importmulti RPCs with different types of keys and rescan options.
In the first part of the test, node 0 creates an address for each type of
import RPC call and sends BTC to it. Then other nodes import the addresses,
and the test makes listtransactions and getbalance calls to confirm that the
importing node either did or did not execute rescans picking up the send
transactions.
In the second part of the test, node 0 sends more BTC to each address, and the
test makes more listtransactions and getbalance calls to confirm that the
importing nodes pick up the new transactions regardless of whether rescans
happened previously.
"""
from test_framework.test_framework import IQCashTestFramework
from test_framework.util import (assert_raises_rpc_error, connect_nodes, sync_blocks, assert_equal, set_node_times)
import collections
import enum
import itertools
Call = enum.Enum("Call", "single")
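# NOTE: only the "single" key-import RPCs are exercised below; the Call.multi branch in
# Variant.do_import is unreachable with this enum definition.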
Data = enum.Enum("Data", "address pub priv")
Rescan = enum.Enum("Rescan", "no yes late_timestamp")
class Variant(collections.namedtuple("Variant", "call data rescan prune")):
"""Helper for importing one key and verifying scanned transactions."""
def try_rpc(self, func, *args, **kwargs):
if self.expect_disabled:
assert_raises_rpc_error(-4, "Rescan is disabled in pruned mode", func, *args, **kwargs)
else:
return func(*args, **kwargs)
def do_import(self, timestamp):
"""Call one key import RPC."""
rescan = self.rescan == Rescan.yes
if self.call == Call.single:
if self.data == Data.address:
response = self.try_rpc(self.node.importaddress, self.address["address"], self.label, rescan)
elif self.data == Data.pub:
response = self.try_rpc(self.node.importpubkey, self.address["pubkey"], self.label, rescan)
elif self.data == Data.priv:
response = self.try_rpc(self.node.importprivkey, self.key, self.label, rescan)
assert_equal(response, None)
elif self.call == Call.multi:
response = self.node.importmulti([{
"scriptPubKey": {
"address": self.address["address"]
},
"timestamp": timestamp + TIMESTAMP_WINDOW + (1 if self.rescan == Rescan.late_timestamp else 0),
"pubkeys": [self.address["pubkey"]] if self.data == Data.pub else [],
"keys": [self.key] if self.data == Data.priv else [],
"label": self.label,
"watchonly": self.data != Data.priv
}], {"rescan": self.rescan in (Rescan.yes, Rescan.late_timestamp)})
assert_equal(response, [{"success": True}])
def check(self, txid=None, amount=None, confirmations=None):
"""Verify that listreceivedbyaddress returns return expected values."""
addresses = self.node.listreceivedbyaddress(0, True, self.address['address'])
if self.expected_txs:
assert_equal(len(addresses[0]["txids"]), self.expected_txs)
if txid is not None:
address, = [ad for ad in addresses if txid in ad["txids"]]
assert_equal(address["address"], self.address["address"])
assert_equal(address["amount"], self.expected_balance)
assert_equal(address["confirmations"], confirmations)
# Verify the transaction is correctly marked watchonly depending on
# whether the transaction pays to an imported public key or
# imported private key. The test setup ensures that transaction
# inputs will not be from watchonly keys (important because
# involvesWatchonly will be true if either the transaction output
# or inputs are watchonly).
if self.data != Data.priv:
assert_equal(address["involvesWatchonly"], True)
else:
assert_equal("involvesWatchonly" not in address, True)
# List of Variants for each way a key or address could be imported.
IMPORT_VARIANTS = [Variant(*variants) for variants in itertools.product(Call, Data, Rescan, (False, True))]
# List of nodes to import keys to. Half the nodes will have pruning disabled,
# half will have it enabled. Different nodes will be used for imports that are
# expected to cause rescans, and imports that are not expected to cause
# rescans, in order to prevent rescans during later imports picking up
# transactions associated with earlier imports. This makes it easier to keep
# track of expected balances and transactions.
ImportNode = collections.namedtuple("ImportNode", "prune rescan")
IMPORT_NODES = [ImportNode(*fields) for fields in itertools.product((False, True), repeat=2)]
# Rescans start at the earliest block up to 2 hours before the key timestamp.
TIMESTAMP_WINDOW = 2 * 60 * 60
class ImportRescanTest(IQCashTestFramework):
def set_test_params(self):
self.num_nodes = 2 + len(IMPORT_NODES)
def setup_network(self):
extra_args = [["-addresstype=legacy",] for _ in range(self.num_nodes)]
for i, import_node in enumerate(IMPORT_NODES, 2):
if import_node.prune:
extra_args[i] += ["-prune=1"]
self.add_nodes(self.num_nodes, extra_args)
self.start_nodes()
for i in range(1, self.num_nodes):
connect_nodes(self.nodes[i], 0)
def run_test(self):
# Create one transaction on node 0 with a unique amount for
# each possible type of wallet import RPC.
for i, variant in enumerate(IMPORT_VARIANTS):
variant.address = self.nodes[1].validateaddress(self.nodes[1].getnewaddress())
variant.key = self.nodes[1].dumpprivkey(variant.address["address"])
variant.initial_amount = 10 - (i + 1) / 4.0
variant.initial_txid = self.nodes[0].sendtoaddress(variant.address["address"], variant.initial_amount)
# Generate a block containing the initial transactions, then another
# block further in the future (past the rescan window).
self.nodes[0].generate(1)
assert_equal(self.nodes[0].getrawmempool(), [])
timestamp = self.nodes[0].getblockheader(self.nodes[0].getbestblockhash())["time"]
set_node_times(self.nodes, timestamp + TIMESTAMP_WINDOW + 1)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
# For each variation of wallet key import, invoke the import RPC and
# check the results from getbalance and listtransactions.
for variant in IMPORT_VARIANTS:
variant.expect_disabled = variant.rescan == Rescan.yes and variant.prune and variant.call == Call.single
expect_rescan = variant.rescan == Rescan.yes and not variant.expect_disabled
variant.node = self.nodes[2 + IMPORT_NODES.index(ImportNode(variant.prune, expect_rescan))]
variant.do_import(timestamp)
if expect_rescan:
variant.expected_balance = variant.initial_amount
variant.expected_txs = 1
variant.check(variant.initial_txid, variant.initial_amount, 2)
else:
variant.expected_balance = 0
variant.expected_txs = 0
variant.check()
# Create new transactions sending to each address.
for i, variant in enumerate(IMPORT_VARIANTS):
variant.sent_amount = 10 - (2 * i + 1) / 8.0
variant.sent_txid = self.nodes[0].sendtoaddress(variant.address["address"], variant.sent_amount)
# Generate a block containing the new transactions.
self.nodes[0].generate(1)
assert_equal(self.nodes[0].getrawmempool(), [])
sync_blocks(self.nodes)
# Check the latest results from getbalance and listtransactions.
for variant in IMPORT_VARIANTS:
if not variant.expect_disabled:
variant.expected_balance += variant.sent_amount
variant.expected_txs += 1
variant.check(variant.sent_txid, variant.sent_amount, 1)
else:
variant.check()
if __name__ == "__main__":
ImportRescanTest().main()
|
py | 7dfdc16f35805d2b16e9ea5ad2803e8824997b74 | # Generator network
# Composed of two parts: a deep network and a shallow network
import torch
import torch.nn as nn
import torch.nn.functional as F
from tensorboardX import SummaryWriter
class GNet(nn.Module):
def __init__(self):
super(GNet, self).__init__()
        # 4x upsampling (bicubic)
self.UpSamp1 = nn.Sequential(
nn.Upsample(scale_factor=4, mode='bicubic', align_corners=True),
nn.Conv2d(3, 64, 3, 1, 0))
        # 2x upsampling (bicubic)
        self.UpSamp2 = nn.Sequential(
nn.Upsample(scale_factor=2, mode='bicubic', align_corners=True),
nn.Conv2d(3, 64, 3, 1, 0))
        # Deep network: 16 residual blocks
self.preTrain = nn.Conv2d(3, 64, 1, 1, 0)
self.resBlock = nn.Sequential(
nn.Conv2d(64, 64, 3, 1, 1),
nn.ReLU(inplace=True),
nn.Conv2d(64, 64, 3, 1, 1))
        # Shallow network: 4 convolutional blocks
self.shallowNet = nn.Sequential(
nn.Conv2d(3, 64, 9, 1, 4),
nn.ReLU(inplace=True),
nn.Conv2d(64, 64, 9, 1, 4),
nn.ReLU(inplace=True),
nn.Conv2d(64, 64, 9, 1, 4),
nn.ReLU(inplace=True),
nn.Conv2d(64, 64, 9, 1, 4),
nn.ReLU(inplace=True),
)
        # Transposed-convolution merging network
self.DeConv1 = nn.ConvTranspose2d(64, 64, 3, 3, 33)
self.DeConv2 = nn.ConvTranspose2d(64, 64, 3, 3, 62)
        # Final convolution layer
self.Finally = nn.Conv2d(64, 3, 1, 1, 0)
def forward(self, x):
        # 4x upsampling
x_4x = self.UpSamp1(x)
        # 2x upsampling
x_2x = self.UpSamp2(x)
        # Extract deep features
x_deep = self.preTrain(x)
for i in range(16):
x_res = self.resBlock(x_deep)
x_deep = x_deep + x_res
        # Shallow network
x_shallow = self.shallowNet(x)
        # Feature fusion layer
x_DS = x_deep + x_shallow
        # First transposed convolution
x_Deconv1 = self.DeConv1(x_DS)
x_Deconv1 = x_Deconv1 + x_2x
        # Second transposed convolution
x_Deconv2 = self.DeConv2(x_Deconv1)
x_Deconv2 = x_Deconv2 + x_4x
        # Final convolution layer
x = self.Finally(x_Deconv2)
x = F.tanh(x)
return x
|
py | 7dfdc27c1241bdb0cc1dbf7006fe4bcd57c653ea | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from .bootstrap_rst_directives import *
|
py | 7dfdc2d3c571e288b9b70325a3bee69237fdadee | from simple_converge.main.main import train
from simple_converge.tf_models.models_collection import models_collection
from unet.Settings import Settings
from unet.Dataset import Dataset
# Create class instances
settings = Settings()
dataset = Dataset()
# Train
train(settings, dataset, models_collection)
|
py | 7dfdc2e155f5137e5bd49855909393da30bf9e68 | from enum import Enum
from typing import List, Optional
from typing_extensions import Literal
from pydantic import BaseModel, root_validator
from nonebot.typing import overrides
from nonebot.adapters import Event as BaseEvent
from .message import Message
class Event(BaseEvent):
"""
    DingTalk protocol event. Refer to the `DingTalk docs`_ for the meaning of each event field.
    .. _DingTalk docs:
        https://ding-doc.dingtalk.com/document#/org-dev-guide/elzz1p
"""
chatbotUserId: str
@overrides(BaseEvent)
def get_type(self) -> Literal["message", "notice", "request", "meta_event"]:
raise ValueError("Event has no type!")
@overrides(BaseEvent)
def get_event_name(self) -> str:
raise ValueError("Event has no name!")
@overrides(BaseEvent)
def get_event_description(self) -> str:
raise ValueError("Event has no description!")
@overrides(BaseEvent)
def get_message(self) -> "Message":
raise ValueError("Event has no message!")
@overrides(BaseEvent)
def get_plaintext(self) -> str:
raise ValueError("Event has no plaintext!")
@overrides(BaseEvent)
def get_user_id(self) -> str:
raise ValueError("Event has no user_id!")
@overrides(BaseEvent)
def get_session_id(self) -> str:
raise ValueError("Event has no session_id!")
@overrides(BaseEvent)
def is_tome(self) -> bool:
return True
class TextMessage(BaseModel):
content: str
class AtUsersItem(BaseModel):
dingtalkId: str
staffId: Optional[str]
class ConversationType(str, Enum):
private = "1"
group = "2"
class MessageEvent(Event):
"""消息事件"""
msgtype: str
text: TextMessage
msgId: str
createAt: int # ms
conversationType: ConversationType
conversationId: str
senderId: str
senderNick: str
senderCorpId: Optional[str]
sessionWebhook: str
sessionWebhookExpiredTime: int
isAdmin: bool
message: Message
@root_validator(pre=True)
def gen_message(cls, values: dict):
assert "msgtype" in values, "msgtype must be specified"
        # In practice, DingTalk bots can currently only receive messages of type 'text'
assert values[
"msgtype"] in values, f"{values['msgtype']} must be specified"
content = values[values['msgtype']]['content']
        # If the bot was @-mentioned, the first character is a space; strip this special case
if content[0] == ' ':
content = content[1:]
values["message"] = content
return values
@overrides(Event)
def get_type(self) -> Literal["message", "notice", "request", "meta_event"]:
return "message"
@overrides(Event)
def get_event_name(self) -> str:
return f"{self.get_type()}.{self.conversationType.name}"
@overrides(Event)
def get_event_description(self) -> str:
return f'Message[{self.msgtype}] {self.msgId} from {self.senderId} "{self.text.content}"'
@overrides(Event)
def get_message(self) -> Message:
return self.message
@overrides(Event)
def get_plaintext(self) -> str:
return self.text.content
@overrides(Event)
def get_user_id(self) -> str:
return self.senderId
@overrides(Event)
def get_session_id(self) -> str:
return self.senderId
class PrivateMessageEvent(MessageEvent):
"""私聊消息事件"""
chatbotCorpId: str
senderStaffId: Optional[str]
conversationType: ConversationType = ConversationType.private
class GroupMessageEvent(MessageEvent):
"""群消息事件"""
atUsers: List[AtUsersItem]
conversationType: ConversationType = ConversationType.group
conversationTitle: str
isInAtList: bool
@overrides(MessageEvent)
def is_tome(self) -> bool:
return self.isInAtList
|
py | 7dfdc343545a7c1b64b35c1651ccf1779ceea67b | # Generated by Django 3.0.8 on 2021-06-14 07:32
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('university_structure', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Teachers',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('first_name', models.CharField(max_length=30)),
('fathers_name', models.CharField(default=None, max_length=60, null=True)),
('last_name', models.CharField(max_length=150)),
('email', models.EmailField(default=None, max_length=254, null=True)),
('degree', models.CharField(default=None, max_length=150, null=True)),
('department_id', models.ForeignKey(db_column='department_id', on_delete=django.db.models.deletion.CASCADE, to='university_structure.Departments')),
],
options={
'db_table': 'teachers',
},
),
migrations.CreateModel(
name='Students',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('number', models.CharField(max_length=20, unique=True)),
('first_name', models.CharField(max_length=30)),
('fathers_name', models.CharField(default=None, max_length=60, null=True)),
('last_name', models.CharField(max_length=150)),
('email', models.EmailField(default=None, max_length=254, null=True)),
('average_rating', models.FloatField(default=None)),
('update_date', models.DateTimeField(default=None)),
('group_id', models.ForeignKey(db_column='group_id', on_delete=django.db.models.deletion.CASCADE, to='university_structure.Groups')),
],
options={
'db_table': 'students',
},
),
]
|
py | 7dfdc377c2921dfbf24b9a3baf538763fd61b0da | from torch import nn
import torch
class GaussianFilter(nn.Module):
def __init__(self, kernel_size=13, stride=1, padding=6):
super(GaussianFilter, self).__init__()
        # initialize the Gaussian kernel
mean = (kernel_size - 1) / 2.0
variance = ((kernel_size - 1) / 6.0) ** 2.0
        # Create an x, y coordinate grid of shape (kernel_size, kernel_size, 2)
x_coord = torch.arange(kernel_size)
x_grid = x_coord.repeat(kernel_size).view(kernel_size, kernel_size)
y_grid = x_grid.t()
xy_grid = torch.stack([x_grid, y_grid], dim=-1).float()
# Calculate the 2-dimensional gaussian kernel
gaussian_kernel = torch.exp(-torch.sum((xy_grid - mean) ** 2., dim=-1) / (2 * variance))
# Make sure sum of values in gaussian kernel equals 1.
gaussian_kernel = gaussian_kernel / torch.sum(gaussian_kernel)
# Reshape to 2d depthwise convolutional weight
gaussian_kernel = gaussian_kernel.view(1, 1, kernel_size, kernel_size)
gaussian_kernel = gaussian_kernel.repeat(3, 1, 1, 1)
# create gaussian filter as convolutional layer
self.gaussian_filter = nn.Conv2d(3, 3, kernel_size, stride=stride, padding=padding, groups=3, bias=False)
self.gaussian_filter.weight.data = gaussian_kernel
self.gaussian_filter.weight.requires_grad = False
def forward(self, x):
return self.gaussian_filter(x)
class FilterLow(nn.Module):
def __init__(self, recursions=1, kernel_size=9, stride=1, padding=True, include_pad=True, gaussian=False):
super(FilterLow, self).__init__()
if padding:
pad = int((kernel_size - 1) / 2)
else:
pad = 0
if gaussian:
self.filter = GaussianFilter(kernel_size=kernel_size, stride=stride, padding=pad)
else:
self.filter = nn.AvgPool2d(kernel_size=kernel_size, stride=stride, padding=pad, count_include_pad=include_pad)
self.recursions = recursions
def forward(self, img):
for i in range(self.recursions):
img = self.filter(img)
return img
class FilterHigh(nn.Module):
def __init__(self, recursions=1, kernel_size=9, stride=1, include_pad=True, normalize=True, gaussian=False):
super(FilterHigh, self).__init__()
self.filter_low = FilterLow(recursions=1, kernel_size=kernel_size, stride=stride, include_pad=include_pad,
gaussian=gaussian)
self.recursions = recursions
self.normalize = normalize
def forward(self, img):
if self.recursions > 1:
for i in range(self.recursions - 1):
img = self.filter_low(img)
img = img - self.filter_low(img)
if self.normalize:
return 0.5 + img * 0.5
else:
return img |
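# Usage sketch (an assumption, not part of the original file): splitting a batch of
# RGB images into low- and high-frequency bands with the modules above. The batch
# shape and kernel size are illustrative.
if __name__ == "__main__":
    imgs = torch.rand(4, 3, 64, 64)  # N x C x H x W
    low_pass = FilterLow(kernel_size=9, gaussian=True)
    high_pass = FilterHigh(kernel_size=9, gaussian=True, normalize=False)
    low = low_pass(imgs)    # blurred content
    high = high_pass(imgs)  # residual detail: img - blur(img)
    # With normalize=False the two bands sum back to the original images.
    print(torch.allclose(low + high, imgs, atol=1e-5))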
py | 7dfdc3affe305f26f96fc7191d294b520aef96f2 | ##############################################################################
#
# Copyright (c) 2001-2008 Zope Foundation and Contributors
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this
# distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
import unittest
from zExceptions import Forbidden
from Products.PluggableAuthService.tests.conformance \
import IRolesPlugin_conformance
from Products.PluggableAuthService.tests.conformance \
import IRoleEnumerationPlugin_conformance
from Products.PluggableAuthService.tests.conformance \
import IRoleAssignerPlugin_conformance
from Products.PluggableAuthService.plugins.tests.helpers \
import FauxPAS, FauxSmartPAS, DummyUser, makeRequestAndResponse
class ZODBRoleManagerTests(unittest.TestCase, IRolesPlugin_conformance,
IRoleEnumerationPlugin_conformance,
IRoleAssignerPlugin_conformance):
def _getTargetClass(self):
from Products.PluggableAuthService.plugins.ZODBRoleManager \
import ZODBRoleManager
return ZODBRoleManager
def _makeOne(self, id='test', *args, **kw):
return self._getTargetClass()(id=id, *args, **kw)
def test_empty(self):
zrm = self._makeOne()
self.assertEqual(len(zrm.listRoleIds()), 0)
self.assertEqual(len(zrm.enumerateRoles()), 0)
user = DummyUser('userid')
roles = zrm.getRolesForPrincipal(user)
self.assertEqual(len(roles), 0)
def test_addRole(self):
from Products.PluggableAuthService.tests.test_PluggableAuthService \
import FauxRoot
root = FauxRoot()
zrm = self._makeOne().__of__(root)
zrm.addRole('roleid', 'Role', 'This is a role')
role_ids = zrm.listRoleIds()
self.assertEqual(len(role_ids), 1)
self.assertEqual(role_ids[0], 'roleid')
info_list = zrm.enumerateRoles()
self.assertEqual(len(info_list), 1)
info = info_list[0]
self.assertEqual(info['id'], 'roleid')
def test_addRole_duplicate_check(self):
zrm = self._makeOne()
zrm.addRole('roleid', 'Role', 'This is a role')
self.assertRaises(KeyError, zrm.addRole,
'roleid', 'Alias', 'duplicate')
def test_removeRole_nonesuch(self):
zrm = self._makeOne()
self.assertRaises(KeyError, zrm.removeRole, 'nonesuch')
def test_removeRole_valid_id(self):
from Products.PluggableAuthService.tests.test_PluggableAuthService \
import FauxRoot
root = FauxRoot()
zrm = self._makeOne().__of__(root)
zrm.addRole('roleid', 'Role', 'This is a role')
zrm.addRole('doomed', 'Fatal', 'rust never sleeps')
zrm.removeRole('doomed')
role_ids = zrm.listRoleIds()
self.assertEqual(len(role_ids), 1)
self.assertEqual(len(zrm.enumerateRoles()), 1)
self.assertEqual(role_ids[0], 'roleid')
def test_enumerateRoles_no_criteria(self):
from Products.PluggableAuthService.tests.test_PluggableAuthService \
import FauxRoot
root = FauxRoot()
zrm = self._makeOne(id='no_crit').__of__(root)
ID_LIST = ('foo', 'bar', 'baz', 'bam')
for id in ID_LIST:
zrm.addRole(id, 'Role %s' % id, 'This is role, %s' % id)
info_list = zrm.enumerateRoles()
self.assertEqual(len(info_list), len(ID_LIST))
sorted = list(ID_LIST)
sorted.sort()
for i in range(len(sorted)):
self.assertEqual(info_list[i]['id'], sorted[i])
self.assertEqual(info_list[i]['pluginid'], 'no_crit')
self.assertEqual(info_list[i]['properties_url'],
'no_crit/manage_roles?role_id=%s' % sorted[i])
self.assertEqual(info_list[i]['members_url'],
'no_crit/manage_roles?role_id=%s&assign=1' %
sorted[i])
def test_enumerateRoles_exact(self):
from Products.PluggableAuthService.tests.test_PluggableAuthService \
import FauxRoot
root = FauxRoot()
zrm = self._makeOne(id='exact').__of__(root)
ID_LIST = ('foo', 'bar', 'baz', 'bam')
for id in ID_LIST:
zrm.addRole(id, 'Role %s' % id, 'This is role, %s' % id)
info_list = zrm.enumerateRoles(id='bar', exact_match=True)
self.assertEqual(len(info_list), 1)
info = info_list[0]
self.assertEqual(info['id'], 'bar')
self.assertEqual(info['pluginid'], 'exact')
self.assertEqual(info['properties_url'],
'exact/manage_roles?role_id=bar')
self.assertEqual(info['members_url'],
'exact/manage_roles?role_id=bar&assign=1')
self.assertEqual(info['title'], 'Role bar')
self.assertEqual(info['description'], 'This is role, bar')
def test_enumerateRoles_partial(self):
from Products.PluggableAuthService.tests.test_PluggableAuthService \
import FauxRoot
root = FauxRoot()
zrm = self._makeOne(id='partial').__of__(root)
ID_LIST = ('foo', 'bar', 'baz', 'bam')
for id in ID_LIST:
zrm.addRole(id, 'Role %s' % id, 'This is role, %s' % id)
info_list = zrm.enumerateRoles(id='ba', exact_match=False)
self.assertEqual(len(info_list), len(ID_LIST) - 1) # no 'foo'
sorted = list(ID_LIST)
sorted.sort()
for i in range(len(sorted) - 1):
self.assertEqual(info_list[i]['id'], sorted[i])
self.assertEqual(info_list[i]['pluginid'], 'partial')
self.assertEqual(info_list[i]['properties_url'],
'partial/manage_roles?role_id=%s' % sorted[i])
self.assertEqual(info_list[i]['members_url'],
'partial/manage_roles?role_id=%s&assign=1' %
sorted[i])
self.assertEqual(info_list[i]['title'], 'Role %s' % sorted[i])
self.assertEqual(info_list[i]['description'],
'This is role, %s' % sorted[i])
def test_enumerateRoles_multiple(self):
from Products.PluggableAuthService.tests.test_PluggableAuthService \
import FauxRoot
root = FauxRoot()
zrm = self._makeOne(id='partial').__of__(root)
ID_LIST = ('foo', 'bar', 'baz', 'bam')
for id in ID_LIST:
zrm.addRole(id, 'Role %s' % id, 'This is role, %s' % id)
info_list = zrm.enumerateRoles(id=ID_LIST)
self.assertEqual(len(info_list), len(ID_LIST))
for info in info_list:
self.assertTrue(info['id'] in ID_LIST)
SUBSET = ID_LIST[:3]
info_list = zrm.enumerateRoles(id=SUBSET)
self.assertEqual(len(info_list), len(SUBSET))
for info in info_list:
self.assertTrue(info['id'] in SUBSET)
def test_enumerateRoles_exact_nonesuch(self):
from Products.PluggableAuthService.tests.test_PluggableAuthService \
import FauxRoot
root = FauxRoot()
zgm = self._makeOne(id='exact_nonesuch').__of__(root)
ID_LIST = ('foo', 'bar', 'baz', 'bam')
for id in ID_LIST:
zgm.addRole(id, 'Role %s' % id, 'This is role, %s' % id)
self.assertEqual(zgm.enumerateRoles(id='qux', exact_match=True), ())
def test_assignRoleToPrincipal_nonesuch(self):
from Products.PluggableAuthService.tests.test_PluggableAuthService \
import FauxRoot
root = FauxRoot()
zrm = self._makeOne(id='assign_nonesuch').__of__(root)
self.assertRaises(KeyError, zrm.assignRoleToPrincipal, 'test', 'foo')
def test_assignRoleToPrincipal_user(self):
from Products.PluggableAuthService.tests.test_PluggableAuthService \
import FauxRoot
root = FauxRoot()
zrm = self._makeOne(id='assign_user').__of__(root)
zrm.addRole('test1')
zrm.addRole('test2')
user = DummyUser('foo')
roles = zrm.getRolesForPrincipal(user)
self.assertEqual(len(roles), 0)
zrm.assignRoleToPrincipal('test1', 'foo')
roles = zrm.getRolesForPrincipal(user)
self.assertEqual(len(roles), 1)
self.assertTrue('test1' in roles)
zrm.assignRoleToPrincipal('test2', 'foo')
roles = zrm.getRolesForPrincipal(user)
self.assertEqual(len(roles), 2)
self.assertTrue('test1' in roles)
self.assertTrue('test2' in roles)
def test_assignRoleToPrincipal_group(self):
from Products.PluggableAuthService.tests.test_PluggableAuthService \
import FauxRoot
root = FauxRoot()
zrm = self._makeOne(id='assign_user').__of__(root)
zrm.addRole('test1')
zrm.addRole('test2')
user = DummyUser('foo', ('qux',))
roles = zrm.getRolesForPrincipal(user)
self.assertEqual(len(roles), 0)
zrm.assignRoleToPrincipal('test1', 'qux')
roles = zrm.getRolesForPrincipal(user)
self.assertEqual(len(roles), 1)
self.assertTrue('test1' in roles)
def test_assignRoleToPrincipal_new(self):
root = FauxPAS()
zrm = self._makeOne(id='assign_new').__of__(root)
zrm.addRole('test')
self.assertEqual(len(zrm.listAssignedPrincipals('test')), 0)
new = zrm.assignRoleToPrincipal('test', 'foo')
self.assertTrue(new)
assigned = [x[0] for x in zrm.listAssignedPrincipals('test')]
self.assertEqual(len(assigned), 1)
self.assertEqual(assigned[0], 'foo')
def test_assignRoleToPrincipal_already(self):
root = FauxPAS()
zrm = self._makeOne(id='assign_already').__of__(root)
zrm.addRole('test')
zrm.assignRoleToPrincipal('test', 'foo')
new = zrm.assignRoleToPrincipal('test', 'foo')
self.assertFalse(new)
assigned = [x[0] for x in zrm.listAssignedPrincipals('test')]
self.assertEqual(len(assigned), 1)
self.assertEqual(assigned[0], 'foo')
def test_assignRoleBeforeRemovingPrincipal(self):
root = FauxSmartPAS()
root.user_ids['foo'] = 'foo'
zrm = self._makeOne(id='assign_before_remove').__of__(root)
zrm.addRole('test')
self.assertEqual(len(zrm.listAssignedPrincipals('test')), 0)
new = zrm.assignRoleToPrincipal('test', 'foo')
self.assertTrue(new)
assigned = [x[1] for x in zrm.listAssignedPrincipals('test')]
self.assertEqual(len(assigned), 1)
self.assertEqual(assigned[0], 'foo')
del root.user_ids['foo']
assigned = [x[1] for x in zrm.listAssignedPrincipals('test')]
self.assertEqual(len(assigned), 1)
self.assertEqual(assigned[0], '<foo: not found>')
def test_removeRoleFromPrincipal_nonesuch(self):
from Products.PluggableAuthService.tests.test_PluggableAuthService \
import FauxRoot
root = FauxRoot()
zrm = self._makeOne(id='remove_nonesuch').__of__(root)
self.assertRaises(KeyError, zrm.removeRoleFromPrincipal,
'test', 'foo')
def test_removeRoleFromPrincipal_existing(self):
root = FauxPAS()
zrm = self._makeOne(id='remove_existing').__of__(root)
zrm.addRole('test')
zrm.assignRoleToPrincipal('test', 'foo')
zrm.assignRoleToPrincipal('test', 'bar')
zrm.assignRoleToPrincipal('test', 'baz')
assigned = [x[0] for x in zrm.listAssignedPrincipals('test')]
self.assertEqual(len(assigned), 3)
self.assertTrue('foo' in assigned)
self.assertTrue('bar' in assigned)
self.assertTrue('baz' in assigned)
removed = zrm.removeRoleFromPrincipal('test', 'bar')
self.assertTrue(removed)
assigned = [x[0] for x in zrm.listAssignedPrincipals('test')]
self.assertEqual(len(assigned), 2)
self.assertTrue('foo' in assigned)
self.assertFalse('bar' in assigned)
self.assertTrue('baz' in assigned)
def test_removeRoleFromPrincipal_noop(self):
root = FauxPAS()
zrm = self._makeOne(id='remove_noop').__of__(root)
zrm.addRole('test')
zrm.assignRoleToPrincipal('test', 'foo')
zrm.assignRoleToPrincipal('test', 'baz')
assigned = [x[0] for x in zrm.listAssignedPrincipals('test')]
self.assertEqual(len(assigned), 2)
self.assertTrue('foo' in assigned)
self.assertTrue('baz' in assigned)
removed = zrm.removeRoleFromPrincipal('test', 'bar')
self.assertFalse(removed)
def test_listAssignedPrincipals_duplicate_principals(self):
from Products.PluggableAuthService.plugins.ZODBRoleManager \
import MultiplePrincipalError
class FauxDuplicatePAS(FauxSmartPAS):
"""Returns duplicate user ids when searched."""
def searchPrincipals(self, **kw):
return [{'id': 'foo', 'title': 'User 1'},
{'id': 'foo', 'title': 'User 2'}]
root = FauxDuplicatePAS()
zrm = self._makeOne(id='assign_new').__of__(root)
zrm.addRole('test')
zrm.assignRoleToPrincipal('test', 'foo')
self.assertRaises(MultiplePrincipalError,
zrm.listAssignedPrincipals, 'test')
def test_updateRole_nonesuch(self):
from Products.PluggableAuthService.tests.test_PluggableAuthService \
import FauxRoot
root = FauxRoot()
zrm = self._makeOne(id='update_nonesuch').__of__(root)
self.assertRaises(KeyError, zrm.updateRole,
'nonesuch', 'title', 'description')
def test_updateRole_normal(self):
from Products.PluggableAuthService.tests.test_PluggableAuthService \
import FauxRoot
root = FauxRoot()
zrm = self._makeOne(id='update_normal').__of__(root)
zrm.addRole('role', 'Original Title', 'Original description')
info = zrm.getRoleInfo('role')
self.assertEqual(info['id'], 'role')
self.assertEqual(info['title'], 'Original Title')
self.assertEqual(info['description'], 'Original description')
zrm.updateRole('role', 'Updated Title', 'Updated description')
info = zrm.getRoleInfo('role')
self.assertEqual(info['id'], 'role')
self.assertEqual(info['title'], 'Updated Title')
self.assertEqual(info['description'], 'Updated description')
def test_removeRole_then_addRole(self):
from Products.PluggableAuthService.tests.test_PluggableAuthService \
import FauxRoot
root = FauxRoot()
zrm = self._makeOne(id='remove_then_add').__of__(root)
user = DummyUser('foo')
zrm.addRole('test')
zrm.assignRoleToPrincipal('test', 'foo')
self.assertTrue('test' in zrm.getRolesForPrincipal(user))
zrm.removeRole('test')
zrm.addRole('test')
self.assertFalse('test' in zrm.getRolesForPrincipal(user))
def test_manage_assignRoleToPrincipal_POST_permissions(self):
USER_ID = 'testuser'
ROLE_ID = 'myrole'
root = FauxPAS()
zrm = self._makeOne(id='remove_existing').__of__(root)
zrm = self._makeOne()
zrm.addRole(ROLE_ID)
req, res = makeRequestAndResponse()
req.set('REQUEST_METHOD', 'GET')
req.set('method', 'GET')
req.set('SESSION', {})
self.assertRaises(Forbidden, zrm.manage_assignRoleToPrincipals,
ROLE_ID, [USER_ID], RESPONSE=res, REQUEST=req)
req.set('REQUEST_METHOD', 'POST')
req.set('method', 'POST')
self.assertRaises(Forbidden, zrm.manage_assignRoleToPrincipals,
ROLE_ID, [USER_ID], RESPONSE=res, REQUEST=req)
req.form['csrf_token'] = 'deadbeef'
req.SESSION['_csrft_'] = 'deadbeef'
zrm.manage_assignRoleToPrincipals(ROLE_ID, [USER_ID], RESPONSE=res,
REQUEST=req)
    def test_manage_removeRoleFromPrincipal_POST_permissions(self):
USER_ID = 'testuser'
ROLE_ID = 'myrole'
root = FauxPAS()
zrm = self._makeOne(id='remove_existing').__of__(root)
zrm = self._makeOne()
zrm.addRole(ROLE_ID)
req, res = makeRequestAndResponse()
req.set('REQUEST_METHOD', 'GET')
req.set('method', 'GET')
req.set('SESSION', {})
self.assertRaises(Forbidden, zrm.manage_removeRoleFromPrincipals,
ROLE_ID, [USER_ID], RESPONSE=res, REQUEST=req)
req.set('REQUEST_METHOD', 'POST')
req.set('method', 'POST')
self.assertRaises(Forbidden, zrm.manage_removeRoleFromPrincipals,
ROLE_ID, [USER_ID], RESPONSE=res, REQUEST=req)
req.form['csrf_token'] = 'deadbeef'
req.SESSION['_csrft_'] = 'deadbeef'
zrm.manage_removeRoleFromPrincipals(ROLE_ID, [USER_ID], RESPONSE=res,
REQUEST=req)
def test_manage_removeRoles_POST_permissions(self):
ROLE_ID = 'myrole'
root = FauxPAS()
zrm = self._makeOne(id='remove_existing').__of__(root)
zrm = self._makeOne()
zrm.addRole(ROLE_ID)
req, res = makeRequestAndResponse()
req.set('REQUEST_METHOD', 'GET')
req.set('method', 'GET')
req.set('SESSION', {})
self.assertRaises(Forbidden, zrm.manage_removeRoles,
[ROLE_ID], RESPONSE=res, REQUEST=req)
req.set('REQUEST_METHOD', 'POST')
req.set('method', 'POST')
self.assertRaises(Forbidden, zrm.manage_removeRoles,
[ROLE_ID], RESPONSE=res, REQUEST=req)
req.form['csrf_token'] = 'deadbeef'
req.SESSION['_csrft_'] = 'deadbeef'
zrm.manage_removeRoles([ROLE_ID], RESPONSE=res, REQUEST=req)
def test_suite():
return unittest.TestSuite((
unittest.makeSuite(ZODBRoleManagerTests),
))
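# Convenience entry point (an assumption, not in the upstream file): run the suite
# directly with the standard unittest text runner.
if __name__ == '__main__':
    unittest.TextTestRunner(verbosity=2).run(test_suite())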
|
py | 7dfdc5447e2e05759709a616a3ab73fa4a6a2d32 | # Generated by Django 3.0.8 on 2020-08-01 19:59
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('room_reservation', '0012_reservation_user'),
]
operations = [
migrations.AlterField(
model_name='equipment',
name='name',
field=models.CharField(blank=True, max_length=50),
),
]
|
pyw | 7dfdc595d7f2113d7738b8792a4d1b848bf546f7 | #Запуск приложения
from PyQt5.QtWidgets import QApplication
from mainWindow import Window
import sys
app = QApplication(sys.argv)
w = Window()
sys.exit(app.exec_())
|
py | 7dfdc857702f5da315eecca39b872188b7b26293 | import torch
import torch.nn as nn
import torch_dct as dct
import math
from nsoltUtility import Direction
class NsoltBlockDct2dLayer(nn.Module):
"""
NSOLTBLOCKDCT2DLAYER
    Input: an array of vectors arranged in blocks:
        nSamples x nComponents x (Stride(1)xnRows) x (Stride(2)xnCols)
    Output, separated per component (nComponents):
        nSamples x nDecs x nRows x nCols
Requirements: Python 3.7.x, PyTorch 1.7.x
Copyright (c) 2020-2021, Shogo MURAMATSU
All rights reserved.
Contact address: Shogo MURAMATSU,
Faculty of Engineering, Niigata University,
8050 2-no-cho Ikarashi, Nishi-ku,
Niigata, 950-2181, JAPAN
http://msiplab.eng.niigata-u.ac.jp/
"""
def __init__(self,
name='',
decimation_factor=[],
number_of_components=1
):
super(NsoltBlockDct2dLayer, self).__init__()
self.decimation_factor = decimation_factor
self.name = name
self.description = "Block DCT of size " \
+ str(self.decimation_factor[Direction.VERTICAL]) + "x" \
+ str(self.decimation_factor[Direction.HORIZONTAL])
#self.type = ''
self.num_outputs = number_of_components
#self.num_inputs = 1
def forward(self,X):
nComponents = self.num_outputs
nSamples = X.size(0)
height = X.size(2)
width = X.size(3)
stride = self.decimation_factor
nrows = int(math.ceil(height/stride[Direction.VERTICAL]))
ncols = int(math.ceil(width/stride[Direction.HORIZONTAL]))
ndecs = stride[0]*stride[1] #math.prod(stride)
# Block DCT (nSamples x nComponents x nrows x ncols) x decV x decH
arrayshape = stride.copy()
arrayshape.insert(0,-1)
Y = dct.dct_2d(X.view(arrayshape),norm='ortho')
# Rearrange the DCT Coefs. (nSamples x nComponents x nrows x ncols) x (decV x decH)
cee = Y[:,0::2,0::2].reshape(Y.size(0),-1)
coo = Y[:,1::2,1::2].reshape(Y.size(0),-1)
coe = Y[:,1::2,0::2].reshape(Y.size(0),-1)
ceo = Y[:,0::2,1::2].reshape(Y.size(0),-1)
A = torch.cat((cee,coo,coe,ceo),dim=-1)
Z = A.view(nSamples,nComponents,nrows,ncols,ndecs)
if nComponents<2:
return torch.squeeze(Z,dim=1)
else:
return map(lambda x: torch.squeeze(x,dim=1),torch.chunk(Z,nComponents,dim=1))
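# Usage sketch (an assumption, not part of the original file): applying the block
# DCT layer to a single-component batch with a 2x2 decimation factor. Sizes are
# illustrative.
if __name__ == "__main__":
    layer = NsoltBlockDct2dLayer(decimation_factor=[2, 2], number_of_components=1)
    x = torch.rand(8, 1, 32, 32)  # nSamples x nComponents x H x W
    z = layer(x)
    print(z.shape)  # torch.Size([8, 16, 16, 4]) for this input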
|
py | 7dfdc9686c355547080cdd5ff497a7e2704eeecc | """CLI commands."""
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
|
py | 7dfdca443e488f8635431670d44087e8ea448e83 | from digitalio import DigitalInOut, Direction, Pull
class Button:
def __init__(self, pin):
self._button = DigitalInOut(pin)
self._button.direction = Direction.INPUT
self._button.pull = Pull.DOWN
self._value = False
def reset(self):
self._value = False
def is_pressed(self):
if self._value and self._button.value:
return True
elif not self._value and not self._button.value:
self._value = True
return False
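# Usage sketch (an assumption, not part of the original file): polling the button in
# a CircuitPython-style main loop. `board.D5` is an illustrative pin; any digital
# input pin would work.
if __name__ == "__main__":
    import time
    import board  # assumes a CircuitPython board module is available

    button = Button(board.D5)
    while True:
        if button.is_pressed():
            print("pressed")
            # reset() clears the latch; the button must be seen released again
            # before the next press is reported.
            button.reset()
        time.sleep(0.01)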
|
py | 7dfdcc782409c56a54d654598fb5b8096a26be6f | # -*- coding: utf-8 -*-
from setuptools import setup, find_packages
# import sphinx_pypi_upload
import sys
if sys.version_info[0] < 3:
import codecs
with codecs.open('README.md', 'r', 'utf-8') as f:
readme = f.read()
else:
import io
with io.open('README.md', mode='r', encoding='utf-8') as f:
readme = f.read()
setup(
name='scikit-multilearn',
version='0.2.0',
packages=find_packages(exclude=['docs', 'tests', '*.tests']),
author=u'Piotr Szymański',
author_email=u'[email protected]',
license=u'BSD',
long_description=readme,
url=u'http://scikit.ml/',
description=u'Scikit-multilearn is a BSD-licensed library for multi-label classification that is built on top of the well-known scikit-learn ecosystem.',
classifiers=[
u'Development Status :: 5 - Production/Stable',
u'Environment :: Console',
u'Environment :: Web Environment',
u'Intended Audience :: Developers',
u'Intended Audience :: Education',
u'Intended Audience :: Science/Research',
u'License :: OSI Approved :: BSD License',
u'Operating System :: MacOS :: MacOS X',
u'Operating System :: Microsoft :: Windows',
u'Operating System :: POSIX',
u'Programming Language :: Python',
u'Topic :: Scientific/Engineering',
u'Topic :: Scientific/Engineering :: Information Analysis',
u'Topic :: Scientific/Engineering :: Bio-Informatics',
],
)
|
py | 7dfdcc78eb886b020c475d8461d7e32e2a2d2ff0 | import typing as t
from collections import OrderedDict
from dataclasses import dataclass
from enum import Enum
from gradualelixir import gtypes, pattern
from gradualelixir.exception import SyntaxRestrictionError
from gradualelixir.utils import Bcolors, enumerate_list, ordinal
class UnaryOpEnum(Enum):
negation = "not"
negative = "-"
absolute_value = "abs"
@property
def is_infix(self):
return self not in [UnaryOpEnum.absolute_value]
@property
def types(self) -> t.List[t.Tuple[gtypes.Type, gtypes.Type]]:
if self in [UnaryOpEnum.negative, UnaryOpEnum.absolute_value]:
return [
(gtypes.IntegerType(), gtypes.IntegerType()),
(gtypes.FloatType(), gtypes.FloatType()),
(gtypes.NumberType(), gtypes.NumberType()),
]
else:
assert self is UnaryOpEnum.negation
return [
(gtypes.AtomLiteralType("true"), gtypes.AtomLiteralType("false")),
(gtypes.AtomLiteralType("false"), gtypes.AtomLiteralType("true")),
(gtypes.BooleanType(), gtypes.BooleanType()),
]
class BinaryOpEnum(Enum):
conjunction = "and"
disjunction = "or"
sum = "+"
subtraction = "-"
product = "*"
division = "/"
integer_division = "div"
integer_reminder = "rem"
maximum = "max"
minimum = "min"
equality = "=="
inequality = "!="
lower = "<"
lower_or_equal = "<="
greater = ">"
greater_or_equal = ">="
identity = "==="
inidentity = "!=="
@property
def is_infix(self) -> bool:
return self not in [
BinaryOpEnum.integer_division,
BinaryOpEnum.integer_reminder,
BinaryOpEnum.maximum,
BinaryOpEnum.minimum,
]
@property
def types(self) -> t.List[t.Tuple[gtypes.Type, gtypes.Type, gtypes.Type]]:
# TODO[improvements] adding term would unlock 'untyped' operators like
# ==, or !,&,||
if self is BinaryOpEnum.conjunction:
return [
(
gtypes.AtomLiteralType("true"),
gtypes.AtomLiteralType("true"),
gtypes.AtomLiteralType("true"),
),
(
gtypes.AtomLiteralType("false"),
gtypes.BooleanType(),
gtypes.AtomLiteralType("false"),
),
(
gtypes.BooleanType(),
gtypes.AtomLiteralType("false"),
gtypes.AtomLiteralType("false"),
),
(gtypes.BooleanType(), gtypes.BooleanType(), gtypes.BooleanType()),
]
if self is BinaryOpEnum.disjunction:
return [
(
gtypes.AtomLiteralType("false"),
gtypes.AtomLiteralType("false"),
gtypes.AtomLiteralType("false"),
),
(
gtypes.AtomLiteralType("true"),
gtypes.BooleanType(),
gtypes.AtomLiteralType("true"),
),
(
gtypes.BooleanType(),
gtypes.AtomLiteralType("true"),
gtypes.AtomLiteralType("true"),
),
(gtypes.BooleanType(), gtypes.BooleanType(), gtypes.BooleanType()),
]
elif self in [BinaryOpEnum.sum, BinaryOpEnum.product, BinaryOpEnum.subtraction]:
return [
(gtypes.IntegerType(), gtypes.IntegerType(), gtypes.IntegerType()),
(gtypes.FloatType(), gtypes.FloatType(), gtypes.FloatType()),
(gtypes.IntegerType(), gtypes.FloatType(), gtypes.FloatType()),
(gtypes.FloatType(), gtypes.IntegerType(), gtypes.FloatType()),
(gtypes.FloatType(), gtypes.NumberType(), gtypes.FloatType()),
(gtypes.NumberType(), gtypes.FloatType(), gtypes.FloatType()),
(gtypes.NumberType(), gtypes.NumberType(), gtypes.NumberType()),
]
elif self is BinaryOpEnum.division:
return [(gtypes.NumberType(), gtypes.NumberType(), gtypes.FloatType())]
elif self in [BinaryOpEnum.integer_reminder, BinaryOpEnum.integer_division]:
return [(gtypes.IntegerType(), gtypes.IntegerType(), gtypes.IntegerType())]
elif self in [BinaryOpEnum.maximum, BinaryOpEnum.minimum]:
return [
(gtypes.IntegerType(), gtypes.IntegerType(), gtypes.IntegerType()),
(gtypes.FloatType(), gtypes.FloatType(), gtypes.FloatType()),
(gtypes.NumberType(), gtypes.NumberType(), gtypes.NumberType()),
]
elif self in [
BinaryOpEnum.equality,
BinaryOpEnum.inequality,
BinaryOpEnum.lower,
BinaryOpEnum.lower_or_equal,
BinaryOpEnum.greater,
BinaryOpEnum.greater_or_equal,
]:
# TODO if we had term type this could be improved
# (gtypes.TermType(), gtypes.TermType(), gtypes.BooleanType())
return [
(gtypes.AtomType(), gtypes.AtomType(), gtypes.BooleanType()),
(gtypes.NumberType(), gtypes.NumberType(), gtypes.BooleanType()),
]
else:
assert self is BinaryOpEnum.identity
# TODO[improvements] improve so that if x!= y
# (gtypes.AtomLiteralType(value=x), gtypes.AtomLiteralType(value=y)) raises error
return [
(gtypes.AtomType(), gtypes.AtomType(), gtypes.BooleanType()),
(gtypes.IntegerType(), gtypes.IntegerType(), gtypes.BooleanType()),
(gtypes.FloatType(), gtypes.FloatType(), gtypes.BooleanType()),
]
class Expression:
# should be implemented in any derived instance
def __str__(self):
pass
@dataclass
class IdentExpression(Expression):
identifier: str
def __str__(self):
return self.identifier
@dataclass
class LiteralExpression(Expression):
type: gtypes.Type
value: t.Any
@dataclass
class IntegerExpression(LiteralExpression):
value: int
def __init__(self, value: int):
self.type = gtypes.IntegerType()
self.value = value
def __str__(self):
return str(self.value)
@dataclass
class FloatExpression(LiteralExpression):
value: float
def __init__(self, value: float):
self.type = gtypes.FloatType()
self.value = value
def __str__(self):
if isinstance(self.value, int):
return str(self.value) + ".0"
return str(self.value)
@dataclass
class AtomLiteralExpression(LiteralExpression):
value: str
def __init__(self, value: str):
self.type = gtypes.AtomLiteralType(atom=value)
self.value = value
def __str__(self):
return str(self.type)
@dataclass
class ElistExpression(Expression):
def __str__(self):
return "[]"
@dataclass
class ListExpression(Expression):
head: Expression
tail: t.Union["ListExpression", Expression]
def __init__(self, head: Expression, tail: t.Union["ListExpression", Expression]):
if not any(
[isinstance(tail, ListExpression), isinstance(tail, ElistExpression), isinstance(tail, IdentExpression)]
):
# this extra import will be avoided once AnnotatedExpression is declared inside this module
from gradualelixir.cast import AnnotatedExpression
if not isinstance(tail, AnnotatedExpression):
raise SyntaxRestrictionError(
"List pattern's tail should be either a List Expression or an Elist Expression"
)
self.head = head
self.tail = tail
def __str__(self):
return f"[{str(self.head)} | {str(self.tail)}]"
@dataclass
class TupleExpression(Expression):
items: t.List[Expression]
def __str__(self):
return "{" + ",".join([str(item) for item in self.items]) + "}"
@dataclass
class MapExpression(Expression):
map: t.OrderedDict[gtypes.MapKey, Expression]
def __str__(self):
keys = self.map.keys()
str_values = [str(v) for _, v in self.map.items()]
return "%{" + ",".join([f"{k} => {v}" for (k, v) in zip(keys, str_values)]) + "}"
@dataclass
class UnaryOpExpression(Expression):
op: UnaryOpEnum
argument: Expression
def __str__(self):
if self.op.is_infix:
return f"{self.op.value} {self.argument}"
else:
return f"{self.op.value}({self.argument})"
@dataclass
class BinaryOpExpression(Expression):
op: BinaryOpEnum
left: Expression
right: Expression
def __str__(self):
if self.op.is_infix:
return f"{self.left} {self.op.value} {self.right}"
else:
return f"{self.op.value}({self.left}, {self.right})"
@dataclass
class PatternMatchExpression(Expression):
pattern: pattern.Pattern
expression: Expression
def __str__(self):
return f"{self.pattern} = {self.expression}"
@dataclass
class IfElseExpression(Expression):
condition: Expression
if_clause: Expression
else_clause: Expression
def __str__(self):
return f"if {self.condition} do\n" f" {self.if_clause}\n" f"else\n" f"{self.else_clause}\n" f"end"
@dataclass
class SeqExpression(Expression):
left: Expression
right: Expression
def __str__(self):
return f"{self.left}\n{self.right}"
@dataclass
class CondExpression(Expression):
clauses: t.List[t.Tuple[Expression, Expression]]
def __str__(self):
res = "cond do\n"
for clause in self.clauses:
res += f"{clause[0]} -> {clause[1]}\n"
res += "end"
return res
@dataclass
class CaseExpression(Expression):
test: Expression
clauses: t.List[t.Tuple[pattern.Pattern, Expression]]
def __str__(self):
res = f"case {self.test} do\n"
for clause in self.clauses:
res += f"{clause[0]} -> {clause[1]}\n"
res += "end"
return res
@dataclass
class AnonymizedFunctionExpression(Expression):
function_name: str
arity: int
def __str__(self):
return f"&({self.function_name}/{self.arity})"
@dataclass
class FunctionCallExpression(Expression):
function_name: str
arguments: t.List[Expression]
def __str__(self):
arguments_str = ",".join([str(arg) for arg in self.arguments])
return f"{self.function_name}({arguments_str})"
@dataclass
class VarCallExpression(Expression):
ident: str
arguments: t.List[Expression]
def __str__(self):
arguments_str = ",".join([str(arg) for arg in self.arguments])
return f"{self.ident}.({arguments_str})"
def format_expression(expression: Expression, padding="") -> str:
needs_formatting = False
if len(str(expression).split("\n")) > 1 or len(str(expression)) > 20: # is multiline or expression is too long
needs_formatting = True
if needs_formatting: # is multiline
from .elixir_port import format_code
msg = format_code(str(expression))
return "\n\n" + "\n".join([padding + m for m in msg.split("\n")])
else:
return f" {expression}"
class ExpressionErrorEnum(Enum):
identifier_not_found_in_environment = "Couldn't find variable {identifier} in the environment"
incompatible_type_for_unary_operator = "The argument of type {type} is not a valid argument for builtin {op}/1"
incompatible_types_for_binary_operator = (
"The arguments, of types {type1} and {type2}, are not together valid arguments for builtin {op}/2"
)
pattern_match = "Pattern match type errors\n\n" f"{Bcolors.ENDC}" "{pattern_match_error}"
    incompatible_types_for_list = (
        "The type for the head, {type1}, and the type for the tail, {type2}, don't have a supremum"
    )
    incompatible_types_for_if_else = (
        "The type inferred for the if branch, {type1}, and the type inferred for the else branch, "
        "{type2}, don't have a supremum"
    )
    inferred_type_is_not_as_expected = "The type inferred for the expression, {type1}, is not a subtype of {type2}"
incompatible_types_for_branches = "The types inferred for the branches, {types}, don't have a joint supremum"
function_not_declared = "The function with signature {name}/{arity} was not declared"
identifier_type_is_not_arrow_of_expected_arity = (
"The type inferred for {identifier}, {type}, is not a function of {arity} arguments"
)
class ExpressionContext:
pass
@dataclass
class ListExpressionContext(ExpressionContext):
head: bool
def __str__(self):
if self.head:
return "In the head expression"
return "In the tail expression"
@dataclass
class TupleExpressionContext(ExpressionContext):
n: int
def __str__(self):
return f"In the {ordinal(self.n + 1)} position"
@dataclass
class MapExpressionContext(ExpressionContext):
key: gtypes.MapKey
def __str__(self):
return f"In the expression for key {self.key}"
@dataclass
class UnaryOpContext(ExpressionContext):
def __str__(self):
return "In the operator's argument"
@dataclass
class BinaryOpContext(ExpressionContext):
is_left: bool
def __str__(self):
if self.is_left:
return "In the operator's left argument"
else:
return "In the operator's right argument"
@dataclass
class PatternMatchExpressionContext(ExpressionContext):
def __str__(self):
return "In the expression"
@dataclass
class IfElseExpressionContext(ExpressionContext):
branch: t.Optional[bool]
def __str__(self):
if self.branch is True:
return "In the condition"
elif self.branch is False:
return "In the if branch"
else:
return "In the else branch"
@dataclass
class SeqExpressionContext(ExpressionContext):
is_left: bool
def __str__(self):
if self.is_left:
return "In the left side"
else:
return "In the right side"
@dataclass
class CondExpressionContext(ExpressionContext):
cond: bool
branch: int
def __str__(self):
if self.cond:
return f"In the {ordinal(self.branch + 1)} condition"
return f"In the {ordinal(self.branch + 1)} expression"
@dataclass
class CaseExpressionContext(ExpressionContext):
pattern: t.Optional[bool]
branch: t.Optional[int]
def __str__(self):
if self.branch is None:
return "In the case expression"
if self.pattern is True:
return f"In the {ordinal(self.branch + 1)} pattern"
return f"In the {ordinal(self.branch + 1)} expression"
@dataclass
class FunctionCallExpressionContext(ExpressionContext):
argument: int
def __str__(self):
return f"In the {ordinal(self.argument + 1)} argument"
@dataclass
class VarCallExpressionContext(ExpressionContext):
argument: int
def __str__(self):
return f"In the {ordinal(self.argument + 1)} argument"
class ExpressionTypeCheckError:
expression: Expression
def _message(self, padding="", env: gtypes.TypeEnv = None, specs_env: gtypes.SpecsEnv = None):
raise NotImplementedError()
@staticmethod
def env_message(padding="", env: gtypes.TypeEnv = None, specs_env: gtypes.SpecsEnv = None):
env_msg = ""
specs_msg = ""
eol = ""
if env is not None:
env_msg = f"{padding}{Bcolors.OKBLUE}Variables:{Bcolors.ENDC} {env}\n"
eol = "\n"
if specs_env is not None:
specs_msg = f"{padding}{Bcolors.OKBLUE}Specs:{Bcolors.ENDC} {specs_env}\n"
eol = "\n"
return env_msg + specs_msg + eol
def message(self, padding="", env: gtypes.TypeEnv = None, specs_env: gtypes.SpecsEnv = None):
expression_msg = format_expression(expression=self.expression, padding=padding + " ")
env_msg = self.env_message(padding, env, specs_env)
return (
f"{padding}{Bcolors.OKBLUE}Type errors found inside expression{Bcolors.ENDC} "
f"{expression_msg}\n\n"
f"{env_msg}"
f"{self._message(padding, env, specs_env)}\n"
)
@dataclass
class BaseExpressionTypeCheckError(ExpressionTypeCheckError):
expression: Expression
kind: ExpressionErrorEnum
args: t.Dict[str, t.Any]
def __str__(self):
return self.message()
def _message(self, padding="", _env: gtypes.TypeEnv = None, _specs_env: gtypes.SpecsEnv = None):
error_msg = self.kind.value.format(**{k: str(arg) for k, arg in self.args.items()})
error_msg = "\n".join([padding + " " + line for line in error_msg.split("\n")])
return f"{Bcolors.FAIL}{error_msg}{Bcolors.ENDC}\n"
@dataclass
class ContextExpressionTypeCheckError:
context: ExpressionContext
env: gtypes.TypeEnv
error: ExpressionTypeCheckError
@dataclass
class NestedExpressionTypeCheckError(ExpressionTypeCheckError):
expression: Expression
bullets: t.List[ContextExpressionTypeCheckError]
def __str__(self):
return self.message()
def _message(self, padding="", env: gtypes.TypeEnv = None, specs_env: gtypes.SpecsEnv = None):
env_msg = self.env_message(padding + " ", env, specs_env)
item_msgs = []
for bullet in self.bullets:
bullet_expression_msg = format_expression(expression=bullet.error.expression, padding=padding + " ")
bullet_msg = bullet.error._message(padding + " ", bullet.env, specs_env)
item_msgs.append(
f"{padding}{Bcolors.OKBLUE} > {bullet.context}{Bcolors.ENDC}"
f"{bullet_expression_msg}\n\n"
f"{env_msg}"
f"{bullet_msg}\n"
)
return "".join(item_msgs)
@dataclass
class ExpressionTypeCheckSuccess:
expression: Expression
type: gtypes.Type
env: gtypes.TypeEnv
specs_env: gtypes.SpecsEnv
exported_env: gtypes.TypeEnv
children: t.Dict[str, t.Any]
def message(self, padding=""):
expression_msg = format_expression(self.expression, padding=padding + " ")
return (
f"{padding}{Bcolors.OKBLUE}Type check success for{Bcolors.ENDC} {expression_msg}\n\n"
f"{padding}{Bcolors.OKBLUE}Variables:{Bcolors.ENDC} {self.env}\n"
f"{padding}{Bcolors.OKBLUE}Specs:{Bcolors.ENDC} {self.specs_env}\n"
f"{padding}{Bcolors.OKBLUE}Assigned Type:{Bcolors.ENDC} {self.type}\n"
f"{padding}{Bcolors.OKBLUE}Exported Variables:{Bcolors.ENDC} {self.exported_env}\n"
)
ExpressionTypeCheckResult = t.Union[ExpressionTypeCheckSuccess, ExpressionTypeCheckError]
def type_check(expr: Expression, env: gtypes.TypeEnv, specs_env: gtypes.SpecsEnv) -> ExpressionTypeCheckResult:
if isinstance(expr, LiteralExpression):
return type_check_literal(expr, env, specs_env)
if isinstance(expr, IdentExpression):
return type_check_ident(expr, env, specs_env)
if isinstance(expr, ElistExpression):
return type_check_elist(expr, env, specs_env)
if isinstance(expr, ListExpression):
return type_check_list(expr, env, specs_env)
if isinstance(expr, TupleExpression):
return type_check_tuple(expr, env, specs_env)
if isinstance(expr, MapExpression):
return type_check_map(expr, env, specs_env)
if isinstance(expr, UnaryOpExpression):
return type_check_unary_op(expr, env, specs_env)
if isinstance(expr, BinaryOpExpression):
return type_check_binary_op(expr, env, specs_env)
if isinstance(expr, PatternMatchExpression):
return type_check_pattern_match(expr, env, specs_env)
if isinstance(expr, IfElseExpression):
return type_check_if_else(expr, env, specs_env)
if isinstance(expr, SeqExpression):
return type_check_seq(expr, env, specs_env)
if isinstance(expr, CondExpression):
return type_check_cond(expr, env, specs_env)
if isinstance(expr, CaseExpression):
return type_check_case(expr, env, specs_env)
if isinstance(expr, AnonymizedFunctionExpression):
return type_check_anon(expr, env, specs_env)
if isinstance(expr, FunctionCallExpression):
return type_check_call(expr, env, specs_env)
else:
assert isinstance(expr, VarCallExpression)
return type_check_call(expr, env, specs_env)
def type_check_literal(
expr: LiteralExpression, env: gtypes.TypeEnv, specs_env: gtypes.SpecsEnv
) -> ExpressionTypeCheckResult:
return ExpressionTypeCheckSuccess(
expression=expr, env=env, specs_env=specs_env, type=expr.type, exported_env=env, children={}
)
def type_check_ident(
expr: IdentExpression, env: gtypes.TypeEnv, specs_env: gtypes.SpecsEnv
) -> ExpressionTypeCheckResult:
if (ret_type := env.get(expr.identifier)) is not None:
return ExpressionTypeCheckSuccess(
expression=expr, env=env, specs_env=specs_env, type=ret_type, exported_env=env, children={}
)
else:
return BaseExpressionTypeCheckError(
expression=expr,
kind=ExpressionErrorEnum.identifier_not_found_in_environment,
args={"identifier": expr.identifier},
)
def type_check_elist(
expr: ElistExpression, env: gtypes.TypeEnv, specs_env: gtypes.SpecsEnv
) -> ExpressionTypeCheckResult:
return ExpressionTypeCheckSuccess(
expression=expr, env=env, specs_env=specs_env, type=gtypes.ElistType(), exported_env=env, children={}
)
def type_check_list(expr: ListExpression, env: gtypes.TypeEnv, specs_env: gtypes.SpecsEnv) -> ExpressionTypeCheckResult:
head_type_check_result = type_check(expr.head, env, specs_env)
tail_type_check_result = type_check(expr.tail, env, specs_env)
if isinstance(head_type_check_result, ExpressionTypeCheckError) and isinstance(
tail_type_check_result, ExpressionTypeCheckError
):
return NestedExpressionTypeCheckError(
expression=expr,
bullets=[
ContextExpressionTypeCheckError(ListExpressionContext(head=True), env, head_type_check_result),
ContextExpressionTypeCheckError(ListExpressionContext(head=False), env, tail_type_check_result),
],
)
if isinstance(head_type_check_result, ExpressionTypeCheckError):
return NestedExpressionTypeCheckError(
expression=expr,
bullets=[ContextExpressionTypeCheckError(ListExpressionContext(head=True), env, head_type_check_result)],
)
if isinstance(tail_type_check_result, ExpressionTypeCheckError):
return NestedExpressionTypeCheckError(
expression=expr,
bullets=[ContextExpressionTypeCheckError(ListExpressionContext(head=False), env, tail_type_check_result)],
)
result_type = gtypes.supremum(gtypes.ListType(head_type_check_result.type), tail_type_check_result.type)
if not isinstance(result_type, gtypes.TypingError):
return ExpressionTypeCheckSuccess(
env=env,
specs_env=specs_env,
expression=expr,
type=result_type,
exported_env=gtypes.TypeEnv.merge(head_type_check_result.exported_env, tail_type_check_result.exported_env),
children={"head": head_type_check_result, "tail": tail_type_check_result},
)
else:
return BaseExpressionTypeCheckError(
expression=expr,
kind=ExpressionErrorEnum.incompatible_types_for_list,
args={"type1": head_type_check_result.type, "type2": tail_type_check_result.type.type}, # type: ignore
)
def type_check_tuple(
expr: TupleExpression, env: gtypes.TypeEnv, specs_env: gtypes.SpecsEnv
) -> ExpressionTypeCheckResult:
items_type_check_results = []
errors: t.List[ContextExpressionTypeCheckError] = []
for i in range(len(expr.items)):
item_type_check_result = type_check(expr.items[i], env, specs_env)
if isinstance(item_type_check_result, ExpressionTypeCheckError):
errors.append(ContextExpressionTypeCheckError(TupleExpressionContext(n=i), env, item_type_check_result))
else:
items_type_check_results.append(item_type_check_result)
if errors:
return NestedExpressionTypeCheckError(expression=expr, bullets=errors)
else:
exported_env = gtypes.TypeEnv()
for item in items_type_check_results:
exported_env = gtypes.TypeEnv.merge(exported_env, item.exported_env)
return ExpressionTypeCheckSuccess(
env=env,
specs_env=specs_env,
expression=expr,
type=gtypes.TupleType([e.type for e in items_type_check_results]),
exported_env=exported_env,
children={"items": items_type_check_results},
)
def type_check_map(expr: MapExpression, env: gtypes.TypeEnv, specs_env: gtypes.SpecsEnv) -> ExpressionTypeCheckResult:
type_check_results_dict = OrderedDict()
errors: t.List[ContextExpressionTypeCheckError] = []
for k in expr.map:
item_type_check_result = type_check(expr.map[k], env, specs_env)
if isinstance(item_type_check_result, ExpressionTypeCheckError):
errors.append(ContextExpressionTypeCheckError(MapExpressionContext(key=k), env, item_type_check_result))
else:
type_check_results_dict[k] = item_type_check_result
if errors:
return NestedExpressionTypeCheckError(expression=expr, bullets=errors)
else:
exported_env = gtypes.TypeEnv()
for k in type_check_results_dict:
exported_env = gtypes.TypeEnv.merge(exported_env, type_check_results_dict[k].exported_env)
return ExpressionTypeCheckSuccess(
env=env,
specs_env=specs_env,
expression=expr,
type=gtypes.MapType({k: type.type for k, type in type_check_results_dict.items()}),
exported_env=exported_env,
children={"map": list(type_check_results_dict.values())},
)
def type_check_unary_op(
expr: UnaryOpExpression, env: gtypes.TypeEnv, specs_env: gtypes.SpecsEnv
) -> ExpressionTypeCheckResult:
argument_type_check_result = type_check(expr.argument, env, specs_env)
if isinstance(argument_type_check_result, ExpressionTypeCheckError):
return NestedExpressionTypeCheckError(
expression=expr,
bullets=[ContextExpressionTypeCheckError(UnaryOpContext(), env, argument_type_check_result)],
)
if valid_result_types := [
ret_type for arg_type, ret_type in expr.op.types if gtypes.is_subtype(argument_type_check_result.type, arg_type)
]:
return ExpressionTypeCheckSuccess(
env=env,
expression=expr,
specs_env=specs_env,
type=valid_result_types[0],
exported_env=argument_type_check_result.exported_env,
children={"argument": argument_type_check_result},
)
return BaseExpressionTypeCheckError(
expression=expr,
kind=ExpressionErrorEnum.incompatible_type_for_unary_operator,
args={"type": argument_type_check_result.type, "op": expr.op.value},
)
def type_check_binary_op(
expr: BinaryOpExpression, env: gtypes.TypeEnv, specs_env: gtypes.SpecsEnv
) -> ExpressionTypeCheckResult:
left_type_check_result = type_check(expr.left, env, specs_env)
right_type_check_result = type_check(expr.right, env, specs_env)
if isinstance(left_type_check_result, ExpressionTypeCheckError) and isinstance(
right_type_check_result, ExpressionTypeCheckError
):
return NestedExpressionTypeCheckError(
expression=expr,
bullets=[
ContextExpressionTypeCheckError(
BinaryOpContext(is_left=True),
env,
left_type_check_result,
),
ContextExpressionTypeCheckError(
BinaryOpContext(is_left=False),
env,
right_type_check_result,
),
],
)
if isinstance(left_type_check_result, ExpressionTypeCheckError):
return NestedExpressionTypeCheckError(
expression=expr,
bullets=[ContextExpressionTypeCheckError(BinaryOpContext(is_left=True), env, left_type_check_result)],
)
if isinstance(right_type_check_result, ExpressionTypeCheckError):
return NestedExpressionTypeCheckError(
expression=expr,
bullets=[ContextExpressionTypeCheckError(BinaryOpContext(is_left=False), env, right_type_check_result)],
)
if valid_result_types := [
ret_type
for left_arg_type, right_arg_type, ret_type in expr.op.types
if (
gtypes.is_subtype(left_type_check_result.type, left_arg_type)
and gtypes.is_subtype(right_type_check_result.type, right_arg_type)
)
]:
return ExpressionTypeCheckSuccess(
expression=expr,
env=env,
specs_env=specs_env,
type=valid_result_types[0],
exported_env=gtypes.TypeEnv.merge(
left_type_check_result.exported_env, right_type_check_result.exported_env
),
children={"left": left_type_check_result, "right": right_type_check_result},
)
else:
return BaseExpressionTypeCheckError(
expression=expr,
kind=ExpressionErrorEnum.incompatible_types_for_binary_operator,
args={
"type1": left_type_check_result.type,
"type2": right_type_check_result.type,
"op": expr.op.value,
},
)
def type_check_pattern_match(
expr: PatternMatchExpression, env: gtypes.TypeEnv, specs_env: gtypes.SpecsEnv
) -> ExpressionTypeCheckResult:
expression_type_check_result = type_check(expr.expression, env, specs_env)
if isinstance(expression_type_check_result, ExpressionTypeCheckError):
return NestedExpressionTypeCheckError(
expression=expr,
bullets=[
ContextExpressionTypeCheckError(
PatternMatchExpressionContext(),
env,
expression_type_check_result,
)
],
)
else:
pattern_match_result = pattern.pattern_match(
expr.pattern,
expression_type_check_result.type,
gtypes.TypeEnv(),
expression_type_check_result.exported_env,
)
if isinstance(pattern_match_result, pattern.PatternMatchError):
return BaseExpressionTypeCheckError(
expression=expr,
kind=ExpressionErrorEnum.pattern_match,
args={
"type": expression_type_check_result.type,
"pattern": expr.pattern,
"pattern_match_error": pattern_match_result.message(" "),
},
)
return ExpressionTypeCheckSuccess(
env=env,
specs_env=specs_env,
expression=expr,
type=pattern_match_result.refined_type,
exported_env=gtypes.TypeEnv.merge(
expression_type_check_result.exported_env, pattern_match_result.exported_env
),
children={"expression": expression_type_check_result},
)
def type_check_if_else(
expr: IfElseExpression, env: gtypes.TypeEnv, specs_env: gtypes.SpecsEnv
) -> ExpressionTypeCheckResult:
condition_type_check_result = type_check(expr.condition, env, specs_env)
if isinstance(condition_type_check_result, ExpressionTypeCheckError):
return NestedExpressionTypeCheckError(
expression=expr,
bullets=[
ContextExpressionTypeCheckError(
IfElseExpressionContext(branch=None),
env,
condition_type_check_result,
)
],
)
if not gtypes.is_subtype(condition_type_check_result.type, gtypes.BooleanType()):
return NestedExpressionTypeCheckError(
expression=expr,
bullets=[
ContextExpressionTypeCheckError(
IfElseExpressionContext(branch=None),
env,
BaseExpressionTypeCheckError(
expression=expr,
kind=ExpressionErrorEnum.inferred_type_is_not_as_expected,
args={"type1": condition_type_check_result.type, "type2": gtypes.BooleanType()},
),
)
],
)
errors: t.List[ContextExpressionTypeCheckError] = []
if_clause_type_check_result = type_check(expr.if_clause, condition_type_check_result.exported_env, specs_env)
if isinstance(if_clause_type_check_result, ExpressionTypeCheckError):
errors.append(
ContextExpressionTypeCheckError(
IfElseExpressionContext(branch=True),
condition_type_check_result.exported_env,
if_clause_type_check_result,
)
)
if expr.else_clause is not None:
else_clause_type_check_result = type_check(
expr.else_clause, condition_type_check_result.exported_env, specs_env
)
if isinstance(else_clause_type_check_result, ExpressionTypeCheckError):
errors.append(
ContextExpressionTypeCheckError(
IfElseExpressionContext(branch=False),
condition_type_check_result.exported_env,
else_clause_type_check_result,
)
)
else:
else_clause_type_check_result = if_clause_type_check_result
if errors:
return NestedExpressionTypeCheckError(expression=expr, bullets=errors)
assert isinstance(if_clause_type_check_result, ExpressionTypeCheckSuccess)
assert isinstance(else_clause_type_check_result, ExpressionTypeCheckSuccess)
ret_type = if_clause_type_check_result.type
if expr.else_clause:
aux = gtypes.supremum(if_clause_type_check_result.type, else_clause_type_check_result.type)
if isinstance(aux, gtypes.TypingError):
return BaseExpressionTypeCheckError(
expression=expr,
kind=ExpressionErrorEnum.incompatible_types_for_if_else,
args={
"type1": if_clause_type_check_result.type,
"type2": else_clause_type_check_result.type,
},
)
ret_type = aux
return ExpressionTypeCheckSuccess(
env=env,
specs_env=specs_env,
expression=expr,
type=ret_type,
exported_env=condition_type_check_result.exported_env,
children={
"condition": condition_type_check_result,
"if_clause": if_clause_type_check_result,
"else_clause": else_clause_type_check_result,
},
)
def type_check_seq(expr: SeqExpression, env: gtypes.TypeEnv, specs_env: gtypes.SpecsEnv) -> ExpressionTypeCheckResult:
left_type_check_result = type_check(expr.left, env, specs_env)
if isinstance(left_type_check_result, ExpressionTypeCheckError):
return NestedExpressionTypeCheckError(
expression=expr,
bullets=[ContextExpressionTypeCheckError(SeqExpressionContext(is_left=True), env, left_type_check_result)],
)
else:
right_type_check_result = type_check(expr.right, left_type_check_result.exported_env, specs_env)
if isinstance(right_type_check_result, ExpressionTypeCheckError):
return NestedExpressionTypeCheckError(
expression=expr,
bullets=[
ContextExpressionTypeCheckError(
SeqExpressionContext(is_left=False),
left_type_check_result.exported_env,
right_type_check_result,
)
],
)
return ExpressionTypeCheckSuccess(
env=env,
specs_env=specs_env,
expression=expr,
type=right_type_check_result.type,
exported_env=gtypes.TypeEnv.merge(
left_type_check_result.exported_env, right_type_check_result.exported_env
),
children={"left": left_type_check_result, "right": right_type_check_result},
)
def type_check_cond(expr: CondExpression, env: gtypes.TypeEnv, specs_env: gtypes.SpecsEnv) -> ExpressionTypeCheckResult:
errors: t.List[ContextExpressionTypeCheckError] = []
clauses_type_check_results = []
for i in range(len(expr.clauses)):
cond_type_check_result = type_check(expr.clauses[i][0], env, specs_env)
if isinstance(cond_type_check_result, ExpressionTypeCheckError):
errors.append(
ContextExpressionTypeCheckError(
CondExpressionContext(branch=i, cond=True),
env,
cond_type_check_result,
)
)
continue
if not gtypes.is_subtype(cond_type_check_result.type, gtypes.BooleanType()):
errors.append(
ContextExpressionTypeCheckError(
CondExpressionContext(branch=i, cond=True),
env,
BaseExpressionTypeCheckError(
expression=expr.clauses[i][0],
kind=ExpressionErrorEnum.inferred_type_is_not_as_expected,
args={"type1": cond_type_check_result.type, "type2": gtypes.BooleanType()},
),
)
)
continue
do_type_check_result = type_check(expr.clauses[i][1], cond_type_check_result.exported_env, specs_env)
if isinstance(do_type_check_result, ExpressionTypeCheckError):
errors.append(
ContextExpressionTypeCheckError(
CondExpressionContext(branch=i, cond=False),
cond_type_check_result.exported_env,
do_type_check_result,
)
)
continue
clauses_type_check_results.append((cond_type_check_result, do_type_check_result))
if errors:
return NestedExpressionTypeCheckError(expression=expr, bullets=errors)
ret_type = clauses_type_check_results[0][1].type
for i in range(len(clauses_type_check_results) - 1):
aux = gtypes.supremum(ret_type, clauses_type_check_results[i + 1][1].type)
if isinstance(aux, gtypes.TypingError):
return BaseExpressionTypeCheckError(
expression=expr,
kind=ExpressionErrorEnum.incompatible_types_for_branches,
args={"types": enumerate_list([str(clause[1].type) for clause in clauses_type_check_results])},
)
ret_type = aux
return ExpressionTypeCheckSuccess(
env=env,
specs_env=specs_env,
expression=expr,
type=ret_type,
exported_env=env,
children={"clauses": clauses_type_check_results},
)
def type_check_case(expr: CaseExpression, env: gtypes.TypeEnv, specs_env: gtypes.SpecsEnv) -> ExpressionTypeCheckResult:
test_type_check_result = type_check(expr.test, env, specs_env)
if isinstance(test_type_check_result, ExpressionTypeCheckError):
return NestedExpressionTypeCheckError(
expression=expr,
bullets=[
ContextExpressionTypeCheckError(
CaseExpressionContext(branch=None, pattern=None),
env,
test_type_check_result,
)
],
)
errors: t.List[ContextExpressionTypeCheckError] = []
clauses_type_check_results = []
for i in range(len(expr.clauses)):
pattern_match_result = pattern.pattern_match(
expr.clauses[i][0], test_type_check_result.type, gtypes.TypeEnv(), env
)
if isinstance(pattern_match_result, pattern.PatternMatchError):
errors.append(
ContextExpressionTypeCheckError(
CaseExpressionContext(branch=i, pattern=True),
test_type_check_result.exported_env,
BaseExpressionTypeCheckError(
expression=PatternMatchExpression(expr.clauses[i][0], expr.test),
kind=ExpressionErrorEnum.pattern_match,
args={
"type": test_type_check_result.type,
"pattern": expr.clauses[i][0],
"pattern_match_error": pattern_match_result,
},
),
)
)
continue
assert isinstance(pattern_match_result, pattern.PatternMatchSuccess)
new_env = gtypes.TypeEnv.merge(env, pattern_match_result.exported_env)
do_type_check_result = type_check(expr.clauses[i][1], new_env, specs_env)
if isinstance(do_type_check_result, ExpressionTypeCheckError):
errors.append(
ContextExpressionTypeCheckError(
CaseExpressionContext(branch=i, pattern=False),
new_env,
do_type_check_result,
)
)
continue
clauses_type_check_results.append((pattern_match_result, do_type_check_result))
if errors:
return NestedExpressionTypeCheckError(expression=expr, bullets=errors)
ret_type = clauses_type_check_results[0][1].type
for i in range(len(clauses_type_check_results) - 1):
aux = gtypes.supremum(ret_type, clauses_type_check_results[i + 1][1].type)
if isinstance(aux, gtypes.TypingError):
return BaseExpressionTypeCheckError(
expression=expr,
kind=ExpressionErrorEnum.incompatible_types_for_branches,
args={"types": enumerate_list([str(clause[1].type) for clause in clauses_type_check_results])},
)
ret_type = aux
return ExpressionTypeCheckSuccess(
env=env,
specs_env=specs_env,
expression=expr,
type=ret_type,
exported_env=test_type_check_result.exported_env,
children={"test": test_type_check_result, "clauses": clauses_type_check_results},
)
def type_check_anon(
expr: AnonymizedFunctionExpression, env: gtypes.TypeEnv, specs_env: gtypes.SpecsEnv
) -> ExpressionTypeCheckResult:
if not (value := specs_env.get((expr.function_name, expr.arity))):
return BaseExpressionTypeCheckError(
expression=expr,
kind=ExpressionErrorEnum.function_not_declared,
args={"name": expr.function_name, "arity": expr.arity},
)
else:
return ExpressionTypeCheckSuccess(
env=env,
specs_env=specs_env,
expression=expr,
type=gtypes.FunctionType(value[0], value[1]),
exported_env=env,
children={},
)
def type_check_call(
expr: t.Union[FunctionCallExpression, VarCallExpression], env: gtypes.TypeEnv, specs_env: gtypes.SpecsEnv
) -> ExpressionTypeCheckResult:
context_class: t.Type[
t.Union[FunctionCallExpressionContext, VarCallExpressionContext]
] = FunctionCallExpressionContext
function_type: gtypes.FunctionType
if isinstance(expr, FunctionCallExpression):
if not (value := specs_env.get((expr.function_name, len(expr.arguments)))):
return BaseExpressionTypeCheckError(
expression=expr,
kind=ExpressionErrorEnum.function_not_declared,
args={"name": expr.function_name, "arity": len(expr.arguments)},
)
else:
function_type = gtypes.FunctionType(value[0], value[1])
else:
context_class = VarCallExpressionContext
if not (aux := env.get(expr.ident)):
return BaseExpressionTypeCheckError(
expression=expr,
kind=ExpressionErrorEnum.identifier_not_found_in_environment,
args={"identifier": expr.ident},
)
else:
if isinstance(aux, gtypes.AnyType):
# if env[x] = any, we will refine x: (any,...,any)n -> any for the
# sake of typing x.(e1,..en)
aux = gtypes.FunctionType([gtypes.AnyType() for _ in expr.arguments], gtypes.AnyType())
if not isinstance(aux, gtypes.FunctionType) or len(aux.arg_types) != len(expr.arguments):
return BaseExpressionTypeCheckError(
expression=expr,
kind=ExpressionErrorEnum.identifier_type_is_not_arrow_of_expected_arity,
args={"identifier": expr.ident, "type": aux, "arity": len(expr.arguments)},
)
function_type = aux
errors: t.List[ContextExpressionTypeCheckError] = []
arguments_type_check_results = []
for i in range(len(expr.arguments)):
argument_type_check_result = type_check(expr.arguments[i], env, specs_env)
if isinstance(argument_type_check_result, ExpressionTypeCheckError):
errors.append(
ContextExpressionTypeCheckError(
context_class(argument=i),
env,
argument_type_check_result,
)
)
continue
if not gtypes.is_subtype(argument_type_check_result.type, function_type.arg_types[i]):
errors.append(
ContextExpressionTypeCheckError(
context_class(argument=i),
env,
BaseExpressionTypeCheckError(
expression=expr.arguments[i],
kind=ExpressionErrorEnum.inferred_type_is_not_as_expected,
args={"type1": argument_type_check_result.type, "type2": function_type.arg_types[i]},
),
)
)
continue
arguments_type_check_results.append(argument_type_check_result)
if errors:
return NestedExpressionTypeCheckError(expression=expr, bullets=errors)
exported_env = gtypes.TypeEnv()
for item in arguments_type_check_results:
exported_env = gtypes.TypeEnv.merge(exported_env, item.exported_env)
return ExpressionTypeCheckSuccess(
env=env,
specs_env=specs_env,
expression=expr,
type=function_type.ret_type,
exported_env=exported_env,
children={"arguments": arguments_type_check_results},
)
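# Editorial note (not part of the original module): type_check_call resolves
# the callee's FunctionType either from specs_env (named function calls) or
# from the variable environment (variable calls, widening `any` to an arrow
# type of the matching arity), checks every argument against the expected
# parameter type, and merges the arguments' exported environments into the
# exported environment of the whole call.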
|
py | 7dfdccbf5036981710614e0383962a1a20cf88c8 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# tests/xor.py
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the project nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import unittest
from king_phisher import testing
from king_phisher.utilities import random_string
from king_phisher.xor import *
class XORTests(testing.KingPhisherTestCase):
def test_xor_encode(self):
plain_string = random_string(16)
encoded_string = xor_encode(plain_string)
self.assertNotEqual(plain_string, encoded_string)
def test_xor_decode(self):
plain_string = random_string(16)
encoded_string = xor_encode(plain_string)
self.assertNotEqual(plain_string, encoded_string)
decoded_string = xor_decode(encoded_string)
self.assertEqual(plain_string, decoded_string)
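# A minimal sketch (not the king_phisher implementation; the key and helper
# name are made up) of the property the tests above rely on: XOR with a fixed
# key is its own inverse, so applying it twice restores the original bytes.
def _xor_round_trip_demo(data: bytes = b'hello', key: int = 0x42) -> bool:
    encoded = bytes(b ^ key for b in data)
    decoded = bytes(b ^ key for b in encoded)
    return decoded == data  # True for any data/key pair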
if __name__ == '__main__':
unittest.main()
|
py | 7dfdcf276d93e46ecf26ae50ad2e96baf88b927d | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_serialization import jsonutils as json
from six.moves.urllib import parse as urllib
from tempest.lib.common import rest_client
from tempest.lib import exceptions as lib_exc
class SnapshotsClient(rest_client.RestClient):
"""Client class to send CRUD Volume V1 API requests."""
create_resp = 200
def list_snapshots(self, detail=False, **params):
"""List all the snapshot.
For a full list of available parameters, please refer to the official
API reference:
https://docs.openstack.org/api-ref/block-storage/v2/#list-snapshots
https://docs.openstack.org/api-ref/block-storage/v2/#list-snapshots-with-details
"""
url = 'snapshots'
if detail:
url += '/detail'
if params:
url += '?%s' % urllib.urlencode(params)
resp, body = self.get(url)
body = json.loads(body)
self.expected_success(200, resp.status)
return rest_client.ResponseBody(resp, body)
def show_snapshot(self, snapshot_id):
"""Returns the details of a single snapshot.
For a full list of available parameters, please refer to the official
API reference:
https://docs.openstack.org/api-ref/block-storage/v2/#show-snapshot-details
"""
url = "snapshots/%s" % snapshot_id
resp, body = self.get(url)
body = json.loads(body)
self.expected_success(200, resp.status)
return rest_client.ResponseBody(resp, body)
def create_snapshot(self, **kwargs):
"""Creates a new snapshot.
For a full list of available parameters, please refer to the official
API reference:
https://docs.openstack.org/api-ref/block-storage/v2/#create-snapshot
"""
post_body = json.dumps({'snapshot': kwargs})
resp, body = self.post('snapshots', post_body)
body = json.loads(body)
self.expected_success(self.create_resp, resp.status)
return rest_client.ResponseBody(resp, body)
def delete_snapshot(self, snapshot_id):
"""Delete Snapshot.
For a full list of available parameters, please refer to the official
API reference:
https://docs.openstack.org/api-ref/block-storage/v2/#delete-snapshot
"""
resp, body = self.delete("snapshots/%s" % snapshot_id)
self.expected_success(202, resp.status)
return rest_client.ResponseBody(resp, body)
def is_resource_deleted(self, id):
try:
self.show_snapshot(id)
except lib_exc.NotFound:
return True
return False
@property
def resource_type(self):
"""Returns the primary type of resource this client works with."""
return 'volume-snapshot'
def reset_snapshot_status(self, snapshot_id, status):
"""Reset the specified snapshot's status."""
post_body = json.dumps({'os-reset_status': {"status": status}})
resp, body = self.post('snapshots/%s/action' % snapshot_id, post_body)
self.expected_success(202, resp.status)
return rest_client.ResponseBody(resp, body)
def update_snapshot_status(self, snapshot_id, **kwargs):
"""Update the specified snapshot's status."""
# TODO(gmann): api-site doesn't contain doc ref
# for this API. After fixing the api-site, we need to
# add the link here.
# Bug https://bugs.launchpad.net/openstack-api-site/+bug/1532645
post_body = json.dumps({'os-update_snapshot_status': kwargs})
url = 'snapshots/%s/action' % snapshot_id
resp, body = self.post(url, post_body)
self.expected_success(202, resp.status)
return rest_client.ResponseBody(resp, body)
def create_snapshot_metadata(self, snapshot_id, metadata):
"""Create metadata for the snapshot."""
put_body = json.dumps({'metadata': metadata})
url = "snapshots/%s/metadata" % snapshot_id
resp, body = self.post(url, put_body)
body = json.loads(body)
self.expected_success(200, resp.status)
return rest_client.ResponseBody(resp, body)
def update_snapshot(self, snapshot_id, **kwargs):
"""Updates a snapshot.
For a full list of available parameters, please refer to the official
API reference:
https://docs.openstack.org/api-ref/block-storage/v2/#update-snapshot
"""
put_body = json.dumps({'snapshot': kwargs})
resp, body = self.put('snapshots/%s' % snapshot_id, put_body)
body = json.loads(body)
self.expected_success(200, resp.status)
return rest_client.ResponseBody(resp, body)
def show_snapshot_metadata(self, snapshot_id):
"""Get metadata of the snapshot.
For a full list of available parameters, please refer to the official
API reference:
https://docs.openstack.org/api-ref/block-storage/v2/#show-snapshot-metadata
"""
url = "snapshots/%s/metadata" % snapshot_id
resp, body = self.get(url)
body = json.loads(body)
self.expected_success(200, resp.status)
return rest_client.ResponseBody(resp, body)
def update_snapshot_metadata(self, snapshot_id, **kwargs):
"""Update metadata for the snapshot.
For a full list of available parameters, please refer to the official
API reference:
https://docs.openstack.org/api-ref/block-storage/v2/#update-snapshot-metadata
"""
put_body = json.dumps(kwargs)
url = "snapshots/%s/metadata" % snapshot_id
resp, body = self.put(url, put_body)
body = json.loads(body)
self.expected_success(200, resp.status)
return rest_client.ResponseBody(resp, body)
def update_snapshot_metadata_item(self, snapshot_id, id, **kwargs):
"""Update metadata item for the snapshot."""
# TODO(piyush): Current api-site doesn't contain this API description.
# After fixing the api-site, we need to fix here also for putting the
# link to api-site.
# LP: https://bugs.launchpad.net/openstack-api-site/+bug/1529064
put_body = json.dumps(kwargs)
url = "snapshots/%s/metadata/%s" % (snapshot_id, id)
resp, body = self.put(url, put_body)
body = json.loads(body)
self.expected_success(200, resp.status)
return rest_client.ResponseBody(resp, body)
def delete_snapshot_metadata_item(self, snapshot_id, id):
"""Delete metadata item for the snapshot."""
url = "snapshots/%s/metadata/%s" % (snapshot_id, id)
resp, body = self.delete(url)
self.expected_success(200, resp.status)
return rest_client.ResponseBody(resp, body)
def force_delete_snapshot(self, snapshot_id):
"""Force Delete Snapshot."""
post_body = json.dumps({'os-force_delete': {}})
resp, body = self.post('snapshots/%s/action' % snapshot_id, post_body)
self.expected_success(202, resp.status)
return rest_client.ResponseBody(resp, body)
|
py | 7dfdcf642299ea5a761e21193f6eee8b81a48a44 | # -*- coding: utf-8 -*-
import uuid
from django.conf import settings
import factory, factory.fuzzy
from .models import (Discount, DiscountRegistration, Event, Organization,
Section, Student, TicketType, Union)
class OrganizationFactory(factory.DjangoModelFactory):
name = factory.Faker('company')
class Meta:
model = Organization
class EventFactory(factory.DjangoModelFactory):
name = factory.Faker('word')
organization = factory.SubFactory(OrganizationFactory)
class Meta:
model = Event
class TicketTypeFactory(factory.DjangoModelFactory):
name = factory.Faker('word')
event = factory.SubFactory(EventFactory)
class Meta:
model = TicketType
class DiscountFactory(factory.DjangoModelFactory):
ticket_type = factory.SubFactory(TicketTypeFactory)
union = factory.Iterator(Union.objects.all())
amount = factory.fuzzy.FuzzyInteger(5, 30)
class Meta:
model = Discount
class UnionFactory(factory.DjangoModelFactory):
name = factory.Faker('company')
class Meta:
model = Union
class SectionFactory(factory.DjangoModelFactory):
name = factory.Faker('company')
code = factory.LazyAttribute(lambda self: self.name[:3])
class Meta:
model = Section
class StudentFactory(factory.DjangoModelFactory):
name = factory.Faker('name')
liu_id = factory.LazyAttributeSequence(
lambda self, seq: '{:.5}{:03d}'.format(
self.name.lower(), seq))
mifare_id = factory.fuzzy.FuzzyInteger(0, 0xffffffffffffff)
union = factory.SubFactory(UnionFactory)
section = factory.SubFactory(SectionFactory)
email = factory.Faker('email')
id = factory.LazyFunction(uuid.uuid4)
liu_lin = factory.LazyFunction(uuid.uuid4)
class Meta:
model = Student
class DiscountRegistrationFactory(factory.DjangoModelFactory):
discount = factory.SubFactory(DiscountFactory)
student = factory.SubFactory(StudentFactory)
class Meta:
model = DiscountRegistration
class UserFactory(factory.DjangoModelFactory):
name = factory.Faker('name')
email = factory.Faker('email')
class Meta:
model = settings.AUTH_USER_MODEL
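# Hedged usage sketch (not part of the original module; attribute values are
# made up). With factory_boy, the build strategy returns unsaved instances,
# so the leaf factories need no database access:
#
#     student = StudentFactory.build(name='Jane Doe')
#     section = SectionFactory.build(name='Example Section')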
|
py | 7dfdcfdb0e63076e4459952b637410049a1db65c | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import iostack
import flasherctl
import time
if __name__ == '__main__':
# Parse command-line arguments
parser = argparse.ArgumentParser(
description='remote control of flasher TEST_PULSE')
parser.add_argument('ip', type=str, help="IP address")
parser.add_argument('on_off', metavar='on_off', type=int,
help='0 (low), 1 (high), 2 (pulse high then low)')
parser.add_argument('-p', metavar='port', type=int,
default=iostack.default_port,
help='port (default: %i)' % iostack.default_port)
args = parser.parse_args()
# Create connection
flasher = flasherctl.FlasherCtl(args.ip, args.p, verbosity=0)
flasher._TEST_PULSE(args.on_off)
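    # Example invocation (hedged; the script path, IP address and port are
    # placeholders): python flasher_test_pulse.py 192.168.0.10 2 -p 5000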
|
py | 7dfdd0401fcad093011bd9ad9cbcfbc546a29ce5 | class Foo:
def __init__(self):
print('Foo.__init__')
def one(self):
return 'Foo.self'
class Bar:
def __init__(self):
print('Bar.__init__')
def one(self):
return 'Bar.self'
class Baz(Foo, Bar):
def two(self):
print(Foo.one(self), Bar.one(self), self.one())
Baz().two()
# Result:
# Foo.__init__
# Foo.self Bar.self Foo.self
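# Why self.one() resolves to Foo.one (editorial note, not in the original
# snippet): Baz(Foo, Bar) gives the C3 linearization
# Baz.__mro__ == (Baz, Foo, Bar, object), so attribute lookup on self finds
# Foo.one first, while Foo.one(self) and Bar.one(self) bypass the MRO.
# print(Baz.__mro__)  # uncomment to inspect the resolution order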
|
py | 7dfdd043237fde6618f1d0730be7de3e1e342d4a | """Collection of helper methods.
All containing methods are legacy helpers that should not be used by new
components. Instead call the service directly.
"""
from homeassistant.components.vacuum import (
ATTR_FAN_SPEED, ATTR_PARAMS, DOMAIN, SERVICE_CLEAN_SPOT, SERVICE_LOCATE,
SERVICE_PAUSE, SERVICE_SEND_COMMAND, SERVICE_SET_FAN_SPEED, SERVICE_START,
SERVICE_START_PAUSE, SERVICE_STOP, SERVICE_RETURN_TO_BASE)
from homeassistant.const import (
ATTR_COMMAND, ATTR_ENTITY_ID, SERVICE_TOGGLE,
SERVICE_TURN_OFF, SERVICE_TURN_ON)
from homeassistant.loader import bind_hass
@bind_hass
def turn_on(hass, entity_id=None):
"""Turn all or specified vacuum on."""
hass.add_job(async_turn_on, hass, entity_id)
async def async_turn_on(hass, entity_id=None):
"""Turn all or specified vacuum on."""
data = {ATTR_ENTITY_ID: entity_id} if entity_id else None
await hass.services.async_call(
DOMAIN, SERVICE_TURN_ON, data, blocking=True)
@bind_hass
def turn_off(hass, entity_id=None):
"""Turn all or specified vacuum off."""
hass.add_job(async_turn_off, hass, entity_id)
async def async_turn_off(hass, entity_id=None):
"""Turn all or specified vacuum off."""
data = {ATTR_ENTITY_ID: entity_id} if entity_id else None
await hass.services.async_call(
DOMAIN, SERVICE_TURN_OFF, data, blocking=True)
@bind_hass
def toggle(hass, entity_id=None):
"""Toggle all or specified vacuum."""
hass.add_job(async_toggle, hass, entity_id)
async def async_toggle(hass, entity_id=None):
"""Toggle all or specified vacuum."""
data = {ATTR_ENTITY_ID: entity_id} if entity_id else None
await hass.services.async_call(
DOMAIN, SERVICE_TOGGLE, data, blocking=True)
@bind_hass
def locate(hass, entity_id=None):
"""Locate all or specified vacuum."""
hass.add_job(async_locate, hass, entity_id)
async def async_locate(hass, entity_id=None):
"""Locate all or specified vacuum."""
data = {ATTR_ENTITY_ID: entity_id} if entity_id else None
await hass.services.async_call(
DOMAIN, SERVICE_LOCATE, data, blocking=True)
@bind_hass
def clean_spot(hass, entity_id=None):
"""Tell all or specified vacuum to perform a spot clean-up."""
hass.add_job(async_clean_spot, hass, entity_id)
async def async_clean_spot(hass, entity_id=None):
"""Tell all or specified vacuum to perform a spot clean-up."""
data = {ATTR_ENTITY_ID: entity_id} if entity_id else None
await hass.services.async_call(
DOMAIN, SERVICE_CLEAN_SPOT, data, blocking=True)
@bind_hass
def return_to_base(hass, entity_id=None):
"""Tell all or specified vacuum to return to base."""
hass.add_job(async_return_to_base, hass, entity_id)
async def async_return_to_base(hass, entity_id=None):
"""Tell all or specified vacuum to return to base."""
data = {ATTR_ENTITY_ID: entity_id} if entity_id else None
await hass.services.async_call(
DOMAIN, SERVICE_RETURN_TO_BASE, data, blocking=True)
@bind_hass
def start_pause(hass, entity_id=None):
"""Tell all or specified vacuum to start or pause the current task."""
hass.add_job(async_start_pause, hass, entity_id)
async def async_start_pause(hass, entity_id=None):
"""Tell all or specified vacuum to start or pause the current task."""
data = {ATTR_ENTITY_ID: entity_id} if entity_id else None
await hass.services.async_call(
DOMAIN, SERVICE_START_PAUSE, data, blocking=True)
@bind_hass
def start(hass, entity_id=None):
"""Tell all or specified vacuum to start or resume the current task."""
hass.add_job(async_start, hass, entity_id)
async def async_start(hass, entity_id=None):
"""Tell all or specified vacuum to start or resume the current task."""
data = {ATTR_ENTITY_ID: entity_id} if entity_id else None
await hass.services.async_call(
DOMAIN, SERVICE_START, data, blocking=True)
@bind_hass
def pause(hass, entity_id=None):
"""Tell all or the specified vacuum to pause the current task."""
hass.add_job(async_pause, hass, entity_id)
async def async_pause(hass, entity_id=None):
"""Tell all or the specified vacuum to pause the current task."""
data = {ATTR_ENTITY_ID: entity_id} if entity_id else None
await hass.services.async_call(
DOMAIN, SERVICE_PAUSE, data, blocking=True)
@bind_hass
def stop(hass, entity_id=None):
"""Stop all or specified vacuum."""
hass.add_job(async_stop, hass, entity_id)
async def async_stop(hass, entity_id=None):
"""Stop all or specified vacuum."""
data = {ATTR_ENTITY_ID: entity_id} if entity_id else None
await hass.services.async_call(
DOMAIN, SERVICE_STOP, data, blocking=True)
@bind_hass
def set_fan_speed(hass, fan_speed, entity_id=None):
"""Set fan speed for all or specified vacuum."""
hass.add_job(async_set_fan_speed, hass, fan_speed, entity_id)
async def async_set_fan_speed(hass, fan_speed, entity_id=None):
"""Set fan speed for all or specified vacuum."""
data = {ATTR_ENTITY_ID: entity_id} if entity_id else {}
data[ATTR_FAN_SPEED] = fan_speed
await hass.services.async_call(
DOMAIN, SERVICE_SET_FAN_SPEED, data, blocking=True)
@bind_hass
def send_command(hass, command, params=None, entity_id=None):
"""Send command to all or specified vacuum."""
hass.add_job(async_send_command, hass, command, params, entity_id)
async def async_send_command(hass, command, params=None, entity_id=None):
"""Send command to all or specified vacuum."""
data = {ATTR_ENTITY_ID: entity_id} if entity_id else {}
data[ATTR_COMMAND] = command
if params is not None:
data[ATTR_PARAMS] = params
await hass.services.async_call(
DOMAIN, SERVICE_SEND_COMMAND, data, blocking=True)
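# Hedged sketch of the approach recommended in the module docstring: new code
# should call the vacuum service directly instead of using these legacy
# helpers. The entity id below is a made-up example.
#
#     await hass.services.async_call(
#         DOMAIN, SERVICE_START, {ATTR_ENTITY_ID: "vacuum.living_room"},
#         blocking=True)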
|
py | 7dfdd05ab6dec65a7623b016c3498a54a0006341 | # -*- coding: utf-8 -*-
import vk_api
from vk_api.streaming import VkStreaming
def main():
""" Пример использования streaming
https://vk.com/dev/streaming_api_docs
"""
    vk = vk_api.VkApi(token=<service access key>)  # placeholder: supply a VK service access token
streaming = VkStreaming(vk)
streaming.delete_all_rules()
streaming.add_rule("квартира Москва", "Квартиры")
streaming.add_rule("купить гараж", "Гаражи")
for event in streaming.listen():
tags = '|'.join(event['tags'])
print("Теги: " + tags)
print("Запись: " + event['event_url'])
print("Текст: " + event['text'])
print("_____________________________________________________")
if __name__ == '__main__':
main()
|
py | 7dfdd15071d388759340dc763589876cddaded96 | #!/usr/bin/env python
"""
Training on a single process
"""
import configargparse
import os
import random
import torch
import onmt.opts as opts
from onmt.inputters.inputter import build_dataset_iter, lazily_load_dataset, \
load_fields, _collect_report_features
from onmt.model_builder import build_model
from onmt.utils.optimizers import build_optim
from onmt.trainer import build_trainer
from onmt.models import build_model_saver
from onmt.utils.logging import init_logger, logger
def _check_save_model_path(opt):
save_model_path = os.path.abspath(opt.save_model)
model_dirname = os.path.dirname(save_model_path)
if not os.path.exists(model_dirname):
os.makedirs(model_dirname)
def _tally_parameters(model):
n_params = sum([p.nelement() for p in model.parameters()])
enc = 0
dec = 0
for name, param in model.named_parameters():
if 'encoder' in name:
enc += param.nelement()
else:
dec += param.nelement()
return n_params, enc, dec
def training_opt_postprocessing(opt, device_id):
if opt.word_vec_size != -1:
opt.src_word_vec_size = opt.word_vec_size
opt.tgt_word_vec_size = opt.word_vec_size
if opt.layers != -1:
opt.enc_layers = opt.layers
opt.dec_layers = opt.layers
if opt.rnn_size != -1:
opt.enc_rnn_size = opt.rnn_size
opt.dec_rnn_size = opt.rnn_size
# this check is here because audio allows the encoder and decoder to
# be different sizes, but other model types do not yet
same_size = opt.enc_rnn_size == opt.dec_rnn_size
assert opt.model_type == 'audio' or same_size, \
"The encoder and decoder rnns must be the same size for now"
opt.brnn = opt.encoder_type == "brnn"
assert opt.rnn_type != "SRU" or opt.gpu_ranks, \
"Using SRU requires -gpu_ranks set."
if torch.cuda.is_available() and not opt.gpu_ranks:
logger.info("WARNING: You have a CUDA device, \
should run with -gpu_ranks")
if opt.seed > 0:
torch.manual_seed(opt.seed)
# this one is needed for torchtext random call (shuffled iterator)
# in multi gpu it ensures datasets are read in the same order
random.seed(opt.seed)
# some cudnn methods can be random even after fixing the seed
# unless you tell it to be deterministic
torch.backends.cudnn.deterministic = True
if device_id >= 0:
torch.cuda.set_device(device_id)
if opt.seed > 0:
# These ensure same initialization in multi gpu mode
torch.cuda.manual_seed(opt.seed)
return opt
def main(opt, device_id):
opt = training_opt_postprocessing(opt, device_id)
init_logger(opt.log_file)
# Load checkpoint if we resume from a previous training.
if opt.train_from:
logger.info('Loading checkpoint from %s' % opt.train_from)
checkpoint = torch.load(opt.train_from,
map_location=lambda storage, loc: storage)
# Load default opts values then overwrite it with opts from
        # the checkpoint. It's useful in order to re-train a model
# after adding a new option (not set in checkpoint)
dummy_parser = configargparse.ArgumentParser()
opts.model_opts(dummy_parser)
default_opt = dummy_parser.parse_known_args([])[0]
model_opt = default_opt
model_opt.__dict__.update(checkpoint['opt'].__dict__)
else:
checkpoint = None
model_opt = opt
# Peek the first dataset to determine the data_type.
# (All datasets have the same data_type).
first_dataset = next(lazily_load_dataset("train", opt))
data_type = first_dataset.data_type
# Load fields generated from preprocess phase.
fields = load_fields(first_dataset, opt, checkpoint)
# Report src/tgt features.
knl_features, src_features, tgt_features = _collect_report_features(fields)
    for j, feat in enumerate(knl_features):
logger.info(' * knl feature %d size = %d'
% (j, len(fields[feat].vocab)))
for j, feat in enumerate(src_features):
logger.info(' * src feature %d size = %d'
% (j, len(fields[feat].vocab)))
for j, feat in enumerate(tgt_features):
logger.info(' * tgt feature %d size = %d'
% (j, len(fields[feat].vocab)))
# Build model.
model = build_model(model_opt, opt, fields, checkpoint)
n_params, enc, dec = _tally_parameters(model)
logger.info('encoder: %d' % enc)
logger.info('decoder: %d' % dec)
logger.info('* number of parameters: %d' % n_params)
_check_save_model_path(opt)
# Build optimizer.
optim = build_optim(model, opt, checkpoint)
# Build model saver
model_saver = build_model_saver(model_opt, opt, model, fields, optim)
trainer = build_trainer(opt, device_id, model, fields,
optim, data_type, model_saver=model_saver)
def train_iter_fct(): return build_dataset_iter(
lazily_load_dataset("train", opt), fields, opt)
def valid_iter_fct(): return build_dataset_iter(
lazily_load_dataset("valid", opt), fields, opt, is_train=False)
# Do training.
if len(opt.gpu_ranks):
logger.info('Starting training on GPU: %s' % opt.gpu_ranks)
else:
logger.info('Starting training on CPU, could be very slow')
trainer.train(train_iter_fct, valid_iter_fct, opt.train_steps,
opt.valid_steps)
if opt.tensorboard:
trainer.report_manager.tensorboard_writer.close()
if __name__ == "__main__":
parser = configargparse.ArgumentParser(
description='train.py',
formatter_class=configargparse.ArgumentDefaultsHelpFormatter)
opts.add_md_help_argument(parser)
opts.model_opts(parser)
opts.train_opts(parser)
opt = parser.parse_args()
main(opt)
|
py | 7dfdd2ad62b95b258926167f25ce50b673d712c7 | """ opentrons.execute: functions and entrypoint for running protocols
This module has functions that can be imported to provide protocol
contexts for running protocols during interactive sessions like Jupyter or just
regular python shells. It also provides a console entrypoint for running a
protocol from the command line.
"""
import atexit
import argparse
import logging
import os
import sys
from typing import Any, Callable, Dict, List, Optional, TextIO, Union, TYPE_CHECKING
from opentrons import protocol_api, __version__
from opentrons.config import IS_ROBOT, JUPYTER_NOTEBOOK_LABWARE_DIR
from opentrons.protocol_api import MAX_SUPPORTED_VERSION
from opentrons.protocols.execution import execute as execute_apiv2
from opentrons.protocols.context.protocol_api.protocol_context import (
ProtocolContextImplementation,
)
from opentrons.commands import types as command_types
from opentrons.protocols.parse import parse, version_from_string
from opentrons.protocols.types import ApiDeprecationError
from opentrons.protocols.api_support.types import APIVersion
from opentrons.hardware_control import API, ThreadManager
from .util.entrypoint_util import labware_from_paths, datafiles_from_paths
if TYPE_CHECKING:
from opentrons_shared_data.labware.dev_types import LabwareDefinition
_THREAD_MANAGED_HW: Optional[ThreadManager] = None
#: The background global cache that all protocol contexts created by
#: :py:meth:`get_protocol_api` will share
def get_protocol_api(
version: Union[str, APIVersion],
bundled_labware: Dict[str, "LabwareDefinition"] = None,
bundled_data: Dict[str, bytes] = None,
extra_labware: Dict[str, "LabwareDefinition"] = None,
) -> protocol_api.ProtocolContext:
"""
Build and return a ``protocol_api.ProtocolContext``
connected to the robot.
This can be used to run protocols from interactive Python sessions
such as Jupyter or an interpreter on the command line:
.. code-block:: python
>>> from opentrons.execute import get_protocol_api
>>> protocol = get_protocol_api('2.0')
>>> instr = protocol.load_instrument('p300_single', 'right')
>>> instr.home()
If ``extra_labware`` is not specified, any labware definitions saved in
the ``labware`` directory of the Jupyter notebook directory will be
available.
When this function is called, modules and instruments will be recached.
:param version: The API version to use. This must be lower than
``opentrons.protocol_api.MAX_SUPPORTED_VERSION``.
It may be specified either as a string (``'2.0'``) or
as a ``protocols.types.APIVersion``
(``APIVersion(2, 0)``).
:param bundled_labware: If specified, a mapping from labware names to
labware definitions for labware to consider in the
protocol. Note that if you specify this, _only_
labware in this argument will be allowed in the
protocol. This is preparation for a beta feature
and is best not used.
:param bundled_data: If specified, a mapping from filenames to contents
for data to be available in the protocol from
``protocol_api.ProtocolContext.bundled_data``.
:param extra_labware: If specified, a mapping from labware names to
labware definitions for labware to consider in the
protocol in addition to those stored on the robot.
If this is an empty dict, and this function is called
on a robot, it will look in the 'labware'
subdirectory of the Jupyter data directory for
custom labware.
:return: The protocol context.
"""
global _THREAD_MANAGED_HW
if not _THREAD_MANAGED_HW:
# Build a hardware controller in a worker thread, which is necessary
# because ipython runs its notebook in asyncio but the notebook
# is at script/repl scope not function scope and is synchronous so
# you can't control the loop from inside. If we update to
# IPython 7 we can avoid this, but for now we can't
_THREAD_MANAGED_HW = ThreadManager(API.build_hardware_controller)
if isinstance(version, str):
checked_version = version_from_string(version)
elif not isinstance(version, APIVersion):
raise TypeError("version must be either a string or an APIVersion")
else:
checked_version = version
if (
extra_labware is None
and IS_ROBOT
and JUPYTER_NOTEBOOK_LABWARE_DIR.is_dir() # type: ignore[union-attr]
):
extra_labware = labware_from_paths([str(JUPYTER_NOTEBOOK_LABWARE_DIR)])
context_imp = ProtocolContextImplementation(
hardware=_THREAD_MANAGED_HW,
bundled_labware=bundled_labware,
bundled_data=bundled_data,
extra_labware=extra_labware,
api_version=checked_version,
)
context = protocol_api.ProtocolContext(
implementation=context_imp, api_version=checked_version
)
context_imp.get_hardware().hardware.cache_instruments()
return context
def get_arguments(parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
"""Get the argument parser for this module
Useful if you want to use this module as a component of another CLI program
and want to add its arguments.
:param parser: A parser to add arguments to.
:returns argparse.ArgumentParser: The parser with arguments added.
"""
parser.add_argument(
"-l",
"--log-level",
choices=["debug", "info", "warning", "error", "none"],
default="warning",
help="Specify the level filter for logs to show on the command line. "
"The logs stored in journald or local log files are unaffected by "
"this option and should be configured in the config file. If "
"'none', do not show logs",
)
parser.add_argument(
"-L",
"--custom-labware-path",
action="append",
default=[os.getcwd()],
help="Specify directories to search for custom labware definitions. "
"You can specify this argument multiple times. Once you specify "
"a directory in this way, labware definitions in that directory "
"will become available in ProtocolContext.load_labware(). "
"Only directories specified directly by "
"this argument are searched, not their children. JSON files that "
"do not define labware will be ignored with a message. "
"By default, the current directory (the one in which you are "
"invoking this program) will be searched for labware.",
)
parser.add_argument(
"-D",
"--custom-data-path",
action="append",
nargs="?",
const=".",
default=[],
help="Specify directories to search for custom data files. "
"You can specify this argument multiple times. Once you specify "
"a directory in this way, files located in the specified "
"directory will be available in ProtocolContext.bundled_data. "
"Note that bundle execution will still only allow data files in "
"the bundle. If you specify this without a path, it will "
"add the current path implicitly. If you do not specify this "
"argument at all, no data files will be added. Any file in the "
"specified paths will be loaded into memory and included in the "
"bundle if --bundle is passed, so be careful that any directory "
"you specify has only the files you want. It is usually a "
"better idea to use -d so no files are accidentally included. "
"Also note that data files are made available as their name, not "
"their full path, so name them uniquely.",
)
parser.add_argument(
"-d",
"--custom-data-file",
action="append",
default=[],
help="Specify data files to be made available in "
"ProtocolContext.bundled_data (and possibly bundled if --bundle "
"is passed). Can be specified multiple times with different "
"files. It is usually a better idea to use this than -D because "
"there is less possibility of accidentally including something.",
)
parser.add_argument(
"protocol",
metavar="PROTOCOL",
type=argparse.FileType("rb"),
help="The protocol file to execute. If you pass '-', you can pipe "
"the protocol via stdin; this could be useful if you want to use this "
"utility as part of an automated workflow.",
)
return parser
def execute(
protocol_file: TextIO,
protocol_name: str,
propagate_logs: bool = False,
log_level: str = "warning",
emit_runlog: Callable[[Dict[str, Any]], None] = None,
custom_labware_paths: List[str] = None,
custom_data_paths: List[str] = None,
):
"""
Run the protocol itself.
This is a one-stop function to run a protocol, whether python or json,
    no matter the api version, from external (i.e. not bound up in other
internal server infrastructure) sources.
To run an opentrons protocol from other places, pass in a file like
object as protocol_file; this function either returns (if the run has no
problems) or raises an exception.
To call from the command line use either the autogenerated entrypoint
``opentrons_execute`` or ``python -m opentrons.execute``.
:param protocol_file: The protocol file to execute
:param protocol_name: The name of the protocol file. This is required
internally, but it may not be a thing we can get
from the protocol_file argument.
:param propagate_logs: Whether this function should allow logs from the
Opentrons stack to propagate up to the root handler.
This can be useful if you're integrating this
function in a larger application, but most logs that
occur during protocol simulation are best associated
with the actions in the protocol that cause them.
Default: ``False``
:param log_level: The level of logs to emit on the command line:
``"debug"``, ``"info"``, ``"warning"``, or ``"error"``.
Defaults to ``"warning"``.
:param emit_runlog: A callback for printing the runlog. If specified, this
will be called whenever a command adds an entry to the
runlog, which can be used for display and progress
estimation. If specified, the callback should take a
single argument (the name doesn't matter) which will
be a dictionary (see below). Default: ``None``
:param custom_labware_paths: A list of directories to search for custom
labware, or None. Ignored if the apiv2 feature
flag is not set. Loads valid labware from
these paths and makes them available to the
protocol context.
:param custom_data_paths: A list of directories or files to load custom
data files from. Ignored if the apiv2 feature
flag if not set. Entries may be either files or
directories. Specified files and the
non-recursive contents of specified directories
are presented by the protocol context in
``ProtocolContext.bundled_data``.
The format of the runlog entries is as follows:
.. code-block:: python
{
'name': command_name,
'payload': {
'text': string_command_text,
# The rest of this struct is command-dependent; see
# opentrons.commands.commands. Its keys match format
# keys in 'text', so that
# entry['payload']['text'].format(**entry['payload'])
# will produce a string with information filled in
}
}
"""
stack_logger = logging.getLogger("opentrons")
stack_logger.propagate = propagate_logs
stack_logger.setLevel(getattr(logging, log_level.upper(), logging.WARNING))
contents = protocol_file.read()
if custom_labware_paths:
extra_labware = labware_from_paths(custom_labware_paths)
else:
extra_labware = {}
if custom_data_paths:
extra_data = datafiles_from_paths(custom_data_paths)
else:
extra_data = {}
protocol = parse(
contents, protocol_name, extra_labware=extra_labware, extra_data=extra_data
)
if getattr(protocol, "api_level", APIVersion(2, 0)) < APIVersion(2, 0):
raise ApiDeprecationError(getattr(protocol, "api_level"))
else:
bundled_data = getattr(protocol, "bundled_data", {})
bundled_data.update(extra_data)
gpa_extras = getattr(protocol, "extra_labware", None) or None
context = get_protocol_api(
getattr(protocol, "api_level", MAX_SUPPORTED_VERSION),
bundled_labware=getattr(protocol, "bundled_labware", None),
bundled_data=bundled_data,
extra_labware=gpa_extras,
)
if emit_runlog:
broker = context.broker
broker.subscribe(command_types.COMMAND, emit_runlog)
context.home()
try:
execute_apiv2.run_protocol(protocol, context)
finally:
context.cleanup()
def make_runlog_cb():
level = 0
last_dollar = None
def _print_runlog(command: Dict[str, Any]):
nonlocal level
nonlocal last_dollar
if last_dollar == command["$"]:
if command["$"] == "before":
level += 1
else:
level -= 1
last_dollar = command["$"]
if command["$"] == "before":
print(
" ".join(
[
"\t" * level,
command["payload"].get("text", "").format(**command["payload"]),
]
)
)
return _print_runlog
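# For reference (hedged sketch; the command text values are made up), the
# printer returned above indents consecutive nested "before" entries with
# tabs, producing output roughly like:
#
#      Picking up tip from A1 of tiprack on 3
#          Moving to well A1 of tiprack on 3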
def main() -> int:
"""Handler for command line invocation to run a protocol.
    Arguments are read from :py:obj:`sys.argv` by the argument parser.
:returns int: A success or failure value suitable for use as a shell
return code passed to :py:obj:`sys.exit` (0 means success,
anything else is a kind of failure).
"""
parser = argparse.ArgumentParser(
prog="opentrons_execute", description="Run an OT-2 protocol"
)
parser = get_arguments(parser)
# don't want to add this in get_arguments because if somebody upstream is
# using that parser they probably want their own version
parser.add_argument("-v", "--version", action="version", version=__version__)
parser.add_argument(
"-n",
"--no-print-runlog",
action="store_true",
help="Do not print the commands as they are executed",
)
args = parser.parse_args()
printer = None if args.no_print_runlog else make_runlog_cb()
if args.log_level != "none":
stack_logger = logging.getLogger("opentrons")
stack_logger.addHandler(logging.StreamHandler(sys.stdout))
log_level = args.log_level
else:
log_level = "warning"
# Try to migrate containers from database to v2 format
execute(args.protocol, args.protocol.name, log_level=log_level, emit_runlog=printer)
return 0
@atexit.register
def _clear_cached_hardware_controller():
global _THREAD_MANAGED_HW
if _THREAD_MANAGED_HW:
_THREAD_MANAGED_HW.clean_up()
_THREAD_MANAGED_HW = None
if __name__ == "__main__":
sys.exit(main())
|
py | 7dfdd3a654cb41c99919dfbabafcf029f7cd16f5 | # Licensed to Elasticsearch B.V. under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch B.V. licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import sys
import types
import time
import random
import logging
from collections import Counter, OrderedDict
from copy import deepcopy
from esrally import exceptions, track
# Mapping from operation type to specific runner
__RUNNERS = {}
def register_default_runners():
register_runner(track.OperationType.Bulk.name, BulkIndex())
register_runner(track.OperationType.ForceMerge.name, ForceMerge())
register_runner(track.OperationType.IndicesStats.name, IndicesStats())
register_runner(track.OperationType.NodesStats.name, NodeStats())
register_runner(track.OperationType.Search.name, Query())
register_runner(track.OperationType.RawRequest.name, RawRequest())
# We treat the following as administrative commands and thus already start to wrap them in a retry.
register_runner(track.OperationType.ClusterHealth.name, Retry(ClusterHealth()))
register_runner(track.OperationType.PutPipeline.name, Retry(PutPipeline()))
register_runner(track.OperationType.Refresh.name, Retry(Refresh()))
register_runner(track.OperationType.CreateIndex.name, Retry(CreateIndex()))
register_runner(track.OperationType.DeleteIndex.name, Retry(DeleteIndex()))
register_runner(track.OperationType.CreateIndexTemplate.name, Retry(CreateIndexTemplate()))
register_runner(track.OperationType.DeleteIndexTemplate.name, Retry(DeleteIndexTemplate()))
register_runner(track.OperationType.ShrinkIndex.name, Retry(ShrinkIndex()))
register_runner(track.OperationType.CreateMlDatafeed.name, Retry(CreateMlDatafeed()))
register_runner(track.OperationType.DeleteMlDatafeed.name, Retry(DeleteMlDatafeed()))
register_runner(track.OperationType.StartMlDatafeed.name, Retry(StartMlDatafeed()))
register_runner(track.OperationType.StopMlDatafeed.name, Retry(StopMlDatafeed()))
register_runner(track.OperationType.CreateMlJob.name, Retry(CreateMlJob()))
register_runner(track.OperationType.DeleteMlJob.name, Retry(DeleteMlJob()))
register_runner(track.OperationType.OpenMlJob.name, Retry(OpenMlJob()))
register_runner(track.OperationType.CloseMlJob.name, Retry(CloseMlJob()))
def runner_for(operation_type):
try:
return __RUNNERS[operation_type]
except KeyError:
raise exceptions.RallyError("No runner available for operation type [%s]" % operation_type)
def register_runner(operation_type, runner):
logger = logging.getLogger(__name__)
if getattr(runner, "multi_cluster", False) == True:
if "__enter__" in dir(runner) and "__exit__" in dir(runner):
if logger.isEnabledFor(logging.DEBUG):
logger.debug("Registering runner object [%s] for [%s].", str(runner), str(operation_type))
__RUNNERS[operation_type] = MultiClusterDelegatingRunner(runner, str(runner), context_manager_enabled=True)
else:
if logger.isEnabledFor(logging.DEBUG):
logger.debug("Registering context-manager capable runner object [%s] for [%s].", str(runner), str(operation_type))
__RUNNERS[operation_type] = MultiClusterDelegatingRunner(runner, str(runner))
# we'd rather use callable() but this will erroneously also classify a class as callable...
elif isinstance(runner, types.FunctionType):
if logger.isEnabledFor(logging.DEBUG):
logger.debug("Registering runner function [%s] for [%s].", str(runner), str(operation_type))
__RUNNERS[operation_type] = SingleClusterDelegatingRunner(runner, runner.__name__)
elif "__enter__" in dir(runner) and "__exit__" in dir(runner):
if logger.isEnabledFor(logging.DEBUG):
logger.debug("Registering context-manager capable runner object [%s] for [%s].", str(runner), str(operation_type))
__RUNNERS[operation_type] = SingleClusterDelegatingRunner(runner, str(runner), context_manager_enabled=True)
else:
if logger.isEnabledFor(logging.DEBUG):
logger.debug("Registering runner object [%s] for [%s].", str(runner), str(operation_type))
__RUNNERS[operation_type] = SingleClusterDelegatingRunner(runner, str(runner))
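# Hedged illustration (not one of this module's registrations; the operation
# name and params key are made up): a plain function is a types.FunctionType,
# so register_runner() above wraps it in SingleClusterDelegatingRunner and it
# receives the "default" cluster's client.
#
#     def refresh_index_example(es, params):
#         es.indices.refresh(index=params.get("index", "_all"))
#
#     register_runner("refresh-index-example", refresh_index_example)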
# Only intended for unit-testing!
def remove_runner(operation_type):
del __RUNNERS[operation_type]
class Runner:
"""
Base class for all operations against Elasticsearch.
"""
def __init__(self):
self.logger = logging.getLogger(__name__)
def __enter__(self):
return self
def __call__(self, *args):
"""
Runs the actual method that should be benchmarked.
:param args: All arguments that are needed to call this method.
        :return: A pair of (int, String). The first component indicates the "weight" of this call. It is typically 1 but for bulk operations
it should be the actual bulk size. The second component is the "unit" of weight which should be "ops" (short for
"operations") by default. If applicable, the unit should always be in plural form. It is used in metrics records
for throughput and reports. A value will then be shown as e.g. "111 ops/s".
"""
raise NotImplementedError("abstract operation")
def __exit__(self, exc_type, exc_val, exc_tb):
return False
class SingleClusterDelegatingRunner(Runner):
def __init__(self, runnable, name, context_manager_enabled=False):
super().__init__()
self.runnable = runnable
self.name = name
self.context_manager_enabled = context_manager_enabled
def __call__(self, *args):
# Single cluster mode: es parameter passed in runner is a client object for the "default" cluster
es = args[0]
return self.runnable(es['default'], *args[1:])
def __repr__(self, *args, **kwargs):
if self.context_manager_enabled:
return "user-defined context-manager enabled runner for [%s]" % self.name
else:
return "user-defined runner for [%s]" % self.name
def __enter__(self):
if self.context_manager_enabled:
self.runnable.__enter__()
return self
else:
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if self.context_manager_enabled:
return self.runnable.__exit__(exc_type, exc_val, exc_tb)
else:
return False
class MultiClusterDelegatingRunner(Runner):
def __init__(self, runnable, name, context_manager_enabled=False):
super().__init__()
self.runnable = runnable
self.name = name
self.context_manager_enabled = context_manager_enabled
def __call__(self, *args):
# Multi cluster mode: pass the entire es dict and let runner code handle connections to different clusters
return self.runnable(*args)
def __repr__(self, *args, **kwargs):
if self.context_manager_enabled:
return "user-defined multi-cluster context-manager enabled runner for [%s]" % self.name
else:
return "user-defined multi-cluster enabled runner for [%s]" % self.name
def __enter__(self):
if self.context_manager_enabled:
self.runnable.__enter__()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if self.context_manager_enabled:
return self.runnable.__exit__(exc_type, exc_val, exc_tb)
else:
return False
def mandatory(params, key, op):
try:
return params[key]
except KeyError:
raise exceptions.DataError("Parameter source for operation '%s' did not provide the mandatory parameter '%s'. Please add it to your"
" parameter source." % (str(op), key))
class BulkIndex(Runner):
"""
Bulk indexes the given documents.
"""
def __init__(self):
super().__init__()
def __call__(self, es, params):
"""
Runs one bulk indexing operation.
:param es: The Elasticsearch client.
:param params: A hash with all parameters. See below for details.
:return: A hash with meta data for this bulk operation. See below for details.
It expects a parameter dict with the following mandatory keys:
* ``body``: containing all documents for the current bulk request.
* ``bulk-size``: the number of documents in this bulk.
* ``action_metadata_present``: if ``True``, assume that an action and metadata line is present (meaning only half of the lines
contain actual documents to index)
* ``index``: The name of the affected index in case ``action_metadata_present`` is ``False``.
* ``type``: The name of the affected type in case ``action_metadata_present`` is ``False``.
The following keys are optional:
        * ``pipeline``: If present, runs the specified ingest pipeline for this bulk.
* ``detailed-results``: If ``True``, the runner will analyze the response and add detailed meta-data. Defaults to ``False``. Note
that this has a very significant impact on performance and will very likely cause a bottleneck in the benchmark driver so please
be very cautious enabling this feature. Our own measurements have shown a median overhead of several thousand times (execution time
is in the single digit microsecond range when this feature is disabled and in the single digit millisecond range when this feature
is enabled; numbers based on a bulk size of 500 elements and no errors). For details please refer to the respective benchmarks
in ``benchmarks/driver``.
Returned meta data
The following meta data are always returned:
* ``index``: name of the affected index. May be `None` if it could not be derived.
* ``bulk-size``: bulk size, e.g. 5.000.
        * ``bulk-request-size-bytes``: size of the full bulk request in bytes
* ``total-document-size-bytes``: size of all documents contained in the bulk request in bytes
* ``weight``: operation-agnostic representation of the bulk size (used internally by Rally for throughput calculation).
* ``unit``: The unit in which to interpret ``bulk-size`` and ``weight``. Always "docs".
* ``success``: A boolean indicating whether the bulk request has succeeded.
* ``success-count``: Number of successfully processed items for this request (denoted in ``unit``).
* ``error-count``: Number of failed items for this request (denoted in ``unit``).
        * ``took``: Value of the ``took`` property in the bulk response.
If ``detailed-results`` is ``True`` the following meta data are returned in addition:
* ``ops``: A hash with the operation name as key (e.g. index, update, delete) and various counts as values. ``item-count`` contains
the total number of items for this key. Additionally, we return a separate counter each result (indicating e.g. the number of created
items, the number of deleted items etc.).
* ``shards_histogram``: An array of hashes where each hash has two keys: ``item-count`` contains the number of items to which a shard
distribution applies and ``shards`` contains another hash with the actual distribution of ``total``, ``successful`` and ``failed``
shards (see examples below).
* ``bulk-request-size-bytes``: Total size of the bulk request body in bytes.
* ``total-document-size-bytes``: Total size of all documents within the bulk request body in bytes.
Here are a few examples:
If ``detailed-results`` is ``False`` a typical return value is::
{
"index": "my_index",
"weight": 5000,
"unit": "docs",
"bulk-size": 5000,
"success": True,
"success-count": 5000,
"error-count": 0,
"took": 20
}
        Whereas the response will look as follows if there are bulk errors::
{
"index": "my_index",
"weight": 5000,
"unit": "docs",
"bulk-size": 5000,
"success": False,
"success-count": 4000,
"error-count": 1000,
"took": 20
}
If ``detailed-results`` is ``True`` a typical return value is::
{
"index": "my_index",
"weight": 5000,
"unit": "docs",
"bulk-size": 5000,
"bulk-request-size-bytes": 2250000,
"total-document-size-bytes": 2000000,
"success": True,
"success-count": 5000,
"error-count": 0,
"took": 20,
"ops": {
"index": {
"item-count": 5000,
"created": 5000
}
},
"shards_histogram": [
{
"item-count": 5000,
"shards": {
"total": 2,
"successful": 2,
"failed": 0
}
}
]
}
An example error response may look like this::
{
"index": "my_index",
"weight": 5000,
"unit": "docs",
"bulk-size": 5000,
"bulk-request-size-bytes": 2250000,
"total-document-size-bytes": 2000000,
"success": False,
"success-count": 4000,
"error-count": 1000,
"took": 20,
"ops": {
"index": {
"item-count": 5000,
"created": 4000,
"noop": 1000
}
},
"shards_histogram": [
{
"item-count": 4000,
"shards": {
"total": 2,
"successful": 2,
"failed": 0
}
},
{
"item-count": 500,
"shards": {
"total": 2,
"successful": 1,
"failed": 1
}
},
{
"item-count": 500,
"shards": {
"total": 2,
"successful": 0,
"failed": 2
}
}
]
}
"""
detailed_results = params.get("detailed-results", False)
index = params.get("index")
bulk_params = {}
if "pipeline" in params:
bulk_params["pipeline"] = params["pipeline"]
with_action_metadata = mandatory(params, "action-metadata-present", self)
bulk_size = mandatory(params, "bulk-size", self)
if with_action_metadata:
# only half of the lines are documents
response = es.bulk(body=params["body"], params=bulk_params)
else:
response = es.bulk(body=params["body"], index=index, doc_type=params["type"], params=bulk_params)
stats = self.detailed_stats(params, bulk_size, response) if detailed_results else self.simple_stats(bulk_size, response)
meta_data = {
"index": str(index) if index else None,
"weight": bulk_size,
"unit": "docs",
"bulk-size": bulk_size
}
meta_data.update(stats)
if not stats["success"]:
meta_data["error-type"] = "bulk"
return meta_data
def detailed_stats(self, params, bulk_size, response):
ops = {}
shards_histogram = OrderedDict()
bulk_error_count = 0
error_details = set()
bulk_request_size_bytes = 0
total_document_size_bytes = 0
with_action_metadata = mandatory(params, "action-metadata-present", self)
for line_number, data in enumerate(params["body"]):
line_size = len(data.encode('utf-8'))
if with_action_metadata:
if line_number % 2 == 1:
total_document_size_bytes += line_size
else:
total_document_size_bytes += line_size
bulk_request_size_bytes += line_size
for idx, item in enumerate(response["items"]):
# there is only one (top-level) item
op, data = next(iter(item.items()))
if op not in ops:
ops[op] = Counter()
ops[op]["item-count"] += 1
if "result" in data:
ops[op][data["result"]] += 1
if "_shards" in data:
s = data["_shards"]
sk = "%d-%d-%d" % (s["total"], s["successful"], s["failed"])
if sk not in shards_histogram:
shards_histogram[sk] = {
"item-count": 0,
"shards": s
}
shards_histogram[sk]["item-count"] += 1
if data["status"] > 299 or ("_shards" in data and data["_shards"]["failed"] > 0):
bulk_error_count += 1
self.extract_error_details(error_details, data)
stats = {
"took": response.get("took"),
"success": bulk_error_count == 0,
"success-count": bulk_size - bulk_error_count,
"error-count": bulk_error_count,
"ops": ops,
"shards_histogram": list(shards_histogram.values()),
"bulk-request-size-bytes": bulk_request_size_bytes,
"total-document-size-bytes": total_document_size_bytes
}
if bulk_error_count > 0:
stats["error-type"] = "bulk"
stats["error-description"] = self.error_description(error_details)
return stats
def simple_stats(self, bulk_size, response):
bulk_error_count = 0
error_details = set()
if response["errors"]:
for idx, item in enumerate(response["items"]):
data = next(iter(item.values()))
if data["status"] > 299 or ('_shards' in data and data["_shards"]["failed"] > 0):
bulk_error_count += 1
self.extract_error_details(error_details, data)
stats = {
"took": response.get("took"),
"success": bulk_error_count == 0,
"success-count": bulk_size - bulk_error_count,
"error-count": bulk_error_count
}
if bulk_error_count > 0:
stats["error-type"] = "bulk"
stats["error-description"] = self.error_description(error_details)
return stats
def extract_error_details(self, error_details, data):
error_data = data.get("error", {})
error_reason = error_data.get("reason") if isinstance(error_data, dict) else str(error_data)
if error_data:
error_details.add((data["status"], error_reason))
else:
error_details.add((data["status"], None))
def error_description(self, error_details):
error_description = ""
for status, reason in error_details:
if reason:
error_description += "HTTP status: %s, message: %s" % (str(status), reason)
else:
error_description += "HTTP status: %s" % str(status)
return error_description
def __repr__(self, *args, **kwargs):
return "bulk-index"
class ForceMerge(Runner):
"""
Runs a force merge operation against Elasticsearch.
"""
def __call__(self, es, params):
import elasticsearch
try:
if "max-num-segments" in params:
max_num_segments = params["max-num-segments"]
elif "max_num_segments" in params:
self.logger.warning("Your parameter source uses the deprecated name [max_num_segments]. "
"Please change it to [max-num-segments].")
max_num_segments = params["max_num_segments"]
else:
max_num_segments = None
if max_num_segments:
es.indices.forcemerge(index="_all", max_num_segments=max_num_segments)
else:
es.indices.forcemerge(index="_all")
except elasticsearch.TransportError as e:
# this is caused by older versions of Elasticsearch (< 2.1), fall back to optimize
if e.status_code == 400:
if max_num_segments:
es.transport.perform_request("POST", "/_optimize?max_num_segments={}".format(max_num_segments))
else:
es.transport.perform_request("POST", "/_optimize")
else:
raise e
def __repr__(self, *args, **kwargs):
return "force-merge"
class IndicesStats(Runner):
"""
Gather index stats for all indices.
"""
def __call__(self, es, params):
es.indices.stats(metric="_all")
def __repr__(self, *args, **kwargs):
return "indices-stats"
class NodeStats(Runner):
"""
Gather node stats for all nodes.
"""
def __call__(self, es, params):
es.nodes.stats(metric="_all")
def __repr__(self, *args, **kwargs):
return "node-stats"
class Query(Runner):
"""
Runs a request body search against Elasticsearch.
It expects at least the following keys in the `params` hash:
* `index`: The index or indices against which to issue the query.
* `type`: See `index`
* `cache`: True iff the request cache should be used.
* `body`: Query body
If the following parameters are present in addition, a scroll query will be issued:
    * `pages`: Number of pages to retrieve at most for this scroll. If a scroll query yields fewer results than the specified number of
    pages, we will terminate earlier.
* `results-per-page`: Number of results to retrieve per page.
Returned meta data
The following meta data are always returned:
* ``weight``: operation-agnostic representation of the "weight" of an operation (used internally by Rally for throughput calculation).
Always 1 for normal queries and the number of retrieved pages for scroll queries.
* ``unit``: The unit in which to interpret ``weight``. Always "ops".
* ``hits``: Total number of hits for this operation.
* ``hits_relation``: whether ``hits`` is accurate (``eq``) or a lower bound of the actual hit count (``gte``).
* ``timed_out``: Whether the search has timed out. For scroll queries, this flag is ``True`` if the flag was ``True`` for any of the
queries issued.
For scroll queries we also return:
* ``pages``: Total number of pages that have been retrieved.
"""
def __init__(self):
super().__init__()
self.scroll_id = None
self.es = None
def __call__(self, es, params):
if "pages" in params and "results-per-page" in params:
return self.scroll_query(es, params)
else:
return self.request_body_query(es, params)
def request_body_query(self, es, params):
request_params = params.get("request-params", {})
if "cache" in params:
request_params["request_cache"] = params["cache"]
r = es.search(
index=params.get("index", "_all"),
doc_type=params.get("type"),
body=mandatory(params, "body", self),
**request_params)
hits = r["hits"]["total"]
if isinstance(hits, dict):
hits_total = hits["value"]
hits_relation = hits["relation"]
else:
hits_total = hits
hits_relation = "eq"
return {
"weight": 1,
"unit": "ops",
"hits": hits_total,
"hits_relation": hits_relation,
"timed_out": r["timed_out"],
"took": r["took"]
}
def scroll_query(self, es, params):
request_params = params.get("request-params", {})
cache = params.get("cache")
hits = 0
retrieved_pages = 0
timed_out = False
took = 0
self.es = es
# explicitly convert to int to provoke an error otherwise
total_pages = sys.maxsize if params["pages"] == "all" else int(params["pages"])
size = params.get("results-per-page")
for page in range(total_pages):
if page == 0:
r = es.search(
index=params.get("index", "_all"),
doc_type=params.get("type"),
body=mandatory(params, "body", self),
sort="_doc",
scroll="10s",
size=size,
request_cache=cache,
**request_params
)
# This should only happen if we concurrently create an index and start searching
self.scroll_id = r.get("_scroll_id", None)
else:
# This does only work for ES 2.x and above
# r = es.scroll(body={"scroll_id": self.scroll_id, "scroll": "10s"})
# This is the most compatible version to perform a scroll across all supported versions of Elasticsearch
# (1.x does not support a proper JSON body in search scroll requests).
r = self.es.transport.perform_request("GET", "/_search/scroll", params={"scroll_id": self.scroll_id, "scroll": "10s"})
hit_count = len(r["hits"]["hits"])
timed_out = timed_out or r["timed_out"]
took += r["took"]
hits += hit_count
retrieved_pages += 1
if hit_count == 0:
# We're done prematurely. Even if we are on page index zero, we still made one call.
break
return {
"weight": retrieved_pages,
"pages": retrieved_pages,
"hits": hits,
# as Rally determines the number of hits in a scroll, the result is always accurate.
"hits_relation": "eq",
"unit": "pages",
"timed_out": timed_out,
"took": took
}
def __exit__(self, exc_type, exc_val, exc_tb):
if self.scroll_id and self.es:
try:
# This does only work for ES 2.x and above
# self.es.clear_scroll(body={"scroll_id": [self.scroll_id]})
# This is the most compatible version to clear one scroll id across all supported versions of Elasticsearch
# (1.x does not support a proper JSON body in clear scroll requests).
self.es.transport.perform_request("DELETE", "/_search/scroll/%s" % self.scroll_id)
except BaseException:
self.logger.exception("Could not clear scroll [%s]. This will lead to excessive resource usage in Elasticsearch and will "
"skew your benchmark results.", self.scroll_id)
self.scroll_id = None
self.es = None
return False
def __repr__(self, *args, **kwargs):
return "query"
class ClusterHealth(Runner):
"""
Get cluster health
"""
def __call__(self, es, params):
from enum import Enum
from functools import total_ordering
from elasticsearch.client import _make_path
@total_ordering
class ClusterHealthStatus(Enum):
UNKNOWN = 0
RED = 1
YELLOW = 2
GREEN = 3
def __lt__(self, other):
if self.__class__ is other.__class__:
return self.value < other.value
return NotImplemented
def status(v):
try:
return ClusterHealthStatus[v.upper()]
except (KeyError, AttributeError):
return ClusterHealthStatus.UNKNOWN
index = params.get("index")
request_params = params.get("request-params", {})
# by default, Elasticsearch will not wait and thus we treat this as success
expected_cluster_status = request_params.get("wait_for_status", str(ClusterHealthStatus.UNKNOWN))
# newer ES versions >= 5.0
if "wait_for_no_relocating_shards" in request_params:
expected_relocating_shards = 0
else:
# older ES versions
# either the user has defined something or we're good with any count of relocating shards.
expected_relocating_shards = int(request_params.get("wait_for_relocating_shards", sys.maxsize))
# This would not work if the request parameter is not a proper method parameter for the ES client...
# result = es.cluster.health(**request_params)
result = es.transport.perform_request("GET", _make_path("_cluster", "health", index), params=request_params)
cluster_status = result["status"]
relocating_shards = result["relocating_shards"]
return {
"weight": 1,
"unit": "ops",
"success": status(cluster_status) >= status(expected_cluster_status) and relocating_shards <= expected_relocating_shards,
"cluster-status": cluster_status,
"relocating-shards": relocating_shards
}
def __repr__(self, *args, **kwargs):
return "cluster-health"
class PutPipeline(Runner):
"""
Execute the `put pipeline API <https://www.elastic.co/guide/en/elasticsearch/reference/current/put-pipeline-api.html>`_. Note that this
API is only available from Elasticsearch 5.0 onwards.
"""
def __call__(self, es, params):
es.ingest.put_pipeline(id=mandatory(params, "id", self),
body=mandatory(params, "body", self),
master_timeout=params.get("master-timeout"),
timeout=params.get("timeout"),
)
def __repr__(self, *args, **kwargs):
return "put-pipeline"
class Refresh(Runner):
"""
Execute the `refresh API <https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-refresh.html>`_.
"""
def __call__(self, es, params):
es.indices.refresh(index=params.get("index", "_all"))
def __repr__(self, *args, **kwargs):
return "refresh"
class CreateIndex(Runner):
"""
Execute the `create index API <https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-create-index.html>`_.
"""
def __call__(self, es, params):
indices = mandatory(params, "indices", self)
request_params = params.get("request-params", {})
for index, body in indices:
es.indices.create(index=index, body=body, **request_params)
return len(indices), "ops"
def __repr__(self, *args, **kwargs):
return "create-index"
class DeleteIndex(Runner):
"""
Execute the `delete index API <https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-delete-index.html>`_.
"""
def __call__(self, es, params):
ops = 0
indices = mandatory(params, "indices", self)
only_if_exists = params.get("only-if-exists", False)
request_params = params.get("request-params", {})
for index_name in indices:
if not only_if_exists:
es.indices.delete(index=index_name, **request_params)
ops += 1
elif only_if_exists and es.indices.exists(index=index_name):
self.logger.info("Index [%s] already exists. Deleting it.", index_name)
es.indices.delete(index=index_name, **request_params)
ops += 1
return ops, "ops"
def __repr__(self, *args, **kwargs):
return "delete-index"
class CreateIndexTemplate(Runner):
"""
Execute the `PUT index template API <https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-templates.html>`_.
"""
def __call__(self, es, params):
templates = mandatory(params, "templates", self)
request_params = params.get("request-params", {})
for template, body in templates:
es.indices.put_template(name=template,
body=body,
**request_params)
return len(templates), "ops"
def __repr__(self, *args, **kwargs):
return "create-index-template"
class DeleteIndexTemplate(Runner):
"""
Execute the `delete index template API <https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-templates.html#delete>`_.
"""
def __call__(self, es, params):
template_names = mandatory(params, "templates", self)
only_if_exists = params.get("only-if-exists", False)
request_params = params.get("request-params", {})
ops_count = 0
for template_name, delete_matching_indices, index_pattern in template_names:
if not only_if_exists:
es.indices.delete_template(name=template_name, **request_params)
ops_count += 1
elif only_if_exists and es.indices.exists_template(template_name):
self.logger.info("Index template [%s] already exists. Deleting it.", template_name)
es.indices.delete_template(name=template_name, **request_params)
ops_count += 1
# ensure that we do not provide an empty index pattern by accident
if delete_matching_indices and index_pattern:
es.indices.delete(index=index_pattern)
ops_count += 1
return ops_count, "ops"
def __repr__(self, *args, **kwargs):
return "delete-index-template"
class ShrinkIndex(Runner):
"""
Execute the `shrink index API <https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-shrink-index.html>`_.
This is a high-level runner that actually executes multiple low-level operations under the hood.
"""
def __init__(self):
super().__init__()
self.cluster_health = Retry(ClusterHealth())
def _wait_for(self, es, idx, description):
# wait a little bit before the first check
time.sleep(3)
result = self.cluster_health(es, params={
"index": idx,
"retries": sys.maxsize,
"request-params": {
"wait_for_no_relocating_shards": "true"
}
})
if not result["success"]:
raise exceptions.RallyAssertionError("Failed to wait for [{}].".format(description))
def __call__(self, es, params):
source_index = mandatory(params, "source-index", self)
target_index = mandatory(params, "target-index", self)
# we need to inject additional settings so we better copy the body
target_body = deepcopy(mandatory(params, "target-body", self))
shrink_node = params.get("shrink-node")
# Choose a random data node if none is specified
if not shrink_node:
node_names = []
# choose a random data node
for node in es.nodes.info()["nodes"].values():
if "data" in node["roles"]:
node_names.append(node["name"])
if not node_names:
raise exceptions.RallyAssertionError("Could not choose a suitable shrink-node automatically. Please specify it explicitly.")
shrink_node = random.choice(node_names)
self.logger.info("Using [%s] as shrink node.", shrink_node)
self.logger.info("Preparing [%s] for shrinking.", source_index)
# prepare index for shrinking
es.indices.put_settings(index=source_index,
body={
"settings": {
"index.routing.allocation.require._name": shrink_node,
"index.blocks.write": "true"
}
},
preserve_existing=True)
self.logger.info("Waiting for relocation to finish for index [%s]...", source_index)
self._wait_for(es, source_index, "shard relocation for index [{}]".format(source_index))
self.logger.info("Shrinking [%s] to [%s].", source_index, target_index)
if "settings" not in target_body:
target_body["settings"] = {}
target_body["settings"]["index.routing.allocation.require._name"] = None
target_body["settings"]["index.blocks.write"] = None
# kick off the shrink operation
es.indices.shrink(index=source_index, target=target_index, body=target_body)
self.logger.info("Waiting for shrink to finish for index [%s]...", source_index)
self._wait_for(es, target_index, "shrink for index [{}]".format(target_index))
self.logger.info("Shrinking [%s] to [%s] has finished.", source_index, target_index)
# ops_count is not really important for this operation...
return 1, "ops"
def __repr__(self, *args, **kwargs):
return "shrink-index"
class CreateMlDatafeed(Runner):
"""
Execute the `create datafeed API <https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-put-datafeed.html>`_.
"""
def __call__(self, es, params):
datafeed_id = mandatory(params, "datafeed-id", self)
body = mandatory(params, "body", self)
es.xpack.ml.put_datafeed(datafeed_id=datafeed_id, body=body)
def __repr__(self, *args, **kwargs):
return "create-ml-datafeed"
class DeleteMlDatafeed(Runner):
"""
Execute the `delete datafeed API <https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-delete-datafeed.html>`_.
"""
def __call__(self, es, params):
datafeed_id = mandatory(params, "datafeed-id", self)
force = params.get("force", False)
# we don't want to fail if a datafeed does not exist, thus we ignore 404s.
es.xpack.ml.delete_datafeed(datafeed_id=datafeed_id, force=force, ignore=[404])
def __repr__(self, *args, **kwargs):
return "delete-ml-datafeed"
class StartMlDatafeed(Runner):
"""
Execute the `start datafeed API <https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-start-datafeed.html>`_.
"""
def __call__(self, es, params):
datafeed_id = mandatory(params, "datafeed-id", self)
body = params.get("body")
start = params.get("start")
end = params.get("end")
timeout = params.get("timeout")
es.xpack.ml.start_datafeed(datafeed_id=datafeed_id, body=body, start=start, end=end, timeout=timeout)
def __repr__(self, *args, **kwargs):
return "start-ml-datafeed"
class StopMlDatafeed(Runner):
"""
Execute the `stop datafeed API <https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-stop-datafeed.html>`_.
"""
def __call__(self, es, params):
datafeed_id = mandatory(params, "datafeed-id", self)
force = params.get("force", False)
timeout = params.get("timeout")
es.xpack.ml.stop_datafeed(datafeed_id=datafeed_id, force=force, timeout=timeout)
def __repr__(self, *args, **kwargs):
return "stop-ml-datafeed"
class CreateMlJob(Runner):
"""
Execute the `create job API <https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-put-job.html>`_.
"""
def __call__(self, es, params):
job_id = mandatory(params, "job-id", self)
body = mandatory(params, "body", self)
es.xpack.ml.put_job(job_id=job_id, body=body)
def __repr__(self, *args, **kwargs):
return "create-ml-job"
class DeleteMlJob(Runner):
"""
Execute the `delete job API <https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-delete-job.html>`_.
"""
def __call__(self, es, params):
job_id = mandatory(params, "job-id", self)
force = params.get("force", False)
# we don't want to fail if a job does not exist, thus we ignore 404s.
es.xpack.ml.delete_job(job_id=job_id, force=force, ignore=[404])
def __repr__(self, *args, **kwargs):
return "delete-ml-job"
class OpenMlJob(Runner):
"""
Execute the `open job API <https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-open-job.html>`_.
"""
def __call__(self, es, params):
job_id = mandatory(params, "job-id", self)
es.xpack.ml.open_job(job_id=job_id)
def __repr__(self, *args, **kwargs):
return "open-ml-job"
class CloseMlJob(Runner):
"""
Execute the `close job API <http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-close-job.html>`_.
"""
def __call__(self, es, params):
job_id = mandatory(params, "job-id", self)
force = params.get("force", False)
timeout = params.get("timeout")
es.xpack.ml.close_job(job_id=job_id, force=force, timeout=timeout)
def __repr__(self, *args, **kwargs):
return "close-ml-job"
class RawRequest(Runner):
def __call__(self, es, params):
request_params = {}
if "ignore" in params:
request_params["ignore"] = params["ignore"]
request_params.update(params.get("request-params", {}))
es.transport.perform_request(method=params.get("method", "GET"),
url=mandatory(params, "path", self),
headers=params.get("headers"),
body=params.get("body"),
params=request_params)
def __repr__(self, *args, **kwargs):
return "raw-request"
# TODO: Allow to use this from (selected) regular runners and add user documentation.
# TODO: It would maybe be interesting to add meta-data on how many retries there were.
class Retry(Runner):
"""
This runner can be used as a wrapper around regular runners to retry operations.
It defines the following parameters:
* ``retries`` (optional, default 0): The number of times the operation is retried.
* ``retry-wait-period`` (optional, default 0.5): The time in seconds to wait after an error.
* ``retry-on-timeout`` (optional, default True): Whether to retry on connection timeout.
* ``retry-on-error`` (optional, default False): Whether to retry on failure (i.e. the delegate returns ``success == False``)
"""
def __init__(self, delegate):
super().__init__()
self.delegate = delegate
def __enter__(self):
self.delegate.__enter__()
return self
def __call__(self, es, params):
import elasticsearch
import socket
max_attempts = params.get("retries", 0) + 1
sleep_time = params.get("retry-wait-period", 0.5)
retry_on_timeout = params.get("retry-on-timeout", True)
retry_on_error = params.get("retry-on-error", False)
for attempt in range(max_attempts):
last_attempt = attempt + 1 == max_attempts
try:
return_value = self.delegate(es, params)
if last_attempt or not retry_on_error:
return return_value
# we can determine success if and only if the runner returns a dict. Otherwise, we have to assume it was fine.
elif isinstance(return_value, dict):
if return_value.get("success", True):
return return_value
else:
time.sleep(sleep_time)
else:
return return_value
except (socket.timeout, elasticsearch.exceptions.ConnectionError):
if last_attempt or not retry_on_timeout:
raise
else:
time.sleep(sleep_time)
except elasticsearch.exceptions.TransportError as e:
if last_attempt or not retry_on_timeout:
raise e
elif e.status_code == 408:
self.logger.debug("%s has timed out.", repr(self.delegate))
time.sleep(sleep_time)
else:
raise e
def __exit__(self, exc_type, exc_val, exc_tb):
return self.delegate.__exit__(exc_type, exc_val, exc_tb)
def __repr__(self, *args, **kwargs):
return "retryable %s" % repr(self.delegate)
|
py | 7dfdd445eb4e9880ced383ecea6f574ea78a1043 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.FileItem import FileItem
from alipay.aop.api.constant.ParamConstants import *
class AlipayMobileCodeQueryRequest(object):
def __init__(self, biz_model=None):
self._biz_model = biz_model
self._qr_token = None
self._version = "1.0"
self._terminal_type = None
self._terminal_info = None
self._prod_code = None
self._notify_url = None
self._return_url = None
self._udf_params = None
self._need_encrypt = False
@property
def biz_model(self):
return self._biz_model
@biz_model.setter
def biz_model(self, value):
self._biz_model = value
@property
def qr_token(self):
return self._qr_token
@qr_token.setter
def qr_token(self, value):
self._qr_token = value
@property
def version(self):
return self._version
@version.setter
def version(self, value):
self._version = value
@property
def terminal_type(self):
return self._terminal_type
@terminal_type.setter
def terminal_type(self, value):
self._terminal_type = value
@property
def terminal_info(self):
return self._terminal_info
@terminal_info.setter
def terminal_info(self, value):
self._terminal_info = value
@property
def prod_code(self):
return self._prod_code
@prod_code.setter
def prod_code(self, value):
self._prod_code = value
@property
def notify_url(self):
return self._notify_url
@notify_url.setter
def notify_url(self, value):
self._notify_url = value
@property
def return_url(self):
return self._return_url
@return_url.setter
def return_url(self, value):
self._return_url = value
@property
def udf_params(self):
return self._udf_params
@udf_params.setter
def udf_params(self, value):
if not isinstance(value, dict):
return
self._udf_params = value
@property
def need_encrypt(self):
return self._need_encrypt
@need_encrypt.setter
def need_encrypt(self, value):
self._need_encrypt = value
def add_other_text_param(self, key, value):
if not self.udf_params:
self.udf_params = dict()
self.udf_params[key] = value
def get_params(self):
params = dict()
params[P_METHOD] = 'alipay.mobile.code.query'
params[P_VERSION] = self.version
if self.biz_model:
params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
if self.qr_token:
if hasattr(self.qr_token, 'to_alipay_dict'):
params['qr_token'] = json.dumps(obj=self.qr_token.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
else:
params['qr_token'] = self.qr_token
if self.terminal_type:
params['terminal_type'] = self.terminal_type
if self.terminal_info:
params['terminal_info'] = self.terminal_info
if self.prod_code:
params['prod_code'] = self.prod_code
if self.notify_url:
params['notify_url'] = self.notify_url
if self.return_url:
params['return_url'] = self.return_url
if self.udf_params:
params.update(self.udf_params)
return params
def get_multipart_params(self):
multipart_params = dict()
return multipart_params
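# Illustrative sketch (not part of the original file): building the flat request
# parameter dict for the alipay.mobile.code.query method. The token and URL
# values are made up for illustration.
def _example_build_query_params():
    request = AlipayMobileCodeQueryRequest()
    request.qr_token = "example-qr-token"
    request.notify_url = "https://example.com/notify"
    # returns a dict containing P_METHOD, P_VERSION, qr_token and notify_url
    return request.get_params()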
|
py | 7dfdd46569d5c984d54b270c937db229a0531954 | from flask import current_app
from flask_wtf import FlaskForm
from wtforms import (SubmitField, TextAreaField, SelectField, FieldList, FormField,
StringField, DecimalField, IntegerField, HiddenField, BooleanField,
FloatField)
from wtforms.validators import (DataRequired, Length, InputRequired, ValidationError,
Optional)
from wtforms.widgets import html5
import os, glob
import concurrent
from . import utils
import logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s:%(name)s:%(message)s')
''' Set up the file handler to deal with logging to a file '''
file_handler = logging.FileHandler('logs/imaging_forms.log')
file_handler.setFormatter(formatter)
stream_handler = logging.StreamHandler() # level already set at debug from logger.setLevel() above
stream_handler.setFormatter(formatter)
logger.addHandler(stream_handler)
logger.addHandler(file_handler)
""" For the individual imaging sample entry form """
class ChannelForm(FlaskForm):
""" A form that is used in ImagingForm() via a FormField Fieldlist
so I don't have to write the imaging parameters out for each channel
"""
username = HiddenField('Username')
request_name = HiddenField('Request name')
sample_name = HiddenField('Sample name')
imaging_request_number = HiddenField('imaging request number')
channel_name = HiddenField('Channel name')
image_resolution = HiddenField('Image resolution')
zoom_body_magnification = DecimalField('Zoom body magnification',default=1.0,validators=[Optional()])
image_orientation = SelectField('Image orientation',choices=[('sagittal','sagittal'),('coronal','coronal'),
('horizontal','horizontal')],default='horizontal',
validators=[DataRequired()])
ventral_up = BooleanField('Imaged ventral side up?',validators=[Optional()],
default=False)
left_lightsheet_used = BooleanField('Left',default=False)
right_lightsheet_used = BooleanField('Right',default=False)
tiling_scheme = StringField('Tiling scheme (e.g. 3x3) -- n_rows x n_columns --',validators=[DataRequired()])
tiling_overlap = StringField('Tiling overlap (number between 0.0 and 1.0; leave as default if unsure or not using tiling)',
validators=[DataRequired()])
z_step = StringField('Z resolution (microns)')
number_of_z_planes = IntegerField('Number of z planes',
widget=html5.NumberInput())
rawdata_subfolder = TextAreaField('channel subfolder')
delete_channel_button = SubmitField("Delete channel")
add_flipped_channel_button = SubmitField("Add ventral up channel")
def validate_right_lightsheet_used(self,right_lightsheet_used):
if not (self.left_lightsheet_used.data or self.right_lightsheet_used.data):
raise ValidationError(" At least one light sheet required.")
def validate_tiling_overlap(self,tiling_overlap):
try:
fl_val = float(tiling_overlap.data)
except:
raise ValidationError("Tiling overlap must be a number between 0.0 and 1.0")
if fl_val < 0.0 or fl_val >= 1.0:
raise ValidationError("Tiling overlap must be a number between 0.0 and 1.0")
def validate_tiling_scheme(self,tiling_scheme):
try:
n_rows = int(tiling_scheme.data.lower().split('x')[0])
n_columns = int(tiling_scheme.data.lower().split('x')[1])
except:
raise ValidationError("Tiling scheme is not in correct format."
" Make sure it is like: 1x1 with no spaces.")
if self.image_resolution.data in ['1.1x','1.3x']:
if n_rows > 2 or n_columns > 2:
raise ValidationError("Tiling scheme must not exceed 2x2 for this resolution")
elif self.image_resolution.data in ['2x','4x']:
if n_rows > 4 or n_columns > 4:
raise ValidationError("Tiling scheme must not exceed 4x4 for this resolution")
elif (self.image_resolution.data == '3.6x' or self.image_resolution.data == '15x') and (n_rows > 10 or n_columns > 10):
raise ValidationError("Tiling scheme must not exceed 10x10 for this resolution")
def validate_z_step(self,z_step):
if not z_step.data:
raise ValidationError("z_step required")
try:
z_step = float(z_step.data)
except:
raise ValidationError("z_step must be a number")
if z_step < 2:
raise ValidationError("z_step must be a positive number larger than 2 microns")
elif z_step > 1000:
raise ValidationError("z_step greater than 1000 microns is not supported by the microscope.")
def validate_number_of_z_planes(self,number_of_z_planes):
if not number_of_z_planes.data:
raise ValidationError("number_of_z_planes required")
try:
number_of_z_planes = float(number_of_z_planes.data)
except:
raise ValidationError("number of z_planes must be a number")
if number_of_z_planes <= 0:
raise ValidationError("The number of z planes must be a positive number")
elif number_of_z_planes > 5500:
raise ValidationError("More than 5500 z planes is not supported by the microscope.")
def validate_rawdata_subfolder(self,rawdata_subfolder):
if not rawdata_subfolder.data:
raise ValidationError("Rawdata subfolder required")
rawdata_subfolder = rawdata_subfolder.data.rstrip("/").strip()
# Check to make sure no spaces contained in rawdata_subfolder
if " " in rawdata_subfolder:
raise ValidationError("Rawdata subfolder must not contain spaces")
class ImageResolutionForm(FlaskForm):
""" A form for each image resolution that a user picks """
max_number_of_channels = 8 # 4 channels and each of them can have a "flipped" copy
image_resolution = HiddenField('image resolution')
notes_for_clearer = TextAreaField('Notes left for clearer:')
notes_for_imager = TextAreaField('Notes left for imager:')
change_resolution = BooleanField("Change image resolution?",default=False)
new_image_resolution = SelectField('Select the new image resolution you want to use:',
choices=[("1.3x","1.3x"),("1.1x","1.1x"),
("2x","2x"),("4x","4x"),
("3.6x","3.6x"),("15x","15x")],validators=[Optional()])
update_resolution_button = SubmitField('Update')
new_channel_dropdown = SelectField("Add additional channel?",validators=[Optional()])
new_channel_purpose = SelectField("What type of imaging?",choices=[('registration','registration'),
('injection_detection','injection_detection'),('probe_detection','probe_detection'),
('cell_detection','cell_detection'),
('generic_imaging','generic_imaging')],validators=[Optional()])
new_channel_button = SubmitField("Add channel")
channel_forms = FieldList(FormField(ChannelForm),min_entries=0,max_entries=max_number_of_channels)
def validate_channel_forms(self,channel_forms):
subfolder_dict = {'dorsal':{},'ventral':{}}
logger.debug("Looping over channel forms to validate each")
for channel_form in channel_forms:
channel_dict = channel_form.data
username = channel_dict['username']
request_name = channel_dict['request_name']
sample_name = channel_dict['sample_name']
imaging_request_number = channel_dict['imaging_request_number']
channel_name = channel_dict['channel_name']
logger.debug(f"Validating channel: {channel_name}")
logger.debug(f"Imaging request number: {imaging_request_number}")
number_of_z_planes = channel_dict['number_of_z_planes']
left_lightsheet_used = channel_dict['left_lightsheet_used']
right_lightsheet_used = channel_dict['right_lightsheet_used']
tiling_scheme = channel_dict['tiling_scheme']
rawdata_subfolder = channel_dict['rawdata_subfolder'].rstrip("/").strip()
ventral_up = channel_dict['ventral_up']
if ventral_up == True:
rawdata_fullpath = os.path.join(current_app.config['DATA_BUCKET_ROOTPATH'],
username,request_name,sample_name,
f'imaging_request_{imaging_request_number}',
'rawdata',f'resolution_{self.image_resolution.data}_ventral_up',rawdata_subfolder)
else:
rawdata_fullpath = os.path.join(current_app.config['DATA_BUCKET_ROOTPATH'],
username,request_name,sample_name,
f'imaging_request_{imaging_request_number}',
'rawdata',f'resolution_{self.image_resolution.data}',rawdata_subfolder)
logger.debug(f"Searching in full rawdata path: {rawdata_fullpath}")
if ventral_up:
topkey = 'ventral'
else:
topkey = 'dorsal'
if rawdata_subfolder in subfolder_dict[topkey].keys():
subfolder_dict[topkey][rawdata_subfolder].append(channel_dict)
else:
subfolder_dict[topkey][rawdata_subfolder] = [channel_dict]
channel_index = len(subfolder_dict[topkey][rawdata_subfolder]) - 1
logger.debug(f"Channel index: {channel_index}")
n_columns = int(tiling_scheme.lower().split('x')[0])
n_rows = int(tiling_scheme.lower().split('x')[1])
logger.debug("tiling scheme:")
logger.debug(tiling_scheme)
if self.image_resolution.data in ['3.6x','15x']:
number_of_rawfiles_expected = number_of_z_planes*n_rows*n_columns
""" For SmartSPIM, make sure the number of folders
represents the tiling scheme, should be row/col.
Also count files in the deepest directories to get total file count"""
col_dirs = glob.glob(rawdata_fullpath + '/' + '[0-9]'*6 + '/')
logger.debug(f"found {len(col_dirs)} column directories")
logger.debug(f"expected {n_columns} column directories")
if len(col_dirs) != n_columns:
raise ValidationError(
f"You entered that there should be {n_columns} tiling column folders in rawdata folder, "
f"but found {len(col_dirs)}")
else:
logger.debug("have correct number of column tile folders")
first_col_dir = col_dirs[0]
row_dirs = glob.glob(first_col_dir + '/??????_??????/')
if len(row_dirs) != n_rows:
raise ValidationError(
f"You entered that there should be {n_rows} tiling row folders in each tiling column folder, "
f"but found {len(row_dirs)}")
else:
all_subdirs = glob.glob(rawdata_fullpath + '/??????/??????_??????/')
total_counts = []
with concurrent.futures.ProcessPoolExecutor(max_workers=None) as executor:
for count in executor.map(utils.count_files, all_subdirs):
total_counts.append(count)
number_of_rawfiles_found = sum(total_counts)
else:
""" For LaVision
We have to be careful here
because the raw data filenames will include C00 if there
is only one light sheet used, regardless of whether it is
left or right. If both are used,
then the left lightsheet files always have C00 in filenames
and right lightsheet files always have C01 in filenames.
"""
number_of_rawfiles_found = 0 # initialize, will add to it below
number_of_rawfiles_expected = number_of_z_planes*(left_lightsheet_used+right_lightsheet_used)*n_rows*n_columns
# First identify if any files in the folder do not have the tiling info, e.g. [00 x 00] in them
# Brainpipe does not handle these files well so we need to rename them
all_raw_files_no_tiling = glob.glob(rawdata_fullpath + f'/*RawDataStack_*Filter*.tif')
if len(all_raw_files_no_tiling) > 0:
logger.debug("Found raw files with no tiling string, e.g. [00 x 00]. Renaming them")
for f in all_raw_files_no_tiling:
renamed_f = f.replace('RawDataStack_','RawDataStack[00 x 00]_')
os.rename(f,renamed_f)
if left_lightsheet_used and right_lightsheet_used:
logger.debug("Left and right lightsheets used")
number_of_rawfiles_found_left_lightsheet = \
len(glob.glob(rawdata_fullpath + f'/*RawDataStack*_C00_*Filter000{channel_index}*'))
number_of_rawfiles_found += number_of_rawfiles_found_left_lightsheet
number_of_rawfiles_found_right_lightsheet = \
len(glob.glob(rawdata_fullpath + f'/*RawDataStack*_C01_*Filter000{channel_index}*'))
number_of_rawfiles_found += number_of_rawfiles_found_right_lightsheet
else:
# doesn't matter if its left or right lightsheet. Since there is only one, their glob patterns will be identical
number_of_rawfiles_found = \
len(glob.glob(rawdata_fullpath + f'/*RawDataStack*_C00_*Filter000{channel_index}*'))
if number_of_rawfiles_found != number_of_rawfiles_expected:
raise ValidationError(f"Channel: {channel_name} - you entered that there should be {number_of_rawfiles_expected} raw files in rawdata folder, "
f"but found {number_of_rawfiles_found}")
""" Now make sure imaging parameters are the same for all channels within the same subfolder """
common_key_list = ['image_orientation','left_lightsheet_used',
'right_lightsheet_used','tiling_scheme','tiling_overlap',
'z_step','number_of_z_planes']
all_tiling_schemes = [] # also keep track of tiling parameters for all subfolders at this resolution
all_tiling_overlaps = [] # also keep track of tiling parameters for all subfolders at this resolution
for subfolder in subfolder_dict[topkey].keys(): # topkey is 'dorsal' or 'ventral'
channel_dict_list = subfolder_dict[topkey][subfolder]
for d in channel_dict_list:
all_tiling_schemes.append(d['tiling_scheme'])
all_tiling_overlaps.append(d['tiling_overlap'])
if not all([list(map(d.get,common_key_list)) == \
list(map(channel_dict_list[0].get,common_key_list)) \
for d in channel_dict_list]):
raise ValidationError(f"For raw data subfolder: {subfolder}. "
"Tiling and imaging parameters must be identical"
" for all channels in the same subfolder. Check your entries.")
""" Now make sure tiling parameters are same for all channels at each resolution """
if (not all([x==all_tiling_overlaps[0] for x in all_tiling_overlaps])
or (not all([x==all_tiling_schemes[0] for x in all_tiling_schemes]))):
validation_str = "All tiling parameters must be the same for each channel of a given resolution"
raise ValidationError(validation_str)
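# Illustrative sketch (not part of the original module): the raw-file count that
# validate_channel_forms expects for a LaVision acquisition is the product of the
# number of z planes, the number of light sheets used and the number of tiles.
# The argument values are made-up examples.
def _example_expected_lavision_rawfile_count(number_of_z_planes=500,
                                             left_lightsheet_used=True,
                                             right_lightsheet_used=True,
                                             n_rows=2, n_columns=2):
    # booleans are summed as 0/1, exactly as in the validator above
    return number_of_z_planes * (left_lightsheet_used + right_lightsheet_used) * n_rows * n_columns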
class ImagingSampleForm(FlaskForm):
""" The form for entering imaging information """
username = HiddenField('username')
request_name = HiddenField('request_name')
sample_name = HiddenField('sample_name')
imaging_request_number = HiddenField('imaging_request_number')
max_number_of_image_resolutions = 4
notes_from_imaging = TextAreaField("Note down anything additional about the imaging"
" of this sample that you would like recorded:")
image_resolution_forms = FieldList(FormField(ImageResolutionForm),min_entries=0,max_entries=max_number_of_image_resolutions)
skip_sample_button = SubmitField('Click if you do not wish to image this sample')
submit = SubmitField('Click when imaging for this sample is complete and data are on bucket')
class ChannelBatchForm(FlaskForm):
""" A form that is used via a FormField Fieldlist
so I don't have to write the imaging parameters out for each channel
"""
channel_name = HiddenField('Channel name')
imaging_request_number = HiddenField('imaging request number')
image_resolution = HiddenField('Image resolution')
zoom_body_magnification = DecimalField('Zoom body magnification',
default=1.0,validators=[Optional()])
image_orientation = SelectField('Image orientation',
choices=[('sagittal','sagittal'),('coronal','coronal'),
('horizontal','horizontal')],default='horizontal',
validators=[Optional()])
ventral_up = BooleanField('Imaged ventral side up?',validators=[Optional()],
default=False)
left_lightsheet_used = BooleanField('Left',default=False)
right_lightsheet_used = BooleanField('Right',default=False)
tiling_scheme = StringField('Tiling scheme (e.g. 3x3) -- n_rows x n_columns --',
validators=[Optional()])
tiling_overlap = StringField('Tiling overlap (number between 0.0 and 1.0; leave as default if unsure or not using tiling)',
validators=[Optional()])
z_step = StringField('Z resolution (microns)',validators=[Optional()])
delete_channel_button = SubmitField("Delete channel")
add_flipped_channel_button = SubmitField("Add ventral up channel")
def validate_right_lightsheet_used(self,right_lightsheet_used):
if not (self.left_lightsheet_used.data or self.right_lightsheet_used.data):
raise ValidationError(" At least one light sheet required.")
def validate_tiling_overlap(self,tiling_overlap):
try:
fl_val = float(tiling_overlap.data)
except:
raise ValidationError("Tiling overlap must be a number between 0.0 and 1.0")
if fl_val < 0.0 or fl_val >= 1.0:
raise ValidationError("Tiling overlap must be a number between 0.0 and 1.0")
def validate_tiling_scheme(self,tiling_scheme):
try:
n_rows = int(tiling_scheme.data.lower().split('x')[0])
n_columns = int(tiling_scheme.data.lower().split('x')[1])
except:
raise ValidationError("Tiling scheme is not in correct format."
" Make sure it is like: 1x1 with no spaces.")
if self.image_resolution.data in ['1.1x','1.3x']:
if n_rows > 2 or n_columns > 2:
raise ValidationError("Tiling scheme must not exceed 2x2 for this resolution")
elif self.image_resolution.data in ['2x','4x']:
if n_rows > 4 or n_columns > 4:
raise ValidationError("Tiling scheme must not exceed 4x4 for this resolution")
elif (self.image_resolution.data == '3.6x' or self.image_resolution.data == '15x') and (n_rows > 10 or n_columns > 10):
raise ValidationError("Tiling scheme must not exceed 10x10 for this resolution")
def validate_z_step(self,z_step):
if not z_step.data:
raise ValidationError("z_step required")
try:
z_step = float(z_step.data)
except:
raise ValidationError("z_step must be a number")
if z_step < 2:
raise ValidationError("z_step must be a positive number larger than 2 microns")
elif z_step > 1000:
raise ValidationError("z_step greater than 1000 microns is not supported by the microscope.")
class ImageBatchResolutionForm(FlaskForm):
""" A form for each image resolution that a user picks """
max_number_of_channels = 4
image_resolution = HiddenField('image resolution')
notes_for_clearer = TextAreaField('Notes left for clearer:')
notes_for_imager = TextAreaField('Notes left for imager:')
change_resolution = BooleanField("Change image resolution?",default=False)
new_image_resolution = SelectField('Select the new image resolution you want to use:',
choices=[('1.3x','1.3x'),
('4x','4x'),('1.1x','1.1x'),('2x','2x')],validators=[Optional()])
update_resolution_button = SubmitField('Update')
new_channel_dropdown = SelectField("Add additional channel?",
validators=[Optional()])
new_channel_purpose = SelectField("What type of imaging?",choices=[('registration','registration'),
('injection_detection','injection_detection'),('probe_detection','probe_detection'),
('cell_detection','cell_detection'),
('generic_imaging','generic_imaging')],validators=[Optional()])
new_channel_button = SubmitField("Add channel")
channel_forms = FieldList(FormField(ChannelBatchForm),min_entries=0,max_entries=max_number_of_channels)
class ImagingBatchForm(FlaskForm):
""" The form for entering batch imaging information """
username = HiddenField('username')
request_name = HiddenField('request_name')
sample_name = HiddenField('sample_name')
imaging_request_number = HiddenField('imaging_request_number')
max_number_of_image_resolutions = 4
max_number_of_samples = 50 # per request and therefore per imaging batch
notes_from_imaging = TextAreaField("Note down anything additional about the imaging"
" that you would like recorded:")
image_resolution_batch_forms = FieldList(
FormField(ImageBatchResolutionForm),
min_entries=0,max_entries=max_number_of_image_resolutions)
apply_batch_parameters_button = SubmitField('Apply these parameters to all samples')
sample_forms = FieldList(FormField(ImagingSampleForm),min_entries=0,max_entries=max_number_of_samples)
submit = SubmitField('Click when done imaging all samples')
""" For follow up imaging requests """
class NewImagingChannelForm(FlaskForm):
""" Used by other forms in a FieldList """
channel_name = HiddenField('Channel Name')
registration = BooleanField('Registration',default=False)
injection_detection = BooleanField('Injection Detection',default=False)
probe_detection = BooleanField('Probe Detection',default=False)
cell_detection = BooleanField('Cell Detection',default=False)
generic_imaging = BooleanField('Generic imaging',default=False)
class NewImagingImageResolutionForm(FlaskForm):
""" A form for each image resolution that a user picks """
image_resolution = HiddenField('image resolution')
channel_forms = FieldList(FormField(NewImagingChannelForm),min_entries=4,max_entries=4)
notes_for_imager = TextAreaField('''Special notes for imaging
(e.g. z step size, whether to image ventral-side up, region of brain to image, exposure time, \
suggested tiling scheme) -- max 1024 characters --''',
validators=[Length(max=1024)])
notes_for_processor = TextAreaField('''Special notes for processing
-- max 1024 characters --''',validators=[Length(max=1024)])
atlas_name = SelectField('Atlas for registration',
choices=[('allen_2017','Allen atlas (2017)'),('allen_2011','Allen atlas (pre-2017)'),
('princeton_mouse_atlas','Princeton Mouse Atlas'),
('paxinos','Franklin-Paxinos Mouse Brain Atlas')],validators=[Optional()])
final_orientation = SelectField('Output orientation',
choices=[('sagittal','sagittal'),('coronal','coronal'),
('horizontal','horizontal')],default='sagittal',validators=[Optional()])
class NewImagingForm(FlaskForm):
""" A form that is used in ExpForm() via a FormField FieldList
so I don't have to write the imaging parameters out for each sample
"""
sample_name = HiddenField('sample name')
reimaging_this_sample = BooleanField('I need to reimage this sample')
image_resolution_forsetup = SelectField('Select an image resolution you want to use:',
choices=[('1.1x','1.1x (LaVision)'),('1.3x','1.3x (LaVision, for continuity with older experiments)'),
('2x','2x (LaVision)'),('3.6x','3.6x (SmartSPIM)'),
('4x','4x (LaVision, for continuity with older experiments)')],validators=[Optional()],default='')
image_resolution_forms = FieldList(FormField(NewImagingImageResolutionForm),min_entries=0,max_entries=5)
new_image_resolution_form_submit = SubmitField('Set up imaging parameters') # renders a new resolution table
class NewImagingRequestForm(FlaskForm):
""" The form for a new imaging request """
max_number_of_samples = 50
number_of_samples = HiddenField('number of samples')
species = HiddenField('species')
""" Imaging """
self_imaging = BooleanField('Check if you plan to do the imaging yourself',default=False)
imaging_samples = FieldList(FormField(NewImagingForm),min_entries=0,max_entries=max_number_of_samples)
uniform_imaging_submit_button = SubmitField('Apply these imaging/processing parameters to all samples') # setting default=True does not do anything, so I have to do it in the view function: https://github.com/lepture/flask-wtf/issues/362
""" Submission """
submit = SubmitField('Submit request')
""" Custom validators """
def validate_imaging_samples(self,imaging_samples):
"""
Make sure that each resolution sub-form has at least
one option selected, and if that option is one of the
detection algorithms, then registration must also be selected.
Also make sure that the user cannot create multiple
image resolution sub-forms for the same image resolution.
Also make sure that if registration is used
in a given image resolution table, the output_orientation
must be sagittal
Also make sure there can only be 1 registration channel per image resolution
"""
any_samples_need_reimaging = any([x['reimaging_this_sample'] for x in imaging_samples.data])
if not any_samples_need_reimaging:
raise ValidationError("At least one sample needs to be selected for reimaging to submit this form")
for ii in range(len(imaging_samples.data)):
imaging_sample_dict = imaging_samples[ii].data
sample_name = self.imaging_samples[ii].data['sample_name']
reimaging_this_sample = self.imaging_samples[ii].data['reimaging_this_sample']
if not reimaging_this_sample:
continue
current_image_resolutions_rendered = []
if imaging_sample_dict['image_resolution_forms'] == [] and self.submit.data == True:
raise ValidationError(f"Sample name: {sample_name}, you must set up"
" the imaging parameters for at least one image resolution")
for resolution_form_dict in imaging_sample_dict['image_resolution_forms']:
image_resolution = resolution_form_dict['image_resolution']
current_image_resolutions_rendered.append(image_resolution)
channel_dict_list = resolution_form_dict['channel_forms']
selected_imaging_modes = [key for channel_dict in channel_dict_list \
for key in channel_dict if key in current_app.config['IMAGING_MODES'] and channel_dict[key] == True]
if selected_imaging_modes.count('registration') > 1:
raise ValidationError("There can only be one registration channel per image resolution")
if selected_imaging_modes == []:
raise ValidationError(f"The image resolution table: {image_resolution}"
f" for sample name: {sample_name} is empty. Please select at least one option. ")
if 'registration' in selected_imaging_modes and resolution_form_dict['final_orientation'] != 'sagittal':
raise ValidationError(f"Sample name: {sample_name}, image resolution table: {image_resolution}:"
f" Output orientation must be sagittal since registration was selected")
elif self.species.data != 'mouse' and \
('injection_detection' in selected_imaging_modes or \
'probe_detection' in selected_imaging_modes or \
'cell_detection' in selected_imaging_modes or \
'registration' in selected_imaging_modes):
raise ValidationError(f"Only generic imaging is currently available for species: {self.species.data}")
elif ('injection_detection' in selected_imaging_modes or \
'probe_detection' in selected_imaging_modes or \
'cell_detection' in selected_imaging_modes) and \
'registration' not in selected_imaging_modes:
raise ValidationError(f"Sample name: {sample_name}, image resolution table: {image_resolution}"
f" You must select a registration channel"
" when requesting any of the detection channels")
if imaging_sample_dict['new_image_resolution_form_submit'] == True:
image_resolution = imaging_sample_dict['image_resolution_forsetup']
if image_resolution in current_image_resolutions_rendered:
raise ValidationError(f"You tried to make a table for image_resolution {image_resolution}"
f". But that resolution was already picked for this sample: {sample_name}.") |
py | 7dfdd52792eafb538d38add8fa3fb68f46f8511b | import dataclasses
import typing
from stests.core.types.infra.enums import NetworkStatus
from stests.core.types.infra.enums import NetworkType
@dataclasses.dataclass
class Network:
"""A test network.
"""
# Primary faucet associated with network.
faucet: typing.Optional[typing.Any]
# Count of bootstrap nodes associated with network.
count_of_bootstrap_nodes: typing.Optional[int]
# Count of genesis nodes associated with network.
count_of_genesis_nodes: typing.Optional[int]
# Name of chain associated with network.
chain_name: typing.Optional[str]
# Numerical index to distinguish between multiple deployments of the same network type, e.g. lrt1, lrt2 ...etc.
index: int
# Network's name, e.g. LRT-01.
name: str
# Network's raw name, e.g. lrt1.
name_raw: str
# Current network status.
status: NetworkStatus
# Type of network, e.g. local, lrt, proof-of-concept ...etc.
typeof: NetworkType
@dataclasses.dataclass
class NetworkIdentifier:
"""Information required to disambiguate between networks.
"""
# Internal name of network, e.g. LRT-01
name: str
@property
def index(self) -> int:
return int(self.name.split("-")[1])
@property
def type(self) -> NetworkType:
return NetworkType[self.name.split("-")[0]]
@property
def key(self) -> str:
return f"global.network:{self.name}"
|
py | 7dfdd54948fe8073467dabc83e905de8c3c30a48 | from pyrogram import Client
from pyrogram.types import InlineKeyboardMarkup
from Data import Data
# Callbacks
@Client.on_callback_query()
async def _calls(anonbot, callback_query):
chat_id = callback_query.from_user.id
message_id = callback_query.message.message_id
if callback_query.data.lower() == "home":
user = await anonbot.get_me()
mention = user["mention"]
await anonbot.edit_message_text(
chat_id=chat_id,
message_id=message_id,
text=Data.START.format(callback_query.from_user.mention, mention),
reply_markup=InlineKeyboardMarkup(Data.buttons),
)
if callback_query.data.lower() == "about":
await anonbot.edit_message_text(
chat_id=chat_id,
message_id=message_id,
text=Data.ABOUT,
disable_web_page_preview=True,
reply_markup=InlineKeyboardMarkup(Data.home_button),
)
if callback_query.data.lower() == "remove":
caption = ""
await anonbot.edit_message_caption(
chat_id=chat_id, message_id=message_id, caption=caption, reply_markup=InlineKeyboardMarkup([Data.add_button])
)
if callback_query.data.lower() == "add":
caption = callback_query.message.reply_to_message.caption
if caption:
await anonbot.edit_message_caption(
chat_id=chat_id, message_id=message_id, caption=caption, reply_markup=InlineKeyboardMarkup([Data.remove_button])
)
else:
await callback_query.answer("The original message has been deleted or their is no previous caption.", show_alert=True)
|
py | 7dfdd6edeeb2228a547746b98e2a079680c00810 | # Copyright 2020 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import sys
import tempfile
from glob import glob
import torch
from PIL import Image
from torch.utils.data import DataLoader
import monai
from monai.data import create_test_image_2d, list_data_collate, decollate_batch
from monai.inferers import sliding_window_inference
from monai.metrics import DiceMetric
from monai.networks.nets import UNet
from monai.transforms import Activations, AddChanneld, AsDiscrete, Compose, LoadImaged, SaveImage, ScaleIntensityd, EnsureTyped, EnsureType
def main(tempdir):
monai.config.print_config()
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
print(f"generating synthetic data to {tempdir} (this may take a while)")
for i in range(5):
im, seg = create_test_image_2d(128, 128, num_seg_classes=1)
Image.fromarray((im * 255).astype("uint8")).save(os.path.join(tempdir, f"img{i:d}.png"))
Image.fromarray((seg * 255).astype("uint8")).save(os.path.join(tempdir, f"seg{i:d}.png"))
images = sorted(glob(os.path.join(tempdir, "img*.png")))
segs = sorted(glob(os.path.join(tempdir, "seg*.png")))
val_files = [{"img": img, "seg": seg} for img, seg in zip(images, segs)]
# define transforms for image and segmentation
val_transforms = Compose(
[
LoadImaged(keys=["img", "seg"]),
AddChanneld(keys=["img", "seg"]),
ScaleIntensityd(keys=["img", "seg"]),
EnsureTyped(keys=["img", "seg"]),
]
)
val_ds = monai.data.Dataset(data=val_files, transform=val_transforms)
# sliding window inference needs to input 1 image in every iteration
val_loader = DataLoader(val_ds, batch_size=1, num_workers=4, collate_fn=list_data_collate)
dice_metric = DiceMetric(include_background=True, reduction="mean", get_not_nans=False)
post_trans = Compose([EnsureType(), Activations(sigmoid=True), AsDiscrete(threshold=0.5)])
saver = SaveImage(output_dir="./output", output_ext=".png", output_postfix="seg")
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = UNet(
spatial_dims=2,
in_channels=1,
out_channels=1,
channels=(16, 32, 64, 128, 256),
strides=(2, 2, 2, 2),
num_res_units=2,
).to(device)
model.load_state_dict(torch.load("best_metric_model_segmentation2d_dict.pth"))
model.eval()
with torch.no_grad():
for val_data in val_loader:
val_images, val_labels = val_data["img"].to(device), val_data["seg"].to(device)
# define sliding window size and batch size for windows inference
roi_size = (96, 96)
sw_batch_size = 4
val_outputs = sliding_window_inference(val_images, roi_size, sw_batch_size, model)
val_outputs = [post_trans(i) for i in decollate_batch(val_outputs)]
val_labels = decollate_batch(val_labels)
# compute metric for current iteration
dice_metric(y_pred=val_outputs, y=val_labels)
for val_output in val_outputs:
saver(val_output)
# aggregate the final mean dice result
print("evaluation metric:", dice_metric.aggregate().item())
# reset the status
dice_metric.reset()
if __name__ == "__main__":
with tempfile.TemporaryDirectory() as tempdir:
main(tempdir)
|
py | 7dfdd80aee7d36079d6756586016ec5b0ab49d1c | import pandas as pd
import dash_core_components as dcc
import dash_html_components as html
import dash_bootstrap_components as dbc
from ventilators.utils import get_first_date, change2Percent
def get_transfers_visuals():
df = pd.read_csv('data/predicted_ventilator/transfers_table-ihme.csv', sep=",", parse_dates = ['Date'])
df.loc[:,'Date'] = pd.to_datetime(df['Date'], format='y%m%d').dt.date
p1 = df.Param1.unique()
p2 = df.Param2.unique()
p3 = sorted(df.Param3.unique())
min_date = min(df.Date.values)
max_date = max(df.Date.values)
del df
firstDate = get_first_date()
transfers_visuals = \
[
dbc.Row(
[
dbc.Col(
[
dbc.Card(
[
dbc.CardBody(
[
dcc.Markdown("How much of its base supply is each state willing to share?"),
dbc.Row(
[
dbc.Col(dcc.Markdown("**Pooling Fraction:**")),
dbc.Col(
html.Div(
dcc.Dropdown(
id = 'p1-transfer-dropdown',
options = [{'label': change2Percent(x), 'value': x} for x in p1],
value = '0.1',
),
id = "p1-transfer-dropdown-wrapper"
),
),
],
),
dbc.Tooltip(
"Example: for a pooling fraction of 10%, we guarantee that states will always keep at least 90% of their initial supply.",
target="p1-transfer-dropdown-wrapper",
),
],
),
],
className="h-100",
),
],
xs=12,
sm=12,
md=4,
lg=4,
),
dbc.Col(
[
dbc.Card(
[
dbc.CardBody(
[
dcc.Markdown("How much additional safety supply would states like to have?"),
dbc.Row(
[
dbc.Col(dcc.Markdown("**Buffer:**")),
dbc.Col(
html.Div(
dcc.Dropdown(
id = 'p2-transfer-dropdown',
options = [{'label': change2Percent(x), 'value': x} for x in p2],
value = '0.2',
),
id = "p2-transfer-dropdown-wrapper",
),
),
]
),
dbc.Tooltip(
"Example: for a buffer value of 20%, a projected demand of 1,000 ventilators implies a target supply of 1,200.",
target="p2-transfer-dropdown-wrapper",
),
],
),
],
className="h-100",
),
],
xs=12,
sm=12,
md=4,
lg=4,
),
dbc.Col(
[
dbc.Card(
[
dbc.CardBody(
[
dcc.Markdown("How much would you like to adjust the federal surge supply?"),
dbc.Row(
[
dbc.Col(dcc.Markdown("**Surge Supply:**")),
dbc.Col(
html.Div(
dcc.Dropdown(
id = 'p3-transfer-dropdown',
options = [{'label': change2Percent(x), 'value': x} for x in p3],
value = '0.75',
),
id = "p3-transfer-dropdown-wrapper",
),
),
]
),
dbc.Tooltip(
"Example: a value of 50% adjusts the baseline estimate of 450 ventilators per day to 225 ventilators per day.",
target="p3-transfer-dropdown-wrapper",
),
],
),
],
className="h-100",
),
],
xs=12,
sm=12,
md=4,
lg=4,
),
],
),
] + \
[
dbc.Row(
[
dbc.Col(
[
dbc.Alert("By optimizing ventilator allocation across states, we can quickly eliminate the shortage of ventilators in every state.",
color="primary"),
],
style={"marginTop": "1rem"},
)
]
)
] + \
[
dbc.Row(
[
dbc.Col(
[
html.Div(
id = 'us_transfers_graph',
children = [],
),
],
xs=12,
sm=12,
md=6,
lg=6,
),
dbc.Col(
[
html.Div(
id = 'us_map_transfers_vent',
children = [],
),
html.H6('Date:',id="date-projections"),
html.Div(
dcc.DatePickerSingle(
id='date-transfer-dropdown',
min_date_allowed=min_date,
max_date_allowed=max_date,
date=firstDate,
initial_visible_month=firstDate,
style={'marginBottom':20}
),
id="date-projections-picker-div"
),
],
xs=12,
sm=12,
md=6,
lg=6,
),
]
),
]
return transfers_visuals
|
py | 7dfdd92013fc141c48884abcc3bd47f08aa748f7 | from rest_framework import serializers
from audit.models import AuditLog, RelatedObjectType, FEATURE_CREATED_MESSAGE, FEATURE_UPDATED_MESSAGE, \
FEATURE_STATE_UPDATED_MESSAGE, IDENTITY_FEATURE_STATE_UPDATED_MESSAGE
from features.utils import get_value_type
from segments.serializers import SegmentSerializerBasic
from .models import Feature, FeatureState, FeatureStateValue, FeatureSegment, STRING, INTEGER, BOOLEAN
class CreateFeatureSerializer(serializers.ModelSerializer):
class Meta:
model = Feature
fields = "__all__"
read_only_fields = ('feature_segments',)
def to_internal_value(self, data):
if data.get('initial_value'):
data['initial_value'] = str(data.get('initial_value'))
return super(CreateFeatureSerializer, self).to_internal_value(data)
def create(self, validated_data):
if Feature.objects.filter(project=validated_data['project'], name__iexact=validated_data['name']).exists():
raise serializers.ValidationError("Feature with that name already exists for this "
"project. Note that feature names are case "
"insensitive.")
instance = super(CreateFeatureSerializer, self).create(validated_data)
self._create_audit_log(instance, True)
return instance
def update(self, instance, validated_data):
self._create_audit_log(instance, False)
return super(CreateFeatureSerializer, self).update(instance, validated_data)
def _create_audit_log(self, instance, created):
message = FEATURE_CREATED_MESSAGE % instance.name if created else FEATURE_UPDATED_MESSAGE % instance.name
request = self.context.get('request')
AuditLog.objects.create(author=request.user if request else None, related_object_id=instance.id,
related_object_type=RelatedObjectType.FEATURE.name,
project=instance.project,
log=message)
class FeatureSegmentCreateSerializer(serializers.ModelSerializer):
class Meta:
model = FeatureSegment
fields = ('feature', 'segment', 'priority', 'enabled', 'value')
def create(self, validated_data):
if validated_data.get('value') or validated_data.get('value') is False:
validated_data['value_type'] = get_value_type(validated_data['value'])
return super(FeatureSegmentCreateSerializer, self).create(validated_data)
def to_internal_value(self, data):
if data.get('value') or data.get('value') is False:
data['value'] = str(data['value'])
return super(FeatureSegmentCreateSerializer, self).to_internal_value(data)
class FeatureSegmentSerializer(serializers.ModelSerializer):
segment = SegmentSerializerBasic()
class Meta:
model = FeatureSegment
fields = ('segment', 'priority', 'enabled')
class FeatureSerializer(serializers.ModelSerializer):
feature_segments = FeatureSegmentSerializer(many=True)
class Meta:
model = Feature
fields = "__all__"
class FeatureStateSerializerFull(serializers.ModelSerializer):
feature = CreateFeatureSerializer()
feature_state_value = serializers.SerializerMethodField()
class Meta:
model = FeatureState
fields = "__all__"
def get_feature_state_value(self, obj):
return obj.get_feature_state_value()
class FeatureStateSerializerBasic(serializers.ModelSerializer):
feature_state_value = serializers.SerializerMethodField()
class Meta:
model = FeatureState
fields = "__all__"
def get_feature_state_value(self, obj):
return obj.get_feature_state_value()
def create(self, validated_data):
instance = super(FeatureStateSerializerBasic, self).create(validated_data)
self._create_audit_log(instance=instance)
return instance
def update(self, instance, validated_data):
updated_instance = super(FeatureStateSerializerBasic, self).update(instance, validated_data)
self._create_audit_log(updated_instance)
return updated_instance
def _create_audit_log(self, instance):
create_feature_state_audit_log(instance, self.context.get('request'))
class FeatureStateSerializerCreate(serializers.ModelSerializer):
class Meta:
model = FeatureState
fields = ('feature', 'enabled')
def create(self, validated_data):
instance = super(FeatureStateSerializerCreate, self).create(validated_data)
self._create_audit_log(instance=instance)
return instance
def _create_audit_log(self, instance):
create_feature_state_audit_log(instance, self.context.get('request'))
def create_feature_state_audit_log(feature_state, request):
if feature_state.identity:
message = IDENTITY_FEATURE_STATE_UPDATED_MESSAGE % (feature_state.feature.name,
feature_state.identity.identifier)
else:
message = FEATURE_STATE_UPDATED_MESSAGE % feature_state.feature.name
AuditLog.objects.create(author=request.user if request else None,
related_object_id=feature_state.id,
related_object_type=RelatedObjectType.FEATURE_STATE.name,
environment=feature_state.environment,
project=feature_state.environment.project,
log=message)
class FeatureStateValueSerializer(serializers.ModelSerializer):
class Meta:
model = FeatureStateValue
fields = "__all__"
|
py | 7dfdd9ac5f2bfb92301bee7594ea8326707f6315 | #!/usr/bin/env python3
# Copyright (c) 2014-2019 The Zortcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test descendant package tracking carve-out allowing one final transaction in
an otherwise-full package as long as it has only one parent and is <= 10k in
size.
"""
from decimal import Decimal
from test_framework.test_framework import ZortcoinTestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error, satoshi_round
MAX_ANCESTORS = 25
MAX_DESCENDANTS = 25
class MempoolPackagesTest(ZortcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.extra_args = [["-maxorphantx=1000"]]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
# Build a transaction that spends parent_txid:vout
# Return amount sent
def chain_transaction(self, node, parent_txids, vouts, value, fee, num_outputs):
send_value = satoshi_round((value - fee)/num_outputs)
inputs = []
for (txid, vout) in zip(parent_txids, vouts):
inputs.append({'txid' : txid, 'vout' : vout})
outputs = {}
for _ in range(num_outputs):
outputs[node.getnewaddress()] = send_value
rawtx = node.createrawtransaction(inputs, outputs, 0, True)
signedtx = node.signrawtransactionwithwallet(rawtx)
txid = node.sendrawtransaction(signedtx['hex'])
fulltx = node.getrawtransaction(txid, 1)
assert len(fulltx['vout']) == num_outputs # make sure we didn't generate a change output
return (txid, send_value)
def run_test(self):
# Mine some blocks and have them mature.
self.nodes[0].generate(101)
utxo = self.nodes[0].listunspent(10)
txid = utxo[0]['txid']
vout = utxo[0]['vout']
value = utxo[0]['amount']
fee = Decimal("0.0002")
# MAX_ANCESTORS transactions off a confirmed tx should be fine
chain = []
for _ in range(4):
(txid, sent_value) = self.chain_transaction(self.nodes[0], [txid], [vout], value, fee, 2)
vout = 0
value = sent_value
chain.append([txid, value])
for _ in range(MAX_ANCESTORS - 4):
(txid, sent_value) = self.chain_transaction(self.nodes[0], [txid], [0], value, fee, 1)
value = sent_value
chain.append([txid, value])
(second_chain, second_chain_value) = self.chain_transaction(self.nodes[0], [utxo[1]['txid']], [utxo[1]['vout']], utxo[1]['amount'], fee, 1)
# Check mempool has MAX_ANCESTORS + 1 transactions in it
assert_equal(len(self.nodes[0].getrawmempool(True)), MAX_ANCESTORS + 1)
# Adding one more transaction on to the chain should fail.
assert_raises_rpc_error(-26, "too-long-mempool-chain, too many unconfirmed ancestors [limit: 25]", self.chain_transaction, self.nodes[0], [txid], [0], value, fee, 1)
# ...even if it chains on from some point in the middle of the chain.
assert_raises_rpc_error(-26, "too-long-mempool-chain, too many descendants", self.chain_transaction, self.nodes[0], [chain[2][0]], [1], chain[2][1], fee, 1)
assert_raises_rpc_error(-26, "too-long-mempool-chain, too many descendants", self.chain_transaction, self.nodes[0], [chain[1][0]], [1], chain[1][1], fee, 1)
# ...even if it chains on to two parent transactions with one in the chain.
assert_raises_rpc_error(-26, "too-long-mempool-chain, too many descendants", self.chain_transaction, self.nodes[0], [chain[0][0], second_chain], [1, 0], chain[0][1] + second_chain_value, fee, 1)
# ...especially if it's > 40k weight
assert_raises_rpc_error(-26, "too-long-mempool-chain, too many descendants", self.chain_transaction, self.nodes[0], [chain[0][0]], [1], chain[0][1], fee, 350)
# But not if it chains directly off the first transaction
(replacable_txid, replacable_orig_value) = self.chain_transaction(self.nodes[0], [chain[0][0]], [1], chain[0][1], fee, 1)
# and the second chain should work just fine
self.chain_transaction(self.nodes[0], [second_chain], [0], second_chain_value, fee, 1)
# Make sure we can RBF the chain which used our carve-out rule
second_tx_outputs = {self.nodes[0].getrawtransaction(replacable_txid, True)["vout"][0]['scriptPubKey']['addresses'][0]: replacable_orig_value - (Decimal(1) / Decimal(100))}
second_tx = self.nodes[0].createrawtransaction([{'txid': chain[0][0], 'vout': 1}], second_tx_outputs)
signed_second_tx = self.nodes[0].signrawtransactionwithwallet(second_tx)
self.nodes[0].sendrawtransaction(signed_second_tx['hex'])
# Finally, check that we added two transactions
assert_equal(len(self.nodes[0].getrawmempool(True)), MAX_ANCESTORS + 3)
if __name__ == '__main__':
MempoolPackagesTest().main()
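# --- Illustrative sketch (not part of the Core code or of the test above) ---
# The module docstring describes the package carve-out: a chain already at its
# descendant limit may still accept one final transaction if that transaction has
# exactly one unconfirmed parent and stays under the size cap (roughly 10k).
# The helper below is a hypothetical, simplified predicate for that condition,
# written only to illustrate the rule being exercised here.
def carve_out_applies(num_unconfirmed_parents, tx_vsize, size_cap=10000):
    """Toy check of the carve-out precondition described in the docstring."""
    return num_unconfirmed_parents == 1 and tx_vsize <= size_cap
# e.g. carve_out_applies(1, 9500) is True, while carve_out_applies(2, 9500) is False.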
|
py | 7dfddad5adf6beccd87711105882aed0a47ee84e | # exported from PySB model 'model'
from pysb import Model, Monomer, Parameter, Expression, Compartment, Rule, Observable, Initial, MatchOnce, Annotation, ANY, WILD
Model()
Monomer('Ligand', ['Receptor'])
Monomer('ParpU', ['C3A'])
Monomer('C8A', ['BidU', 'C3pro'])
Monomer('SmacM', ['BaxA'])
Monomer('BaxM', ['BidM', 'BaxA'])
Monomer('Apop', ['C3pro', 'Xiap'])
Monomer('Fadd', ['Receptor', 'C8pro'])
Monomer('SmacC', ['Xiap'])
Monomer('ParpC')
Monomer('Xiap', ['SmacC', 'Apop', 'C3A'])
Monomer('C9')
Monomer('C3ub')
Monomer('C8pro', ['Fadd', 'C6A'])
Monomer('C6A', ['C8pro'])
Monomer('C3pro', ['Apop', 'C8A'])
Monomer('CytoCM', ['BaxA'])
Monomer('CytoCC')
Monomer('BaxA', ['BaxM', 'BaxA_1', 'BaxA_2', 'SmacM', 'CytoCM'])
Monomer('ApafI')
Monomer('BidU', ['C8A'])
Monomer('BidT')
Monomer('C3A', ['Xiap', 'ParpU', 'C6pro'])
Monomer('ApafA')
Monomer('BidM', ['BaxM'])
Monomer('Receptor', ['Ligand', 'Fadd'])
Monomer('C6pro', ['C3A'])
Parameter('bind_0_Ligand_binder_Receptor_binder_target_2kf', 1.0)
Parameter('bind_0_Ligand_binder_Receptor_binder_target_1kr', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_2kf', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_1kr', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr', 1.0)
Parameter('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr', 1.0)
Parameter('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kf', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kr', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr', 1.0)
Parameter('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr', 1.0)
Parameter('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc', 1.0)
Parameter('pore_formation_0_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_0_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_1_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_1_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_2_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_2_BaxA_pore_1kr', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc', 1.0)
Parameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf', 1.0)
Parameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr', 1.0)
Parameter('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('Ligand_0', 1000.0)
Parameter('ParpU_0', 1000000.0)
Parameter('C8A_0', 0.0)
Parameter('SmacM_0', 100000.0)
Parameter('BaxM_0', 40000.0)
Parameter('Apop_0', 0.0)
Parameter('Fadd_0', 130000.0)
Parameter('SmacC_0', 0.0)
Parameter('ParpC_0', 0.0)
Parameter('Xiap_0', 184000.0)
Parameter('C9_0', 100000.0)
Parameter('C3ub_0', 0.0)
Parameter('C8pro_0', 130000.0)
Parameter('C6A_0', 0.0)
Parameter('C3pro_0', 21000.0)
Parameter('CytoCM_0', 500000.0)
Parameter('CytoCC_0', 0.0)
Parameter('BaxA_0', 0.0)
Parameter('ApafI_0', 100000.0)
Parameter('BidU_0', 171000.0)
Parameter('BidT_0', 0.0)
Parameter('C3A_0', 0.0)
Parameter('ApafA_0', 0.0)
Parameter('BidM_0', 0.0)
Parameter('Receptor_0', 100.0)
Parameter('C6pro_0', 100.0)
Observable('Ligand_obs', Ligand())
Observable('ParpU_obs', ParpU())
Observable('C8A_obs', C8A())
Observable('SmacM_obs', SmacM())
Observable('BaxM_obs', BaxM())
Observable('Apop_obs', Apop())
Observable('Fadd_obs', Fadd())
Observable('SmacC_obs', SmacC())
Observable('ParpC_obs', ParpC())
Observable('Xiap_obs', Xiap())
Observable('C9_obs', C9())
Observable('C3ub_obs', C3ub())
Observable('C8pro_obs', C8pro())
Observable('C6A_obs', C6A())
Observable('C3pro_obs', C3pro())
Observable('CytoCM_obs', CytoCM())
Observable('CytoCC_obs', CytoCC())
Observable('BaxA_obs', BaxA())
Observable('ApafI_obs', ApafI())
Observable('BidU_obs', BidU())
Observable('BidT_obs', BidT())
Observable('C3A_obs', C3A())
Observable('ApafA_obs', ApafA())
Observable('BidM_obs', BidM())
Observable('Receptor_obs', Receptor())
Observable('C6pro_obs', C6pro())
Rule('bind_0_Ligand_binder_Receptor_binder_target', Ligand(Receptor=None) + Receptor(Ligand=None, Fadd=None) | Ligand(Receptor=1) % Receptor(Ligand=1, Fadd=None), bind_0_Ligand_binder_Receptor_binder_target_2kf, bind_0_Ligand_binder_Receptor_binder_target_1kr)
Rule('bind_0_Receptor_binder_Fadd_binder_target', Receptor(Ligand=ANY, Fadd=None) + Fadd(Receptor=None, C8pro=None) | Receptor(Ligand=ANY, Fadd=1) % Fadd(Receptor=1, C8pro=None), bind_0_Receptor_binder_Fadd_binder_target_2kf, bind_0_Receptor_binder_Fadd_binder_target_1kr)
Rule('substrate_binding_0_Fadd_catalyzer_C8pro_substrate', Fadd(Receptor=ANY, C8pro=None) + C8pro(Fadd=None, C6A=None) | Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None), substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf, substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr)
Rule('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product', Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None) >> Fadd(Receptor=ANY, C8pro=None) + C8A(BidU=None, C3pro=None), catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc)
Rule('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=None, C3pro=None) + BidU(C8A=None) | C8A(BidU=1, C3pro=None) % BidU(C8A=1), catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf, catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr)
Rule('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=1, C3pro=None) % BidU(C8A=1) >> C8A(BidU=None, C3pro=None) + BidT(), catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc)
Rule('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex', ApafI() + CytoCC() | ApafA(), conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf, conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr)
Rule('inhibition_0_SmacC_inhibitor_Xiap_inh_target', SmacC(Xiap=None) + Xiap(SmacC=None, Apop=None, C3A=None) | SmacC(Xiap=1) % Xiap(SmacC=1, Apop=None, C3A=None), inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf, inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr)
Rule('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex', ApafA() + C9() | Apop(C3pro=None, Xiap=None), conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf, conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr)
Rule('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=None, Xiap=None) + C3pro(Apop=None, C8A=None) | Apop(C3pro=1, Xiap=None) % C3pro(Apop=1, C8A=None), catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=1, Xiap=None) % C3pro(Apop=1, C8A=None) >> Apop(C3pro=None, Xiap=None) + C3A(Xiap=None, ParpU=None, C6pro=None), catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('inhibition_0_Xiap_inhibitor_Apop_inh_target', Xiap(SmacC=None, Apop=None, C3A=None) + Apop(C3pro=None, Xiap=None) | Xiap(SmacC=None, Apop=1, C3A=None) % Apop(C3pro=None, Xiap=1), inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf, inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr)
Rule('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, Apop=None, C3A=None) + C3A(Xiap=None, ParpU=None, C6pro=None) | Xiap(SmacC=None, Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None), catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf, catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr)
Rule('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None) >> Xiap(SmacC=None, Apop=None, C3A=None) + C3ub(), catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc)
Rule('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=None, C6pro=None) + ParpU(C3A=None) | C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1), catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf, catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr)
Rule('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + ParpC(), catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc)
Rule('equilibration_0_BidT_equil_a_BidM_equil_b', BidT() | BidM(BaxM=None), equilibration_0_BidT_equil_a_BidM_equil_b_1kf, equilibration_0_BidT_equil_a_BidM_equil_b_1kr)
Rule('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=None) + BaxM(BidM=None, BaxA=None) | BidM(BaxM=1) % BaxM(BidM=1, BaxA=None), catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf, catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr)
Rule('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=1) % BaxM(BidM=1, BaxA=None) >> BidM(BaxM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc)
Rule('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxM(BidM=None, BaxA=None) | BaxA(BaxM=1, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) % BaxM(BidM=None, BaxA=1), self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf, self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr)
Rule('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=1, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) % BaxM(BidM=None, BaxA=1) >> BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc)
Rule('pore_formation_0_BaxA_pore', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) | BaxA(BaxM=None, BaxA_1=None, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=None, SmacM=None, CytoCM=None), pore_formation_0_BaxA_pore_2kf, pore_formation_0_BaxA_pore_1kr)
Rule('pore_formation_1_BaxA_pore', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=None, SmacM=None, CytoCM=None) | BaxA(BaxM=None, BaxA_1=3, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None), pore_formation_1_BaxA_pore_2kf, pore_formation_1_BaxA_pore_1kr)
Rule('pore_formation_2_BaxA_pore', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=3, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) | BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None), pore_formation_2_BaxA_pore_2kf, pore_formation_2_BaxA_pore_1kr)
Rule('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + SmacM(BaxA=None) | BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=5, CytoCM=None) % SmacM(BaxA=5), transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf, transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=5, CytoCM=None) % SmacM(BaxA=5) >> BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + SmacC(Xiap=None), transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc)
Rule('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + CytoCM(BaxA=None) | BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=5) % CytoCM(BaxA=5), transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf, transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=5) % CytoCM(BaxA=5) >> BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + CytoCC(), transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc)
Rule('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(BidU=None, C3pro=None) + C3pro(Apop=None, C8A=None) | C8A(BidU=None, C3pro=1) % C3pro(Apop=None, C8A=1), catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(BidU=None, C3pro=1) % C3pro(Apop=None, C8A=1) >> C8A(BidU=None, C3pro=None) + C3A(Xiap=None, ParpU=None, C6pro=None), catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=None) + C6pro(C3A=None) | C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1), catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf, catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr)
Rule('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + C6A(C8pro=None), catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc)
Rule('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=None) + C8pro(Fadd=None, C6A=None) | C6A(C8pro=1) % C8pro(Fadd=None, C6A=1), catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf, catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr)
Rule('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=1) % C8pro(Fadd=None, C6A=1) >> C6A(C8pro=None) + C8A(BidU=None, C3pro=None), catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc)
Initial(Ligand(Receptor=None), Ligand_0)
Initial(ParpU(C3A=None), ParpU_0)
Initial(C8A(BidU=None, C3pro=None), C8A_0)
Initial(SmacM(BaxA=None), SmacM_0)
Initial(BaxM(BidM=None, BaxA=None), BaxM_0)
Initial(Apop(C3pro=None, Xiap=None), Apop_0)
Initial(Fadd(Receptor=None, C8pro=None), Fadd_0)
Initial(SmacC(Xiap=None), SmacC_0)
Initial(ParpC(), ParpC_0)
Initial(Xiap(SmacC=None, Apop=None, C3A=None), Xiap_0)
Initial(C9(), C9_0)
Initial(C3ub(), C3ub_0)
Initial(C8pro(Fadd=None, C6A=None), C8pro_0)
Initial(C6A(C8pro=None), C6A_0)
Initial(C3pro(Apop=None, C8A=None), C3pro_0)
Initial(CytoCM(BaxA=None), CytoCM_0)
Initial(CytoCC(), CytoCC_0)
Initial(BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), BaxA_0)
Initial(ApafI(), ApafI_0)
Initial(BidU(C8A=None), BidU_0)
Initial(BidT(), BidT_0)
Initial(C3A(Xiap=None, ParpU=None, C6pro=None), C3A_0)
Initial(ApafA(), ApafA_0)
Initial(BidM(BaxM=None), BidM_0)
Initial(Receptor(Ligand=None, Fadd=None), Receptor_0)
Initial(C6pro(C3A=None), C6pro_0)
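# --- Usage sketch (not part of the exported model) ---
# One common way to run this model is PySB's SciPy-based integrator; the time span
# and the observable chosen below are arbitrary assumptions for illustration.
if __name__ == '__main__':
    import numpy as np
    from pysb.simulator import ScipyOdeSimulator
    tspan = np.linspace(0, 20000, 100)
    result = ScipyOdeSimulator(model, tspan=tspan).run()
    print(result.observables['ParpC_obs'][-1])  # cleaved PARP at the final time point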
|
py | 7dfddaef34596f7083061b64b1a0d42dc2ce1a87 | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""export checkpoint file into air, onnx, mindir models"""
import argparse
import numpy as np
from mindspore import Tensor, context, load_checkpoint, export
import mindspore.common.dtype as mstype
from src.config import Config_CNNCTC
from src.cnn_ctc import CNNCTC_Model
parser = argparse.ArgumentParser(description="CNNCTC_export")
parser.add_argument("--device_id", type=int, default=0, help="Device id")
parser.add_argument("--file_name", type=str, default="cnn_ctc", help="CNN&CTC output air name.")
parser.add_argument("--file_format", type=str, choices=["AIR", "MINDIR"], default="AIR", help="file format")
parser.add_argument("--device_target", type=str, choices=["Ascend", "GPU", "CPU"], default="Ascend",
help="device target")
parser.add_argument("--ckpt_file", type=str, default="./ckpts/cnn_ctc.ckpt", help="CNN&CTC ckpt file.")
args_opt = parser.parse_args()
context.set_context(mode=context.GRAPH_MODE, device_target=args_opt.device_target)
if args_opt.device_target == "Ascend":
context.set_context(device_id=args_opt.device_id)
if __name__ == "__main__":
cfg = Config_CNNCTC()
ckpt_path = cfg.CKPT_PATH
if args_opt.ckpt_file != "":
ckpt_path = args_opt.ckpt_file
net = CNNCTC_Model(cfg.NUM_CLASS, cfg.HIDDEN_SIZE, cfg.FINAL_FEATURE_WIDTH)
load_checkpoint(ckpt_path, net=net)
bs = cfg.TEST_BATCH_SIZE
input_data = Tensor(np.zeros([bs, 3, cfg.IMG_H, cfg.IMG_W]), mstype.float32)
export(net, input_data, file_name=args_opt.file_name, file_format=args_opt.file_format)
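# --- Usage sketch ---
# A typical invocation of this export script (the script name and checkpoint path
# below are placeholders, not taken from the repository):
#   python export.py --ckpt_file ./ckpts/cnn_ctc.ckpt --file_name cnn_ctc \
#       --file_format MINDIR --device_target Ascend --device_id 0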
|
py | 7dfddb86f8daeed5b2547a344174131c879d68cf | # terrascript/vcd/r.py
import terrascript
class vcd_network(terrascript.Resource):
pass
class vcd_network_routed(terrascript.Resource):
pass
class vcd_network_direct(terrascript.Resource):
pass
class vcd_network_isolated(terrascript.Resource):
pass
class vcd_vapp_network(terrascript.Resource):
pass
class vcd_vapp(terrascript.Resource):
pass
class vcd_firewall_rules(terrascript.Resource):
pass
class vcd_dnat(terrascript.Resource):
pass
class vcd_snat(terrascript.Resource):
pass
class vcd_edgegateway(terrascript.Resource):
pass
class vcd_edgegateway_vpn(terrascript.Resource):
pass
class vcd_vapp_vm(terrascript.Resource):
pass
class vcd_org(terrascript.Resource):
pass
class vcd_org_vdc(terrascript.Resource):
pass
class vcd_org_user(terrascript.Resource):
pass
class vcd_catalog(terrascript.Resource):
pass
class vcd_catalog_item(terrascript.Resource):
pass
class vcd_catalog_media(terrascript.Resource):
pass
class vcd_inserted_media(terrascript.Resource):
pass
class vcd_independent_disk(terrascript.Resource):
pass
class vcd_external_network(terrascript.Resource):
pass
class vcd_lb_service_monitor(terrascript.Resource):
pass
class vcd_lb_server_pool(terrascript.Resource):
pass
class vcd_lb_app_profile(terrascript.Resource):
pass
class vcd_lb_app_rule(terrascript.Resource):
pass
class vcd_lb_virtual_server(terrascript.Resource):
pass
class vcd_nsxv_dnat(terrascript.Resource):
pass
class vcd_nsxv_snat(terrascript.Resource):
pass
class vcd_nsxv_firewall_rule(terrascript.Resource):
pass
class vcd_nsxv_dhcp_relay(terrascript.Resource):
pass
class vcd_nsxv_ip_set(terrascript.Resource):
pass
class vcd_vm_internal_disk(terrascript.Resource):
pass
class vcd_vapp_org_network(terrascript.Resource):
pass
class vcd_org_group(terrascript.Resource):
pass
class vcd_vapp_firewall_rules(terrascript.Resource):
pass
class vcd_vapp_nat_rules(terrascript.Resource):
pass
class vcd_vapp_static_routing(terrascript.Resource):
pass
class vcd_vm_affinity_rule(terrascript.Resource):
pass
|
py | 7dfddbad6f8ede96a21b4725c8e94a750a763a79 | from __future__ import unicode_literals
from datetime import timedelta
import random
import threading
import uuid
from django.conf import settings
from django.db.models.signals import post_delete, post_save
from django.utils import timezone
from django.utils.functional import curry
from extras.webhooks import enqueue_webhooks
from .constants import (
OBJECTCHANGE_ACTION_CREATE, OBJECTCHANGE_ACTION_DELETE, OBJECTCHANGE_ACTION_UPDATE,
)
from .models import ObjectChange
_thread_locals = threading.local()
def cache_changed_object(instance, **kwargs):
action = OBJECTCHANGE_ACTION_CREATE if kwargs['created'] else OBJECTCHANGE_ACTION_UPDATE
# Cache the object for further processing once the response has completed.
_thread_locals.changed_objects.append(
(instance, action)
)
def _record_object_deleted(request, instance, **kwargs):
# Record that the object was deleted.
if hasattr(instance, 'log_change'):
instance.log_change(request.user, request.id, OBJECTCHANGE_ACTION_DELETE)
enqueue_webhooks(instance, OBJECTCHANGE_ACTION_DELETE)
class ObjectChangeMiddleware(object):
"""
This middleware performs two functions in response to an object being created, updated, or deleted:
1. Create an ObjectChange to reflect the modification to the object in the changelog.
2. Enqueue any relevant webhooks.
The post_save and pre_delete signals are employed to catch object modifications, however changes are recorded a bit
differently for each. Objects being saved are cached into thread-local storage for action *after* the response has
completed. This ensures that serialization of the object is performed only after any related objects (e.g. tags)
have been created. Conversely, deletions are acted upon immediately, so that the serialized representation of the
object is recorded before it (and any related objects) are actually deleted from the database.
"""
def __init__(self, get_response):
self.get_response = get_response
def __call__(self, request):
# Initialize an empty list to cache objects being saved.
_thread_locals.changed_objects = []
# Assign a random unique ID to the request. This will be used to associate multiple object changes made during
# the same request.
request.id = uuid.uuid4()
# Signals don't include the request context, so we're currying it into the pre_delete function ahead of time.
record_object_deleted = curry(_record_object_deleted, request)
# Connect our receivers to the post_save and pre_delete signals.
post_save.connect(cache_changed_object, dispatch_uid='record_object_saved')
post_delete.connect(record_object_deleted, dispatch_uid='record_object_deleted')
# Process the request
response = self.get_response(request)
# Create records for any cached objects that were created/updated.
for obj, action in _thread_locals.changed_objects:
# Record the change
if hasattr(obj, 'log_change'):
obj.log_change(request.user, request.id, action)
# Enqueue webhooks
enqueue_webhooks(obj, action)
# Housekeeping: 1% chance of clearing out expired ObjectChanges
if _thread_locals.changed_objects and settings.CHANGELOG_RETENTION and random.randint(1, 100) == 1:
cutoff = timezone.now() - timedelta(days=settings.CHANGELOG_RETENTION)
purged_count, _ = ObjectChange.objects.filter(
time__lt=cutoff
).delete()
return response
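# --- Illustrative sketch (not part of the middleware above) ---
# The docstring relies on binding the current request into a signal receiver ahead
# of time. django.utils.functional.curry behaves like functools.partial here, so an
# equivalent (hypothetical) spelling of the hook-up would be:
#   from functools import partial
#   receiver = partial(_record_object_deleted, request)
#   post_delete.connect(receiver, dispatch_uid='record_object_deleted')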
|
py | 7dfddc2b8e402b190c22a5128fa755c48f31072c | import logging
from pathlib import Path
from dotenv import find_dotenv, load_dotenv
from data.WordData import WordData
import json
from models.WordDataLoader import WordDataLoader
def main():
with open('data/interim/tokens.json') as f:
tokens = json.load(f)
word_data = WordData(tokens, None, 3000)
print(word_data.token_final[1:4])
print(word_data.getVocabularyLength())
# print(word_data.ref_word_to_id)
word_data_loader = WordDataLoader(word_data, 1)
for x,y in word_data_loader.generate():
print(x)
print(x.shape)
print(y)
print(y.shape)
break
if __name__ == '__main__':
log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
logging.basicConfig(level=logging.INFO, format=log_fmt)
project_dir = Path(__file__).resolve().parents[2]
logger = logging.getLogger(__name__)
main() |
py | 7dfddcf5f1ee06137d5ec7c40a672f2de21386ff | import unittest
import torch
import torchdiffeq
from problems import construct_problem
eps = 1e-12
torch.set_default_dtype(torch.float64)
TEST_DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def max_abs(tensor):
return torch.max(torch.abs(tensor))
class TestCollectionState(unittest.TestCase):
def test_dopri5(self):
f, y0, t_points, sol = construct_problem(TEST_DEVICE)
tuple_f = lambda t, y: (f(t, y[0]), f(t, y[1]))
tuple_y0 = (y0, y0)
tuple_y = torchdiffeq.odeint(tuple_f, tuple_y0, t_points, method='dopri5')
max_error0 = (sol - tuple_y[0]).max()
max_error1 = (sol - tuple_y[1]).max()
self.assertLess(max_error0, eps)
self.assertLess(max_error1, eps)
def test_dopri5_gradient(self):
f, y0, t_points, sol = construct_problem(TEST_DEVICE)
tuple_f = lambda t, y: (f(t, y[0]), f(t, y[1]))
for i in range(2):
func = lambda y0, t_points: torchdiffeq.odeint(tuple_f, (y0, y0), t_points, method='dopri5')[i]
self.assertTrue(torch.autograd.gradcheck(func, (y0, t_points)))
def test_adams(self):
f, y0, t_points, sol = construct_problem(TEST_DEVICE)
tuple_f = lambda t, y: (f(t, y[0]), f(t, y[1]))
tuple_y0 = (y0, y0)
tuple_y = torchdiffeq.odeint(tuple_f, tuple_y0, t_points, method='adams')
max_error0 = (sol - tuple_y[0]).max()
max_error1 = (sol - tuple_y[1]).max()
self.assertLess(max_error0, eps)
self.assertLess(max_error1, eps)
def test_adams_gradient(self):
f, y0, t_points, sol = construct_problem(TEST_DEVICE)
tuple_f = lambda t, y: (f(t, y[0]), f(t, y[1]))
for i in range(2):
func = lambda y0, t_points: torchdiffeq.odeint(tuple_f, (y0, y0), t_points, method='adams')[i]
self.assertTrue(torch.autograd.gradcheck(func, (y0, t_points)))
if __name__ == '__main__':
unittest.main()
|
py | 7dfddd3230df1e32a7e5b1f35b4db985197e888b | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test mempool re-org scenarios.
Test re-org scenarios with a mempool that contains transactions
that spend (directly or indirectly) coinbase transactions.
"""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
# Create one-input, one-output, no-fee transaction:
class MempoolCoinbaseTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.extra_args = [["-checkmempool"]] * 2
alert_filename = None # Set by setup_network
def run_test(self):
# Start with a 200 block chain
assert_equal(self.nodes[0].getblockcount(), 200)
# Mine four blocks. After this, nodes[0] blocks
# 101, 102, and 103 are spend-able.
new_blocks = self.nodes[1].generate(4)
self.sync_all()
node0_address = self.nodes[0].getnewaddress()
node1_address = self.nodes[1].getnewaddress()
# Three scenarios for re-orging coinbase spends in the memory pool:
# 1. Direct coinbase spend : spend_101
# 2. Indirect (coinbase spend in chain, child in mempool) : spend_102 and spend_102_1
# 3. Indirect (coinbase and child both in chain) : spend_103 and spend_103_1
# Use invalidatblock to make all of the above coinbase spends invalid (immature coinbase),
# and make sure the mempool code behaves correctly.
b = [ self.nodes[0].getblockhash(n) for n in range(101, 105) ]
coinbase_txids = [ self.nodes[0].getblock(h)['tx'][0] for h in b ]
spend_101_raw = create_tx(self.nodes[0], coinbase_txids[1], node1_address, 49.99)
spend_102_raw = create_tx(self.nodes[0], coinbase_txids[2], node0_address, 49.99)
spend_103_raw = create_tx(self.nodes[0], coinbase_txids[3], node0_address, 49.99)
# Create a transaction which is time-locked to two blocks in the future
timelock_tx = self.nodes[0].createrawtransaction([{"txid": coinbase_txids[0], "vout": 0}], {node0_address: 49.99})
# Set the time lock
timelock_tx = timelock_tx.replace("ffffffff", "11111191", 1)
timelock_tx = timelock_tx[:-8] + hex(self.nodes[0].getblockcount() + 2)[2:] + "000000"
timelock_tx = self.nodes[0].signrawtransaction(timelock_tx)["hex"]
# This will raise an exception because the timelock transaction is too immature to spend
assert_raises_jsonrpc(-26, "non-final", self.nodes[0].sendrawtransaction, timelock_tx)
# Broadcast and mine spend_102 and 103:
spend_102_id = self.nodes[0].sendrawtransaction(spend_102_raw)
spend_103_id = self.nodes[0].sendrawtransaction(spend_103_raw)
self.nodes[0].generate(1)
# Time-locked transaction is still too immature to spend
assert_raises_jsonrpc(-26,'non-final', self.nodes[0].sendrawtransaction, timelock_tx)
# Create 102_1 and 103_1:
spend_102_1_raw = create_tx(self.nodes[0], spend_102_id, node1_address, 49.98)
spend_103_1_raw = create_tx(self.nodes[0], spend_103_id, node1_address, 49.98)
# Broadcast and mine 103_1:
spend_103_1_id = self.nodes[0].sendrawtransaction(spend_103_1_raw)
last_block = self.nodes[0].generate(1)
# Time-locked transaction can now be spent
timelock_tx_id = self.nodes[0].sendrawtransaction(timelock_tx)
# ... now put spend_101 and spend_102_1 in memory pools:
spend_101_id = self.nodes[0].sendrawtransaction(spend_101_raw)
spend_102_1_id = self.nodes[0].sendrawtransaction(spend_102_1_raw)
self.sync_all()
assert_equal(set(self.nodes[0].getrawmempool()), {spend_101_id, spend_102_1_id, timelock_tx_id})
for node in self.nodes:
node.invalidateblock(last_block[0])
# Time-locked transaction is now too immature and has been removed from the mempool
# spend_103_1 has been re-orged out of the chain and is back in the mempool
assert_equal(set(self.nodes[0].getrawmempool()), {spend_101_id, spend_102_1_id, spend_103_1_id})
# Use invalidateblock to re-org back and make all those coinbase spends
# immature/invalid:
for node in self.nodes:
node.invalidateblock(new_blocks[0])
self.sync_all()
# mempool should be empty.
assert_equal(set(self.nodes[0].getrawmempool()), set())
if __name__ == '__main__':
MempoolCoinbaseTest().main()
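# --- Worked example of the locktime surgery above (comments only) ---
# createrawtransaction emits the input with sequence ffffffff; replacing it with
# 11111191 makes the input non-final, so nLockTime is enforced. The expression
# hex(height)[2:] + "000000" then overwrites the trailing 4-byte little-endian
# nLockTime: for a target height of 206, hex(206)[2:] == 'ce', giving bytes
# ce 00 00 00. (The trick assumes the height fits in one byte, as it does here.)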
|
py | 7dfdde122702b48737c00823525f3bf6b4943f8c | if '__file__' in globals():
import os, sys
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
import numpy as np
from dezero import Variable
x = Variable(np.array(1.0))
y = (x + 3) ** 2
y.backward()
print(y)
print(x.grad)
print(str(os.path.dirname(__file__)))
print(str(os.path.join(os.path.dirname(__file__), '..')))
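# Worked check (not printed by dezero itself): y = (x + 3)**2 at x = 1 gives
# y = 16, and dy/dx = 2 * (x + 3) = 8, so x.grad above should come out as 8.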
|
py | 7dfddf818246063f3ec4d57184cb34a67078aced | ## This package is about strings
def reverseit(st):
print(st[::-1])
def cap(st):
print(st.capitalize()) |
py | 7dfde1ecf299fbf7494428d719e73376b7caea31 | """
This defines the Value object used by components and events.
"""
from ..six import PY2, python_2_unicode_compatible, string_types, text_type
from .events import Event
@python_2_unicode_compatible
class Value(object):
"""Create a new future Value Object
Creates a new future Value Object which is used by Event Objects and the
Manager to store the result(s) of an Event Handler's execution of some
Event in the system.
:param event: The Event this Value is associated with.
:type event: Event instance
:param manager: The Manager/Component used to trigger notifications.
:type manager: A Manager/Component instance.
:ivar result: True if this value has been changed.
:ivar errors: True if an exception occurred while setting this value.
:ivar notify: True or an event name to notify of changes to this value
This is a Future/Promise implementation.
"""
def __init__(self, event=None, manager=None):
self.event = event
self.manager = manager
self.notify = False
self.promise = False
self.result = False
self.errors = False
self.parent = self
self.handled = False
self._value = None
def __getstate__(self):
odict = self.__dict__.copy()
del odict["manager"]
return odict
def __contains__(self, y):
value = self.value
return y in value if isinstance(value, list) else y == value
def __getitem__(self, y):
v = self.value[y]
if isinstance(v, Value):
return v.value
else:
return v
def __iter__(self):
return iter(map(lambda v: v.value if isinstance(v, Value) else v,
self.value))
def __repr__(self):
"x.__repr__() <==> repr(x)"
value = ""
if self.result:
value = repr(self.value)
format = "<Value (%s) result=%r; errors=%r; for %r>"
return format % (value, self.result, self.errors, self.event)
def __str__(self):
"x.__str__() <==> str(x)"
if PY2:
return text_type(self.value).encode('utf-8')
return str(self.value)
def inform(self, force=False):
if self.promise and not force:
return
notify = getattr(self.event, "notify", False) or self.notify
if self.manager is not None and notify:
if isinstance(notify, string_types):
e = Event.create(notify, self)
else:
e = self.event.child("value_changed", self)
self.manager.fire(e, self.manager)
def getValue(self, recursive=True):
value = self._value
if not recursive:
return value
while isinstance(value, Value):
value = value._value
return value
def setValue(self, value):
if isinstance(value, Value):
value.parent = self
if self.result and isinstance(self._value, list):
self._value.append(value)
elif self.result:
self._value = [self._value]
self._value.append(value)
else:
self._value = value
def update(o, v):
if isinstance(v, Value):
o.errors = v.errors
o.result = v.result
elif v is not None:
o.result = True
o.inform()
if o.parent is not o:
o.parent.errors = o.errors
o.parent.result = o.result
update(o.parent, v)
update(self, value)
value = property(getValue, setValue, None, "Value of this Value")
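# --- Usage sketch (illustrative, based only on the class above) ---
# A Value can act as a tiny future even without a manager: assigning .value stores
# the result and flips .result to True, and reading .value unwraps nested Values.
#   v = Value()
#   v.value = 42
#   assert v.result and v.value == 42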
|
py | 7dfde1f99685be58dc359cb59604f2969a6f41ea | #!/usr/bin/env python3
from sqlalchemy import asc, desc
from marshmallow import fields
from marshmallow_sqlalchemy import SQLAlchemyAutoSchema
from model import Player
from model import Hug
class PlayerSchema(SQLAlchemyAutoSchema):
hug_counter = fields.Method("get_hug_count", dump_only=True)
playtime = fields.Method("get_playtime", dump_only=True)
faction = fields.Pluck("FactionSchema", "id", many=False)
def get_hug_count(self, player) -> int:
return Hug.query.filter_by(player=player).count()
def get_playtime(self, player):
first_hug = Hug.query.filter_by(player=player).order_by(asc('timestamp')).first()
last_hug = Hug.query.filter_by(player=player).order_by(desc('timestamp')).first()
if not (first_hug and last_hug):
return None
return (last_hug.timestamp - first_hug.timestamp).total_seconds()
class Meta:
# Regular players won't be able to access other players' schemas, so we can safely dump the is_admin field
exclude = [
'id',
'hugs'
]
dump_only = [
'faction'
]
model = Player
include_relationships = True
load_instance = True
include_fk = False
|
py | 7dfde22456e37a319c61f42e3c7a844d4de67f9b | ## @file
# process APRIORI file data and generate PEI/DXE APRIORI file
#
# Copyright (c) 2007 - 2018, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
##
# Import Modules
#
from __future__ import absolute_import
from struct import pack
import edk2basetools.Common.LongFilePathOs as os
from io import BytesIO
from .FfsFileStatement import FileStatement
from .GenFdsGlobalVariable import GenFdsGlobalVariable
from edk2basetools.Common.StringUtils import NormPath
from edk2basetools.Common.Misc import SaveFileOnChange, PathClass
from edk2basetools.Common.EdkLogger import error as EdkLoggerError
from edk2basetools.Common.BuildToolError import RESOURCE_NOT_AVAILABLE
from edk2basetools.Common.DataType import TAB_COMMON
DXE_APRIORI_GUID = "FC510EE7-FFDC-11D4-BD41-0080C73C8881"
PEI_APRIORI_GUID = "1B45CC0A-156A-428A-AF62-49864DA0E6E6"
## process APRIORI file data and generate PEI/DXE APRIORI file
#
#
class AprioriSection (object):
## The constructor
#
# @param self The object pointer
#
def __init__(self):
self.DefineVarDict = {}
self.FfsList = []
self.AprioriType = ""
## GenFfs() method
#
# Generate FFS for APRIORI file
#
# @param self The object pointer
# @param FvName The FV for which the apriori file is generated
# @param Dict dictionary contains macro and its value
# @retval string Generated file name
#
def GenFfs (self, FvName, Dict = None, IsMakefile = False):
if Dict is None:
Dict = {}
Buffer = BytesIO()
if self.AprioriType == "PEI":
AprioriFileGuid = PEI_APRIORI_GUID
else:
AprioriFileGuid = DXE_APRIORI_GUID
OutputAprFilePath = os.path.join (GenFdsGlobalVariable.WorkSpaceDir, \
GenFdsGlobalVariable.FfsDir,\
AprioriFileGuid + FvName)
if not os.path.exists(OutputAprFilePath):
os.makedirs(OutputAprFilePath)
OutputAprFileName = os.path.join( OutputAprFilePath, \
AprioriFileGuid + FvName + '.Apri' )
AprFfsFileName = os.path.join (OutputAprFilePath,\
AprioriFileGuid + FvName + '.Ffs')
Dict.update(self.DefineVarDict)
InfFileName = None
for FfsObj in self.FfsList:
Guid = ""
if isinstance(FfsObj, FileStatement):
Guid = FfsObj.NameGuid
else:
InfFileName = NormPath(FfsObj.InfFileName)
Arch = FfsObj.GetCurrentArch()
if Arch:
Dict['$(ARCH)'] = Arch
InfFileName = GenFdsGlobalVariable.MacroExtend(InfFileName, Dict, Arch)
if Arch:
Inf = GenFdsGlobalVariable.WorkSpace.BuildObject[PathClass(InfFileName, GenFdsGlobalVariable.WorkSpaceDir), Arch, GenFdsGlobalVariable.TargetName, GenFdsGlobalVariable.ToolChainTag]
Guid = Inf.Guid
else:
Inf = GenFdsGlobalVariable.WorkSpace.BuildObject[PathClass(InfFileName, GenFdsGlobalVariable.WorkSpaceDir), TAB_COMMON, GenFdsGlobalVariable.TargetName, GenFdsGlobalVariable.ToolChainTag]
Guid = Inf.Guid
if not Inf.Module.Binaries:
EdkLoggerError("GenFds", RESOURCE_NOT_AVAILABLE,
"INF %s not found in build ARCH %s!" \
% (InfFileName, GenFdsGlobalVariable.ArchList))
GuidPart = Guid.split('-')
Buffer.write(pack('I', int(GuidPart[0], 16)))
Buffer.write(pack('H', int(GuidPart[1], 16)))
Buffer.write(pack('H', int(GuidPart[2], 16)))
for Num in range(2):
Char = GuidPart[3][Num*2:Num*2+2]
Buffer.write(pack('B', int(Char, 16)))
for Num in range(6):
Char = GuidPart[4][Num*2:Num*2+2]
Buffer.write(pack('B', int(Char, 16)))
SaveFileOnChange(OutputAprFileName, Buffer.getvalue())
RawSectionFileName = os.path.join( OutputAprFilePath, \
AprioriFileGuid + FvName + '.raw' )
MakefilePath = None
if IsMakefile:
if not InfFileName:
return None
MakefilePath = InfFileName, Arch
GenFdsGlobalVariable.GenerateSection(RawSectionFileName, [OutputAprFileName], 'EFI_SECTION_RAW', IsMakefile=IsMakefile)
GenFdsGlobalVariable.GenerateFfs(AprFfsFileName, [RawSectionFileName],
'EFI_FV_FILETYPE_FREEFORM', AprioriFileGuid, MakefilePath=MakefilePath)
return AprFfsFileName
|
py | 7dfde337259cbcf6f33a7afd3446178eaa23c555 | from selenium import webdriver
from django.contrib.staticfiles.testing import StaticLiveServerTestCase
from django.core.urlresolvers import reverse
CHROMEDRIVER_PATH = '/usr/bin/chromedriver'
class BaseTestCase(StaticLiveServerTestCase):
fixtures = ['base.json']
def setUp(self):
self.browser = webdriver.Chrome(CHROMEDRIVER_PATH)
def tearDown(self):
self.browser.close()
def get(self, url=None, name=None, *args, **kwargs):
if name:
url = reverse(name, *args, **kwargs)
self.browser.get('{}{}'.format(self.live_server_url, url))
def get_by_id(self, selector):
return self.browser.find_element_by_id(selector)
def set_field(self, field_id, value):
field = self.browser.find_element_by_id(field_id)
field.clear()
field.send_keys(value)
def submit(self):
form = self.browser.find_element_by_tag_name('form')
form.submit()
def get_full_url(self, url):
return '{}{}'.format(self.live_server_url, url)
|
py | 7dfde346d0b1f09254dd689420d9d80e4f459308 | import smtplib
MY_EMAIL = "[email protected]"
MY_PASSWORD = "MY_PASSWORD"
SMTP_SERVER = "smtp.gmail.com"
def send_emails(emails, message, google_flight_link):
with smtplib.SMTP(SMTP_SERVER) as connection:
connection.starttls()
connection.login(MY_EMAIL, MY_PASSWORD)
for email in emails:
connection.sendmail(
from_addr=MY_EMAIL,
to_addrs=email,
msg=f"Subject:New Low Price Flight!\n\n{message}\n{google_flight_link}".encode('utf-8')
)
|
py | 7dfde3dc09be186c61a7ba461b0bb29903255017 | ascii_art = [
"""
_ _ __ __ ____
| | | | __ _ _ __ __ _ | \/ | __ _ _ __ / ___| __ _ _ __ ___ ___
| |_| | / _` | | '_ \ / _` | _____ | |\/| | / _` | | '_ \ | | _ / _` | | '_ ` _ \ / _ \.
| _ | | (_| | | | | | | (_| | |_____| | | | | | (_| | | | | | | |_| | | (_| | | | | | | | | __/
|_| |_| \__,_| |_| |_| \__, | |_| |_| \__,_| |_| |_| \____| \__,_| |_| |_| |_| \___|
|___/
"""
]
print(ascii_art[0])
|
py | 7dfde3f456ee7afa27f4b3199d232506021488ed | """Everything related to training drones goes here"""
import numpy as np
from sc2.constants import DRONE, OVERLORD
class TrainWorker:
"""Needs improvements, its very greedy sometimes"""
def __init__(self, main):
self.controller = main
async def should_handle(self):
"""Should this action be handled, needs more smart limitations, its very greedy sometimes"""
local_controller = self.controller
workers_total = len(local_controller.workers)
geysers = local_controller.extractors
drones_in_queue = local_controller.already_pending(DRONE)
if (
not local_controller.close_enemies_to_base
and local_controller.can_train(DRONE)
and not local_controller.counter_attack_vs_flying
):
if workers_total == 12 and not drones_in_queue:
return True
if (
workers_total in (13, 14, 15)
and len(local_controller.overlords) + local_controller.already_pending(OVERLORD) > 1
):
return True
optimal_workers = min(
sum(x.ideal_harvesters for x in local_controller.townhalls | geysers), 90 - len(geysers)
)
return (
workers_total + drones_in_queue < optimal_workers
and np.sum(
np.array(
[
len(local_controller.zerglings),
len(local_controller.hydras),
len(local_controller.ultralisks),
]
)
* np.array([1, 2, 3])
)
> 15
)
return False
async def handle(self):
"""Execute the action of training drones"""
local_controller = self.controller
local_controller.add_action(local_controller.larvae.random.train(DRONE))
return True
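# --- Worked example of the weighted-army check in should_handle (comments only) ---
# The dot product weights zerglings, hydras and ultralisks as 1, 2 and 3:
# e.g. 10 lings, 2 hydras and 1 ultralisk give 10*1 + 2*2 + 1*3 = 17 > 15, so drone
# production may continue, while 6 lings alone (6 <= 15) would not pass the check.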
|
py | 7dfde517db0bd1ed69b489ce47ec846f89c304bc | from collections import defaultdict
import datetime as dt
import os
import pickle as pkl
from typing import List
import numpy as np
import torch
from torch.utils import data
from torch.utils.data import ConcatDataset, DataLoader, Subset
from torchvision.transforms import transforms
import zarr
from transforms import (
Identity,
Normalize,
RandomSamplePixels,
RandomSampleTimeSteps,
ToTensor,
)
from utils import label_utils
class PixelSetData(data.Dataset):
def __init__(
self,
data_root,
dataset_name,
classes,
transform=None,
indices=None,
with_extra=False,
):
super(PixelSetData, self).__init__()
self.folder = os.path.join(data_root, dataset_name)
self.dataset_name = dataset_name # country/tile/year
self.country = dataset_name.split("/")[-3]
self.tile = dataset_name.split("/")[-2]
self.data_folder = os.path.join(self.folder, "data")
self.meta_folder = os.path.join(self.folder, "meta")
self.transform = transform
self.with_extra = with_extra
self.classes = classes
self.class_to_idx = {cls: idx for idx, cls in enumerate(classes)}
self.samples, self.metadata = self.make_dataset(
self.data_folder, self.meta_folder, self.class_to_idx, indices, self.country
)
self.dates = self.metadata["dates"]
self.date_positions = self.days_after(self.metadata["start_date"], self.dates)
self.date_indices = np.arange(len(self.date_positions))
def get_shapes(self):
return [
(len(self.dates), 10, parcel["n_pixels"])
for parcel in self.metadata["parcels"]
]
def get_labels(self):
return np.array([x[2] for x in self.samples])
def __len__(self):
return len(self.samples)
def __getitem__(self, index):
path, parcel_idx, y, extra = self.samples[index]
pixels = zarr.load(path) # (T, C, S)
sample = {
"index": index,
"parcel_index": parcel_idx, # mapping to metadata
"pixels": pixels,
"valid_pixels": np.ones(
(pixels.shape[0], pixels.shape[-1]), dtype=np.float32),
"positions": np.array(self.date_positions),
"extra": np.array(extra),
"label": y,
}
if self.transform is not None:
sample = self.transform(sample)
return sample
def make_dataset(self, data_folder, meta_folder, class_to_idx, indices, country):
metadata = pkl.load(open(os.path.join(meta_folder, "metadata.pkl"), "rb"))
instances = []
new_parcel_metadata = []
code_to_class_name = label_utils.get_code_to_class(country)
unknown_crop_codes = set()
for parcel_idx, parcel in enumerate(metadata["parcels"]):
if indices is not None:
if not parcel_idx in indices:
continue
crop_code = parcel["label"]
if country == "austria":
crop_code = int(crop_code)
parcel_path = os.path.join(data_folder, f"{parcel_idx}.zarr")
if crop_code not in code_to_class_name:
unknown_crop_codes.add(crop_code)
class_name = code_to_class_name.get(crop_code, "unknown")
class_index = class_to_idx.get(class_name, class_to_idx["unknown"])
extra = parcel['geometric_features']
item = (parcel_path, parcel_idx, class_index, extra)
instances.append(item)
new_parcel_metadata.append(parcel)
for crop_code in unknown_crop_codes:
print(
f"Parcels with crop code {crop_code} was not found in .yml class mapping and was assigned to unknown."
)
metadata["parcels"] = new_parcel_metadata
assert len(metadata["parcels"]) == len(instances)
return instances, metadata
def days_after(self, start_date, dates):
def parse(date):
d = str(date)
return int(d[:4]), int(d[4:6]), int(d[6:])
def interval_days(date1, date2):
return abs((dt.datetime(*parse(date1)) - dt.datetime(*parse(date2))).days)
date_positions = [interval_days(d, start_date) for d in dates]
return date_positions
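# Worked example (comments only): with start_date 20200101, the date 20200131
# parses to (2020, 1, 31) and interval_days returns abs((Jan 31 - Jan 1).days) = 30,
# so date_positions holds each acquisition's day offset from the start of the series.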
def get_unknown_labels(self):
"""
Reports the categorization of crop codes for this dataset
"""
class_count = defaultdict(int)
class_parcel_size = defaultdict(float)
# metadata = pkl.load(open(os.path.join(self.meta_folder, 'metadata.pkl'), 'rb'))
metadata = self.metadata
for meta in metadata["parcels"]:
class_count[meta["label"]] += 1
class_parcel_size[meta["label"]] += meta["n_pixels"]
class_avg_parcel_size = {
cls: total_px / class_count[cls]
for cls, total_px in class_parcel_size.items()
}
code_to_class_name = label_utils.get_code_to_class(self.country)
codification_table = label_utils.get_codification_table(self.country)
unknown = []
known = defaultdict(list)
for code, count in class_count.items():
avg_pixels = class_avg_parcel_size[code]
if self.country == "denmark":
code = int(code)
code_name = codification_table[str(code)]
if code in code_to_class_name:
known[code_to_class_name[code]].append(
(code, code_name, count, avg_pixels)
)
else:
unknown.append((code, code_name, count, avg_pixels))
print("\nCategorized crop codes:")
for class_name, codes in known.items():
total_parcels = sum(x[2] for x in codes)
avg_parcel_size = sum(x[3] for x in codes) / len(codes)
print(f"{class_name} (n={total_parcels}, avg size={avg_parcel_size:.3f}):")
codes = reversed(sorted(codes, key=lambda x: x[2]))
for code, code_name, count, avg_pixels in codes:
print(f" {code}: {code_name} (n={count}, avg pixels={avg_pixels:.1f})")
unknown = reversed(sorted(unknown, key=lambda x: x[2]))
print("\nUncategorized crop codes:")
for code, code_name, count, avg_pixels in unknown:
print(f" {code}: {code_name} (n={count}, avg pixels={avg_pixels:.1f})")
def worker_init_fn(worker_id):
np.random.seed(np.random.get_state()[1][0] + worker_id)
def create_train_loader(ds, batch_size, num_workers):
return DataLoader(
dataset=ds,
batch_size=batch_size,
shuffle=True,
num_workers=num_workers,
drop_last=True,
pin_memory=torch.cuda.is_available(),
worker_init_fn=worker_init_fn,
)
def create_evaluation_loaders(dataset_name, splits, config, sample_pixels_val=False):
"""
Create data loaders for unsupervised domain adaptation
"""
is_tsnet = config.model == "tsnet"
# Validation dataset
val_transform = transforms.Compose(
[
RandomSamplePixels(config.num_pixels) if sample_pixels_val else Identity(),
RandomSampleTimeSteps(config.seq_length) if is_tsnet else Identity(),
Normalize(),
ToTensor(),
]
)
val_dataset = PixelSetData(
config.data_root,
dataset_name,
config.classes,
val_transform,
indices=splits[dataset_name]["val"],
)
val_loader = data.DataLoader(
val_dataset,
num_workers=config.num_workers,
batch_sampler=GroupByShapesBatchSampler(
val_dataset, config.batch_size, by_pixel_dim=not sample_pixels_val
),
)
# Test dataset
test_transform = transforms.Compose(
[
RandomSampleTimeSteps(config.seq_length) if is_tsnet else Identity(),
Normalize(),
ToTensor(),
]
)
test_dataset = PixelSetData(
config.data_root,
dataset_name,
config.classes,
test_transform,
indices=splits[dataset_name]["test"],
)
test_loader = data.DataLoader(
test_dataset,
num_workers=config.num_workers,
batch_sampler=GroupByShapesBatchSampler(test_dataset, config.batch_size),
)
print(f"evaluation dataset:", dataset_name)
print(f"val target data: {len(val_dataset)} ({len(val_loader)} batches)")
print(f"test taget data: {len(test_dataset)} ({len(test_loader)} batches)")
return val_loader, test_loader
class GroupByShapesBatchSampler(torch.utils.data.BatchSampler):
"""
Group parcels by their time and/or pixel dimension, allowing for batches
with varying dimensionality.
"""
def __init__(self, data_source, batch_size, by_time=True, by_pixel_dim=True):
self.batches = []
self.data_source = data_source
datasets: List[PixelSetData] = []
        # shapes[index] is the (seq_length, n_channels, n_pixels) shape of data_source[index]
if isinstance(data_source, PixelSetData):
datasets = [data_source]
shapes = data_source.get_shapes()
elif isinstance(data_source, ConcatDataset):
datasets = data_source.datasets
shapes = [shape for d in datasets for shape in d.get_shapes()]
elif isinstance(data_source, Subset):
datasets = [data_source]
if isinstance(data_source.dataset, ConcatDataset):
shapes = [
shape
for d in data_source.dataset.datasets
for shape in d.get_shapes()
]
shapes = [
shape
for idx, shape in enumerate(shapes)
if idx in data_source.indices
]
else:
shapes = [
shape
for idx, shape in enumerate(data_source.dataset.get_shapes())
if idx in data_source.indices
]
else:
raise NotImplementedError
# group indices by (seq_length, n_pixels)
shp_to_indices = defaultdict(list) # unique shape -> sample indices
for idx, shp in enumerate(shapes):
key = []
if by_time:
key.append(shp[0])
if by_pixel_dim:
key.append(shp[2])
shp_to_indices[tuple(key)].append(idx)
# create batches grouped by shape
batches = []
for indices in shp_to_indices.values():
if len(indices) > batch_size:
batches.extend(
[
indices[i : i + batch_size]
for i in range(0, len(indices), batch_size)
]
)
else:
batches.append(indices)
self.batches = batches
self.datasets = datasets
self.batch_size = batch_size
# self._unit_test()
def __iter__(self):
for batch in self.batches:
yield batch
def __len__(self):
return len(self.batches)
def _unit_test(self):
# make sure that we iterate across all items
# 1) no duplicates
assert sum(len(batch) for batch in self.batches) == sum(
len(d) for d in self.datasets
)
# 2) all indices are present
assert set([idx for indices in self.batches for idx in indices]) == set(
range(sum(len(d) for d in self.datasets))
)
# make sure that no batch is larger than batch size
assert all(len(batch) <= self.batch_size for batch in self.batches)
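# Usage sketch (illustrative; the tile path and batch size are assumptions): the sampler
# is passed to DataLoader as batch_sampler so that every batch only contains parcels
# whose (seq_length, n_pixels) shapes match, e.g.
#   ds = PixelSetData(config.data_root, "france/31TCJ/2017", config.classes, transform)
#   loader = data.DataLoader(ds, batch_sampler=GroupByShapesBatchSampler(ds, 32),
#                            num_workers=4)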
class BalancedBatchSampler(torch.utils.data.sampler.BatchSampler):
"""
BatchSampler - from a MNIST-like dataset, samples n_samples for each of the n_classes.
Returns batches of size n_classes * (batch_size // n_classes)
Taken from https://github.com/criteo-research/pytorch-ada/blob/master/adalib/ada/datasets/sampler.py
"""
def __init__(self, labels, batch_size):
classes = sorted(set(labels))
n_classes = len(classes)
self._n_samples = batch_size // n_classes
if self._n_samples == 0:
raise ValueError(
f"batch_size should be bigger than the number of classes, got {batch_size}"
)
self._class_iters = [
InfiniteSliceIterator(np.where(labels == class_)[0], class_=class_)
for class_ in classes
]
batch_size = self._n_samples * n_classes
self.n_dataset = len(labels)
self._n_batches = self.n_dataset // batch_size
if self._n_batches == 0:
raise ValueError(
f"Dataset is not big enough to generate batches with size {batch_size}"
)
print(f"using batch size={batch_size} for balanced batch sampler")
def __iter__(self):
for _ in range(self._n_batches):
indices = []
for class_iter in self._class_iters:
indices.extend(class_iter.get(self._n_samples))
np.random.shuffle(indices)
yield indices
for class_iter in self._class_iters:
class_iter.reset()
def __len__(self):
return self._n_batches
class InfiniteSliceIterator:
def __init__(self, array, class_):
assert type(array) is np.ndarray
self.array = array
self.i = 0
self.class_ = class_
def reset(self):
self.i = 0
def get(self, n):
len_ = len(self.array)
        # not enough elements in 'array'
        if len_ < n:
            print(f"there are very few items in class {self.class_}")
self.reset()
np.random.shuffle(self.array)
mul = n // len_
rest = n - mul * len_
return np.concatenate((np.tile(self.array, mul), self.array[:rest]))
        # not enough elements in the array's tail
if len_ - self.i < n:
self.reset()
if self.i == 0:
np.random.shuffle(self.array)
i = self.i
self.i += n
return self.array[i : self.i]
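# Illustrative sketch (added for clarity, not part of the original module): how
# BalancedBatchSampler yields class-balanced index batches from an integer label vector.
# The labels below are synthetic.
def _demo_balanced_batch_sampler():
    labels = np.array([0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2])
    sampler = BalancedBatchSampler(labels, batch_size=6)  # 2 samples per class per batch
    for batch_indices in sampler:
        # every batch holds 2 indices from each of the 3 classes, shuffled
        print(batch_indices, labels[batch_indices])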
if __name__ == "__main__":
classes = label_utils.get_classes("france")
dataset = PixelSetData("/media/data/mark_pixels", "france/31TCJ/2017", classes, with_extra=True)
print(dataset[0])
print(dataset.date_positions)
|
py | 7dfde57ea5f17505f3ac180b865af79bf3bb05bf | # create a program that reads how much money she has in her wallet
# and shows how much she would have in dollars, assuming a rate of 3.27 reais per dollar
real = float(input('Enter the amount in reais that you have: '))
doll = real / 3.27
print('The amount in dollars you could have is: {:.2f}'.format(doll))
|
py | 7dfde5ccc076003728279930f6707195452e82af | # (c) 2012-2014 Continuum Analytics, Inc. / http://continuum.io
# All Rights Reserved
#
# conda is distributed under the terms of the BSD 3-clause license.
# Consult LICENSE.txt or http://opensource.org/licenses/BSD-3-Clause.
""" This module contains:
* all low-level code for extracting, linking and unlinking packages
* a very simple CLI
These API functions have argument names referring to:
dist: canonical package name (e.g. 'numpy-1.6.2-py26_0')
pkgs_dir: the "packages directory" (e.g. '/opt/anaconda/pkgs' or
'/home/joe/envs/.pkgs')
prefix: the prefix of a particular environment, which may also
be the "default" environment (i.e. sys.prefix),
but is otherwise something like '/opt/anaconda/envs/foo',
or even any prefix, e.g. '/home/joe/myenv'
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import functools
import logging
import os
import re
from errno import EACCES, EEXIST, EPERM, EROFS
from os import chmod, makedirs, stat
from os.path import (dirname, isdir, isfile, join, normcase, normpath)
from textwrap import dedent
from .base.constants import PREFIX_PLACEHOLDER
from .common.compat import on_win
from .gateways.disk.delete import delete_trash, move_path_to_trash, rm_rf
delete_trash, move_path_to_trash = delete_trash, move_path_to_trash
from .core.linked_data import is_linked, linked, linked_data # NOQA
is_linked, linked, linked_data = is_linked, linked, linked_data
from .core.package_cache import rm_fetched # NOQA
rm_fetched = rm_fetched
log = logging.getLogger(__name__)
stdoutlog = logging.getLogger('stdoutlog')
# backwards compatibility for conda-build
prefix_placeholder = PREFIX_PLACEHOLDER
# backwards compatibility for conda-build
def package_cache():
log.warn('package_cache() is a no-op and deprecated')
return {}
if on_win:
def win_conda_bat_redirect(src, dst, shell):
"""Special function for Windows XP where the `CreateSymbolicLink`
function is not available.
Simply creates a `.bat` file at `dst` which calls `src` together with
all command line arguments.
        This works only with callable files, e.g. `.bat` or `.exe` files.
"""
# ensure that directory exists first
try:
makedirs(dirname(dst))
except OSError as exc: # Python >2.5
if exc.errno == EEXIST and isdir(dirname(dst)):
pass
else:
raise
# bat file redirect
if not isfile(dst + '.bat'):
with open(dst + '.bat', 'w') as f:
f.write(dedent("""\
@echo off
call "{}" %%*
""").format(src))
# TODO: probably need one here for powershell at some point
def win_conda_unix_redirect(src, dst, shell):
"""Special function for Windows where the os.symlink function
        is unavailable due to a lack of user privileges.
Simply creates a source-able intermediate file.
"""
# ensure that directory exists first
try:
os.makedirs(os.path.dirname(dst))
except OSError as exc: # Python >2.5
if exc.errno == EEXIST and os.path.isdir(os.path.dirname(dst)):
pass
else:
raise
from conda.utils import shells
# technically these are "links" - but for obvious reasons
        # os.path.islink won't work
if not isfile(dst):
with open(dst, "w") as f:
shell_vars = shells[shell]
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# !! ensure the file ends with a blank line this is !!
# !! critical for Windows support !!
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# conda is used as an executable
if src.endswith("conda"):
command = shell_vars['path_to'](src+".exe")
text = dedent("""\
#!/usr/bin/env {shebang}
{command} {allargs}
""").format(
shebang=re.sub(
r'\.\w+$',
r'',
os.path.basename(shell_vars["exe"])),
command=command,
**shell_vars)
f.write(text)
# all others are used as sourced
else:
command = shell_vars["source"].format(shell_vars['path_to'](src))
text = dedent("""\
#!/usr/bin/env {shebang}
{command} {allargs}
""").format(
shebang=re.sub(
r'\.\w+$',
r'',
os.path.basename(shell_vars["exe"])),
command=command,
**shell_vars)
f.write(text)
# Make the new file executable
# http://stackoverflow.com/a/30463972/1170370
mode = stat(dst).st_mode
mode |= (mode & 292) >> 2 # copy R bits to X
chmod(dst, mode)
# Should this be an API function?
def symlink_conda(prefix, root_dir, shell=None):
# do not symlink root env - this clobbers activate incorrectly.
# prefix should always be longer than, or outside the root dir.
if normcase(normpath(prefix)) in normcase(normpath(root_dir)):
return
if shell is None:
shell = "bash.msys"
if on_win:
where = 'Scripts'
else:
where = 'bin'
if on_win:
if shell in ["cmd.exe", "powershell.exe"]:
symlink_fn = functools.partial(win_conda_bat_redirect, shell=shell)
else:
symlink_fn = functools.partial(win_conda_unix_redirect, shell=shell)
else:
symlink_fn = os.symlink
if not isdir(join(prefix, where)):
os.makedirs(join(prefix, where))
symlink_conda_hlp(prefix, root_dir, where, symlink_fn)
def symlink_conda_hlp(prefix, root_dir, where, symlink_fn):
scripts = ["conda", "activate", "deactivate"]
prefix_where = join(prefix, where)
if not isdir(prefix_where):
os.makedirs(prefix_where)
for f in scripts:
root_file = join(root_dir, where, f)
prefix_file = join(prefix_where, f)
try:
# try to kill stale links if they exist
if os.path.lexists(prefix_file):
rm_rf(prefix_file)
# if they're in use, they won't be killed, skip making new symlink
if not os.path.lexists(prefix_file):
symlink_fn(root_file, prefix_file)
except (IOError, OSError) as e:
if (os.path.lexists(prefix_file) and (e.errno in [EPERM, EACCES, EROFS, EEXIST])):
log.debug("Cannot symlink {0} to {1}. Ignoring since link already exists."
.format(root_file, prefix_file))
else:
raise
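# Illustrative usage (assumption, not part of this module): wiring the root
# installation's entry points into a new environment, e.g.
#   symlink_conda('/opt/anaconda/envs/foo', '/opt/anaconda', shell='bash')
# places 'conda', 'activate' and 'deactivate' into /opt/anaconda/envs/foo/bin
# (or Scripts\ on Windows).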
|
py | 7dfde7fc1db6f4c3e071b5648cfa23550d2e014a | # -*- encoding: utf-8 -*-
from . import FixtureTest
class HideEarlyAtmBusStopTest(FixtureTest):
def test_atm(self):
import dsl
z, x, y = (16, 10473, 25332)
self.generate_fixtures(
# https://www.openstreetmap.org/node/3161882181
dsl.point(3161882181, (-122.466755, 37.769587), {
'amenity': 'atm',
'name': 'Wells Fargo',
'source': 'openstreetmap.org',
}),
)
self.assert_has_feature(
z, x, y, 'pois', {
'id': 3161882181,
'kind': 'atm',
'min_zoom': 18,
})
def test_highway_bus_stop(self):
import dsl
z, x, y = (16, 10482, 25328)
self.generate_fixtures(
# https://www.openstreetmap.org/node/381940953
dsl.point(381940953, (-122.416392, 37.787220), {
'bulb': 'no',
'highway': 'bus_stop',
'operator': 'San Francisco Municipal Railway',
'route_ref': '2;3;4;76',
'shelter': 'no',
'source': 'openstreetmap.org',
'ticker': 'no',
}),
)
self.assert_has_feature(
z, x, y, 'pois', {
'id': 381940953,
'kind': 'bus_stop',
'min_zoom': 18,
})
def test_platform_bus_stop(self):
import dsl
z, x, y = (16, 10511, 25255)
self.generate_fixtures(
# https://www.openstreetmap.org/node/1866509704
dsl.point(1866509704, (-122.259434, 38.100169), {
'bus': 'yes',
'covered': 'yes',
'highway': 'platform',
'local_ref': '85',
'network': 'SolTrans',
'operator': 'Soltrans',
'public_transport': 'platform',
'ref': 'Y',
'source': 'openstreetmap.org',
'wheelchair': 'yes',
'wifi': 'free for guests',
}),
)
self.assert_has_feature(
z, x, y, 'pois', {
'id': 1866509704,
'kind': 'bus_stop',
'min_zoom': 18,
})
def test_public_transport_bus_stop(self):
import dsl
z, x, y = (16, 10483, 25329)
self.generate_fixtures(
# https://www.openstreetmap.org/node/1847714412
dsl.point(1847714412, (-122.412267, 37.781180), {
'bus': 'yes',
'highway': 'bus_stop',
'operator': 'San Francisco Municipal Railway',
'public_transport': 'platform',
'route_ref': '5',
'source': 'openstreetmap.org',
}),
)
self.assert_has_feature(
z, x, y, 'pois', {
'id': 1847714412,
'kind': 'bus_stop',
'min_zoom': 18,
})
def test_street_lamp(self):
import dsl
z, x, y = (16, 10483, 25330)
self.generate_fixtures(
# https://www.openstreetmap.org/node/5441990644
dsl.point(5441990644, (-122.413513, 37.777848), {
'highway': 'street_lamp',
'source': 'openstreetmap.org',
}),
)
self.assert_has_feature(
z, x, y, 'pois', {
'id': 5441990644,
'kind': 'street_lamp',
'min_zoom': 18,
})
def test_post_box(self):
import dsl
z, x, y = (16, 10483, 25328)
self.generate_fixtures(
# https://www.openstreetmap.org/node/669137638
dsl.point(669137638, (-122.412930, 37.785763), {
'amenity': 'post_box',
'note': 'TODO: location',
'operator': 'US Mail Service',
'source': 'openstreetmap.org',
}),
)
self.assert_has_feature(
z, x, y, 'pois', {
'id': 669137638,
'kind': 'post_box',
'min_zoom': 18,
})
def test_telephone(self):
import dsl
z, x, y = (16, 10479, 25328)
self.generate_fixtures(
# https://www.openstreetmap.org/node/429649021
dsl.point(429649021, (-122.436433, 37.785572), {
'amenity': 'telephone',
'capacity': '1',
'outside': 'yes',
'source': 'openstreetmap.org',
}),
)
self.assert_has_feature(
z, x, y, 'pois', {
'id': 429649021,
'kind': 'telephone',
'min_zoom': 18,
})
def test_amenity_bus_stop_unsupported(self):
import dsl
z, x, y = (16, 32768, 32768)
self.generate_fixtures(
dsl.point(1, (0, 0), {
'amenity': 'bus_stop',
'source': 'openstreetmap.org',
}),
)
# should not produce any POI in the output
self.assert_no_matching_feature(
z, x, y, 'pois', {
'kind': 'bus_stop',
})
|
py | 7dfde826d93b4c694581fe074e114fa5e7d03d36 | import ast
import pickle
import logging
import networkx
from airflow import DAG
from airflow.operators.bash_operator import BashOperator
from datetime import datetime, timedelta
default_args = {
'owner': 'airflow',
'depends_on_past': False,
'start_date': datetime(2019, 1, 1),
'email': ['[email protected]'],
'email_on_failure': False,
'email_on_retry': False,
'retries': 1,
'retry_delay': timedelta(minutes=5),
# 'queue': 'bash_queue',
# 'pool': 'backfill',
'priority_weight': 10,
# 'end_date': datetime(2016, 1, 1),
}
# don't auto-schedule the dag
# https://airflow.readthedocs.io/en/stable/scheduler.html
dag = DAG('pypi_static_versions_8', default_args=default_args, schedule_interval=None)
# periodically run the dag
# dag = DAG('tutorial', default_args=default_args, schedule_interval=timedelta(days=1))
# load dep_tree for packages, relative to AIRFLOW_HOME
pypi_dep_path = "./dags/pypi.with_stats.popular.versions.dep_graph_8.pickle"
dep_tree = pickle.load(open(pypi_dep_path, "rb"))
logging.info("loaded dep_tree with %d nodes", dep_tree.number_of_nodes())
def get_bash_op(pkg_name, pkg_version, dag, configpath='/home/maloss/config/astgen_python_smt.config', cache_dir='/home/maloss/metadata', outdir='/home/maloss/result'):
return BashOperator(
task_id='%s..%s' % (pkg_name, pkg_version),
execution_timeout=timedelta(hours=2),
bash_command='cd /home/maloss/src/ && python main.py astfilter -n %s -v %s -c %s -d %s -o %s -l python' % (pkg_name, pkg_version, configpath, cache_dir, outdir),
dag=dag)
# all analysis jobs
# get all leaves
# https://networkx.github.io/documentation/latest/reference/algorithms/generated/networkx.algorithms.simple_paths.all_simple_paths.html
# leaves = (v for v, d in dep_tree.out_degree() if d == 0)
pkg2op = {}
for pkg_ver_id in dep_tree.nodes():
pkg_ver_id = str(pkg_ver_id)
pkg, ver = ast.literal_eval(pkg_ver_id)
dep_pkg_ver_ids = list(dep_tree.successors(pkg_ver_id))
logging.debug("%s has %d dep_pkgs", pkg, len(dep_pkg_ver_ids))
if pkg_ver_id not in pkg2op:
pkg2op[pkg_ver_id] = get_bash_op(pkg_name=pkg, pkg_version=ver, dag=dag)
else:
continue
pkg_ver_task = pkg2op[pkg_ver_id]
dep_pkg_ver_tasks = set()
for dep_pkg_ver_id in dep_pkg_ver_ids:
dep_pkg, dep_ver = ast.literal_eval(dep_pkg_ver_id)
# avoid cycles
if dep_pkg_ver_id == pkg_ver_id:
continue
if dep_pkg_ver_id not in pkg2op:
pkg2op[dep_pkg_ver_id] = get_bash_op(pkg_name=dep_pkg, pkg_version=dep_ver, dag=dag)
dep_pkg_ver_tasks.add(pkg2op[dep_pkg_ver_id])
# default trigger rule is all_success
# use all_done instead
pkg_ver_task << list(dep_pkg_ver_tasks)
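# Illustrative note (assumption, not wired into get_bash_op above): to run an analysis
# task even when some of its dependency tasks fail, the all_done trigger rule would have
# to be set on the operator, roughly:
#   BashOperator(task_id=..., bash_command=..., trigger_rule='all_done', dag=dag)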
|
py | 7dfde82ee531aa271567d5d60b80815917991e7b | """Unit test package for domicolor."""
|
py | 7dfde83496e8bc39b1914f70e2107b416523362c | """
The three ways to traverse a binary tree (preorder, inorder, postorder)
https://leetcode-cn.com/problems/binary-tree-preorder-traversal/
https://leetcode-cn.com/problems/binary-tree-inorder-traversal/
https://leetcode-cn.com/problems/binary-tree-postorder-traversal/
Approach: the iterative versions fully mimic the system call stack; see
https://leetcode-cn.com/problems/binary-tree-postorder-traversal/solution/mo-fang-di-gui-zhi-bian-yi-xing-by-sonp/417844
"""
from typing import List
class TreeNode:
def __init__(self, x: int):
self.val = x
self.left = None
self.right = None
# Preorder traversal, recursive
def preR(root: TreeNode) -> List[int]:
return [root.val] + preR(root.left) + preR(root.right) if root else []
# Preorder traversal, iterative
def pre(root: TreeNode) -> List[int]:
if not root:
return []
stack, res = [root], []
while stack:
node = stack.pop()
if not node:
node = stack.pop()
res.append(node.val)
else:
if node.right:
stack.append(node.right)
if node.left:
stack.append(node.left)
stack.append(node)
stack.append(None)
return res
# Inorder traversal, recursive
def inR(root: TreeNode) -> List[int]:
return inR(root.left) + [root.val] + inR(root.right) if root else []
# Inorder traversal, iterative
def ino(root: TreeNode) -> List[int]:
if not root:
return []
stack, res = [root], []
while stack:
node = stack.pop()
if not node:
node = stack.pop()
res.append(node.val)
else:
if node.right:
stack.append(node.right)
stack.append(node)
stack.append(None)
if node.left:
stack.append(node.left)
return res
# Postorder traversal, recursive
def postR(root: TreeNode) -> List[int]:
return postR(root.left) + postR(root.right) + [root.val] if root else []
# Postorder traversal, iterative
def post(root: TreeNode) -> List[int]:
if not root:
return []
stack, res = [root], []
while stack:
node = stack.pop()
if not node:
node = stack.pop()
res.append(node.val)
else:
stack.append(node)
stack.append(None)
if node.right:
stack.append(node.right)
if node.left:
stack.append(node.left)
return res
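# Usage example (added for illustration): build the small tree 1 -> right 2 -> left 3
# and print the three traversals.
if __name__ == "__main__":
    root = TreeNode(1)
    root.right = TreeNode(2)
    root.right.left = TreeNode(3)
    print(pre(root))    # [1, 2, 3]
    print(ino(root))    # [1, 3, 2]
    print(post(root))   # [3, 2, 1]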
|