# Program that adds, multiplies and divides two numbers and shows the results at the end
n1 = int(input('Type a number: '))
n2 = int(input('Type another number: '))
n = n1 + n2
print('The sum of {} and {} is {}!'.format(n1, n2, n))
n = n1 * n2
print('The product of {} and {} is {}!'.format(n1, n2, n))
n = n1 / n2
print('The division of {} by {} is {}! Thanks for using our program :)'.format(n1, n2, n)) |
import os
import requests
import yaml
import re
import json
from pathlib import Path
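# Build manifest.model.json from manifest.model.yaml: resolve relative application
# URLs against url_root, fetch each model's config_url, keep only the preserved
# keys, and write the combined manifest one directory above this script.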
compiled_models = []
preserved_keys = ["config_url", "applications", "download_url", "name", "description", "cite", "authors", "documentation", "tags", "covers"]
assert 'url' not in preserved_keys
models_yaml_file = Path(__file__).parent / "manifest.model.yaml"
models_yaml = yaml.safe_load(models_yaml_file.read_text())
compiled_apps = {}
for k in models_yaml['applications']:
app_url = models_yaml['applications'][k]
if not app_url.startswith('http'):
app_url = models_yaml['applications'][k].strip('/').strip('./')
app_url = models_yaml['url_root'].strip('/') + '/' + app_url
compiled_apps[k] = app_url
for item in models_yaml['models']:
config_url = item['config_url']
root_url = '/'.join(config_url.split('/')[:-1])
response = requests.get(config_url)
if response.status_code != 200:
print('Failed to fetch model config from ' + config_url)
continue
model_config = yaml.safe_load(response.content)
# merge item from models.yaml to model config
model_config.update(item)
model_info = {"root_url": root_url}
for k in model_config:
# normalize relative path
if k in ['documentation']:
model_config[k] = model_config[k].strip('/').strip('./')
if k == 'covers':
for j in range(len(model_config[k])):
model_config[k][j] = model_config[k][j].strip('/').strip('./')
if k in preserved_keys:
model_info[k] = model_config[k]
compiled_models.append(model_info)
compiled_models.sort(key=lambda m: m['name'], reverse=True)
with (Path(__file__).parent / "../manifest.model.json").open("wb") as f:
models_yaml['models'] = compiled_models
models_yaml['applications'] = compiled_apps
f.write(json.dumps(models_yaml, indent=2, separators=(',', ': ')).encode('utf-8'))
|
# MIT License
#
# Copyright (c) 2018 Benjamin Bueno (bbueno5000)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
A collection of random agents
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from pysc2.agents import random_agent
class RandomAgent001(random_agent.RandomAgent):
"""
Generic Random Agent.
"""
def __init__(self):
super(RandomAgent001, self).__init__()
self.results = {}
self.results['agent_id'] = "RandomAgent"
self.results['episode_data'] = {'episode_lengths': [], 'episode_rewards': []}
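# reset() records the just-finished episode's length and reward, then clears the counters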
def reset(self):
super(RandomAgent001, self).reset()
self.results['episode_data']['episode_lengths'].append(self.steps)
self.results['episode_data']['episode_rewards'].append(self.reward)
self.reward = 0
self.steps = 0
class RandomAgent002:
"""
OpenAI Gym Random Agent.
"""
def __init__(self, action_space):
self.action_space = action_space
def act(self, observation, reward, done):
return self.action_space.sample()
|
#! /usr/bin/python
# Filename: function2.py
# Description: This script is used to test function with parameters.
def funcWithParameter(a, b):
if a > b:
print 'max number is a =', a
elif a < b:
print 'max number is b=', b
else:
print 'equal'
x = 4
y = 8
funcWithParameter(x,y)
|
import discord
import update_demonlist as update
import commit_player_record as pcommit
import commit_player_record_list as plcommit
import commit_creator_record as ccommit
import delete_player_record as pdelete
import delete_creator_record as cdelete
import show_player_record as p
import show_creator_record as c
import modify_creator_record as cmodify
import const
# Channel IDs
DEMONLIST = const.DEMONLIST_ID
PRECORD = const.PRECORD_ID
CRECORD = const.CRECORD_ID
TOKEN = const.TOKEN
PPATH = const.PPATH
CPATH = const.CPATH
AVATAR = const.AVATAR
PAR = const.PAR
dstr = ''
pstr = ''
cstr = ''
client = discord.Client()
@client.event
async def on_message(message):
global dstr
global pstr
global cstr
# Get the message content and its prefix
msg = message.content
prefix = msg[:2]
# Roles of the user who sent the message
roles = message.author.roles
permi = roles[-1].permissions
# Channel setup
precord_data = client.get_channel(PRECORD)
crecord_data = client.get_channel(CRECORD)
# Ignore messages sent by bots
if message.author.bot:
return
# Treat the message as a command if the prefix is r!
if prefix == 'r!':
# First token after the prefix, e.g. pcommit in r!pcommit
command = msg[2:].split()
# Branch on the command
match(command[0]):
# 'p': show player records
case 'p':
record_list = p.get_player_record(pstr)
for list in record_list:
for record in list:
await message.channel.send('>>> '+ record)
# 'pcommit': add a record
case 'pcommit':
if permi.manage_channels != True:
await message.channel.send(">>> **このコマンドはヘルパー以上の役職を持っていないと使うことはできません**")
return
new_precord, is_exist, changed = pcommit.commit_player_record(msg[len('r!pcommit '):], pstr)
# print(new_precord)
with open(PPATH, "w") as record_file:
record_file.write(new_precord)
await precord_data.send(file=discord.File(PPATH))
if is_exist:
await message.channel.send(">>> プレイヤーは既に追加されています")
elif not changed:
await message.channel.send(">>> 指定されたレベルはリスト上に存在しません。圏外であれば`-1`オプションを付けてください")
else:
pstr = update.update_list(dstr, new_precord)
await message.channel.send(">>> レコードを更新しました")
# 'plcommit': add records from a list
case 'plcommit':
if permi.manage_channels != True:
await message.channel.send(">>> **このコマンドはヘルパー以上の役職を持っていないと使うことはできません**")
return
new_precord, player_exists, not_changed = plcommit.commit_player_record_list(msg[len('r!plcommit '):], pstr)
with open(PPATH,"w") as record_file:
record_file.write(new_precord)
await precord_data.send(file=discord.File(PPATH))
# Show levels where the player has already been added
if len(player_exists) != 0:
await message.channel.send(">>> 以下のレベルについてプレイヤーは既に追加されています")
player_exists_level_list = ''
for level in player_exists:
player_exists_level_list += level + ' '
await message.channel.send(">>> " + player_exists_level_list + "\n")
# Show levels that do not exist on the list
if len(not_changed) != 0:
await message.channel.send(">>> 以下のレベルはリストに存在しません。リスト内であればレベル名の確認、圏外であれば各個'pcommit'を使ってレコードを追加してください")
levels_not_exist_on_list = ''
for level in not_changed:
levels_not_exist_on_list += level + ' '
await message.channel.send(">>> " + levels_not_exist_on_list + "\n")
# Update the list
pstr = update.update_list(dstr, new_precord)
await message.channel.send(">>> レコードを更新しました")
# 'pdelete': delete a record
case 'pdelete':
if permi.manage_channels != True:
await message.channel.send(">>> **このコマンドはヘルパー以上の役職を持っていないと使うことはできません**")
return
new_precord, changed = pdelete.delete_player_record(msg[len('r!pdelete '):], pstr)
with open(PPATH, "w") as record_file:
record_file.write(new_precord)
if not changed:
await message.channel.send(">>> 該当のレベルもしくはプレイヤーは存在しません")
else:
pstr = update.update_list(dstr, new_precord)
await precord_data.send(file=discord.File(PPATH))
await message.channel.send(">>> レコードを更新しました")
# 'c': show creator records
case 'c':
record_list = c.get_creator_record(cstr)
embed=discord.Embed(
title='Creators Record List',
color=0x00ff00,
)
for record in record_list:
level, creator, icon, id, link, has_link = record
if has_link:
embed.add_field(
name=(PAR + level + ' By ' + creator + ' ' + icon),
value=('ID: ' + id + '\n' + link),
inline=False
)
else:
embed.add_field(
name=(PAR + level + ' By ' + creator + ' ' + icon),
value=('ID: ' + id),
inline=False
)
await message.channel.send(embed=embed)
# 'ccommit': add a record
case 'ccommit':
if permi.manage_channels != True:
await message.channel.send(">>> **このコマンドはヘルパー以上の役職を持っていないと使うことはできません**")
return
new_crecord, found = ccommit.commit_creator_record(msg[len('r!ccommit '):], cstr)
with open(CPATH, "w") as record_file:
record_file.write(new_crecord)
if not found:
await message.channel.send(">>> 該当のレベルは存在しません")
else:
await message.channel.send(">>> レコードを更新しました")
await crecord_data.send(file=discord.File(CPATH))
cstr = new_crecord
# 'cdelete': delete a record
case 'cdelete':
if permi.manage_channels != True:
await message.channel.send(">>> **このコマンドはヘルパー以上の役職を持っていないと使うことはできません**")
return
new_crecord, deleted = cdelete.delete_creator_record(msg[len('r!cdelete '):], cstr)
with open(CPATH, "w") as record_file:
record_file.write(new_crecord)
await crecord_data.send(file=discord.File(CPATH))
if deleted:
await message.channel.send(">>> レコードを更新しました")
cstr = new_crecord
else:
await message.channel.send(">>> 該当のレベルは存在しません")
# 'cmodify': edit a record
case 'cmodify':
if permi.manage_channels != True:
await message.channel.send(">>> **このコマンドはヘルパー以上の役職を持っていないと使うことはできません**")
return
new_crecord, changed, invalid_option = cmodify.modify_creator_record(msg[len('r!cmodify '):], cstr)
if invalid_option:
await message.channel.send(">>> オプションが無効です。(使用可能オプション: 'LNAME' 'CNAME', 'CICON', 'ID', 'LINK')")
elif not changed:
await message.channel.send(">>> 該当のレベルは存在しません")
else:
with open(CPATH, "w") as record_file:
record_file.write(new_crecord)
await crecord_data.send(file=discord.File(CPATH))
await message.channel.send(">>> レコードを更新しました")
cstr = new_crecord
# 'update': fetch the demonlist and update
case 'update':
if permi.manage_channels != True:
await message.channel.send(">>> **このコマンドはヘルパー以上の役職を持っていないと使うことはできません**")
return
demonlist_data = client.get_channel(DEMONLIST)
did = demonlist_data.last_message_id
pid = precord_data.last_message_id
demonlist = await demonlist_data.fetch_message(did)
precord = await precord_data.fetch_message(pid)
dbyte_data = await demonlist.attachments[0].read()
pbyte_data = await precord.attachments[0].read()
dstr = dbyte_data.decode()
pstr = pbyte_data.decode()
pstr = update.update_list(dstr, pstr)
print(pstr)
await message.channel.send(">>> Demonlistをアップデートしました")
# Input is not a valid command
case _:
await message.channel.send(">>> 有効なコマンドではありません")
pass
# Start the bot and connect to the Discord server
client.run(TOKEN)
|
def autoplot(server, dataset, parameters, start, stop, **kwargs):
"""Plot data from a HAPI server using Autoplot.
If not found, autoplot.jar is downloaded and launched. If found,
autoplot.jar is updated if the server version is newer than the cached version.
Example
-------
>>> from hapiclient import autoplot
>>> server = 'http://hapi-server.org/servers/TestData/hapi'
>>> autoplot(server, 'dataset1', 'scalar,vector', '1970-01-01', '1970-01-02')
Autoplot application launches or its canvas is updated.
The options are the same as those for `hapiplot` with the addition of
the kwargs
stack : bool [False] Create a stack plot of parameters.
port : int [8079]
The port number to use to connect to Autoplot.
version : string ['devel']
The version of Autoplot to use. Can be a version string, e.g.,
'v2018a_11', 'devel', 'latest', or 'nightly'. See
<http://autoplot.org/developer#Development_Versions> for a
description of the difference between versions.
"""
import os
import re
import platform
import subprocess
from hapiclient.util import setopts, log, urlopen, download, urlquote
from hapiclient.hapi import cachedir
opts = {
'logging': False,
'cache': True,
'cachedir': cachedir(),
'usecache': False,
'newwindow': False,
'version': 'devel',
'port': 8079
}
# Override defaults
opts = setopts(opts, kwargs)
autoplotserver = "http://localhost:" + str(opts['port']) + "/"
url = server + "?id=" + dataset + "¶meters=" + parameters
url = url + "&timerange=" + start + "/" + stop
serverrunning = False
try:
# See if server needs to be started.
if opts['logging']: log('Trying test. Requesting ' + autoplotserver, opts)
f = urlopen(autoplotserver)
res = f.read().decode('utf-8')
if res.startswith('OK'):
log('Server running.', opts)
serverrunning = True
else:
log('Server responding but with wrong response to test.', opts)
f.close()
except Exception as e:
#print(e)
log('Server not running. Will start server.', opts)
print(url)
if serverrunning:
# Send request to update GUI.
try:
# This won't detect if the version requested matches
# the version running.
rurl = autoplotserver + "?uri=" + urlquote("vap+hapi:"+ url)
if opts['logging']: print("autoplot(): Requesting " + rurl)
log('Autoplot GUI should be updating.', opts)
f = urlopen(rurl)
res = f.read().decode('utf-8')
if res.startswith('OK'):
log('Request successful. Autoplot GUI updated.', opts)
f.close()
return
else:
f.close()
log('Request unsuccessful.', opts)
serverrunning = False
except Exception as e:
print(e)
# Request was sent, so return.
if serverrunning == True: return
if opts['version'] == 'nightly':
jarurl = 'https://ci-pw.physics.uiowa.edu/job/autoplot-release/lastSuccessfulBuild/artifact/autoplot/Autoplot/dist/autoplot.jar'
elif opts['version'] == 'devel':
jarurl = 'http://autoplot.org/jnlp/devel/autoplot.jar'
elif opts['version'].startswith('v'):
jarurl = 'http://autoplot.org/jnlp/'+opts['version']+'/autoplot.jar'
else:
opts['version'] = 'latest'
jarurl = 'http://autoplot.org/jnlp/latest/autoplot.jar'
try:
result = subprocess.check_output('java -version', shell=True, stderr=subprocess.STDOUT)
version = re.sub(r'.*"(.*)".*',r'\1', result.decode().split('\n')[0])
log("Java version: " + version, opts)
except:
log("Java is required. See https://www.java.com/en/download/", opts)
return
jydir = os.path.dirname(os.path.realpath(__file__))
jarpath = os.path.join(opts['cachedir'], 'jar/autoplot-' + opts['version'] + '.jar')
jaricon = os.path.join(jydir, 'autoplot.png')
# Download jar file if needed.
log('Checking if autoplot.jar needs to be downloaded or updated.', opts)
download(jarpath, jarurl, **opts)
com = "java"
if 'darwin' in platform.platform().lower():
com = com + " -Xdock:icon=" + jaricon
com = com + ' -Xdock:name="Autoplot"'
com = com + " -DPORT=" + str(opts['port'])
com = com + " -DHAPI_DATA=" + opts['cachedir']
com = com + " -DhapiServerCache=true"
com = com + " -jar " + jarpath
com = com + " --noAskParams"
com = com + " '" + os.path.join(jydir, 'server.jy?uri=')
com = com + urlquote("vap+hapi:"+ url) + "'"
com = com + " &"
if opts['logging']: log("Executing " + com, opts)
os.system(com)
# TODO: Show console output?
|
# Generated by Django 2.1.5 on 2020-07-24 05:14
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('organization', '0002_service'),
]
operations = [
migrations.AlterField(
model_name='service',
name='ticket',
field=models.CharField(default='6EBOVLWEBD02EPEVZRXQ3TTJO4DNWQ7L2UPQWVSHJIQGYEEKBW', max_length=50),
),
]
|
#!/usr/bin/env python
from __future__ import print_function
import argparse
import random
import sys
p = argparse.ArgumentParser()
p.add_argument('-n', type=int, default=100)
p.add_argument('--seed', type=int, default=1234)
args = p.parse_args()
random.seed(args.seed)
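# Reservoir sampling: keep the first n lines, then replace a random kept line with
# decreasing probability so every input line is equally likely to end up in the sample.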
sample = []
for index, line in enumerate(sys.stdin):
if index < args.n:
sample.append(line.strip())
else:
r = random.randint(0, index)
if r < args.n:
sample[r] = line.strip()
for line in sample:
print(line)
|
# Copyright 2015 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import platform
from shutil import which
import subprocess
import sys
import time
# Make sure we're using Python3
assert sys.version.startswith('3'), "This script is only meant to work with Python3"
# Make sure to get osrf_pycommon from the vendor folder
vendor_path = os.path.abspath(os.path.join(os.path.dirname(__file__), 'vendor'))
sys.path.insert(0, os.path.join(vendor_path, 'osrf_pycommon'))
import osrf_pycommon
# Assert that we got it from the right place
assert osrf_pycommon.__file__.startswith(vendor_path), \
("osrf_pycommon imported from '{0}' which is not in the vendor folder '{1}'"
.format(osrf_pycommon.__file__, vendor_path))
from osrf_pycommon.cli_utils.common import extract_argument_group
from osrf_pycommon.terminal_color import sanitize
from .packaging import build_and_test_and_package
from .util import change_directory
from .util import remove_folder
from .util import force_color
from .util import generated_venv_vars
from .util import info
from .util import log
from .util import UnbufferedIO
# Enforce unbuffered output
sys.stdout = UnbufferedIO(sys.stdout)
sys.stderr = UnbufferedIO(sys.stderr)
pip_dependencies = [
'EmPy',
'coverage',
'catkin_pkg',
'flake8',
'flake8-blind-except==0.1.1',
'flake8-builtins',
'flake8-class-newline',
'flake8-comprehensions',
'flake8-deprecated',
'flake8-docstrings',
'flake8-import-order',
'flake8-quotes',
'importlib-metadata',
'lark-parser',
'mock',
'mypy',
'nose',
'pep8',
'pydocstyle',
'pyflakes',
'pyparsing',
'pytest',
'pytest-cov',
'pytest-mock',
'pytest-repeat',
'pytest-rerunfailures',
'pytest-runner',
'pyyaml',
'vcstool',
]
# https://github.com/pyca/cryptography/issues/5433
pip_cryptography_version = '==3.0'
if sys.platform == 'darwin':
pip_dependencies += [
f'cryptography{pip_cryptography_version}',
'lxml',
'netifaces'
]
colcon_packages = [
'colcon-core',
'colcon-defaults',
'colcon-library-path',
'colcon-metadata',
'colcon-mixin',
'colcon-output',
'colcon-package-information',
'colcon-package-selection',
'colcon-parallel-executor',
'colcon-powershell',
'colcon-python-setup-py',
'colcon-recursive-crawl',
'colcon-test-result',
'colcon-cmake',
'colcon-ros',
]
if sys.platform != 'win32':
colcon_packages += [
'colcon-bash',
'colcon-zsh',
]
gcov_flags = '--coverage'
colcon_space_defaults = {
'sourcespace': 'src',
'buildspace': 'build',
'installspace': 'install',
}
def main(sysargv=None):
args = get_args(sysargv=sysargv)
blacklisted_package_names = []
if not args.packaging:
build_function = build_and_test
blacklisted_package_names += [
'actionlib_msgs',
'common_interfaces',
'cv_bridge',
'opencv_tests',
'ros1_bridge',
'shape_msgs',
'stereo_msgs',
'vision_opencv',
]
else:
build_function = build_and_test_and_package
if sys.platform in ('darwin', 'win32'):
blacklisted_package_names += [
'pendulum_control',
'ros1_bridge',
'rttest',
'tlsf',
'tlsf_cpp',
]
# There are no Windows debug packages available for PyQt5 and PySide2, so
# python_qt_bindings can't be imported to run or test rqt_graph or
# rqt_py_common.
if sys.platform == 'win32' and args.cmake_build_type == 'Debug':
blacklisted_package_names.append('rqt_graph')
blacklisted_package_names.append('rqt_py_common')
blacklisted_package_names.append('rqt_reconfigure')
# TODO(wjwwood): remove this when a better solution is found, as
# this is just a work around for https://github.com/ros2/build_cop/issues/161
# If on Windows, kill any still running `colcon` processes to avoid
# problems when trying to delete files from pip or the workspace during
# this job.
if sys.platform == 'win32':
os.system('taskkill /f /im colcon.exe')
time.sleep(2) # wait a bit to avoid a race
return run(args, build_function, blacklisted_package_names=blacklisted_package_names)
def get_args(sysargv=None):
parser = argparse.ArgumentParser(
description="Builds the ROS2 repositories as a single batch job")
parser.add_argument(
'--packaging', default=False, action='store_true',
help='create an archive of the install space')
parser.add_argument(
'--repo-file-url', required=True,
help="url of the ros2.repos file to fetch and use for the basis of the batch job")
parser.add_argument(
'--supplemental-repo-file-url', default=None,
help="url of a .repos file to fetch and merge with the ros2.repos file")
parser.add_argument(
'--test-branch', default=None,
help="branch to attempt to checkout before doing batch job")
parser.add_argument(
'--colcon-branch', default=None,
help='Use a specific branch of the colcon repositories, if the branch '
"doesn't exist fall back to the default branch (default: latest "
'release)')
parser.add_argument(
'--white-space-in', nargs='*', default=[],
choices=['sourcespace', 'buildspace', 'installspace', 'workspace'],
help="which folder structures in which white space should be added")
parser.add_argument(
'--do-venv', default=False, action='store_true',
help="create and use a virtual env in the build process")
parser.add_argument(
'--os', default=None, choices=['linux', 'osx', 'windows'])
parser.add_argument(
'--ignore-rmw', nargs='*', default=[],
help='ignore the passed RMW implementations as well as supporting packages')
parser.add_argument(
'--connext-debs', default=False, action='store_true',
help="use Debian packages for Connext instead of binaries off the RTI website (Linux only)")
parser.add_argument(
'--isolated', default=False, action='store_true',
help="build and install each package a separate folders")
parser.add_argument(
'--force-ansi-color', default=False, action='store_true',
help="forces this program to output ansi color")
parser.add_argument(
'--ros-distro', required=True,
help="The ROS distribution being built")
parser.add_argument(
'--ros1-path', default=None,
help="path of ROS 1 workspace to be sourced")
parser.add_argument(
'--mixed-ros-overlay-pkgs', nargs='*', default=[],
help='space separated list of packages to be built in an overlay workspace with ROS 1')
parser.add_argument(
'--colcon-mixin-url', default=None,
help='A mixin index url to be included by colcon')
parser.add_argument(
'--cmake-build-type', default=None,
help='select the CMake build type')
parser.add_argument(
'--build-args', default=None,
help="arguments passed to the 'build' verb")
parser.add_argument(
'--test-args', default=None,
help="arguments passed to the 'test' verb")
parser.add_argument(
'--src-mounted', default=False, action='store_true',
help="src directory is already mounted into the workspace")
parser.add_argument(
'--compile-with-clang', default=False, action='store_true',
help="compile with clang instead of gcc")
parser.add_argument(
'--coverage', default=False, action='store_true',
help="enable collection of coverage statistics")
parser.add_argument(
'--workspace-path', default=None,
help="base path of the workspace")
parser.add_argument(
'--python-interpreter', default=None,
help='pass different Python interpreter')
parser.add_argument(
'--visual-studio-version', default=None, required=(os.name == 'nt'),
help='select the Visual Studio version')
parser.add_argument(
'--source-space', dest='sourcespace',
help='source directory path')
parser.add_argument(
'--build-space', dest='buildspace',
help='build directory path')
parser.add_argument(
'--install-space', dest='installspace',
help='install directory path')
argv = sysargv[1:] if sysargv is not None else sys.argv[1:]
argv, build_args = extract_argument_group(argv, '--build-args')
if '--test-args' in argv:
argv, test_args = extract_argument_group(argv, '--test-args')
else:
build_args, test_args = extract_argument_group(build_args, '--test-args')
args = parser.parse_args(argv)
args.build_args = build_args
args.test_args = test_args
for name in ('sourcespace', 'buildspace', 'installspace'):
space_directory = getattr(args, name)
if name in args.white_space_in and space_directory is not None:
raise Exception('Argument {} and "--white-space-in" cannot both be used'.format(name))
elif space_directory is None:
space_directory = colcon_space_defaults[name]
if name in args.white_space_in:
space_directory += ' space'
setattr(args, name, space_directory)
return args
def process_coverage(args, job):
print('# BEGIN SUBSECTION: coverage analysis')
# Capture all gdca/gcno files (all them inside buildspace)
coverage_file = os.path.join(args.buildspace, 'coverage.info')
cmd = [
'lcov',
'--capture',
'--directory', args.buildspace,
'--output', str(coverage_file)]
print(cmd)
subprocess.run(cmd, check=True)
# Filter out system coverage and test code
cmd = [
'lcov',
'--remove', coverage_file,
'--output', coverage_file,
'/usr/*', # no system files in reports
'/home/rosbuild/*', # remove rti_connext installed in rosbuild
'*/test/*',
'*/tests/*',
'*gtest_vendor*',
'*gmock_vendor*']
print(cmd)
subprocess.run(cmd, check=True)
# Transform results to the cobertura format
outfile = os.path.join(args.buildspace, 'coverage.xml')
print('Writing coverage.xml report at path {}'.format(outfile))
cmd = ['lcov_cobertura', coverage_file, '--output', outfile]
subprocess.run(cmd, check=True)
print('# END SUBSECTION')
return 0
def build_and_test(args, job):
compile_with_clang = args.compile_with_clang and args.os == 'linux'
print('# BEGIN SUBSECTION: build')
cmd = [
args.colcon_script, 'build',
'--base-paths', '"%s"' % args.sourcespace,
'--build-base', '"%s"' % args.buildspace,
'--install-base', '"%s"' % args.installspace,
] + (['--merge-install'] if not args.isolated else []) + \
args.build_args
cmake_args = ['-DBUILD_TESTING=ON', '--no-warn-unused-cli']
if args.cmake_build_type:
cmake_args.append(
'-DCMAKE_BUILD_TYPE=' + args.cmake_build_type)
if compile_with_clang:
cmake_args.extend(
['-DCMAKE_C_COMPILER=/usr/bin/clang', '-DCMAKE_CXX_COMPILER=/usr/bin/clang++'])
if '--cmake-args' in cmd:
index = cmd.index('--cmake-args')
cmd[index + 1:index + 1] = cmake_args
else:
cmd.append('--cmake-args')
cmd.extend(cmake_args)
if args.coverage:
if args.os == 'linux':
ament_cmake_args = [
'-DCMAKE_CXX_FLAGS="${CMAKE_CXX_FLAGS} ' + gcov_flags + '"',
'-DCMAKE_C_FLAGS="${CMAKE_C_FLAGS} ' + gcov_flags + '"']
if '--ament-cmake-args' in cmd:
index = cmd.index('--ament-cmake-args')
cmd[index + 1:index + 1] = ament_cmake_args
else:
cmd.append('--ament-cmake-args')
cmd.extend(ament_cmake_args)
ret_build = job.run(cmd, shell=True)
info("colcon build returned: '{0}'".format(ret_build))
print('# END SUBSECTION')
if ret_build:
return ret_build
print('# BEGIN SUBSECTION: test')
test_cmd = [
args.colcon_script, 'test',
'--base-paths', '"%s"' % args.sourcespace,
'--build-base', '"%s"' % args.buildspace,
'--install-base', '"%s"' % args.installspace,
]
if not args.isolated:
test_cmd.append('--merge-install')
if args.coverage:
test_cmd.append('--pytest-with-coverage')
test_cmd.extend(args.test_args)
# In Foxy and prior, xunit2 format is needed to make Jenkins xunit plugin 2.x happy
# After Foxy, we introduced per-package changes to make local builds and CI
# builds act the same.
if args.ros_distro in ('dashing', 'eloquent', 'foxy'):
pytest_args = ['-o', 'junit_family=xunit2']
# We should only have one --pytest-args option, or some options might get ignored
if '--pytest-args' in test_cmd:
pytest_opts_index = test_cmd.index('--pytest-args') + 1
test_cmd = test_cmd[:pytest_opts_index] + pytest_args + test_cmd[pytest_opts_index:]
else:
test_cmd.append('--pytest-args')
test_cmd.extend(pytest_args)
ret_test = job.run(test_cmd, exit_on_error=False, shell=True)
info("colcon test returned: '{0}'".format(ret_test))
print('# END SUBSECTION')
if ret_test:
return ret_test
print('# BEGIN SUBSECTION: test-result --all')
# Collect the test results
ret_test_results = job.run(
[args.colcon_script, 'test-result', '--test-result-base', '"%s"' % args.buildspace, '--all'],
exit_on_error=False, shell=True
)
info("colcon test-result returned: '{0}'".format(ret_test_results))
print('# END SUBSECTION')
print('# BEGIN SUBSECTION: test-result')
# Collect the test results
ret_test_results = job.run(
[args.colcon_script, 'test-result', '--test-result-base', '"%s"' % args.buildspace],
exit_on_error=False, shell=True
)
info("colcon test-result returned: '{0}'".format(ret_test_results))
print('# END SUBSECTION')
if args.coverage and args.os == 'linux':
process_coverage(args, job)
# Uncomment this line to make failing tests a failure of this command.
# return 0 if ret_test == 0 and ret_test_results == 0 else 1
return 0
def run(args, build_function, blacklisted_package_names=None):
if blacklisted_package_names is None:
blacklisted_package_names = []
if args.force_ansi_color:
force_color()
info("run_ros2_batch called with args:")
for arg in vars(args):
info(sanitize(" - {0}={1}".format(arg, getattr(args, arg))))
job = None
args.workspace = 'work space' if 'workspace' in args.white_space_in else 'ws'
platform_name = platform.platform().lower()
if args.os == 'linux' or platform_name.startswith('linux'):
args.os = 'linux'
from .linux_batch import LinuxBatchJob
job = LinuxBatchJob(args)
elif args.os == 'osx' or platform_name.startswith('darwin') or platform_name.startswith('macos'):
args.os = 'osx'
from .osx_batch import OSXBatchJob
job = OSXBatchJob(args)
elif args.os == 'windows' or platform_name.startswith('windows'):
args.os = 'windows'
from .windows_batch import WindowsBatchJob
job = WindowsBatchJob(args)
if args.do_venv and args.os == 'windows':
sys.exit("--do-venv is not supported on windows")
# Set the TERM env variable to coerce the output of Make to be colored.
os.environ['TERM'] = os.environ.get('TERM', 'xterm-256color')
if args.os == 'windows':
# Set the ConEmuANSI env variable to trick some programs (vcs) into
# printing ANSI color codes on Windows.
os.environ['ConEmuANSI'] = 'ON'
# Set the appropriate GIT_* env variables in case vcs needs to merge branches
os.environ['GIT_AUTHOR_EMAIL'] = '[email protected]'
os.environ['GIT_AUTHOR_NAME'] = 'nobody'
os.environ['GIT_COMMITTER_EMAIL'] = '[email protected]'
os.environ['GIT_COMMITTER_NAME'] = 'nobody'
info("Using workspace: @!{0}", fargs=(args.workspace,))
# git doesn't work reliably inside qemu, so we're assuming that somebody
# already checked out the code on the host and mounted it in at the right
# place in <workspace>/src, which we don't want to remove here.
if args.src_mounted:
remove_folder(os.path.join(args.workspace, 'build'))
remove_folder(os.path.join(args.workspace, 'install'))
else:
remove_folder(args.workspace)
if not os.path.isdir(args.workspace):
os.makedirs(args.workspace)
# Allow batch job to do OS specific stuff
job.pre()
# ROS_DOMAIN_ID must be unique to each CI machine on a network to avoid crosstalk
if 'ROS_DOMAIN_ID' not in os.environ:
raise KeyError('ROS_DOMAIN_ID environment variable must be set')
# Check the env
job.show_env()
colcon_script = None
# Enter a venv if asked to, the venv must be in a path without spaces
if args.do_venv:
print('# BEGIN SUBSECTION: enter virtualenv')
if args.os != 'linux':
# Do not try this on Linux as elevated privileges are needed.
# The Linux host or Docker image will need to ensure the right
# version of virtualenv is available.
job.run([sys.executable, '-m', 'pip', 'install', '-U', 'virtualenv==16.7.9'])
venv_subfolder = 'venv'
remove_folder(venv_subfolder)
job.run([
sys.executable, '-m', 'virtualenv', '--system-site-packages',
'-p', sys.executable, venv_subfolder])
venv_path = os.path.abspath(os.path.join(os.getcwd(), venv_subfolder))
venv, venv_python = generated_venv_vars(venv_path)
job.push_run(venv) # job.run is now venv
job.push_python(venv_python) # job.python is now venv_python
job.show_env()
print('# END SUBSECTION')
# Now inside of the workspace...
with change_directory(args.workspace):
print('# BEGIN SUBSECTION: install Python packages')
# Update setuptools
job.run(['"%s"' % job.python, '-m', 'pip', 'install', '-U', 'pip', 'setuptools'],
shell=True)
# Print setuptools version
job.run(['"%s"' % job.python, '-c', '"import setuptools; print(setuptools.__version__)"'],
shell=True)
# Print the pip version
job.run(['"%s"' % job.python, '-m', 'pip', '--version'], shell=True)
# Install pip dependencies
pip_packages = list(pip_dependencies)
if sys.platform == 'win32':
# Install fork of pyreadline containing fix for deprecation warnings
# TODO(jacobperron): Until upstream issue is resolved https://github.com/pyreadline/pyreadline/issues/65
pip_packages += ['git+https://github.com/osrf/pyreadline']
if args.cmake_build_type == 'Debug':
if args.ros_distro in ['dashing', 'eloquent']:
pip_packages += [
'https://github.com/ros2/ros2/releases/download/cryptography-archives/cffi-1.12.3-cp37-cp37dm-win_amd64.whl', # required by cryptography
'https://github.com/ros2/ros2/releases/download/cryptography-archives/cryptography-2.7-cp37-cp37dm-win_amd64.whl',
'https://github.com/ros2/ros2/releases/download/lxml-archives/lxml-4.3.2-cp37-cp37dm-win_amd64.whl',
'https://github.com/ros2/ros2/releases/download/netifaces-archives/netifaces-0.10.9-cp37-cp37dm-win_amd64.whl',
'https://github.com/ros2/ros2/releases/download/numpy-archives/numpy-1.16.2-cp37-cp37dm-win_amd64.whl',
'https://github.com/ros2/ros2/releases/download/typed-ast-archives/typed_ast-1.4.0-cp37-cp37dm-win_amd64.whl', # required by mypy
]
else:
pip_packages += [
'https://github.com/ros2/ros2/releases/download/cryptography-archives/cffi-1.14.0-cp38-cp38d-win_amd64.whl', # required by cryptography
'https://github.com/ros2/ros2/releases/download/cryptography-archives/cryptography-2.9.2-cp38-cp38d-win_amd64.whl',
'https://github.com/ros2/ros2/releases/download/lxml-archives/lxml-4.5.1-cp38-cp38d-win_amd64.whl',
'https://github.com/ros2/ros2/releases/download/netifaces-archives/netifaces-0.10.9-cp38-cp38d-win_amd64.whl',
'https://github.com/ros2/ros2/releases/download/numpy-archives/numpy-1.18.4-cp38-cp38d-win_amd64.whl',
'https://github.com/ros2/ros2/releases/download/typed-ast-archives/typed_ast-1.4.1-cp38-cp38d-win_amd64.whl', # required by mypy
]
else:
pip_packages += [
f'cryptography{pip_cryptography_version}',
'lxml',
'netifaces',
'numpy',
]
if not args.colcon_branch:
pip_packages += colcon_packages
if sys.platform == 'win32':
job.run(
['"%s"' % job.python, '-m', 'pip', 'uninstall', '-y'] +
colcon_packages, shell=True)
# to ensure that the build type specific package is installed
job.run(
['"%s"' % job.python, '-m', 'pip', 'uninstall', '-y'] +
[f'cryptography{pip_cryptography_version}', 'lxml', 'numpy'], shell=True)
pip_cmd = ['"%s"' % job.python, '-m', 'pip', 'install', '-U']
if args.do_venv or sys.platform == 'win32':
# Force reinstall so all dependencies are in virtual environment
# On Windows since we switch between the debug and non-debug
# interpreter all packages need to be reinstalled too
pip_cmd.append('--force-reinstall')
job.run(
pip_cmd + pip_packages,
shell=True)
# OS X can't invoke a file which has a space in the shebang line
# therefore invoking vcs explicitly through Python
if args.do_venv:
vcs_cmd = [
'"%s"' % job.python,
'"%s"' % os.path.join(venv_path, 'bin', 'vcs')]
else:
vcs_cmd = ['vcs']
if args.colcon_branch:
# create .repos file for colcon repositories
os.makedirs('colcon', exist_ok=True)
with open('colcon/colcon.repos', 'w') as h:
h.write('repositories:\n')
for name in colcon_packages:
h.write(' %s:\n' % name)
h.write(' type: git\n')
h.write(
' url: https://github.com/colcon/%s.git\n' % name)
# clone default branches
job.run(
vcs_cmd + [
'import', 'colcon', '--force', '--retry', '5', '--input',
'colcon/colcon.repos'],
shell=True)
# use -b and --track to checkout correctly when file/folder
# with the same name exists
job.run(
vcs_cmd + [
'custom', 'colcon', '--args', 'checkout',
'-b', args.colcon_branch,
'--track', 'origin/' + args.colcon_branch],
exit_on_error=False)
# install colcon packages from local working copies
job.run(
['"%s"' % job.python, '-m', 'pip', 'install', '-U'] +
['colcon/%s' % name for name in colcon_packages],
shell=True)
if sys.platform != 'win32':
colcon_script = os.path.join(venv_path, 'bin', 'colcon')
else:
colcon_script = which('colcon')
args.colcon_script = colcon_script
# Show what pip has
job.run(['"%s"' % job.python, '-m', 'pip', 'freeze', '--all'], shell=True)
print('# END SUBSECTION')
# Fetch colcon mixins
if args.colcon_mixin_url:
true_cmd = 'VER>NUL' if sys.platform == 'win32' else 'true'
job.run([args.colcon_script, 'mixin', 'remove', 'default', '||', true_cmd], shell=True)
job.run([args.colcon_script, 'mixin', 'add', 'default', args.colcon_mixin_url], shell=True)
job.run([args.colcon_script, 'mixin', 'update', 'default'], shell=True)
# Skip git operations on arm because git doesn't work in qemu. Assume
# that somebody has already pulled the code on the host and mounted it
# in.
if not args.src_mounted:
print('# BEGIN SUBSECTION: import repositories')
repos_file_urls = [args.repo_file_url]
if args.supplemental_repo_file_url is not None:
repos_file_urls.append(args.supplemental_repo_file_url)
repos_filenames = []
for index, repos_file_url in enumerate(repos_file_urls):
repos_filename = '{0:02d}-{1}'.format(index, os.path.basename(repos_file_url))
_fetch_repos_file(repos_file_url, repos_filename, job)
repos_filenames.append(repos_filename)
# Use the repository listing and vcstool to fetch repositories
if not os.path.exists(args.sourcespace):
os.makedirs(args.sourcespace)
for filename in repos_filenames:
job.run(vcs_cmd + ['import', '"%s"' % args.sourcespace, '--force', '--retry', '5',
'--input', filename], shell=True)
print('# END SUBSECTION')
if args.test_branch is not None:
print('# BEGIN SUBSECTION: checkout custom branch')
# Store current branch as well-known branch name for later rebasing
info('Attempting to create a well known branch name for all the default branches')
job.run(vcs_cmd + ['custom', '.', '--git', '--args', 'checkout', '-b', '__ci_default'])
# Attempt to switch all the repositories to a given branch
info("Attempting to switch all repositories to the '{0}' branch"
.format(args.test_branch))
# use -b and --track to checkout correctly when file/folder with the same name exists
vcs_custom_cmd = vcs_cmd + [
'custom', '.', '--args', 'checkout',
'-b', args.test_branch, '--track', 'origin/' + args.test_branch]
ret = job.run(vcs_custom_cmd, exit_on_error=False)
info("'{0}' returned exit code '{1}'", fargs=(" ".join(vcs_custom_cmd), ret))
print()
# Attempt to merge the __ci_default branch into the branch.
# This is to ensure that the changes on the branch still work
# when applied to the latest version of the default branch.
info("Attempting to merge all repositories to the '__ci_default' branch")
vcs_custom_cmd = vcs_cmd + ['custom', '.', '--git', '--args', 'merge', '__ci_default']
ret = job.run(vcs_custom_cmd)
info("'{0}' returned exit code '{1}'", fargs=(" ".join(vcs_custom_cmd), ret))
print()
print('# END SUBSECTION')
print('# BEGIN SUBSECTION: repository hashes')
# Show the latest commit log on each repository (includes the commit hash).
job.run(vcs_cmd + ['log', '-l1', '"%s"' % args.sourcespace], shell=True)
print('# END SUBSECTION')
print('# BEGIN SUBSECTION: vcs export --exact')
# Show the output of 'vcs export --exact'
job.run(
vcs_cmd + ['export', '--exact', '"%s"' % args.sourcespace], shell=True,
# if a repo has been rebased against the default branch vcs can't detect the remote
exit_on_error=False)
print('# END SUBSECTION')
# blacklist rmw packages as well as their dependencies where possible
if 'rmw_connext_cpp' in args.ignore_rmw:
blacklisted_package_names += [
'rmw_connext_cpp',
'rosidl_typesupport_connext_c',
'rosidl_typesupport_connext_cpp',
]
if 'rmw_connext_dynamic_cpp' in args.ignore_rmw:
blacklisted_package_names += [
'rmw_connext_dynamic_cpp',
]
if 'rmw_connext_cpp' in args.ignore_rmw: # and 'rmw_connext_dynamic_cpp' in args.ignore_rmw:
blacklisted_package_names += [
'connext_cmake_module',
'rmw_connext_shared_cpp',
]
if 'rmw_cyclonedds_cpp' in args.ignore_rmw:
blacklisted_package_names += [
'cyclonedds',
'cyclonedds_cmake_module',
'rmw_cyclonedds_cpp',
]
if 'rmw_fastrtps_cpp' in args.ignore_rmw:
blacklisted_package_names += [
'rmw_fastrtps_cpp',
'rosidl_typesupport_fastrtps_c',
'rosidl_typesupport_fastrtps_cpp',
]
if 'rmw_fastrtps_dynamic_cpp' in args.ignore_rmw:
blacklisted_package_names += [
'rmw_fastrtps_dynamic_cpp',
]
if 'rmw_fastrtps_cpp' in args.ignore_rmw and 'rmw_fastrtps_dynamic_cpp' in args.ignore_rmw:
blacklisted_package_names += [
'fastcdr',
'fastrtps',
'fastrtps_cmake_module',
'rmw_fastrtps_shared_cpp',
]
if 'rmw_opensplice_cpp' in args.ignore_rmw:
blacklisted_package_names += [
'opensplice_cmake_module',
'rmw_opensplice_cpp',
'rosidl_typesupport_opensplice_c',
'rosidl_typesupport_opensplice_cpp',
]
# Allow the batch job to push custom sourcing onto the run command
job.setup_env()
# create COLCON_IGNORE files in package folders which should not be used
if blacklisted_package_names:
print('# BEGIN SUBSECTION: ignored packages')
print('Trying to ignore the following packages:')
[print('- ' + name) for name in blacklisted_package_names]
output = subprocess.check_output(
[colcon_script, 'list', '--base-paths', args.sourcespace])
for line in output.decode().splitlines():
package_name, package_path, _ = line.split('\t', 2)
if package_name in blacklisted_package_names:
marker_file = os.path.join(package_path, 'COLCON_IGNORE')
print('Create marker file: ' + marker_file)
with open(marker_file, 'w'):
pass
print('# END SUBSECTION')
rc = build_function(args, job)
job.post()
return rc
def _fetch_repos_file(url, filename, job):
"""Use curl to fetch a repos file and display the contents."""
job.run(['curl', '-skL', url, '-o', filename])
log("@{bf}==>@| Contents of `%s`:" % filename)
with open(filename, 'r') as f:
print(f.read())
if __name__ == '__main__':
sys.exit(main())
|
## @ingroup Methods-Missions-Segments-Common
# Aerodynamics.py
#
# Created: Jul 2014, SUAVE Team
# Modified: Jan 2016, E. Botero
# Jul 2017, E. Botero
# Aug 2021, M. Clarke
# ----------------------------------------------------------------------
# Imports
# ----------------------------------------------------------------------
import numpy as np
# ----------------------------------------------------------------------
# Update Altitude
# ----------------------------------------------------------------------
## @ingroup Methods-Missions-Segments-Common
def update_altitude(segment):
""" Updates freestream altitude from inertial position
Assumptions:
N/A
Inputs:
segment.state.conditions:
frames.inertial.position_vector [meters]
Outputs:
segment.state.conditions:
freestream.altitude [meters]
Properties Used:
N/A
"""
altitude = -segment.state.conditions.frames.inertial.position_vector[:,2]
segment.state.conditions.freestream.altitude[:,0] = altitude
# ----------------------------------------------------------------------
# Update Atmosphere
# ----------------------------------------------------------------------
## @ingroup Methods-Missions-Segments-Common
def update_atmosphere(segment):
""" Computes conditions of the atmosphere at given altitudes
Assumptions:
N/A
Inputs:
state.conditions:
freestream.altitude [meters]
segment.analyses.atmosphere [Function]
Outputs:
state.conditions:
freestream.pressure [pascals]
freestream.temperature [kelvin]
freestream.density [kilogram/meter^3]
freestream.speed_of_sound [meter/second]
freestream.dynamic_viscosity [pascals-seconds]
freestream.kinematic_viscosity [meters^2/second]
freestream.thermal_conductivity [Watt/meter-Kelvin]
freestream.prandtl_number [unitless]
Properties Used:
N/A
"""
# unpack
conditions = segment.state.conditions
h = conditions.freestream.altitude
temperature_deviation = segment.temperature_deviation
atmosphere = segment.analyses.atmosphere
# compute
atmo_data = atmosphere.compute_values(h,temperature_deviation)
# pack
conditions.freestream.pressure = atmo_data.pressure
conditions.freestream.temperature = atmo_data.temperature
conditions.freestream.thermal_conductivity = atmo_data.thermal_conductivity
conditions.freestream.density = atmo_data.density
conditions.freestream.speed_of_sound = atmo_data.speed_of_sound
conditions.freestream.dynamic_viscosity = atmo_data.dynamic_viscosity
conditions.freestream.kinematic_viscosity = atmo_data.kinematic_viscosity
conditions.freestream.prandtl_number = atmo_data.prandtl_number
return
# ----------------------------------------------------------------------
# Update Freestream
# ----------------------------------------------------------------------
## @ingroup Methods-Missions-Segments-Common
def update_freestream(segment):
""" Computes freestream values
Assumptions:
N/A
Inputs:
segment.state.conditions:
frames.inertial.velocity_vector [meter/second]
freestream.density [kilogram/meter^3]
freestream.speed_of_sound [meter/second]
freestream.dynamic_viscosity [pascals-seconds]
Outputs:
segment.state.conditions:
freestream.dynamic_pressure [pascals]
freestream.mach_number [unitless]
freestream.reynolds_number [1/meter]
Properties Used:
N/A
"""
# unpack
conditions = segment.state.conditions
Vvec = conditions.frames.inertial.velocity_vector
rho = conditions.freestream.density
a = conditions.freestream.speed_of_sound
mu = conditions.freestream.dynamic_viscosity
# velocity magnitude
Vmag2 = np.sum( Vvec**2, axis=1)[:,None] # keep 2d column vector
Vmag = np.sqrt(Vmag2)
# dynamic pressure
q = 0.5 * rho * Vmag2 # Pa
# Mach number
M = Vmag / a
# Reynolds number
Re = rho * Vmag / mu # per m
# pack
conditions.freestream.velocity = Vmag
conditions.freestream.mach_number = M
conditions.freestream.reynolds_number = Re
conditions.freestream.dynamic_pressure = q
return
# ----------------------------------------------------------------------
# Update Aerodynamics
# ----------------------------------------------------------------------
## @ingroup Methods-Missions-Segments-Common
def update_aerodynamics(segment):
""" Gets aerodynamics conditions
Assumptions:
+X out nose
+Y out starboard wing
+Z down
Inputs:
segment.analyses.aerodynamics_model [Function]
aerodynamics_model.settings.maximum_lift_coefficient [unitless]
aerodynamics_model.geometry.reference_area [meter^2]
segment.state.conditions.freestream.dynamic_pressure [pascals]
Outputs:
conditions.aerodynamics.lift_coefficient [unitless]
conditions.aerodynamics.drag_coefficient [unitless]
conditions.frames.wind.lift_force_vector [newtons]
conditions.frames.wind.drag_force_vector [newtons]
Properties Used:
N/A
"""
# unpack
conditions = segment.state.conditions
aerodynamics_model = segment.analyses.aerodynamics
q = segment.state.conditions.freestream.dynamic_pressure
Sref = aerodynamics_model.geometry.reference_area
CLmax = aerodynamics_model.settings.maximum_lift_coefficient
# call aerodynamics model
results = aerodynamics_model( segment.state )
# unpack results
CL = results.lift.total
CD = results.drag.total
CL[q<=0.0] = 0.0
CD[q<=0.0] = 0.0
# CL limit
CL[CL>CLmax] = CLmax
CL[CL< -CLmax] = -CLmax
# dimensionalize
L = segment.state.ones_row(3) * 0.0
D = segment.state.ones_row(3) * 0.0
L[:,2] = ( -CL * q * Sref )[:,0]
D[:,0] = ( -CD * q * Sref )[:,0]
results.lift_force_vector = L
results.drag_force_vector = D
# pack conditions
conditions.aerodynamics.lift_coefficient = CL
conditions.aerodynamics.drag_coefficient = CD
conditions.frames.wind.lift_force_vector[:,:] = L[:,:] # z-axis
conditions.frames.wind.drag_force_vector[:,:] = D[:,:] # x-axis
# ----------------------------------------------------------------------
# Update Stability
# ----------------------------------------------------------------------
## @ingroup Methods-Missions-Segments-Common
def update_stability(segment):
""" Initiates the stability model
Assumptions:
N/A
Inputs:
segment.state.conditions [Data]
segment.analyses.stability [function]
Outputs:
N/A
Properties Used:
N/A
"""
# unpack
conditions = segment.state.conditions
stability_model = segment.analyses.stability
# call aerodynamics model
if stability_model:
results = stability_model( segment.state.conditions )
conditions.stability.update(results)
return
|
# 2020.08.30
# maybe won't do leetcode tomorrow
# Problem Statement:
# https://leetcode.com/problems/unique-paths-ii/
from typing import List

class Solution:
def uniquePathsWithObstacles(self, obstacleGrid: List[List[int]]) -> int:
m = len(obstacleGrid[0])
n = len(obstacleGrid)
# check corner cases and do early return
if m == 1 or n == 1:
s = 0
for i in range(0, n):
for j in range(0, m):
if obstacleGrid[i][j] == 1:
return 0
return 1
# initialize the answer
answer = [[0 for i in range(m)] for j in range(n)]
# do it line by line
for i in range(0, n):
for j in range(0, m):
# can not reach the obstacle, therefore 0 is filled in
if obstacleGrid[i][j] == 1:
answer[i][j] = 0
# side values, depend on their neighbour
elif (i == 0 and j == 0) or \
(i == 0 and j>=1 and answer[0][j-1] != 0) or \
(j == 0 and i>=1 and answer[i-1][j] != 0):
answer[i][j] = 1
else:
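# interior cell: number of paths is the sum of paths from the left and from above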
answer[i][j] = answer[i][j-1] + answer[i-1][j]
return answer[n-1][m-1] |
import requests
from selenium import webdriver
driver = webdriver.Chrome()
import time
from bs4 import BeautifulSoup
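# Scrape the home and away head-to-head tables from the soccerstats.com home/away page.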
counter = 0
total = 11
url = "https://www.soccerstats.com/homeaway.asp?league=england3"
time.sleep(2)  # brief pause before requesting the page
data = requests.get(url)
soup = BeautifulSoup(data.content, "html.parser")
div = soup.find("div", id="h2h-team1")
table = div.find("table", id="btable")
f = open("england3/homeRows.txt", "w")
print(table)
counter +=1
print(str(counter) + " out of " + str(total))
url = "https://www.soccerstats.com/homeaway.asp?league=england3"
time.sleep(2)  # pause between requests
data = requests.get(url)
soup = BeautifulSoup(data.content, "html.parser")
div = soup.find("div", id="h2h-team2")
table = div.find("table", id="btable")
print(table)
print("Away Rows Complete")
counter +=1
print("Good To Go")
|
class ColumnOrder:
# Enforce column order for relational table outputs
FORM_SPECIFICATIONS = [
'custom_form_id',
'custom_form_organizations',
'custom_form_class',
'custom_form_type_of_form',
'custom_form_name',
'custom_form_description',
'custom_form_header',
#
# Excluding these for now for readability:
#
# 'custom_form_updated_at',
'custom_form_created_at',
# 'custom_form_custom_form',
# 'custom_form_workflows',
'custom_form_active',
'question_id',
'question_label',
'question_text',
'question_field_type',
'question_formik_key',
'question_value',
#
# Excluding these for now for readability:
#
# 'question_active',
# 'question_number_questions_to_repeat',
# 'question_side_label',
# 'question_validation',
'answer_id',
'answer_label',
'answer_value',
'answer_text',
'answer_text_key',
'answer_text_question'
]
FORM_RESULTS = [
'form_result_id',
'form_result_surveying_organization',
'form_result_surveying_user',
'custom_form_id',
'form_result_response_id',
'question_title',
'question_answer',
'question_type',
#
# Excluding these for now for readability:
#
# 'custom_form_name',
# 'custom_form_description',
# 'form_result_created_at',
# 'form_result_updated_at',
'form_result_p_client',
# 'form_result_p_parse_user',
]
class ColumnReplace:
FORM_RESULTS = {
'id': 'form_result_id',
'form_specifications_id': 'form_result_custom_form_id',
'fields': 'form_result_fields',
'p_client': 'form_result_p_client',
'p_parse_user': 'form_result_p_parse_user',
'created_at': 'form_result_created_at',
'updated_at': 'form_result_updated_at',
'description': 'form_result_question_description',
'organizations': 'custom_form_fields',
'surveying_organization': 'form_result_surveying_organization',
'surveying_user': 'form_result_surveying_user',
'title': 'custom_form_header'
}
class Outputs:
JSON = 'json'
PICKLE_DICT = 'pickle_dict'
PICKLE_DATAFRAME = 'pickle_df'
PICKLE_LIST = 'pickle_list'
class PuenteTables:
ROLE = 'Role'
SESSION = 'Session'
USER = 'User'
ALLERGIES = 'Allergies'
ASSETS = 'Assets'
EVALUATION_MEDICAL = 'EvaluationMedical'
FORM_ASSET_RESULTS = 'FormAssetResults'
FORM_RESULTS = 'FormResults'
FORM_SPECIFICATIONS = 'FormSpecificationsV2'
HISTORY_ENVIRONMENTAL_HEALTH = 'HistoryEnvironmentalHealth'
HISTORY_MEDICAL = 'HistoryMedical'
HOUSEHOLD = 'Household'
SURVEY_DATA = 'SurveyData'
VITALS = 'Vitals'
class SurveyDataColumns:
columns = [
'_id',
'memberofthefollowingorganizations',
'familyhistory',
'otherOrganizationsYouKnow',
'location',
'fname',
'lname',
'dob',
'sex',
'marriageStatus',
'numberofIndividualsLivingintheHouse',
'numberofChildrenLivingintheHouse',
'numberofChildrenLivinginHouseUndertheAgeof5',
'occupation',
'educationLevel',
'telephoneNumber',
'yearsLivedinthecommunity',
'waterAccess',
'trashDisposalLocation',
'dayMostConvenient',
'hourMostConvenient',
'latitude',
'longitude',
'surveyingUser',
'surveyingOrganization',
'_created_at',
'_updated_at',
'communityname',
'yearsLivedinThisHouse',
'medicalIllnesses2',
'whenDiagnosed2',
'whatDoctorDoyousee2',
'didDoctorRecommend2',
'treatment2',
'DentalAssessmentandEvaluation',
'cedulaNumber',
'nickname',
'familyRelationships',
'city',
'province',
'insuranceNumber',
'insuranceProvider',
'clinicProvider',
'relationship_id',
'relationship',
'picture',
'signature',
'householdId',
'altitude',
'age',
'subcounty',
'region',
'country',
'appVersion',
'_p_parseUser',
'phoneOS'
]
|
from django.db import models
from django.utils import timezone
from django.conf import settings
# Create your models here.
class Tag(models.Model):
""" Represents the Tag model """
title = models.CharField(max_length=200)
def __str__(self):
return self.title
def __repr__(self):
return self.title
class Category(models.Model):
""" Represents the Category model """
title = models.CharField(max_length=200)
description = models.TextField()
def __str__(self):
return self.title
def __repr__(self):
return self.title
class Post(models.Model):
""" Represents the Post model """
title = models.CharField(max_length=200)
author = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
created_date = models.DateTimeField(default=timezone.now)
published_date = models.DateTimeField(blank=True, null=True)
text = models.TextField()
category = models.ForeignKey('blog.Category', on_delete=models.CASCADE)
def __str__(self):
return self.title
def __repr__(self):
return self.title
def publish(self):
self.published_date = timezone.now()
self.save()
class Comment(models.Model):
""" Represents the Post's Comment model """
post = models.ForeignKey('blog.Post', on_delete=models.CASCADE, related_name='comments')
author = models.CharField(max_length=200)
text = models.TextField()
created_date = models.DateField(default=timezone.now)
approved = models.BooleanField(default=False)
def __str__(self):
return self.text
def __repr__(self):
return self.text
def approve(self):
self.approved = True
self.save()
|
import json
import boto3
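# API Gateway Lambda authorizer: resolve the caller's username from a Cognito
# access token and return an IAM policy allowing the requested method.
# The boto3 client is kept in a module-level global so warm invocations reuse it.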
client = None
def handler(event, context):
print('Received event:', json.dumps(event))
global client
if not client:
client = boto3.client('cognito-idp')
token = event['headers']['Auth']
res = client.get_user(AccessToken=token)
username = res['Username']
return generateAllow(username, event['methodArn'])
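# Build a minimal IAM policy document granting or denying execute-api:Invoke on the resource.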
def generatePolicy(principalId, effect, resource):
authResponse = {}
authResponse['principalId'] = principalId
if effect and resource:
policyDocument = {}
policyDocument['Version'] = '2012-10-17'
policyDocument['Statement'] = []
statementOne = {}
statementOne['Action'] = 'execute-api:Invoke'
statementOne['Effect'] = effect
statementOne['Resource'] = resource
policyDocument['Statement'].append(statementOne)
authResponse['policyDocument'] = policyDocument
return authResponse
def generateAllow(principalId, resource):
return generatePolicy(principalId, 'Allow', resource)
def generateDeny(principalId, resource):
return generatePolicy(principalId, 'Deny', resource) |
from django.core.management.base import BaseCommand
from channels import Channel
import irc.bot
import irc.client
import irc.connection
import irc.buffer
import socket
import ssl
import logging
logging.getLogger('irc.client').setLevel(logging.INFO)
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
from webirc.asgi import channel_layer
from webirc.models import IRCServer
def create_message(c, e):
pass
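# Connection factory that resolves the hostname with getaddrinfo and connects to the
# first address returned, so IPv6-only IRC servers work as well as IPv4 ones.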
class DualStackFactory(irc.connection.Factory):
def connect(self, server_address):
logger.debug(f'server_address: {server_address}')
addrinfo = socket.getaddrinfo(*server_address, type=socket.SOCK_STREAM)
if len(addrinfo) == 0:
raise Exception(f"No addresses found for {server_address[0]}:{server_address[1]}")
addrinfo = addrinfo[0]
logger.debug(f'addrinfo: {addrinfo}')
sock = self.wrapper(socket.socket(*addrinfo[:3]))
self.bind_address and sock.bind(self.bind_address)
sock.connect(server_address)
return sock
__call__ = connect
class IRCClient(irc.bot.SingleServerIRCBot):
buffer_class = irc.buffer.LenientDecodingLineBuffer
@classmethod
def from_webirc_server(cls, server):
server_spec = irc.bot.ServerSpec(server.hostname, server.port, server.password)
connect_factory = DualStackFactory(wrapper=ssl.wrap_socket if server.ssl else lambda x: x)
return cls([server_spec], server.nickname, server.realname, server_id=server.id, connect_factory=connect_factory)
def __init__(self, *args, server_id=None, **kwargs):
super().__init__(*args, **kwargs)
self.server_id = server_id
self.connection.add_global_handler('all_events', self.on_all_events, -20)
forwarded_events = [
'pubmsg',
'privmsg',
'pubnotice',
'privnotice',
'join',
'part',
'quit',
'kick',
'nick',
        'currenttopic',
        'topicinfo',
'mode',
'action',
]
logger_blacklist = [
'all_raw_messages',
'pong',
'whospcrpl', 'endofwho',
'namreply', 'endofnames',
'motd', 'endofmotd',
]
def on_all_events(self, c, e):
if e.type not in self.logger_blacklist:
            logger.info(f'event(type={e.type}, source={e.source}, target={e.target}, args={e.arguments})')
        if e.type not in self.forwarded_events:
return
# if e.type == 'all_raw_messages':
# # logger.debug(e)
# return
self.send_event(
type=e.type,
source=e.source,
target=e.target,
arguments=e.arguments,
tags=e.tags
)
def on_join(self, c, e):
channel = e.target
self.broadcast_names(channel)
def on_kick(self, c, e):
channel = e.target
self.broadcast_names(channel)
def on_part(self, c, e):
channel = e.target
self.broadcast_names(channel)
def on_nick(self, c, e):
pass
def on_endofnames(self, c, e):
channel = e.arguments[0]
self.broadcast_names(channel)
def on_endofwho(self, c, e):
channel = e.arguments[0]
self.broadcast_names(channel)
def broadcast_names(self, channel):
names = list(self.channels[channel].users())
self.send_event(
type='webirc_names',
arguments=[channel, names]
)
def run(self):
logger.debug('run')
while True:
try:
logger.debug('connect')
self._connect()
self.connection.set_keepalive(30)
# print(vars(self))
# print(vars(self.connection))
while True:
channel, raw_message = channel_layer.receive([f'irc.send.{self.server_id}'])
if channel:
logger.info('channel_layer received {} {}'.format(channel, raw_message))
if raw_message['type'] == 'privmsg':
target = raw_message['target']
text = raw_message['text']
self.connection.privmsg(target, text)
# send message back to client
self.send_event(
type='privmsg',
source=self.connection.get_nickname(),
target=target,
arguments=[text]
)
elif raw_message['type'] == 'names':
print('names')
self.connection.names([raw_message['channel']])
self.reactor.process_once(0.2)
except irc.client.ServerNotConnectedError:
logger.warning('irc.client.ServerNotConnectedError')
continue
except KeyboardInterrupt:
logger.info('Received SIGINT, bye bye.')
break
finally:
self.disconnect()
def send_event(self, *, type,
source=None, target=None, arguments=None, tags=None):
import asgiref.base_layer
try:
Channel('irc.receive').send({
'server_id': self.server_id,
'event': {
'type': type,
'source': source,
'target': target,
'arguments': arguments,
'tags': tags,
}
})
except asgiref.base_layer.BaseChannelLayer.ChannelFull:
logger.exception('ChannelFull!')
import sys
sys.exit(1)
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument('server_id')
def handle(self, *args, server_id=None, **options):
irc_server = IRCServer.objects.get(id=server_id)
client = IRCClient.from_webirc_server(irc_server)
client.run() |
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution(object):
def flip(self, node):
if not node:
return None
hold_node = node.left
node.left = node.right
node.right = hold_node
self.flip(node.left)
self.flip(node.right)
def invertTree(self, root):
"""
:type root: TreeNode
:rtype: TreeNode
"""
self.flip(root)
return root
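# Usage sketch (assumes the commented-out TreeNode definition above is available):
#   root = TreeNode(1, TreeNode(2), TreeNode(3))
#   Solution().invertTree(root)
#   root.left.val, root.right.val   # -> (3, 1)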
|
#!/usr/bin/env python3
import os
import subprocess
import time
import json
configuration = {}
def executeTest(cluster, url, payload, rate, connections, duration, id, max_retries):
failures = 0
for tries in range(int(max_retries)):
process = subprocess.run(['./autobench', '-cluster', cluster, '-url', url, '-payload', payload, '-rate', rate, '-connections', connections, '-duration', duration, '-out', '{}_{}_{}_{}_{}_{}.txt'.format(id, cluster, payload, rate, connections, duration)])
        if process.returncode != 0:
            print("{}: {} failed {} / {}".format(id, cluster, str(tries + 1), str(max_retries)))
            failures += 1
time.sleep(int(configuration['waitBetweenTests']))
else:
break
with open("test_failures.txt", "a") as myfile:
myfile.write("{}_{}_{}_{}_{}: {}\n".format(id, cluster, payload, rate, connections, str(failures)))
def runTest(rate, connections, payload, duration, id, clusters, max_retries = "3"):
for cluster in clusters:
print("Testing {} with {} bytes payload, {} rate and {} connections for {} on {}"
.format(id, payload, rate, connections, duration, cluster))
executeTest(cluster, next((c['url'] for c in configuration['clusters'] if c['name'] == cluster), None), payload, rate, connections, duration, id, max_retries)
time.sleep(30)
def main():
os.chdir(os.path.expanduser("~") + "/test")
global configuration
with open('config.json') as json_data:
config = json.load(json_data)
configuration = config['configuration']
print("Following Tests will be executed:")
for t in config['tests']:
print("ID: {}, Clusters: [{}], Rate: {} msg/s, Payload: {} bytes, Connections: {}, Duration: {}, Max Retries: {}"
.format(t['id'], ', '.join(t['clusters']), t['rate'], t['payload'], t['connections'], t['duration'], t['max_retries']))
print('\n')
for t in config['tests']:
runTest(t['rate'], t['connections'], t['payload'], t['duration'], t['id'], t['clusters'])
if __name__== "__main__":
main()
|
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from database_setup import *
# Create database and create a shortcut
engine = create_engine('postgresql://catalog:password@localhost/catalog')
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
session = DBSession()
# Create user
User1 = User(name="Youssef", email="[email protected]")
session.add(User1)
session.commit()
# Create category of Soccer
category1 = Categories(user_id=1, name="Soccer")
session.add(category1)
session.commit()
# Create Items For Soccer
SoccerItem1 = CategoryItem(name="Goal Keeping Gloves", user_id=1,
description="Gloves provides a better grip",
categories=category1)
session.add(SoccerItem1)
session.commit()
SoccerItem2 = CategoryItem(name="Soccer cleats", user_id=1,
description="Soccer Cleats are made of rubber",
categories=category1)
session.add(SoccerItem2)
session.commit()
SoccerItem3 = CategoryItem(name="Shin Guards", user_id=1,
description="Provides protection to player shins",
categories=category1)
session.add(SoccerItem3)
session.commit()
SoccerItem4 = CategoryItem(name="Socks", user_id=1,
description="Socks are usually knee length",
categories=category1)
session.add(SoccerItem4)
session.commit()
SoccerItem5 = CategoryItem(name="Shorts", user_id=1,
description="Soccer shorts are above the knee",
categories=category1)
session.add(SoccerItem5)
session.commit()
# Create category of Basketball
category2 = Categories(user_id=1, name="Basketball")
session.add(category2)
session.commit()
# Create Items For Basketball
BasketballItem1 = CategoryItem(name="Basketball Bat", user_id=1,
description="The Basketball Arbitral Tribunal"
"(BAT) is an independent body, officially"
"recognised by FIBA and outlined by the FIBA"
"General Statutes, providing services for"
"the rapid and simple resolution of disputes"
"between players, agents, coaches and clubs"
"through arbitration. It was established in"
"2006 (as the FIBA Arbitral Tribunal).",
categories=category2)
session.add(BasketballItem1)
session.commit()
# Create category of Baseball
category3 = Categories(user_id=1, name="Baseball")
session.add(category3)
session.commit()
# Create Items For Baseball
BaseballItem1 = CategoryItem(name="Baseball Bat", user_id=1,
description="baseball to hit the ball after it is"
"thrown by the pitcher. By regulation it may be "
"no more than 2.75 inches (70 mm) in diameter at"
"the thickest part and no more than 42 inches"
"(1,100 mm) long.", categories=category3)
session.add(BaseballItem1)
session.commit()
# Create category of Frisbee
category4 = Categories(user_id=1, name="Frisbee")
session.add(category4)
session.commit()
# Create Items For Frisbee
FribeeItem1 = CategoryItem(name="Frisbee", user_id=1,
                           description="a game played on a rectangular field "
                           "between two seven-player teams in which a plastic "
                           "disc is advanced by being thrown from player to "
                           "player and in which a team scores by catching "
                           "a throw in the opponent's end zone",
                           categories=category4)
session.add(FribeeItem1)
session.commit()
# Create category of Snowboarding
category5 = Categories(user_id=1, name="Snowboarding")
session.add(category5)
session.commit()
# Create Items For Snowboarding
SnowboardingItem1 = CategoryItem(name="Snowboarding", user_id=1,
description="Snowboarding is a recreational"
"activity and Olympic and Paralympic sport"
"that involves descending a snow-covered"
"slope while standing on a snowboard attached"
"to a rider's feet.", categories=category5)
session.add(SnowboardingItem1)
session.commit()
# Create category Rock Climbing
category6 = Categories(user_id=1, name="Rock Climbing")
session.add(category6)
session.commit()
# Create Items For Climbing
ClimbingItem1 = CategoryItem(name="Climbing Mountains", user_id=1,
description="Mountain climbing (or mountaineer"
"ing)"
"is a hobby where people climb mountains.[2] It "
"may involve hiking, , rock climbing, as well as"
"crossing glaciers. Someone who does mountain"
"climbing is called a mountain climber.",
categories=category6)
session.add(ClimbingItem1)
session.commit()
# Create category of Foosball
category7 = Categories(user_id=1, name="Foosball")
session.add(category7)
session.commit()
# Create Items For Foosball
FoosballItem1 = CategoryItem(name="Foosball", user_id=1,
description="Table football or table soccer,"
"foosball in North America, is a table-top game"
"that is loosely based on football.[1] The aim of"
"the game is to use the control knobs to move the"
"ball into the opponent’s goal. There are no"
"unified rules for playing the game, in the sense"
"that rules vary in different countries and even"
" in"
"cities, and sometimes between different clubs in"
"the same city.", categories=category7)
session.add(FoosballItem1)
session.commit()
# Create category of Skating
category8 = Categories(user_id=1, name="Skating")
session.add(category8)
session.commit()
# Create Items For Skating
SkatingItem1 = CategoryItem(name="Shorts", user_id=1,
description="Skating involves any sports or"
"recreational activity which consists of traveling"
"on surfaces or on ice using skates.There are"
"several different kinds of skating",
categories=category8)
session.add(SkatingItem1)
session.commit()
# Create category of Hockey
category9 = Categories(user_id=1, name="Hockey")
session.add(category9)
session.commit()
# Create Items For Hockey
HockeyItem1 = CategoryItem(name="Hockey", user_id=1,
description="Ice Hockey is a contact team sport"
"played on ice, usually in a rink, in which two"
"teams of skaters use their sticks to shoot a"
"vulcanized rubber puck into their opponent's"
"net to score points", categories=category9)
session.add(HockeyItem1)
session.commit()
print ("Done!")
|
a = 1
j = "Less than ten" if a < 10 else "More than ten"
# j = "Less than ten"
|
import mock
from cStringIO import StringIO
from tornado import httpclient
from viewfinder.backend.base import testing
kURL = "http://www.example.com/"
class MockAsyncHTTPClientTestCase(testing.BaseTestCase):
def setUp(self):
super(MockAsyncHTTPClientTestCase, self).setUp()
self.http_client = testing.MockAsyncHTTPClient()
def test_unmapped(self):
"""Requests not on the whitelist raise an error."""
with self.assertRaises(ValueError):
self.http_client.fetch(kURL, self.stop)
def test_string(self):
"""Map a url to a constant string."""
self.http_client.map(kURL, "hello world")
self.http_client.fetch(kURL, self.stop)
response = self.wait()
self.assertEqual(response.body, "hello world")
def test_callable(self):
"""Map a url to a function returning a string."""
self.http_client.map(kURL, lambda request: "hello world")
self.http_client.fetch(kURL, self.stop)
response = self.wait()
self.assertEqual(response.body, "hello world")
def test_response(self):
"""Map a url to a function returning an HTTPResponse.
HTTPResponse's constructor requires a request object, so there is no
fourth variant that returns a constant HTTPResponse.
"""
self.http_client.map(kURL, lambda request: httpclient.HTTPResponse(
request, 404, buffer=StringIO("")))
self.http_client.fetch(kURL, self.stop)
response = self.wait()
self.assertEqual(response.code, 404)
def test_with_patch(self):
"""Replace the AsyncHTTPClient class using mock.patch."""
self.http_client.map(kURL, "hello world")
with mock.patch('tornado.httpclient.AsyncHTTPClient', self.http_client):
real_client = httpclient.AsyncHTTPClient()
self.assertIs(self.http_client, real_client)
real_client.fetch(kURL, self.stop)
response = self.wait()
self.assertEqual(response.body, "hello world")
|
from __future__ import annotations
from model.image.entity.subtag_condition import SubtagCondition
class VirtualTag:
def __init__(self, name: str) -> None:
self.name = name
self.subtags: list[VirtualTag.Subtag] = []
def add_subtag(self, subtag_name: str, subtag_condition: SubtagCondition) -> VirtualTag:
self.subtags.append(VirtualTag.Subtag(subtag_name, subtag_condition))
return self
class Subtag:
def __init__(self, name: str, condition: SubtagCondition) -> None:
self.name = name
self.condition = condition
|
# Generated by Django 2.2.4 on 2020-07-14 19:12
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('commerce', '0028_auto_20200710_1214'),
]
operations = [
migrations.AddField(
model_name='order',
name='reminder_sent',
field=models.DateTimeField(blank=True, default=None, null=True, verbose_name='reminder sent'),
),
]
|
'''run.py - Joshua Wallace - Mar 2019
This is an example for how to run the simple_deblend code.
'''
import numpy as np
import sys, os
sys.path.insert(1,os.path.abspath('../src'))
import data_processing as dproc # one of the simple_deblend codes
def get_input_light_curves(list_of_ids,list_of_times,list_of_mags,list_of_errs):
'''
This can be modified as desired to allow for arbitrary input,
as long as the output format matches what is here.
'''
return_dict = {}
for i in range(len(list_of_ids)):
return_dict[list_of_ids[i]] = (list_of_times[i],list_of_mags[i],list_of_errs[i])
# return format: a dictionary of form {ID:(t,lc,err)}
return return_dict
def get_xy(list_of_ids,list_of_x,list_of_y):
'''
This can be modified as desired to allow for arbitrary input,
as long as the output format matches what is here.
'''
return_dict = {}
for i in range(len(list_of_ids)):
return_dict[list_of_ids[i]] = (list_of_x[i],list_of_y[i])
    # return format: a dictionary of form {ID:(x,y)}
return return_dict
def sample_threshold(period):
if period < 10.:
return 12.
else:
return 13.
def main():
# Radius to consider things neighbors
neighbor_radius = 10
# Number of master processes
n_control_workers = 1
# Minimum and maximum period to search, in days
min_p = 1.
max_p = 30.
# Automatically determine frequency search step size
autofreq = True
# These are set to None because autofreq is set to True
stepsize_ls = None
stepsize_pdm = None
stepsize_bls = None
nphasebins_bls = None
# Various parameters for the median filtering
freq_window_epsilon = 4.
median_filter_window_ls = 80
median_filter_window_pdm = 80
median_filter_window_bls = 200
# Minimum and maximum transit duration for BLS search
min_transit_duration = 0.01
max_transit_duration = 0.5
##################################################################
# Now generate sample (fake) light curves
IDs = ['A','B','C','D']
n = 1002
sigma = 0.1
rand1 = np.random.RandomState(seed=1)
times = [np.linspace(0,90,n),np.linspace(0,90,n),
np.linspace(0,90,n),np.linspace(0,90,n)]
mags = [ np.sin(times[0]) + 10. + sigma*rand1.randn(n),
0.1*np.sin(times[1]) + 10. + sigma*rand1.randn(n),
np.sin(times[2]/2.227349239) + 10. + sigma*rand1.randn(n),
np.sin(times[3]) + 10. + sigma*rand1.randn(n)]
errs = [[sigma]*n,[sigma]*n,[sigma]*n,[sigma]*n]
x = [0,1,1,100]
y = [0,0,1,100]
# Get the light curves
lcs = get_input_light_curves(IDs,times,mags,errs)
# Get xy positions
xy = get_xy(IDs,x,y)
##################################################################
# Initialize the object to be ran
col = dproc.lc_collection_for_processing(neighbor_radius,
n_control_workers=n_control_workers)
# Add objects
for ID in lcs.keys():
lc = lcs[ID]
this_xy = xy[ID]
col.add_object(lc[0],lc[1],lc[2],this_xy[0],this_xy[1],ID)
# Run Lomb-Scargle
col.run_ls(startp=min_p,endp=max_p,autofreq=autofreq,
stepsize=stepsize_ls,
sigclip=np.inf,verbose=False,medianfilter=True,
freq_window_epsilon_mf=freq_window_epsilon,
freq_window_epsilon_snr=freq_window_epsilon,
median_filter_size=median_filter_window_ls,
snr_filter_size=median_filter_window_ls,
snr_threshold=sample_threshold)
# Run Phase Dispersion Minimization
col.run_pdm(startp=min_p,endp=max_p,autofreq=autofreq,
stepsize=stepsize_pdm,
sigclip=np.inf,verbose=False,medianfilter=True,
freq_window_epsilon_mf=freq_window_epsilon,
freq_window_epsilon_snr=freq_window_epsilon,
median_filter_size=median_filter_window_pdm,
snr_filter_size=median_filter_window_pdm,
snr_threshold=sample_threshold)
# Run Box-fitting Least Squares
col.run_bls(startp=min_p,endp=max_p,autofreq=autofreq,
stepsize=stepsize_bls,
nphasebins=nphasebins_bls,
mintransitduration=min_transit_duration,
maxtransitduration=max_transit_duration,
sigclip=np.inf,medianfilter=True,
freq_window_epsilon_mf=freq_window_epsilon,
freq_window_epsilon_snr=freq_window_epsilon,
median_filter_size=median_filter_window_bls,
snr_filter_size=median_filter_window_bls,
snr_threshold=sample_threshold)
if __name__ == '__main__':
main()
|
#!/usr/bin/env python3
import argparse
import datetime
import sys
# XXX we're using more than one date format (one for arg, one for parsing), name is confusing
# this one is the date format used in the standard text file
DATEFMT = "%Y%m%dT%H%M%S"
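# Illustrative example of the two formats (assumed values):
#   CLI argument "20240131T0930"      -> parsed by _cli_date with "%Y%m%dT%H%M"
#   data line    "20240131T093000 42" -> its timestamp prefix is compared
#                                        lexicographically against the DATEFMT-rendered bounds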
def main():
cfg = _parse_args()
if cfg.date_min:
dt_min = cfg.date_min
else:
dt_min = datetime.datetime.now() - datetime.timedelta(hours=24)
if cfg.date_max:
dt_max = cfg.date_max
else:
dt_max = datetime.datetime.now()
_extract_datapoints(sys.stdin, sys.stdout, dt_min, dt_max)
def _parse_args():
parser = argparse.ArgumentParser()
# TODO actually implement UTC support kek, or remove it everywhere
parser.add_argument("-u", "--utc", action="store_true", help="format time in UTC")
parser.add_argument("--date-min", type=_cli_date, help="in format YYYYMMDDThhmm")
parser.add_argument("--date-max", type=_cli_date, help="in format YYYYMMDDThhmm")
return parser.parse_args()
def _cli_date(value):
return datetime.datetime.strptime(value, "%Y%m%dT%H%M")
def _extract_datapoints(fobj_in, fobj_out, dt_min, dt_max):
textual_min = dt_min.strftime(DATEFMT)
textual_max = dt_max.strftime(DATEFMT)
l = len(textual_min)
# find first line matching dt_min
for line in fobj_in:
# ignore invalid lines, by checking if the first 8 characters are all digits
if not line[:8].isdigit():
continue
if line[:l] >= textual_min:
break
else:
# read the whole file without finding any date earlier than dt_min
return
fobj_out.write(line)
# find last line matching dt_max
for line in fobj_in:
# ignore invalid lines, by checking if the first 8 characters are all digits
if not line[:8].isdigit():
continue
if line[:l] < textual_max:
fobj_out.write(line)
else:
break
if __name__ == "__main__":
main()
|
from django.test import TestCase
from django.contrib.auth.models import User
from dwitter.models import Dweet
from dwitter.models import Comment
from django.utils import timezone
from datetime import timedelta
class DweetTestCase(TestCase):
def setUp(self):
user1 = User.objects.create(id=1, username="user1", password="")
user2 = User.objects.create(id=2, username="user2", password="")
now = timezone.now()
dweet1 = Dweet.objects.create(id=1,
code="dweet1 code",
posted=now - timedelta(minutes=1),
author=user1)
dweet2 = Dweet.objects.create(id=2,
code="dweet2 code",
posted=now,
reply_to=dweet1,
author=user2)
Comment.objects.create(id=1,
text="comment1 text",
posted=now - timedelta(minutes=1),
reply_to=dweet2,
author=user1)
Comment.objects.create(id=2,
text="comment2 text",
posted=now,
reply_to=dweet1,
author=user2)
def test_comment_renders_to_string_correctly(self):
self.assertEqual(Comment.objects.get(id=1).__str__(),
"c/1 (user1) to d/2 (user2)")
self.assertEqual(Comment.objects.get(id=2).__str__(),
"c/2 (user2) to d/1 (user1)")
def test_comment_reply_to_do_nothing_on_soft_delete(self):
Dweet.objects.get(id=2).delete()
self.assertTrue(Comment.objects.get(id=1).reply_to.deleted)
self.assertEqual(Comment.objects.get(id=2).reply_to,
Dweet.objects.get(id=1))
def test_comment_author_cascade_on_delete(self):
User.objects.get(username="user1").delete()
with self.assertRaises(Comment.DoesNotExist):
Comment.objects.get(id=1)
self.assertEqual(Comment.objects.get(id=2).author,
User.objects.get(id=2))
|
#
# Copyright (C) 2013, 2014 Satoru SATOH <ssato @ redhat.com>
# License: MIT
#
"""anyconfig globals.
"""
import logging
import os
AUTHOR = 'Satoru SATOH <[email protected]>'
VERSION = "0.0.5"
_LOGGING_FORMAT = "%(asctime)s %(name)s: [%(levelname)s] %(message)s"
def get_logger(name="anyconfig", log_format=_LOGGING_FORMAT,
level=logging.WARNING):
"""
Initialize custom logger.
"""
if os.environ.get("ANYCONFIG_DEBUG", False):
level = logging.DEBUG
logging.basicConfig(level=level, format=log_format)
logger = logging.getLogger(name)
handler = logging.StreamHandler()
handler.setLevel(level)
handler.setFormatter(logging.Formatter(log_format))
logger.addHandler(handler)
return logger
LOGGER = get_logger()
# vim:sw=4:ts=4:et:
|
# _*_ coding: utf-8 _*_
"""
Created by Allen7D on 2020/3/24.
↓↓↓ Permission group management API ↓↓↓
"""
from app.libs.core import find_auth_module, get_ep_name
from app.libs.error_code import Success, NotFound, ForbiddenException
from app.libs.redprint import RedPrint
from app.libs.token_auth import auth
from app.models.base import db
from app.models.user import User as UserModel
from app.models.group import Group as GroupModel
from app.models.auth import Auth as AuthModel
from app.api_docs.cms import group as api_doc
from app.validators.base import BaseValidator
from app.validators.forms import PaginateValidator, UpdateGroupValidator
__author__ = 'Allen7D'
api = RedPrint(name='group', description='权限组管理', api_doc=api_doc, alias='cms_group')
@api.route('/all', methods=['GET'])
@api.route_meta(auth='查询所有权限组', module='管理员', mount=False)
@api.doc(auth=True)
@auth.admin_required
def get_group_all():
    '''Query all permission groups'''
group_list = GroupModel.get_all()
if not group_list:
raise NotFound(msg='不存在任何权限组')
return Success(group_list)
@api.route('/<string:id>', methods=['GET'])
@api.doc(args=['g.path.group_id'], auth=True)
@auth.admin_required
def get_group(id):
    '''Query a single permission group and its permissions'''
group = GroupModel.get_or_404(id=id, msg='分组不存在')
group.append('auth_list')
return Success(group)
@api.route('', methods=['POST'])
@api.doc(args=['body.group_name', 'body.auth_ids', 'body.info'], auth=True)
@auth.admin_required
def create_group():
    '''Create a new permission group'''
validator = BaseValidator.get_json()
    name = validator['name'] # permission group name
    auth_ids = validator['auth_ids'] # permission IDs
    auth_list = [get_ep_name(auth_id) for auth_id in auth_ids] # list of permission names
    info = validator['info'] # description of the permission group
group = GroupModel.create(name=name, info=info)
for auth in auth_list:
meta = find_auth_module(auth)
if meta:
AuthModel.create(auth=meta.auth, module=meta.module, group_id=group.id)
return Success(error_code=1)
@api.route('/<string:id>', methods=['PUT'])
@api.doc(args=['g.path.group_id', 'body.group_name', 'body.info'], auth=True)
@auth.admin_required
def update_group(id):
    '''Update a permission group'''
form = UpdateGroupValidator().validate_for_api()
group = GroupModel.get_or_404(id=id, msg='分组不存在,更新失败')
group.renew(name=form.name.data, info=form.info.data)
return Success()
@api.route('/<string:id>', methods=['DELETE'])
@api.doc(args=['g.path.group_id'], auth=True)
@auth.admin_required
def delete_group(id):
    '''Delete a permission group'''
group = GroupModel.get_or_404(id=id, msg='分组不存在,删除失败')
if UserModel.get(group_id=id):
raise ForbiddenException(msg='分组下存在用户,不可删除')
    # delete the permissions owned by the group
AuthModel.objects.filter(group_id=id).delete()
group.delete()
return Success()
|
from django.urls import path
from apple import views
app_name = "apple"
urlpatterns = [
path(
"receipt-type-query/",
views.ReceiptTypeQueryView.as_view(),
name="receipt-type-query",
)
]
|
# coding: utf-8
import time
import random
import os
import json
import re
import sys
sys.path.append(os.getcwd() + "/class/core")
import mw
app_debug = False
if mw.isAppleSystem():
app_debug = True
def getPluginName():
return 'rsyncd'
def getInitDTpl():
path = getPluginDir() + "/init.d/" + getPluginName() + ".tpl"
return path
def getPluginDir():
return mw.getPluginDir() + '/' + getPluginName()
def getServerDir():
return mw.getServerDir() + '/' + getPluginName()
def getInitDFile():
if app_debug:
return '/tmp/' + getPluginName()
return '/etc/init.d/' + getPluginName()
def getArgs():
args = sys.argv[2:]
tmp = {}
args_len = len(args)
if args_len == 1:
t = args[0].strip('{').strip('}')
t = t.split(':')
tmp[t[0]] = t[1]
elif args_len > 1:
for i in range(len(args)):
t = args[i].split(':')
tmp[t[0]] = t[1]
return tmp
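# Illustrative call (assumed script name and values): `python index.py del_rec {name:backup}`
# makes getArgs() return {'name': 'backup'}; checkArgs() below then verifies required keys exist.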
def checkArgs(data, ck=[]):
for i in range(len(ck)):
if not ck[i] in data:
return (False, mw.returnJson(False, '参数:(' + ck[i] + ')没有!'))
return (True, mw.returnJson(True, 'ok'))
def contentReplace(content):
service_path = mw.getServerDir()
content = content.replace('{$SERVER_PATH}', service_path)
return content
def status():
data = mw.execShell(
"ps -ef|grep rsync |grep -v grep | grep -v python | awk '{print $2}'")
if data[0] == '':
return 'stop'
return 'start'
def appConf():
if mw.isAppleSystem():
return getServerDir() + '/rsyncd.conf'
return '/etc/rsyncd.conf'
def appConfPwd():
if mw.isAppleSystem():
return getServerDir() + '/rsyncd.passwd'
return '/etc/rsyncd.passwd'
def getLog():
conf_path = appConf()
conf = mw.readFile(conf_path)
rep = 'log file\s*=\s*(.*)'
tmp = re.search(rep, conf)
if not tmp:
return ''
return tmp.groups()[0]
def initDreplace():
conf_path = appConf()
conf = mw.readFile(conf_path)
compile_sub = re.compile('^#(.*)', re.M)
conf = compile_sub.sub('', conf)
conf_tpl_path = getPluginDir() + '/conf/rsyncd.conf'
if conf.strip() == '':
content = mw.readFile(conf_tpl_path)
mw.writeFile(conf_path, content)
confpwd_path = appConfPwd()
if not os.path.exists(confpwd_path):
mw.writeFile(confpwd_path, '')
mw.execShell('chmod 0600 ' + confpwd_path)
initD_path = getServerDir() + '/init.d'
if not os.path.exists(initD_path):
os.mkdir(initD_path)
file_bin = initD_path + '/' + getPluginName()
file_tpl = getInitDTpl()
# initd replace
if not os.path.exists(file_bin):
content = mw.readFile(file_tpl)
content = contentReplace(content)
mw.writeFile(file_bin, content)
mw.execShell('chmod +x ' + file_bin)
if os.path.exists('/usr/lib/systemd/system/rsyncd.service'):
mw.execShell('rm -rf /usr/lib/systemd/system/rsyncd*')
rlog = getLog()
if os.path.exists(rlog):
mw.writeFile(rlog, '')
return file_bin
def start():
file = initDreplace()
data = mw.execShell(file + ' start')
if data[1] == '':
return 'ok'
return 'fail'
def stop():
file = initDreplace()
data = mw.execShell(file + ' stop')
if data[1] == '':
return 'ok'
return 'fail'
def restart():
if mw.isAppleSystem():
return "Apple Computer does not support"
stop()
start()
return 'ok'
def reload():
if mw.isAppleSystem():
return "Apple Computer does not support"
# data = mw.execShell('systemctl reload rsyncd.service')
# if data[1] == '':
# return 'ok'
# return 'fail'
stop()
start()
return 'ok'
def initdStatus():
if not app_debug:
if mw.isAppleSystem():
return "Apple Computer does not support"
initd_bin = getInitDFile()
if os.path.exists(initd_bin):
return 'ok'
return 'fail'
def initdInstall():
import shutil
if not app_debug:
if mw.isAppleSystem():
return "Apple Computer does not support"
p_bin = initDreplace()
initd_bin = getInitDFile()
shutil.copyfile(p_bin, initd_bin)
mw.execShell('chmod +x ' + initd_bin)
mw.execShell('chkconfig --add ' + getPluginName())
return 'ok'
def initdUinstall():
if not app_debug:
if mw.isAppleSystem():
return "Apple Computer does not support"
initd_bin = getInitDFile()
os.remove(initd_bin)
mw.execShell('chkconfig --del ' + getPluginName())
return 'ok'
def getRecListData():
path = appConf()
content = mw.readFile(path)
flist = re.findall("\[(.*)\]", content)
flist_len = len(flist)
ret_list = []
for i in range(flist_len):
tmp = {}
tmp['name'] = flist[i]
n = i + 1
reg = ''
if n == flist_len:
reg = '\[' + flist[i] + '\](.*)\[?'
else:
reg = '\[' + flist[i] + '\](.*)\[' + flist[n] + '\]'
t1 = re.search(reg, content, re.S)
if t1:
args = t1.groups()[0]
# print 'args start', args, 'args_end'
t2 = re.findall('\s*(.*)\s*=\s*(.*)', args, re.M)
for i in range(len(t2)):
tmp[t2[i][0].strip()] = t2[i][1]
ret_list.append(tmp)
return ret_list
def getRecList():
ret_list = getRecListData()
return mw.returnJson(True, 'ok', ret_list)
def getUPwdList():
pwd_path = appConfPwd()
pwd_content = mw.readFile(pwd_path)
plist = pwd_content.strip().split('\n')
plist_len = len(plist)
data = {}
for x in range(plist_len):
tmp = plist[x].split(':')
data[tmp[0]] = tmp[1]
return data
def addRec():
args = getArgs()
data = checkArgs(args, ['name', 'path', 'pwd', 'ps'])
if not data[0]:
return data[1]
args_name = args['name']
args_pwd = args['pwd']
args_path = args['path']
args_ps = args['ps']
pwd_path = appConfPwd()
pwd_content = mw.readFile(pwd_path)
pwd_content += args_name + ':' + args_pwd + "\n"
mw.writeFile(pwd_path, pwd_content)
path = appConf()
content = mw.readFile(path)
con = "\n\n" + '[' + args_name + ']' + "\n"
con += 'path = ' + args_path + "\n"
con += 'comment = ' + args_ps + "\n"
con += 'auth users = ' + args_name + "\n"
con += 'read only = false'
content = content + con
mw.writeFile(path, content)
return mw.returnJson(True, '添加成功')
def delRec():
args = getArgs()
data = checkArgs(args, ['name'])
if not data[0]:
return data[1]
args_name = args['name']
cmd = "sed -i '_bak' '/" + args_name + "/d' " + appConfPwd()
mw.execShell(cmd)
try:
path = appConf()
content = mw.readFile(path)
ret_list = getRecListData()
ret_list_len = len(ret_list)
is_end = False
next_name = ''
for x in range(ret_list_len):
tmp = ret_list[x]
if tmp['name'] == args_name:
if x + 1 == ret_list_len:
is_end = True
else:
next_name = ret_list[x + 1]['name']
reg = ''
if is_end:
reg = '\[' + args_name + '\]\s*(.*)'
else:
reg = '\[' + args_name + '\]\s*(.*)\s*\[' + next_name + '\]'
conre = re.search(reg, content, re.S)
content = content.replace(
"[" + args_name + "]\n" + conre.groups()[0], '')
mw.writeFile(path, content)
return mw.returnJson(True, '删除成功!')
except Exception as e:
return mw.returnJson(False, '删除失败!')
def cmdRec():
args = getArgs()
data = checkArgs(args, ['name'])
if not data[0]:
return data[1]
an = args['name']
pwd_list = getUPwdList()
ip = mw.getLocalIp()
cmd = 'echo "' + pwd_list[an] + '" > /tmp/p.pass' + "<br>"
cmd += 'chmod 600 /tmp/p.pass' + "<br>"
cmd += 'rsync -arv --password-file=/tmp/p.pass --progress --delete /project ' + \
an + '@' + ip + '::' + an
return mw.returnJson(True, 'OK!', cmd)
# rsyncdReceive
if __name__ == "__main__":
func = sys.argv[1]
if func == 'status':
print(status())
elif func == 'start':
print(start())
elif func == 'stop':
print(stop())
elif func == 'restart':
print(restart())
elif func == 'reload':
print(reload())
elif func == 'initd_status':
print(initdStatus())
elif func == 'initd_install':
print(initdInstall())
elif func == 'initd_uninstall':
print(initdUinstall())
elif func == 'conf':
print(appConf())
elif func == 'conf_pwd':
print(appConfPwd())
elif func == 'run_log':
print(getLog())
elif func == 'rec_list':
print(getRecList())
elif func == 'add_rec':
print(addRec())
elif func == 'del_rec':
print(delRec())
elif func == 'cmd_rec':
print(cmdRec())
else:
print('error')
|
#!/usr/bin/env python
from setuptools import setup, find_packages
def readme():
with open('README.rst') as f:
return f.read()
setup(name='cabot-alert-rocketchat',
version='0.1.2',
description='A RocketChat alert plugin for Cabot',
long_description=readme(),
license='MIT',
author='Objectif Libre',
author_email='[email protected]',
url='https://objectif-libre.com',
packages=find_packages(),
)
|
import torch
def get_device(verbose=True):
torch.cuda.is_available()
use_cuda = torch.cuda.is_available()
device = torch.device("cuda:0" if use_cuda else "cpu")
if verbose: print('Torch running on:', device)
return device
def to_numpy(x):
return x.detach().cpu().numpy()
def to_torch(x, device):
return torch.from_numpy(x).float().to(device)
def _my_field_grad(f, dim):
"""
    dim = 1 : derivative wrt the x direction
    dim = 2 : derivative wrt the y direction
    courtesy of https://github.com/Rose-STL-Lab/Turbulent-Flow-Net/
"""
assert f.shape[0] == f.shape[1] # input must be in shape (R,R)
R = f.shape[0]
dx = 1/R
dim += 1
D = 2
assert D == len(f.shape)
out = torch.zeros_like(f)
# initialize slices
slice1 = [slice(None)]*D
slice2 = [slice(None)]*D
slice3 = [slice(None)]*D
slice4 = [slice(None)]*D
# 2nd order interior
slice1[-dim] = slice(1, -1)
slice2[-dim] = slice(None, -2)
slice3[-dim] = slice(1, -1)
slice4[-dim] = slice(2, None)
out[tuple(slice1)] = (f[tuple(slice4)] - f[tuple(slice2)]) / (2*dx)
# 2nd order edges
slice1[-dim] = 0
slice2[-dim] = 0
slice3[-dim] = 1
slice4[-dim] = 2
a = -1.5 / dx
b = 2. / dx
c = -0.5 / dx
out[tuple(slice1)] = a * f[tuple(slice2)] + b * f[tuple(slice3)] + c * f[tuple(slice4)]
slice1[-dim] = -1
slice2[-dim] = -3
slice3[-dim] = -2
slice4[-dim] = -1
a = 0.5 / dx
b = -2. / dx
c = 1.5/ dx
out[tuple(slice1)] = a * f[tuple(slice2)] + b * f[tuple(slice3)] + c * f[tuple(slice4)]
return out |
#!/usr/bin/env python
import time
from datetime import datetime
from datetime import timedelta
from datetime import date
import sys
import threading
import RPi.GPIO as GPIO
import Adafruit_DHT
from Adafruit_LED_Backpack import SevenSegment
import holidays
us_holidays = holidays.US()
holiday_list = [
'New Year\'s Day',
'New Year\'s Day (Observed)',
'Memorial Day',
'Independence Day',
'Independence Day (Observed)',
'Labor Day',
'Thanksgiving',
'Christmas Day',
'Christmas Day (Observed)'
]
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
BLUE_LED = 18
GREEN_LED = 23
GPIO.setup(BLUE_LED, GPIO.OUT)
GPIO.setup(GREEN_LED, GPIO.OUT)
left_display = SevenSegment.SevenSegment(address=0x71)
right_display = SevenSegment.SevenSegment(address=0x72)
left_display.begin()
right_display.begin()
sensor = Adafruit_DHT.DHT22
pin = 4
ctr = 0
start_date = datetime(2017, 6, 22, 0, 1, 1)
def update_temp():
humidity, temperature = Adafruit_DHT.read_retry(sensor, pin)
if temperature is not None:
right_display.clear()
right_display.set_colon(False)
right_display.print_float(temperature)
right_display.write_display()
def leds_on(now):
GPIO.output(BLUE_LED, True)
first = (start_date - timedelta(days=start_date.weekday()))
second = (now - timedelta(days=now.weekday()))
weeks = (first - second).days / 7
if weeks % 2 == 1:
GPIO.output(GREEN_LED, True)
def leds_off():
GPIO.output(BLUE_LED, False)
GPIO.output(GREEN_LED, False)
def update_time(now):
hour = now.hour
minute = now.minute
second = now.second
left_display.clear()
left_display.set_digit(0, int(hour / 10))
left_display.set_digit(1, hour % 10)
left_display.set_digit(2, int(minute / 10))
left_display.set_digit(3, minute % 10)
left_display.set_colon(second % 2)
left_display.write_display()
def holiday_week(day):
if holiday(day):
return True
else:
n = -1
for x in range(0, 3):
previous_day = day + timedelta(days=n)
if holiday(previous_day):
return True
n -= 1
return False
def holiday(day):
hol_test = us_holidays.get(day)
if hol_test in holiday_list:
return True
elif isinstance(hol_test, list):
return bool(set(holiday_list) & set(us_holidays.get(day)))
return False
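# Illustrative walk-through (assumed dates): if Independence Day falls on a Tuesday,
# holiday_week() is True on that Thursday, so the LEDs stay off; on Friday the check
# against the previous day succeeds and leds_on() runs a day late. In a normal week
# the LEDs simply light up on Thursday.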
print('Press Ctrl-C to quit.')
try:
while(True):
now = datetime.now()
# once every 30 seconds
if ctr == 120:
t = threading.Thread(target=update_temp)
t.start()
ctr = 0
leds_off()
weekday = now.weekday()
if weekday == 3:
# thursday
if not holiday_week(now):
leds_on(now)
elif weekday == 4:
# friday
previous_day = now + timedelta(days=-1)
if holiday_week(previous_day):
leds_on(now)
update_time(now)
time.sleep(0.25)
ctr += 1
finally:
GPIO.cleanup()
|
import numpy as _onp
from numpy import pi as _pi
_rad2deg = 180. / _pi
_deg2rad = _pi / 180.
def degrees(x):
    """Converts an input x from radians to degrees"""
    return x * _rad2deg
def radians(x):
    """Converts an input x from degrees to radians"""
    return x * _deg2rad
def sind(x):
"""Returns the sin of an angle x, given in degrees"""
return _onp.sin(radians(x))
def cosd(x):
"""Returns the cos of an angle x, given in degrees"""
return _onp.cos(radians(x))
def tand(x):
"""Returns the tangent of an angle x, given in degrees"""
return _onp.tan(radians(x))
def arcsind(x):
"""Returns the arcsin of an x, in degrees"""
return degrees(_onp.arcsin(x))
def arccosd(x):
"""Returns the arccos of an x, in degrees"""
return degrees(_onp.arccos(x))
def arctan2d(y, x):
"""Returns the angle associated with arctan(y, x), in degrees"""
return degrees(_onp.arctan2(y, x))
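if __name__ == "__main__":
    # Minimal sanity check (illustrative, not part of the library): with the constants
    # defined above, sind(30) ~= 0.5, arccosd(0) == 90.0 and arctan2d(1, 1) == 45.0.
    print(sind(30.0), arccosd(0.0), arctan2d(1.0, 1.0))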
|
from typing import List
# Definition for singly-linked list.
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
class Solution:
def mergeKLists(self, lists: List[ListNode]) -> ListNode:
def mergeTwoLists(l1: ListNode, l2: ListNode) -> ListNode:
            sentinel = ListNode(-1)  # sentinel node
curr = sentinel
while l1 and l2:
if l1.val <= l2.val:
curr.next = l1
l1 = l1.next
else:
curr.next = l2
l2 = l2.next
curr = curr.next
curr.next = l1 if l1 is not None else l2
return sentinel.next
if not lists:
return None
l = lists[0]
for i in range(1, len(lists)):
l = mergeTwoLists(l, lists[i])
return l
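if __name__ == "__main__":
    # Small illustrative check (assumed inputs): merge 1->4->5, 1->3->4 and 2->6.
    def build(values):
        head = ListNode(values[0])
        node = head
        for v in values[1:]:
            node.next = ListNode(v)
            node = node.next
        return head
    merged = Solution().mergeKLists([build([1, 4, 5]), build([1, 3, 4]), build([2, 6])])
    out = []
    while merged:
        out.append(merged.val)
        merged = merged.next
    print(out)  # expected: [1, 1, 2, 3, 4, 4, 5, 6]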
|
from azure.cognitiveservices.vision.customvision.training import CustomVisionTrainingClient
from azure.cognitiveservices.vision.customvision.training.models import ImageFileCreateBatch, ImageFileCreateEntry, Region
from msrest.authentication import ApiKeyCredentials
import time
import os
def main():
from dotenv import load_dotenv
global training_client
global custom_vision_project
try:
# Get Configuration Settings
load_dotenv()
training_endpoint = os.getenv('TrainingEndpoint')
training_key = os.getenv('TrainingKey')
project_id = os.getenv('ProjectID')
# Authenticate a client for the training API
credentials = ApiKeyCredentials(in_headers={"Training-key": training_key})
training_client = CustomVisionTrainingClient(training_endpoint, credentials)
# Get the Custom Vision project
custom_vision_project = training_client.get_project(project_id)
# Upload and tag images
Upload_Images('more-training-images')
# Train the model
Train_Model()
except Exception as ex:
print(ex)
def Upload_Images(folder):
print("Uploading images...")
tags = training_client.get_tags(custom_vision_project.id)
for tag in tags:
print(tag.name)
for image in os.listdir(os.path.join(folder,tag.name)):
image_data = open(os.path.join(folder,tag.name,image), "rb").read()
training_client.create_images_from_data(custom_vision_project.id, image_data, [tag.id])
def Train_Model():
print("Training ...")
iteration = training_client.train_project(custom_vision_project.id)
while (iteration.status != "Completed"):
iteration = training_client.get_iteration(custom_vision_project.id, iteration.id)
print (iteration.status, '...')
time.sleep(5)
print ("Model trained!")
if __name__ == "__main__":
main()
|
#!/usr/bin/python
import argparse
import json
import re
import readline
import requests
import sys
import time
from utils import *
from attacks import *
class GraphQLmap(object):
author = "@pentest_swissky"
version = "1.0"
endpoint = "graphql"
method = "POST"
args = None
url = None
def __init__(self, args):
print(" _____ _ ____ _ ")
print(" / ____| | | / __ \| | ")
print(" | | __ _ __ __ _ _ __ | |__ | | | | | _ __ ___ __ _ _ __ ")
print(" | | |_ | '__/ _` | '_ \| '_ \| | | | | | '_ ` _ \ / _` | '_ \ ")
print(" | |__| | | | (_| | |_) | | | | |__| | |____| | | | | | (_| | |_) |")
print(" \_____|_| \__,_| .__/|_| |_|\___\_\______|_| |_| |_|\__,_| .__/ ")
print(" | | | | ")
print(" |_| |_| ")
print(" "*30 + f"\033[1mAuthor\033[0m: {self.author} \033[1mVersion\033[0m: {self.version} ")
self.args = args
self.url = args.url
self.method = args.method
while True:
query = input("GraphQLmap > ")
cmdlist.append(query)
if query == "exit" or query == "q":
exit()
elif query == "help":
display_help()
elif query == "debug":
display_types(self.url, self.method)
elif query == "dump_new":
dump_schema(self.url, self.method, 15)
elif query == "dump_old":
dump_schema(self.url, self.method, 14)
elif query == "nosqli":
blind_nosql(self.url, self.method)
elif query == "postgresqli":
blind_postgresql(self.url, self.method)
elif query == "mysqli":
blind_mysql(self.url, self.method)
elif query == "mssqli":
blind_mssql(self.url, self.method)
else:
exec_advanced(args.url, self.method, query)
if __name__ == "__main__":
readline.set_completer(auto_completer)
readline.parse_and_bind("tab: complete")
args = parse_args()
GraphQLmap(args) |
import numpy as np
import torch
import torch.nn as nn
from utils.transforms import outer_product
VERY_SMALL_NUMBER = 1e-16
def noise_like(tensor, noise_type, noise, label_slices=None):
if noise_type == 'expand':
noise_tensor = randn_like_expand(tensor, label_slices, sigma1=noise)
elif noise_type == 'const':
noise_tensor = randn_like_with_clamp(tensor) * noise
else:
raise NotImplementedError
return noise_tensor
def randn_like_with_clamp(tensor, clamp_std=3):
noise_tensor = torch.randn_like(tensor)
return torch.clamp(noise_tensor, min=-clamp_std, max=clamp_std)
def randn_like_expand(tensor, label_slices, sigma0=0.01, sigma1=10, num_sigma=50):
# max_noise_std = 20 if noise >= 0.5 else 5
# noise_std_list = np.linspace(0, 1, 11)[:-1].tolist() + np.linspace(1, max_noise_std, 20).tolist()
# idx = np.random.randint(0, len(noise_std_list))
# noise_tensor = torch.randn_like(tensor) * noise_std_list[idx]
sigmas = np.exp(np.linspace(np.log(sigma0), np.log(sigma1), num_sigma))
batch_noise_std = np.random.choice(sigmas, len(label_slices))
batch_noise_std = torch.tensor(batch_noise_std, dtype=torch.float32)
batch_noise_std = torch.repeat_interleave(batch_noise_std, torch.tensor(label_slices))
# print('noise tensor shape: ', tensor.shape, 'noise std shape: ', batch_noise_std.shape)
# print('label slices: ', label_slices)
# print('batch noise std: ', batch_noise_std)
noise_tensor = torch.randn_like(tensor) * batch_noise_std.unsqueeze(-1).to(tensor)
# print('noise tensor: ', noise_tensor.shape)
return noise_tensor
class GaussianSmearing(nn.Module):
def __init__(self, start=0.0, stop=5.0, num_gaussians=50):
super(GaussianSmearing, self).__init__()
self.start = start
self.stop = stop
self.num_gaussians = num_gaussians
offset = torch.linspace(start, stop, num_gaussians)
self.coeff = -0.5 / (offset[1] - offset[0]).item() ** 2
self.register_buffer('offset', offset)
def __repr__(self):
return f'GaussianSmearing(start={self.start}, stop={self.stop}, num_gaussians={self.num_gaussians})'
def forward(self, dist):
dist = dist.view(-1, 1) - self.offset.view(1, -1)
return torch.exp(self.coeff * torch.pow(dist, 2))
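# Shape sketch (illustrative): GaussianSmearing(0., 5., 50)(torch.rand(8)) returns an
# (8, 50) tensor -- each scalar distance is expanded onto 50 Gaussian basis functions
# centred on a linspace between `start` and `stop`.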
class AngleExpansion(nn.Module):
def __init__(self, start=1.0, stop=5.0, half_expansion=10):
super(AngleExpansion, self).__init__()
l_mul = 1. / torch.linspace(stop, start, half_expansion)
r_mul = torch.linspace(start, stop, half_expansion)
coeff = torch.cat([l_mul, r_mul], dim=-1)
self.register_buffer('coeff', coeff)
def forward(self, angle):
return torch.cos(angle.view(-1, 1) * self.coeff.view(1, -1))
class Swish(nn.Module):
def __init__(self):
super(Swish, self).__init__()
self.beta = nn.Parameter(torch.tensor(1.0))
def forward(self, x):
return x * torch.sigmoid(self.beta * x)
NONLINEARITIES = {
"tanh": nn.Tanh(),
"relu": nn.ReLU(),
"softplus": nn.Softplus(),
"elu": nn.ELU(),
"swish": Swish()
}
class MLP(nn.Module):
"""MLP with the same hidden dim across all layers."""
def __init__(self, in_dim, out_dim, hidden_dim, num_layer=2, norm=True, act_fn='relu', act_last=False):
super().__init__()
layers = []
for layer_idx in range(num_layer):
if layer_idx == 0:
layers.append(nn.Linear(in_dim, hidden_dim))
elif layer_idx == num_layer - 1:
layers.append(nn.Linear(hidden_dim, out_dim))
else:
layers.append(nn.Linear(hidden_dim, hidden_dim))
if layer_idx < num_layer - 1 or act_last:
if norm:
layers.append(nn.LayerNorm(hidden_dim))
layers.append(NONLINEARITIES[act_fn])
self.net = nn.Sequential(*layers)
def forward(self, x):
return self.net(x)
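# Usage sketch (illustrative): MLP(in_dim=16, out_dim=4, hidden_dim=64, num_layer=3)
# maps a [B, 16] tensor to [B, 4], inserting LayerNorm + ReLU after every layer except
# the last (unless act_last=True).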
def convert_dgl_to_batch(dgl_batch, device):
batch_idx = []
for idx, num_nodes in enumerate(dgl_batch.batch_num_nodes().tolist()):
batch_idx.append(torch.ones(num_nodes, dtype=torch.long) * idx)
batch_idx = torch.cat(batch_idx).to(device)
return batch_idx
def get_h_dist(dist_metric, hi, hj):
if dist_metric == 'euclidean':
h_dist = torch.sum((hi - hj) ** 2, -1, keepdim=True)
return h_dist
elif dist_metric == 'cos_sim':
hi_norm = torch.norm(hi, p=2, dim=-1, keepdim=True)
hj_norm = torch.norm(hj, p=2, dim=-1, keepdim=True)
h_dist = torch.sum(hi * hj, -1, keepdim=True) / (hi_norm * hj_norm)
return h_dist, hj_norm
def get_r_feat(r, r_exp_func, node_type, edge_index, mode):
if mode == 'origin':
r_feat = r
elif mode == 'basic':
r_feat = r_exp_func(r)
elif mode == 'sparse':
src, dst = edge_index
nt_src = node_type[src] # [n_edges, 8]
nt_dst = node_type[dst]
r_exp = r_exp_func(r)
r_feat = outer_product(nt_src, nt_dst, r_exp)
else:
raise ValueError(mode)
return r_feat |
from pypika import analytics as an
from pyspark.sql import functions as F
ENGINE = "spark"
agg_mapping = {
    "min": {"spark": F.min, "sql": an.Min},
    "max": {"spark": F.max, "sql": an.Max},
    # assumed extensions so that every accessor in Agg resolves to its own key;
    # SQL counterparts are filled in only where pypika.analytics provides one
    "stddev": {"spark": F.stddev, "sql": an.StdDev},
    "kurtosis": {"spark": F.kurtosis},
    "mean": {"spark": F.mean, "sql": an.Avg},
    "skewness": {"spark": F.skewness},
    "sum": {"spark": F.sum, "sql": an.Sum},
    "variance": {"spark": F.variance, "sql": an.Variance},
}
funcs = [F.min, F.max, F.stddev, F.kurtosis, F.mean, F.skewness, F.sum, F.variance]
class Agg:
@staticmethod
def min():
return agg_mapping["min"][ENGINE]
@staticmethod
def max():
return agg_mapping["max"][ENGINE]
@staticmethod
def stddev():
return agg_mapping["stddev"][ENGINE]
    @staticmethod
    def kurtosis():
        # https://www.periscopedata.com/blog/understanding-outliers-with-skew-and-kurtosis
        return agg_mapping["kurtosis"][ENGINE]
    @staticmethod
    def mean():
        return agg_mapping["mean"][ENGINE]
    @staticmethod
    def skewness():
        return agg_mapping["skewness"][ENGINE]
    @staticmethod
    def sum():
        return agg_mapping["sum"][ENGINE]
    @staticmethod
    def variance():
        return agg_mapping["variance"][ENGINE]
|
# -*- coding: utf-8 -*-
"""VIF calculation."""
from wildfires.analysis import vif
from ..cache import cache
@cache
def calculate_vif(X):
"""Calculate the VIF."""
return vif(X, verbose=True).set_index("Name", drop=True).T
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys, os
import rclpy
import numpy as np
from rclpy.qos import QoSDurabilityPolicy
from rclpy.qos import QoSHistoryPolicy
from rclpy.qos import QoSProfile
from rclpy.qos import QoSReliabilityPolicy
from rclpy.node import Node
from rclpy.parameter import Parameter
import time
from nav_msgs.msg import Path, Odometry
from geometry_msgs.msg import Twist, PoseStamped, TransformStamped
from path_following.lib.utils import pathReader, findLocalPath, purePursuit, pidController, velocityPlanning, vaildObject, cruiseControl
from tf2_msgs.msg import TFMessage
from math import cos,sin,sqrt,pow,atan2,pi
class pid_planner(Node):
def __init__(self):
super().__init__('pid_planner')
QOS_RKL10V = QoSProfile(
reliability=QoSReliabilityPolicy.RELIABLE,
history=QoSHistoryPolicy.KEEP_LAST,
depth=10,
durability=QoSDurabilityPolicy.VOLATILE)
self.configure()
        self.is_status=False ## vehicle status check
        self.pub_tf = self.create_publisher(TFMessage, "/tf", QOS_RKL10V) ## dashing does not support the tf2 module, so publish TFMessage directly
# path data reader
        path_reader = pathReader('path_following') ## package that contains the path file
        self.global_path = path_reader.read_txt(self.path_file_name, self.path_frame) ## name of the path to publish
# cmd publisher
self.ctrl_pub = self.create_publisher(Twist, '/ctrl_cmd', QOS_RKL10V)
self.ctrl_msg = Twist()
        ## published as a Twist message, so the deepracer itself must drive with this linear and angular velocity
self.global_path_pub = self.create_publisher(Path, '/global_path', QOS_RKL10V) ## global_path publisher
self.local_path_pub = self.create_publisher(Path, '/local_path', QOS_RKL10V) ## local_path publisher
self.odometry_path_pub = self.create_publisher(Path, '/odom_path', QOS_RKL10V) ## odometry history
self.odometry_path_msg = Path()
# ros subscriber
        self.status_subscriber = self.create_subscription(Odometry, "/Ego_globalstate", self.statusCB, QOS_RKL10V) ## Vehicle Status Subscriber
        ## the subscribed message type must be Odometry
# class
self.pure_pursuit = purePursuit(self.vehicle_length, self.lfd, self.min_lfd, self.max_lfd) ## purePursuit import
self.pid = pidController(self.p_gain, self.i_gain, self.d_gain, self.control_time)
self.cc = cruiseControl(0.5,1) ## cruiseControl import (object_vel_gain, object_dis_gain)
ref_vel = float(self.reference_velocity)/float(3.6) # m/s
        self.vel_planner = velocityPlanning(ref_vel, self.road_friction) ## velocity planning (reference velocity, friction)
self.vel_profile = self.vel_planner.curveBasedVelocity(self.global_path,100)
def configure(self):
# declare parameters
self.declare_parameter('path_file_name', 'turtlebot.txt')
self.declare_parameter('platform', 'turtlebot')
self.declare_parameter('frequency', 20)
self.declare_parameter('path_frame', '/odom')
self.declare_parameter('local_path_step', 5)
self.declare_parameter('vehicle_length', 0.28)
self.declare_parameter('initial_lfd', 0.5)
self.declare_parameter('min_lfd', 0.5)
self.declare_parameter('max_lfd', 3)
self.declare_parameter('road_friction', 0.15)
self.declare_parameter('reference_velocity', 1.5)
self.declare_parameter('p_gain', 1.0)
self.declare_parameter('i_gain', 0.0)
self.declare_parameter('d_gain', 0.05)
self.path_file_name = self.get_parameter("path_file_name").value
self.platform = self.get_parameter("platform").value
self.frequency = self.get_parameter("frequency").value
self.path_frame = self.get_parameter("path_frame").value
self.local_path_step = self.get_parameter("local_path_step").value
# Steering (purePursuit)
self.vehicle_length = self.get_parameter("vehicle_length").value
self.lfd = self.get_parameter("initial_lfd").value
self.min_lfd = self.get_parameter("min_lfd").value
self.max_lfd = self.get_parameter("max_lfd").value
# PID Controller
self.road_friction = self.get_parameter("road_friction").value
self.reference_velocity = self.get_parameter("reference_velocity").value
self.p_gain = self.get_parameter("p_gain").value
self.i_gain = self.get_parameter("i_gain").value
self.d_gain = self.get_parameter("d_gain").value
self.control_time = float(1)/float(self.get_parameter("frequency").value)
def pub_local_path_ctrl_msg(self):
        ## build the current waypoint and local_path from the global_path and the vehicle status_msg
self.local_path, self.current_waypoint = findLocalPath(self.global_path, self.status_msg, self.path_frame, self.local_path_step)
# Steering Control (steering_angle; pure pursuit control)
self.get_steering_angle()
# Cruise Control (control_input; velocity)
self.get_control_velocity()
        self.local_path_pub.publish(self.local_path) ## publish the local path
        self.ctrl_pub.publish(self.ctrl_msg) ## publish the vehicle control command
def get_steering_angle(self):
        self.pure_pursuit.getPath(self.local_path) ## feed the local path to the pure pursuit algorithm
        self.pure_pursuit.getEgoStatus(self.status_msg) ## feed the vehicle status to the pure pursuit algorithm
ego_current_velocity = self.status_msg.twist.twist.linear
velocity = ego_current_velocity.x
steering_angle = self.pure_pursuit.steering_angle()
        if not steering_angle:
            self.get_logger().info("forward point not found")
L = self.vehicle_length # vehicle length (m)
self.ctrl_msg.angular.z = velocity * sin(steering_angle) / L # angular velocity
def get_control_velocity(self):
ego_current_velocity = self.status_msg.twist.twist.linear
        target_velocity = self.cc.acc(ego_current_velocity, self.vel_profile[self.current_waypoint]) ## velocity plan with adaptive cruise control applied
        control_input = self.pid.pid(target_velocity, ego_current_velocity.x) ## PID control for velocity (target velocity, current velocity)
        if control_input > 0:
            self.ctrl_msg.linear.x = control_input # (km/h)
        else:
            self.ctrl_msg.linear.x = 0.0
        # zero out the unused velocity components; angular.z (steering) is set in get_steering_angle
        self.ctrl_msg.linear.y = 0.0
        self.ctrl_msg.linear.z = 0.0
        self.ctrl_msg.angular.x = 0.0
        self.ctrl_msg.angular.y = 0.0
def sendTransform(self, translation, rotation, time, child, parent):
t = TransformStamped()
t.header.frame_id = parent
t.header.stamp = time
t.child_frame_id = child
t.transform.translation.x = translation[0]
t.transform.translation.y = translation[1]
t.transform.translation.z = translation[2]
t.transform.rotation.x = rotation[0]
t.transform.rotation.y = rotation[1]
t.transform.rotation.z = rotation[2]
t.transform.rotation.w = rotation[3]
tf = TFMessage()
tf.transforms = [t]
self.pub_tf.publish(tf)
def statusCB(self, msg): ## Vehicle Status Subscriber
self.is_status=True
self.status_msg = msg
Ego_HeadingAngle = [self.status_msg.pose.pose.orientation.x, self.status_msg.pose.pose.orientation.y, self.status_msg.pose.pose.orientation.z, self.status_msg.pose.pose.orientation.w]
# Map -> gps TF Broadcaster
self.sendTransform([self.status_msg.pose.pose.position.x, self.status_msg.pose.pose.position.y, 0.0],
Ego_HeadingAngle,
self.get_clock().now().to_msg(),
"gps", # child frame "base_link"
"odom") # parent frame "map"
# Odometry history viewer
last_point = PoseStamped()
last_point.pose.position.x = self.status_msg.pose.pose.position.x
last_point.pose.position.y = self.status_msg.pose.pose.position.y
last_point.pose.position.z = 0.0
last_point.pose.orientation.x = 0.0
last_point.pose.orientation.y = 0.0
last_point.pose.orientation.z = 0.0
last_point.pose.orientation.w = 1.0
self.odometry_path_msg.header.frame_id = self.path_frame
self.odometry_path_msg.poses.append(last_point)
self.odometry_path_pub.publish(self.odometry_path_msg)
def getEgoVel(self):
vx = self.status_msg.twist.twist.linear.x
vy = self.status_msg.twist.twist.linear.y
return np.sqrt(np.power(vx, 2) + np.power(vy,2))
def main():
rclpy.init(args=None)
try:
node = pid_planner()
try:
count = 0
while rclpy.ok():
rclpy.spin_once(node)
node.pub_local_path_ctrl_msg()
if count == node.frequency:
count=0
node.global_path_pub.publish(node.global_path)
count+=1
time.sleep(node.control_time)
except KeyboardInterrupt:
            node.get_logger().info('Keyboard Interrupt (SIGINT)')
finally:
node.destroy_node()
finally:
rclpy.shutdown()
if __name__ == '__main__':
main() |
import basevcstest
class TestVCSBoxfill(basevcstest.VCSBaseTest):
def boxfillProjection(self, projection, zoom):
a = self.clt("clt")
self.x.clear()
p = self.x.getprojection(projection)
b = self.x.createboxfill()
b.projection = p
if zoom is None:
self.x.plot(a(latitude=(90, -90)), b, bg=self.bg)
zm = ""
elif zoom == 'subset':
self.x.plot(a(latitude=(-50, 90), longitude=(30, -30)),
b, bg=self.bg)
zm = "_%s" % zoom
else:
b.datawc_x1 = 30
b.datawc_x2 = -30
b.datawc_y1 = -50
b.datawc_y2 = 90
self.x.plot(a, b, bg=self.bg)
zm = "_%s" % zoom
fileName = "test_vcs_boxfill_%s%s.png" % (projection, zm)
self.checkImage(fileName)
def testBoxfillProjection(self):
for proj in "polar mollweide lambert orthographic mercator polyconic robinson".split():
for zoom in [None, "datawc", "subset"]:
self.boxfillProjection(proj, zoom)
|
"""
This script reads the original labels of Cityscapes (CO) and compares them against
the Cityscapes-Panoptic-Parts (CPP) labels. It verifies that the semantic and instance
level labels of Cityscapes Panoptic Parts (CPP) are equivalent to
original Cityscapes (CO), i.e., sids_iids_CPP == sids_iids_CO.
"""
import sys
assert sys.version_info >= (3, 6), 'This test uses Python >= 3.6 functionality.'
import os.path as op
import glob
import multiprocessing
import numpy as np
from PIL import Image
from panoptic_parts.utils.format import decode_uids
# find all label paths
BASEPATH_LABELS_ORIGINAL = 'tests/tests_files/cityscapes/gtFine'
labels_paths_original = glob.glob(op.join(BASEPATH_LABELS_ORIGINAL, 'train', '*', '*_instanceIds.png'))
labels_paths_original.extend(glob.glob(op.join(BASEPATH_LABELS_ORIGINAL, 'val', '*', '*_instanceIds.png')))
print(len(labels_paths_original))
labels_paths_ours = [
lp.replace('cityscapes/gtFine', 'cityscapes_panoptic_parts/gtFine_v2').replace('_instanceIds.png', 'PanopticParts.tif')
for lp in labels_paths_original]
print(len(labels_paths_ours))
def _sids_iids_are_maintained(inpts):
lp_orig, lp_ours = inpts
labels_orig = np.asarray(Image.open(lp_orig), dtype=np.int32)
labels_ours = np.asarray(Image.open(lp_ours), dtype=np.int32)
_, _, _, sids_iids = decode_uids(labels_ours, return_sids_iids=True)
returns = np.all(np.equal(labels_orig, sids_iids))
# if not returns:
# print(lp_orig, lp_ours, sep='\n')
# print(np.unique(labels_orig), print(np.unique(sids_iids)), np.unique(labels_ours), sep='\n')
return returns
# validate labels
with multiprocessing.Pool(multiprocessing.cpu_count()) as pool:
  maintained_bools = [mb for mb in pool.imap_unordered(
_sids_iids_are_maintained, zip(labels_paths_original, labels_paths_ours), chunksize=10)]
print(len(maintained_bools), 'files were verified.')
assert all(maintained_bools), 'some sids_iids are not the same'
|
# Copyright 2012 NetApp. All rights reserved.
# Copyright (c) 2015 Tom Barron. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Unit tests for the Share API module."""
import copy
import datetime
import uuid
import ddt
import mock
from oslo_config import cfg
from oslo_utils import timeutils
from manila.common import constants
from manila import context
from manila import db as db_api
from manila.db.sqlalchemy import models
from manila import exception
from manila import quota
from manila import share
from manila.share import api as share_api
from manila.share import share_types
from manila import test
from manila.tests import db_utils
from manila.tests import utils as test_utils
from manila import utils
CONF = cfg.CONF
def fake_share(id, **kwargs):
share = {
'id': id,
'size': 1,
'user_id': 'fakeuser',
'project_id': 'fakeproject',
'snapshot_id': None,
'share_network_id': None,
'share_type_id': None,
'availability_zone': 'fakeaz',
'status': 'fakestatus',
'display_name': 'fakename',
'metadata': None,
'display_description': 'fakedesc',
'share_proto': 'nfs',
'export_location': 'fake_location',
'host': 'fakehost',
'is_public': False,
'consistency_group_id': None,
'scheduled_at': datetime.datetime(1, 1, 1, 1, 1, 1),
'launched_at': datetime.datetime(1, 1, 1, 1, 1, 1),
'terminated_at': datetime.datetime(1, 1, 1, 1, 1, 1)
}
share.update(kwargs)
return share
def fake_snapshot(id, **kwargs):
snapshot = {
'id': id,
'share_size': 1,
'size': 1,
'user_id': 'fakeuser',
'project_id': 'fakeproject',
'share_id': None,
'availability_zone': 'fakeaz',
'status': 'fakestatus',
'display_name': 'fakename',
'display_description': 'fakedesc',
'share_proto': 'nfs',
'progress': 'fakeprogress99%',
'scheduled_at': datetime.datetime(1, 1, 1, 1, 1, 1),
'launched_at': datetime.datetime(1, 1, 1, 1, 1, 1),
'terminated_at': datetime.datetime(1, 1, 1, 1, 1, 1),
'share': {'host': 'fake_source_host'},
}
snapshot.update(kwargs)
return snapshot
def fake_access(id, **kwargs):
access = {
'id': id,
'share_id': 'fakeshareid',
'access_type': 'fakeacctype',
'access_to': 'fakeaccto',
'access_level': 'rw',
'state': 'fakeactive',
'STATE_NEW': 'fakenew',
'STATE_ACTIVE': 'fakeactive',
'STATE_DELETING': 'fakedeleting',
'STATE_DELETED': 'fakedeleted',
'STATE_ERROR': 'fakeerror',
}
access.update(kwargs)
return access
_FAKE_LIST_OF_ALL_SHARES = [
{
'name': 'foo',
'status': constants.STATUS_AVAILABLE,
'project_id': 'fake_pid_1',
'share_server_id': 'fake_server_1',
},
{
'name': 'bar',
'status': constants.STATUS_ERROR,
'project_id': 'fake_pid_2',
'share_server_id': 'fake_server_2',
},
{
'name': 'foo',
'status': constants.STATUS_AVAILABLE,
'project_id': 'fake_pid_2',
'share_server_id': 'fake_server_3',
},
{
'name': 'bar',
'status': constants.STATUS_ERROR,
'project_id': 'fake_pid_2',
'share_server_id': 'fake_server_3',
},
]
_FAKE_LIST_OF_ALL_SNAPSHOTS = [
{
'name': 'foo',
'status': constants.STATUS_AVAILABLE,
'project_id': 'fake_pid_1',
'share_id': 'fake_server_1',
},
{
'name': 'bar',
'status': constants.STATUS_ERROR,
'project_id': 'fake_pid_2',
'share_id': 'fake_server_2',
},
{
'name': 'foo',
'status': constants.STATUS_AVAILABLE,
'project_id': 'fake_pid_2',
'share_id': 'fake_share_id_3',
},
{
'name': 'bar',
'status': constants.STATUS_ERROR,
'project_id': 'fake_pid_2',
'share_id': 'fake_share_id_3',
},
]
@ddt.ddt
class ShareAPITestCase(test.TestCase):
def setUp(self):
super(ShareAPITestCase, self).setUp()
self.context = context.get_admin_context()
self.scheduler_rpcapi = mock.Mock()
self.share_rpcapi = mock.Mock()
self.api = share.API()
self.mock_object(self.api, 'scheduler_rpcapi', self.scheduler_rpcapi)
self.mock_object(self.api, 'share_rpcapi', self.share_rpcapi)
self.mock_object(quota.QUOTAS, 'reserve',
lambda *args, **kwargs: None)
self.dt_utc = datetime.datetime.utcnow()
self.mock_object(timeutils, 'utcnow',
mock.Mock(return_value=self.dt_utc))
self.mock_object(share_api.policy, 'check_policy')
def _setup_create_mocks(self, protocol='nfs', **kwargs):
share = db_utils.create_share(
user_id=self.context.user_id,
project_id=self.context.project_id,
share_type_id='fake',
**kwargs
)
share_data = {
'share_proto': protocol,
'size': 1,
'display_name': 'fakename',
'display_description': 'fakedesc',
'availability_zone': 'fakeaz'
}
self.mock_object(db_api, 'share_create', mock.Mock(return_value=share))
self.mock_object(self.api, 'create_instance')
return share, share_data
def _setup_create_instance_mocks(self):
host = 'fake'
share_type_id = "fake_share_type"
share = db_utils.create_share(
user_id=self.context.user_id,
project_id=self.context.project_id,
create_share_instance=False,
share_type_id=share_type_id,
)
share_instance = db_utils.create_share_instance(share_id=share['id'])
share_metadata = {'fake': 'fake'}
share_type = {'fake': 'fake'}
self.mock_object(db_api, 'share_instance_create',
mock.Mock(return_value=share_instance))
self.mock_object(db_api, 'share_metadata_get',
mock.Mock(return_value=share_metadata))
self.mock_object(db_api, 'share_type_get',
mock.Mock(return_value=share_type))
az_mock = mock.Mock()
type(az_mock.return_value).id = mock.PropertyMock(
return_value='fake_id')
self.mock_object(db_api, 'availability_zone_get', az_mock)
self.mock_object(self.api.share_rpcapi, 'create_share_instance')
self.mock_object(self.api.scheduler_rpcapi, 'create_share_instance')
return host, share, share_instance
def _setup_create_from_snapshot_mocks(self, use_scheduler=True, host=None):
CONF.set_default("use_scheduler_creating_share_from_snapshot",
use_scheduler)
original_share = db_utils.create_share(
user_id=self.context.user_id,
project_id=self.context.project_id,
status=constants.STATUS_AVAILABLE,
host=host if host else 'fake',
size=1
)
snapshot = db_utils.create_snapshot(
share_id=original_share['id'],
status=constants.STATUS_AVAILABLE,
size=1
)
share, share_data = self._setup_create_mocks(
snapshot_id=snapshot['id'])
request_spec = {
'share_properties': share.to_dict(),
'share_proto': share['share_proto'],
'share_id': share['id'],
'share_type': None,
'snapshot_id': share['snapshot_id'],
}
self.mock_object(quota.QUOTAS, 'reserve',
mock.Mock(return_value='reservation'))
self.mock_object(quota.QUOTAS, 'commit')
self.mock_object(share_types, 'get_share_type')
return snapshot, share, share_data, request_spec
def _setup_delete_mocks(self, status, snapshots=[], **kwargs):
share = db_utils.create_share(status=status, **kwargs)
self.mock_object(db_api, 'share_delete')
self.mock_object(db_api, 'share_server_update')
self.mock_object(db_api, 'share_snapshot_get_all_for_share',
mock.Mock(return_value=snapshots))
self.mock_object(self.api, 'delete_instance')
self.mock_object(quota.QUOTAS, 'reserve',
mock.Mock(return_value='reservation'))
self.mock_object(quota.QUOTAS, 'commit')
return share
def _setup_delete_share_instance_mocks(self, **kwargs):
share = db_utils.create_share(**kwargs)
self.mock_object(db_api, 'share_instance_update',
mock.Mock(return_value=share.instance))
self.mock_object(self.api.share_rpcapi, 'delete_share_instance')
self.mock_object(db_api, 'share_server_update')
return share.instance
def test_get_all_admin_no_filters(self):
self.mock_object(db_api, 'share_get_all_by_project',
mock.Mock(return_value=_FAKE_LIST_OF_ALL_SHARES[0]))
ctx = context.RequestContext('fake_uid', 'fake_pid_1', is_admin=True)
shares = self.api.get_all(ctx)
share_api.policy.check_policy.assert_called_once_with(
ctx, 'share', 'get_all')
db_api.share_get_all_by_project.assert_called_once_with(
ctx, sort_dir='desc', sort_key='created_at',
project_id='fake_pid_1', filters={}, is_public=False
)
self.assertEqual(_FAKE_LIST_OF_ALL_SHARES[0], shares)
def test_get_all_admin_filter_by_all_tenants(self):
ctx = context.RequestContext('fake_uid', 'fake_pid_1', is_admin=True)
self.mock_object(db_api, 'share_get_all',
mock.Mock(return_value=_FAKE_LIST_OF_ALL_SHARES))
shares = self.api.get_all(ctx, {'all_tenants': 1})
share_api.policy.check_policy.assert_called_once_with(
ctx, 'share', 'get_all')
db_api.share_get_all.assert_called_once_with(
ctx, sort_dir='desc', sort_key='created_at', filters={})
self.assertEqual(_FAKE_LIST_OF_ALL_SHARES, shares)
def test_get_all_non_admin_filter_by_share_server(self):
def fake_policy_checker(*args, **kwargs):
if 'list_by_share_server_id' == args[2] and not args[0].is_admin:
raise exception.NotAuthorized
ctx = context.RequestContext('fake_uid', 'fake_pid_1', is_admin=False)
self.mock_object(share_api.policy, 'check_policy',
mock.Mock(side_effect=fake_policy_checker))
self.assertRaises(
exception.NotAuthorized,
self.api.get_all,
ctx,
{'share_server_id': 'fake'},
)
share_api.policy.check_policy.assert_has_calls([
mock.call(ctx, 'share', 'get_all'),
mock.call(ctx, 'share', 'list_by_share_server_id'),
])
def test_get_all_admin_filter_by_share_server_and_all_tenants(self):
# NOTE(vponomaryov): if share_server_id provided, 'all_tenants' opt
# should not make any influence.
ctx = context.RequestContext('fake_uid', 'fake_pid_1', is_admin=True)
self.mock_object(db_api, 'share_get_all_by_share_server',
mock.Mock(return_value=_FAKE_LIST_OF_ALL_SHARES[2:]))
self.mock_object(db_api, 'share_get_all')
self.mock_object(db_api, 'share_get_all_by_project')
shares = self.api.get_all(
ctx, {'share_server_id': 'fake_server_3', 'all_tenants': 1})
share_api.policy.check_policy.assert_has_calls([
mock.call(ctx, 'share', 'get_all'),
mock.call(ctx, 'share', 'list_by_share_server_id'),
])
db_api.share_get_all_by_share_server.assert_called_once_with(
ctx, 'fake_server_3', sort_dir='desc', sort_key='created_at',
filters={},
)
db_api.share_get_all_by_project.assert_has_calls([])
db_api.share_get_all.assert_has_calls([])
self.assertEqual(_FAKE_LIST_OF_ALL_SHARES[2:], shares)
def test_get_all_admin_filter_by_name(self):
ctx = context.RequestContext('fake_uid', 'fake_pid_2', is_admin=True)
self.mock_object(db_api, 'share_get_all_by_project',
mock.Mock(return_value=_FAKE_LIST_OF_ALL_SHARES[1:]))
shares = self.api.get_all(ctx, {'name': 'bar'})
share_api.policy.check_policy.assert_has_calls([
mock.call(ctx, 'share', 'get_all'),
])
db_api.share_get_all_by_project.assert_called_once_with(
ctx, sort_dir='desc', sort_key='created_at',
project_id='fake_pid_2', filters={}, is_public=False
)
self.assertEqual(_FAKE_LIST_OF_ALL_SHARES[1::2], shares)
def test_get_all_admin_filter_by_name_and_all_tenants(self):
ctx = context.RequestContext('fake_uid', 'fake_pid_2', is_admin=True)
self.mock_object(db_api, 'share_get_all',
mock.Mock(return_value=_FAKE_LIST_OF_ALL_SHARES))
shares = self.api.get_all(ctx, {'name': 'foo', 'all_tenants': 1})
share_api.policy.check_policy.assert_has_calls([
mock.call(ctx, 'share', 'get_all'),
])
db_api.share_get_all.assert_called_once_with(
ctx, sort_dir='desc', sort_key='created_at', filters={})
self.assertEqual(_FAKE_LIST_OF_ALL_SHARES[::2], shares)
def test_get_all_admin_filter_by_status(self):
ctx = context.RequestContext('fake_uid', 'fake_pid_2', is_admin=True)
self.mock_object(db_api, 'share_get_all_by_project',
mock.Mock(return_value=_FAKE_LIST_OF_ALL_SHARES[1:]))
shares = self.api.get_all(ctx, {'status': constants.STATUS_AVAILABLE})
share_api.policy.check_policy.assert_has_calls([
mock.call(ctx, 'share', 'get_all'),
])
db_api.share_get_all_by_project.assert_called_once_with(
ctx, sort_dir='desc', sort_key='created_at',
project_id='fake_pid_2', filters={}, is_public=False
)
self.assertEqual(_FAKE_LIST_OF_ALL_SHARES[2::4], shares)
def test_get_all_admin_filter_by_status_and_all_tenants(self):
ctx = context.RequestContext('fake_uid', 'fake_pid_2', is_admin=True)
self.mock_object(db_api, 'share_get_all',
mock.Mock(return_value=_FAKE_LIST_OF_ALL_SHARES))
shares = self.api.get_all(
ctx, {'status': constants.STATUS_ERROR, 'all_tenants': 1})
share_api.policy.check_policy.assert_has_calls([
mock.call(ctx, 'share', 'get_all'),
])
db_api.share_get_all.assert_called_once_with(
ctx, sort_dir='desc', sort_key='created_at', filters={})
self.assertEqual(_FAKE_LIST_OF_ALL_SHARES[1::2], shares)
def test_get_all_non_admin_filter_by_all_tenants(self):
# Expected share list only by project of non-admin user
ctx = context.RequestContext('fake_uid', 'fake_pid_2', is_admin=False)
self.mock_object(db_api, 'share_get_all_by_project',
mock.Mock(return_value=_FAKE_LIST_OF_ALL_SHARES[1:]))
shares = self.api.get_all(ctx, {'all_tenants': 1})
share_api.policy.check_policy.assert_has_calls([
mock.call(ctx, 'share', 'get_all'),
])
db_api.share_get_all_by_project.assert_called_once_with(
ctx, sort_dir='desc', sort_key='created_at',
project_id='fake_pid_2', filters={}, is_public=False
)
self.assertEqual(_FAKE_LIST_OF_ALL_SHARES[1:], shares)
def test_get_all_non_admin_with_name_and_status_filters(self):
ctx = context.RequestContext('fake_uid', 'fake_pid_2', is_admin=False)
self.mock_object(db_api, 'share_get_all_by_project',
mock.Mock(return_value=_FAKE_LIST_OF_ALL_SHARES[1:]))
shares = self.api.get_all(
ctx, {'name': 'bar', 'status': constants.STATUS_ERROR})
share_api.policy.check_policy.assert_has_calls([
mock.call(ctx, 'share', 'get_all'),
])
db_api.share_get_all_by_project.assert_called_once_with(
ctx, sort_dir='desc', sort_key='created_at',
project_id='fake_pid_2', filters={}, is_public=False
)
# two items expected, one filtered
self.assertEqual(_FAKE_LIST_OF_ALL_SHARES[1::2], shares)
# one item expected, two filtered
shares = self.api.get_all(
ctx, {'name': 'foo', 'status': constants.STATUS_AVAILABLE})
self.assertEqual(_FAKE_LIST_OF_ALL_SHARES[2::4], shares)
share_api.policy.check_policy.assert_has_calls([
mock.call(ctx, 'share', 'get_all'),
mock.call(ctx, 'share', 'get_all'),
])
db_api.share_get_all_by_project.assert_has_calls([
mock.call(ctx, sort_dir='desc', sort_key='created_at',
project_id='fake_pid_2', filters={}, is_public=False),
mock.call(ctx, sort_dir='desc', sort_key='created_at',
project_id='fake_pid_2', filters={}, is_public=False),
])
@ddt.data('True', 'true', '1', 'yes', 'y', 'on', 't', True)
def test_get_all_non_admin_public(self, is_public):
ctx = context.RequestContext('fake_uid', 'fake_pid_2',
is_admin=False)
self.mock_object(db_api, 'share_get_all_by_project', mock.Mock(
return_value=_FAKE_LIST_OF_ALL_SHARES[1:]))
shares = self.api.get_all(ctx, {'is_public': is_public})
share_api.policy.check_policy.assert_has_calls([
mock.call(ctx, 'share', 'get_all'),
])
db_api.share_get_all_by_project.assert_called_once_with(
ctx, sort_dir='desc', sort_key='created_at',
project_id='fake_pid_2', filters={}, is_public=True
)
self.assertEqual(_FAKE_LIST_OF_ALL_SHARES[1:], shares)
@ddt.data('False', 'false', '0', 'no', 'n', 'off', 'f', False)
def test_get_all_non_admin_not_public(self, is_public):
ctx = context.RequestContext('fake_uid', 'fake_pid_2',
is_admin=False)
self.mock_object(db_api, 'share_get_all_by_project', mock.Mock(
return_value=_FAKE_LIST_OF_ALL_SHARES[1:]))
shares = self.api.get_all(ctx, {'is_public': is_public})
share_api.policy.check_policy.assert_has_calls([
mock.call(ctx, 'share', 'get_all'),
])
db_api.share_get_all_by_project.assert_called_once_with(
ctx, sort_dir='desc', sort_key='created_at',
project_id='fake_pid_2', filters={}, is_public=False
)
self.assertEqual(_FAKE_LIST_OF_ALL_SHARES[1:], shares)
@ddt.data('truefoo', 'bartrue')
def test_get_all_invalid_public_value(self, is_public):
ctx = context.RequestContext('fake_uid', 'fake_pid_2',
is_admin=False)
self.assertRaises(ValueError, self.api.get_all,
ctx, {'is_public': is_public})
share_api.policy.check_policy.assert_has_calls([
mock.call(ctx, 'share', 'get_all'),
])
def test_get_all_with_sorting_valid(self):
self.mock_object(db_api, 'share_get_all_by_project',
mock.Mock(return_value=_FAKE_LIST_OF_ALL_SHARES[0]))
ctx = context.RequestContext('fake_uid', 'fake_pid_1', is_admin=False)
shares = self.api.get_all(ctx, sort_key='status', sort_dir='asc')
share_api.policy.check_policy.assert_called_once_with(
ctx, 'share', 'get_all')
db_api.share_get_all_by_project.assert_called_once_with(
ctx, sort_dir='asc', sort_key='status',
project_id='fake_pid_1', filters={}, is_public=False
)
self.assertEqual(_FAKE_LIST_OF_ALL_SHARES[0], shares)
def test_get_all_sort_key_invalid(self):
self.mock_object(db_api, 'share_get_all_by_project',
mock.Mock(return_value=_FAKE_LIST_OF_ALL_SHARES[0]))
ctx = context.RequestContext('fake_uid', 'fake_pid_1', is_admin=False)
self.assertRaises(
exception.InvalidInput,
self.api.get_all,
ctx,
sort_key=1,
)
share_api.policy.check_policy.assert_called_once_with(
ctx, 'share', 'get_all')
def test_get_all_sort_dir_invalid(self):
self.mock_object(db_api, 'share_get_all_by_project',
mock.Mock(return_value=_FAKE_LIST_OF_ALL_SHARES[0]))
ctx = context.RequestContext('fake_uid', 'fake_pid_1', is_admin=False)
self.assertRaises(
exception.InvalidInput,
self.api.get_all,
ctx,
sort_dir=1,
)
share_api.policy.check_policy.assert_called_once_with(
ctx, 'share', 'get_all')
def _get_all_filter_metadata_or_extra_specs_valid(self, key):
self.mock_object(db_api, 'share_get_all_by_project',
mock.Mock(return_value=_FAKE_LIST_OF_ALL_SHARES[0]))
ctx = context.RequestContext('fake_uid', 'fake_pid_1', is_admin=False)
search_opts = {key: {'foo1': 'bar1', 'foo2': 'bar2'}}
shares = self.api.get_all(ctx, search_opts=search_opts.copy())
share_api.policy.check_policy.assert_called_once_with(
ctx, 'share', 'get_all')
db_api.share_get_all_by_project.assert_called_once_with(
ctx, sort_dir='desc', sort_key='created_at',
project_id='fake_pid_1', filters=search_opts, is_public=False)
self.assertEqual(_FAKE_LIST_OF_ALL_SHARES[0], shares)
def test_get_all_filter_by_metadata(self):
self._get_all_filter_metadata_or_extra_specs_valid(key='metadata')
def test_get_all_filter_by_extra_specs(self):
self._get_all_filter_metadata_or_extra_specs_valid(key='extra_specs')
def _get_all_filter_metadata_or_extra_specs_invalid(self, key):
self.mock_object(db_api, 'share_get_all_by_project',
mock.Mock(return_value=_FAKE_LIST_OF_ALL_SHARES[0]))
ctx = context.RequestContext('fake_uid', 'fake_pid_1', is_admin=False)
search_opts = {key: "{'foo': 'bar'}"}
self.assertRaises(exception.InvalidInput, self.api.get_all, ctx,
search_opts=search_opts)
share_api.policy.check_policy.assert_called_once_with(
ctx, 'share', 'get_all')
def test_get_all_filter_by_invalid_metadata(self):
self._get_all_filter_metadata_or_extra_specs_invalid(key='metadata')
def test_get_all_filter_by_invalid_extra_specs(self):
self._get_all_filter_metadata_or_extra_specs_invalid(key='extra_specs')
@ddt.data(True, False)
def test_create_public_and_private_share(self, is_public):
share, share_data = self._setup_create_mocks(is_public=is_public)
az = share_data.pop('availability_zone')
self.api.create(
self.context,
share_data['share_proto'],
share_data['size'],
share_data['display_name'],
share_data['display_description'],
availability_zone=az
)
self.assertSubDictMatch(share_data,
db_api.share_create.call_args[0][1])
@ddt.data('', 'fake', 'Truebar', 'Bartrue')
def test_create_share_with_invalid_is_public_value(self, is_public):
self.assertRaises(exception.InvalidParameterValue,
self.api.create, self.context, 'nfs', '1',
'fakename', 'fakedesc', is_public=is_public)
@ddt.data(*constants.SUPPORTED_SHARE_PROTOCOLS)
def test_create_share_valid_protocol(self, proto):
share, share_data = self._setup_create_mocks(protocol=proto)
az = share_data.pop('availability_zone')
all_protos = ','.join(
proto for proto in constants.SUPPORTED_SHARE_PROTOCOLS)
data = dict(DEFAULT=dict(enabled_share_protocols=all_protos))
with test_utils.create_temp_config_with_opts(data):
self.api.create(
self.context, proto, share_data['size'],
share_data['display_name'],
share_data['display_description'],
availability_zone=az)
self.assertSubDictMatch(share_data,
db_api.share_create.call_args[0][1])
@ddt.data(
None, '', 'fake', 'nfsfake', 'cifsfake', 'glusterfsfake', 'hdfsfake')
def test_create_share_invalid_protocol(self, proto):
share, share_data = self._setup_create_mocks(protocol=proto)
all_protos = ','.join(
proto for proto in constants.SUPPORTED_SHARE_PROTOCOLS)
data = dict(DEFAULT=dict(enabled_share_protocols=all_protos))
with test_utils.create_temp_config_with_opts(data):
self.assertRaises(
exception.InvalidInput,
self.api.create,
self.context, proto, share_data['size'],
share_data['display_name'],
share_data['display_description'])
@ddt.data({'overs': {'gigabytes': 'fake'},
'expected_exception': exception.ShareSizeExceedsAvailableQuota},
{'overs': {'shares': 'fake'},
'expected_exception': exception.ShareLimitExceeded})
@ddt.unpack
def test_create_share_over_quota(self, overs, expected_exception):
share, share_data = self._setup_create_mocks()
usages = {'gigabytes': {'reserved': 5, 'in_use': 5},
'shares': {'reserved': 10, 'in_use': 10}}
quotas = {'gigabytes': 5, 'shares': 10}
exc = exception.OverQuota(overs=overs, usages=usages, quotas=quotas)
self.mock_object(quota.QUOTAS, 'reserve', mock.Mock(side_effect=exc))
self.assertRaises(
expected_exception,
self.api.create,
self.context,
share_data['share_proto'],
share_data['size'],
share_data['display_name'],
share_data['display_description']
)
quota.QUOTAS.reserve.assert_called_once_with(
self.context, shares=1, gigabytes=share_data['size'])
@ddt.data(exception.QuotaError, exception.InvalidShare)
def test_create_share_error_on_quota_commit(self, expected_exception):
share, share_data = self._setup_create_mocks()
reservation = 'fake'
self.mock_object(quota.QUOTAS, 'reserve',
mock.Mock(return_value=reservation))
self.mock_object(quota.QUOTAS, 'commit',
mock.Mock(side_effect=expected_exception('fake')))
self.mock_object(quota.QUOTAS, 'rollback')
self.mock_object(db_api, 'share_delete')
self.assertRaises(
expected_exception,
self.api.create,
self.context,
share_data['share_proto'],
share_data['size'],
share_data['display_name'],
share_data['display_description']
)
quota.QUOTAS.rollback.assert_called_once_with(self.context,
reservation)
db_api.share_delete.assert_called_once_with(self.context, share['id'])
def test_create_share_instance_with_host_and_az(self):
host, share, share_instance = self._setup_create_instance_mocks()
self.api.create_instance(self.context, share, host=host,
availability_zone='fake')
db_api.share_instance_create.assert_called_once_with(
self.context, share['id'],
{
'share_network_id': None,
'status': constants.STATUS_CREATING,
'scheduled_at': self.dt_utc,
'host': host,
'availability_zone_id': 'fake_id',
}
)
db_api.share_metadata_get.assert_called_once_with(self.context,
share['id'])
db_api.share_type_get.assert_called_once_with(self.context,
share['share_type_id'])
self.api.share_rpcapi.create_share_instance.assert_called_once_with(
self.context,
share_instance,
host,
request_spec=mock.ANY,
filter_properties={},
snapshot_id=share['snapshot_id'],
)
self.assertFalse(
self.api.scheduler_rpcapi.create_share_instance.called)
def test_create_share_instance_without_host(self):
_, share, share_instance = self._setup_create_instance_mocks()
self.api.create_instance(self.context, share)
self.api.scheduler_rpcapi.create_share_instance.\
assert_called_once_with(
self.context, request_spec=mock.ANY, filter_properties={})
self.assertFalse(self.api.share_rpcapi.create_share_instance.called)
def test_manage_new(self):
share_data = {
'host': 'fake',
'export_location': 'fake',
'share_proto': 'fake',
}
driver_options = {}
date = datetime.datetime(1, 1, 1, 1, 1, 1)
timeutils.utcnow.return_value = date
fake_share_data = {
'id': 'fakeid',
'status': constants.STATUS_CREATING,
}
share = db_api.share_create(self.context, fake_share_data)
self.mock_object(db_api, 'share_create',
mock.Mock(return_value=share))
self.mock_object(db_api, 'share_export_locations_update')
self.mock_object(db_api, 'share_get',
mock.Mock(return_value=share))
self.mock_object(self.api, 'get_all', mock.Mock(return_value=[]))
self.api.manage(self.context,
copy.deepcopy(share_data),
driver_options)
share_data.update({
'user_id': self.context.user_id,
'project_id': self.context.project_id,
'status': constants.STATUS_MANAGING,
'scheduled_at': date,
})
export_location = share_data.pop('export_location')
self.api.get_all.assert_called_once_with(self.context, mock.ANY)
db_api.share_create.assert_called_once_with(self.context, share_data)
db_api.share_get.assert_called_once_with(self.context, share['id'])
db_api.share_export_locations_update.assert_called_once_with(
self.context, share.instance['id'], export_location
)
self.share_rpcapi.manage_share.assert_called_once_with(
self.context, share, driver_options)
@ddt.data([{'id': 'fake', 'status': constants.STATUS_MANAGE_ERROR}])
def test_manage_retry(self, shares):
share_data = {
'host': 'fake',
'export_location': 'fake',
'share_proto': 'fake',
}
driver_options = {}
fake_share_data = {'id': 'fakeid'}
share = db_api.share_create(self.context, fake_share_data)
self.mock_object(db_api, 'share_update',
mock.Mock(return_value=share))
self.mock_object(db_api, 'share_get',
mock.Mock(return_value=share))
self.mock_object(db_api, 'share_export_locations_update')
self.mock_object(self.api, 'get_all',
mock.Mock(return_value=shares))
self.api.manage(self.context,
copy.deepcopy(share_data),
driver_options)
db_api.share_update.assert_called_once_with(
self.context, 'fake', mock.ANY)
self.share_rpcapi.manage_share.assert_called_once_with(
self.context, mock.ANY, driver_options)
db_api.share_export_locations_update.assert_called_once_with(
self.context, share.instance['id'], mock.ANY
)
def test_manage_duplicate(self):
share_data = {
'host': 'fake',
'export_location': 'fake',
'share_proto': 'fake',
}
driver_options = {}
self.mock_object(self.api, 'get_all',
mock.Mock(return_value=['fake', 'fake2']))
self.assertRaises(exception.ManilaException, self.api.manage,
self.context, share_data, driver_options)
def test_unmanage(self):
share = db_utils.create_share(
id='fakeid',
host='fake',
size='1',
status=constants.STATUS_AVAILABLE,
user_id=self.context.user_id,
project_id=self.context.project_id,
task_state=None)
self.mock_object(db_api, 'share_update', mock.Mock())
self.api.unmanage(self.context, share)
self.share_rpcapi.unmanage_share.assert_called_once_with(
self.context, mock.ANY)
db_api.share_update.assert_called_once_with(
mock.ANY, share['id'], mock.ANY)
def test_unmanage_task_state_busy(self):
share = db_utils.create_share(
id='fakeid',
host='fake',
size='1',
status=constants.STATUS_AVAILABLE,
user_id=self.context.user_id,
project_id=self.context.project_id,
task_state=constants.STATUS_TASK_STATE_MIGRATION_IN_PROGRESS)
self.assertRaises(exception.ShareBusyException, self.api.unmanage,
self.context, share)
@mock.patch.object(quota.QUOTAS, 'reserve',
mock.Mock(return_value='reservation'))
@mock.patch.object(quota.QUOTAS, 'commit', mock.Mock())
def test_create_snapshot(self):
snapshot = db_utils.create_snapshot(
with_share=True, status=constants.STATUS_CREATING, size=1)
share = snapshot['share']
fake_name = 'fakename'
fake_desc = 'fakedesc'
options = {
'share_id': share['id'],
'user_id': self.context.user_id,
'project_id': self.context.project_id,
'status': constants.STATUS_CREATING,
'progress': '0%',
'share_size': share['size'],
'size': 1,
'display_name': fake_name,
'display_description': fake_desc,
'share_proto': share['share_proto'],
}
with mock.patch.object(db_api, 'share_snapshot_create',
mock.Mock(return_value=snapshot)):
self.api.create_snapshot(self.context, share, fake_name,
fake_desc)
share_api.policy.check_policy.assert_called_once_with(
self.context, 'share', 'create_snapshot', share)
quota.QUOTAS.reserve.assert_called_once_with(
self.context, snapshots=1, snapshot_gigabytes=1)
quota.QUOTAS.commit.assert_called_once_with(
self.context, 'reservation')
db_api.share_snapshot_create.assert_called_once_with(
self.context, options)
@mock.patch.object(db_api, 'share_instances_get_all_by_share_server',
mock.Mock(return_value=[]))
@mock.patch.object(db_api, 'consistency_group_get_all_by_share_server',
mock.Mock(return_value=[]))
def test_delete_share_server_no_dependent_shares(self):
server = {'id': 'fake_share_server_id'}
server_returned = {
'id': 'fake_share_server_id',
}
self.mock_object(db_api, 'share_server_update',
mock.Mock(return_value=server_returned))
self.api.delete_share_server(self.context, server)
db_api.share_instances_get_all_by_share_server.assert_called_once_with(
self.context, server['id'])
db_api.consistency_group_get_all_by_share_server.\
assert_called_once_with(self.context, server['id'])
self.share_rpcapi.delete_share_server.assert_called_once_with(
self.context, server_returned)
@mock.patch.object(db_api, 'share_instances_get_all_by_share_server',
mock.Mock(return_value=['fake_share', ]))
@mock.patch.object(db_api, 'consistency_group_get_all_by_share_server',
mock.Mock(return_value=[]))
def test_delete_share_server_dependent_share_exists(self):
server = {'id': 'fake_share_server_id'}
self.assertRaises(exception.ShareServerInUse,
self.api.delete_share_server,
self.context,
server)
db_api.share_instances_get_all_by_share_server.assert_called_once_with(
self.context, server['id'])
@mock.patch.object(db_api, 'share_instances_get_all_by_share_server',
mock.Mock(return_value=[]))
@mock.patch.object(db_api, 'consistency_group_get_all_by_share_server',
mock.Mock(return_value=['fake_cg', ]))
def test_delete_share_server_dependent_cg_exists(self):
server = {'id': 'fake_share_server_id'}
self.assertRaises(exception.ShareServerInUse,
self.api.delete_share_server,
self.context,
server)
db_api.share_instances_get_all_by_share_server.assert_called_once_with(
self.context, server['id'])
db_api.consistency_group_get_all_by_share_server.\
assert_called_once_with(self.context, server['id'])
@mock.patch.object(db_api, 'share_snapshot_update', mock.Mock())
def test_delete_snapshot(self):
snapshot = db_utils.create_snapshot(
with_share=True, status=constants.STATUS_AVAILABLE)
share = snapshot['share']
with mock.patch.object(db_api, 'share_get',
mock.Mock(return_value=share)):
self.api.delete_snapshot(self.context, snapshot)
self.share_rpcapi.delete_snapshot.assert_called_once_with(
self.context, snapshot, share['host'])
share_api.policy.check_policy.assert_called_once_with(
self.context, 'share', 'delete_snapshot', snapshot)
db_api.share_snapshot_update.assert_called_once_with(
self.context,
snapshot['id'],
{'status': constants.STATUS_DELETING})
db_api.share_get.assert_called_once_with(
self.context, snapshot['share_id'])
def test_delete_snapshot_wrong_status(self):
snapshot = db_utils.create_snapshot(
with_share=True, status=constants.STATUS_CREATING)
self.assertRaises(exception.InvalidShareSnapshot,
self.api.delete_snapshot,
self.context,
snapshot)
share_api.policy.check_policy.assert_called_once_with(
self.context, 'share', 'delete_snapshot', snapshot)
def test_create_snapshot_if_share_not_available(self):
share = db_utils.create_share(status=constants.STATUS_ERROR)
self.assertRaises(exception.InvalidShare,
self.api.create_snapshot,
self.context,
share,
'fakename',
'fakedesc')
share_api.policy.check_policy.assert_called_once_with(
self.context, 'share', 'create_snapshot', share)
def test_create_snapshot_invalid_task_state(self):
share = db_utils.create_share(
status=constants.STATUS_AVAILABLE,
task_state=constants.STATUS_TASK_STATE_MIGRATION_IN_PROGRESS)
self.assertRaises(exception.ShareBusyException,
self.api.create_snapshot,
self.context,
share,
'fakename',
'fakedesc')
share_api.policy.check_policy.assert_called_once_with(
self.context, 'share', 'create_snapshot', share)
@ddt.data({'use_scheduler': False, 'valid_host': 'fake'},
{'use_scheduler': True, 'valid_host': None})
@ddt.unpack
def test_create_from_snapshot(self, use_scheduler, valid_host):
snapshot, share, share_data, request_spec = (
self._setup_create_from_snapshot_mocks(
use_scheduler=use_scheduler, host=valid_host)
)
az = share_data.pop('availability_zone')
self.api.create(
self.context,
share_data['share_proto'],
None, # NOTE(u_glide): Get share size from snapshot
share_data['display_name'],
share_data['display_description'],
snapshot=snapshot,
availability_zone=az
)
self.assertEqual(0, share_types.get_share_type.call_count)
self.assertSubDictMatch(share_data,
db_api.share_create.call_args[0][1])
self.api.create_instance.assert_called_once_with(
self.context, share, share_network_id=share['share_network_id'],
host=valid_host,
availability_zone=snapshot['share']['availability_zone'],
consistency_group=None, cgsnapshot_member=None)
share_api.policy.check_policy.assert_called_once_with(
self.context, 'share', 'create')
quota.QUOTAS.reserve.assert_called_once_with(
self.context, gigabytes=1, shares=1)
quota.QUOTAS.commit.assert_called_once_with(
self.context, 'reservation')
def test_create_from_snapshot_with_different_share_type(self):
snapshot, share, share_data, request_spec = (
self._setup_create_from_snapshot_mocks()
)
share_type = {'id': 'super_fake_share_type'}
self.assertRaises(exception.InvalidInput, self.api.create,
self.context, share_data['share_proto'],
share_data['size'],
share_data['display_name'],
share_data['display_description'],
snapshot=snapshot,
availability_zone=share_data['availability_zone'],
share_type=share_type)
def test_get_snapshot(self):
fake_get_snap = {'fake_key': 'fake_val'}
with mock.patch.object(db_api, 'share_snapshot_get',
mock.Mock(return_value=fake_get_snap)):
rule = self.api.get_snapshot(self.context, 'fakeid')
self.assertEqual(fake_get_snap, rule)
share_api.policy.check_policy.assert_called_once_with(
self.context, 'share_snapshot', 'get_snapshot')
db_api.share_snapshot_get.assert_called_once_with(
self.context, 'fakeid')
def test_create_from_snapshot_not_available(self):
snapshot = db_utils.create_snapshot(
with_share=True, status=constants.STATUS_ERROR)
self.assertRaises(exception.InvalidShareSnapshot, self.api.create,
self.context, 'nfs', '1', 'fakename',
'fakedesc', snapshot=snapshot,
availability_zone='fakeaz')
def test_create_from_snapshot_larger_size(self):
snapshot = db_utils.create_snapshot(
size=100, status=constants.STATUS_AVAILABLE, with_share=True)
self.assertRaises(exception.InvalidInput, self.api.create,
self.context, 'nfs', 1, 'fakename', 'fakedesc',
availability_zone='fakeaz', snapshot=snapshot)
def test_create_share_wrong_size_0(self):
self.assertRaises(exception.InvalidInput, self.api.create,
self.context, 'nfs', 0, 'fakename', 'fakedesc',
availability_zone='fakeaz')
def test_create_share_wrong_size_some(self):
self.assertRaises(exception.InvalidInput, self.api.create,
self.context, 'nfs', 'some', 'fakename',
'fakedesc', availability_zone='fakeaz')
@ddt.data(constants.STATUS_AVAILABLE, constants.STATUS_ERROR)
def test_delete(self, status):
share = self._setup_delete_mocks(status)
self.api.delete(self.context, share)
self.api.delete_instance.assert_called_once_with(
utils.IsAMatcher(context.RequestContext),
utils.IsAMatcher(models.ShareInstance), force=False
)
db_api.share_snapshot_get_all_for_share.assert_called_once_with(
utils.IsAMatcher(context.RequestContext), share['id'])
def test_delete_wrong_status(self):
share = fake_share('fakeid')
self.mock_object(db_api, 'share_get', mock.Mock(return_value=share))
self.assertRaises(exception.InvalidShare, self.api.delete,
self.context, share)
@mock.patch.object(db_api, 'count_cgsnapshot_members_in_share',
mock.Mock(return_value=2))
def test_delete_dependent_cgsnapshot_members(self):
share_server_id = 'fake-ss-id'
        share = self._setup_delete_mocks(constants.STATUS_AVAILABLE,
                                         share_server_id=share_server_id)
self.assertRaises(exception.InvalidShare, self.api.delete,
self.context, share)
@mock.patch.object(db_api, 'share_instance_delete', mock.Mock())
def test_delete_no_host(self):
share = self._setup_delete_mocks(constants.STATUS_AVAILABLE, host=None)
self.api.delete(self.context, share)
db_api.share_instance_delete.assert_called_once_with(
utils.IsAMatcher(context.RequestContext), share.instance['id'])
def test_delete_share_with_snapshots(self):
share = self._setup_delete_mocks(constants.STATUS_AVAILABLE,
snapshots=['fake'])
self.assertRaises(
exception.InvalidShare,
self.api.delete,
self.context,
share
)
def test_delete_share_invalid_task_state(self):
share = db_utils.create_share(
status=constants.STATUS_AVAILABLE,
task_state=constants.STATUS_TASK_STATE_MIGRATION_IN_PROGRESS)
self.assertRaises(exception.ShareBusyException,
self.api.delete,
self.context, share)
def test_delete_share_quota_error(self):
share = self._setup_delete_mocks(constants.STATUS_AVAILABLE)
self.mock_object(quota.QUOTAS, 'reserve',
mock.Mock(side_effect=exception.QuotaError('fake')))
self.api.delete(self.context, share)
quota.QUOTAS.reserve.assert_called_once_with(
self.context,
project_id=share['project_id'],
shares=-1,
gigabytes=-share['size']
)
self.assertFalse(quota.QUOTAS.commit.called)
@ddt.data({'status': constants.STATUS_AVAILABLE, 'force': False},
{'status': constants.STATUS_ERROR, 'force': True})
@ddt.unpack
def test_delete_share_instance(self, status, force):
instance = self._setup_delete_share_instance_mocks(
status=status, share_server_id='fake')
self.api.delete_instance(self.context, instance, force=force)
db_api.share_instance_update.assert_called_once_with(
self.context,
instance['id'],
{'status': constants.STATUS_DELETING,
'terminated_at': self.dt_utc}
)
self.api.share_rpcapi.delete_share_instance.assert_called_once_with(
self.context, instance
)
        db_api.share_server_update.assert_called_once_with(
self.context,
instance['share_server_id'],
{'updated_at': self.dt_utc}
)
def test_delete_share_instance_invalid_status(self):
instance = self._setup_delete_share_instance_mocks(
status=constants.STATUS_CREATING, share_server_id='fake')
self.assertRaises(
exception.InvalidShareInstance,
self.api.delete_instance,
self.context,
instance
)
@ddt.data('', 'fake', 'Truebar', 'Bartrue')
def test_update_share_with_invalid_is_public_value(self, is_public):
self.assertRaises(exception.InvalidParameterValue,
self.api.update, self.context, 'fakeid',
{'is_public': is_public})
def test_get(self):
share = db_utils.create_share()
with mock.patch.object(db_api, 'share_get',
mock.Mock(return_value=share)):
result = self.api.get(self.context, 'fakeid')
self.assertEqual(share, result)
share_api.policy.check_policy.assert_called_once_with(
self.context, 'share', 'get', share)
db_api.share_get.assert_called_once_with(
self.context, 'fakeid')
@mock.patch.object(db_api, 'share_snapshot_get_all_by_project',
mock.Mock())
def test_get_all_snapshots_admin_not_all_tenants(self):
ctx = context.RequestContext('fakeuid', 'fakepid', is_admin=True)
self.api.get_all_snapshots(ctx)
share_api.policy.check_policy.assert_called_once_with(
ctx, 'share_snapshot', 'get_all_snapshots')
db_api.share_snapshot_get_all_by_project.assert_called_once_with(
ctx, 'fakepid', sort_dir='desc', sort_key='share_id', filters={})
@mock.patch.object(db_api, 'share_snapshot_get_all', mock.Mock())
def test_get_all_snapshots_admin_all_tenants(self):
self.api.get_all_snapshots(self.context,
search_opts={'all_tenants': 1})
share_api.policy.check_policy.assert_called_once_with(
self.context, 'share_snapshot', 'get_all_snapshots')
db_api.share_snapshot_get_all.assert_called_once_with(
self.context, sort_dir='desc', sort_key='share_id', filters={})
@mock.patch.object(db_api, 'share_snapshot_get_all_by_project',
mock.Mock())
def test_get_all_snapshots_not_admin(self):
ctx = context.RequestContext('fakeuid', 'fakepid', is_admin=False)
self.api.get_all_snapshots(ctx)
share_api.policy.check_policy.assert_called_once_with(
ctx, 'share_snapshot', 'get_all_snapshots')
db_api.share_snapshot_get_all_by_project.assert_called_once_with(
ctx, 'fakepid', sort_dir='desc', sort_key='share_id', filters={})
def test_get_all_snapshots_not_admin_search_opts(self):
search_opts = {'size': 'fakesize'}
fake_objs = [{'name': 'fakename1'}, search_opts]
ctx = context.RequestContext('fakeuid', 'fakepid', is_admin=False)
self.mock_object(db_api, 'share_snapshot_get_all_by_project',
mock.Mock(return_value=fake_objs))
result = self.api.get_all_snapshots(ctx, search_opts)
self.assertEqual([search_opts], result)
share_api.policy.check_policy.assert_called_once_with(
ctx, 'share_snapshot', 'get_all_snapshots')
db_api.share_snapshot_get_all_by_project.assert_called_once_with(
ctx, 'fakepid', sort_dir='desc', sort_key='share_id',
filters=search_opts)
def test_get_all_snapshots_with_sorting_valid(self):
self.mock_object(
db_api, 'share_snapshot_get_all_by_project',
mock.Mock(return_value=_FAKE_LIST_OF_ALL_SNAPSHOTS[0]))
ctx = context.RequestContext('fake_uid', 'fake_pid_1', is_admin=False)
snapshots = self.api.get_all_snapshots(
ctx, sort_key='status', sort_dir='asc')
share_api.policy.check_policy.assert_called_once_with(
ctx, 'share_snapshot', 'get_all_snapshots')
db_api.share_snapshot_get_all_by_project.assert_called_once_with(
ctx, 'fake_pid_1', sort_dir='asc', sort_key='status', filters={})
self.assertEqual(_FAKE_LIST_OF_ALL_SNAPSHOTS[0], snapshots)
def test_get_all_snapshots_sort_key_invalid(self):
self.mock_object(
db_api, 'share_snapshot_get_all_by_project',
mock.Mock(return_value=_FAKE_LIST_OF_ALL_SNAPSHOTS[0]))
ctx = context.RequestContext('fake_uid', 'fake_pid_1', is_admin=False)
self.assertRaises(
exception.InvalidInput,
self.api.get_all_snapshots,
ctx,
sort_key=1,
)
share_api.policy.check_policy.assert_called_once_with(
ctx, 'share_snapshot', 'get_all_snapshots')
def test_get_all_snapshots_sort_dir_invalid(self):
self.mock_object(
db_api, 'share_snapshot_get_all_by_project',
mock.Mock(return_value=_FAKE_LIST_OF_ALL_SNAPSHOTS[0]))
ctx = context.RequestContext('fake_uid', 'fake_pid_1', is_admin=False)
self.assertRaises(
exception.InvalidInput,
self.api.get_all_snapshots,
ctx,
sort_dir=1,
)
share_api.policy.check_policy.assert_called_once_with(
ctx, 'share_snapshot', 'get_all_snapshots')
@ddt.data(None, 'rw', 'ro')
def test_allow_access(self, level):
share = db_utils.create_share(status=constants.STATUS_AVAILABLE)
values = {
'share_id': share['id'],
'access_type': 'fake_access_type',
'access_to': 'fake_access_to',
'access_level': level,
}
fake_access_expected = copy.deepcopy(values)
fake_access_expected.update({
'id': 'fake_access_id',
'state': 'fake_state',
})
fake_access = copy.deepcopy(fake_access_expected)
fake_access.update({
'deleted': 'fake_deleted',
'deleted_at': 'fake_deleted_at',
'instance_mappings': ['foo', 'bar'],
})
self.mock_object(db_api, 'share_access_create',
mock.Mock(return_value=fake_access))
access = self.api.allow_access(
self.context, share, fake_access['access_type'],
fake_access['access_to'], level)
self.assertEqual(fake_access_expected, access)
self.share_rpcapi.allow_access.assert_called_once_with(
self.context, utils.IsAMatcher(models.ShareInstance),
fake_access)
db_api.share_access_create.assert_called_once_with(
self.context, values)
share_api.policy.check_policy.assert_called_with(
self.context, 'share', 'allow_access')
def test_allow_access_invalid_access_level(self):
share = db_utils.create_share(status=constants.STATUS_AVAILABLE)
self.assertRaises(exception.InvalidShareAccess, self.api.allow_access,
self.context, share, 'fakeacctype', 'fakeaccto',
'ab')
def test_allow_access_status_not_available(self):
share = db_utils.create_share(status=constants.STATUS_ERROR)
self.assertRaises(exception.InvalidShare, self.api.allow_access,
self.context, share, 'fakeacctype', 'fakeaccto')
def test_allow_access_no_host(self):
share = db_utils.create_share(host=None)
self.assertRaises(exception.InvalidShare, self.api.allow_access,
self.context, share, 'fakeacctype', 'fakeaccto')
def test_allow_access_to_instance(self):
share = db_utils.create_share(host='fake')
access = db_utils.create_access(share_id=share['id'],
state=constants.STATUS_ACTIVE)
rpc_method = self.mock_object(self.api.share_rpcapi, 'allow_access')
self.api.allow_access_to_instance(self.context, share.instance, access)
rpc_method.assert_called_once_with(
self.context, share.instance, access)
def test_deny_access_to_instance(self):
share = db_utils.create_share(host='fake')
access = db_utils.create_access(share_id=share['id'],
state=constants.STATUS_ACTIVE)
rpc_method = self.mock_object(self.api.share_rpcapi, 'deny_access')
self.mock_object(db_api, 'share_instance_access_get',
mock.Mock(return_value=access.instance_mappings[0]))
self.mock_object(db_api, 'share_instance_access_update_state')
self.api.deny_access_to_instance(self.context, share.instance, access)
rpc_method.assert_called_once_with(
self.context, share.instance, access)
db_api.share_instance_access_get.assert_called_once_with(
self.context, access['id'], share.instance['id'])
db_api.share_instance_access_update_state.assert_called_once_with(
self.context,
access.instance_mappings[0]['id'],
constants.STATUS_DELETING
)
@ddt.data('allow_access_to_instance', 'deny_access_to_instance')
def test_allow_and_deny_access_to_instance_invalid_instance(self, method):
share = db_utils.create_share(host=None)
self.assertRaises(
exception.InvalidShareInstance,
getattr(self.api, method),
self.context, share.instance, 'fake'
)
@mock.patch.object(db_api, 'share_get', mock.Mock())
@mock.patch.object(share_api.API, 'deny_access_to_instance', mock.Mock())
@mock.patch.object(db_api, 'share_instance_access_get_all', mock.Mock())
def test_deny_access_error(self):
share = db_utils.create_share(status=constants.STATUS_AVAILABLE)
db_api.share_get.return_value = share
access = db_utils.create_access(state=constants.STATUS_ERROR,
share_id=share['id'])
share_instance = share.instances[0]
db_api.share_instance_access_get_all.return_value = [share_instance, ]
self.api.deny_access(self.context, share, access)
db_api.share_get.assert_called_once_with(self.context, share['id'])
share_api.policy.check_policy.assert_called_once_with(
self.context, 'share', 'deny_access')
share_api.API.deny_access_to_instance.assert_called_once_with(
self.context, share_instance, access)
db_api.share_instance_access_get_all.assert_called_once_with(
self.context, access['id'])
@mock.patch.object(db_api, 'share_get', mock.Mock())
@mock.patch.object(db_api, 'share_instance_access_get_all', mock.Mock())
@mock.patch.object(db_api, 'share_access_delete', mock.Mock())
def test_deny_access_error_no_share_instance_mapping(self):
share = db_utils.create_share(status=constants.STATUS_AVAILABLE)
db_api.share_get.return_value = share
access = db_utils.create_access(state=constants.STATUS_ERROR,
share_id=share['id'])
db_api.share_instance_access_get_all.return_value = []
self.api.deny_access(self.context, share, access)
db_api.share_get.assert_called_once_with(self.context, share['id'])
share_api.policy.check_policy.assert_called_once_with(
self.context, 'share', 'deny_access')
db_api.share_access_delete.assert_called_once_with(
self.context, access['id'])
db_api.share_instance_access_get_all.assert_called_once_with(
self.context, access['id'])
@mock.patch.object(db_api, 'share_instance_access_update_state',
mock.Mock())
def test_deny_access_active(self):
share = db_utils.create_share(status=constants.STATUS_AVAILABLE)
access = db_utils.create_access(state=constants.STATUS_ACTIVE,
share_id=share['id'])
self.api.deny_access(self.context, share, access)
db_api.share_instance_access_update_state.assert_called_once_with(
self.context,
access.instance_mappings[0]['id'],
constants.STATUS_DELETING
)
share_api.policy.check_policy.assert_called_with(
self.context, 'share', 'deny_access')
self.share_rpcapi.deny_access.assert_called_once_with(
self.context, utils.IsAMatcher(models.ShareInstance), access)
def test_deny_access_not_found(self):
share = db_utils.create_share(status=constants.STATUS_AVAILABLE)
access = db_utils.create_access(state=constants.STATUS_ACTIVE,
share_id=share['id'])
self.mock_object(db_api, 'share_instance_access_get',
mock.Mock(side_effect=[exception.NotFound('fake')]))
self.api.deny_access(self.context, share, access)
share_api.policy.check_policy.assert_called_with(
self.context, 'share', 'deny_access')
def test_deny_access_not_active_not_error(self):
share = db_utils.create_share(status=constants.STATUS_AVAILABLE)
access = db_utils.create_access(share_id=share['id'])
self.assertRaises(exception.InvalidShareAccess, self.api.deny_access,
self.context, share, access)
share_api.policy.check_policy.assert_called_once_with(
self.context, 'share', 'deny_access')
def test_deny_access_status_not_available(self):
share = db_utils.create_share(status=constants.STATUS_ERROR)
self.assertRaises(exception.InvalidShare, self.api.deny_access,
self.context, share, 'fakeacc')
share_api.policy.check_policy.assert_called_once_with(
self.context, 'share', 'deny_access')
def test_deny_access_no_host(self):
share = db_utils.create_share(
status=constants.STATUS_AVAILABLE, host=None)
self.assertRaises(exception.InvalidShare, self.api.deny_access,
self.context, share, 'fakeacc')
share_api.policy.check_policy.assert_called_once_with(
self.context, 'share', 'deny_access')
def test_access_get(self):
with mock.patch.object(db_api, 'share_access_get',
mock.Mock(return_value='fake')):
rule = self.api.access_get(self.context, 'fakeid')
self.assertEqual('fake', rule)
share_api.policy.check_policy.assert_called_once_with(
self.context, 'share', 'access_get')
db_api.share_access_get.assert_called_once_with(
self.context, 'fakeid')
def test_access_get_all(self):
share = db_utils.create_share(id='fakeid')
expected = {
'fakeacc0id': {
'id': 'fakeacc0id',
'access_type': 'fakeacctype',
'access_to': 'fakeaccto',
'access_level': 'rw',
'state': constants.STATUS_ACTIVE,
'share_id': share['id'],
},
'fakeacc1id': {
'id': 'fakeacc1id',
'access_type': 'fakeacctype',
'access_to': 'fakeaccto',
'access_level': 'rw',
'state': constants.STATUS_DELETING,
'share_id': share['id'],
},
}
rules = [
db_utils.create_access(**expected['fakeacc0id']),
db_utils.create_access(**expected['fakeacc1id']),
]
self.mock_object(db_api, 'share_access_get_all_for_share',
mock.Mock(return_value=rules))
actual = self.api.access_get_all(self.context, share)
for access in actual:
expected_access = expected[access['id']]
expected_access.pop('share_id')
self.assertEqual(expected_access, access)
share_api.policy.check_policy.assert_called_once_with(
self.context, 'share', 'access_get_all')
db_api.share_access_get_all_for_share.assert_called_once_with(
self.context, 'fakeid')
def test_share_metadata_get(self):
metadata = {'a': 'b', 'c': 'd'}
share_id = str(uuid.uuid4())
db_api.share_create(self.context,
{'id': share_id, 'metadata': metadata})
self.assertEqual(metadata,
db_api.share_metadata_get(self.context, share_id))
def test_share_metadata_update(self):
metadata1 = {'a': '1', 'c': '2'}
metadata2 = {'a': '3', 'd': '5'}
should_be = {'a': '3', 'c': '2', 'd': '5'}
share_id = str(uuid.uuid4())
db_api.share_create(self.context,
{'id': share_id, 'metadata': metadata1})
db_api.share_metadata_update(self.context, share_id, metadata2, False)
self.assertEqual(should_be,
db_api.share_metadata_get(self.context, share_id))
def test_share_metadata_update_delete(self):
metadata1 = {'a': '1', 'c': '2'}
metadata2 = {'a': '3', 'd': '4'}
should_be = metadata2
share_id = str(uuid.uuid4())
db_api.share_create(self.context,
{'id': share_id, 'metadata': metadata1})
db_api.share_metadata_update(self.context, share_id, metadata2, True)
self.assertEqual(should_be,
db_api.share_metadata_get(self.context, share_id))
def test_extend_invalid_status(self):
invalid_status = 'fake'
share = db_utils.create_share(status=invalid_status)
new_size = 123
self.assertRaises(exception.InvalidShare,
self.api.extend, self.context, share, new_size)
def test_extend_invalid_task_state(self):
share = db_utils.create_share(
status=constants.STATUS_AVAILABLE,
task_state=constants.STATUS_TASK_STATE_MIGRATION_IN_PROGRESS)
new_size = 123
self.assertRaises(exception.ShareBusyException,
self.api.extend, self.context, share, new_size)
def test_extend_invalid_size(self):
share = db_utils.create_share(status=constants.STATUS_AVAILABLE,
size=200)
new_size = 123
self.assertRaises(exception.InvalidInput,
self.api.extend, self.context, share, new_size)
def test_extend_quota_error(self):
share = db_utils.create_share(status=constants.STATUS_AVAILABLE,
size=100)
new_size = 123
usages = {'gigabytes': {'reserved': 11, 'in_use': 12}}
quotas = {'gigabytes': 13}
exc = exception.OverQuota(usages=usages, quotas=quotas, overs=new_size)
self.mock_object(quota.QUOTAS, 'reserve', mock.Mock(side_effect=exc))
self.assertRaises(exception.ShareSizeExceedsAvailableQuota,
self.api.extend, self.context, share, new_size)
def test_extend_valid(self):
share = db_utils.create_share(status=constants.STATUS_AVAILABLE,
size=100)
new_size = 123
self.mock_object(self.api, 'update')
self.mock_object(self.api.share_rpcapi, 'extend_share')
self.api.extend(self.context, share, new_size)
self.api.update.assert_called_once_with(
self.context, share, {'status': constants.STATUS_EXTENDING})
self.api.share_rpcapi.extend_share.assert_called_once_with(
self.context, share, new_size, mock.ANY
)
def test_shrink_invalid_status(self):
invalid_status = 'fake'
share = db_utils.create_share(status=invalid_status)
self.assertRaises(exception.InvalidShare,
self.api.shrink, self.context, share, 123)
def test_shrink_invalid_task_state(self):
share = db_utils.create_share(
status=constants.STATUS_AVAILABLE,
task_state=constants.STATUS_TASK_STATE_MIGRATION_IN_PROGRESS)
self.assertRaises(exception.ShareBusyException,
self.api.shrink, self.context, share, 123)
@ddt.data(300, 0, -1)
def test_shrink_invalid_size(self, new_size):
share = db_utils.create_share(status=constants.STATUS_AVAILABLE,
size=200)
self.assertRaises(exception.InvalidInput,
self.api.shrink, self.context, share, new_size)
@ddt.data(constants.STATUS_AVAILABLE,
constants.STATUS_SHRINKING_POSSIBLE_DATA_LOSS_ERROR)
def test_shrink_valid(self, share_status):
share = db_utils.create_share(status=share_status, size=100)
new_size = 50
self.mock_object(self.api, 'update')
self.mock_object(self.api.share_rpcapi, 'shrink_share')
self.api.shrink(self.context, share, new_size)
self.api.update.assert_called_once_with(
self.context, share, {'status': constants.STATUS_SHRINKING})
self.api.share_rpcapi.shrink_share.assert_called_once_with(
self.context, share, new_size
)
def test_migrate_share(self):
host = 'fake2@backend#pool'
share = db_utils.create_share(
status=constants.STATUS_AVAILABLE,
host='fake@backend#pool', share_type_id='fake_type_id')
request_spec = {
'share_properties': {
'size': share['size'],
'user_id': share['user_id'],
'project_id': share['project_id'],
'share_server_id': share['share_server_id'],
'snapshot_support': share['snapshot_support'],
'share_proto': share['share_proto'],
'share_type_id': share['share_type_id'],
'is_public': share['is_public'],
'consistency_group_id': share['consistency_group_id'],
'source_cgsnapshot_member_id': share[
'source_cgsnapshot_member_id'],
'snapshot_id': share['snapshot_id'],
},
'share_instance_properties': {
'availability_zone_id': share.instance['availability_zone_id'],
'share_network_id': share.instance['share_network_id'],
'share_server_id': share.instance['share_server_id'],
'share_id': share.instance['share_id'],
'host': share.instance['host'],
'status': share.instance['status'],
},
'share_type': 'fake_type',
'share_id': share['id'],
}
self.mock_object(self.scheduler_rpcapi, 'migrate_share_to_host')
self.mock_object(share_types, 'get_share_type',
mock.Mock(return_value='fake_type'))
self.mock_object(utils, 'validate_service_host')
self.api.migrate_share(self.context, share, host, True)
self.scheduler_rpcapi.migrate_share_to_host.assert_called_once_with(
self.context, share['id'], host, True, request_spec)
def test_migrate_share_status_unavailable(self):
host = 'fake2@backend#pool'
share = db_utils.create_share(
status=constants.STATUS_ERROR)
self.assertRaises(exception.InvalidShare, self.api.migrate_share,
self.context, share, host, True)
def test_migrate_share_task_state_invalid(self):
host = 'fake2@backend#pool'
share = db_utils.create_share(
status=constants.STATUS_AVAILABLE,
task_state=constants.STATUS_TASK_STATE_MIGRATION_IN_PROGRESS)
self.assertRaises(exception.ShareBusyException, self.api.migrate_share,
self.context, share, host, True)
def test_migrate_share_with_snapshots(self):
host = 'fake2@backend#pool'
share = db_utils.create_share(
host='fake@backend#pool', status=constants.STATUS_AVAILABLE)
self.mock_object(db_api, 'share_snapshot_get_all_for_share',
mock.Mock(return_value=True))
self.assertRaises(exception.InvalidShare, self.api.migrate_share,
self.context, share, host, True)
def test_migrate_share_invalid_host(self):
host = 'fake@backend#pool'
share = db_utils.create_share(
host='fake2@backend', status=constants.STATUS_AVAILABLE)
self.mock_object(db_api, 'share_snapshot_get_all_for_share',
mock.Mock(return_value=False))
self.assertRaises(exception.ServiceNotFound,
self.api.migrate_share,
self.context, share, host, True)
def test_migrate_share_same_host(self):
host = 'fake@backend#pool'
share = db_utils.create_share(
host='fake@backend#pool', status=constants.STATUS_AVAILABLE)
self.assertRaises(exception.InvalidHost,
self.api.migrate_share,
self.context, share, host, True)
def test_migrate_share_exception(self):
host = 'fake2@backend#pool'
share = db_utils.create_share(
host='fake@backend#pool', status=constants.STATUS_AVAILABLE)
self.mock_object(utils, 'validate_service_host')
self.mock_object(db_api, 'share_snapshot_get_all_for_share',
mock.Mock(return_value=False))
self.mock_object(db_api, 'share_update', mock.Mock(return_value=True))
self.mock_object(self.scheduler_rpcapi, 'migrate_share_to_host',
mock.Mock(side_effect=exception.ShareMigrationFailed(
reason='fake')))
self.assertRaises(exception.ShareMigrationFailed,
self.api.migrate_share,
self.context, share, host, True)
db_api.share_update.assert_any_call(
mock.ANY, share['id'], mock.ANY)
class OtherTenantsShareActionsTestCase(test.TestCase):
def setUp(self):
super(OtherTenantsShareActionsTestCase, self).setUp()
self.api = share.API()
def test_delete_other_tenants_public_share(self):
share = db_utils.create_share(is_public=True)
ctx = context.RequestContext(user_id='1111', project_id='2222')
self.assertRaises(exception.PolicyNotAuthorized, self.api.delete, ctx,
share)
def test_update_other_tenants_public_share(self):
share = db_utils.create_share(is_public=True)
ctx = context.RequestContext(user_id='1111', project_id='2222')
self.assertRaises(exception.PolicyNotAuthorized, self.api.update, ctx,
share, {'display_name': 'newname'})
def test_get_other_tenants_public_share(self):
share = db_utils.create_share(is_public=True)
ctx = context.RequestContext(user_id='1111', project_id='2222')
self.mock_object(db_api, 'share_get',
mock.Mock(return_value=share))
result = self.api.get(ctx, 'fakeid')
self.assertEqual(share, result)
db_api.share_get.assert_called_once_with(ctx, 'fakeid')
|
# Copyright (c) 2013 Alon Swartz <[email protected]>
#
# This file is part of ec2metadata.
#
# ec2metadata is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
import time
import urllib.request, urllib.parse, urllib.error
import socket
METAOPTS = ['ami-id', 'ami-launch-index', 'ami-manifest-path',
'ancestor-ami-id', 'availability-zone', 'block-device-mapping',
'instance-id', 'instance-type', 'local-hostname', 'local-ipv4',
'kernel-id', 'product-codes', 'public-hostname', 'public-ipv4',
'public-keys', 'ramdisk-id', 'reservation-id', 'security-groups',
'user-data']
class Error(Exception):
pass
class EC2Metadata:
"""Class for querying metadata from EC2"""
def __init__(self, addr='169.254.169.254', api='2008-02-01'):
self.addr = addr
self.api = api
if not self._test_connectivity(self.addr, 80):
raise Error("could not establish connection to: %s" % self.addr)
@staticmethod
def _test_connectivity(addr, port):
for i in range(6):
s = socket.socket()
try:
s.connect((addr, port))
s.close()
return True
except socket.error as e:
time.sleep(1)
return False
def _get(self, uri):
url = 'http://%s/%s/%s' % (self.addr, self.api, uri)
        value = urllib.request.urlopen(url).read().decode('utf-8')
if "404 - Not Found" in value:
return None
return value
def get(self, metaopt):
"""return value of metaopt"""
if metaopt not in METAOPTS:
raise Error('unknown metaopt', metaopt, METAOPTS)
if metaopt == 'availability-zone':
return self._get('meta-data/placement/availability-zone')
if metaopt == 'public-keys':
public_keys = []
data = self._get('meta-data/public-keys')
if not data:
return public_keys
keyids = [ line.split('=')[0] for line in data.splitlines() ]
for keyid in keyids:
uri = 'meta-data/public-keys/%d/openssh-key' % int(keyid)
public_keys.append(self._get(uri).rstrip())
return public_keys
if metaopt == 'user-data':
return self._get('user-data')
return self._get('meta-data/' + metaopt)
def get(metaopt):
"""primitive: return value of metaopt"""
m = EC2Metadata()
return m.get(metaopt)
def display(metaopts, prefix=False):
"""primitive: display metaopts (list) values with optional prefix"""
m = EC2Metadata()
for metaopt in metaopts:
value = m.get(metaopt)
if not value:
value = "unavailable"
if prefix:
print("%s: %s" % (metaopt, value))
else:
print(value)
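#Example usage sketch (illustrative, not part of the original module; it only
#works on an EC2 instance where the metadata service at 169.254.169.254 is reachable):
#
#   display(['instance-id', 'local-ipv4', 'public-hostname'], prefix=True)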
|
"""
Test basic functionality for loading datasets.
"""
import pytest
import numpy as np
import numpy.testing as npt
from ..datasets import load_japan_quakes, load_earth_relief
from ..exceptions import GMTInvalidInput
def test_japan_quakes():
"Check that the dataset loads without errors"
data = load_japan_quakes()
assert data.shape == (115, 7)
summary = data.describe()
assert summary.loc['min', 'year'] == 1987
assert summary.loc['max', 'year'] == 1988
assert summary.loc['min', 'month'] == 1
assert summary.loc['max', 'month'] == 12
assert summary.loc['min', 'day'] == 1
assert summary.loc['max', 'day'] == 31
def test_earth_relief_fails():
"Make sure earth relief fails for invalid resolutions"
resolutions = '1m 1d bla 60d 01s 03s 001m 03'.split()
resolutions.append(60)
for resolution in resolutions:
with pytest.raises(GMTInvalidInput):
load_earth_relief(resolution=resolution)
# Only test 60m and 30m to avoid downloading large datasets in CI
def test_earth_relief_60():
"Test some properties of the earth relief 60m data"
data = load_earth_relief(resolution='60m')
assert data.shape == (181, 361)
npt.assert_allclose(data.lat, np.arange(-90, 91, 1))
npt.assert_allclose(data.lon, np.arange(-180, 181, 1))
npt.assert_allclose(data.min(), -8425)
npt.assert_allclose(data.max(), 5551)
def test_earth_relief_30():
"Test some properties of the earth relief 30m data"
data = load_earth_relief(resolution='30m')
assert data.shape == (361, 721)
npt.assert_allclose(data.lat, np.arange(-90, 90.5, 0.5))
npt.assert_allclose(data.lon, np.arange(-180, 180.5, 0.5))
npt.assert_allclose(data.min(), -9214)
npt.assert_allclose(data.max(), 5859)
|
class Solution:
def reverseWords(self, s: str) -> str:
"""
Given an input string, reverse the string word by word.
Follow up:
For C programmers, try to solve it in-place in O(1) extra space.
"""
        # edge case: if the string contains no spaces, return it unchanged
i = 0
flag = True
for i in range(len(s)):
if s[i] == " ":
flag = False
if flag:
return s
        # init
        word_end = len(s)
        new_str = ""
        i = 0
        j = 0
        # scan from the end of the string, copying each word as it is found
        for i in range(len(s) - 1, -1, -1):
            if s[i] == " ":
                # copy the word that ends just before word_end
                for j in range(i + 1, word_end):
                    new_str = new_str + s[j]
                if i + 1 < word_end:
                    new_str = new_str + ' '
                word_end = i
        # copy the last (leftmost) word
        for j in range(0, word_end):
            new_str = new_str + s[j]
        word_end = i
        if len(new_str) > 0 and new_str[-1] == ' ':
            new_str = new_str[:-1]
        return new_str
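# A more idiomatic sketch of the same behaviour (illustrative, not part of the
# original Solution class): split on whitespace, reverse the words, re-join.
def reverse_words_idiomatic(s: str) -> str:
    return " ".join(reversed(s.split()))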
if __name__=="__main__":
solu=Solution()
assert(solu.reverseWords("the sky is blue")=="blue is sky the")
assert(solu.reverseWords(" hello world! ")=="world! hello")
assert(solu.reverseWords("a good example")=="example good a") |
from flask import Flask, request
from werkzeug.routing import BaseConverter
app = Flask(__name__)
# Custom converter
class MobileConverter(BaseConverter):
    """Custom mobile phone number converter"""
    regex = r'1[3-9]\d{9}'
# Register the custom converter
app.url_map.converters['mobile'] = MobileConverter
# Path parameters
@app.route('/users/<int(min=1):user_id>')
def login(user_id):
return "login page {}".format(user_id)
@app.route('/sms/<mobile:mobile_num>')
def sms(mobile_num):
return "send sms to {}".format(mobile_num)
# Query string parameters
@app.route('/users')
def get_user():
user_id = request.args.get('pk')
return "get user {}".format(user_id)
# File upload
@app.route('/file', methods=['POST'])
def save():
    # What we get from request.files is a File object that can be read directly
    file_obj = request.files.get('pic')
    # Plain approach:
    # with open('./demo.jpg', 'wb') as new_file:
    #     new_file.write(file_obj.read())
    # Flask also provides a save() method that writes the file to local disk
file_obj.save('./demo.jpg')
return "save pic ok"
if __name__ == '__main__':
app.run(debug=True)
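# Illustrative requests against this app (hypothetical, assuming the default
# Flask development server at 127.0.0.1:5000):
#   GET  /users/42         -> "login page 42"
#   GET  /sms/13812345678  -> "send sms to 13812345678"
#   GET  /users?pk=7       -> "get user 7"
#   POST /file with a multipart form field named "pic" -> "save pic ok"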
|
'''Write a program that reads a person's weight and height, computes their body mass index (BMI/IMC)
and prints their status according to the table below (which the code implements):
- BMI below 18.5: underweight
- between 18.5 and 25: ideal weight
- 25 up to 30: overweight
- 30 up to 40: obesity
- above 40: morbid obesity'''
# weight (peso)
weight = float(input('Qual é o seu peso? (Kg) '))
# height (altura)
height = float(input('Qual é a sua altura? (M) '))
imc = weight / (height ** 2)
print('\nSeu peso é {} kg, sua altura é {}m e seu IMC é {:.1f}'.format(weight, height, imc))
if imc < 18.5:
print('\nCUIDADO!! Você esta Abaixo do Peso normal')
elif imc >= 18.5 and imc < 25:
print('\nPARABÉNS! Você esta no Peso Ideal!')
elif imc >= 25 and imc < 30:
print('\nATENÇÃO! Você esta em Sobrepeso')
elif imc >= 30 and imc < 40:
print('\nFIQUEI ATENTO!! Você esta em Obesidade')
elif imc >= 40:
print('\nCUIDADO!! Você esta em Obesidade Mórbida')
''' Another way of writing it that Python also understands
if imc < 18.5:
print('CUIDADO!! Você esta Abaixo do Peso normal')
elif 18.5 <= imc < 25:
print('PARABÉNS! Você esta no Peso Ideal!')
elif 25 <= imc < 30:
print('ATENÇÃO! Você esta em Sobrepeso')
elif 30 <= imc < 40:
print('FIQUEI ATENTO!! Você esta em Obesidade')
elif imc > 40:
print('CUIDADO!! Você esta em Obesidade Mórbida')
'''
|
from tkinter import Tk, Label, Button, messagebox
import random
rickyism = ["Fuck you, Lahey!", "allow me to play doubles advocate here for a moment. ",
"For all intensive purposes I think you are wrong.",
"you all seem to be taking something very valuable for granite.",
"Gettin' two birds stoned at once",
"Keep your friends close but your enemies toaster.",
"It would be my pleasurement",
"Baste yourself, boys. I'm going to Toronto to become a street person.",
"Storming the jungles at Normanly",
"I'll do some fuckin weiner work for that kind of money",
"Jacob's not the smartest knife in the drawer you guys",
"A link is only as long as your longest strong chain",
"Cock-a-doodle Fucking Ketchup Chips",
"Does a bear shit on the pope?",
"Fire Retarded",
"Friends with the Benedicts",
"Good things come to those at the gate",
"I dont have enough people words to make it understand you the way it understands me",
"It's clear to see who makes the pants here",
]
class RickyGUI:
def __init__(self, master):
self.master = master
master.title("Ricky GUI")
self.label = Label(master, text="Click 'Fuck Off' for Rickyisms")
self.label.pack()
self.greet_button = Button(master, text="Fuck Off", command=self.greet)
self.greet_button.pack()
self.close_button = Button(master, text="Close", command=master.quit)
self.close_button.pack()
def greet(self):
messagebox.showinfo("Rickyism", random.choice(rickyism))
root = Tk()
my_gui = RickyGUI(root)
root.mainloop() |
from enum import Enum
import datetime
class FlightInfoMode(Enum):
DEPARTURE = 1
ARRIVAL = 2
class FlightType(Enum):
NATIONAL = 1
INTERNATIONAL_DESTINY = 2
INTERNATIONAL_ORIGIN = 3
class Weather:
def __init__(self, min, max, description):
self.min = min or '-'
self.max = max or '-'
self.description = description or '-'
def __str__(self):
return '{} {} {}'.format(self.min, self.max, self.description)
def __repr__(self):
return '{};{};{}'.format(self.min, self.max, self.description)
class Airport:
def __init__(self, code, name):
self.code = code or '-'
self.name = name or '-'
def __str__(self):
return '{} ({})'.format(self.name, self.code)
def __repr__(self):
return '{};{}'.format(self.name, self.code)
def __eq__(self, other):
if isinstance(self, other.__class__):
return self.code == other.code
return False
def __hash__(self):
return hash(self.code)
class Flight:
def __init__(self, flight_number, company, plane, departure, arrival, flight_type, url):
self.flightNumber = flight_number or '-'
self.company = company or '-'
self.plane = plane or '-'
self.departure = departure or Departure(None, None, None, None, None, None, None, None)
self.arrival = arrival or Arrival(None, None, None, None, None, None, None, None)
self.type = flight_type or '-'
self.url = url or '-'
self.timestamp = '{:%Y-%m-%d %H:%M:%S}'.format(datetime.datetime.now())
key_suffix = departure.key if flight_type == FlightType.INTERNATIONAL_DESTINY else arrival.key
self.key = '{}{}'.format(flight_number, key_suffix)
def __str__(self):
return '{} from {} to {}'.format(self.flightNumber, self.departure, self.arrival)
def __repr__(self):
return '{};{};{};{};{};{}'.format(self.flightNumber, self.company, self.plane, repr(self.departure), repr(self.arrival),
self.timestamp)
def __eq__(self, other):
if isinstance(self, other.__class__):
return (self.flightNumber, self.departure, self.arrival) == (
other.flightNumber, other.departure, other.arrival)
return False
def __hash__(self):
return hash((self.flightNumber, self.departure, self.arrival))
class FlightSchedule:
def __init__(self, date, time, airport, terminal, status, weather):
self.date = date or '-'
self.time = time or '-'
self.airport = airport or Airport(None, None)
self.terminal = terminal or '-'
self.status = status or '-'
self.weather = weather or Weather(None, None, None)
self.key = '{}{}'.format(date, time)
def __str__(self):
return '{} ({} {})'.format(self.airport.name, self.date, self.time)
def __repr__(self):
return '{};{};{};{};{};{}'.format(self.date, self.time, repr(self.airport), self.terminal, self.status,
repr(self.weather))
def __eq__(self, other):
if isinstance(self, other.__class__):
            return (self.date, self.time) == (
                other.date, other.time)
return False
def __hash__(self):
return hash((self.date, self.time))
class Departure(FlightSchedule):
def __init__(self, date, time, airport, terminal, status, weather, counter, door):
super().__init__(date, time, airport, terminal, status, weather)
self.counter = counter or '-'
self.door = door or '-'
def __str__(self):
return super().__str__() + ''
def __repr__(self):
return super().__repr__() + ';{};{}'.format(self.counter, self.door)
class Arrival(FlightSchedule):
def __init__(self, date, time, airport, terminal, status, weather, room, belt):
super().__init__(date, time, airport, terminal, status, weather)
self.room = room or '-'
self.belt = belt or '-'
def __str__(self):
return super().__str__() + ''
def __repr__(self):
return super().__repr__() + ';{};{}'.format(self.room, self.belt)
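# Illustrative construction sketch (not part of the original module; all the
# values below are made-up examples) showing how the classes fit together.
if __name__ == '__main__':
    weather = Weather('10', '20', 'sunny')
    origin = Airport('MAD', 'Madrid-Barajas')
    destination = Airport('LIM', 'Jorge Chavez')
    departure = Departure('2018-01-01', '10:00', origin, 'T4', 'On time', weather, '501', 'A3')
    arrival = Arrival('2018-01-01', '16:00', destination, 'T1', 'On time', weather, 'R2', 'B7')
    flight = Flight('IB6659', 'Iberia', 'A330', departure, arrival,
                    FlightType.INTERNATIONAL_DESTINY, 'http://example.com')
    print(flight)  # IB6659 from Madrid-Barajas (2018-01-01 10:00) to Jorge Chavez (2018-01-01 16:00)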
|
def longest_substring_util(s: str, start: int, end: int, k: int) -> int:
if end < k:
return 0
# will hold the occurrences of each character in the string
# counter = Counter(s)
count_map = [0] * 26
# build the count map which will contain the occurrences of each character in the string
for i in range(start, end):
count_map[ord(s[i]) - ord('a')] += 1
# iterate through the string
for mid in range(start, end):
# if we find a character that is 'invalid' i.e. the frequency of the character is less than k
# if counter.get(s[mid]) >= k:
if count_map[ord(s[mid]) - ord('a')] >= k:
continue
# we now have a mid point
mid_next = mid + 1
# while mid_next < end and counter.get(s[mid_next]) < k:
while mid_next < end and count_map[ord(s[mid_next]) - ord('a')] < k:
mid_next += 1
left_sub = longest_substring_util(s, start, mid, k)
right_sub = longest_substring_util(s, mid_next, end, k)
return max(left_sub, right_sub)
return end - start
def longest_substring(s: str, k: int) -> int:
"""
Divide and Conquer is one of the popular strategies that work in 2 phases.
Divide the problem into subproblems. (Divide Phase).
Repeatedly solve each subproblem independently and combine the result to solve the original problem. (Conquer Phase)
We could apply this strategy by recursively splitting the string into substrings and combine the result to find the
longest substring that satisfies the given condition. The longest substring for a string starting at index start and
ending at index end can be given by,
    longestSubstring(start, end) = max(longestSubstring(start, mid), longestSubstring(mid+1, end))
Finding the split position (mid)
The string would be split only when we find an invalid character. An invalid character is the one with a frequency
of less than k. As we know, the invalid character cannot be part of the result, we split the string at the index
where we find the invalid character, recursively check for each split, and combine the result.
Algorithm
Build the countMap with the frequency of each character in the string s.
Find the position for mid index by iterating over the string. The mid index would be the first invalid character in
the string.
Split the string into 2 substrings at the mid index and recursively find the result.
To make it more efficient, we ignore all the invalid characters after the mid index as well, thereby reducing the
number of recursive calls.
Complexity Analysis
    Time Complexity: O(N^2), where N is the length of string s. Though the algorithm performs better in most cases,
    the worst case time complexity is still O(N^2).
    In cases where we perform a split at every index, the maximum depth of recursive calls could be O(N). For each
    recursive call it takes O(N) time to build the countMap, resulting in O(N^2) time complexity.
Space Complexity: O(N) This is the space used to store the recursive call stack. The maximum depth of recursive
call stack would be O(N).
@param s: String to evaluate for
@param k: length of the longest substring
@return: length of longest substring with at most repeating characters of length k
@rtype int
"""
return longest_substring_util(s, 0, len(s), k)
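# Minimal usage sketch (illustrative, not part of the original module): every
# character of the returned substring must occur at least k times.
if __name__ == "__main__":
    # "ababb" is the longest substring of "ababbc" whose characters all occur
    # at least twice, so the expected answer is 5.
    assert longest_substring("ababbc", 2) == 5
    assert longest_substring("aaabb", 3) == 3  # "aaa"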
|
# -*- coding: utf-8 -*-
"""API blueprint and routes."""
from functools import partial
from flask import Blueprint, abort, jsonify, request
import bioregistry
from .utils import (
_autocomplete,
_get_identifier,
_normalize_prefix_or_404,
_search,
serialize,
)
from .. import normalize_prefix
from ..export.prefix_maps import collection_to_context_jsonlds
from ..export.rdf_export import (
collection_to_rdf_str,
metaresource_to_rdf_str,
resource_to_rdf_str,
)
from ..schema import sanitize_mapping
from ..schema_utils import (
read_collections_contributions,
read_contributors,
read_prefix_contacts,
read_prefix_contributions,
read_prefix_reviews,
read_registry_contributions,
)
from ..uri_format import get_uri_prefix
__all__ = [
"api_blueprint",
]
api_blueprint = Blueprint("api", __name__, url_prefix="/api")
@api_blueprint.route("/registry")
def resources():
"""Get all resources.
---
tags:
- resource
parameters:
- name: format
description: The file type
in: query
required: false
default: json
schema:
type: string
enum: [json, yaml]
""" # noqa:DAR101,DAR201
return serialize(sanitize_mapping(bioregistry.read_registry()))
@api_blueprint.route("/registry/<prefix>")
def resource(prefix: str):
"""Get a resource.
---
tags:
- resource
parameters:
- name: prefix
in: path
description: The prefix for the entry
required: true
type: string
example: doid
- name: format
description: The file type
in: query
required: false
default: json
schema:
type: string
enum: [json, yaml, turtle, jsonld]
""" # noqa:DAR101,DAR201
prefix = _normalize_prefix_or_404(prefix)
data = dict(prefix=prefix, **bioregistry.get_resource(prefix).dict()) # type:ignore
return serialize(
data,
serializers=[
("turtle", "text/plain", partial(resource_to_rdf_str, fmt="turtle")),
("jsonld", "application/ld+json", partial(resource_to_rdf_str, fmt="json-ld")),
],
)
@api_blueprint.route("/metaregistry")
def metaresources():
"""Get all metaresources.
---
tags:
- metaresource
parameters:
- name: format
description: The file type
in: query
required: false
default: json
schema:
type: string
enum: [json, yaml]
""" # noqa:DAR101,DAR201
return serialize(sanitize_mapping(bioregistry.read_metaregistry()))
@api_blueprint.route("/metaregistry/<metaprefix>")
def metaresource(metaprefix: str):
"""Get a metaresource.
---
tags:
- metaresource
parameters:
- name: prefix
in: path
description: The prefix for the metaresource
required: true
type: string
example: doid
- name: format
description: The file type
in: query
required: false
default: json
schema:
type: string
enum: [json, yaml, turtle, jsonld]
""" # noqa:DAR101,DAR201
data = bioregistry.get_registry(metaprefix)
if not data:
abort(404, f"Invalid metaprefix: {metaprefix}")
return serialize(
data,
serializers=[
("turtle", "text/plain", partial(metaresource_to_rdf_str, fmt="turtle")),
("jsonld", "application/ld+json", partial(metaresource_to_rdf_str, fmt="json-ld")),
],
)
@api_blueprint.route("/collections")
def collections():
"""Get all collections.
---
tags:
- collection
parameters:
- name: format
description: The file type
in: query
required: false
default: json
schema:
type: string
enum: [json, yaml]
""" # noqa:DAR101,DAR201
return serialize(sanitize_mapping(bioregistry.read_collections()))
@api_blueprint.route("/collection/<identifier>")
def collection(identifier: str):
"""Get a collection.
---
tags:
- collection
parameters:
- name: prefix
in: path
description: The identifier of the collection
required: true
type: string
example: 0000001
- name: format
description: The file type
in: query
required: false
default: json
schema:
type: string
enum: [json, yaml, context, turtle, jsonld]
""" # noqa:DAR101,DAR201
data = bioregistry.get_collection(identifier)
if not data:
abort(404, f"Invalid collection: {identifier}")
return serialize(
data,
serializers=[
("context", "application/ld+json", collection_to_context_jsonlds),
("turtle", "text/plain", partial(collection_to_rdf_str, fmt="turtle")),
("jsonld", "application/ld+json", partial(collection_to_rdf_str, fmt="json-ld")),
],
)
@api_blueprint.route("/contexts")
def contexts():
"""Get all contexts.
---
tags:
- context
parameters:
- name: format
description: The file type
in: query
required: false
default: json
schema:
type: string
enum: [json, yaml]
""" # noqa:DAR101,DAR201
return serialize(sanitize_mapping(bioregistry.read_contexts()))
@api_blueprint.route("/context/<identifier>")
def context(identifier: str):
"""Get a context.
---
tags:
- context
parameters:
- name: identifier
in: path
description: The identifier of the context
required: true
type: string
example: obo
- name: format
description: The file type
in: query
required: false
default: json
schema:
type: string
enum: [json, yaml]
""" # noqa:DAR101,DAR201
data = bioregistry.get_context(identifier)
if not data:
abort(404, f"Invalid context: {identifier}")
return serialize(data)
@api_blueprint.route("/reference/<prefix>:<identifier>")
def reference(prefix: str, identifier: str):
"""Look up information on the reference.
---
tags:
- reference
parameters:
- name: prefix
in: path
description: The prefix for the entry
required: true
type: string
example: efo
- name: identifier
in: path
description: The identifier for the entry
required: true
type: string
example: 0000311
- name: format
description: The file type
in: query
required: false
default: json
schema:
type: string
enum: [json, yaml]
""" # noqa:DAR101,DAR201
return serialize(_get_identifier(prefix, identifier))
@api_blueprint.route("/contributors")
def contributors():
"""Get all contributors.
---
tags:
- contributor
parameters:
- name: format
description: The file type
in: query
required: false
default: json
schema:
type: string
enum: [json, yaml]
""" # noqa:DAR101,DAR201
return serialize(sanitize_mapping(read_contributors()))
@api_blueprint.route("/contributor/<orcid>")
def contributor(orcid: str):
"""Get a contributor.
---
tags:
- contributor
parameters:
- name: orcid
in: path
description: The ORCID identifier of the contributor
required: true
type: string
example: 0000-0002-8424-0604
- name: format
description: The file type
in: query
required: false
default: json
schema:
type: string
enum: [json, yaml]
""" # noqa:DAR101,DAR201
author = read_contributors().get(orcid)
if author is None:
return abort(404, f"No contributor with orcid:{orcid}")
return serialize(
{
**author.dict(),
"prefix_contributions": sorted(read_prefix_contributions().get(orcid, [])),
"prefix_reviews": sorted(read_prefix_reviews().get(orcid, [])),
"prefix_contacts": sorted(read_prefix_contacts().get(orcid, [])),
"registries": sorted(read_registry_contributions().get(orcid, [])),
"collections": sorted(read_collections_contributions().get(orcid, [])),
}
)
@api_blueprint.route("/search")
def search():
"""Search for a prefix.
---
parameters:
- name: q
in: query
description: The prefix for the entry
required: true
type: string
""" # noqa:DAR101,DAR201
q = request.args.get("q")
if q is None:
abort(400)
return jsonify(_search(q))
@api_blueprint.route("/autocomplete")
def autocomplete():
"""Complete a resolution query.
---
parameters:
- name: q
in: query
description: The prefix for the entry
required: true
type: string
""" # noqa:DAR101,DAR201
q = request.args.get("q")
if q is None:
abort(400)
return jsonify(_autocomplete(q))
@api_blueprint.route("/context.jsonld")
def generate_context_json_ld():
"""Generate an *ad-hoc* context JSON-LD file from the given parameters.
You can either give prefixes as a comma-separated list like:
https://bioregistry.io/api/context.jsonld?prefix=go,doid,oa
or you can use multiple entries for "prefix" like:
https://bioregistry.io/api/context.jsonld?prefix=go&prefix=doid&prefix=oa
---
parameters:
- name: prefix
in: query
description: The prefix for the entry. Can be given multiple.
required: true
type: string
""" # noqa:DAR101,DAR201
prefix_map = {}
for arg in request.args.getlist("prefix", type=str):
for prefix in arg.split(","):
prefix = normalize_prefix(prefix.strip())
if prefix is None:
continue
uri_prefix = get_uri_prefix(prefix)
if uri_prefix is None:
continue
prefix_map[prefix] = uri_prefix
return jsonify(
{
"@context": prefix_map,
}
)
|
"""
K Closest Point to Origin
Given an array of points where points[i] = [xi, yi] represents a point on the X-Y plane and an integer k,
return the k closest points to the origin (0, 0).
The distance between two points on the X-Y plane is the Euclidean distance (i.e., sqrt((x1 - x2)^2 + (y1 - y2)^2)).
You may return the answer in any order. The answer is guaranteed to be unique (except for the order that it is in).
Example 1:
Input: points = [[1,3],[-2,2]], k = 1
Output: [[-2,2]]
Explanation:
The distance between (1, 3) and the origin is sqrt(10).
The distance between (-2, 2) and the origin is sqrt(8).
Since sqrt(8) < sqrt(10), (-2, 2) is closer to the origin.
We only want the closest k = 1 points from the origin, so the answer is just [[-2,2]].
Example 2:
Input: points = [[3,3],[5,-1],[-2,4]], k = 2
Output: [[3,3],[-2,4]]
Explanation: The answer [[-2,4],[3,3]] would also be accepted.
Constraints:
1 <= k <= points.length <= 10^4
-10^4 < xi, yi < 10^4
"""
import heapq
from typing import List
class Solution:
def kClosest(self, points: List[List[int]], k: int) -> List[List[int]]:
'''
Minheap solution 2, useful when k is much smaller than N, which could reduce time complexity to ~O(N)
The idea is to maintain a heap of size k.
Steps:
1. For each point (x, y) in the array, we insert them into a binary heap
along with the distance (we reverse the sign) since we need to keep points with smallest distance
2. To keep the heap of size k, we use `heappushpop` function to immediately
remove the smallest element as we push a new one.
Complexity:
time: O(N*logK)
space: O(K) for the heap
'''
heap = []
for x, y in points: # O(N)
dist = -(x ** 2 + y ** 2)
if k == len(heap):
heapq.heappushpop(heap, (dist, x, y)) # O(logK) as time complexity depend on the size of the heap
else:
heapq.heappush(heap, (dist, x, y)) # O(logK)
return [(x, y) for dist, x, y in heap]
def kClosest_minHeap1(self, points: List[List[int]], k: int) -> List[List[int]]:
'''
Minheap solution 1, useful when N is small and k is almost up to N,
so MinHeap would have a constant factor of 2N.
Steps:
1. Create a list of tuples containing (dist, x, y) points
2. Heapify such that we have a tree-based data structure where the parent node
is always smaller than child node. ie. the tree root is the smallest element in the tree
3. For k times, we use `heappop` (a function that removes the smallest element in this minHeap)
            to get the k closest (x, y) points
Complexity:
time: O(N + k*logN)
space: O(N) since we keep two arrays of size N and k, where max of k would be N
'''
min_heap = []
for x, y in points: # O(N)
min_heap.append((x ** 2 + y ** 2, x, y)) # (distance, x, y)
heapq.heapify(min_heap) # O(N)
output = []
for _ in range(k): # O(k)
dist, x, y = heapq.heappop(min_heap) # O(logN)
output.append((x, y))
return output
def kClosest_naive(self, points: List[List[int]], k: int) -> List[List[int]]:
'''
Naive solution using hashmap.
Complexity:
time: O(N logN) -- sorting a list
space: O(N) -- hashmap
'''
d = {}
for x, y in points: # O(N)
dist = x ** 2 + y ** 2
d[(x, y)] = dist
sorted_d = sorted([(k, v) for k, v in d.items()], key=lambda x: x[1]) # O(N logN)
# print(sorted_d)
return [i[0] for i in sorted_d][:k] # O(N) |
#!/usr/bin/env python3
#This sample demonstrates setting fan speed according to CPU temperature.
#Install RC Driver HAT library with "pip3 install turta-rcdriverhat"
from time import sleep
from turta_rcdriverhat import Turta_IMU
from turta_rcdriverhat import Turta_RCDriver
#Initialize
imu = Turta_IMU.IMU()
rc = Turta_RCDriver.RCDriver()
try:
while True:
#Read IMU temperature
imu_temp_c = imu.read_temp()
imu_temp_f = imu.read_temp(True)
#Read CPU temperature
cpu_temp_c = rc.read_cpu_temp()
cpu_temp_f = rc.read_cpu_temp(True)
#Set fan speed
#0 is fan off, 100 is maximum speed
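        #With this linear mapping ~37.5C maps to 0 and ~50C maps to 100
        #(note: the computed value is not clamped to the 0-100 range here)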
fan_speed = int(8 * cpu_temp_c) - 300
rc.set_fan(fan_speed)
#Print the readings
print("IMU Temp..: " + str(imu_temp_c) + "C" + \
" / " + str(imu_temp_f) + "F")
print("CPU Temp..: " + str(cpu_temp_c) + "C" + \
" / " + str(cpu_temp_f) + "F")
print("Fan speed.: " + str(fan_speed) + "%")
#Wait
print("-----")
sleep(1.0)
#Exit on CTRL+C
except KeyboardInterrupt:
print('Bye.')
|
import sys
sys.path.append('..')
from scrython.foundation import FoundationObject
import aiohttp
import asyncio
import urllib.parse
from threading import Thread
class CardsObject(FoundationObject):
"""
Master class that all card objects inherit from.
Args:
format (string, optional):
Defaults to 'json'.
Returns data in the specified method.
face (string, optional):
Defaults to empty string.
If you're using the `image` format, this will specify if you want the front or back face.
version (string, optional):
Defaults to empty string.
If you're using the `image` format, this will specify if you want the small, normal,
large, etc version of the image.
pretty (string, optional):
Defaults to empty string.
Returns a prettier version of the json object.
Note that this may break functionality with Scrython.
Raises:
Exception: If the object returned is an error.
"""
def object(self):
"""Returns the type of object it is
(card, error, etc)
Returns:
string
"""
super(CardsObject, self)._checkForKey('object')
return self.scryfallJson['object']
def id(self):
"""A unique ID for the returned card object
Returns:
string
"""
super(CardsObject, self)._checkForKey('id')
return self.scryfallJson['id']
def multiverse_ids(self):
"""The official Gatherer multiverse ids of the card
Returns:
list
"""
super(CardsObject, self)._checkForKey('multiverse_ids')
return self.scryfallJson['multiverse_ids']
def mtgo_id(self):
"""The official MTGO id of the of the card
Returns:
integer: The Magic Online id of the card
"""
super(CardsObject, self)._checkForKey('mtgo_id')
return self.scryfallJson['mtgo_id']
def mtgo_foil_id(self):
"""The corresponding MTGO foil ID of the card
Returns:
integer: The Magic Online foil id of the card
"""
super(CardsObject, self)._checkForKey('mtgo_foil_id')
return self.scryfallJson['mtgo_foil_id']
def tcgplayer_id(self):
"""The `productId` of the card on TCGplayer.
Returns:
integer: The TCGplayer id of the card
"""
super(CardsObject, self)._checkForKey('tcgplayer_id')
return self.scryfallJson['tcgplayer_id']
def name(self):
"""The oracle name of the card
Returns:
string
"""
super(CardsObject, self)._checkForKey('name')
return self.scryfallJson['name']
def uri(self):
"""The Scryfall API uri for the card
Returns:
string
"""
super(CardsObject, self)._checkForKey('uri')
return self.scryfallJson['uri']
def scryfall_uri(self):
"""The full Scryfall page of the card
As if it was a URL from the site.
Returns:
string
"""
super(CardsObject, self)._checkForKey('scryfall_uri')
return self.scryfallJson['scryfall_uri']
def layout(self):
"""The image layout of the card. (normal, transform, etc)
Returns:
string
"""
super(CardsObject, self)._checkForKey('layout')
return self.scryfallJson['layout']
def highres_image(self):
"""Determine if a card has a highres scan available
Returns:
boolean
"""
super(CardsObject, self)._checkForKey('highres_image')
return self.scryfallJson['highres_image']
def image_uris(self, index=0, image_type=None):
"""All image uris of the card in various qualities
        An index and an image type must be supplied to get a single uri.
If the card has additional faces, the returned dict will
default to the front of the card.
Returns:
dict: If given no arguments
string: If given an index and image_type
Raises:
Exception: If given no index
KeyError: If the given image type is not a known type
"""
layouts = {
'normal': lambda num: self.scryfallJson['image_uris'],
'split': lambda num: self.scryfallJson['image_uris'],
'flip': lambda num: self.scryfallJson['image_uris'],
'transform': lambda num: self.scryfallJson['card_faces'][num]['image_uris'],
'meld': lambda num: self.scryfallJson['image_uris'],
'leveler': lambda num: self.scryfallJson['image_uris'],
'saga': lambda num: self.scryfallJson['image_uris'],
'planar': lambda num: self.scryfallJson['image_uris'],
'scheme': lambda num: self.scryfallJson['image_uris'],
'vanguard': lambda num: self.scryfallJson['image_uris'],
'token': lambda num: self.scryfallJson['image_uris'],
'double_faced_token': lambda num: self.scryfallJson['card_faces'][num]['image_uris'],
'emblem': lambda num: self.scryfallJson['image_uris'],
'augment': lambda num: self.scryfallJson['image_uris'],
'host': lambda num: self.scryfallJson['image_uris'],
'adventure': lambda num: self.scryfallJson['image_uris'],
'modal_dfc': lambda num: self.scryfallJson['card_faces'][num]['image_uris']
}
image_types = {
'small': lambda d: d['small'],
'normal': lambda d: d['normal'],
'large': lambda d: d['large'],
'png': lambda d: d['png'],
'art_crop': lambda d: d['art_crop'],
'border_crop': lambda d: d['border_crop']
}
images_dict = layouts.get(self.scryfallJson['layout'])
uri = image_types.get(image_type)
if index == 0 and image_type is None:
return images_dict(0)
elif not isinstance(index, int):
raise Exception('You must supply an index to get a uri')
elif image_type not in list(image_types.keys()):
raise KeyError('Image type not in known types')
return uri(images_dict(index))
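    # Illustrative usage (hypothetical `card` object, not from the original docs):
    #   card.image_uris() returns the full image_uris dict of the front face,
    #   card.image_uris(0, 'normal') returns the single 'normal'-quality uri.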
def cmc(self):
"""A float of the converted mana cost of the card
Returns:
float: The cmc of the card
"""
super(CardsObject, self)._checkForKey('cmc')
return self.scryfallJson['cmc']
def type_line(self):
"""The full type line of the card
Returns:
string
"""
super(CardsObject, self)._checkForKey('type_line')
return self.scryfallJson['type_line']
def oracle_text(self):
"""The official oracle text of a card
Returns:
string
"""
super(CardsObject, self)._checkForKey('oracle_text')
return self.scryfallJson['oracle_text']
def mana_cost(self):
"""The full mana cost using shorthanded mana symbols
Returns:
string
"""
super(CardsObject, self)._checkForKey('mana_cost')
return self.scryfallJson['mana_cost']
def colors(self):
"""A list of strings with all colors found in the mana cost
Returns:
list
"""
super(CardsObject, self)._checkForKey('colors')
return self.scryfallJson['colors']
def color_identity(self):
"""A list of strings with all colors found on the card itself
Returns:
list
"""
super(CardsObject, self)._checkForKey('color_identity')
return self.scryfallJson['color_identity']
def legalities(self):
"""A dictionary of all formats and their legality
Returns:
dict
"""
super(CardsObject, self)._checkForKey('legalities')
return self.scryfallJson['legalities']
def reserved(self):
"""Returns True if the card is on the reserved list
Returns:
boolean
"""
super(CardsObject, self)._checkForKey('reserved')
return self.scryfallJson['reserved']
def reprint(self):
"""Returns True if the card has been reprinted before
Returns:
boolean
"""
super(CardsObject, self)._checkForKey('reprint')
return self.scryfallJson['reprint']
def set_code(self):
"""The 3 letter code for the set of the card
Returns:
string
"""
super(CardsObject, self)._checkForKey('set')
return self.scryfallJson['set']
def set_name(self):
"""The full name for the set of the card
Returns:
string
"""
super(CardsObject, self)._checkForKey('set_name')
return self.scryfallJson['set_name']
def set_uri(self):
"""The API uri for the full set list of the card
Returns:
string
"""
super(CardsObject, self)._checkForKey('set_uri')
return self.scryfallJson['set_uri']
def set_search_uri(self):
"""Same output as set_uri
Returns:
string
"""
super(CardsObject, self)._checkForKey('set_search_uri')
return self.scryfallJson['set_search_uri']
def scryfall_set_uri(self):
"""The full link to the set on Scryfall
Returns:
string
"""
super(CardsObject, self)._checkForKey('scryfall_set_uri')
return self.scryfallJson['scryfall_set_uri']
def rulings_uri(self):
"""The API uri for the rulings of the card
Returns:
string
"""
super(CardsObject, self)._checkForKey('rulings_uri')
return self.scryfallJson['rulings_uri']
def prints_search_uri(self):
"""A link to where you can begin paginating all re/prints for this card on Scryfall’s API
Returns:
string
"""
super(CardsObject, self)._checkForKey('prints_search_uri')
return self.scryfallJson['prints_search_uri']
def collector_number(self):
"""The collector number of the card
Returns:
string
"""
super(CardsObject, self)._checkForKey('collector_number')
return self.scryfallJson['collector_number']
def digital(self):
"""Returns True if the card is the digital version
Returns:
boolean
"""
super(CardsObject, self)._checkForKey('digital')
return self.scryfallJson['digital']
def rarity(self):
"""The rarity of the card
Returns:
string
"""
super(CardsObject, self)._checkForKey('rarity')
return self.scryfallJson['rarity']
def illustration_id(self):
"""The related id of the card art
Returns:
string
"""
super(CardsObject, self)._checkForKey('illustration_id')
return self.scryfallJson['illustration_id']
def artist(self):
"""The artist of the card
Returns:
string
"""
super(CardsObject, self)._checkForKey('artist')
return self.scryfallJson['artist']
def frame(self):
"""The year of the card frame
Returns:
string
"""
super(CardsObject, self)._checkForKey('frame')
return self.scryfallJson['frame']
def frame_effects(self):
"""The card's frame effect, if any. (miracle, nyxtouched, etc.)
Returns:
list: The card's frame effects.
"""
super(CardsObject, self)._checkForKey('frame_effects')
return self.scryfallJson['frame_effects']
def full_art(self):
"""Returns True if the card is considered full art
Returns:
boolean
"""
super(CardsObject, self)._checkForKey('full_art')
return self.scryfallJson['full_art']
def border_color(self):
"""The color of the card border
Returns:
string
"""
super(CardsObject, self)._checkForKey('border_color')
return self.scryfallJson['border_color']
def edhrec_rank(self):
"""The rank of the card on edhrec.com
Returns:
            int: The rank of the card on edhrec.com
"""
super(CardsObject, self)._checkForKey('edhrec_rank')
return self.scryfallJson['edhrec_rank']
def prices(self, mode):
"""Returns prices from modes `usd`, `usd_foil`, `eur`, and `tix`
Args:
mode (string): The prices to get
Raises:
KeyError: If the mode parameter does not match a known key
Returns:
float: The prices as a float
"""
modes = ['usd', 'usd_foil', 'eur', 'tix']
if mode not in modes:
raise KeyError("{} is not a key.".format(mode))
super(CardsObject, self)._checkForKey('prices', mode)
return self.scryfallJson['prices'][mode]
def related_uris(self):
"""A dictionary of related websites for this card
Returns:
dict
"""
super(CardsObject, self)._checkForKey('related_uris')
return self.scryfallJson['related_uris']
def purchase_uris(self):
"""A dictionary of links to purchase the card
Returns:
dict
"""
super(CardsObject, self)._checkForKey('purchase_uris')
return self.scryfallJson['purchase_uris']
def life_modifier(self):
"""This is the cards life modifier value, assuming it's a Vanguard card
Returns:
string
"""
super(CardsObject, self)._checkForKey('life_modifier')
return self.scryfallJson['life_modifier']
def hand_modifier(self):
"""This cards hand modifier value, assuming it's a Vanguard card
Returns:
string
"""
super(CardsObject, self)._checkForKey('hand_modifier')
return self.scryfallJson['hand_modifier']
def color_indicator(self, num):
"""An list of all colors found in this card's color indicator
Returns:
list
"""
self._checkForTupleKey('card_faces', num, 'color_indicator')
return self.scryfallJson['card_faces'][num]['color_indicator']
def all_parts(self):
"""This this card is closely related to other cards, this property will be an list with it
Returns:
list
"""
super(CardsObject, self)._checkForKey('all_parts')
return self.scryfallJson['all_parts']
def card_faces(self):
"""If it exists, all parts found on a card's face will be found as an object from this list
Returns:
list
"""
super(CardsObject, self)._checkForKey('card_faces')
return self.scryfallJson['card_faces']
def watermark(self):
"""The associated watermark of the card, if any
Returns:
string
"""
super(CardsObject, self)._checkForKey('watermark')
return self.scryfallJson['watermark']
def story_spotlight(self):
"""True if this card is featured in the story
Returns:
boolean
"""
super(CardsObject, self)._checkForKey('story_spotlight')
return self.scryfallJson['story_spotlight']
def power(self):
"""The power of the creature, if applicable
Returns:
string
"""
super(CardsObject, self)._checkForKey('power')
return self.scryfallJson['power']
def toughness(self):
"""The toughness of the creature, if applicable
Returns:
string
"""
super(CardsObject, self)._checkForKey('toughness')
return self.scryfallJson['toughness']
def loyalty(self):
"""This card's loyalty. Some loyalties may be X rather than a number
Returns:
string
"""
super(CardsObject, self)._checkForKey('loyalty')
return self.scryfallJson['loyalty']
def flavor_text(self):
"""The flavor text of the card, if any
Returns:
string
"""
super(CardsObject, self)._checkForKey('flavor_text')
return self.scryfallJson['flavor_text']
def arena_id(self):
"""The Arena ID of the card, if any
Returns:
int: The Arena ID of the card, if any
"""
super(CardsObject, self)._checkForKey('arena_id')
return self.scryfallJson['arena_id']
def lang(self):
"""The language of the card
Returns:
string
"""
super(CardsObject, self)._checkForKey('lang')
return self.scryfallJson['lang']
def printed_name(self):
"""If the card is in a non-English language, this will be the name as it appears on the card
Returns:
string
"""
super(CardsObject, self)._checkForKey('printed_name')
return self.scryfallJson['printed_name']
def printed_type_line(self):
"""If the card is in a non-English language, this will be the type line as it appears on the card
Returns:
string
"""
super(CardsObject, self)._checkForKey('printed_type_line')
return self.scryfallJson['printed_type_line']
def printed_text(self):
"""If the card is in a non-English language, this will be the rules text as it appears on the card
Returns:
string
"""
super(CardsObject, self)._checkForKey('printed_text')
return self.scryfallJson['printed_text']
def oracle_id(self):
"""A unique ID for this card's oracle text
Returns:
string
"""
super(CardsObject, self)._checkForKey('oracle_id')
return self.scryfallJson['oracle_id']
def foil(self):
"""True if this printing exists in a foil version
Returns:
boolean
"""
super(CardsObject, self)._checkForKey('foil')
return self.scryfallJson['foil']
def nonfoil(self):
"""True if this printing does not exist in foil
Returns:
boolean
"""
super(CardsObject, self)._checkForKey('nonfoil')
return self.scryfallJson['nonfoil']
def oversized(self):
"""True if this printing is an oversized card
Returns:
boolean
"""
super(CardsObject, self)._checkForKey('oversized')
return self.scryfallJson['oversized']
def games(self):
"""A list of games that this card print is available in.
Returns:
array: A list of games
"""
super(CardsObject, self)._checkForKey('games')
return self.scryfallJson['games']
def promo(self):
"""True if this card is a promotional print.
Returns:
boolean
"""
super(CardsObject, self)._checkForKey('promo')
return self.scryfallJson['promo']
def released_at(self):
"""The date this card was first released.
Returns:
string: The date in ISO format
"""
super(CardsObject, self)._checkForKey('released_at')
return self.scryfallJson['released_at']
def preview(self, key=None):
"""Preview information for this card, if any.
You may pass the name of a valid key to return the value of that key.
Such as a source_uri.
Args:
key (string): A key for specific information about the preview.
Returns:
dict: If provided no key, the entire dict is returned.
string: If provided a key, the value of that key is returned.
"""
super(CardsObject, self)._checkForKey('preview')
if key in self.scryfallJson['preview']:
return self.scryfallJson['preview'][key]
return self.scryfallJson['preview']
|
from django.test import Client, TestCase
from posts.models import Group, Post, User
class PostModelTest(TestCase):
def setUp(self):
self.user = User.objects.create_user(username='Igor')
self.user_client = Client()
self.user_client.force_login(self.user)
self.test_post = Post.objects.create(
text='F' * 20,
author=self.user,
)
def test_verbose_name(self):
names = {
'text': 'Текст заметки',
'pub_date': 'Дата публикации',
'author': 'Автор',
'group': 'Группа',
}
post = self.test_post
for value, expected in names.items():
with self.subTest(value=value):
actual = post._meta.get_field(value).verbose_name
                msg = f'verbose_name of field {value} must not change'
self.assertEqual(actual, expected, msg)
def test_help_text(self):
texts = {
'text': 'Напишите то, что хотели написать',
'author': 'Укажите имя автора',
'group': 'Выберите группу для публикации',
}
for value, expected in texts.items():
with self.subTest(value=value):
actual = self.test_post._meta.get_field(value).help_text
                msg = f'help_text of field {value} must not change'
self.assertEqual(actual, expected, msg)
def test_group_str(self):
test_group = Group.objects.create(
title='Test Group',
slug='group',
description='Description',
)
actual = str(test_group)
expected = test_group.title
        msg = 'The group __str__ does not match its title'
self.assertEqual(actual, expected, msg)
    def test_post_str(self):
        actual = str(self.test_post)
        expected = self.test_post.text[:15]
        msg = 'The post __str__ is not truncated to the expected number of characters'
        self.assertEqual(actual, expected, msg)
|
import numpy
def bubblesort(array):
for i in range(len(array)):
for k in range(0, len(array) - i - 1):
if array[k] > array[k + 1]:
array[k], array[k + 1] = array[k + 1], array[k]
return array
def main():
print("**Bubblesort**")
randarray = list(numpy.random.randint(0, 100, 25))
print("Random array:", randarray)
print("Sorted array:", bubblesort(randarray))
if __name__ == '__main__':
main()
|
def get_model():
from django_comments.models import Comment
return Comment
def get_form():
from extcomments.forms import CommentForm
return CommentForm
|
import numpy as np
import matplotlib.pyplot as plt
import corner
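# number of initial samples to skip when plotting (treated here as burn-in)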
nbuilt = 30000
mcmc = np.loadtxt("../data/mcmc.txt")
ntheta = mcmc.shape[1]
#fig = plt.figure()
corner.corner(mcmc[nbuilt:, 1:])
plt.show() |
from .intelligent_system_group_dataset_reader import IntelligentSystemGroupDatasetReader
from .survey_dataset_reader import SurveyDatasetReader
|
# Topic 2. The LIST data type (list)------------------------
# ------------------------------------------
print('--------------------------------------Тема 2. Тип данных СПИСОК (list)----------------------------------------')
print('--------------------------------------------------------------------------------------------------------------')
print('')
# Section. Basic RULES for LISTS:
print('----------------------------------------------Правила:--------------------------------------------------------')
print('ЕСЛИ СПИСОК: List_test1 = [0,1,2,3,4,"F5"], элементы списка разделяются запятыми')
print('Элементами списка могут быть любые объекты, в том числе и сами листы')
print('Выборку из листа можно выполнить так: [x**2|for x in num|if x>0]')
print('Выборку из листа можно выполнить так: [Операция с найденными(x)| Переменная(x) и множиство|Условие выбора(x)]')
print('ТО АДРЕСАЦИЯ ОБЪЕКТОВ ЛИСТА ВЫГЛЯДИТ ТАК:')
print('--------------------------------')
print('[ 0][ 1][ 2][ 3][ 4][ "F5"]')
print(' 0 : 1 : 2 : 3 : 4 : 5 ')
print('--------------------------------')
List_test1 = [0, 1, 2, 3, 4, "F5"]
print(type(List_test1), List_test1)
for x1 in range(6):
x2 = 6 - x1
sp1 = ' '
sp2 = ' '
if (x1 == 0 or x1 == 5): sp2 = ''
if x1 == 5: sp1 = ' '
if x1 == 5: sp2 = sp2 + ' '
for i in range(6 - len(List_test1[0:x1])): sp2 = sp2 + " "
print('List_test1[', x1, '] =', List_test1[x1], sp1 + 'List_test1[', 0, ':', x1, '] =', List_test1[0:x1],
sp2 + ' List_test1[', x2, ':', 6, '] =', List_test1[x2:6])
print(' List_test1[ 0 : 6 ] =', List_test1[0:6], ' List_test1[ 0 : 6 ] =', List_test1[0:6])
print('**************************************************************************************************************')
print('')
# Section. Initializing a LIST
print('Раздел. Инициализация СПИСКА,')
print('')
# 1 Initializing a LIST with square brackets [] - an empty list
print('----------------------------------------------Описание:-------------------------------------------------------')
print('1 Инициализация СПИСКА квадратными скобками [] - пустой список')
print('--------------------------------------------Python Script:----------------------------------------------------')
print('пустой список : list_temp = []')
print('print(type(list_temp))')
print('----------------------------------------------Результат:------------------------------------------------------')
list_temp = []  # empty list
print(type(list_temp), list_temp)
print('**************************************************************************************************************')
print('')
# 1 Initializing a LIST with square brackets - a non-empty list
print('----------------------------------------------Описание:-------------------------------------------------------')
print('1 Инициализация СПИСКА квадратными скобками - не пустой список')
print('Обращение и вывод значений и типа объектов СПИСКА.')
print('--------------------------------------------Python Script:----------------------------------------------------')
print('не пустой список: list_temp = [1.2, 123, \'Volvo\', [1,2,3]]')
print('for el in list_temp:')
print('print(el, type(el))')
print('----------------------------------------------Результат:------------------------------------------------------')
list_temp = [1.2, 123, 'Volvo', [1, 2, 3]]  # non-empty list
for el in list_temp:
print(el, type(el))
print('**************************************************************************************************************')
print('')
# 2 Initializing a LIST with the list() function
print('----------------------------------------------Описание:-------------------------------------------------------')
print('2 Инициализация СПИСКА командой list')
print('Обращение и вывод значений и типа объектов СПИСКА.')
print('--------------------------------------------Python Script:----------------------------------------------------')
print('list_str = list(\'Volvo\')')
print('print(list_str)')
print('----------------------------------------------Результат:------------------------------------------------------')
list_str = list('Volvo')
print(list_str)
print('**************************************************************************************************************')
print('')
# Accessing list elements, sublists
print('----------------------------------------------Описание:-------------------------------------------------------')
print('1 Вариант обращения к элементам списка')
print('--------------------------------------------Python Script:----------------------------------------------------')
print('for i in range(len(list_temp)):')
print('print(i, \':\', list_temp[i])')
print('----------------------------------------------Результат:------------------------------------------------------')
for i in range(len(list_temp)):
print(i, ':', list_temp[i])
print('**************************************************************************************************************')
print('')
print('----------------------------------------------Описание:-------------------------------------------------------')
print('2 Вариант обращения к элементам списка')
print('--------------------------------------------Python Script:----------------------------------------------------')
print('for i in range(len(list_temp)):')
print('print(i, \':\', list_temp[i:])')
print('----------------------------------------------Результат:------------------------------------------------------')
for i in range(len(list_temp)):
print(i, ':', list_temp[i:])
print('**************************************************************************************************************')
print('')
print('----------------------------------------------Описание:-------------------------------------------------------')
print('3 Вариант обращения к элементам списка')
print('--------------------------------------------Python Script:----------------------------------------------------')
print('for i in range(len(list_temp)):')
print('print(i, \':\', list_temp[:i])')
print('----------------------------------------------Результат:------------------------------------------------------')
for i in range(len(list_temp)):
print(i, ':', list_temp[:i])
print('**************************************************************************************************************')
print('')
# Functions on lists
print('----------------------------------------------Описание:-------------------------------------------------------')
print('Определение и вывод длины списка на экран')
print('--------------------------------------------Python Script:----------------------------------------------------')
print('print(len(list_temp))')
print('----------------------------------------------Результат:------------------------------------------------------')
print(len(list_temp))
print('**************************************************************************************************************')
print('')
# Operations on lists
print('----------------------------------------------Описание:-------------------------------------------------------')
print('Сложение списков и умножение на целое число')
print('--------------------------------------------Python Script:----------------------------------------------------')
print('print(list_temp + list_str)')
print('print(list_temp*2)')
print('----------------------------------------------Результат:------------------------------------------------------')
print(list_temp + list_str)
print(list_temp * 2)
print('**************************************************************************************************************')
print('')
# Methods
print('---------------------------------------------Описание:-------------------------------------------------------')
print('аppend - дополнение в конец списка, с генерацией значений')
print('--------------------------------------------Python Script:----------------------------------------------------')
print('integer_list = []')
print('for i in range(5):')
print(' integer_list.append(i)')
print('print(integer_list)')
print('----------------------------------------------Результат:------------------------------------------------------')
integer_list = []
for i in range(5):
integer_list.append(i)
print(integer_list)
print('**************************************************************************************************************')
print('')
print('----------------------------------------------Описание:-------------------------------------------------------')
print('аppend - дополнение в конец списка, одиночная константа')
print('--------------------------------------------Python Script:----------------------------------------------------')
print('integer_list.append(0)')
print('print(integer_list)')
print('----------------------------------------------Результат:------------------------------------------------------')
integer_list.append(0)
print(integer_list)
print('**************************************************************************************************************')
print('')
# remove
print('----------------------------------------------Описание:-------------------------------------------------------')
print('remove - одиночное удаление')
print('--------------------------------------------Python Script:----------------------------------------------------')
print('integer_list.remove(0)')
print('print(integer_list)')
print('----------------------------------------------Результат:------------------------------------------------------')
integer_list.remove(0)
print(integer_list)
print('**************************************************************************************************************')
print('')
# del
print('----------------------------------------------Описание:-------------------------------------------------------')
print('del - одиночное удаление')
print('--------------------------------------------Python Script:----------------------------------------------------')
print('del integer_list[4]')
print('print(integer_list)')
print('----------------------------------------------Результат:------------------------------------------------------')
del integer_list[4]
print(integer_list)
print('**************************************************************************************************************')
print('')
# reverse
print('----------------------------------------------Описание:-------------------------------------------------------')
print('reverse - реверс индексов по отношению к значениям списка')
print('--------------------------------------------Python Script:----------------------------------------------------')
print('integer_list.reverse()')
print('print(integer_list)')
print('----------------------------------------------Результат:------------------------------------------------------')
integer_list.reverse()
print(integer_list)
print('**************************************************************************************************************')
print('')
# sort
print('----------------------------------------------Описание:-------------------------------------------------------')
print('sort - сортировка списка по целочисленным значениям списка')
print('--------------------------------------------Python Script:----------------------------------------------------')
print('integer_list = [9,3,6,2,4]')
print('integer_list.sort()')
print('print(integer_list)')
print('----------------------------------------------Результат:------------------------------------------------------')
integer_list = [9, 3, 6, 2, 4]
integer_list.sort()
print(integer_list)
print('**************************************************************************************************************')
print('')
# insert
print('----------------------------------------------Описание:-------------------------------------------------------')
print('insert - вставка в список значений')
print('--------------------------------------------Python Script:----------------------------------------------------')
print('integer_list.insert(2, 100)')
print('print(integer_list)')
print('----------------------------------------------Результат:------------------------------------------------------')
integer_list.insert(2, 100)
print(integer_list)
print('**************************************************************************************************************')
print('')
# Section: list processing (map, filter, reduce)
print('')
# map
# map(function, list) ----> map -----> list(map)
# new_integer_list = list(map(str, integer_list))
print('----------------------------------------------Описание:-------------------------------------------------------')
print('map -------------------------------------------')
print('map(function, list) ----> map -----> list(map)')
print('new_integer_list = list(map(str, integer_list))')
print('--------------------------------------------Python Script:----------------------------------------------------')
print('integer_list = [9,3,6,2,4]')
print('new_integer_list = list(map(lambda x: x**2, integer_list))')
print('print(new_integer_list)')
print('----------------------------------------------Результат:------------------------------------------------------')
integer_list = [9, 3, 6, 2, 4]
new_integer_list = list(map(lambda x: x ** 2, integer_list))
print(new_integer_list)
print('**************************************************************************************************************')
print('')
# filter
print('----------------------------------------------Описание:-------------------------------------------------------')
print('filter - фильтрация списка')
print('--------------------------------------------Python Script:----------------------------------------------------')
print('integer_list = [9,3,6,2,4]')
print('new_integer_list = list(filter(lambda x: x<5, integer_list))')
print('print(new_integer_list)')
print('----------------------------------------------Результат:------------------------------------------------------')
integer_list = [9, 3, 6, 2, 4]
new_integer_list = list(filter(lambda x: x < 5, integer_list))
print(new_integer_list)
print('**************************************************************************************************************')
print('')
# reduce
print('----------------------------------------------Описание:-------------------------------------------------------')
print('reduce - свёртка списка в одно значение')
print('--------------------------------------------Python Script:----------------------------------------------------')
print('from functools import reduce')
print('integer_list = [1,2,3,4]')
print('sum = reduce(lambda x,y: x+y, integer_list)')
print('product = reduce(lambda x,y: x*y, integer_list)')
print('print(sum, product)')
print('----------------------------------------------Результат:------------------------------------------------------')
from functools import reduce
integer_list = [1, 2, 3, 4]
sum = reduce(lambda x, y: x + y, integer_list)
product = reduce(lambda x, y: x * y, integer_list)
print(sum, product)
print('**************************************************************************************************************')
print('')
|
def scoreBianka(inp):
if inp == "WWW": return 2
if inp == "BBB": return 0
if inp.count("B") == 2 or inp.count("W") == 2: return 1
def scoreWilliams(inp):
if inp == "WWW": return 2
if inp == "BBB": return 0
if inp.count("B") == 2: return 0
if inp.count("W") == 2: return 2
def winner(bianka, williams):
if bianka == 0:
if williams == 1: return "Williams"
if williams == 2: return "Bianka"
if bianka == 1:
if williams == 0: return "Bianka"
if williams == 2: return "Williams"
if bianka == 2:
if williams == 0: return "Williams"
if williams == 1: return "Bianka"
game = {0 : "Rock",
1 : "Paper",
2 : "Scissors"}
bianka, williams = raw_input().split()
bianka = scoreBianka(bianka)
williams = scoreWilliams(williams)
win = winner(bianka, williams)
if win == "Bianka":
print "Bianka won with " + game[bianka]
print "Williams lost with " + game[williams]
else:
print "Williams won with " + game[williams]
print "Bianka lost with " + game[bianka]
|
#!/usr/bin/env python3
from sys import argv
import argparse
import UPhO
def main():
usage = u"""
\t\t\u26F1 \u001b[31;1m newick2json.py \u001b[0m \u26F1
\n
    Converts your newick file into hierarchical json format.
    This script is written for python3 and requires the corresponding version of UPhO (https://github.com/ballesterus/UPhO) for split decomposition.
newick2json.py <NEWICKTREE> <OUTGROUP,TAXA,SEPARATED,WITH,COMMAS>
"""
print (usage)
F=open(argv[1], "r")
Oname =argv[1].split('.')[0] + ".json"
Fc=F.readline()
T = UPhO.myPhylo(Fc)
F.close()
print("\u001b[32m Tree file was read: OK")
global outg
global ing
myoutg=argv[2].split(",")
# myoutg=['Epiperipatus_sp', 'Opisthopatus_cinctipes', 'Peripatopsis_capensis']
outg=set(myoutg)
ing=set(T.leaves) - outg
R=clados(T)
update_parents(R)
L=ladderize(R)
with open(Oname, "w") as out:
out.write(build_json(L,R))
    print("\u001b[32m Enjoy editing your json. Bye bye \u001b[0m")
#Global variables
outg=""
ing=""
#class definition for nodes
class node:
    """Class related to UPhO.split but representing a single partition; the list of descendants of that node defines it"""
def __init__(self):
self.children=None
self.branch_length=None
self.support=None
self.name=None
self.parent=None
self.level=None
self.size=None
self.droot=0
#FUNCTION DEFINITIONS
def belongs_to_og(split_vec):
    """Verifies whether a list of OTUs is a subset of the outgroup"""
s = set(split_vec)
if outg.issuperset(s) and not ing.issuperset(s):
return True
else:
return False
def belongs_to_in(split_vec):
    """Verifies whether a list of OTUs is a subset of the ingroup"""
s = set(split_vec)
if ing.issuperset(s) and not outg.issuperset(s):
return True
else:
return False
def clados(phylo):
    """Returns a dictionary of nodes in a UPhO.myPhylo object to reflect polarity based on an outgroup (rooting).
    It also assigns node names and updates the branch lengths leading to the root"""
counter=0
result = {}
rnode=node()
rnode.name="root"
rnode.children=phylo.leaves
rnode.size=len(rnode.children)
rnode.level=0
rnode.droot=0
result[rnode.name]=rnode
for s in phylo.splits:
for v in s.vecs:
if len(v)==1:
tnode=node()
tnode.name=v[0]
tnode.size = 1
tnode.branch_length=s.branch_length
tnode.support=None
tnode.children=v
result[tnode.name]=tnode
elif set(v) == outg:
nbl = float(s.branch_length)/2
inode=node()
inode.children = v
inode.size=len(v)
inode.name="outgroup"
inode.parent="root"
inode.branch_length=nbl
inode.support = s.support
result[inode.name]=inode
inode=node()
inode.children = list(ing)
inode.name="ingroup"
inode.size=len(inode.children)
inode.parent="root"
inode.branch_length=nbl
inode.support = s.support
result[inode.name]=inode
elif belongs_to_og(v):
inode=node()
inode.children=v
inode.size=len(v)
inode.name= "n_" + str(counter)
counter += 1
inode.branch_length=s.branch_length
inode.support=s.support
result[inode.name]=inode
elif belongs_to_in(v) and len(v) < len(ing):
inode=node()
inode.children=v
inode.size=len(v)
inode.name= "n_" + str(counter)
counter += 1
inode.branch_length=s.branch_length
inode.support=s.support
result[inode.name]=inode
return result
def find_mommy(nodeName, nodes_dict):
    """Updates the parent of a node based on information from a collection of nodes (dictionary)"""
q=nodes_dict[nodeName]
qc=set(q.children)
parent="root"
min_psize=q.size
    c_psize = nodes_dict["root"].size  # This is as big as a parent can be; we want the smallest
for n in nodes_dict.keys():
cn = set(nodes_dict[n].children)
if len(cn) > min_psize and len(cn) < c_psize:
if qc.issubset(cn):
parent=nodes_dict[n].name
c_psize=len(cn)
q.parent=parent
def find_children(nodeName,node_dict):
    """Lists the direct children of a node. Run after all parents have been identified"""
result=[]
asize=node_dict['root'].size
for k in node_dict.keys():
if node_dict[k].parent == nodeName:
if node_dict[k].size < asize:
result.insert(0, k)
asize=node_dict[k].size
else:
result.append(k)
return result
def update_parents(nodes_dict):
"""Populates the parents for the nodes in the node dictionary"""
for k in nodes_dict:
if nodes_dict[k].name != "root":
find_mommy(k,nodes_dict)
def ladderize(nodes_dict):
    """Updates levels and returns a list of node keys ordered from the root downwards"""
ladder=["root"]
queue=[]
init=find_children('root', nodes_dict)
queue= queue + init
while len(queue) > 0:
for q in queue:
queue.remove(q)
ladder.append(q)
queue=queue + find_children(q, nodes_dict)
for n in ladder:
cp=nodes_dict[n].parent
if cp != None:
nodes_dict[n].level = nodes_dict[cp].level + 1
nodes_dict[n].droot= nodes_dict[cp].droot + float(nodes_dict[n].droot)
return ladder
def json_node(node_name, nodes_dict):
"""returns a json formatted string of a node"""
jstring=""
node= nodes_dict[node_name]
desc=find_children(node_name, nodes_dict)
children_placeholder="#" +'#,#'.join(desc) + "#"
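    # Each child is emitted here as a '#name#' placeholder token; build_json later
    # replaces every such token with that child's own JSON string.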
pad="\t" * node.level
if node.name == "root":
jstring="%s{\n%s\"name\" : \"%s\",\n%s\"children\" : [\n%s\n%s]\n%s}\n" %(pad,pad,node.name,pad, children_placeholder, pad,pad)
else:
if node.size > 1:
jstring="\n%s{\n%s\"name\" : \"%s\",\n%s\"parent\" : \"%s\",\n%s\"support\" : %s,\n%s\"branch-length\" : %s,\n%s\"children\" : [\n%s\n%s]\n%s}\n" %(pad,pad,node.name,pad, node.parent, pad, node.support,pad, node.branch_length, pad, children_placeholder, pad,pad)
else:
jstring="\n%s{\n%s\"name\" : \"%s\",\n%s\"parent\" : \"%s\",\n%s\"branch-length\" : %s\n%s}\n" %(pad,pad,node.name,pad, node.parent,pad, node.branch_length, pad)
return jstring
def build_json(ladder, nodes_dict):
construct=json_node(ladder[0], nodes_dict)
for n in ladder:
        search = "#" + n + "#"  # placeholder token emitted by json_node
replac = json_node(n, nodes_dict)
temp = construct.replace(search,replac)
construct = temp
return construct
if __name__ == "__main__":
main()
|
"""
Auxiliary functions for accessing the logging information generated by the
Test Application Server (TAS).
"""
#################################################################################
# MIT License
#
# Copyright (c) 2018, Pablo D. Modernell, Universitat Oberta de Catalunya (UOC),
# Universidad de la Republica Oriental del Uruguay (UdelaR).
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#################################################################################
import click
import lorawan.user_agent.logger.loggers as loggers
def log_all():
""" Starts a logger that collects the messages from all the TAS services."""
logger_mock = loggers.LoggerAll()
print("Starting log.")
logger_mock.start_logging()
def log_nwk_forwarder():
""" Starts a logger that collects the messages from the Payload Forwarder service."""
logger_mock = loggers.PayloadForwarderLog()
print("Starting Payload Forwarder Service log.")
logger_mock.start_logging()
def log_test_session_coordinator():
    """ Starts a logger that collects the messages from the Test Session Coordinator service."""
logger_mock = loggers.TestServerLog()
print("Starting Test Server log.")
logger_mock.start_logging()
|
import cv2
import face_recognition
import os
SMALLEST_DISTANCE_THRESHOLD = 2
NO_RECOGNITION = SMALLEST_DISTANCE_THRESHOLD + 1
class FaceRecognition(object):
def __init__(self, face_detector, known_face_path):
self.face_detector = face_detector
self.known_face_path = known_face_path
self.encoded_known_faces = self.get_encoded_known_faces()
def get_encoded_known_faces(self):
encoded_known_faces = {}
        for known_person in [f.name for f in os.scandir(self.known_face_path) if f.is_dir()]:
            # Initialize the person's list once, so encodings from earlier files are kept
            encoded_known_faces[known_person] = list()
            for face_file_name in os.listdir(f'{self.known_face_path}/{known_person}'):
                if face_file_name.lower().endswith(('.png', '.jpg', '.jpeg', '.tiff', '.bmp', '.gif')):
image = cv2.imread(f'{self.known_face_path}/{known_person}/{face_file_name}')
# Assuming that there is a single face in the known faces images per file
faces_bounding_boxes = self.face_detector.detect(image)
if not faces_bounding_boxes:
continue
face_bounding_box = faces_bounding_boxes[0]
encoded_known_faces[known_person].append(face_recognition.face_encodings(
image, known_face_locations=[face_bounding_box.css]))
return encoded_known_faces
def find_face_in_encodings(self, image, face_bounding_box):
encoded_face = face_recognition.face_encodings(image, known_face_locations=[face_bounding_box.css])
# Getting the closest image distance for each known person
scores = {name: min(
[face_recognition.face_distance([encoded_face[0]], encoded_known_face[0])
for encoded_known_face in encoded_known_faces])[0]
for name, encoded_known_faces in self.encoded_known_faces.items()}
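        # e.g. scores == {'alice': 0.42, 'bob': 0.77} (illustrative values): each name maps
        # to the distance of that person's closest stored encoding.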
# Getting the smallest distance
smallest_distance = min(scores.values())
if smallest_distance < SMALLEST_DISTANCE_THRESHOLD:
matched_name = min(scores, key=scores.get)
else:
matched_name = 'unknown'
return matched_name
|
#!/usr/bin/env python
import math, os, sys, random
try:
from optparse import OptionParser
except:
from optik import OptionParser
def main():
(instar,output) = parse_command_line()
g = open(instar, "r")
instar_line=g.readlines()
o1=open(output,"w")
mline0=judge_mline0(instar_line)
mline1=judge_mline1(mline0,instar_line)
# print mline0,mline1
# print instar_line[mline0]
# print instar_line[mline1]
mline0-=1
mline1-=1
R_mline0=int(instar_line[mline0].split('#')[1])-1
R_mline1=int(instar_line[mline1].split('#')[1])-1
First_mline0=mline0-R_mline0
First_mline1=mline1-R_mline1
# print First_mline0,mline0+1,First_mline1,mline1+1
# read relion 3.1 optics group info and convert it for relion 3.0; high-order aberration info is dropped.
# BeamTiltX & BeamTiltY are optional; all the other fields must be present.
BTX_index=-1
BTY_index=-1
for i in range(First_mline0,mline0+1):
if str(instar_line[i].split()[0])=="_rlnAmplitudeContrast":
AC_index=int(str(instar_line[i].split('#')[1]))-1
if str(instar_line[i].split()[0])=="_rlnBeamTiltX":
BTX_index=int(str(instar_line[i].split('#')[1]))-1
if str(instar_line[i].split()[0])=="_rlnBeamTiltY":
BTY_index=int(str(instar_line[i].split('#')[1]))-1
if str(instar_line[i].split()[0])=="_rlnSphericalAberration":
CS_index=int(str(instar_line[i].split('#')[1]))-1
if str(instar_line[i].split()[0])=="_rlnVoltage":
VOLTAGE_index=int(str(instar_line[i].split('#')[1]))-1
if str(instar_line[i].split()[0])=="_rlnImagePixelSize":
IPS_index=int(str(instar_line[i].split('#')[1]))-1
if str(instar_line[i].split()[0])=="_rlnImageSize":
IMS_index=int(str(instar_line[i].split('#')[1]))-1
if str(instar_line[i].split()[0])=="_rlnImageDimensionality":
ID_index=int(str(instar_line[i].split('#')[1]))-1
ac=float(str(instar_line[mline0+1].split()[AC_index]))
if(BTX_index!=-1):
btx=float(str(instar_line[mline0+1].split()[BTX_index]))
if(BTY_index!=-1):
bty=float(str(instar_line[mline0+1].split()[BTY_index]))
cs=float(str(instar_line[mline0+1].split()[CS_index]))
voltage=float(str(instar_line[mline0+1].split()[VOLTAGE_index]))
apix=float(str(instar_line[mline0+1].split()[IPS_index]))
imagesize=int(str(instar_line[mline0+1].split()[IMS_index]))
imagediamention=int(str(instar_line[mline0+1].split()[ID_index]))
# print ac,btx,bty,cs,voltage,apix,imagesize,imagediamention
# magnification=10000.0, detector pixel size=apix
# remove rln_opticsgroup rln_originXAngst rln_originYAngst
modified_metadata_lable=[]
modified_metadata_lable_num=[]
c=0
for i in range(First_mline1,mline1+1):
modified_metadata_lable.append([])
modified_metadata_lable_num.append([])
modified_metadata_lable[c]=str(instar_line[i].split()[0])
modified_metadata_lable_num[c]=int(str(instar_line[i].split('#')[1]))-1
c+=1
if str(instar_line[i].split()[0])=="_rlnOpticsGroup":
OG_index=int(str(instar_line[i].split('#')[1]))-1
if str(instar_line[i].split()[0])=="_rlnOriginXAngst":
OXA_index=int(str(instar_line[i].split('#')[1]))-1
if str(instar_line[i].split()[0])=="_rlnOriginYAngst":
OYA_index=int(str(instar_line[i].split('#')[1]))-1
# print modified_metadata_lable
# print modified_metadata_lable_num
# print OG_index,OXA_index,OYA_index
for i in range(0,len(modified_metadata_lable)):
if(modified_metadata_lable[i]=="_rlnOpticsGroup"):
del modified_metadata_lable[i]
del modified_metadata_lable_num[i]
break
for i in range(0,len(modified_metadata_lable)):
if(modified_metadata_lable[i]=="_rlnOriginXAngst"):
del modified_metadata_lable[i]
del modified_metadata_lable_num[i]
break
for i in range(0,len(modified_metadata_lable)):
if(modified_metadata_lable[i]=="_rlnOriginYAngst"):
del modified_metadata_lable[i]
del modified_metadata_lable_num[i]
break
c=len(modified_metadata_lable)
CCC=len(modified_metadata_lable)
for i in range(0,9):
modified_metadata_lable.append([])
modified_metadata_lable_num.append([])
modified_metadata_lable[c+0]="_rlnAmplitudeContrast"
modified_metadata_lable[c+1]="_rlnSphericalAberration"
modified_metadata_lable[c+2]="_rlnVoltage"
modified_metadata_lable[c+3]="_rlnMagnification"
modified_metadata_lable[c+4]="_rlnDetectorPixelSize"
modified_metadata_lable[c+5]="_rlnImageSize"
modified_metadata_lable[c+6]="_rlnImageDimensionality"
modified_metadata_lable[c+7]="_rlnOriginX"
modified_metadata_lable[c+8]="_rlnOriginY"
modified_metadata_lable_num[c+0]=ac
modified_metadata_lable_num[c+1]=cs
modified_metadata_lable_num[c+2]=voltage
modified_metadata_lable_num[c+3]=10000.0
modified_metadata_lable_num[c+4]=apix
modified_metadata_lable_num[c+5]=imagesize
modified_metadata_lable_num[c+6]=imagediamention
modified_metadata_lable_num[c+7]=OXA_index
modified_metadata_lable_num[c+8]=OYA_index
c=len(modified_metadata_lable)
if(BTX_index>=0):
for i in range(0,2):
modified_metadata_lable.append([])
modified_metadata_lable_num.append([])
modified_metadata_lable_num[c+i]=-1
modified_metadata_lable[c+0]="_rlnBeamTiltX"
modified_metadata_lable[c+1]="_rlnBeamTiltY"
modified_metadata_lable_num[c+0]=btx
modified_metadata_lable_num[c+1]=bty
c=len(modified_metadata_lable)
o1.write("#modified to fit relion3.0\n\ndata_\n\nloop_\n")
tmp=""
for i in range(0,c):
tmp+=str(modified_metadata_lable[i])
tmp+=" #"
tmp+=str(i+1)
tmp+="\n"
o1.write(tmp)
tmp=""
for i in range(mline1+1,len(instar_line)):
if(instar_line[i].split()):
tmp=""
for j in range(0,CCC):
tmp+=str(instar_line[i].split()[int(modified_metadata_lable_num[j])])
tmp+="\t"
for j in range(CCC,c):
if(str(modified_metadata_lable[j])=="_rlnOriginX" or str(modified_metadata_lable[j])=="_rlnOriginY"):
tmp+=str(float(instar_line[i].split()[int(modified_metadata_lable_num[j])])/apix)
else:
tmp+=str(modified_metadata_lable_num[j])
if(j!=c-1):
tmp+="\t"
else:
tmp+="\n"
o1.write(tmp)
g.close()
o1.close()
def parse_command_line():
usage="%prog <input relion 3.1 star> <output relion 3.0 star>"
parser = OptionParser(usage=usage, version="%1")
if len(sys.argv)<3:
print "<input relion 3.1 star> <output relion 3.0 star>"
sys.exit(-1)
(options, args)=parser.parse_args()
instar = args[0]
output=args[1]
return (instar,output)
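# judge_mline0/judge_mline1 locate the first data row after each block of '_rln...' label
# lines: the optics table (searched within the first 50 lines) and the particle table
# (searched within the first 100 lines), respectively.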
def judge_mline0(inline):
trys=50
intarget=-1
for i in range (0,trys):
if(inline[i].split()):
if(inline[i].split()[0][0]!="_"):
if(intarget==1):
return i
break
else:
continue
if(inline[i].split()[0][0]=="_"):
intarget=1
def judge_mline1(mline0,inline):
trys=100
intarget=-1
for i in range (mline0,trys):
if(inline[i].split()):
if(inline[i].split()[0][0]!="_"):
if(intarget==1):
return i
break
else:
continue
if(inline[i].split()[0][0]=="_"):
intarget=1
if __name__== "__main__":
main()
|
"""
This is the database module; it contains the database backends as well as a cache backend.
"""
from authx.database.mongodb import MongoDBBackend
from authx.database.redis import RedisBackend
__all__ = ["MongoDBBackend", "RedisBackend"]
|
import click
import grab
PREFIX = "GRAB"
path_message = (
"A path is required, set system variable 'export GRAB_PATH=/path/to/code' or pass in the path using "
"the '--path' keyword"
)
@click.group(
invoke_without_command=False,
context_settings={
"help_option_names": ["-h", "--help"],
"auto_envvar_prefix": PREFIX,
},
)
@click.version_option(version=grab.version())
@click.pass_context
def grab_cli(ctx):
"""Run the grab application."""
@grab_cli.command(
help="Add repos from file"
) # TODO add this back in when it is 7.1^ --> , no_args_is_help=True)
@click.option("-f", "--file", "file_", help="File name to import")
@click.option("-u", "--url", "url", help="URL of repo to import")
@click.option("-p", "--path", envvar=f"{PREFIX}_PATH", help="Base path for repos")
def add(file_, url, path):
if file_ and url:
print("Only select a file or a url")
exit(1)
if path is None:
print(path_message)
exit(1)
if file_ is None and url is None:
print("A file or url is required")
exit(1)
grab.add_repos(file_, url, path)
@grab_cli.command(name="list", help="List all the current repos")
@click.option("-o", "--org", help="Show only repos matching the org.")
@click.option("-w", "--wide", is_flag=True, help="Show more details about the repos")
@click.option("--generate", is_flag=True, help="Generate the repo_list.yaml file.")
@click.option(
"-p", "paths", multiple=True, help="Paths to be included in the generate function."
)
@click.option(
"--show-paths",
"show",
is_flag=True,
    help="List the paths from grab_paths.yaml that are used to generate "
    "the current repo_list.yaml file",
)
@click.option(
"--new-file", "new_file", is_flag=True, help="Create a new grab_paths.yaml file"
)
@click.argument("grab_path", envvar=f"{PREFIX}_PATH")
def list_repos(org, wide, generate, paths, show, new_file, grab_path):
if len(paths) > 0 and not generate:
print("-p can only be used with --generate")
print("Aborting...")
exit(1)
if new_file and not generate:
        print("--new-file can only be used with --generate")
print("Aborting...")
exit(1)
if (org is not None or wide) and (generate or show):
print("--org or --wide can not be used with --generate or --show-paths")
print("Aborting...")
exit(1)
if generate and show:
print("--generate and --show-paths can not be used together")
print("Aborting...")
exit(1)
if generate:
grab.generate(grab_path, paths, new_file)
elif show:
grab.show_paths()
else:
grab.list_repos(org, wide)
@grab_cli.command(
    help="Add a remote user's fork. This feature only works with github.com, on public repos and ssh "
    "remotes. Requires the Git clone ssh/url string."
)
@click.argument("fork_path")
@click.option("-p", "--path", envvar=f"{PREFIX}_PATH", help="Base path for repos")
def fork(fork_path, path):
if path is None:
print(path_message)
exit(1)
grab.fork(fork_path, path)
@grab_cli.command(
"path",
help="Get the system path for a Repo. The REPO is a combination of "
"org/repo in the same format given by the list command or the line number given by the "
"list command.",
)
@click.argument("repo")
def path_to_repo(repo):
grab.path_to_repo(repo)
|
from django.core.management.base import BaseCommand
from django.db import models, transaction
from typing import Any, Optional
from api.apps.users.models import User
from api.apps.users.factory import (AdminUser,
AdminProfile,
ActiveUser,
ActiveProfile,
InactiveProfile,
InactiveUser,
)
ACTIVE = 15
INACTIVE = 4
class Command(BaseCommand):
help = "Generates test data for Users and related profiles"
models = [
AdminUser,
AdminProfile,
ActiveUser,
ActiveProfile,
InactiveUser,
InactiveProfile,
]
@transaction.atomic
def handle(self, *args: Any, **options: Any) -> Optional[str]:
if User.objects.count() > 1:
self.stdout.write("Users seem to already exist")
exit("exiting.")
self.stdout.write("Creating test data.")
self.stdout.write("Creating active users.")
for _ in range(ACTIVE):
active_user = ActiveUser()
self.stdout.write("Creating inactive users.")
for _ in range(INACTIVE):
inactive_user = InactiveUser()
|
# (C) Datadog, Inc. 2019
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
# 1st party.
import argparse
import re
# 3rd party.
from tuf.exceptions import UnknownTargetError
# 2nd party.
from .download import REPOSITORY_URL_PREFIX, TUFDownloader
from .exceptions import NonCanonicalVersion, NonDatadogPackage, NoSuchDatadogPackageOrVersion
# Private module functions.
def __is_canonical(version):
'''
https://www.python.org/dev/peps/pep-0440/#appendix-b-parsing-version-strings-with-regular-expressions
'''
P = r'^([1-9]\d*!)?(0|[1-9]\d*)(\.(0|[1-9]\d*))*((a|b|rc)(0|[1-9]\d*))?(\.post(0|[1-9]\d*))?(\.dev(0|[1-9]\d*))?$'
return re.match(P, version) is not None
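# Illustrative examples: '1.2.3', '1.2.3rc1' and '1.2.3.post1.dev2' are canonical;
# 'v1.2.3', '1.02.3' and '1.2.3-rc1' are not.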
def __get_wheel_distribution_name(standard_distribution_name):
# https://www.python.org/dev/peps/pep-0491/#escaping-and-unicode
    return re.sub('[^\\w\\d.]+', '_', standard_distribution_name, flags=re.UNICODE)
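# Illustrative mapping (assumed example): 'datadog-active-directory' becomes
# 'datadog_active_directory' (runs of non word/digit/dot characters collapse to '_').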
# Public module functions.
def download():
parser = argparse.ArgumentParser()
parser.add_argument(
'standard_distribution_name', type=str, help='Standard distribution name of the desired Datadog check.'
)
parser.add_argument(
'--repository', type=str, default=REPOSITORY_URL_PREFIX, help='The complete URL prefix for the TUF repository.'
)
parser.add_argument('--version', type=str, default=None, help='The version number of the desired Datadog check.')
parser.add_argument(
'-v', '--verbose', action='count', default=0, help='Show verbose information about TUF and in-toto.'
)
args = parser.parse_args()
repository_url_prefix = args.repository
standard_distribution_name = args.standard_distribution_name
version = args.version
verbose = args.verbose
if not standard_distribution_name.startswith('datadog-'):
raise NonDatadogPackage(standard_distribution_name)
else:
wheel_distribution_name = __get_wheel_distribution_name(standard_distribution_name)
tuf_downloader = TUFDownloader(repository_url_prefix=repository_url_prefix, verbose=verbose)
if not version:
version = tuf_downloader.get_latest_version(standard_distribution_name, wheel_distribution_name)
else:
if not __is_canonical(version):
raise NonCanonicalVersion(version)
target_relpath = 'simple/{}/{}-{}-py2.py3-none-any.whl'.format(
standard_distribution_name, wheel_distribution_name, version
)
try:
target_abspath = tuf_downloader.download(target_relpath)
except UnknownTargetError:
raise NoSuchDatadogPackageOrVersion(standard_distribution_name, version)
print(target_abspath) # pylint: disable=print-statement
|
from werkzeug.contrib.cache import SimpleCache
class BasicCache:
cache = SimpleCache()
def set(self, key, value):
self.cache.set(key, value, timeout=50 * 1000)
def get(self, key):
return self.cache.get(key=key)
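# Illustrative usage (not exercised elsewhere in this module):
#     cache = BasicCache()
#     cache.set('token', 'abc123')
#     cache.get('token')  # -> 'abc123' until the entry's timeout expires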
|
import unittest
from pymocky.models.header_matcher import HeaderMatcher
class HeaderMatcherTests(unittest.TestCase):
def test_dict_headers(self):
headers = {
"Content-Type": "application/json",
"Content-Length": "123",
}
other_headers = {
"Content-Type": "application/json",
"Content-Length": "123",
}
hm = HeaderMatcher(headers)
matches = hm.matches(other_headers)
self.assertEqual(matches, True)
def test_dict_headers_not_match(self):
headers = {
"Content-Type": "application/json",
}
other_headers = {
"Content-Length": "123",
}
hm = HeaderMatcher(headers)
matches = hm.matches(other_headers)
self.assertEqual(matches, False)
def test_list_headers(self):
headers = [
"Content-Type",
"Content-Length",
]
other_headers = {
"Content-Type": "application/json",
"Content-Length": "123",
}
hm = HeaderMatcher(headers)
matches = hm.matches(other_headers)
self.assertEqual(matches, True)
def test_list_headers_not_match(self):
headers = [
"Content-Type",
]
other_headers = {
"Content-Length": "123",
}
hm = HeaderMatcher(headers)
matches = hm.matches(other_headers)
self.assertEqual(matches, False)
def test_string_headers(self):
headers = "Content-Type"
other_headers = {
"Content-Type": "application/json",
"Content-Length": "123",
}
hm = HeaderMatcher(headers)
matches = hm.matches(other_headers)
self.assertEqual(matches, True)
def test_string_headers_not_match(self):
headers = "Content-Type"
other_headers = {
"Content-Length": "123",
}
hm = HeaderMatcher(headers)
matches = hm.matches(other_headers)
self.assertEqual(matches, False)
def test_header_key_not_match(self):
headers = ["Content-Type", "application/json"]
other_headers = {
"Content-Length": "123",
}
hm = HeaderMatcher(headers)
match = hm.header_match(headers, other_headers)
self.assertEqual(match, False)
|
# coding=utf-8
import sys
if sys.version_info > (3, 0):
from html2jirawiki.html2jirawiki import html_to_jira_wiki, ATX, ATX_CLOSED
else:
from html2jirawiki import html_to_jira_wiki, ATX, ATX_CLOSED
|
def coords2display(p,
valbounds=[-46.702880859375, -23.569022144054955, \
-46.69189453125, -23.57405696664267],
imgsize=[512, 256]):
pnormalized = [(p[0] - valbounds[0]) / (valbounds[2]-valbounds[0]),
(p[1] - valbounds[1]) / (valbounds[3]-valbounds[1])]
pnew = [ pnormalized[0]*imgsize[0], pnormalized[1]*imgsize[1] ]
return pnew
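# Worked example (illustrative): the midpoint of the default valbounds normalizes to
# (0.5, 0.5), so coords2display maps it to approximately [256.0, 128.0] for the default
# 512x256 image size.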
|
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
class RequestExceededError(Exception):
pass
|
from flask import Blueprint
bp = Blueprint('dashboard', __name__, template_folder='templates')
from pugsley.dashboard import routes
|
import os
import importlib
import globals as G
for f in os.listdir(G.local + "/mods/mcpython/Commands"):
if os.path.isfile(G.local + "/mods/mcpython/Commands/" + f) and not f in [
"__init__.py"
]:
name = f.split(".")[0]
locals()[name] = importlib.import_module("Commands." + name)
|
# Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Created on Jan 21, 2020
@author: alfoa, wangc
SGD Regressor
"""
#Internal Modules (Lazy Importer)--------------------------------------------------------------------
#Internal Modules (Lazy Importer) End----------------------------------------------------------------
#External Modules------------------------------------------------------------------------------------
#External Modules End--------------------------------------------------------------------------------
#Internal Modules------------------------------------------------------------------------------------
from SupervisedLearning.ScikitLearn import ScikitLearnBase
from utils import InputData, InputTypes
#Internal Modules End--------------------------------------------------------------------------------
class SGDRegressor(ScikitLearnBase):
"""
SGD Regressor
"""
info = {'problemtype':'regression', 'normalize':True}
def __init__(self):
"""
Constructor that will appropriately initialize a supervised learning object
@ In, None
@ Out, None
"""
super().__init__()
import sklearn
import sklearn.linear_model
self.model = sklearn.linear_model.SGDRegressor
@classmethod
def getInputSpecification(cls):
"""
Method to get a reference to a class that specifies the input data for
class cls.
@ In, cls, the class for which we are retrieving the specification
@ Out, inputSpecification, InputData.ParameterInput, class to use for
specifying input of cls.
"""
specs = super(SGDRegressor, cls).getInputSpecification()
specs.description = r"""The \xmlNode{SGDRegressor} implements regularized linear models with stochastic
                        gradient descent (SGD) learning for regression: the gradient of the loss is estimated one sample at
a time and the model is updated along the way with a decreasing strength schedule
(aka learning rate). For best results using the default learning rate schedule, the
data should have zero mean and unit variance.
This implementation works with data represented as dense or sparse arrays of floating
point values for the features. The model it fits can be controlled with the loss parameter;
                        by default, it fits an ordinary least squares linear regression model.
The regularizer is a penalty added to the loss function that shrinks model parameters towards
the zero vector using either the squared euclidean norm L2 or the absolute norm L1 or a
combination of both (Elastic Net). If the parameter update crosses the 0.0 value because
of the regularizer, the update is truncated to $0.0$ to allow for learning sparse models and
achieve online feature selection.
This implementation works with data represented as dense arrays of floating point values for the features.
\zNormalizationPerformed{SGDRegressor}
"""
specs.addSub(InputData.parameterInputFactory("loss", contentType=InputTypes.makeEnumType("loss", "lossType",['squared_loss', 'huber','epsilon_insensitive','squared_epsilon_insensitive']),
descr=r"""The loss function to be used.
The ``squared\_loss'' refers to the ordinary least squares fit. ``huber'' modifies ``squared\_loss'' to focus less on getting outliers correct by
switching from squared to linear loss past a distance of epsilon. ``epsilon\_insensitive'' ignores errors less than epsilon and is linear past
that; this is the loss function used in SVR. ``squared\_epsilon\_insensitive'' is the same but becomes squared loss past a tolerance of epsilon.
""", default='squared_loss'))
specs.addSub(InputData.parameterInputFactory("penalty", contentType=InputTypes.makeEnumType("penalty", "penaltyType",['l2', 'l1', 'elasticnet']),
descr=r"""The penalty (aka regularization term) to be used. Defaults to ``l2'' which is the standard regularizer for linear SVM models.
``l1'' and ``elasticnet'' might bring sparsity to the model (feature selection) not achievable with ``l2''.""", default='l2'))
specs.addSub(InputData.parameterInputFactory("alpha", contentType=InputTypes.FloatType,
descr=r"""Constant that multiplies the regularization term. The higher the value, the stronger the regularization. Also used to compute
                                                 the learning rate when learning\_rate is set to ``optimal''.""", default=0.0001))
specs.addSub(InputData.parameterInputFactory("l1_ratio", contentType=InputTypes.FloatType,
descr=r"""The Elastic Net mixing parameter, with $0 <= l1\_ratio <= 1$. $l1\_ratio=0$ corresponds to L2 penalty, $l1\_ratio=1$ to L1.
Only used if penalty is ``elasticnet''.""", default=0.15))
specs.addSub(InputData.parameterInputFactory("fit_intercept", contentType=InputTypes.BoolType,
descr=r"""Whether the intercept should be estimated or not. If False,
the data is assumed to be already centered.""", default=True))
specs.addSub(InputData.parameterInputFactory("max_iter", contentType=InputTypes.IntegerType,
descr=r"""The maximum number of passes over the training data (aka epochs).""", default=1000))
specs.addSub(InputData.parameterInputFactory("tol", contentType=InputTypes.FloatType,
descr=r"""The stopping criterion. If it is not None, training will stop when $(loss > best\_loss - tol)$ for $n\_iter\_no\_change$
consecutive epochs.""", default=1e-3))
specs.addSub(InputData.parameterInputFactory("shuffle", contentType=InputTypes.BoolType,
                                                 descr=r"""Whether or not the training data should be shuffled after each epoch""", default=True))
specs.addSub(InputData.parameterInputFactory("epsilon", contentType=InputTypes.FloatType,
descr=r"""Epsilon in the epsilon-insensitive loss functions; only if loss is ``huber'', ``epsilon\_insensitive'', or
``squared\_epsilon\_insensitive''. For ``huber'', determines the threshold at which it becomes less important to get the
prediction exactly right. For epsilon-insensitive, any differences between the current prediction and the correct label
are ignored if they are less than this threshold.""", default=0.1))
specs.addSub(InputData.parameterInputFactory("learning_rate", contentType=InputTypes.makeEnumType("learning_rate", "learningType",['constant', 'optimal', 'invscaling','adaptive']),
descr=r"""The learning rate schedule:
\begin{itemize}
\item constant: $eta = eta0$
\item optimal: $eta = 1.0 / (alpha * (t + t0))$ where t0 is chosen by a heuristic proposed by Leon Bottou.
\item invscaling: $eta = eta0 / pow(t, power\_t)$
\item adaptive: $eta = eta0$, as long as the training keeps decreasing. Each time n\_iter\_no\_change consecutive epochs fail
to decrease the training loss by tol or fail to increase validation score by tol if early\_stopping is True, the current
learning rate is divided by 5.
\end{itemize}
""", default='optimal'))
specs.addSub(InputData.parameterInputFactory("eta0", contentType=InputTypes.FloatType,
descr=r"""The initial learning rate for the ``constant'', ``invscaling'' or ``adaptive'' schedules. The default value is 0.0
as eta0 is not used by the default schedule ``optimal''.""", default=0.0))
specs.addSub(InputData.parameterInputFactory("power_t", contentType=InputTypes.FloatType,
descr=r"""The exponent for inverse scaling learning rate.""", default=0.5))
specs.addSub(InputData.parameterInputFactory("early_stopping", contentType=InputTypes.BoolType,
                                                 descr=r"""Whether to use early stopping to terminate training when validation score is not
                                                 improving. If set to True, it will automatically set aside a fraction of training
data as validation and terminate training when validation score is not improving by at least
tol for n\_iter\_no\_change consecutive epochs.""", default=False))
specs.addSub(InputData.parameterInputFactory("validation_fraction", contentType=InputTypes.FloatType,
descr=r"""The proportion of training data to set aside as validation set for early stopping.
Must be between 0 and 1. Only used if early\_stopping is True.""", default=0.1))
specs.addSub(InputData.parameterInputFactory("n_iter_no_change", contentType=InputTypes.IntegerType,
descr=r"""Number of iterations with no improvement to wait before early stopping.""", default=5))
specs.addSub(InputData.parameterInputFactory("random_state", contentType=InputTypes.IntegerType,
descr=r"""Used to shuffle the training data, when shuffle is set to
True. Pass an int for reproducible output across multiple function calls.""",
default=None))
specs.addSub(InputData.parameterInputFactory("verbose", contentType=InputTypes.IntegerType,
descr=r"""The verbosity level""", default=0))
specs.addSub(InputData.parameterInputFactory("warm_start", contentType=InputTypes.BoolType,
descr=r"""When set to True, reuse the solution of the previous call
to fit as initialization, otherwise, just erase the previous solution.""", default=False))
specs.addSub(InputData.parameterInputFactory("average", contentType=InputTypes.BoolType,
                                                 descr=r"""When set to True, computes the averaged SGD weights across
all updates and stores the result in the coef_ attribute.""", default=False))
return specs
def _handleInput(self, paramInput):
"""
Function to handle the common parts of the distribution parameter input.
@ In, paramInput, ParameterInput, the already parsed input.
@ Out, None
"""
super()._handleInput(paramInput)
settings, notFound = paramInput.findNodesAndExtractValues(['loss','penalty','alpha','l1_ratio','fit_intercept',
'max_iter','tol','shuffle','epsilon', 'learning_rate',
'eta0','power_t','early_stopping','validation_fraction',
'n_iter_no_change', 'random_state', 'verbose', 'warm_start',
'average'])
# notFound must be empty
assert(not notFound)
self.initializeModel(settings)
|
"""Some plot."""
import random
import matplotlib.pyplot as plt
import numpy as np
def main():
"""Go Main Go."""
(fig, ax) = plt.subplots(1, 1)
x = []
y = []
cnts = []
data = []
for i in range(250):
x.append(i)
data.append(random.randint(0, 100))
std = np.std(data)
avg = np.average(data)
y.append(avg)
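        # Count samples falling outside +/- 2 standard deviations of the running mean,
        # then record the percentage that lies within that band.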
cnt = np.sum(np.where(data > (avg + std * 2), 1, 0))
cnt += np.sum(np.where(data < (avg - std * 2), 1, 0))
cnts.append((len(data) - cnt) / float(len(data)) * 100.0)
ax.plot(x, y)
ax2 = ax.twinx()
ax2.plot(x, cnts, color="r")
ax2.set_ylabel("Percentage within 2 sigma [%]", color="r")
ax2.set_ylim(0, 102)
ax.set_xlabel("Random Sample Size Increase")
ax.set_ylabel("Average", color="b")
ax.set_ylim(0, 102)
ax.set_title("Random Sampling between 0 and 100")
ax.grid(True)
ax.set_yticks([0, 25, 50, 75, 100])
fig.savefig("test.png")
if __name__ == "__main__":
main()
|
from python_specific_patterns.prebound_method_pattern.random import random, set_seed
print(set_seed(100))
print(random())
|
from sqlalchemy import Column, Integer, DateTime, String, ForeignKey
from sqlalchemy.orm import relationship
from app.db.base_class import Base
class Like(Base): # type: ignore
__tablename__ = 'like'
id = Column(Integer, primary_key=True, index=True)
post_id = Column(Integer, ForeignKey('post.id', ondelete='CASCADE'), nullable=False)
ip_address = Column(String(256), nullable=True)
user_agent = Column(String(1999), nullable=True)
created_at = Column(DateTime, nullable=False)
updated_at = Column(DateTime, nullable=True)
post = relationship(
'Post',
back_populates='likes'
)
|
from rest_framework import serializers
from .models import Post
from django.contrib.auth.models import User
class PostSerializer(serializers.ModelSerializer):
class Meta:
fields = ('id', 'text', 'author', 'image', 'pub_date')
model = Post
read_only_fields = ['author'] |
from iclientpy.rest.apifactory import iManagerAPIFactory
from iclientpy.rest.api.node_service import NodeService
import requests
from urllib.parse import quote
import logging
logger = logging.getLogger(__name__)
class MsgHandler:
def __init__(self, user_access_imgr_url, url, user, password, factory_kls = iManagerAPIFactory):
self._url = url
self._user_access_imgr_url = user_access_imgr_url
to_portal_template = user_access_imgr_url + '/staticFiles/views/apps/iPortalDetail.html?id={id}'
to_server_template = user_access_imgr_url + '/staticFiles/views/apps/iServerDetail2.html?id={id}'
self._to_templates = {'iPortal': to_portal_template, 'iServer': to_server_template}
self._access_href_template = user_access_imgr_url + '/security/sessionid?sessionid={sessionid}&to={to}'
self._user = user
self._password = password
self._factory_kls = factory_kls
self._cmds = {
'list': self.do_list,
'start': self.do_start,
'stop': self.do_stop
}
def get_sessionid(self):
response = requests.post(self._url + '/security/tokens.json', json={'username': self._user, 'password': self._password})
response.raise_for_status()
return response.cookies['JSESSIONID']
def __call__(self, content:str, *args):
try:
content = content.strip()
parts = content.split(' ', maxsplit=1)
fun = self._cmds.get(parts[0].strip(), self.send_help)
return fun(parts[1]) if len(parts) > 1 else fun()
except:
logger.exception('unknown error:' + content + ',' + str(args))
return 'error'
def send_help(self):
return 'list/stop {id}/start {id}'
def _get_node_service(self) -> NodeService:
return self._factory_kls(base_url=self._url, username=self._user, passwd=self._password).node_service() # type:NodeService
def do_list(self):
node_s = self._get_node_service() # type:NodeService
services = node_s.get_services().list
msgs = []
for service in services:
is_online = node_s.get_current_M_PortTCP(service.id).get('value') == '1'
status = '在线' if is_online else '离线'
template = self._to_templates.get(service.type, None) # type:str
if template is None:
address = service.address
else:
to = template.format(id=service.id)
to = quote(to, 'utf8')
address = self._access_href_template.format(sessionid=self.get_sessionid(), to = to)
msg = '{id}:{name}({type})-{status}-<a href="{address}">查看</a>'.format(id = service.id, name = service.name, type = service.type, status = status, address=address)
msgs.append(msg)
return '\n'.join(msgs) if len(msgs) != 0 else 'empty'
def do_start(self, msg: str):
node_s = self._get_node_service() # type:NodeService
result = node_s.start_nodes([msg.strip()])
return '启动{0}成功'.format(msg) if result.isSucceed else '启动{0}失败'.format(msg)
def do_stop(self, msg):
node_s = self._get_node_service() # type:NodeService
result = node_s.stop_nodes([msg.strip()])
return '停止{0}成功'.format(msg) if result.isSucceed else '停止{0}失败'.format(msg)
if __name__ == '__main__':
from iclientpy.rest.cmd.wxreceiver.server import start_server
import sys
argv = [ MsgHandler(*(sys.argv[1:5]))]
argv.extend(sys.argv[5:])
start_server(*argv) |
"""
General user access control methods
"""
from typing import Optional
from sni.user.models import Group, User
# pylint: disable=too-many-return-statements
def is_authorized_to_login(usr: User) -> bool:
"""
    Tells whether a user is authorized to login or not. A user is authorized to
login if the following conditions are satisfied:
* its ``authorized_to_login`` field is ``True`` or if the user belongs to
**at least one** group, corporation, alliance, or coalition whose
``authorized_to_login`` field is ``True``;
* the user's ``authorized_to_login`` field is not ``False``, and none of
the groups, corporation, alliance, or coalitions the user belongs to has
    an ``authorized_to_login`` field set to ``False``.
In addition, the root user is always allowed to login, regardless of the
status of the group he's part of.
"""
if usr.character_id == 0:
return True
if usr.authorized_to_login is False:
return False
authorized_to_login: Optional[bool] = usr.authorized_to_login
if usr.corporation is not None:
if usr.corporation.authorized_to_login is False:
return False
# Note that in Python, (None or True) == True
authorized_to_login = (
authorized_to_login or usr.corporation.authorized_to_login
)
if usr.alliance is not None:
if usr.alliance.authorized_to_login is False:
return False
authorized_to_login = (
authorized_to_login or usr.alliance.authorized_to_login
)
for coa in usr.coalitions():
if coa.authorized_to_login is False:
return False
authorized_to_login = authorized_to_login or coa.authorized_to_login
for grp in Group.objects(members=usr):
if grp.authorized_to_login is False:
return False
authorized_to_login = authorized_to_login or grp.authorized_to_login
# If authorized_to_login is still None, then the result is False
return bool(authorized_to_login)
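# Illustrative outcomes of the rules above: an explicit ``False`` on the user or on any of
# its corporation, alliance, coalitions or groups denies login outright; otherwise any
# explicit ``True`` grants it; if every flag is ``None`` the result defaults to ``False``.
# The root user (character_id == 0) is always allowed.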
|
import cv2 as cv
face_cascade = cv.CascadeClassifier('haarcascade_frontalcatface_extended.xml')
image = cv.imread('enes.png')
gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
faces = face_cascade.detectMultiScale(image=gray, scaleFactor=1.02, minNeighbors=2, minSize=(200, 200))
for (x, y, width, height) in faces:
cv.rectangle(image, (x, y), (x+width, y+height), (0, 255, 0), 3, cv.LINE_8)
cv.imshow('at', image)
cv.waitKey(0)
cv.destroyAllWindows() |
'''
- CHALLENGE 056
- Develop a program that reads the name, age and sex of 4 people. At the end, the program shows:
- The average age of the group.
- The name of the oldest man.
- How many women are under 20 years old
'''
soma_idade = 0
media_idade = 0
maior_idade_homem = 0
nome_mais_velho = ''
totmulher20 = 0
for pessoa in range(1, 5):
print('----- {}ª PESSOA -----'.format(pessoa))
nome = str(input('NOME: ')).strip()
idade = int(input('IDADE: '))
sexo = str(input('SEXO [M/F]:')).strip()
soma_idade += idade
if pessoa == 1 and sexo in 'Mm':
maior_idade_homem = idade
nome_mais_velho = nome
if sexo in 'Mm' and idade > maior_idade_homem:
maior_idade_homem = idade
nome_mais_velho = nome
if sexo in 'Ff' and idade < 20:
totmulher20 += 1
media_idade = soma_idade / 4
print('A média de idade do grupo é de {} anos.'.format(media_idade))
print('O homem mais velho tem {} anos e se chama {}.'.format(maior_idade_homem, nome_mais_velho))
print('No total há {} mulher(es) com menos de 20 anos.'.format(totmulher20))
|
DEBUG = True
SECRET_KEY = 'dev'
|
# allennlp train config.json -s res --include-package packages --force
from typing import Iterator, List, Dict
import torch
import torch.optim as optim
import numpy as np
import pandas as pd
from allennlp.data import Instance
from allennlp.data.fields import TextField, SequenceLabelField
from allennlp.data.dataset_readers import DatasetReader
from allennlp.common.file_utils import cached_path
from allennlp.data.token_indexers import TokenIndexer, SingleIdTokenIndexer
from allennlp.data.tokenizers import Token
from allennlp.data.vocabulary import Vocabulary
from allennlp.models import Model
from allennlp.modules.text_field_embedders import TextFieldEmbedder, BasicTextFieldEmbedder
from allennlp.modules.token_embedders import Embedding
# from allennlp.modules.seq2seq_encoders import Seq2SeqEncoder, PytorchSeq2SeqWrapper
from allennlp.modules.seq2vec_encoders import Seq2VecEncoder
from allennlp.nn.util import get_text_field_mask, sequence_cross_entropy_with_logits
from allennlp.training.metrics import CategoricalAccuracy
from allennlp.data.iterators import BucketIterator
from allennlp.training.trainer import Trainer
from allennlp.predictors import SentenceTaggerPredictor
from myutils import label_cols
from myutils import device
from myutils import stats_dim
from myutils import hidden_size
torch.manual_seed(1)
@Model.register('quora_insincere_classification')
class BaselineModel(Model):
def __init__(self,
text_field_embedder: TextFieldEmbedder,
encoder: Seq2VecEncoder,
encoder_cnn: Seq2VecEncoder,
vocab: Vocabulary) -> None:
super().__init__(vocab)
self.word_embeddings = text_field_embedder
self.encoder = encoder
# self.encoder_cnn = encoder_cnn
self.encoder_cnn = torch.nn.Conv1d(in_channels=1,out_channels=64,kernel_size=2)
self.max_pooling = torch.nn.MaxPool1d(kernel_size=hidden_size-1, stride=1, padding=0)
self.hidden = torch.nn.Linear(64+stats_dim, len(label_cols))
# self.output = torch.nn.Sigmoid()
# This loss combines a `Sigmoid` layer and the `BCELoss` in one single class
# self.accuracy = torch.nn.BCEWithLogitsLoss()
self.loss = torch.nn.BCEWithLogitsLoss()
def forward(self,
tokens: Dict[str, torch.Tensor],
stats,
label: torch.Tensor = None) -> Dict[str, torch.Tensor]:
# embeddings
mask = get_text_field_mask(tokens)
embeddings = self.word_embeddings(tokens)
N = embeddings.shape[0]
# print('embeddings',embeddings.shape)
# bi-LSTM
encoder_after_lstm = self.encoder(embeddings, mask)
# print('encoder_after_lstm',encoder_after_lstm.shape)
# CNN
encoder_after_cnn = self.encoder_cnn(encoder_after_lstm.view(N,1,hidden_size))
# print('encoder_after_cnn',encoder_after_cnn.shape)
encoder_after_pooling = self.max_pooling(encoder_after_cnn)
# print('encoder_after_pooling',encoder_after_pooling.shape)
encoder_after_pooling = torch.squeeze(encoder_after_pooling,2)
# print('reshape',encoder_after_pooling.shape)
# concatenate
# stats_tensor = torch.FloatTensor(stats)
# print('stats_tensor',stats_tensor.shape)
dense = torch.cat((encoder_after_pooling,stats),dim=1) # concatenate horizontally
# print('dense',dense.shape)
# DNN
cls_logits = self.hidden(dense)
# print('cls_logits',cls_logits.shape)
# print(cls_logits)
# res = self.output(cls_logits)
# output = {"res": cls_logits, "prediction": np.argmax(cls_logits,axis=0)}
output = {"class_logits": cls_logits}
if label is not None:
# self.accuracy(tag_logits, label, mask)
output["loss"] = self.loss(cls_logits, label)
return output
# def get_metrics(self, reset: bool = False) -> Dict[str, float]:
# return {"accuracy": self.accuracy.get_metric(reset)}
|
#!/usr/bin/env python
import sys
import os
import subprocess
import string
import time
import datetime
import shutil
import stat
#import utils
if len(sys.argv) != 4:
print ("input params error!")
os._exit(1)
src_media_url=sys.argv[1]
trans_profile=sys.argv[2]
dst_format=sys.argv[3]
FFMPEG=""
mp4info=""
mp4fragment=""
python_2_7=""
mp4_dash_script=""
mp4_tool_path=""
py_path=""
work_path=""
dst_video_path=""
dst_hls_path=""
dst_dash_path=""
def prepare():
global FFMPEG
global mp4fragment
global mp4_dash_script
global python_2_7
global mp4_tool_path
global py_path
global work_path
global dst_video_path
global dst_hls_path
global dst_dash_path
print ("===prepare===")
py_path=os.path.dirname(os.path.realpath(__file__))
str_t="py-->%s"%(py_path)
print (str_t)
work_path = os.path.dirname(py_path)
str_t = "work_path-->%s"%(work_path)
print (str_t)
dst_video_path = os.path.join(work_path,'transcode_root')
if os.path.exists(dst_video_path):
shutil.rmtree(dst_video_path)
os.makedirs(dst_video_path)
dst_hls_path = os.path.join(dst_video_path,'hls')
if os.path.exists(dst_hls_path):
shutil.rmtree(dst_hls_path)
os.makedirs(dst_hls_path)
dst_dash_path = os.path.join(dst_video_path,'dash')
if os.path.exists(dst_dash_path):
shutil.rmtree(dst_dash_path)
os.makedirs(dst_dash_path)
FFMPEG= os.path.join(work_path,'ffmpeg')
os.chmod(FFMPEG, stat.S_IRWXU|stat.S_IRGRP|stat.S_IROTH)
mp4_tool_path = os.path.join(work_path,'mp4tools')
mp4fragment = os.path.join(mp4_tool_path,'mp4fragment')
mp4_dash_script = os.path.join(mp4_tool_path, 'mp4-dash.py')
python_2_7 = os.path.join(mp4_tool_path, 'python2.7')
def trans_param_set(profile):
trans_param = ''
if profile == 'biaoqing':
video_param = " -pix_fmt yuv420p -filter_complex 'yadif,fps=25, scale=640:-1:flags=fast_bilinear' -c:v libx264 -b:v 300K -preset slow "
audio_param = " -c:a libfdk_aac -b:a 64k -ar 44100 -profile:a aac_he "
format_param = " -f mp4 "
elif profile == 'gaoqing':
video_param = " -pix_fmt yuv420p -filter_complex 'yadif,fps=25, scale=960:-1:flags=fast_bilinear' -c:v libx264 -b:v 500K -preset slow "
audio_param = " -c:a libfdk_aac -b:a 64k -ar 44100 -profile:a aac_he "
format_param = " -f mp4 "
trans_param = '{0} {1} {2} '.format(video_param, audio_param, format_param)
return trans_param
def transcode(video_src,profile):
global FFMPEG
global dst_video_path
trans_param = trans_param_set(profile)
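    # Two-pass encoding: pass 1 only gathers rate-control statistics (its output is
    # discarded to /dev/null); pass 2 produces the actual MP4 using those statistics.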
dst_mp4_video = "/dev/null"
pass1_param = trans_param + "-pass 1"
ff_trans_cmd ='{0} -y -i "{1}" {2} {3}'.format(FFMPEG, video_src, pass1_param, dst_mp4_video )
print(ff_trans_cmd)
subprocess.call(ff_trans_cmd, shell=True)
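    # Second pass: encode the final MP4 using the statistics gathered in the first pass.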
pass2_param = trans_param + "-pass 2"
dst_mp4_video = dst_video_path + "/dst_video.mp4"
ff_trans_cmd ='{0} -y -i "{1}" {2} {3}'.format(FFMPEG, video_src, pass2_param, dst_mp4_video )
print(ff_trans_cmd)
subprocess.call(ff_trans_cmd, shell=True)
return dst_mp4_video
def generate_hls(mp4_video):
global FFMPEG
global dst_hls_path
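    # Stream-copy the MP4 into 10-second MPEG-TS segments plus an m3u8 playlist
    # (h264_mp4toannexb is needed when moving H.264 from MP4 into TS).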
hls_cmd =" -c copy -bsf:v h264_mp4toannexb -flags -global_header -map 0:0 -map 0:1 -f segment -segment_list {0}/video.m3u8 -segment_time 10 {0}/out_video_%d.ts ".format(dst_hls_path)
ff_hls_cmd = '{0} -y -i "{1}" {2} '.format(FFMPEG, mp4_video, hls_cmd )
print(ff_hls_cmd)
subprocess.call(ff_hls_cmd, shell=True)
return
def generate_raw_dash(mp4_video):
global mp4_tool_path
global mp4fragment
global mp4_dash_script
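    # mp4fragment produces a fragmented MP4, which mp4-dash.py then splits into
    # DASH segments and a manifest under dst_dash_path.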
mp4fragment_tmp_mp4 = os.path.join(dst_video_path, 'output_fragment.mp4')
mp4fragment_cmd = '{0} {1} {2}'.format(mp4fragment, mp4_video,mp4fragment_tmp_mp4)
print ('mp4fragment_cmd is: {0}'.format(mp4fragment_cmd))
subprocess.call(mp4fragment_cmd, shell=True)
if os.path.exists(dst_dash_path):
shutil.rmtree(dst_dash_path)
mp4_to_dash_cmd = '{0} {1} --exec-dir={2} --use-segment-timeline ' \
' -o {3} {4}'.format(python_2_7,mp4_dash_script,mp4_tool_path,dst_dash_path,mp4fragment_tmp_mp4)
print ('mp4 to dash cmd is {0}'.format(mp4_to_dash_cmd))
subprocess.call(mp4_to_dash_cmd, shell=True)
os.remove(mp4fragment_tmp_mp4)
return
def transcode_package():
global src_media_url
global trans_profile
global dst_format
#mkdir,set global variable
prepare()
mp4_video = transcode(src_media_url,trans_profile)
generate_hls(mp4_video)
generate_raw_dash(mp4_video)
return
if __name__ == "__main__":
transcode_package()
|
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 15 11:25:48 2018
@author: 89288
"""
import pymongo
class DataArranger():
def __init__(self):
        '''
        Connect to the local MongoDB instance
        '''
self.client = pymongo.MongoClient('localhost',27017)
self.database = self.client.MultiThread
    def save_reload_url(self, dic_url):
        '''
        Save a URL that needs to be re-crawled to the database
        '''
        collection_reload_urls = self.database['reload_urls']
        collection_reload_urls.insert_one(dic_url)
    def save_crawled_url(self, url):
        '''
        Save a crawled URL to the database
        '''
        collection_crawled_urls = self.database['crawled_urls']
        collection_crawled_urls.insert_one({'url': url})
    def save_song_information(self, dic_song):
        '''
        Save song information to the database
        '''
        collection_song_information = self.database['song_information']
        collection_song_information.insert_one(dic_song)
    def save_crawl_urls(self, list_urls):
        '''
        Save the list of URLs to be crawled to the database
        '''
        collection_crawl_urls = self.database['crawl_urls']
        collection_crawl_urls.insert_one({'urls': list_urls})
    def read_reload_urls(self):
        '''
        Return the number of URLs that still need to be re-crawled
        '''
        collection_reload_urls = self.database['reload_urls']
        number = collection_reload_urls.count_documents({})
        return number
|
import time
import numpy as np
from fuzzer.lib.queue import FuzzQueue
from fuzzer.lib.queue import Seed
class ImageInputCorpus(FuzzQueue):
"""Class that holds inputs and associated coverage."""
def __init__(self, outdir, israndom, sample_function, cov_num, criteria):
"""Init the class.
Args:
seed_corpus: a list of numpy arrays, one for each input tensor in the
fuzzing process.
sample_function: a function that looks at the whole current corpus and
samples the next element to mutate in the fuzzing loop.
Returns:
Initialized object.
"""
FuzzQueue.__init__(self, outdir, israndom, sample_function, cov_num, criteria)
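        # Bucket table for coverage byte values: 0 and 1 map to themselves, counts of
        # 2-50 collapse to 2, 51-150 to 4, and 151-255 to 128.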
self.loopup = {}
self.loopup[0] = 0
self.loopup[1] = 1
self.loopup.update(self.loopup.fromkeys(range(2, 51), 2))
self.loopup.update(self.loopup.fromkeys(range(51, 151), 4))
self.loopup.update(self.loopup.fromkeys(range(151, 256), 128))
def save_if_interesting(self, seed, data, crash, dry_run=False, suffix=None):
"""Adds item to corpus if it exercises new coverage."""
def class_loop_up(x):
return self.loopup[x]
self.mutations_processed += 1
current_time = time.time()
if dry_run:
coverage = self.compute_cov()
self.dry_run_cov = coverage
if current_time - self.log_time > 2:
self.log_time = current_time
self.log()
describe_op = "src:%06d" % (seed.parent.id) if suffix is None else "src:%s" % (suffix)
if crash:
fn = "%s/crashes/id:%06d,%s.npy" % (self.out_dir, self.uniq_crashes, describe_op)
self.uniq_crashes += 1
self.last_crash_time = current_time
else:
fn = "%s/queue/id:%06d,%s.npy" % (self.out_dir, self.total_queue, describe_op)
if self.has_new_bits(seed) or dry_run:
self.last_reg_time = current_time
if self.sample_type != 'random2' or dry_run:
seed.queue_time = current_time
seed.id = self.total_queue
seed.fname = fn
seed.probability = self.REG_INIT_PROB
self.queue.append(seed)
del seed.coverage
else:
del seed
self.total_queue += 1
else:
del seed
return False
np.save(fn, data)
return True
|
import pytest
@pytest.fixture()
def mongodb_with_members(mongo_testdb):
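    """Populate the members collection with two sample members; drop it on teardown."""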
mongo_testdb["members"].insert_many(
[
{
"_id": 1,
"name": "Duong Thai Minh",
"codeforces_handle": "I_UsedTo_Love_You",
"codeforces": {
"handle": "I_UsedTo_Love_You",
"rating": 1500,
},
},
{
"_id": 2,
"name": "Truong Cong Thanh",
"codeforces_handle": "TYT",
"codeforces": None,
}
]
)
yield mongo_testdb
# teardown
mongo_testdb.drop_collection("members")
|
#
# Copyright (c) 2020 Juniper Networks, Inc. All rights reserved.
#
import logging
from vnc_api.vnc_api import FlowNode
from vnc_api.vnc_api import GlobalSystemConfig
from vnc_api.vnc_api import Project
from vnc_cfg_api_server.tests import test_case
logger = logging.getLogger(__name__)
class TestFlowNode(test_case.ApiServerTestCase):
@classmethod
def setUpClass(cls, *args, **kwargs):
cls.console_handler = logging.StreamHandler()
cls.console_handler.setLevel(logging.DEBUG)
logger.addHandler(cls.console_handler)
super(TestFlowNode, cls).setUpClass(*args, **kwargs)
@classmethod
def tearDownClass(cls, *args, **kwargs):
logger.removeHandler(cls.console_handler)
super(TestFlowNode, cls).tearDownClass(*args, **kwargs)
def setUp(self):
super(TestFlowNode, self).setUp()
self.gsc = self.api.global_system_config_read(
GlobalSystemConfig().fq_name)
@property
def api(self):
return self._vnc_lib
def test_flow_node_crud(self):
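        """Create a FlowNode, update it, and read it back to verify the updated fields."""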
project = Project('project-%s' % self.id())
self.api.project_create(project)
fn = FlowNode(name='fn-%s' % self.id(),
parent_obj=self.gsc)
fn.set_flow_node_load_balancer_ip('10.100.0.55')
fn.set_flow_node_ip_address('10.100.1.10')
fn.set_flow_node_inband_interface('fn-test-inband')
uuid = self.api.flow_node_create(fn)
fn.set_uuid(uuid)
fn.set_display_name('fn test')
self.api.flow_node_update(fn)
updated_fn = self.api.flow_node_read(id=fn.get_uuid())
for attr in ['flow_node_load_balancer_ip',
'flow_node_ip_address',
'flow_node_inband_interface']:
self.assertEqual(getattr(fn, attr), getattr(updated_fn, attr))
|
# coding=utf-8
class Test:
def class_func(self, p):
        # self refers to the instance object of the class
print self # <__main__.Test instance at 0x01D98E40>
print self.__class__ # __main__.Test
print p
t = Test()
t.class_func("p") |
print('{:^20}'.format('BRASILEIRÃO 2019'))
classificacao = ('Flamengo', 'Santos', 'Palmeiras', 'Grêmio', 'Athletico-PR', 'São Paulo', 'Internacional',
'Corinthians', 'Fortaleza', 'Goiás', 'Bahia', 'Vasco da Gama', 'Atlético-MG', 'Fluminense',
'Botafogo', 'Ceará', 'Cruzeiro', 'CSA', 'Chapecoense', 'Avaí')
print(f'\nOs 5 primeiros colocados são: {classificacao[:5]}.')
print(f'\nOs 4 últimos colocados são: {classificacao[16:]}')
print(f'\nEm ordem alfabética, a Classificação fica: \n'
f'{sorted(classificacao)}')
print(f'O Chapecoense está na {classificacao.index("Chapecoense")+1}ª posição.') |
from asyncio import CancelledError
from contextlib import suppress
from typing import AsyncIterator, Callable, Iterator
from uuid import uuid4
import aiobotocore
import pytest
from aiobotocore.client import AioBaseClient
from localstack.services import infra
from pytest_lazyfixture import lazy_fixture
from src.room_store.common import NoSuchRoomError
from src.room_store.room_archive import RoomArchive
from src.room_store.s3_room_archive import S3RoomArchive
from src.util.async_util import async_collect
from tests.static_fixtures import VALID_ACTION, ANOTHER_VALID_ACTION
pytestmark = pytest.mark.asyncio
@pytest.fixture(scope='session')
def s3_url() -> Iterator[str]:
infra.start_infra(asynchronous=True, apis=['s3'])
yield 'http://localhost:4566'
infra.stop_infra()
@pytest.fixture
async def s3_client(s3_url: str) -> AsyncIterator[AioBaseClient]:
s3_client_context = aiobotocore.get_session().create_client(
's3',
region_name='us-east-1',
endpoint_url=s3_url,
aws_access_key_id='test',
aws_secret_access_key='test',
)
with suppress(CancelledError):
async with s3_client_context as s3_client:
yield s3_client
@pytest.fixture
async def s3_room_archive(s3_client: AioBaseClient) -> S3RoomArchive:
bucket = str(uuid4())
await s3_client.create_bucket(Bucket=bucket)
return S3RoomArchive(s3_client, bucket)
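# Decorator that parametrizes a test over each RoomArchive implementation
# (the in-memory archive and the S3-backed archive) via lazy fixtures.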
def any_room_archive(func: Callable) -> Callable:
return pytest.mark.parametrize(
'room_archive',
[
lazy_fixture('memory_room_archive'),
lazy_fixture('s3_room_archive'),
],
)(func)
@any_room_archive
async def test_get_all_room_ids(room_archive: RoomArchive) -> None:
await room_archive.write('test_room_id', [])
await room_archive.write('another_room_id', [])
room_ids = await async_collect(room_archive.get_all_room_ids())
    assert sorted(room_ids) == sorted(['test_room_id', 'another_room_id'])
@any_room_archive
async def test_write_and_read(room_archive: RoomArchive) -> None:
await room_archive.write('test_room_id', [VALID_ACTION])
assert list(await room_archive.read('test_room_id')) == [VALID_ACTION]
@any_room_archive
async def test_room_exists(room_archive: RoomArchive) -> None:
await room_archive.write('test_room_id', [])
assert await room_archive.room_exists('test_room_id')
assert not await room_archive.room_exists('nonexistent_room')
@any_room_archive
async def test_delete(room_archive: RoomArchive) -> None:
await room_archive.write('test_room_id', [])
await room_archive.delete('test_room_id')
assert not await room_archive.room_exists('test_room_id')
@any_room_archive
async def test_read_nonexistent_room(room_archive: RoomArchive) -> None:
with pytest.raises(NoSuchRoomError):
await room_archive.read('nonexistent_room')
@any_room_archive
async def test_read_empty_room(room_archive: RoomArchive) -> None:
await room_archive.write('test_room_id', [])
assert list(await room_archive.read('test_room_id')) == []
@any_room_archive
async def test_delete_nonexistent_room(room_archive: RoomArchive) -> None:
await room_archive.delete('nonexistent_room')
@any_room_archive
async def test_overwrite_room(room_archive: RoomArchive) -> None:
await room_archive.write('test_room_id', [VALID_ACTION])
await room_archive.write('test_room_id', [ANOTHER_VALID_ACTION])
assert list(await room_archive.read('test_room_id')) == [ANOTHER_VALID_ACTION]
|
import pandas as pd
def a_function(foo):
""" A function
Args:
foo (integer) : foo
Returns:
bar (integer) : bar
"""
return 1 |
import PyWave
import pytest
@pytest.fixture
def wf():
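    """Open the reference wave file for the test and close it when the test finishes."""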
PATH = "path/to/a/wave/file.wav"
wavefile = PyWave.open(PATH)
yield wavefile
wavefile.close()
def test_read(wf):
wfile = wf.read(1)
assert wf is not None
assert isinstance(wf, object)
# due to the read(1) we should have a warning
assert wf.data_position == 8 # 32bit float, 2 channels = 8 bytes minimum read for 1 sample
assert (len(wf.messages) > 0)
assert wfile is not None
assert len(wfile) == wf.block_align # make sure we read on BlockAlignments
def test_metadata(wf):
assert wf.format == 0x0003 # WAVE_FORMAT_IEEE_FLOAT
assert wf.channels == 2
assert wf.frequency == 44100
assert wf.samples_per_sec == wf.frequency
assert wf.bitrate == 2822400
assert wf.bitrate == wf.average_bytes_per_sec * 8
assert wf.bits_per_sample == 32 # 1 sample = 32 bit float
assert wf.bits_per_sample == wf.bytes_per_sample * 8
assert wf.block_align == wf.bytes_per_sample * wf.channels # 32bit float = 4 bytes * 2 channels = 8 bytes minimum read for 1 sample
assert wf.data_length == wf.samples * wf.channels * wf.bytes_per_sample # 99328 * 2 channels * 4 bytes per sample
assert wf.samples == 99328
assert wf.samples == (wf.data_length // wf.bytes_per_sample // wf.channels)
assert wf.data_starts_at == 88
assert wf.data_length == ( wf.end_of_data - wf.data_starts_at )
waveformatcode, waveformatname = wf.format_name
assert waveformatcode == 'WAVE_FORMAT_IEEE_FLOAT'
assert waveformatname == 'IEEE Float'
def test_static_methods(wf):
assert wf.get_format_name(wf.format) == wf.format_name
assert wf.get_channel_layout(0b111111,6) == ['Front Left', 'Front Right', 'Front Center', 'Low Frequency', 'Back Left (Surround Back Left)', 'Back Right (Surround Back Right)']
assert wf.get_channel_setup_name(0b111111, 6) == '5.1'
# test if the __del__ on the class does not fail when the file cannot be opened and __init__ fails
# also test whether we get the right exception back
#
# an Attribute Error in PyWave.__del__ indicates failure to correctly initialize variables before opening a wavefile.
# a FileNotFoundError is the expected outcome.
def test_delete():
with pytest.raises(FileNotFoundError):
wavefile = PyWave.open("xxxx.yyy")
|
from wtpy.monitor import WtMonSvr
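# Point the monitoring service at the deploy directory and serve it on port 8099;
# the script then blocks until Enter is pressed.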
svr = WtMonSvr(deploy_dir="E:\\deploy")
svr.run(port=8099, bSync=False)
input("press enter key to exit\n") |