ext | sha | content
---|---|---
py | b40c66166450f05463040e1afcd6136eae489dbc | # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from __future__ import unicode_literals
from ..model import Binarize
def test_Binarize_inputs():
input_map = dict(abs=dict(argstr='--abs',
),
args=dict(argstr='%s',
),
bin_col_num=dict(argstr='--bincol',
),
bin_val=dict(argstr='--binval %d',
),
bin_val_not=dict(argstr='--binvalnot %d',
),
binary_file=dict(argstr='--o %s',
genfile=True,
),
count_file=dict(argstr='--count %s',
),
dilate=dict(argstr='--dilate %d',
),
environ=dict(nohash=True,
usedefault=True,
),
erode=dict(argstr='--erode %d',
),
erode2d=dict(argstr='--erode2d %d',
),
frame_no=dict(argstr='--frame %s',
),
ignore_exception=dict(nohash=True,
usedefault=True,
),
in_file=dict(argstr='--i %s',
copyfile=False,
mandatory=True,
),
invert=dict(argstr='--inv',
),
mask_file=dict(argstr='--mask maskvol',
),
mask_thresh=dict(argstr='--mask-thresh %f',
),
match=dict(argstr='--match %d...',
),
max=dict(argstr='--max %f',
xor=['wm_ven_csf'],
),
merge_file=dict(argstr='--merge %s',
),
min=dict(argstr='--min %f',
xor=['wm_ven_csf'],
),
out_type=dict(argstr='',
),
rmax=dict(argstr='--rmax %f',
),
rmin=dict(argstr='--rmin %f',
),
subjects_dir=dict(),
terminal_output=dict(deprecated='1.0.0',
nohash=True,
),
ventricles=dict(argstr='--ventricles',
),
wm=dict(argstr='--wm',
),
wm_ven_csf=dict(argstr='--wm+vcsf',
xor=['min', 'max'],
),
zero_edges=dict(argstr='--zero-edges',
),
zero_slice_edge=dict(argstr='--zero-slice-edges',
),
)
inputs = Binarize.input_spec()
for key, metadata in list(input_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(inputs.traits()[key], metakey) == value
def test_Binarize_outputs():
output_map = dict(binary_file=dict(),
count_file=dict(),
)
outputs = Binarize.output_spec()
for key, metadata in list(output_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(outputs.traits()[key], metakey) == value
|
py | b40c6714a2a336998c01bd20558f69907733e015 | import ast
from typing import Any
from jonahlint.profanity_ast_checker import (
AssignmentChecker,
ClassChecker,
ConstantChecker,
FunctionChecker,
ImportChecker,
)
from jonahlint.profanity_checker import ProfanityChecker
class ProfanityVisitor(ast.NodeVisitor):
def __init__(self, profanity_checker: ProfanityChecker):
self.reports_list = []
self.function_checker = FunctionChecker(profanity_checker=profanity_checker)
self.class_checker = ClassChecker(profanity_checker=profanity_checker)
self.assignment_checker = AssignmentChecker(profanity_checker=profanity_checker)
self.constant_checker = ConstantChecker(profanity_checker=profanity_checker)
self.import_checker = ImportChecker(profanity_checker=profanity_checker)
super().__init__()
def visit_FunctionDef(self, node: ast.FunctionDef) -> Any:
self.reports_list.extend(self.function_checker.check(node))
return self.generic_visit(node)
def visit_AsyncFunctionDef(self, node: ast.AsyncFunctionDef) -> Any:
self.reports_list.extend(self.function_checker.check(node))
return self.generic_visit(node)
def visit_ClassDef(self, node: ast.ClassDef) -> Any:
self.reports_list.extend(self.class_checker.check(node))
return self.generic_visit(node)
def visit_Assign(self, node: ast.Assign) -> Any:
self.reports_list.extend(self.assignment_checker.check(node))
return self.generic_visit(node)
def visit_Constant(self, node: ast.Constant) -> Any:
self.reports_list.extend(self.constant_checker.check(node))
return self.generic_visit(node)
def visit_Import(self, node: ast.Import) -> Any:
self.reports_list.extend(self.import_checker.check(node))
return self.generic_visit(node)
def visit_ImportFrom(self, node: ast.ImportFrom) -> Any:
self.reports_list.extend(self.import_checker.check(node))
return self.generic_visit(node)
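# Minimal usage sketch (not part of the original module): run the visitor over
# a piece of source code and collect its reports. It only relies on what is
# defined above (ProfanityVisitor, its reports_list) plus ast.parse; the
# caller is expected to supply an already-constructed ProfanityChecker.
def _example_collect_reports(source, profanity_checker):
    visitor = ProfanityVisitor(profanity_checker=profanity_checker)
    visitor.visit(ast.parse(source))
    return visitor.reports_list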
|
py | b40c671b9a4a610f61975c5f0cb683adc8db92e8 | import torch
from dropblock import DropBlock3D
from unittest import mock
# noinspection PyCallingNonCallable
def test_block_mask_cube_even():
db = DropBlock3D(block_size=2, drop_prob=0.1)
mask = torch.tensor([[[[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.]],
[[1., 0., 0., 0., 0.],
[0., 0., 0., 1., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.]],
[[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.]],
[[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.]],
[[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.]]]])
expected = torch.tensor([[[[1., 1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1., 1.]],
[[1., 1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1., 1.]],
[[1., 1., 1., 1., 1., 1.],
[1., 0., 0., 1., 1., 1.],
[1., 0., 0., 1., 0., 0.],
[1., 1., 1., 1., 0., 0.],
[1., 1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1., 1.]],
[[1., 1., 1., 1., 1., 1.],
[1., 0., 0., 1., 1., 1.],
[1., 0., 0., 1., 0., 0.],
[1., 1., 1., 1., 0., 0.],
[1., 1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1., 1.]],
[[1., 1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1., 1.]],
[[1., 1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1., 1.]]]])
block_mask = db._compute_block_mask(mask)
assert torch.equal(block_mask, expected)
# noinspection PyCallingNonCallable
def test_block_mask_cube_odd():
db = DropBlock3D(block_size=3, drop_prob=0.1)
mask = torch.tensor([[[[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.]],
[[1., 0., 0., 0., 0.],
[0., 0., 0., 1., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.]],
[[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.]],
[[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.]],
[[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.]]]])
expected = torch.tensor([[[[1., 1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1., 1.]],
[[0., 0., 0., 1., 1., 1.],
[0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0.],
[1., 1., 1., 0., 0., 0.],
[1., 1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1., 1.]],
[[0., 0., 0., 1., 1., 1.],
[0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0.],
[1., 1., 1., 0., 0., 0.],
[1., 1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1., 1.]],
[[0., 0., 0., 1., 1., 1.],
[0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0.],
[1., 1., 1., 0., 0., 0.],
[1., 1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1., 1.]],
[[1., 1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1., 1.]],
[[1., 1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1., 1.]]]])
block_mask = db._compute_block_mask(mask)
assert torch.equal(block_mask, expected)
def test_forward_pass():
db = DropBlock3D(block_size=3, drop_prob=0.1)
block_mask = torch.tensor([[[[1., 1., 1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1., 1., 1.]],
[[0., 0., 0., 1., 1., 1., 1.],
[0., 0., 0., 0., 0., 0., 1.],
[0., 0., 0., 0., 0., 0., 1.],
[1., 1., 1., 0., 0., 0., 1.],
[1., 1., 1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1., 1., 1.]],
[[0., 0., 0., 1., 1., 1., 1.],
[0., 0., 0., 0., 0., 0., 1.],
[0., 0., 0., 0., 0., 0., 1.],
[1., 1., 1., 0., 0., 0., 1.],
[1., 1., 1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1., 1., 1.]],
[[0., 0., 0., 1., 1., 1., 1.],
[0., 0., 0., 0., 0., 0., 1.],
[0., 0., 0., 0., 0., 0., 1.],
[1., 1., 1., 0., 0., 0., 1.],
[1., 1., 1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1., 1., 1.]],
[[1., 1., 1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1., 1., 1.]],
[[1., 1., 1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1., 1., 1.]],
[[1., 1., 1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1., 1., 1.]]]])
db._compute_block_mask = mock.MagicMock(return_value=block_mask)
x = torch.ones(10, 10, 7, 7, 7)
h = db(x)
expected = block_mask * block_mask.numel() / block_mask.sum()
expected = expected[:, None, :, :, :].expand_as(x)
assert tuple(h.shape) == (10, 10, 7, 7, 7)
assert torch.equal(h, expected)
def test_forward_pass2():
block_sizes = [2, 3, 4, 5, 6, 7, 8]
depths = [5, 6, 8, 10, 11, 14, 15]
heights = [5, 6, 8, 10, 11, 14, 15]
widths = [5, 7, 8, 10, 15, 14, 15]
for block_size, depth, height, width in zip(block_sizes, depths, heights, widths):
dropout = DropBlock3D(0.2, block_size=block_size)
input = torch.randn((5, 20, depth, height, width))
output = dropout(input)
assert tuple(input.shape) == tuple(output.shape)
|
py | b40c695b549e3a7982e8f3ffc4a295bf5cb66fbb | # coding:utf8
"""
Some helpers copied from https://github.com/shidenggui/easyutils/blob/master/easyutils/stock.py
"""
import re
import datetime
import requests
import io
def get_stock_type(stock_code):
"""判断股票ID对应的证券市场
匹配规则
['50', '51', '60', '90', '110'] 为 sh
['00', '13', '18', '15', '16', '18', '20', '30', '39', '115'] 为 sz
['5', '6', '9'] 开头的为 sh, 其余为 sz
:param stock_code:股票ID, 若以 'sz', 'sh' 开头直接返回对应类型,否则使用内置规则判断
:return 'sh' or 'sz'"""
assert type(stock_code) is str, 'stock code need str type'
if stock_code.startswith(("sh", "sz")):
return stock_code[:2]
if stock_code.startswith(
("50", "51", "60", "90", "110", "113", "132", "204")
):
return "sh"
if stock_code.startswith(
("00", "13", "18", "15", "16", "18", "20", "30", "39", "115", "1318")
):
return "sz"
if stock_code.startswith(("5", "6", "9", "7")):
return "sh"
return "sz"
def get_code_type(code):
"""
判断代码是属于那种类型,目前仅支持 ['fund', 'stock']
:return str 返回code类型, fund 基金 stock 股票
"""
if code.startswith(("00", "30", "60")):
return "stock"
return "fund"
def get_all_stock_codes():
"""获取所有股票 ID"""
all_stock_codes_url = "http://www.shdjt.com/js/lib/astock.js"
grep_stock_codes = re.compile(r"~(\d+)`")
response = requests.get(all_stock_codes_url)
stock_codes = grep_stock_codes.findall(response.text)
return stock_codes
def round_price_by_code(price, code):
"""
根据代码类型[股票,基金] 截取制定位数的价格
:param price: 证券价格
:param code: 证券代码
:return: str 截断后的价格的字符串表示
"""
if isinstance(price, str):
return price
typ = get_code_type(code)
if typ == "fund":
return "{:.3f}".format(price)
return "{:.2f}".format(price)
def get_ipo_info(only_today=False):
import pyquery
response = requests.get(
"http://vip.stock.finance.sina.com.cn/corp/go.php/vRPD_NewStockIssue/page/1.phtml",
headers={"accept-encoding": "gzip, deflate, sdch"},
)
html = response.content.decode("gbk")
html_obj = pyquery.PyQuery(html)
table_html = html_obj("#con02-0").html()
import pandas as pd
df = pd.read_html(
io.StringIO(table_html),
skiprows=3,
converters={"证券代码": str, "申购代码": str},
)[0]
if only_today:
today = datetime.datetime.now().strftime("%Y-%m-%d")
df = df[df["上网发行日期↓"] == today]
return df
if __name__ == '__main__':
df = get_all_stock_codes()
print(df)
|
py | b40c6ab1e969f861b244fee8291b63800bf8eb26 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Testing for Chapter 3
"""
import unittest
from RLBook.Chapter3.Gridworld import GridWorld
class TestGridWorld(unittest.TestCase):
""" Testing the Chapter3 Implementation
"""
def setUp(self):
pass
def tearDown(self):
pass
def test_initialisation(self):
""" Testing the Initialisation of the GridWorld class
"""
# Initialise the Grid World
env = GridWorld(**{"TOL": 1e-8})
env.find_possible_states()
assert len(env.NEXT_STATE) == 5
assert len(env.ACTION_REWARD) == 5
if __name__ == '__main__':
unittest.main(testRunner=unittest.TextTestRunner())
|
py | b40c6ace5174e1cc7ad34c2c3147903effeccc00 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@File : run.py
@Time : 2022/01/23 20:08:39
@Author : Jianwen Chen
@Version : 1.0
@Contact : [email protected]
@License : (C)Copyright 2021-2022, SAIL-Lab
'''
######################################## import area ########################################
import os
from tqdm import tqdm
for name in tqdm(os.listdir(f'./input')):
if name.endswith('.pdb'):
name = name.split('.')[0]
# dssp
os.system(f'./dssp -i ./input/{name}.pdb -o ./output/{name}.dssp')
|
py | b40c6c328c36285ca52a9ee62291b4fb9db7768c |
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.15 (https://github.com/warner/python-versioneer)
import errno
import os
import re
import subprocess
import sys
def get_keywords():
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "$Format:%d$"
git_full = "$Format:%H$"
keywords = {"refnames": git_refnames, "full": git_full}
return keywords
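# Illustrative note (not from the original file): when this file is marked
# export-subst in .gitattributes, "git archive" expands $Format:%d$ to the ref
# names, e.g. " (HEAD -> master, tag: v1.2.3)", and $Format:%H$ to the full
# commit hash; git_versions_from_keywords() below parses the tag out of that.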
class VersioneerConfig:
pass
def get_config():
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "pep440"
cfg.tag_prefix = "''"
cfg.parentdir_prefix = "None"
cfg.versionfile_source = "ogcore/_version.py"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
pass
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
def decorate(f):
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % dispcmd)
print(e)
return None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % dispcmd)
return None
return stdout
def versions_from_parentdir(parentdir_prefix, root, verbose):
# Source tarballs conventionally unpack into a directory that includes
# both the project name and a version string.
dirname = os.path.basename(root)
if not dirname.startswith(parentdir_prefix):
if verbose:
print("guessing rootdir is '%s', but '%s' doesn't start with "
"prefix '%s'" % (root, dirname, parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None}
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
if not keywords:
raise NotThisMethod("no keywords at all, weird")
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%s', no digits" % ",".join(refs-tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %s" % r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None
}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags"}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
# this runs 'git' from the root of the source tree. This only gets called
# if the git-archive 'subst' keywords were *not* expanded, and
# _version.py hasn't already been rewritten with a short version string,
# meaning we're inside a checked out source tree.
if not os.path.exists(os.path.join(root, ".git")):
if verbose:
print("no .git in %s" % root)
raise NotThisMethod("no .git directory")
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
# if there is a tag, this yields TAG-NUM-gHEX[-dirty]
# if there are no tags, this yields HEX[-dirty] (no NUM)
describe_out = run_command(GITS, ["describe", "--tags", "--dirty",
"--always", "--long"],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%s'"
% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out = run_command(GITS, ["rev-list", "HEAD", "--count"],
cwd=root)
pieces["distance"] = int(count_out) # total number of commits
return pieces
def plus_or_dot(pieces):
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
# now build up version string, with post-release "local version
# identifier". Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
# get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
# exceptions:
# 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%d.g%s" % (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
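# Worked example (illustrative values, not from the original file): with
# pieces = {"closest-tag": "1.2.3", "distance": 4, "short": "abcdef0",
# "dirty": True} this renders "1.2.3+4.gabcdef0.dirty"; with distance 0 and a
# clean tree it renders just "1.2.3".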
def render_pep440_pre(pieces):
# TAG[.post.devDISTANCE] . No -dirty
# exceptions:
# 1: no tags. 0.post.devDISTANCE
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post.dev%d" % pieces["distance"]
else:
# exception #1
rendered = "0.post.dev%d" % pieces["distance"]
return rendered
def render_pep440_post(pieces):
# TAG[.postDISTANCE[.dev0]+gHEX] . The ".dev0" means dirty. Note that
# .dev0 sorts backwards (a dirty tree will appear "older" than the
# corresponding clean one), but you shouldn't be releasing software with
# -dirty anyways.
# exceptions:
# 1: no tags. 0.postDISTANCE[.dev0]
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%s" % pieces["short"]
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%s" % pieces["short"]
return rendered
def render_pep440_old(pieces):
# TAG[.postDISTANCE[.dev0]] . The ".dev0" means dirty.
# exceptions:
# 1: no tags. 0.postDISTANCE[.dev0]
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
# TAG[-DISTANCE-gHEX][-dirty], like 'git describe --tags --dirty
# --always'
# exceptions:
# 1: no tags. HEX[-dirty] (note: no 'g' prefix)
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
# TAG-DISTANCE-gHEX[-dirty], like 'git describe --tags --dirty
# --always --long'. The distance/hash is unconditional.
# exceptions:
# 1: no tags. HEX[-dirty] (note: no 'g' prefix)
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"]}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%s'" % style)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None}
def get_versions():
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in cfg.versionfile_source.split('/'):
root = os.path.dirname(root)
except NameError:
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree"}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to compute version"}
|
py | b40c6c9d4e5704d0329d7da3f587be73ffaf9c11 | from base.application import cache, app
from peewee import fn, JOIN
from flask import jsonify
from dateutil.parser import parse
from flask_restful import Resource
from base.utils.gcloud import query_item
from flask import Response, flash, abort
@app.route('/api/report/date/<date>')
def report_by_date(date):
data = list(trait.select(report.report_slug,
report.report_name,
trait.trait_name,
trait.trait_slug,
report.release,
trait.submission_date,
mapping.log10p,
fn.CONCAT(mapping.chrom, ":", mapping.pos).alias("CHROM_POS")) \
.join(mapping, JOIN.LEFT_OUTER) \
.switch(trait) \
.join(report) \
.filter(
(db.truncate_date("day", trait.submission_date) == parse(date).date()
),
(report.release == 0),
(trait.status == "complete")
) \
.dicts() \
.execute())
return jsonify(data)
@app.route('/api/report/data/<string:report_slug>')
def report_data(report_slug):
trait_set = query_item('trait', filters=[('report_slug', '=', report_slug)])
# Get first report if available.
try:
trait = trait_set[0]
except IndexError:
try:
trait_set = query_item('trait', filters=[('secret_hash', '=', report_slug)])
trait = trait_set[0]
except IndexError:
flash('Cannot find report', 'danger')
return abort(404)
return Response(trait['trait_data'], mimetype="text/csv", headers={"Content-disposition":"attachment; filename=%s.tsv" % report_slug})
|
py | b40c6e6125c142dc3b51c5f33fd1da16c4127a88 | import sys
import vim
import traceback
from debugger import Debugger
def debugger_init(debug = 0):
global debugger
# get needed vim variables
# port that the engine will connect on
port = int(vim.eval('debuggerPort'))
if port == 0:
port = 9000
# the max_depth variable to set in the engine
max_children = vim.eval('debuggerMaxChildren')
if max_children == '':
max_children = '32'
max_data = vim.eval('debuggerMaxData')
if max_data == '':
max_data = '1024'
max_depth = vim.eval('debuggerMaxDepth')
if max_depth == '':
max_depth = '1'
minibufexpl = int(vim.eval('debuggerMiniBufExpl'))
if minibufexpl == 0:
minibufexpl = 0
debugger = Debugger(port, max_children, max_data, max_depth, minibufexpl, debug)
import shlex
_commands = {}
def debugger_cmd(plain):
if ' ' in plain:
name, plain = plain.split(' ', 1)
args = shlex.split(plain)
else:
name = plain
plain = ''
args = []
if name not in _commands:
print '[usage:] dbg command [options]'
for command in _commands:
print ' - ', command, ' ::', _commands[command]['help']
return
cmd = _commands[name]
if cmd['plain']:
return cmd['cmd'](plain)
else:
cmd['cmd'](*args)
def cmd(name, help='', plain=False):
def decor(fn):
_commands[name] = {'cmd':fn, 'help':help, 'plain':plain}
return fn
return decor
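# Hypothetical usage sketch (not part of the original plugin): registering an
# extra command through the decorator above, mirroring the handlers below.
#
# @cmd('step', 'step into the next statement')
# def debugger_step():
#     debugger.command('step_into')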
def debugger_command(msg, arg1 = '', arg2 = ''):
try:
debugger.command(msg, arg1, arg2)
debugger.update()
except:
debugger.ui.windows['trace'].write(sys.exc_info())
debugger.ui.windows['trace'].write("".join(traceback.format_tb( sys.exc_info()[2])))
debugger.stop()
print 'Connection closed, stop debugging', sys.exc_info()
@cmd('run', 'run until the next break point (or the end)')
def debugger_run():
try:
debugger.run()
except:
debugger.ui.windows['trace'].write(sys.exc_info())
debugger.ui.windows['trace'].write("".join(traceback.format_tb( sys.exc_info()[2])))
debugger.stop()
print 'Connection closed, stop debugging', sys.exc_info()
# @cmd('watch', 'watch a value')
def debugger_watch_input(cmd, arg = ''):
try:
if arg == '<cword>':
arg = vim.eval('expand("<cword>")')
debugger.watch_input(cmd, arg)
except:
debugger.ui.windows['trace'].write( sys.exc_info() )
debugger.ui.windows['trace'].write( "".join(traceback.format_tb(sys.exc_info()[2])) )
debugger.stop()
print 'Connection closed, stop debugging'
@cmd('ctx', 'refresh the context (scope)')
def debugger_context():
try:
debugger.command('context_get')
except:
debugger.ui.windows['trace'].write(sys.exc_info())
debugger.ui.windows['trace'].write("".join(traceback.format_tb( sys.exc_info()[2])))
debugger.stop()
print 'Connection closed, stop debugging'
@cmd('e', 'eval some text', plain=True)
def debugger_eval(stuff):
debugger.command("eval", '', stuff)
def debugger_property(name = ''):
try:
debugger.property_get()
except:
debugger.ui.windows['trace'].write(sys.exc_info())
debugger.ui.windows['trace'].write("".join(traceback.format_tb( sys.exc_info()[2])))
debugger.stop()
print 'Connection closed, stop debugging', sys.exc_info()
def debugger_mark(exp = ''):
try:
debugger.mark(exp)
except:
debugger.ui.windows['trace'].write(sys.exc_info())
debugger.ui.windows['trace'].write("".join(traceback.format_tb( sys.exc_info()[2])))
debugger.stop()
print 'Connection closed, stop debugging', sys.exc_info()
def debugger_up():
try:
debugger.up()
except:
debugger.ui.windows['trace'].write(sys.exc_info())
debugger.ui.windows['trace'].write("".join(traceback.format_tb( sys.exc_info()[2])))
debugger.stop()
print 'Connection closed, stop debugging', sys.exc_info()
def debugger_down():
try:
debugger.down()
except:
debugger.ui.windows['trace'].write(sys.exc_info())
debugger.ui.windows['trace'].write("".join(traceback.format_tb( sys.exc_info()[2])))
debugger.stop()
print 'Connection closed, stop debugging', sys.exc_info()
def debugger_quit():
global debugger
debugger.quit()
mode = 0
def debugger_resize():
global mode
mode = mode + 1
if mode >= 3:
mode = 0
if mode == 0:
vim.command("wincmd =")
elif mode == 1:
vim.command("wincmd |")
if mode == 2:
vim.command("wincmd _")
# vim: et sw=4 sts=4
|
py | b40c6f19378ce589b5f9cd10878d3de500bb5c74 | for cas in range(int(input())):
input()
print(len(set(input().split())))
|
py | b40c71166769ece74fe337bd0ab9fb2193b3744e | # Copyright 2012 OpenStack Foundation
# All Rights Reserved
# Copyright (c) 2012 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
API and utilities for nova-network interactions.
"""
import copy
import functools
import inspect
import time
import typing as ty
from keystoneauth1 import loading as ks_loading
from neutronclient.common import exceptions as neutron_client_exc
from neutronclient.v2_0 import client as clientv20
from oslo_concurrency import lockutils
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import strutils
from oslo_utils import uuidutils
from nova.accelerator import cyborg
from nova.compute import utils as compute_utils
import nova.conf
from nova import context as nova_context
from nova import exception
from nova.i18n import _
from nova.network import constants
from nova.network import model as network_model
from nova import objects
from nova.objects import fields as obj_fields
from nova.pci import manager as pci_manager
from nova.pci import request as pci_request
from nova.pci import utils as pci_utils
from nova.pci import whitelist as pci_whitelist
from nova.policies import servers as servers_policies
from nova import profiler
from nova import service_auth
from nova import utils
CONF = nova.conf.CONF
LOG = logging.getLogger(__name__)
_SESSION = None
_ADMIN_AUTH = None
def reset_state():
global _ADMIN_AUTH
global _SESSION
_ADMIN_AUTH = None
_SESSION = None
def _load_auth_plugin(conf):
auth_plugin = ks_loading.load_auth_from_conf_options(conf,
nova.conf.neutron.NEUTRON_GROUP)
if auth_plugin:
return auth_plugin
if conf.neutron.auth_type is None:
# If we're coming in through a REST API call for something like
# creating a server, the end user is going to get a 500 response
# which is accurate since the system is mis-configured, but we should
# leave a breadcrumb for the operator that is checking the logs.
LOG.error('The [neutron] section of your nova configuration file '
'must be configured for authentication with the networking '
'service endpoint. See the networking service install guide '
'for details: '
'https://docs.openstack.org/neutron/latest/install/')
err_msg = _('Unknown auth type: %s') % conf.neutron.auth_type
raise neutron_client_exc.Unauthorized(message=err_msg)
def get_binding_profile(port):
"""Convenience method to get the binding:profile from the port
The binding:profile in the port is undefined in the networking service
API and is dependent on backend configuration. This means it could be
an empty dict, None, or have some values.
:param port: dict port response body from the networking service API
:returns: The port binding:profile dict; empty if not set on the port
"""
return port.get(constants.BINDING_PROFILE, {}) or {}
def update_instance_cache_with_nw_info(impl, context, instance, nw_info=None):
if instance.deleted:
LOG.debug('Instance is deleted, no further info cache update',
instance=instance)
return
try:
if not isinstance(nw_info, network_model.NetworkInfo):
nw_info = None
if nw_info is None:
nw_info = impl._get_instance_nw_info(context, instance)
LOG.debug('Updating instance_info_cache with network_info: %s',
nw_info, instance=instance)
# NOTE(comstud): The save() method actually handles updating or
# creating the instance. We don't need to retrieve the object
# from the DB first.
ic = objects.InstanceInfoCache.new(context, instance.uuid)
ic.network_info = nw_info
ic.save()
instance.info_cache = ic
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception('Failed storing info cache', instance=instance)
def refresh_cache(f):
"""Decorator to update the instance_info_cache
Requires context and instance as function args
"""
argspec = inspect.getfullargspec(f)
@functools.wraps(f)
def wrapper(self, context, *args, **kwargs):
try:
# get the instance from arguments (or raise ValueError)
instance = kwargs.get('instance')
if not instance:
instance = args[argspec.args.index('instance') - 2]
except ValueError:
msg = _('instance is a required argument to use @refresh_cache')
raise Exception(msg)
with lockutils.lock('refresh_cache-%s' % instance.uuid):
# We need to call the wrapped function with the lock held to ensure
# that it can call _get_instance_nw_info safely.
res = f(self, context, *args, **kwargs)
update_instance_cache_with_nw_info(self, context, instance,
nw_info=res)
# return the original function's return value
return res
return wrapper
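# Hypothetical usage sketch (not from the original module): a method decorated
# with @refresh_cache must take the request context and the instance (either
# positionally or as the "instance" keyword) and return the NetworkInfo to be
# cached, e.g.:
#
# @refresh_cache
# def _example_refresh(self, context, instance):
#     return self._get_instance_nw_info(context, instance)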
@profiler.trace_cls("neutron_api")
class ClientWrapper(clientv20.Client):
"""A Neutron client wrapper class.
Wraps the callable methods, catches Unauthorized/Forbidden exceptions from
Neutron and converts them into 401/403 errors for Nova clients.
"""
def __init__(self, base_client, admin):
# Expose all attributes from the base_client instance
self.__dict__ = base_client.__dict__
self.base_client = base_client
self.admin = admin
def __getattribute__(self, name):
obj = object.__getattribute__(self, name)
if callable(obj):
obj = object.__getattribute__(self, 'proxy')(obj)
return obj
def proxy(self, obj):
def wrapper(*args, **kwargs):
try:
ret = obj(*args, **kwargs)
except neutron_client_exc.Unauthorized:
if not self.admin:
# Token is expired so Neutron is raising a
# unauthorized exception, we should convert it to
# raise a 401 to make client to handle a retry by
# regenerating a valid token and trying a new
# attempt.
raise exception.Unauthorized()
# In admin context if token is invalid Neutron client
# should be able to regenerate a valid by using the
# Neutron admin credential configuration located in
# nova.conf.
LOG.error("Neutron client was not able to generate a "
"valid admin token, please verify Neutron "
"admin credential located in nova.conf")
raise exception.NeutronAdminCredentialConfigurationInvalid()
except neutron_client_exc.Forbidden as e:
raise exception.Forbidden(str(e))
return ret
return wrapper
def _get_auth_plugin(context, admin=False):
# NOTE(dprince): In the case where no auth_token is present we allow use of
# neutron admin tenant credentials if it is an admin context. This is to
# support some services (metadata API) where an admin context is used
# without an auth token.
global _ADMIN_AUTH
if admin or (context.is_admin and not context.auth_token):
if not _ADMIN_AUTH:
_ADMIN_AUTH = _load_auth_plugin(CONF)
return _ADMIN_AUTH
if context.auth_token:
return service_auth.get_auth_plugin(context)
# We did not get a user token and we should not be using
# an admin token so log an error
raise exception.Unauthorized()
def _get_session():
global _SESSION
if not _SESSION:
_SESSION = ks_loading.load_session_from_conf_options(
CONF, nova.conf.neutron.NEUTRON_GROUP)
return _SESSION
def get_client(context, admin=False):
auth_plugin = _get_auth_plugin(context, admin=admin)
session = _get_session()
client_args = dict(session=session,
auth=auth_plugin,
global_request_id=context.global_id,
connect_retries=CONF.neutron.http_retries)
# NOTE(efried): We build an adapter
#               to pull conf options
#               to pass to neutronclient
#               which uses them to build an Adapter.
# This should be unwound at some point.
adap = utils.get_ksa_adapter(
'network', ksa_auth=auth_plugin, ksa_session=session)
client_args = dict(client_args,
service_type=adap.service_type,
service_name=adap.service_name,
interface=adap.interface,
region_name=adap.region_name,
endpoint_override=adap.endpoint_override)
return ClientWrapper(clientv20.Client(**client_args),
admin=admin or context.is_admin)
def _is_not_duplicate(item, items, items_list_name, instance):
present = item in items
# The expectation from this function's perspective is that the
# item is not part of the items list so if it is part of it
# we should at least log it as a warning
if present:
LOG.warning("%(item)s already exists in list: %(list_name)s "
"containing: %(items)s. ignoring it",
{'item': item,
'list_name': items_list_name,
'items': items},
instance=instance)
return not present
def _ensure_no_port_binding_failure(port):
binding_vif_type = port.get('binding:vif_type')
if binding_vif_type == network_model.VIF_TYPE_BINDING_FAILED:
raise exception.PortBindingFailed(port_id=port['id'])
class API:
"""API for interacting with the neutron 2.x API."""
def __init__(self):
self.last_neutron_extension_sync = None
self.extensions = {}
self.pci_whitelist = pci_whitelist.Whitelist(
CONF.pci.passthrough_whitelist)
def _update_port_with_migration_profile(
self, instance, port_id, port_profile, admin_client):
try:
updated_port = admin_client.update_port(
port_id, {'port': {constants.BINDING_PROFILE: port_profile}})
return updated_port
except Exception as ex:
with excutils.save_and_reraise_exception():
LOG.error("Unable to update binding profile "
"for port: %(port)s due to failure: %(error)s",
{'port': port_id, 'error': ex},
instance=instance)
def _clear_migration_port_profile(
self, context, instance, admin_client, ports):
for p in ports:
# If the port already has a migration profile and if
# it is to be torn down, then we need to clean up
# the migration profile.
port_profile = get_binding_profile(p)
if not port_profile:
continue
if constants.MIGRATING_ATTR in port_profile:
del port_profile[constants.MIGRATING_ATTR]
LOG.debug("Removing port %s migration profile", p['id'],
instance=instance)
self._update_port_with_migration_profile(
instance, p['id'], port_profile, admin_client)
def _setup_migration_port_profile(
self, context, instance, host, admin_client, ports):
# Migrating to a new host
for p in ports:
# If the host hasn't changed, there is nothing to do.
# But if the destination host is different than the
# current one, please update the port_profile with
# the 'migrating_to'(constants.MIGRATING_ATTR) key pointing to
# the given 'host'.
host_id = p.get(constants.BINDING_HOST_ID)
if host_id != host:
port_profile = get_binding_profile(p)
# If the "migrating_to" attribute already points at the given
# host, then skip the port update call since we're not changing
# anything.
if host != port_profile.get(constants.MIGRATING_ATTR):
port_profile[constants.MIGRATING_ATTR] = host
self._update_port_with_migration_profile(
instance, p['id'], port_profile, admin_client)
LOG.debug("Port %(port_id)s updated with migration "
"profile %(profile_data)s successfully",
{'port_id': p['id'],
'profile_data': port_profile},
instance=instance)
def setup_networks_on_host(self, context, instance, host=None,
teardown=False):
"""Setup or teardown the network structures.
:param context: The user request context.
:param instance: The instance with attached ports.
:param host: Optional host used to control the setup. If provided and
is not the same as the current instance.host, this method assumes
the instance is being migrated and sets the "migrating_to"
attribute in the binding profile for the attached ports.
:param teardown: Whether or not network information for the ports
should be cleaned up. If True, at a minimum the "migrating_to"
attribute is cleared in the binding profile for the ports. If a
host is also provided, then port bindings for that host are
deleted when teardown is True as long as the host does not match
the current instance.host.
:raises: nova.exception.PortBindingDeletionFailed if host is not None,
teardown is True, and port binding deletion fails.
"""
# Check if the instance is migrating to a new host.
port_migrating = host and (instance.host != host)
# If the port is migrating to a new host or if it is a
# teardown on the original host, then proceed.
if port_migrating or teardown:
search_opts = {'device_id': instance.uuid,
'tenant_id': instance.project_id,
constants.BINDING_HOST_ID: instance.host}
# Now get the port details to process the ports
# binding profile info.
data = self.list_ports(context, **search_opts)
ports = data['ports']
admin_client = get_client(context, admin=True)
if teardown:
# Reset the port profile
self._clear_migration_port_profile(
context, instance, admin_client, ports)
# If a host was provided, delete any bindings between that
# host and the ports as long as the host isn't the same as
# the current instance.host.
has_binding_ext = self.has_port_binding_extension(
client=admin_client)
if port_migrating and has_binding_ext:
self._delete_port_bindings(context, ports, host)
elif port_migrating:
# Setup the port profile
self._setup_migration_port_profile(
context, instance, host, admin_client, ports)
def _delete_port_bindings(self, context, ports, host):
"""Attempt to delete all port bindings on the host.
:param context: The user request context.
:param ports: list of port dicts to cleanup; the 'id' field is required
per port dict in the list
:param host: host from which to delete port bindings
:raises: PortBindingDeletionFailed if port binding deletion fails.
"""
client = get_client(context, admin=True)
failed_port_ids = []
for port in ports:
# This call is safe in that 404s for non-existing
# bindings are ignored.
try:
client.delete_port_binding(port['id'], host)
except neutron_client_exc.NeutronClientException as exc:
# We can safely ignore 404s since we're trying to delete
# the thing that wasn't found anyway, but for everything else
# we should log an error
if exc.status_code == 404:
continue
failed_port_ids.append(port['id'])
LOG.exception(
"Failed to delete binding for port %(port_id)s on host "
"%(host)s", {'port_id': port['id'], 'host': host})
if failed_port_ids:
raise exception.PortBindingDeletionFailed(
port_id=','.join(failed_port_ids), host=host)
def _get_available_networks(self, context, project_id,
net_ids=None, neutron=None,
auto_allocate=False):
"""Return a network list available for the tenant.
The list contains networks owned by the tenant and public networks.
If net_ids specified, it searches networks with requested IDs only.
"""
if not neutron:
neutron = get_client(context)
if net_ids:
# If user has specified to attach instance only to specific
# networks then only add these to **search_opts. This search will
# also include 'shared' networks.
search_opts = {'id': net_ids}
nets = neutron.list_networks(**search_opts).get('networks', [])
else:
# (1) Retrieve non-public network list owned by the tenant.
search_opts = {'tenant_id': project_id, 'shared': False}
if auto_allocate:
# The auto-allocated-topology extension may create complex
# network topologies and it does so in a non-transactional
# fashion. Therefore API users may be exposed to resources that
# are transient or partially built. A client should use
# resources that are meant to be ready and this can be done by
# checking their admin_state_up flag.
search_opts['admin_state_up'] = True
nets = neutron.list_networks(**search_opts).get('networks', [])
# (2) Retrieve public network list.
search_opts = {'shared': True}
nets += neutron.list_networks(**search_opts).get('networks', [])
_ensure_requested_network_ordering(
lambda x: x['id'],
nets,
net_ids)
return nets
def _cleanup_created_port(self, port_client, port_id, instance):
try:
port_client.delete_port(port_id)
except neutron_client_exc.NeutronClientException:
LOG.exception(
'Failed to delete port %(port_id)s while cleaning up after an '
'error.', {'port_id': port_id},
instance=instance)
def _create_port_minimal(self, context, port_client, instance, network_id,
fixed_ip=None, security_group_ids=None):
"""Attempts to create a port for the instance on the given network.
:param context: The request context.
:param port_client: The client to use to create the port.
:param instance: Create the port for the given instance.
:param network_id: Create the port on the given network.
:param fixed_ip: Optional fixed IP to use from the given network.
:param security_group_ids: Optional list of security group IDs to
apply to the port.
:returns: The created port.
:raises PortLimitExceeded: If neutron fails with an OverQuota error.
:raises NoMoreFixedIps: If neutron fails with
IpAddressGenerationFailure error.
:raises: PortBindingFailed: If port binding failed.
:raises NetworksWithQoSPolicyNotSupported: if the created port has
resource request.
"""
# Set the device_id so it's clear who this port was created for,
# and to stop other instances trying to use it
port_req_body = {'port': {'device_id': instance.uuid}}
try:
if fixed_ip:
port_req_body['port']['fixed_ips'] = [
{'ip_address': str(fixed_ip)}]
port_req_body['port']['network_id'] = network_id
port_req_body['port']['admin_state_up'] = True
port_req_body['port']['tenant_id'] = instance.project_id
if security_group_ids:
port_req_body['port']['security_groups'] = security_group_ids
port_response = port_client.create_port(port_req_body)
port = port_response['port']
port_id = port['id']
# NOTE(gibi): Checking if the created port has resource request as
# such ports are currently not supported as they would at least
# need resource allocation manipulation in placement but might also
# need a new scheduling if resource on this host is not available.
if self._has_resource_request(context, port, port_client):
msg = (
"The auto-created port %(port_id)s is being deleted due "
"to its network having QoS policy.")
LOG.info(msg, {'port_id': port_id})
self._cleanup_created_port(port_client, port_id, instance)
# NOTE(gibi): This limitation regarding server create can be
# removed when the port creation is moved to the conductor. But
# this code also limits attaching a network that has QoS
# minimum bandwidth rule.
raise exception.NetworksWithQoSPolicyNotSupported(
instance_uuid=instance.uuid, network_id=network_id)
try:
_ensure_no_port_binding_failure(port)
except exception.PortBindingFailed:
with excutils.save_and_reraise_exception():
port_client.delete_port(port_id)
LOG.debug('Successfully created port: %s', port_id,
instance=instance)
return port
except neutron_client_exc.InvalidIpForNetworkClient:
LOG.warning('Neutron error: %(ip)s is not a valid IP address '
'for network %(network_id)s.',
{'ip': fixed_ip, 'network_id': network_id},
instance=instance)
msg = (_('Fixed IP %(ip)s is not a valid ip address for '
'network %(network_id)s.') %
{'ip': fixed_ip, 'network_id': network_id})
raise exception.InvalidInput(reason=msg)
except (neutron_client_exc.IpAddressInUseClient,
neutron_client_exc.IpAddressAlreadyAllocatedClient):
LOG.warning('Neutron error: Fixed IP %s is '
'already in use.', fixed_ip, instance=instance)
msg = _("Fixed IP %s is already in use.") % fixed_ip
raise exception.FixedIpAlreadyInUse(message=msg)
except neutron_client_exc.OverQuotaClient:
LOG.warning(
'Neutron error: Port quota exceeded in tenant: %s',
port_req_body['port']['tenant_id'], instance=instance)
raise exception.PortLimitExceeded()
except neutron_client_exc.IpAddressGenerationFailureClient:
LOG.warning('Neutron error: No more fixed IPs in network: %s',
network_id, instance=instance)
raise exception.NoMoreFixedIps(net=network_id)
except neutron_client_exc.NeutronClientException:
with excutils.save_and_reraise_exception():
LOG.exception('Neutron error creating port on network %s',
network_id, instance=instance)
def _update_port(self, port_client, instance, port_id,
port_req_body):
try:
port_response = port_client.update_port(port_id, port_req_body)
port = port_response['port']
_ensure_no_port_binding_failure(port)
LOG.debug('Successfully updated port: %s', port_id,
instance=instance)
return port
except neutron_client_exc.MacAddressInUseClient:
mac_address = port_req_body['port'].get('mac_address')
network_id = port_req_body['port'].get('network_id')
LOG.warning('Neutron error: MAC address %(mac)s is already '
'in use on network %(network)s.',
{'mac': mac_address, 'network': network_id},
instance=instance)
raise exception.PortInUse(port_id=mac_address)
except neutron_client_exc.HostNotCompatibleWithFixedIpsClient:
network_id = port_req_body['port'].get('network_id')
LOG.warning('Neutron error: Tried to bind a port with '
'fixed_ips to a host in the wrong segment on '
'network %(network)s.',
{'network': network_id}, instance=instance)
raise exception.FixedIpInvalidOnHost(port_id=port_id)
def _check_external_network_attach(self, context, nets):
"""Check if attaching to external network is permitted."""
if not context.can(servers_policies.NETWORK_ATTACH_EXTERNAL,
fatal=False):
for net in nets:
# Perform this check here rather than in validate_networks to
# ensure the check is performed every time
# allocate_for_instance is invoked
if net.get('router:external') and not net.get('shared'):
raise exception.ExternalNetworkAttachForbidden(
network_uuid=net['id'])
def _unbind_ports(self, context, ports,
neutron, port_client=None):
"""Unbind the given ports by clearing their device_id,
device_owner and dns_name.
:param context: The request context.
:param ports: list of port IDs.
:param neutron: neutron client for the current context.
:param port_client: The client with appropriate karma for
updating the ports.
"""
if port_client is None:
# Requires admin creds to set port bindings
port_client = get_client(context, admin=True)
# it is a dict of network dicts as returned by the neutron client keyed
# by network UUID
networks: ty.Dict[str, ty.Dict] = {}
for port_id in ports:
# A port_id is optional in the NetworkRequest object so check here
# in case the caller forgot to filter the list.
if port_id is None:
continue
port_req_body: ty.Dict[str, ty.Any] = {
'port': {
'device_id': '',
'device_owner': '',
constants.BINDING_HOST_ID: None,
}
}
try:
port = self._show_port(
context, port_id, neutron_client=neutron,
fields=[constants.BINDING_PROFILE, 'network_id'])
except exception.PortNotFound:
LOG.debug('Unable to show port %s as it no longer '
'exists.', port_id)
return
except Exception:
# NOTE: In case we can't retrieve the binding:profile or
# network info assume that they are empty
LOG.exception("Unable to get binding:profile for port '%s'",
port_id)
port_profile = {}
network: dict = {}
else:
port_profile = get_binding_profile(port)
net_id = port.get('network_id')
if net_id in networks:
network = networks[net_id]
else:
network = neutron.show_network(net_id,
fields=['dns_domain']
).get('network')
networks[net_id] = network
# Unbind Port device
if port_profile.get('arq_uuid'):
"""Delete device profile by arq uuid."""
cyclient = cyborg.get_client(context)
cyclient.delete_arqs_by_uuid([port_profile['arq_uuid']])
LOG.debug('Delete ARQs %s for port %s',
port_profile['arq_uuid'], port_id)
# NOTE: We're doing this to remove the binding information
# for the physical device but don't want to overwrite the other
# information in the binding profile.
for profile_key in ('pci_vendor_info', 'pci_slot',
constants.ALLOCATION, 'arq_uuid',
'physical_network', 'card_serial_number',
'vf_num', 'pf_mac_address'):
if profile_key in port_profile:
del port_profile[profile_key]
port_req_body['port'][constants.BINDING_PROFILE] = port_profile
# NOTE: For internal DNS integration (network does not have a
# dns_domain), or if we cannot retrieve network info, we use the
# admin client to reset dns_name.
if (
self.has_dns_extension(client=port_client) and
not network.get('dns_domain')
):
port_req_body['port']['dns_name'] = ''
try:
port_client.update_port(port_id, port_req_body)
except neutron_client_exc.PortNotFoundClient:
LOG.debug('Unable to unbind port %s as it no longer '
'exists.', port_id)
except Exception:
LOG.exception("Unable to clear device ID for port '%s'",
port_id)
# NOTE: For external DNS integration, we use the neutron client
# with user's context to reset the dns_name since the recordset is
# under user's zone.
self._reset_port_dns_name(network, port_id, neutron)
def _validate_requested_port_ids(self, context, instance, neutron,
requested_networks):
"""Processes and validates requested networks for allocation.
Iterates over the list of NetworkRequest objects, validating the
request and building sets of ports and networks to
use for allocating ports for the instance.
:param context: The user request context.
:type context: nova.context.RequestContext
:param instance: allocate networks on this instance
:type instance: nova.objects.Instance
:param neutron: neutron client session
:type neutron: neutronclient.v2_0.client.Client
:param requested_networks: List of user-requested networks and/or ports
:type requested_networks: nova.objects.NetworkRequestList
:returns: tuple of:
- ports: dict mapping of port id to port dict
- ordered_networks: list of nova.objects.NetworkRequest objects
for requested networks (either via explicit network request
or the network for an explicit port request)
:raises nova.exception.PortNotFound: If a requested port is not found
in Neutron.
:raises nova.exception.PortNotUsable: If a requested port is not owned
by the same tenant that the instance is created under.
:raises nova.exception.PortInUse: If a requested port is already
attached to another instance.
:raises nova.exception.PortNotUsableDNS: If a requested port has a
value assigned to its dns_name attribute.
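E.g., an illustrative return value (a sketch only; the port dict is
trimmed and all IDs below are hypothetical)::
    ({'port-uuid-1': {'id': 'port-uuid-1',
                      'network_id': 'net-uuid-1',
                      'tenant_id': 'project-uuid'}},
     [objects.NetworkRequest(network_id='net-uuid-1',
                             port_id='port-uuid-1'),
      objects.NetworkRequest(network_id='net-uuid-2')])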
"""
ports = {}
ordered_networks = []
# If we're asked to auto-allocate the network then there won't be any
# ports or real neutron networks to lookup, so just return empty
# results.
if requested_networks and not requested_networks.auto_allocate:
for request in requested_networks:
# Process a request to use a pre-existing neutron port.
if request.port_id:
# Make sure the port exists.
port = self._show_port(context, request.port_id,
neutron_client=neutron)
# Make sure the instance has access to the port.
if port['tenant_id'] != instance.project_id:
raise exception.PortNotUsable(port_id=request.port_id,
instance=instance.uuid)
# Make sure the port isn't already attached to another
# instance.
if port.get('device_id'):
raise exception.PortInUse(port_id=request.port_id)
# Make sure that if the user assigned a value to the port's
# dns_name attribute, it is equal to the instance's
# hostname
if port.get('dns_name'):
if port['dns_name'] != instance.hostname:
raise exception.PortNotUsableDNS(
port_id=request.port_id,
instance=instance.uuid, value=port['dns_name'],
hostname=instance.hostname)
# Make sure the port is usable
_ensure_no_port_binding_failure(port)
# If requesting a specific port, automatically process
# the network for that port as if it were explicitly
# requested.
request.network_id = port['network_id']
ports[request.port_id] = port
# Process a request to use a specific neutron network.
if request.network_id:
ordered_networks.append(request)
return ports, ordered_networks
def _clean_security_groups(self, security_groups):
"""Cleans security groups requested from Nova API
Neutron already passes a 'default' security group when
creating ports so it's not necessary to specify it in the
request.
"""
if not security_groups:
security_groups = []
elif security_groups == [constants.DEFAULT_SECGROUP]:
security_groups = []
return security_groups
def _process_security_groups(self, instance, neutron, security_groups):
"""Processes and validates requested security groups for allocation.
Iterates over the list of requested security groups, validating the
request and filtering out the list of security group IDs to use for
port allocation.
:param instance: allocate networks on this instance
:type instance: nova.objects.Instance
:param neutron: neutron client session
:type neutron: neutronclient.v2_0.client.Client
:param security_groups: list of requested security group name or IDs
to use when allocating new ports for the instance
:return: list of security group IDs to use when allocating new ports
:raises nova.exception.NoUniqueMatch: If multiple security groups
are requested with the same name.
:raises nova.exception.SecurityGroupNotFound: If a requested security
group is not in the tenant-filtered list of available security
groups in Neutron.
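E.g., an illustrative mapping (names and IDs below are hypothetical)::
    # requested by name and by ID, respectively
    security_groups = ['web', 'sg-uuid-2']
    # resolved against the tenant's security groups in Neutron
    security_group_ids = ['sg-uuid-1', 'sg-uuid-2']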
"""
security_group_ids = []
# TODO(arosen) Should optimize more to do direct query for security
# group if len(security_groups) == 1
if len(security_groups):
# NOTE(slaweq): fields other than name and id aren't really needed,
# so asking only for those fields will allow Neutron to skip
# preparing the list of rules for each security group it finds. That
# can speed up processing of this request considerably when the
# tenant has many security groups.
sg_fields = ['id', 'name']
search_opts = {'tenant_id': instance.project_id}
user_security_groups = neutron.list_security_groups(
fields=sg_fields, **search_opts).get('security_groups')
for security_group in security_groups:
name_match = None
uuid_match = None
for user_security_group in user_security_groups:
if user_security_group['name'] == security_group:
# If there was a name match in a previous iteration
# of the loop, we have a conflict.
if name_match:
raise exception.NoUniqueMatch(
_("Multiple security groups found matching"
" '%s'. Use an ID to be more specific.") %
security_group)
name_match = user_security_group['id']
if user_security_group['id'] == security_group:
uuid_match = user_security_group['id']
# If a user names the security group the same as
# another security group's UUID, the name takes priority.
if name_match:
security_group_ids.append(name_match)
elif uuid_match:
security_group_ids.append(uuid_match)
else:
raise exception.SecurityGroupNotFound(
security_group_id=security_group)
return security_group_ids
def _validate_requested_network_ids(self, context, instance, neutron,
requested_networks, ordered_networks):
"""Check requested networks using the Neutron API.
Check the user has access to the network they requested, and that
it is a suitable network to connect to. This includes getting the
network details for any ports that have been passed in, because the
request will have been updated with the network_id in
_validate_requested_port_ids.
If the user has not requested any ports or any networks, we get back
a full list of networks the user has access to, and if there is only
one network, we update ordered_networks so we will connect the
instance to that network.
:param context: The request context.
:param instance: nova.objects.instance.Instance object.
:param neutron: neutron client
:param requested_networks: nova.objects.NetworkRequestList, list of
user-requested networks and/or ports; may be empty
:param ordered_networks: output from _validate_requested_port_ids
that will be used to create and update ports
:returns: dict, keyed by network ID, of networks to use
:raises InterfaceAttachFailedNoNetwork: If no specific networks were
requested and none are available.
:raises NetworkAmbiguous: If no specific networks were requested but
more than one is available.
:raises ExternalNetworkAttachForbidden: If the policy rules forbid
the request context from using an external non-shared network but
one was requested (or available).
"""
# Get networks from Neutron
# If net_ids is empty, this actually returns all available nets
auto_allocate = requested_networks and requested_networks.auto_allocate
net_ids = [request.network_id for request in ordered_networks]
nets = self._get_available_networks(context, instance.project_id,
net_ids, neutron=neutron,
auto_allocate=auto_allocate)
if not nets:
if requested_networks:
# There are no networks available for the project to use and
# none specifically requested, so check to see if we're asked
# to auto-allocate the network.
if auto_allocate:
# During validate_networks we checked to see if
# auto-allocation is available so we don't need to do that
# again here.
nets = [self._auto_allocate_network(instance, neutron)]
else:
# NOTE(chaochin): If user specifies a network id and the
# network can not be found, raise NetworkNotFound error.
for request in requested_networks:
if not request.port_id and request.network_id:
raise exception.NetworkNotFound(
network_id=request.network_id)
else:
# no requested nets and user has no available nets
return {}
# if this function is directly called without a requested_networks param
if (not requested_networks or
requested_networks.is_single_unspecified or
requested_networks.auto_allocate):
# If no networks were requested and none are available, consider
# it a bad request.
if not nets:
raise exception.InterfaceAttachFailedNoNetwork(
project_id=instance.project_id)
# bug/1267723 - if no network is requested and more
# than one is available then raise NetworkAmbiguous Exception
if len(nets) > 1:
msg = _("Multiple possible networks found, use a Network "
"ID to be more specific.")
raise exception.NetworkAmbiguous(msg)
ordered_networks.append(
objects.NetworkRequest(network_id=nets[0]['id']))
# NOTE(melwitt): check external net attach permission after the
# check for ambiguity; there could be another
# available net which is permitted (bug/1364344)
self._check_external_network_attach(context, nets)
return {net['id']: net for net in nets}
def _create_ports_for_instance(self, context, instance, ordered_networks,
nets, neutron, security_group_ids):
"""Create port for network_requests that don't have a port_id
:param context: The request context.
:param instance: nova.objects.instance.Instance object.
:param ordered_networks: objects.NetworkRequestList in requested order
:param nets: a dict of network_id to networks returned from neutron
:param neutron: neutronclient built from users request context
:param security_group_ids: a list of security group IDs to be applied
to any ports created
:returns: a list of pairs (NetworkRequest, created_port_uuid); note that
created_port_uuid will be None for the pair where a pre-existing
port was part of the user request
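E.g., an illustrative return value (hypothetical IDs)::
    [(objects.NetworkRequest(network_id='net-uuid-1'), 'new-port-uuid-1'),
     (objects.NetworkRequest(network_id='net-uuid-2',
                             port_id='preexisting-port-uuid'), None)]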
"""
created_port_ids = []
requests_and_created_ports = []
for request in ordered_networks:
network = nets.get(request.network_id)
# if the network_id did not pass validate_networks() and is not
# available here, skip it safely rather than continuing with a None network
if not network:
continue
try:
port_security_enabled = network.get(
'port_security_enabled', True)
if port_security_enabled:
if not network.get('subnets'):
# Neutron can't apply security groups to a port
# for a network without L3 assignments.
LOG.debug('Network with port security enabled does '
'not have subnets so security groups '
'cannot be applied: %s',
network, instance=instance)
raise exception.SecurityGroupCannotBeApplied()
else:
if security_group_ids:
# We don't want to apply security groups on port
# for a network defined with
# 'port_security_enabled=False'.
LOG.debug('Network has port security disabled so '
'security groups cannot be applied: %s',
network, instance=instance)
raise exception.SecurityGroupCannotBeApplied()
created_port_id = None
if not request.port_id:
# create minimal port, if port not already created by user
created_port = self._create_port_minimal(
context, neutron, instance, request.network_id,
request.address, security_group_ids)
created_port_id = created_port['id']
created_port_ids.append(created_port_id)
requests_and_created_ports.append((
request, created_port_id))
except Exception:
with excutils.save_and_reraise_exception():
if created_port_ids:
self._delete_ports(
neutron, instance, created_port_ids)
return requests_and_created_ports
def _has_resource_request(self, context, port, neutron):
resource_request = port.get(constants.RESOURCE_REQUEST) or {}
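# NOTE: the resource_request of a port comes in two shapes (sketched
# here for illustration, not exhaustive): the legacy format is a flat
# dict, e.g. {'resources': {...}, 'required': [...]}, while the
# extended format nests one or more groups under
# constants.REQUEST_GROUPS, e.g.
# {constants.REQUEST_GROUPS: [{'id': ..., 'resources': {...}}, ...]}.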
if self.has_extended_resource_request_extension(context, neutron):
return bool(resource_request.get(constants.REQUEST_GROUPS, []))
else:
return bool(resource_request)
def instance_has_extended_resource_request(self, instance_uuid):
# NOTE(gibi): We need to use an admin context to query neutron ports as
# neutron does not fill the resource_request field in the port response
# if we query with a non admin context.
admin_context = nova_context.get_admin_context()
if not self.has_extended_resource_request_extension(admin_context):
# Short circuit if the extended resource request API extension is
# not available
return False
# So neutron supports the extended resource request, but does the
# instance have a port with such a request?
search_opts = {'device_id': instance_uuid,
'fields': [constants.RESOURCE_REQUEST]}
ports = self.list_ports(
admin_context, **search_opts).get('ports', [])
for port in ports:
resource_request = port.get(constants.RESOURCE_REQUEST) or {}
if resource_request.get(constants.REQUEST_GROUPS, []):
return True
return False
def get_binding_profile_allocation(
self,
context: nova_context.RequestContext,
port_id: str,
resource_provider_mapping: ty.Dict[str, ty.List[str]],
) -> ty.Union[None, str, ty.Dict[str, str]]:
"""Calculate the value of the allocation key of the binding:profile
based on the allocated resources.
:param context: the request context
:param port_id: the uuid of the neutron port
:param resource_provider_mapping: the mapping returned by placement
defining which request group gets allocated from which resource
providers
:returns: None if the port has no resource request. Returns a single
RP UUID if the port has a legacy resource request. Returns a dict
of request group id: resource provider UUID mapping if the port has
an extended resource request.
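E.g., illustrative return values (hypothetical UUIDs)::
    # legacy resource request
    'rp-uuid-1'
    # extended resource request
    {'group-uuid-1': 'rp-uuid-1',
     'group-uuid-2': 'rp-uuid-2'}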
"""
# We need to use an admin client as the port.resource_request is admin
# only
neutron_admin = get_client(context, admin=True)
neutron = get_client(context)
port = self._show_port(context, port_id, neutron_client=neutron_admin)
if self._has_resource_request(context, port, neutron):
return self._get_binding_profile_allocation(
context, port, neutron, resource_provider_mapping)
else:
return None
def _get_binding_profile_allocation(
self, context, port, neutron, resource_provider_mapping
):
# TODO(gibi): remove this condition and the else branch once Nova does
# not need to support old Neutron sending the legacy resource request
# extension
if self.has_extended_resource_request_extension(
context, neutron
):
# The extended resource request format also means that a
# port can have more than one request group
request_groups = port.get(
constants.RESOURCE_REQUEST, {}).get(
constants.REQUEST_GROUPS, [])
# Each request group id from the port needs to be mapped to
# a single provider id from the provider mappings. Each
# group from the port is mapped to a numbered request group
# in placement so we can assume that they are mapped to
# a single provider and therefore the provider mapping list
# has a single provider id.
allocation = {
group['id']: resource_provider_mapping[group['id']][0]
for group in request_groups
}
else:
# This is the legacy resource request format where a port
# is mapped to a single request group
# NOTE(gibi): In the resource provider mapping there can be
# more than one RP fulfilling a request group. But the resource
# request of a Neutron port is always mapped to a
# numbered request group that is always fulfilled by one
# resource provider. So we only pass that single RP UUID
# here.
allocation = resource_provider_mapping[
port['id']][0]
return allocation
def allocate_for_instance(self, context, instance,
requested_networks,
security_groups=None, bind_host_id=None,
resource_provider_mapping=None,
network_arqs=None):
"""Allocate network resources for the instance.
:param context: The request context.
:param instance: nova.objects.instance.Instance object.
:param requested_networks: objects.NetworkRequestList object.
:param security_groups: None or security groups to allocate for
instance.
:param bind_host_id: the host ID to attach to the ports being created.
:param resource_provider_mapping: a dict keyed by ids of the entities
(for example Neutron port) requesting resources for this instance
mapped to a list of resource provider UUIDs that are fulfilling
such a resource request.
:param network_arqs: dict keyed by arq uuid, of ARQs allocated to
ports.
:returns: network info as from get_instance_nw_info()
"""
LOG.debug('allocate_for_instance()', instance=instance)
if not instance.project_id:
msg = _('empty project id for instance %s')
raise exception.InvalidInput(
reason=msg % instance.uuid)
# We do not want to create a new neutron session for each call
neutron = get_client(context)
# We always need admin_client to build nw_info,
# we sometimes need it when updating ports
admin_client = get_client(context, admin=True)
#
# Validate ports and networks with neutron. The requested_ports_dict
# variable is a dict, keyed by port ID, of ports that were on the user
# request and may be empty. The ordered_networks variable is a list of
# NetworkRequest objects for any networks or ports specifically
# requested by the user, which again may be empty.
#
# NOTE(gibi): we use the admin_client here to ensure that the returned
# ports have the resource_request attribute filled, as later we use this
# information to decide when to add the allocation key to the port binding.
# See bug 1849657.
requested_ports_dict, ordered_networks = (
self._validate_requested_port_ids(
context, instance, admin_client, requested_networks))
nets = self._validate_requested_network_ids(
context, instance, neutron, requested_networks, ordered_networks)
if not nets:
LOG.debug("No network configured", instance=instance)
return network_model.NetworkInfo([])
# Validate requested security groups
security_groups = self._clean_security_groups(security_groups)
security_group_ids = self._process_security_groups(
instance, neutron, security_groups)
# Tell Neutron which resource provider fulfills the ports' resource
# request.
# We only consider pre-created ports here as ports created
# below based on requested networks are not scheduled to have their
# resource request fulfilled.
for port in requested_ports_dict.values():
# only communicate the allocations if the port has resource
# requests
if self._has_resource_request(context, port, neutron):
profile = get_binding_profile(port)
profile[constants.ALLOCATION] = (
self._get_binding_profile_allocation(
context, port, neutron, resource_provider_mapping))
port[constants.BINDING_PROFILE] = profile
# Create ports from the list of ordered_networks. The returned
# requests_and_created_ports variable is a list of 2-item tuples of
# the form (NetworkRequest, created_port_id). Note that a tuple pair
# will have None for the created_port_id if the NetworkRequest already
# contains a port_id, meaning the user requested a specific
# pre-existing port so one wasn't created here. The ports will be
# updated later in _update_ports_for_instance to be bound to the
# instance and compute host.
requests_and_created_ports = self._create_ports_for_instance(
context, instance, ordered_networks, nets, neutron,
security_group_ids)
#
# Update existing and newly created ports
#
ordered_nets, ordered_port_ids, preexisting_port_ids, \
created_port_ids = self._update_ports_for_instance(
context, instance,
neutron, admin_client, requests_and_created_ports, nets,
bind_host_id, requested_ports_dict, network_arqs)
#
# Perform a full update of the network_info_cache,
# including re-fetching lots of the required data from neutron
#
nw_info = self.get_instance_nw_info(
context, instance, networks=ordered_nets,
port_ids=ordered_port_ids,
admin_client=admin_client,
preexisting_port_ids=preexisting_port_ids)
# Only return info about ports we processed in this run, which might
# have been pre-existing neutron ports or ones that nova created. In
# the initial allocation case (server create), this will be everything
# we processed, and in later runs will only be what was processed that
# time. For example, if the instance was created with port A and
# then port B was attached in this call, only port B would be returned.
# Thus, this filtering only affects the attach case.
return network_model.NetworkInfo([vif for vif in nw_info
if vif['id'] in created_port_ids +
preexisting_port_ids])
def _update_ports_for_instance(self, context, instance, neutron,
admin_client, requests_and_created_ports, nets,
bind_host_id, requested_ports_dict, network_arqs):
"""Update ports from network_requests.
Updates the pre-existing ports and the ones created in
``_create_ports_for_instance`` with ``device_id``, ``device_owner``,
optionally ``mac_address`` and, depending on the
loaded extensions, ``rxtx_factor``, ``binding:host_id``, ``dns_name``.
:param context: The request context.
:param instance: nova.objects.instance.Instance object.
:param neutron: client using user context
:param admin_client: client using admin context
:param requests_and_created_ports: [(NetworkRequest, created_port_id)];
Note that created_port_id will be None for any user-requested
pre-existing port.
:param nets: a dict of network_id to networks returned from neutron
:param bind_host_id: a string for port['binding:host_id']
:param requested_ports_dict: dict, keyed by port ID, of ports requested
by the user
:param network_arqs: dict keyed by arq uuid, of ARQs allocated to
ports.
:returns: tuple with the following::
* list of network dicts in their requested order
* list of port IDs in their requested order - note that this does not
mean the port was requested by the user; it could be a port
created on a network requested by the user
* list of pre-existing port IDs requested by the user
* list of created port IDs
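E.g., an illustrative return value (hypothetical IDs; one created and
one pre-existing port)::
    ([{'id': 'net-uuid-1'}, {'id': 'net-uuid-2'}],
     ['created-port-uuid', 'preexisting-port-uuid'],
     ['preexisting-port-uuid'],
     ['created-port-uuid'])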
"""
# We currently require admin creds to set port bindings.
port_client = admin_client
preexisting_port_ids = []
created_port_ids = []
ports_in_requested_order = []
nets_in_requested_order = []
created_vifs = [] # this list is for cleanups if we fail
for request, created_port_id in requests_and_created_ports:
vifobj = objects.VirtualInterface(context)
vifobj.instance_uuid = instance.uuid
vifobj.tag = request.tag if 'tag' in request else None
network = nets.get(request.network_id)
# if the network_id did not pass validate_networks() and is not
# available here, skip it safely rather than continuing with a None network
if not network:
continue
nets_in_requested_order.append(network)
zone = 'compute:%s' % instance.availability_zone
port_req_body = {'port': {'device_id': instance.uuid,
'device_owner': zone}}
if (requested_ports_dict and
request.port_id in requested_ports_dict and
get_binding_profile(requested_ports_dict[request.port_id])):
port_req_body['port'][constants.BINDING_PROFILE] = \
get_binding_profile(requested_ports_dict[request.port_id])
try:
port_arq = None
if network_arqs:
port_arq = network_arqs.get(request.arq_uuid, None)
self._populate_neutron_extension_values(
context, instance, request.pci_request_id, port_req_body,
network=network, neutron=neutron,
bind_host_id=bind_host_id,
port_arq=port_arq)
self._populate_pci_mac_address(instance,
request.pci_request_id, port_req_body)
if created_port_id:
port_id = created_port_id
created_port_ids.append(port_id)
else:
port_id = request.port_id
ports_in_requested_order.append(port_id)
# After port is created, update other bits
updated_port = self._update_port(
port_client, instance, port_id, port_req_body)
# NOTE(danms): The virtual_interfaces table enforces global
# uniqueness on MAC addresses, which clearly does not match
# with neutron's view of the world. Since address is a 255-char
# string, we can namespace it with our port id. Using '/' as a
# separator should be safe, as it is excluded from MAC address
# notations as well as UUIDs. We can stop doing this now that we've removed
# nova-network, but we need to leave the read translation in
# for longer than that of course.
vifobj.address = '%s/%s' % (updated_port['mac_address'],
updated_port['id'])
vifobj.uuid = port_id
vifobj.create()
created_vifs.append(vifobj)
if not created_port_id:
# only add if update worked and port create not called
preexisting_port_ids.append(port_id)
self._update_port_dns_name(context, instance, network,
ports_in_requested_order[-1],
neutron)
except Exception:
with excutils.save_and_reraise_exception():
self._unbind_ports(context,
preexisting_port_ids,
neutron, port_client)
self._delete_ports(neutron, instance, created_port_ids)
for vif in created_vifs:
vif.destroy()
return (nets_in_requested_order, ports_in_requested_order,
preexisting_port_ids, created_port_ids)
def _refresh_neutron_extensions_cache(self, client):
"""Refresh the neutron extensions cache when necessary."""
if (not self.last_neutron_extension_sync or
((time.time() - self.last_neutron_extension_sync) >=
CONF.neutron.extension_sync_interval)):
extensions_list = client.list_extensions()['extensions']
self.last_neutron_extension_sync = time.time()
self.extensions.clear()
self.extensions = {ext['alias']: ext for ext in extensions_list}
def _has_extension(self, extension, context=None, client=None):
"""Check if the provided neutron extension is enabled.
:param extension: The alias of the extension to check
:param client: keystoneauth1.adapter.Adapter
:param context: nova.context.RequestContext
:returns: True if the neutron extension is available, else False
"""
if client is None:
client = get_client(context)
self._refresh_neutron_extensions_cache(client)
return extension in self.extensions
def has_multi_provider_extension(self, context=None, client=None):
"""Check if the 'multi-provider' extension is enabled.
This extension allows administrative users to define multiple physical
bindings for a logical network.
"""
return self._has_extension(constants.MULTI_PROVIDER, context, client)
def has_dns_extension(self, context=None, client=None):
"""Check if the 'dns-integration' extension is enabled.
This extension adds the 'dns_name' and 'dns_assignment' attributes to
port resources.
"""
return self._has_extension(constants.DNS_INTEGRATION, context, client)
# TODO(gibi): Remove all branches where this is False after Neutron has
# made this extension mandatory. In Xena this extension will be optional,
# to support the scenario where Neutron is upgraded first. So Neutron can
# mark this mandatory in Yoga at the earliest.
def has_extended_resource_request_extension(
self, context=None, client=None,
):
return self._has_extension(
constants.RESOURCE_REQUEST_GROUPS, context, client,
)
# TODO(stephenfin): This is optionally used by the XenAPI virt driver, but
# I can't find what defines it and suspect it's dead code. Consider
# removing the functionality
def has_qos_queue_extension(self, context=None, client=None):
"""Check if the 'qos-queue' extension is enabled.
This extension is provided by a XenServer neutron plugin...we think.
"""
return self._has_extension(constants.QOS_QUEUE, context, client)
def has_vnic_index_extension(self, context=None, client=None):
"""Check if the 'vnic-index' extension is enabled.
This extension is provided by the VMWare NSX neutron plugin.
"""
return self._has_extension(constants.VNIC_INDEX, context, client)
def has_fip_port_details_extension(self, context=None, client=None):
"""Check if the 'fip-port-details' extension is enabled.
This extension adds the 'port_details' attribute to floating IPs.
"""
return self._has_extension(constants.FIP_PORT_DETAILS, context, client)
def has_substr_port_filtering_extension(self, context=None, client=None):
"""Check if the 'ip-substring-filtering' extension is enabled.
This extension adds support for filtering ports by using part of an IP
address.
"""
return self._has_extension(
constants.SUBSTR_PORT_FILTERING, context, client
)
def has_segment_extension(self, context=None, client=None):
"""Check if the neutron 'segment' extension is enabled.
This extension exposes information about L2 segments of a network.
"""
return self._has_extension(
constants.SEGMENT, context, client,
)
def has_port_binding_extension(self, context=None, client=None):
"""Check if the neutron 'binding-extended' extension is enabled.
This extension exposes the port bindings of a virtual port to external
applications.
This extension allows nova to bind a port to multiple hosts at the same
time, like during live migration.
"""
return self._has_extension(
constants.PORT_BINDING_EXTENDED, context, client
)
def bind_ports_to_host(self, context, instance, host,
vnic_types=None, port_profiles=None):
"""Attempts to bind the ports from the instance on the given host
If the ports are already actively bound to another host, like the
source host during live migration, then the new port bindings will
be inactive, assuming $host is the destination host for the live
migration.
In the event of an error, any ports which were successfully bound to
the host should have those host bindings removed from the ports.
This method should not be used if "has_port_binding_extension"
returns False.
:param context: the user request context
:type context: nova.context.RequestContext
:param instance: the instance with a set of ports
:type instance: nova.objects.Instance
:param host: the host on which to bind the ports which
are attached to the instance
:type host: str
:param vnic_types: optional dict for the host port binding
:type vnic_types: dict of <port_id> : <vnic_type>
:param port_profiles: optional dict per port ID for the host port
binding profile.
note that the port binding profile is mutable
via the networking "Port Binding" API so callers that
pass in a profile should ensure they have the latest
version from neutron with their changes merged,
which can be determined using the "revision_number"
attribute of the port.
:type port_profiles: dict of <port_id> : <port_profile>
:raises: PortBindingFailed if any of the ports failed to be bound to
the destination host
:returns: dict, keyed by port ID, of a new host port
binding dict per port that was bound
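E.g., an illustrative return value (hypothetical values; the exact
keys of each binding dict come from Neutron's port bindings API)::
    {'port-uuid-1': {'host': 'dest-host',
                     'vnic_type': 'normal',
                     'profile': {},
                     'status': 'INACTIVE'}}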
"""
# Get the current ports off the instance. This assumes the cache is
# current.
network_info = instance.get_network_info()
if not network_info:
# The instance doesn't have any ports so there is nothing to do.
LOG.debug('Instance does not have any ports.', instance=instance)
return {}
client = get_client(context, admin=True)
bindings_by_port_id: ty.Dict[str, ty.Any] = {}
for vif in network_info:
# Now bind each port to the destination host and keep track of the
# resulting binding for each bound port so we can roll back in
# the event of a failure, or return the results if everything is OK
port_id = vif['id']
binding = dict(host=host)
if vnic_types is None or port_id not in vnic_types:
binding['vnic_type'] = vif['vnic_type']
else:
binding['vnic_type'] = vnic_types[port_id]
if port_profiles is None or port_id not in port_profiles:
binding['profile'] = vif['profile']
else:
binding['profile'] = port_profiles[port_id]
data = {'binding': binding}
try:
binding = client.create_port_binding(port_id, data)['binding']
except neutron_client_exc.NeutronClientException:
# Something failed, so log the error and rollback any
# successful bindings.
LOG.error('Binding failed for port %s and host %s.',
port_id, host, instance=instance, exc_info=True)
for rollback_port_id in bindings_by_port_id:
try:
client.delete_port_binding(rollback_port_id, host)
except neutron_client_exc.NeutronClientException as exc:
if exc.status_code != 404:
LOG.warning('Failed to remove binding for port %s '
'on host %s.', rollback_port_id, host,
instance=instance)
raise exception.PortBindingFailed(port_id=port_id)
bindings_by_port_id[port_id] = binding
return bindings_by_port_id
def delete_port_binding(self, context, port_id, host):
"""Delete the port binding for the given port ID and host
This method should not be used if "has_port_binding_extension"
returns False.
:param context: The request context for the operation.
:param port_id: The ID of the port with a binding to the host.
:param host: The host from which port bindings should be deleted.
:raises: nova.exception.PortBindingDeletionFailed if a non-404 error
response is received from neutron.
"""
client = get_client(context, admin=True)
try:
client.delete_port_binding(port_id, host)
except neutron_client_exc.NeutronClientException as exc:
# We can safely ignore 404s since we're trying to delete
# the thing that wasn't found anyway.
if exc.status_code != 404:
LOG.error(
'Unexpected error trying to delete binding for port %s '
'and host %s.', port_id, host, exc_info=True)
raise exception.PortBindingDeletionFailed(
port_id=port_id, host=host)
def _get_vf_pci_device_profile(self, pci_dev):
"""Get VF-specific fields to add to the PCI device profile.
This data can be useful, e.g. for off-path networking backends that
need to do the necessary plumbing in order to set a VF up for packet
forwarding.
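E.g., a fully populated profile (illustrative values)::
    {'pf_mac_address': '52:54:00:1e:59:c6',
     'vf_num': 1,
     'card_serial_number': 'MT2113X00000'}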
"""
vf_profile: ty.Dict[str, ty.Union[str, int]] = {}
try:
pf_mac = pci_utils.get_mac_by_pci_address(pci_dev.parent_addr)
except (exception.PciDeviceNotFoundById) as e:
LOG.debug(
"Could not determine PF MAC address for a VF with"
" addr %(addr)s, error: %(e)s",
{"addr": pci_dev.address, "e": e})
# NOTE(dmitriis): we do not raise here since not all PFs will
# have netdevs even when VFs are netdevs (see LP: #1915255). The
# rest of the fields (VF number and card serial) are not enough
# to fully identify the VF so they are not populated either.
return vf_profile
try:
vf_num = pci_utils.get_vf_num_by_pci_address(
pci_dev.address)
except exception.PciDeviceNotFoundById as e:
# This is unlikely to happen because the kernel has a common SR-IOV
# code that creates physfn symlinks, however, it would be better
# to avoid raising an exception here and simply warn an operator
# that things did not go as planned.
LOG.warning(
"Could not determine a VF logical number for a VF"
" with addr %(addr)s, error: %(e)s", {
"addr": pci_dev.address, "e": e})
return vf_profile
card_serial_number = pci_dev.card_serial_number
if card_serial_number:
vf_profile.update({
'card_serial_number': card_serial_number
})
vf_profile.update({
'pf_mac_address': pf_mac,
'vf_num': vf_num,
})
return vf_profile
def _get_pci_device_profile(self, pci_dev):
dev_spec = self.pci_whitelist.get_devspec(pci_dev)
if dev_spec:
dev_profile = {
'pci_vendor_info': "%s:%s"
% (pci_dev.vendor_id, pci_dev.product_id),
'pci_slot': pci_dev.address,
'physical_network': dev_spec.get_tags().get(
'physical_network'
),
}
if pci_dev.dev_type == obj_fields.PciDeviceType.SRIOV_VF:
dev_profile.update(
self._get_vf_pci_device_profile(pci_dev))
return dev_profile
raise exception.PciDeviceNotFound(node_id=pci_dev.compute_node_id,
address=pci_dev.address)
def _populate_neutron_binding_profile(self, instance, pci_request_id,
port_req_body,
port_arq):
"""Populate neutron binding:profile.
Populate it with SR-IOV related information
:raises PciDeviceNotFound: If a claimed PCI device for the given
pci_request_id cannot be found on the instance.
"""
if pci_request_id:
pci_devices = pci_manager.get_instance_pci_devs(
instance, pci_request_id)
if not pci_devices:
# The pci_request_id likely won't mean much except for tracing
# through the logs since it is generated per request.
LOG.error('Unable to find PCI device using PCI request ID in '
'list of claimed instance PCI devices: %s. Is the '
'[pci]/passthrough_whitelist configuration correct?',
# Convert to a primitive list to stringify it.
list(instance.pci_devices), instance=instance)
raise exception.PciDeviceNotFound(
_('PCI device not found for request ID %s.') %
pci_request_id)
pci_dev = pci_devices.pop()
profile = copy.deepcopy(get_binding_profile(port_req_body['port']))
profile.update(self._get_pci_device_profile(pci_dev))
port_req_body['port'][constants.BINDING_PROFILE] = profile
if port_arq:
# PCI SR-IOV device according to the port ARQ
profile = copy.deepcopy(get_binding_profile(port_req_body['port']))
profile.update(cyborg.get_arq_pci_device_profile(port_arq))
port_req_body['port'][constants.BINDING_PROFILE] = profile
@staticmethod
def _populate_pci_mac_address(instance, pci_request_id, port_req_body):
"""Add the updated MAC address value to the update_port request body.
Currently this is done only for PF passthrough.
"""
if pci_request_id is not None:
pci_devs = pci_manager.get_instance_pci_devs(
instance, pci_request_id)
if len(pci_devs) != 1:
# NOTE(ndipanov): We shouldn't ever get here since
# InstancePCIRequest instances built from network requests
# only ever index a single device, which needs to be
# successfully claimed for this to be called as part of the
# allocate_networks method
LOG.error("PCI request %s does not have a "
"unique device associated with it. Unable to "
"determine MAC address",
pci_request_id, instance=instance)
return
pci_dev = pci_devs[0]
if pci_dev.dev_type == obj_fields.PciDeviceType.SRIOV_PF:
try:
mac = pci_utils.get_mac_by_pci_address(pci_dev.address)
except exception.PciDeviceNotFoundById as e:
LOG.error(
"Could not determine MAC address for %(addr)s, "
"error: %(e)s",
{"addr": pci_dev.address, "e": e}, instance=instance)
else:
port_req_body['port']['mac_address'] = mac
def _populate_neutron_extension_values(self, context, instance,
pci_request_id, port_req_body,
network=None, neutron=None,
bind_host_id=None,
port_arq=None):
"""Populate neutron extension values for the instance.
If the extensions loaded contain QOS_QUEUE then pass the rxtx_factor.
"""
if neutron is None:
neutron = get_client(context)
if self.has_qos_queue_extension(client=neutron):
flavor = instance.get_flavor()
rxtx_factor = flavor.get('rxtx_factor')
port_req_body['port']['rxtx_factor'] = rxtx_factor
port_req_body['port'][constants.BINDING_HOST_ID] = bind_host_id
self._populate_neutron_binding_profile(instance,
pci_request_id,
port_req_body,
port_arq)
if self.has_dns_extension(client=neutron):
# If the DNS integration extension is enabled in Neutron, most
# ports will get their dns_name attribute set in the port create or
# update requests in allocate_for_instance. So we just add the
# dns_name attribute to the payload of those requests. The
# exception is when the port binding extension is enabled in
# Neutron and the port is on a network that has a non-blank
# dns_domain attribute. This case must be handled by the
# _update_port_dns_name method.
if (not network.get('dns_domain')):
port_req_body['port']['dns_name'] = instance.hostname
def _update_port_dns_name(self, context, instance, network, port_id,
neutron):
"""Update an instance port dns_name attribute with instance.hostname.
The dns_name attribute of a port on a network with a non-blank
dns_domain attribute will be sent to the external DNS service
(Designate) if DNS integration is enabled in Neutron. This requires the
assignment of the dns_name to the port to be done with a Neutron client
using the user's context. allocate_for_instance uses a port client with
admin context if the port binding extension is enabled in Neutron. In this
case, we assign in this method the dns_name attribute to the port with
an additional update request. Only a very small fraction of ports will
require this additional update request.
"""
if self.has_dns_extension(client=neutron) and network.get(
'dns_domain'):
try:
port_req_body = {'port': {'dns_name': instance.hostname}}
neutron.update_port(port_id, port_req_body)
except neutron_client_exc.BadRequest:
LOG.warning('Neutron error: Instance hostname '
'%(hostname)s is not a valid DNS name',
{'hostname': instance.hostname}, instance=instance)
msg = (_('Instance hostname %(hostname)s is not a valid DNS '
'name') % {'hostname': instance.hostname})
raise exception.InvalidInput(reason=msg)
def _reset_port_dns_name(self, network, port_id, client):
"""Reset an instance port dns_name attribute to empty when using
external DNS service.
_unbind_ports uses a client with admin context to reset the dns_name if
the DNS extension is enabled and the network does not have dns_domain set.
When external DNS service is enabled, we use this method to make the
request with a Neutron client using user's context, so that the DNS
record can be found under user's zone and domain.
"""
if self.has_dns_extension(client=client) and network.get(
'dns_domain'):
try:
port_req_body = {'port': {'dns_name': ''}}
client.update_port(port_id, port_req_body)
except neutron_client_exc.NeutronClientException:
LOG.exception("Failed to reset dns_name for port %s", port_id)
def _delete_ports(self, neutron, instance, ports, raise_if_fail=False):
exceptions = []
for port in ports:
try:
neutron.delete_port(port)
except neutron_client_exc.NeutronClientException as e:
if e.status_code == 404:
LOG.warning("Port %s does not exist", port,
instance=instance)
else:
exceptions.append(e)
LOG.warning("Failed to delete port %s for instance.",
port, instance=instance, exc_info=True)
if len(exceptions) > 0 and raise_if_fail:
raise exceptions[0]
def deallocate_for_instance(self, context, instance, **kwargs):
"""Deallocate all network resources related to the instance."""
LOG.debug('deallocate_for_instance()', instance=instance)
search_opts = {'device_id': instance.uuid}
neutron = get_client(context)
data = neutron.list_ports(**search_opts)
ports = {port['id'] for port in data.get('ports', [])}
requested_networks = kwargs.get('requested_networks') or []
# NOTE(danms): Temporary and transitional
if isinstance(requested_networks, objects.NetworkRequestList):
requested_networks = requested_networks.as_tuples()
ports_to_skip = set([port_id for nets, fips, port_id, pci_request_id,
arq_uuid, device_profile in requested_networks])
# NOTE(boden): requested_networks is only passed in when deallocating
# from a failed build / spawn call. Therefore we need to include
# preexisting ports when deallocating from a standard delete op,
# in which case requested_networks is not provided.
ports_to_skip |= set(self._get_preexisting_port_ids(instance))
ports = set(ports) - ports_to_skip
# Reset device_id and device_owner for the ports that are skipped
self._unbind_ports(context, ports_to_skip, neutron)
# Delete the rest of the ports
self._delete_ports(neutron, instance, ports, raise_if_fail=True)
# deallocate vifs (mac addresses)
objects.VirtualInterface.delete_by_instance_uuid(
context, instance.uuid)
# NOTE(arosen): This clears out the network_cache only if the instance
# hasn't already been deleted. This is needed when an instance fails to
# launch and is rescheduled onto another compute node. If the instance
# has already been deleted this call does nothing.
update_instance_cache_with_nw_info(self, context, instance,
network_model.NetworkInfo([]))
def deallocate_port_for_instance(self, context, instance, port_id):
"""Remove a specified port from the instance.
:param context: the request context
:param instance: the instance object the port is detached from
:param port_id: the UUID of the port being detached
:return: A NetworkInfo, port_allocation tuple where the
port_allocation is a dict which contains the resource
allocation of the port per resource provider uuid. E.g.:
{
rp_uuid: {
"resources": {
"NET_BW_EGR_KILOBIT_PER_SEC": 10000,
"NET_BW_IGR_KILOBIT_PER_SEC": 20000,
}
}
}
Note that right now this dict only contains a single key as a
neutron port only allocates from a single resource provider.
"""
# We need to use an admin client as the port.resource_request is admin
# only
neutron_admin = get_client(context, admin=True)
neutron = get_client(context)
port_allocation: ty.Dict = {}
try:
# NOTE(gibi): we need to read the port resource information from
# neutron here as we might delete the port below
port = neutron_admin.show_port(port_id)['port']
except exception.PortNotFound:
LOG.debug('Unable to determine port %s resource allocation '
'information as the port no longer exists.', port_id)
port = None
preexisting_ports = self._get_preexisting_port_ids(instance)
if port_id in preexisting_ports:
self._unbind_ports(context, [port_id], neutron)
else:
self._delete_ports(neutron, instance, [port_id],
raise_if_fail=True)
# Delete the VirtualInterface for the given port_id.
vif = objects.VirtualInterface.get_by_uuid(context, port_id)
if vif:
self._delete_nic_metadata(instance, vif)
vif.destroy()
else:
LOG.debug('VirtualInterface not found for port: %s',
port_id, instance=instance)
if port:
# if there are resources associated with this port then they need to
# be deallocated, so let's return info about the allocation
resource_request = port.get(constants.RESOURCE_REQUEST) or {}
profile = get_binding_profile(port)
if self.has_extended_resource_request_extension(context, neutron):
# new format
groups = resource_request.get(constants.REQUEST_GROUPS)
if groups:
allocated_rps = profile.get(constants.ALLOCATION)
for group in groups:
allocated_rp = allocated_rps[group['id']]
port_allocation[allocated_rp] = {
"resources": group.get("resources", {})
}
else:
# legacy format
allocated_rp = profile.get(constants.ALLOCATION)
if resource_request and allocated_rp:
port_allocation = {
allocated_rp: {
"resources": resource_request.get("resources", {})
}
}
else:
# Check the info_cache. If the port is still in the info_cache and
# its profile in that cache has an allocation, then we suspect
# that the port disappeared without deallocating the resources.
for vif in instance.get_network_info():
if vif['id'] == port_id:
profile = vif.get('profile') or {}
rp_uuid = profile.get(constants.ALLOCATION)
if rp_uuid:
LOG.warning(
'Port %s disappeared during deallocate but it had '
'resource allocation on resource provider %s. '
'Resource allocation for this port may be '
'leaked.', port_id, rp_uuid, instance=instance)
break
return self.get_instance_nw_info(context, instance), port_allocation
def _delete_nic_metadata(self, instance, vif):
if not instance.device_metadata:
# nothing to delete
return
for device in instance.device_metadata.devices:
if (isinstance(device, objects.NetworkInterfaceMetadata) and
device.mac == vif.address):
instance.device_metadata.devices.remove(device)
instance.save()
break
def list_ports(self, context, **search_opts):
"""List ports for the client based on search options."""
return get_client(context).list_ports(**search_opts)
def show_port(self, context, port_id):
"""Return the port for the client given the port id.
:param context: Request context.
:param port_id: The id of port to be queried.
:returns: A dict containing port data keyed by 'port', e.g.
::
{'port': {'port_id': 'abcd',
'fixed_ip_address': '1.2.3.4'}}
"""
return dict(port=self._show_port(context, port_id))
def _show_port(self, context, port_id, neutron_client=None, fields=None):
"""Return the port for the client given the port id.
:param context: Request context.
:param port_id: The id of port to be queried.
:param neutron_client: A neutron client.
:param fields: The condition fields to query port data.
:returns: A dict of port data.
e.g. {'port_id': 'abcd', 'fixed_ip_address': '1.2.3.4'}
"""
if not neutron_client:
neutron_client = get_client(context)
try:
if fields:
result = neutron_client.show_port(port_id, fields=fields)
else:
result = neutron_client.show_port(port_id)
return result.get('port')
except neutron_client_exc.PortNotFoundClient:
raise exception.PortNotFound(port_id=port_id)
except neutron_client_exc.Unauthorized:
raise exception.Forbidden()
except neutron_client_exc.NeutronClientException as exc:
msg = (_("Failed to access port %(port_id)s: %(reason)s") %
{'port_id': port_id, 'reason': exc})
raise exception.NovaException(message=msg)
def get_instance_nw_info(self, context, instance, **kwargs):
"""Returns all network info related to an instance."""
with lockutils.lock('refresh_cache-%s' % instance.uuid):
result = self._get_instance_nw_info(context, instance, **kwargs)
update_instance_cache_with_nw_info(self, context, instance,
nw_info=result)
return result
def _get_instance_nw_info(self, context, instance, networks=None,
port_ids=None, admin_client=None,
preexisting_port_ids=None,
refresh_vif_id=None, force_refresh=False,
**kwargs):
# NOTE(danms): This is an inner method intended to be called
# by other code that updates instance nwinfo. It *must* be
# called with the refresh_cache-%(instance_uuid) lock held!
if force_refresh:
LOG.debug('Forcefully refreshing network info cache for instance',
instance=instance)
elif refresh_vif_id:
LOG.debug('Refreshing network info cache for port %s',
refresh_vif_id, instance=instance)
else:
LOG.debug('Building network info cache for instance',
instance=instance)
# Ensure that we have an up to date copy of the instance info cache.
# Otherwise multiple requests could collide and cause cache
# corruption.
compute_utils.refresh_info_cache_for_instance(context, instance)
nw_info = self._build_network_info_model(context, instance, networks,
port_ids, admin_client,
preexisting_port_ids,
refresh_vif_id,
force_refresh=force_refresh)
return network_model.NetworkInfo.hydrate(nw_info)
def _gather_port_ids_and_networks(self, context, instance, networks=None,
port_ids=None, neutron=None):
"""Return an instance's complete list of port_ids and networks.
The results are based on the instance info_cache in the nova db, not
the instance's current list of ports in neutron.
"""
if ((networks is None and port_ids is not None) or
(port_ids is None and networks is not None)):
message = _("This method needs to be called with either "
"networks=None and port_ids=None or port_ids and "
"networks as not none.")
raise exception.NovaException(message=message)
ifaces = instance.get_network_info()
# This code path is only done when refreshing the network_cache
if port_ids is None:
port_ids = [iface['id'] for iface in ifaces]
net_ids = [iface['network']['id'] for iface in ifaces]
if networks is None:
networks = self._get_available_networks(context,
instance.project_id,
net_ids, neutron)
# an interface was added/removed from instance.
else:
# Prepare the network ids list for validation purposes
networks_ids = [network['id'] for network in networks]
# Validate that the interface networks don't already exist in networks.
# Though this issue can and should be solved in methods
# that prepare the networks list, this method should have this
# ignore-duplicate-networks/port-ids mechanism to reduce the
# probability of failing to boot the VM.
networks = networks + [
{'id': iface['network']['id'],
'name': iface['network']['label'],
'tenant_id': iface['network']['meta']['tenant_id']}
for iface in ifaces
if _is_not_duplicate(iface['network']['id'],
networks_ids,
"networks",
instance)]
# Include existing interfaces so they are not removed from the db.
# Validate that the interface id is not in the port_ids
port_ids = [iface['id'] for iface in ifaces
if _is_not_duplicate(iface['id'],
port_ids,
"port_ids",
instance)] + port_ids
return networks, port_ids
@refresh_cache
def add_fixed_ip_to_instance(self, context, instance, network_id):
"""Add a fixed IP to the instance from specified network."""
neutron = get_client(context)
search_opts = {'network_id': network_id}
data = neutron.list_subnets(**search_opts)
ipam_subnets = data.get('subnets', [])
if not ipam_subnets:
raise exception.NetworkNotFoundForInstance(
instance_id=instance.uuid)
zone = 'compute:%s' % instance.availability_zone
search_opts = {'device_id': instance.uuid,
'device_owner': zone,
'network_id': network_id}
data = neutron.list_ports(**search_opts)
ports = data['ports']
for p in ports:
for subnet in ipam_subnets:
fixed_ips = p['fixed_ips']
fixed_ips.append({'subnet_id': subnet['id']})
port_req_body = {'port': {'fixed_ips': fixed_ips}}
try:
neutron.update_port(p['id'], port_req_body)
return self._get_instance_nw_info(context, instance)
except Exception as ex:
msg = ("Unable to update port %(portid)s on subnet "
"%(subnet_id)s with failure: %(exception)s")
LOG.debug(msg, {'portid': p['id'],
'subnet_id': subnet['id'],
'exception': ex}, instance=instance)
raise exception.NetworkNotFoundForInstance(
instance_id=instance.uuid)
@refresh_cache
def remove_fixed_ip_from_instance(self, context, instance, address):
"""Remove a fixed IP from the instance."""
neutron = get_client(context)
zone = 'compute:%s' % instance.availability_zone
search_opts = {'device_id': instance.uuid,
'device_owner': zone,
'fixed_ips': 'ip_address=%s' % address}
data = neutron.list_ports(**search_opts)
ports = data['ports']
for p in ports:
fixed_ips = p['fixed_ips']
new_fixed_ips = []
for fixed_ip in fixed_ips:
if fixed_ip['ip_address'] != address:
new_fixed_ips.append(fixed_ip)
port_req_body = {'port': {'fixed_ips': new_fixed_ips}}
try:
neutron.update_port(p['id'], port_req_body)
except Exception as ex:
msg = ("Unable to update port %(portid)s with"
" failure: %(exception)s")
LOG.debug(msg, {'portid': p['id'], 'exception': ex},
instance=instance)
return self._get_instance_nw_info(context, instance)
raise exception.FixedIpNotFoundForInstance(
instance_uuid=instance.uuid, ip=address)
def _get_physnet_tunneled_info(self, context, neutron, net_id):
"""Retrieve detailed network info.
:param context: The request context.
:param neutron: The neutron client object.
:param net_id: The ID of the network to retrieve information for.
:return: A tuple containing the physnet name, if defined, and the
tunneled status of the network. If the network uses multiple
segments, the first segment that defines a physnet value will be
used for the physnet name.
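E.g., illustrative return values::
    ('physnet1', False)  # provider (e.g. VLAN/flat) network on physnet1
    (None, True)         # tunneled (e.g. VXLAN/GRE/Geneve) network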
"""
if self.has_multi_provider_extension(client=neutron):
network = neutron.show_network(net_id,
fields='segments').get('network')
segments = network.get('segments', {})
for net in segments:
# NOTE(vladikr): In general, a "multi-segments" network is a
# combination of L2 segments. The current implementation
# contains vxlan and vlan segments, where only a vlan
# segment will have a physical_network specified, but this may
# change in the future. The purpose of this method
# is to find a first segment that provides a physical network.
# TODO(vladikr): Additional work will be required to handle the
# case of multiple vlan segments associated with different
# physical networks.
physnet_name = net.get('provider:physical_network')
if physnet_name:
return physnet_name, False
# Raising here as at least one segment should
# have a physical network provided.
if segments:
msg = (_("None of the segments of network %s provides a "
"physical_network") % net_id)
raise exception.NovaException(message=msg)
net = neutron.show_network(
net_id, fields=['provider:physical_network',
'provider:network_type']).get('network')
return (net.get('provider:physical_network'),
net.get('provider:network_type') in constants.L3_NETWORK_TYPES)
@staticmethod
def _get_trusted_mode_from_port(port):
"""Returns whether trusted mode is requested
If the port binding does not provide any information about the trusted
status, this function returns None.
"""
value = get_binding_profile(port).get('trusted')
if value is not None:
# This allows the user to specify things like '1' and 'yes' in
# the port binding profile and we can handle it as a boolean.
return strutils.bool_from_string(value)
@staticmethod
def _is_remote_managed(vnic_type):
"""Determine if the port is remote_managed or not by VNIC type.
:param str vnic_type: The VNIC type to assess.
:return: A boolean indicator whether the NIC is remote managed or not.
:rtype: bool
"""
return vnic_type == network_model.VNIC_TYPE_REMOTE_MANAGED
def is_remote_managed_port(self, context, port_id):
"""Determine if a port has a REMOTE_MANAGED VNIC type.
:param context: The request context
:param port_id: The id of the Neutron port
"""
port = self.show_port(context, port_id)['port']
return self._is_remote_managed(
port.get('binding:vnic_type', network_model.VNIC_TYPE_NORMAL)
)
# NOTE(sean-k-mooney): we might want to have this return a
# nova.network.model.VIF object instead in the future.
def _get_port_vnic_info(self, context, neutron, port_id):
"""Retrieve port vNIC info
:param context: The request context
:param neutron: The Neutron client
:param port_id: The id of port to be queried
:return: A tuple of vNIC type, trusted status, network ID, resource
request of the port (if any), the port NUMA affinity policy,
and device_profile.
Trusted status only affects SR-IOV ports and will always be
None for other port types. If no port numa policy is
requested by a port, None will be returned.
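E.g., an illustrative return value for an SR-IOV port with a QoS
minimum bandwidth policy (hypothetical values)::
    ('direct', True, 'net-uuid-1',
     {'resources': {'NET_BW_EGR_KILOBIT_PER_SEC': 10000}},
     'required', None)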
"""
fields = ['binding:vnic_type', constants.BINDING_PROFILE,
'network_id', constants.RESOURCE_REQUEST,
constants.NUMA_POLICY, 'device_profile']
port = self._show_port(
context, port_id, neutron_client=neutron, fields=fields)
network_id = port.get('network_id')
trusted = None
vnic_type = port.get('binding:vnic_type',
network_model.VNIC_TYPE_NORMAL)
if vnic_type in network_model.VNIC_TYPES_SRIOV:
trusted = self._get_trusted_mode_from_port(port)
# NOTE(gibi): Get the port resource_request which may or may not be
# set depending on neutron configuration, e.g. if QoS rules are
# applied to the port/network and the port-resource-request API
# extension is enabled.
resource_request = port.get(constants.RESOURCE_REQUEST, None)
numa_policy = port.get(constants.NUMA_POLICY, None)
device_profile = port.get("device_profile", None)
return (vnic_type, trusted, network_id, resource_request,
numa_policy, device_profile)
def support_create_with_resource_request(self, context):
"""Returns false if neutron is configured with extended resource
request which is not currently supported.
This function is only here temporarily to help mocking this check in
the functional test environment.
"""
return not (self.has_extended_resource_request_extension(context))
def create_resource_requests(
self, context, requested_networks, pci_requests=None,
affinity_policy=None):
"""Retrieve all information for the networks passed at the time of
creating the server.
:param context: The request context.
:param requested_networks: The networks requested for the server.
:type requested_networks: nova.objects.NetworkRequestList
:param pci_requests: The list of PCI requests to which additional PCI
requests created here will be added.
:type pci_requests: nova.objects.InstancePCIRequests
:param affinity_policy: requested pci numa affinity policy
:type affinity_policy: nova.objects.fields.PCINUMAAffinityPolicy
:returns: A three-tuple with an instance of ``objects.NetworkMetadata``
for use by the scheduler or None, a list of RequestGroup
objects representing the resource needs of each requested port, and
a RequestLevelParams object that contains global scheduling
instructions not specific to any of the RequestGroups
"""
if not requested_networks or requested_networks.no_allocate:
return None, [], None
physnets = set()
tunneled = False
neutron = get_client(context, admin=True)
has_extended_resource_request_extension = (
self.has_extended_resource_request_extension(context, neutron))
resource_requests = []
request_level_params = objects.RequestLevelParams()
for request_net in requested_networks:
physnet = None
trusted = None
tunneled_ = False
vnic_type = network_model.VNIC_TYPE_NORMAL
pci_request_id = None
requester_id = None
port_numa_policy = None
if request_net.port_id:
# InstancePCIRequest.requester_id is semantically linked
# to a port with a resource_request.
requester_id = request_net.port_id
(vnic_type, trusted, network_id, resource_request,
port_numa_policy, device_profile) = self._get_port_vnic_info(
context, neutron, request_net.port_id)
physnet, tunneled_ = self._get_physnet_tunneled_info(
context, neutron, network_id)
if vnic_type in network_model.VNIC_TYPES_ACCELERATOR:
# get request groups from cyborg profile
if not device_profile:
err = ('No device profile for port %s.'
% (request_net.port_id))
raise exception.DeviceProfileError(
name=device_profile, msg=err)
cyclient = cyborg.get_client(context)
dp_groups = cyclient.get_device_profile_groups(
device_profile)
dev_num = cyborg.get_device_amount_of_dp_groups(dp_groups)
if dev_num > 1:
err_msg = 'request multiple devices for single port.'
raise exception.DeviceProfileError(name=device_profile,
msg=err_msg)
dp_request_groups = (cyclient.get_device_request_groups(
dp_groups, owner=request_net.port_id))
LOG.debug("device_profile request group(ARQ): %s",
dp_request_groups)
# keep device_profile to avoid get vnic info again
request_net.device_profile = device_profile
resource_requests.extend(dp_request_groups)
if resource_request:
if has_extended_resource_request_extension:
# need to handle the new resource request format
# NOTE(gibi): explicitly orphan the RequestGroup by
# setting context=None as we never intended to save it
# to the DB.
resource_requests.extend(
objects.RequestGroup.from_extended_port_request(
context=None,
port_resource_request=resource_request))
request_level_params.extend_with(
objects.RequestLevelParams.from_port_request(
port_resource_request=resource_request))
else:
# keep supporting the old format of the
# resource_request
# NOTE(gibi): explicitly orphan the RequestGroup by
# setting context=None as we never intended to save it
# to the DB.
resource_requests.append(
objects.RequestGroup.from_port_request(
context=None,
port_uuid=request_net.port_id,
port_resource_request=resource_request))
elif request_net.network_id and not request_net.auto_allocate:
network_id = request_net.network_id
physnet, tunneled_ = self._get_physnet_tunneled_info(
context, neutron, network_id)
# All tunneled traffic must use the same logical NIC so we just
# need to know if there is one or more tunneled networks present.
tunneled = tunneled or tunneled_
# ...conversely, there can be multiple physnets, which will
# generally be mapped to different NICs, and some requested
# networks may use the same physnet. As a result, we need to know
# the *set* of physnets from every network requested
if physnet:
physnets.add(physnet)
if vnic_type in network_model.VNIC_TYPES_SRIOV:
# TODO(moshele): To differentiate between the SR-IOV legacy
# and SR-IOV ovs hardware offload we will leverage the nic
                # feature based scheduling in nova. This means we will need
# libvirt to expose the nic feature. At the moment
# there is a limitation that deployers cannot use both
# SR-IOV modes (legacy and ovs) in the same deployment.
spec = {
pci_request.PCI_NET_TAG: physnet,
# Convert the value to string since tags are compared as
# string values case-insensitively.
pci_request.PCI_REMOTE_MANAGED_TAG:
str(self._is_remote_managed(vnic_type)),
}
dev_type = pci_request.DEVICE_TYPE_FOR_VNIC_TYPE.get(vnic_type)
if dev_type:
spec[pci_request.PCI_DEVICE_TYPE_TAG] = dev_type
if trusted is not None:
# We specifically have requested device on a pool
# with a tag trusted set to true or false. We
# convert the value to string since tags are
# compared in that way.
spec[pci_request.PCI_TRUSTED_TAG] = str(trusted)
request = objects.InstancePCIRequest(
count=1,
spec=[spec],
request_id=uuidutils.generate_uuid(),
requester_id=requester_id)
# NOTE(sean-k-mooney): port NUMA policies take precedence
# over image and flavor policies.
numa_policy = port_numa_policy or affinity_policy
if numa_policy:
request.numa_policy = numa_policy
pci_requests.requests.append(request)
pci_request_id = request.request_id
# Add pci_request_id into the requested network
request_net.pci_request_id = pci_request_id
return (
objects.NetworkMetadata(physnets=physnets, tunneled=tunneled),
resource_requests,
request_level_params
)
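    # Illustrative sketch (not part of the original code): for a trusted
    # SR-IOV port on physnet "physnet2" the InstancePCIRequest built above
    # would carry a spec roughly like
    #     [{'physical_network': 'physnet2',
    #       'remote_managed': 'False',
    #       'trusted': 'True'}]
    # (plus a 'dev_type' tag when DEVICE_TYPE_FOR_VNIC_TYPE maps the vnic
    # type); every value is a string because PCI pool tags are compared as
    # strings.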
def _can_auto_allocate_network(self, context, neutron):
"""Helper method to determine if we can auto-allocate networks
:param context: nova request context
:param neutron: neutron client
:returns: True if it's possible to auto-allocate networks, False
otherwise.
"""
# run the dry-run validation, which will raise a 409 if not ready
try:
neutron.validate_auto_allocated_topology_requirements(
context.project_id)
LOG.debug('Network auto-allocation is available for project '
'%s', context.project_id)
return True
except neutron_client_exc.Conflict as ex:
LOG.debug('Unable to auto-allocate networks. %s',
str(ex))
return False
def _auto_allocate_network(self, instance, neutron):
"""Automatically allocates a network for the given project.
:param instance: create the network for the project that owns this
instance
:param neutron: neutron client
:returns: Details of the network that was created.
:raises: nova.exception.UnableToAutoAllocateNetwork
:raises: nova.exception.NetworkNotFound
"""
project_id = instance.project_id
LOG.debug('Automatically allocating a network for project %s.',
project_id, instance=instance)
try:
topology = neutron.get_auto_allocated_topology(
project_id)['auto_allocated_topology']
except neutron_client_exc.Conflict:
raise exception.UnableToAutoAllocateNetwork(project_id=project_id)
try:
network = neutron.show_network(topology['id'])['network']
except neutron_client_exc.NetworkNotFoundClient:
# This shouldn't happen since we just created the network, but
# handle it anyway.
LOG.error('Automatically allocated network %(network_id)s '
'was not found.', {'network_id': topology['id']},
instance=instance)
raise exception.UnableToAutoAllocateNetwork(project_id=project_id)
LOG.debug('Automatically allocated network: %s', network,
instance=instance)
return network
def _ports_needed_per_instance(self, context, neutron, requested_networks):
# TODO(danms): Remove me when all callers pass an object
if requested_networks and isinstance(requested_networks[0], tuple):
requested_networks = objects.NetworkRequestList.from_tuples(
requested_networks)
ports_needed_per_instance = 0
if (requested_networks is None or len(requested_networks) == 0 or
requested_networks.auto_allocate):
nets = self._get_available_networks(context, context.project_id,
neutron=neutron)
if len(nets) > 1:
# Attaching to more than one network by default doesn't
# make sense, as the order will be arbitrary and the guest OS
# won't know which to configure
msg = _("Multiple possible networks found, use a Network "
"ID to be more specific.")
raise exception.NetworkAmbiguous(msg)
if not nets and (
requested_networks and requested_networks.auto_allocate):
# If there are no networks available to this project and we
# were asked to auto-allocate a network, check to see that we
# can do that first.
LOG.debug('No networks are available for project %s; checking '
'to see if we can automatically allocate a network.',
context.project_id)
if not self._can_auto_allocate_network(context, neutron):
raise exception.UnableToAutoAllocateNetwork(
project_id=context.project_id)
ports_needed_per_instance = 1
else:
net_ids_requested = []
for request in requested_networks:
if request.port_id:
port = self._show_port(context, request.port_id,
neutron_client=neutron)
if port.get('device_id', None):
raise exception.PortInUse(port_id=request.port_id)
deferred_ip = port.get('ip_allocation') == 'deferred'
# NOTE(carl_baldwin) A deferred IP port doesn't have an
# address here. If it fails to get one later when nova
# updates it with host info, Neutron will error which
# raises an exception.
if not deferred_ip and not port.get('fixed_ips'):
raise exception.PortRequiresFixedIP(
port_id=request.port_id)
request.network_id = port['network_id']
else:
ports_needed_per_instance += 1
net_ids_requested.append(request.network_id)
# NOTE(jecarey) There is currently a race condition.
# That is, if you have more than one request for a specific
# fixed IP at the same time then only one will be allocated
# the ip. The fixed IP will be allocated to only one of the
# instances that will run. The second instance will fail on
# spawn. That instance will go into error state.
# TODO(jecarey) Need to address this race condition once we
# have the ability to update mac addresses in Neutron.
if request.address:
# TODO(jecarey) Need to look at consolidating list_port
# calls once able to OR filters.
search_opts = {'network_id': request.network_id,
'fixed_ips': 'ip_address=%s' % (
request.address),
'fields': 'device_id'}
existing_ports = neutron.list_ports(
**search_opts)['ports']
if existing_ports:
i_uuid = existing_ports[0]['device_id']
raise exception.FixedIpAlreadyInUse(
address=request.address,
instance_uuid=i_uuid)
# Now check to see if all requested networks exist
if net_ids_requested:
nets = self._get_available_networks(
context, context.project_id, net_ids_requested,
neutron=neutron)
for net in nets:
if not net.get('subnets'):
raise exception.NetworkRequiresSubnet(
network_uuid=net['id'])
if len(nets) != len(net_ids_requested):
requested_netid_set = set(net_ids_requested)
returned_netid_set = set([net['id'] for net in nets])
lostid_set = requested_netid_set - returned_netid_set
if lostid_set:
                        id_str = ', '.join(lostid_set)
raise exception.NetworkNotFound(network_id=id_str)
return ports_needed_per_instance
def get_requested_resource_for_instance(
self,
context: nova_context.RequestContext,
instance_uuid: str
) -> ty.Tuple[
ty.List['objects.RequestGroup'], 'objects.RequestLevelParams']:
"""Collect resource requests from the ports associated to the instance
:param context: nova request context
:param instance_uuid: The UUID of the instance
:return: A two tuple with a list of RequestGroup objects and a
RequestLevelParams object.
"""
# NOTE(gibi): We need to use an admin client as otherwise a non admin
# initiated resize causes that neutron does not fill the
# resource_request field of the port and this will lead to resource
# allocation issues. See bug 1849695
neutron = get_client(context, admin=True)
# get the ports associated to this instance
data = neutron.list_ports(
device_id=instance_uuid, fields=['id', constants.RESOURCE_REQUEST])
resource_requests = []
request_level_params = objects.RequestLevelParams()
extended_rr = self.has_extended_resource_request_extension(
context, neutron)
for port in data.get('ports', []):
resource_request = port.get(constants.RESOURCE_REQUEST)
if extended_rr and resource_request:
resource_requests.extend(
objects.RequestGroup.from_extended_port_request(
context=None,
port_resource_request=port['resource_request']))
request_level_params.extend_with(
objects.RequestLevelParams.from_port_request(
port_resource_request=resource_request))
else:
# keep supporting the old format of the resource_request
if resource_request:
# NOTE(gibi): explicitly orphan the RequestGroup by setting
# context=None as we never intended to save it to the DB.
resource_requests.append(
objects.RequestGroup.from_port_request(
context=None, port_uuid=port['id'],
port_resource_request=port['resource_request']))
return resource_requests, request_level_params
def validate_networks(self, context, requested_networks, num_instances):
"""Validate that the tenant can use the requested networks.
        Return the number of instances that can be successfully allocated
with the requested network configuration.
"""
LOG.debug('validate_networks() for %s', requested_networks)
neutron = get_client(context)
ports_needed_per_instance = self._ports_needed_per_instance(
context, neutron, requested_networks)
# Note(PhilD): Ideally Nova would create all required ports as part of
# network validation, but port creation requires some details
# from the hypervisor. So we just check the quota and return
# how many of the requested number of instances can be created
if ports_needed_per_instance:
quotas = neutron.show_quota(context.project_id)['quota']
if quotas.get('port', -1) == -1:
# Unlimited Port Quota
return num_instances
# We only need the port count so only ask for ids back.
params = dict(tenant_id=context.project_id, fields=['id'])
ports = neutron.list_ports(**params)['ports']
free_ports = quotas.get('port') - len(ports)
if free_ports < 0:
msg = (_("The number of defined ports: %(ports)d "
"is over the limit: %(quota)d") %
{'ports': len(ports),
'quota': quotas.get('port')})
raise exception.PortLimitExceeded(msg)
ports_needed = ports_needed_per_instance * num_instances
if free_ports >= ports_needed:
return num_instances
else:
return free_ports // ports_needed_per_instance
return num_instances
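    # Worked example (illustrative, not part of the original code): with a
    # port quota of 10, 4 ports already defined and 2 ports needed per
    # instance, free_ports is 6, so a request for 5 instances is trimmed to
    # 6 // 2 == 3 instances by the arithmetic above.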
def _get_instance_uuids_by_ip(self, context, address):
"""Retrieve instance uuids associated with the given IP address.
:returns: A list of dicts containing the uuids keyed by 'instance_uuid'
e.g. [{'instance_uuid': uuid}, ...]
"""
search_opts = {"fixed_ips": 'ip_address=%s' % address}
data = get_client(context).list_ports(**search_opts)
ports = data.get('ports', [])
return [{'instance_uuid': port['device_id']} for port in ports
if port['device_id']]
def _get_port_id_by_fixed_address(self, client,
instance, address):
"""Return port_id from a fixed address."""
zone = 'compute:%s' % instance.availability_zone
search_opts = {'device_id': instance.uuid,
'device_owner': zone}
data = client.list_ports(**search_opts)
ports = data['ports']
port_id = None
for p in ports:
for ip in p['fixed_ips']:
if ip['ip_address'] == address:
port_id = p['id']
break
if not port_id:
raise exception.FixedIpNotFoundForAddress(address=address)
return port_id
@refresh_cache
def associate_floating_ip(self, context, instance,
floating_address, fixed_address,
affect_auto_assigned=False):
"""Associate a floating IP with a fixed IP."""
# Note(amotoki): 'affect_auto_assigned' is not respected
        # since it is not used anywhere in nova code and I could not
        # find why this parameter exists.
client = get_client(context)
port_id = self._get_port_id_by_fixed_address(client, instance,
fixed_address)
fip = self._get_floating_ip_by_address(client, floating_address)
param = {'port_id': port_id,
'fixed_ip_address': fixed_address}
try:
client.update_floatingip(fip['id'], {'floatingip': param})
except neutron_client_exc.Conflict as e:
raise exception.FloatingIpAssociateFailed(str(e))
# If the floating IP was associated with another server, try to refresh
# the cache for that instance to avoid a window of time where multiple
# servers in the API say they are using the same floating IP.
if fip['port_id']:
# Trap and log any errors from
# _update_inst_info_cache_for_disassociated_fip but not let them
# raise back up to the caller since this refresh is best effort.
try:
self._update_inst_info_cache_for_disassociated_fip(
context, instance, client, fip)
except Exception as e:
LOG.warning('An error occurred while trying to refresh the '
'network info cache for an instance associated '
'with port %s. Error: %s', fip['port_id'], e)
def _update_inst_info_cache_for_disassociated_fip(self, context,
instance, client, fip):
"""Update the network info cache when a floating IP is re-assigned.
:param context: nova auth RequestContext
:param instance: The instance to which the floating IP is now assigned
:param client: ClientWrapper instance for using the Neutron API
        :param fip: dict for the floating IP that was re-assigned where
                    the ``port_id`` value represents the port that was
associated with another server.
"""
port = self._show_port(context, fip['port_id'],
neutron_client=client)
orig_instance_uuid = port['device_id']
msg_dict = dict(address=fip['floating_ip_address'],
instance_id=orig_instance_uuid)
LOG.info('re-assign floating IP %(address)s from '
'instance %(instance_id)s', msg_dict,
instance=instance)
orig_instance = self._get_instance_by_uuid_using_api_db(
context, orig_instance_uuid)
if orig_instance:
# purge cached nw info for the original instance; pass the
# context from the instance in case we found it in another cell
update_instance_cache_with_nw_info(
self, orig_instance._context, orig_instance)
else:
            # Leave a breadcrumb about not being able to refresh the
            # cache for the original instance.
LOG.info('Unable to refresh the network info cache for '
'instance %s after disassociating floating IP %s. '
'If the instance still exists, its info cache may '
'be healed automatically.',
orig_instance_uuid, fip['id'])
@staticmethod
def _get_instance_by_uuid_using_api_db(context, instance_uuid):
"""Look up the instance by UUID
This method is meant to be used sparingly since it tries to find
the instance by UUID in the cell-targeted context. If the instance
is not found, this method will try to determine if it's not found
because it is deleted or if it is just in another cell. Therefore
it assumes to have access to the API database and should only be
called from methods that are used in the control plane services.
:param context: cell-targeted nova auth RequestContext
:param instance_uuid: UUID of the instance to find
:returns: Instance object if the instance was found, else None.
"""
try:
return objects.Instance.get_by_uuid(context, instance_uuid)
except exception.InstanceNotFound:
# The instance could be deleted or it could be in another cell.
# To determine if its in another cell, check the instance
# mapping in the API DB.
try:
inst_map = objects.InstanceMapping.get_by_instance_uuid(
context, instance_uuid)
except exception.InstanceMappingNotFound:
# The instance is gone so just return.
return
# We have the instance mapping, look up the instance in the
# cell the instance is in.
with nova_context.target_cell(
context, inst_map.cell_mapping) as cctxt:
try:
return objects.Instance.get_by_uuid(cctxt, instance_uuid)
except exception.InstanceNotFound:
# Alright it's really gone.
return
def get_all(self, context):
"""Get all networks for client."""
client = get_client(context)
return client.list_networks().get('networks')
def get(self, context, network_uuid):
"""Get specific network for client."""
client = get_client(context)
try:
return client.show_network(network_uuid).get('network') or {}
except neutron_client_exc.NetworkNotFoundClient:
raise exception.NetworkNotFound(network_id=network_uuid)
def get_fixed_ip_by_address(self, context, address):
"""Return instance uuids given an address."""
uuid_maps = self._get_instance_uuids_by_ip(context, address)
if len(uuid_maps) == 1:
return uuid_maps[0]
elif not uuid_maps:
raise exception.FixedIpNotFoundForAddress(address=address)
else:
raise exception.FixedIpAssociatedWithMultipleInstances(
address=address)
def get_floating_ip(self, context, id):
"""Return floating IP object given the floating IP id."""
client = get_client(context)
try:
fip = client.show_floatingip(id)['floatingip']
except neutron_client_exc.NeutronClientException as e:
if e.status_code == 404:
raise exception.FloatingIpNotFound(id=id)
with excutils.save_and_reraise_exception():
LOG.exception('Unable to access floating IP %s', id)
# retrieve and cache the network details now since many callers need
# the network name which isn't present in the response from neutron
network_uuid = fip['floating_network_id']
try:
fip['network_details'] = client.show_network(
network_uuid)['network']
except neutron_client_exc.NetworkNotFoundClient:
raise exception.NetworkNotFound(network_id=network_uuid)
# ...and retrieve the port details for the same reason, but only if
# they're not already there because the fip-port-details extension is
# present
if not self.has_fip_port_details_extension(client=client):
port_id = fip['port_id']
try:
fip['port_details'] = client.show_port(
port_id)['port']
except neutron_client_exc.PortNotFoundClient:
# it's possible to create floating IPs without a port
fip['port_details'] = None
return fip
def get_floating_ip_by_address(self, context, address):
"""Return a floating IP given an address."""
client = get_client(context)
fip = self._get_floating_ip_by_address(client, address)
# retrieve and cache the network details now since many callers need
# the network name which isn't present in the response from neutron
network_uuid = fip['floating_network_id']
try:
fip['network_details'] = client.show_network(
network_uuid)['network']
except neutron_client_exc.NetworkNotFoundClient:
raise exception.NetworkNotFound(network_id=network_uuid)
# ...and retrieve the port details for the same reason, but only if
# they're not already there because the fip-port-details extension is
# present
if not self.has_fip_port_details_extension(client=client):
port_id = fip['port_id']
try:
fip['port_details'] = client.show_port(
port_id)['port']
except neutron_client_exc.PortNotFoundClient:
# it's possible to create floating IPs without a port
fip['port_details'] = None
return fip
def get_floating_ip_pools(self, context):
"""Return floating IP pools a.k.a. external networks."""
client = get_client(context)
data = client.list_networks(**{constants.NET_EXTERNAL: True})
return data['networks']
def get_floating_ips_by_project(self, context):
client = get_client(context)
project_id = context.project_id
fips = self._safe_get_floating_ips(client, tenant_id=project_id)
if not fips:
return fips
# retrieve and cache the network details now since many callers need
# the network name which isn't present in the response from neutron
networks = {net['id']: net for net in self._get_available_networks(
context, project_id, [fip['floating_network_id'] for fip in fips],
client)}
for fip in fips:
network_uuid = fip['floating_network_id']
if network_uuid not in networks:
raise exception.NetworkNotFound(network_id=network_uuid)
fip['network_details'] = networks[network_uuid]
# ...and retrieve the port details for the same reason, but only if
# they're not already there because the fip-port-details extension is
# present
if not self.has_fip_port_details_extension(client=client):
ports = {port['id']: port for port in client.list_ports(
**{'tenant_id': project_id})['ports']}
for fip in fips:
port_id = fip['port_id']
if port_id in ports:
fip['port_details'] = ports[port_id]
else:
# it's possible to create floating IPs without a port
fip['port_details'] = None
return fips
def get_instance_id_by_floating_address(self, context, address):
"""Return the instance id a floating IP's fixed IP is allocated to."""
client = get_client(context)
fip = self._get_floating_ip_by_address(client, address)
if not fip['port_id']:
return None
try:
port = self._show_port(context, fip['port_id'],
neutron_client=client)
except exception.PortNotFound:
# NOTE: Here is a potential race condition between _show_port() and
# _get_floating_ip_by_address(). fip['port_id'] shows a port which
# is the server instance's. At _get_floating_ip_by_address(),
# Neutron returns the list which includes the instance. Just after
# that, the deletion of the instance happens and Neutron returns
# 404 on _show_port().
LOG.debug('The port(%s) is not found', fip['port_id'])
return None
return port['device_id']
def get_vifs_by_instance(self, context, instance):
return objects.VirtualInterfaceList.get_by_instance_uuid(context,
instance.uuid)
def _get_floating_ip_pool_id_by_name_or_id(self, client, name_or_id):
search_opts = {constants.NET_EXTERNAL: True, 'fields': 'id'}
if uuidutils.is_uuid_like(name_or_id):
search_opts.update({'id': name_or_id})
else:
search_opts.update({'name': name_or_id})
data = client.list_networks(**search_opts)
nets = data['networks']
if len(nets) == 1:
return nets[0]['id']
elif len(nets) == 0:
raise exception.FloatingIpPoolNotFound()
else:
msg = (_("Multiple floating IP pools matches found for name '%s'")
% name_or_id)
raise exception.NovaException(message=msg)
def allocate_floating_ip(self, context, pool=None):
"""Add a floating IP to a project from a pool."""
client = get_client(context)
pool = pool or CONF.neutron.default_floating_pool
pool_id = self._get_floating_ip_pool_id_by_name_or_id(client, pool)
param = {'floatingip': {'floating_network_id': pool_id}}
try:
fip = client.create_floatingip(param)
except (neutron_client_exc.IpAddressGenerationFailureClient,
neutron_client_exc.ExternalIpAddressExhaustedClient) as e:
raise exception.NoMoreFloatingIps(str(e))
except neutron_client_exc.OverQuotaClient as e:
raise exception.FloatingIpLimitExceeded(str(e))
except neutron_client_exc.BadRequest as e:
raise exception.FloatingIpBadRequest(str(e))
return fip['floatingip']['floating_ip_address']
def _safe_get_floating_ips(self, client, **kwargs):
"""Get floating IP gracefully handling 404 from Neutron."""
try:
return client.list_floatingips(**kwargs)['floatingips']
# If a neutron plugin does not implement the L3 API a 404 from
# list_floatingips will be raised.
except neutron_client_exc.NotFound:
return []
except neutron_client_exc.NeutronClientException as e:
# bug/1513879 neutron client is currently using
# NeutronClientException when there is no L3 API
if e.status_code == 404:
return []
with excutils.save_and_reraise_exception():
LOG.exception('Unable to access floating IP for %s',
', '.join(['%s %s' % (k, v)
for k, v in kwargs.items()]))
def _get_floating_ip_by_address(self, client, address):
"""Get floating IP from floating IP address."""
if not address:
raise exception.FloatingIpNotFoundForAddress(address=address)
fips = self._safe_get_floating_ips(client, floating_ip_address=address)
if len(fips) == 0:
raise exception.FloatingIpNotFoundForAddress(address=address)
elif len(fips) > 1:
raise exception.FloatingIpMultipleFoundForAddress(address=address)
return fips[0]
def _get_floating_ips_by_fixed_and_port(self, client, fixed_ip, port):
"""Get floating IPs from fixed IP and port."""
return self._safe_get_floating_ips(client, fixed_ip_address=fixed_ip,
port_id=port)
def release_floating_ip(self, context, address,
affect_auto_assigned=False):
"""Remove a floating IP with the given address from a project."""
# Note(amotoki): We cannot handle a case where multiple pools
# have overlapping IP address range. In this case we cannot use
# 'address' as a unique key.
# This is a limitation of the current nova.
# Note(amotoki): 'affect_auto_assigned' is not respected
        # since it is not used anywhere in nova code and I could not
        # find why this parameter exists.
self._release_floating_ip(context, address)
def disassociate_and_release_floating_ip(self, context, instance,
floating_ip):
"""Removes (deallocates) and deletes the floating IP.
This api call was added to allow this to be done in one operation
if using neutron.
"""
@refresh_cache
def _release_floating_ip_and_refresh_cache(self, context, instance,
floating_ip):
self._release_floating_ip(
context, floating_ip['floating_ip_address'],
raise_if_associated=False)
if instance:
_release_floating_ip_and_refresh_cache(self, context, instance,
floating_ip)
else:
self._release_floating_ip(
context, floating_ip['floating_ip_address'],
raise_if_associated=False)
def _release_floating_ip(self, context, address,
raise_if_associated=True):
client = get_client(context)
fip = self._get_floating_ip_by_address(client, address)
if raise_if_associated and fip['port_id']:
raise exception.FloatingIpAssociated(address=address)
try:
client.delete_floatingip(fip['id'])
except neutron_client_exc.NotFound:
raise exception.FloatingIpNotFoundForAddress(
address=address
)
@refresh_cache
def disassociate_floating_ip(self, context, instance, address,
affect_auto_assigned=False):
"""Disassociate a floating IP from the instance."""
# Note(amotoki): 'affect_auto_assigned' is not respected
        # since it is not used anywhere in nova code and I could not
        # find why this parameter exists.
client = get_client(context)
fip = self._get_floating_ip_by_address(client, address)
client.update_floatingip(fip['id'], {'floatingip': {'port_id': None}})
def migrate_instance_start(self, context, instance, migration):
"""Start to migrate the network of an instance.
If the instance has port bindings on the destination compute host,
they are activated in this method which will atomically change the
source compute host port binding to inactive and also change the port
"binding:host_id" attribute to the destination host.
If there are no binding resources for the attached ports on the given
destination host, this method is a no-op.
:param context: The user request context.
:param instance: The instance being migrated.
:param migration: dict with required keys::
"source_compute": The name of the source compute host.
"dest_compute": The name of the destination compute host.
:raises: nova.exception.PortBindingActivationFailed if any port binding
activation fails
"""
if not self.has_port_binding_extension(context):
# If neutron isn't new enough yet for the port "binding-extended"
            # API extension, we just no-op. The port binding host will
            # be updated in migrate_instance_finish, which is functionally OK,
# it's just not optimal.
LOG.debug('Neutron is not new enough to perform early destination '
'host port binding activation. Port bindings will be '
'updated later.', instance=instance)
return
client = get_client(context, admin=True)
dest_host = migration['dest_compute']
for vif in instance.get_network_info():
# Not all compute migration flows use the port binding-extended
# API yet, so first check to see if there is a binding for the
# port and destination host.
try:
binding = client.show_port_binding(
vif['id'], dest_host
)['binding']
except neutron_client_exc.NeutronClientException as exc:
if exc.status_code != 404:
# We don't raise an exception here because we assume that
# port bindings will be updated correctly when
# migrate_instance_finish runs
LOG.error(
'Unexpected error trying to get binding info '
'for port %s and destination host %s.',
vif['id'], dest_host, exc_info=True)
continue
# ...but if there is no port binding record for the destination
# host, we can safely assume none of the ports attached to the
# instance are using the binding-extended API in this flow and
# exit early.
return
if binding['status'] == 'ACTIVE':
# We might be racing with another thread that's handling
# post-migrate operations and already activated the port
# binding for the destination host.
LOG.debug(
'Port %s binding to destination host %s is already ACTIVE',
vif['id'], dest_host, instance=instance)
continue
try:
# This is a bit weird in that we don't PUT and update the
# status to ACTIVE, it's more like a POST action method in the
# compute API.
client.activate_port_binding(vif['id'], dest_host)
LOG.debug(
'Activated binding for port %s and host %s',
vif['id'], dest_host)
except neutron_client_exc.NeutronClientException as exc:
# A 409 means the port binding is already active, which
# shouldn't happen if the caller is doing things in the correct
# order.
if exc.status_code == 409:
LOG.warning(
'Binding for port %s and host %s is already active',
vif['id'], dest_host, exc_info=True)
continue
# Log the details, raise an exception.
LOG.error(
'Unexpected error trying to activate binding '
'for port %s and host %s.',
vif['id'], dest_host, exc_info=True)
raise exception.PortBindingActivationFailed(
port_id=vif['id'], host=dest_host)
# TODO(mriedem): Do we need to call
# _clear_migration_port_profile? migrate_instance_finish
# would normally take care of clearing the "migrating_to"
# attribute on each port when updating the port's
# binding:host_id to point to the destination host.
def migrate_instance_finish(
self, context, instance, migration, provider_mappings):
"""Finish migrating the network of an instance.
:param context: nova auth request context
:param instance: Instance object being migrated
:param migration: Migration object for the operation; used to determine
the phase of the migration which dictates what to do with claimed
PCI devices for SR-IOV ports
:param provider_mappings: a dict of list of resource provider uuids
keyed by port uuid
"""
self._update_port_binding_for_instance(
context, instance, migration.dest_compute, migration=migration,
provider_mappings=provider_mappings)
def _nw_info_get_ips(self, client, port):
network_IPs = []
for fixed_ip in port['fixed_ips']:
fixed = network_model.FixedIP(address=fixed_ip['ip_address'])
floats = self._get_floating_ips_by_fixed_and_port(
client, fixed_ip['ip_address'], port['id'])
for ip in floats:
fip = network_model.IP(address=ip['floating_ip_address'],
type='floating')
fixed.add_floating_ip(fip)
network_IPs.append(fixed)
return network_IPs
def _nw_info_get_subnets(self, context, port, network_IPs, client=None):
subnets = self._get_subnets_from_port(context, port, client)
for subnet in subnets:
subnet['ips'] = [fixed_ip for fixed_ip in network_IPs
if fixed_ip.is_in_subnet(subnet)]
return subnets
def _nw_info_build_network(self, context, port, networks, subnets):
# TODO(stephenfin): Pass in an existing admin client if available.
neutron = get_client(context, admin=True)
network_name = None
network_mtu = None
for net in networks:
if port['network_id'] == net['id']:
network_name = net['name']
tenant_id = net['tenant_id']
network_mtu = net.get('mtu')
break
else:
tenant_id = port['tenant_id']
LOG.warning("Network %(id)s not matched with the tenants "
"network! The ports tenant %(tenant_id)s will be "
"used.",
{'id': port['network_id'], 'tenant_id': tenant_id})
bridge = None
ovs_interfaceid = None
# Network model metadata
should_create_bridge = None
vif_type = port.get('binding:vif_type')
port_details = port.get('binding:vif_details', {})
if vif_type in [network_model.VIF_TYPE_OVS,
network_model.VIF_TYPE_AGILIO_OVS]:
bridge = port_details.get(network_model.VIF_DETAILS_BRIDGE_NAME,
CONF.neutron.ovs_bridge)
ovs_interfaceid = port['id']
elif vif_type == network_model.VIF_TYPE_BRIDGE:
bridge = port_details.get(network_model.VIF_DETAILS_BRIDGE_NAME,
"brq" + port['network_id'])
should_create_bridge = True
elif vif_type == network_model.VIF_TYPE_DVS:
# The name of the DVS port group will contain the neutron
# network id
bridge = port['network_id']
elif (vif_type == network_model.VIF_TYPE_VHOSTUSER and
port_details.get(network_model.VIF_DETAILS_VHOSTUSER_OVS_PLUG,
False)):
bridge = port_details.get(network_model.VIF_DETAILS_BRIDGE_NAME,
CONF.neutron.ovs_bridge)
ovs_interfaceid = port['id']
elif (vif_type == network_model.VIF_TYPE_VHOSTUSER and
port_details.get(network_model.VIF_DETAILS_VHOSTUSER_FP_PLUG,
False)):
bridge = port_details.get(network_model.VIF_DETAILS_BRIDGE_NAME,
"brq" + port['network_id'])
# Prune the bridge name if necessary. For the DVS this is not done
# as the bridge is a '<network-name>-<network-UUID>'.
if bridge is not None and vif_type != network_model.VIF_TYPE_DVS:
bridge = bridge[:network_model.NIC_NAME_LEN]
physnet, tunneled = self._get_physnet_tunneled_info(
context, neutron, port['network_id'])
network = network_model.Network(
id=port['network_id'],
bridge=bridge,
injected=CONF.flat_injected,
label=network_name,
tenant_id=tenant_id,
mtu=network_mtu,
physical_network=physnet,
tunneled=tunneled
)
network['subnets'] = subnets
if should_create_bridge is not None:
network['should_create_bridge'] = should_create_bridge
return network, ovs_interfaceid
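    # Illustrative sketch (not part of the original code): for a linuxbridge
    # port on network 14b2a53b-... the bridge computed above starts as
    # "brq14b2a53b-..." and is then pruned to NIC_NAME_LEN characters,
    # e.g. "brq14b2a53b-0c", matching the device name the Neutron agent
    # creates on the host.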
def _get_preexisting_port_ids(self, instance):
"""Retrieve the preexisting ports associated with the given instance.
These ports were not created by nova and hence should not be
deallocated upon instance deletion.
"""
net_info = instance.get_network_info()
if not net_info:
LOG.debug('Instance cache missing network info.',
instance=instance)
return [vif['id'] for vif in net_info
if vif.get('preserve_on_delete')]
def _build_vif_model(self, context, client, current_neutron_port,
networks, preexisting_port_ids):
"""Builds a ``nova.network.model.VIF`` object based on the parameters
and current state of the port in Neutron.
:param context: Request context.
:param client: Neutron client.
:param current_neutron_port: The current state of a Neutron port
from which to build the VIF object model.
:param networks: List of dicts which represent Neutron networks
associated with the ports currently attached to a given server
instance.
:param preexisting_port_ids: List of IDs of ports attached to a
given server instance which Nova did not create and therefore
should not delete when the port is detached from the server.
:return: nova.network.model.VIF object which represents a port in the
instance network info cache.
"""
vif_active = False
if (current_neutron_port['admin_state_up'] is False or
current_neutron_port['status'] == 'ACTIVE'):
vif_active = True
network_IPs = self._nw_info_get_ips(client,
current_neutron_port)
subnets = self._nw_info_get_subnets(context,
current_neutron_port,
network_IPs, client)
devname = "tap" + current_neutron_port['id']
devname = devname[:network_model.NIC_NAME_LEN]
network, ovs_interfaceid = (
self._nw_info_build_network(context, current_neutron_port,
networks, subnets))
preserve_on_delete = (current_neutron_port['id'] in
preexisting_port_ids)
return network_model.VIF(
id=current_neutron_port['id'],
address=current_neutron_port['mac_address'],
network=network,
vnic_type=current_neutron_port.get('binding:vnic_type',
network_model.VNIC_TYPE_NORMAL),
type=current_neutron_port.get('binding:vif_type'),
profile=get_binding_profile(current_neutron_port),
details=current_neutron_port.get('binding:vif_details'),
ovs_interfaceid=ovs_interfaceid,
devname=devname,
active=vif_active,
preserve_on_delete=preserve_on_delete,
delegate_create=True,
)
def _build_network_info_model(self, context, instance, networks=None,
port_ids=None, admin_client=None,
preexisting_port_ids=None,
refresh_vif_id=None, force_refresh=False):
"""Return list of ordered VIFs attached to instance.
:param context: Request context.
:param instance: Instance we are returning network info for.
:param networks: List of networks being attached to an instance.
If value is None this value will be populated
from the existing cached value.
:param port_ids: List of port_ids that are being attached to an
instance in order of attachment. If value is None
this value will be populated from the existing
cached value.
:param admin_client: A neutron client for the admin context.
:param preexisting_port_ids: List of port_ids that nova didn't
                        allocate and therefore shouldn't be deleted when
an instance is de-allocated. Supplied list will
be added to the cached list of preexisting port
IDs for this instance.
:param refresh_vif_id: Optional port ID to refresh within the existing
cache rather than the entire cache. This can be
triggered via a "network-changed" server external event
from Neutron.
:param force_refresh: If ``networks`` and ``port_ids`` are both None,
by default the instance.info_cache will be used to
populate the network info. Pass ``True`` to force
collection of ports and networks from neutron directly.
"""
search_opts = {'tenant_id': instance.project_id,
'device_id': instance.uuid, }
if admin_client is None:
client = get_client(context, admin=True)
else:
client = admin_client
data = client.list_ports(**search_opts)
current_neutron_ports = data.get('ports', [])
if preexisting_port_ids is None:
preexisting_port_ids = []
preexisting_port_ids = set(
preexisting_port_ids + self._get_preexisting_port_ids(instance))
current_neutron_port_map = {}
for current_neutron_port in current_neutron_ports:
current_neutron_port_map[current_neutron_port['id']] = (
current_neutron_port)
# Figure out what kind of operation we're processing. If we're given
# a single port to refresh then we try to optimize and update just the
# information for that VIF in the existing cache rather than try to
# rebuild the entire thing.
if refresh_vif_id is not None:
            # TODO(mriedem): Consider pulling this out into its own method.
nw_info = instance.get_network_info()
if nw_info:
current_neutron_port = current_neutron_port_map.get(
refresh_vif_id)
if current_neutron_port:
# Get the network for the port.
networks = self._get_available_networks(
context, instance.project_id,
[current_neutron_port['network_id']], client)
# Build the VIF model given the latest port information.
refreshed_vif = self._build_vif_model(
context, client, current_neutron_port, networks,
preexisting_port_ids)
for index, vif in enumerate(nw_info):
if vif['id'] == refresh_vif_id:
# Update the existing entry.
nw_info[index] = refreshed_vif
LOG.debug('Updated VIF entry in instance network '
'info cache for port %s.',
refresh_vif_id, instance=instance)
break
else:
# If it wasn't in the existing cache, add it.
nw_info.append(refreshed_vif)
LOG.debug('Added VIF to instance network info cache '
'for port %s.', refresh_vif_id,
instance=instance)
else:
# This port is no longer associated with the instance, so
# simply remove it from the nw_info cache.
for index, vif in enumerate(nw_info):
if vif['id'] == refresh_vif_id:
LOG.info('Port %s from network info_cache is no '
'longer associated with instance in '
'Neutron. Removing from network '
'info_cache.', refresh_vif_id,
instance=instance)
del nw_info[index]
break
return nw_info
# else there is no existing cache and we need to build it
# Determine if we're doing a full refresh (_heal_instance_info_cache)
# or if we are refreshing because we have attached/detached a port.
        # TODO(mriedem): we should leverage refresh_vif_id in the latter case
# since we are unnecessarily rebuilding the entire cache for one port
nw_info_refresh = networks is None and port_ids is None
if nw_info_refresh and force_refresh:
# Use the current set of ports from neutron rather than the cache.
port_ids = self._get_ordered_port_list(context, instance,
current_neutron_ports)
net_ids = [
current_neutron_port_map.get(port_id, {}).get('network_id')
for port_id in port_ids]
# This is copied from _gather_port_ids_and_networks.
networks = self._get_available_networks(
context, instance.project_id, net_ids, client)
else:
# We are refreshing the full cache using the existing cache rather
# than what is currently in neutron.
networks, port_ids = self._gather_port_ids_and_networks(
context, instance, networks, port_ids, client)
nw_info = network_model.NetworkInfo()
for port_id in port_ids:
current_neutron_port = current_neutron_port_map.get(port_id)
if current_neutron_port:
vif = self._build_vif_model(
context, client, current_neutron_port, networks,
preexisting_port_ids)
nw_info.append(vif)
elif nw_info_refresh:
LOG.info('Port %s from network info_cache is no '
'longer associated with instance in Neutron. '
'Removing from network info_cache.', port_id,
instance=instance)
return nw_info
def _get_ordered_port_list(self, context, instance, current_neutron_ports):
"""Returns ordered port list using nova virtual_interface data."""
# a dict, keyed by port UUID, of the port's "index"
# so that we can order the returned port UUIDs by the
# original insertion order followed by any newly-attached
# ports
port_uuid_to_index_map = {}
port_order_list = []
ports_without_order = []
# Get set of ports from nova vifs
vifs = self.get_vifs_by_instance(context, instance)
for port in current_neutron_ports:
# NOTE(mjozefcz): For each port check if we have its index from
            # nova virtual_interfaces objects. If not, it seems to be a new
            # port, so add it at the end of the list.
# Find port index if it was attached before.
for vif in vifs:
if vif.uuid == port['id']:
port_uuid_to_index_map[port['id']] = vif.id
break
if port['id'] not in port_uuid_to_index_map:
                # Assume that it's a new port and add it to the end of the
                # port list.
ports_without_order.append(port['id'])
        # Sort the created port order_list by the given index.
port_order_list = sorted(port_uuid_to_index_map,
key=lambda k: port_uuid_to_index_map[k])
# Add ports without order to the end of list
port_order_list.extend(ports_without_order)
return port_order_list
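    # Worked example (illustrative, not part of the original code): if the
    # nova VIFs give indexes {'port-a': 2, 'port-b': 1} and Neutron also
    # reports an unknown 'port-c', the method returns
    #     ['port-b', 'port-a', 'port-c']
    # i.e. known ports in their original attachment order, then new ports.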
def _get_subnets_from_port(self, context, port, client=None):
"""Return the subnets for a given port."""
fixed_ips = port['fixed_ips']
# No fixed_ips for the port means there is no subnet associated
# with the network the port is created on.
# Since list_subnets(id=[]) returns all subnets visible for the
        # current tenant, returned subnets may contain subnets which are not
# related to the port. To avoid this, the method returns here.
if not fixed_ips:
return []
if not client:
client = get_client(context)
search_opts = {'id': list(set(ip['subnet_id'] for ip in fixed_ips))}
data = client.list_subnets(**search_opts)
ipam_subnets = data.get('subnets', [])
subnets = []
for subnet in ipam_subnets:
subnet_dict = {'cidr': subnet['cidr'],
'gateway': network_model.IP(
address=subnet['gateway_ip'],
type='gateway'),
}
if subnet.get('ipv6_address_mode'):
subnet_dict['ipv6_address_mode'] = subnet['ipv6_address_mode']
# attempt to populate DHCP server field
dhcp_search_opts = {
'network_id': subnet['network_id'],
'device_owner': 'network:dhcp'}
data = client.list_ports(**dhcp_search_opts)
dhcp_ports = data.get('ports', [])
for p in dhcp_ports:
for ip_pair in p['fixed_ips']:
if ip_pair['subnet_id'] == subnet['id']:
subnet_dict['dhcp_server'] = ip_pair['ip_address']
break
# NOTE(arnaudmorin): If enable_dhcp is set on subnet, but, for
# some reason neutron did not have any DHCP port yet, we still
# want the network_info to be populated with a valid dhcp_server
# value. This is mostly useful for the metadata API (which is
# relying on this value to give network_data to the instance).
#
# This will also help some providers which are using external
# DHCP servers not handled by neutron.
# In this case, neutron will never create any DHCP port in the
# subnet.
#
# Also note that we cannot set the value to None because then the
# value would be discarded by the metadata API.
# So the subnet gateway will be used as fallback.
if subnet.get('enable_dhcp') and 'dhcp_server' not in subnet_dict:
subnet_dict['dhcp_server'] = subnet['gateway_ip']
subnet_object = network_model.Subnet(**subnet_dict)
for dns in subnet.get('dns_nameservers', []):
subnet_object.add_dns(
network_model.IP(address=dns, type='dns'))
for route in subnet.get('host_routes', []):
subnet_object.add_route(
network_model.Route(cidr=route['destination'],
gateway=network_model.IP(
address=route['nexthop'],
type='gateway')))
subnets.append(subnet_object)
return subnets
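    # Illustrative sketch (not part of the original code): a returned subnet
    # is a network_model.Subnet that might render roughly as
    #     Subnet(cidr='192.168.1.0/24',
    #            gateway=IP(address='192.168.1.1', type='gateway'),
    #            dhcp_server='192.168.1.2',
    #            dns=[IP(address='8.8.8.8', type='dns')],
    #            routes=[...])
    # with dhcp_server falling back to the gateway when DHCP is enabled but
    # no DHCP port exists yet.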
def setup_instance_network_on_host(
self, context, instance, host, migration=None,
provider_mappings=None):
"""Setup network for specified instance on host.
:param context: The request context.
:param instance: nova.objects.instance.Instance object.
:param host: The host which network should be setup for instance.
:param migration: The migration object if the instance is being
tracked with a migration.
:param provider_mappings: a dict of lists of resource provider uuids
keyed by port uuid
"""
self._update_port_binding_for_instance(
context, instance, host, migration, provider_mappings)
def cleanup_instance_network_on_host(self, context, instance, host):
"""Cleanup network for specified instance on host.
Port bindings for the given host are deleted. The ports associated
with the instance via the port device_id field are left intact.
:param context: The user request context.
:param instance: Instance object with the associated ports
:param host: host from which to delete port bindings
:raises: PortBindingDeletionFailed if port binding deletion fails.
"""
# First check to see if the port binding extension is supported.
client = get_client(context)
if not self.has_port_binding_extension(client=client):
LOG.info("Neutron extension '%s' is not supported; not cleaning "
"up port bindings for host %s.",
constants.PORT_BINDING_EXTENDED, host, instance=instance)
return
# Now get the ports associated with the instance. We go directly to
# neutron rather than rely on the info cache just like
# setup_networks_on_host.
search_opts = {'device_id': instance.uuid,
'tenant_id': instance.project_id,
'fields': ['id']} # we only need the port id
data = self.list_ports(context, **search_opts)
self._delete_port_bindings(context, data['ports'], host)
def _get_pci_mapping_for_migration(self, instance, migration):
if not instance.migration_context:
return {}
# In case of revert, swap old and new devices to
# update the ports back to the original devices.
revert = (migration and
migration.get('status') == 'reverted')
return instance.migration_context.get_pci_mapping_for_migration(revert)
def _get_port_pci_slot(self, context, instance, port):
"""Find the PCI address of the device corresponding to the port.
Assumes the port is an SRIOV one.
:param context: The request context.
:param instance: The instance to which the port is attached.
:param port: The Neutron port, as obtained from the Neutron API
JSON form.
:return: The PCI address as a string, or None if unable to find.
"""
# Find the port's PCIRequest, or return None
for r in instance.pci_requests.requests:
if r.requester_id == port['id']:
request = r
break
else:
LOG.debug('No PCI request found for port %s', port['id'],
instance=instance)
return None
# Find the request's device, or return None
for d in instance.pci_devices:
if d.request_id == request.request_id:
device = d
break
else:
LOG.debug('No PCI device found for request %s',
request.request_id, instance=instance)
return None
# Return the device's PCI address
return device.address
def _update_port_binding_for_instance(
self, context, instance, host, migration=None,
provider_mappings=None):
neutron = get_client(context, admin=True)
search_opts = {'device_id': instance.uuid,
'tenant_id': instance.project_id}
data = neutron.list_ports(**search_opts)
port_updates = []
ports = data['ports']
FAILED_VIF_TYPES = (network_model.VIF_TYPE_UNBOUND,
network_model.VIF_TYPE_BINDING_FAILED)
for p in ports:
updates = {}
binding_profile = get_binding_profile(p)
# We need to update the port binding if the host has changed or if
# the binding is clearly wrong due to previous lost messages.
vif_type = p.get('binding:vif_type')
if (p.get(constants.BINDING_HOST_ID) != host or
vif_type in FAILED_VIF_TYPES):
updates[constants.BINDING_HOST_ID] = host
# If the host changed, the AZ could have also changed so we
# need to update the device_owner.
updates['device_owner'] = (
'compute:%s' % instance.availability_zone)
# NOTE: Before updating the port binding make sure we
# remove the pre-migration status from the binding profile
if binding_profile.get(constants.MIGRATING_ATTR):
del binding_profile[constants.MIGRATING_ATTR]
updates[constants.BINDING_PROFILE] = binding_profile
# Update port with newly allocated PCI devices. Even if the
# resize is happening on the same host, a new PCI device can be
# allocated. Note that this only needs to happen if a migration
# is in progress such as in a resize / migrate. It is possible
# that this function is called without a migration object, such
# as in an unshelve operation.
vnic_type = p.get('binding:vnic_type')
if vnic_type in network_model.VNIC_TYPES_SRIOV:
# NOTE(artom) For migrations, update the binding profile from
# the migration object...
if migration is not None:
# NOTE(artom) ... except for live migrations, because the
                    # conductor has already done that when calling
# bind_ports_to_host().
if not migration.is_live_migration:
pci_mapping = self._get_pci_mapping_for_migration(
instance, migration)
pci_slot = binding_profile.get('pci_slot')
new_dev = pci_mapping.get(pci_slot)
if new_dev:
binding_profile.update(
self._get_pci_device_profile(new_dev))
updates[
constants.BINDING_PROFILE] = binding_profile
else:
raise exception.PortUpdateFailed(port_id=p['id'],
reason=_("Unable to correlate PCI slot %s") %
pci_slot)
                # NOTE(artom) If migration is None, this is an unshelve, and we
# need to figure out the pci_slot from the InstancePCIRequest
# and PciDevice objects.
else:
pci_slot = self._get_port_pci_slot(context, instance, p)
if pci_slot:
binding_profile.update({'pci_slot': pci_slot})
updates[constants.BINDING_PROFILE] = binding_profile
# NOTE(gibi): during live migration the conductor already sets the
# allocation key in the port binding. However during resize, cold
# migrate, evacuate and unshelve we have to set the binding here.
# Also note that during unshelve no migration object is created.
if self._has_resource_request(context, p, neutron) and (
migration is None or not migration.is_live_migration
):
if not provider_mappings:
# TODO(gibi): Remove this check when compute RPC API is
# bumped to 6.0
# NOTE(gibi): This should not happen as the API level
# minimum compute service version check ensures that the
# compute services already send the RequestSpec during
# the move operations between the source and the
# destination and the dest compute calculates the
# mapping based on that.
LOG.warning(
"Provider mappings are not available to the compute "
"service but are required for ports with a resource "
"request. If compute RPC API versions are pinned for "
"a rolling upgrade, you will need to retry this "
"operation once the RPC version is unpinned and the "
"nova-compute services are all upgraded.",
instance=instance)
raise exception.PortUpdateFailed(
port_id=p['id'],
reason=_(
"Provider mappings are not available to the "
"compute service but are required for ports with "
"a resource request."))
binding_profile[constants.ALLOCATION] = (
self._get_binding_profile_allocation(
context, p, neutron, provider_mappings))
updates[constants.BINDING_PROFILE] = binding_profile
port_updates.append((p['id'], updates))
# Avoid rolling back updates if we catch an error above.
# TODO(lbeliveau): Batch up the port updates in one neutron call.
for port_id, updates in port_updates:
if updates:
LOG.info("Updating port %(port)s with "
"attributes %(attributes)s",
{"port": port_id, "attributes": updates},
instance=instance)
try:
neutron.update_port(port_id, {'port': updates})
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception("Unable to update binding details "
"for port %s",
port_id, instance=instance)
def update_instance_vnic_index(self, context, instance, vif, index):
"""Update instance vnic index.
When the 'VNIC index' extension is supported this method will update
the vnic index of the instance on the port. An instance may have more
than one vnic.
:param context: The request context.
:param instance: nova.objects.instance.Instance object.
:param vif: The VIF in question.
:param index: The index on the instance for the VIF.
"""
neutron = get_client(context)
if self.has_vnic_index_extension(client=neutron):
port_req_body = {'port': {'vnic_index': index}}
try:
neutron.update_port(vif['id'], port_req_body)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception('Unable to update instance VNIC index '
'for port %s.',
vif['id'], instance=instance)
def get_segment_ids_for_network(
self,
context: nova.context.RequestContext,
network_id: str,
) -> ty.List[str]:
"""Query the segmentation ids for the given network.
:param context: The request context.
:param network_id: The UUID of the network to be queried.
:returns: The list of segment UUIDs of the network or an empty list if
            either the Segment extension isn't enabled in Neutron or the
            network isn't configured for routing.
"""
client = get_client(context)
if not self.has_segment_extension(client=client):
return []
try:
# NOTE(sbauza): We can't use list_segments() directly because the
            # API is borked and returns not only segments but also the
            # segmentation IDs of a provider network, if any.
subnets = client.list_subnets(network_id=network_id,
fields='segment_id')['subnets']
except neutron_client_exc.NeutronClientException as e:
raise exception.InvalidRoutedNetworkConfiguration(
'Failed to get segment IDs for network %s' % network_id) from e
# The segment field of an unconfigured subnet could be None
return [subnet['segment_id'] for subnet in subnets
if subnet['segment_id'] is not None]
def get_segment_id_for_subnet(
self,
context: nova.context.RequestContext,
subnet_id: str,
) -> ty.Optional[str]:
"""Query the segmentation id for the given subnet.
:param context: The request context.
:param subnet_id: The UUID of the subnet to be queried.
        :returns: The segment UUID of the subnet or None if either the Segment
            extension isn't enabled in Neutron or the provided subnet doesn't
            have segments (i.e. the related network isn't configured for
            routing).
"""
client = get_client(context)
if not self.has_segment_extension(client=client):
return None
try:
subnet = client.show_subnet(subnet_id)['subnet']
except neutron_client_exc.NeutronClientException as e:
raise exception.InvalidRoutedNetworkConfiguration(
'Subnet %s not found' % subnet_id) from e
return subnet.get('segment_id')
def _ensure_requested_network_ordering(accessor, unordered, preferred):
"""Sort a list with respect to the preferred network ordering."""
if preferred:
unordered.sort(key=lambda i: preferred.index(accessor(i)))
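# Usage sketch (illustrative, not part of the original module): sorting a
# list of port dicts in place so it follows a preferred ID ordering.
#
#     ports = [{'id': 'b'}, {'id': 'a'}]
#     _ensure_requested_network_ordering(lambda p: p['id'], ports, ['a', 'b'])
#     # ports is now [{'id': 'a'}, {'id': 'b'}]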
|
py | b40c71ed0a4ab0b122f61556dae6f792302c5678 | from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
from django.urls import path, re_path
from django.conf.urls import include
from django.views.generic import TemplateView, RedirectView
urlpatterns = [
# Administration
path('admin/', admin.site.urls),
# Accounts
path('account/', include('account.urls', namespace='account')),
# Oauth2
path('api/v1/o/', include('oauth.urls', namespace='oauth2_provider')),
# General purpose
path('welcome/', TemplateView.as_view(template_name="welcome.html")),
path('', RedirectView.as_view(url="/welcome/")),
re_path(r'^$', RedirectView.as_view(url="/welcome/")),
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
|
py | b40c7385c020769adaad02dc6f85f4822dde3ead | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-01-07 13:45
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('room_booking', '0003_auto_20180107_1327'),
]
operations = [
migrations.AlterField(
model_name='booking',
name='amount_people',
field=models.IntegerField(default=1),
preserve_default=False,
),
migrations.AlterField(
model_name='booking',
name='price',
field=models.FloatField(blank=True, help_text='Total booking price'),
),
]
|
py | b40c747a40e586e1a145e9f5ed8d2af216451372 | # -*- coding: utf-8 -*-
"""Family module for Wikiversity."""
#
# (C) Pywikibot team, 2007-2017
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, unicode_literals
from pywikibot import family
__version__ = '$Id: 2b3760d39154c5d5b90f6173ce398a2eca479a6c $'
# The Wikimedia family that is known as Wikiversity
class Family(family.SubdomainFamily, family.WikimediaFamily):
"""Family class for Wikiversity."""
name = 'wikiversity'
def __init__(self):
"""Constructor."""
self.languages_by_size = [
'de', 'en', 'fr', 'ru', 'cs', 'it', 'beta', 'pt', 'es', 'ar', 'sv',
'fi', 'sl', 'el', 'hi', 'ja', 'ko',
]
super(Family, self).__init__()
self.category_redirect_templates = {
'_default': (),
'ar': ('قالب:تحويل تصنيف',),
'en': ('Category redirect',),
}
# Global bot allowed languages on
# https://meta.wikimedia.org/wiki/BPI#Current_implementation
# & https://meta.wikimedia.org/wiki/Special:WikiSets/2
self.cross_allowed = ['ja', 'ko', ]
|
py | b40c74b06318765422dc8190a38142f019e6229f | from torch.autograd import Function
import torch
from torch import nn
#from torch.autograd import Variable
from torch import einsum
import numpy as np
class DiceCoeff(Function):
"""Dice coeff for individual examples"""
def forward(self, input, target):
self.save_for_backward(input, target)
eps = 0.0001
self.inter = torch.dot(input.view(-1), target.view(-1))
self.union = torch.sum(input) + torch.sum(target) + eps
t = (2 * self.inter.float() + eps) / self.union.float()
return t
# This function has only a single output, so it gets only one gradient
def backward(self, grad_output):
input, target = self.saved_variables
grad_input = grad_target = None
if self.needs_input_grad[0]:
grad_input = grad_output * 2 * (target * self.union - self.inter) \
/ (self.union * self.union)
if self.needs_input_grad[1]:
grad_target = None
return grad_input, grad_target
def dice_coeff(input, target):
"""Dice coeff for batches"""
if input.is_cuda:
s = torch.FloatTensor(1).cuda().zero_()
else:
s = torch.FloatTensor(1).zero_()
score = []
for i, c in enumerate(zip(input, target)):
#s = s + DiceCoeff().forward(c[0], c[1])
score.append(DiceCoeff().forward(c[0], c[1]).item())
return [np.mean(score), np.std(score, ddof=1)]  # s / (i + 1)
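# Illustrative check of the Dice computation above (a sketch with made-up
# tensors, not part of the original module): identical masks give a mean
# coefficient close to 1, disjoint masks give a value close to 0.
#
#   pred = torch.tensor([[1., 0., 1., 0.], [0., 1., 0., 1.]])
#   target = pred.clone()
#   mean_dice, std_dice = dice_coeff(pred, target)   # mean_dice ~ 1.0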
def sum_tensor(inp, axes, keepdim=False):
# copy from: https://github.com/MIC-DKFZ/nnUNet/blob/master/nnunet/utilities/tensor_utilities.py
axes = np.unique(axes).astype(int)
if keepdim:
for ax in axes:
inp = inp.sum(int(ax), keepdim=True)
else:
for ax in sorted(axes, reverse=True):
inp = inp.sum(int(ax))
return inp
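# Quick illustration of sum_tensor (an assumed example, not from nnUNet): with
# inp of shape (2, 3, 4, 4) and axes=(2, 3), keepdim=False collapses the summed
# axes while keepdim=True keeps them as size-1 dimensions.
#
#   x = torch.ones(2, 3, 4, 4)
#   sum_tensor(x, axes=(2, 3)).shape                 # torch.Size([2, 3])
#   sum_tensor(x, axes=(2, 3), keepdim=True).shape   # torch.Size([2, 3, 1, 1])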
def get_tp_fp_fn(net_output, gt, axes=None, mask=None, square=False):
"""
net_output must be (b, c, x, y(, z))
gt must be a label map (shape (b, 1, x, y(, z)) OR shape (b, x, y(, z))) or one hot encoding (b, c, x, y(, z))
if mask is provided it must have shape (b, 1, x, y(, z))
:param net_output:
:param gt:
:param axes:
:param mask: mask must be 1 for valid pixels and 0 for invalid pixels
:param square: if True then fp, tp and fn will be squared before summation
:return:
"""
if axes is None:
axes = tuple(range(2, len(net_output.size())))
shp_x = net_output.shape
shp_y = gt.shape
with torch.no_grad():
if len(shp_x) != len(shp_y):
gt = gt.view((shp_y[0], 1, *shp_y[1:]))
if all([i == j for i, j in zip(net_output.shape, gt.shape)]):
# if this is the case then gt is probably already a one hot encoding
y_onehot = gt
else:
gt = gt.long()
y_onehot = torch.zeros(shp_x)
if net_output.device.type == "cuda":
y_onehot = y_onehot.cuda(net_output.device.index)
y_onehot.scatter_(1, gt, 1)
tp = net_output * y_onehot
fp = net_output * (1 - y_onehot)
fn = (1 - net_output) * y_onehot
if mask is not None:
tp = torch.stack(tuple(x_i * mask[:, 0] for x_i in torch.unbind(tp, dim=1)), dim=1)
fp = torch.stack(tuple(x_i * mask[:, 0] for x_i in torch.unbind(fp, dim=1)), dim=1)
fn = torch.stack(tuple(x_i * mask[:, 0] for x_i in torch.unbind(fn, dim=1)), dim=1)
if square:
tp = tp ** 2
fp = fp ** 2
fn = fn ** 2
tp = sum_tensor(tp, axes, keepdim=False)
fp = sum_tensor(fp, axes, keepdim=False)
fn = sum_tensor(fn, axes, keepdim=False)
return tp, fp, fn
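# Shape sketch for get_tp_fp_fn (values invented for illustration, not part of
# the module): with net_output of shape (b, c, x, y) and gt of shape
# (b, 1, x, y), the default axes are the spatial dimensions, so tp/fp/fn each
# come back with shape (b, c).
#
#   probs = torch.softmax(torch.randn(2, 3, 8, 8), dim=1)
#   labels = torch.randint(0, 3, (2, 1, 8, 8))
#   tp, fp, fn = get_tp_fp_fn(probs, labels)   # each of shape (2, 3)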
class IoULoss(nn.Module):
def __init__(self, apply_nonlin=None, batch_dice=False, do_bg=True, smooth=1.,
square=False):
"""
paper: https://link.springer.com/chapter/10.1007/978-3-319-50835-1_22
"""
super(IoULoss, self).__init__()
self.square = square
self.do_bg = do_bg
self.batch_dice = batch_dice
self.apply_nonlin = apply_nonlin
self.smooth = smooth
def forward(self, x, y, loss_mask=None):
shp_x = x.shape
if self.batch_dice:
axes = [0] + list(range(2, len(shp_x)))
else:
axes = list(range(2, len(shp_x)))
if self.apply_nonlin is not None:
x = self.apply_nonlin(x)
tp, fp, fn = get_tp_fp_fn(x, y, axes, loss_mask, self.square)
iou = (tp + self.smooth) / (tp + fp + fn + self.smooth)
if not self.do_bg:
if self.batch_dice:
iou = iou[1:]
else:
iou = iou[:, 1:]
iou = iou.mean()
return iou
class F1(nn.Module):
def __init__(self, apply_nonlin=None, batch_dice=False, do_bg=True, smooth=1.,
square=False):
"""
paper: https://link.springer.com/chapter/10.1007/978-3-319-50835-1_22
"""
super(F1, self).__init__()
self.square = square
self.do_bg = do_bg
self.batch_dice = batch_dice
self.apply_nonlin = apply_nonlin
self.smooth = smooth
def forward(self, x, y, loss_mask=None):
shp_x = x.shape
if self.batch_dice:
axes = [0] + list(range(2, len(shp_x)))
else:
axes = list(range(2, len(shp_x)))
if self.apply_nonlin is not None:
x = self.apply_nonlin(x)
tp, fp, fn = get_tp_fp_fn(x, y, axes, loss_mask, self.square)
precision = tp / (tp + fp)
recall = tp / (tp + fn)
F1 = 2 * precision * recall / (precision + recall)
if not self.do_bg:
if self.batch_dice:
F1 = F1[1:]
else:
F1 = F1[:, 1:]
F1 = F1.mean()
return F1
|
py | b40c74e08aeb6391c6005aebbea56bd6e3e59e16 | import sys
n, r = map(int, sys.stdin.readline().split())
def main():
i = r + 100 * (10 - n)
print(i)
if __name__ == '__main__':
main()
|
py | b40c74ea0b5de4a680b57f2c3e3416b36d762e27 | #!/usr/bin/env python
#!C:\ProgramData\Anaconda3\python.exe
####################################################################################################################################
####################################################################################################################################
####
#### MIT License
####
#### ParaMonte: plain powerful parallel Monte Carlo library.
####
#### Copyright (C) 2012-present, The Computational Data Science Lab
####
#### This file is part of the ParaMonte library.
####
#### Permission is hereby granted, free of charge, to any person obtaining a
#### copy of this software and associated documentation files (the "Software"),
#### to deal in the Software without restriction, including without limitation
#### the rights to use, copy, modify, merge, publish, distribute, sublicense,
#### and/or sell copies of the Software, and to permit persons to whom the
#### Software is furnished to do so, subject to the following conditions:
####
#### The above copyright notice and this permission notice shall be
#### included in all copies or substantial portions of the Software.
####
#### THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
#### EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
#### MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
#### IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
#### DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
#### OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
#### OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
####
#### ACKNOWLEDGMENT
####
#### ParaMonte is an honor-ware and its currency is acknowledgment and citations.
#### As per the ParaMonte library license agreement terms, if you use any parts of
#### this library for any purposes, kindly acknowledge the use of ParaMonte in your
#### work (education/research/industry/development/...) by citing the ParaMonte
#### library as described on this page:
####
#### https://github.com/cdslaborg/paramonte/blob/master/ACKNOWLEDGMENT.md
####
####################################################################################################################################
####################################################################################################################################
import os
import numpy as np
import paramonte as pm
import importlib
importlib.reload(pm)
from paramonte.mvn import NDIM, getLogFunc
pd = pm.ParaDRAM()
pd.runSampler( ndim = NDIM
, getLogFunc = getLogFunc
, inputFilePath = os.path.dirname(os.path.abspath(__file__)) + "/input/paramonte.nml"
, mpiEnabled = False
)
|
py | b40c74eff21706007e343f498d825b85886a994f | import logging
import os
import sys
from .conversation import Statement, Response
from . import utils
class Trainer(object):
"""
Base class for all other trainer classes.
"""
def __init__(self, storage, **kwargs):
self.chatbot = kwargs.get('chatbot')
self.storage = storage
self.logger = logging.getLogger(__name__)
self.show_training_progress = kwargs.get('show_training_progress', True)
def get_preprocessed_statement(self, input_statement):
"""
Preprocess the input statement.
"""
# The chatbot is optional to prevent backwards-incompatible changes
if not self.chatbot:
return input_statement
for preprocessor in self.chatbot.preprocessors:
input_statement = preprocessor(self, input_statement)
return input_statement
def train(self, *args, **kwargs):
"""
This method must be overridden by a child class.
"""
raise self.TrainerInitializationException()
def get_or_create(self, statement_text):
"""
Return a statement if it exists.
Create and return the statement if it does not exist.
"""
temp_statement = self.get_preprocessed_statement(
Statement(text=statement_text)
)
statement = self.storage.find(temp_statement.text)
if not statement:
statement = Statement(temp_statement.text)
return statement
def _generate_export_data(self):
result = []
for statement in self.storage.filter():
for response in statement.in_response_to:
result.append([response.text, statement.text])
return result
def export_for_training(self, file_path='./export.json'):
"""
Create a file from the database that can be used to
train other chat bots.
"""
import json
export = {'conversations': self._generate_export_data()}
with open(file_path, 'w+') as jsonfile:
json.dump(export, jsonfile, ensure_ascii=False)
class ListTrainer(Trainer):
"""
Allows a chat bot to be trained using a list of strings
where the list represents a conversation.
"""
def train(self, conversation):
"""
Train the chat bot based on the provided list of
statements that represents a single conversation.
"""
previous_statement_text = None
for conversation_count, text in enumerate(conversation):
if self.show_training_progress:
utils.print_progress_bar(
'List Trainer',
conversation_count + 1, len(conversation)
)
statement = self.get_or_create(text)
if previous_statement_text:
statement.add_response(
Response(previous_statement_text)
)
previous_statement_text = statement.text
self.storage.update(statement)
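# Hedged usage sketch for ListTrainer (the bot setup below is an assumption,
# not taken from this module):
#
#   from chatterbot import ChatBot
#   bot = ChatBot('Example')
#   trainer = ListTrainer(bot.storage, chatbot=bot)
#   trainer.train(['Hi there!', 'Hello!', 'How are you?', 'I am fine.'])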
class TwitterTrainer(Trainer):
"""
Allows the chat bot to be trained using data
gathered from Twitter.
:param random_seed_word: The seed word to be used to get random tweets from the Twitter API.
This parameter is optional. By default it is the word 'random'.
:param twitter_lang: Language for results as ISO 639-1 code.
This parameter is optional. Default is None (all languages).
"""
def __init__(self, storage, **kwargs):
super(TwitterTrainer, self).__init__(storage, **kwargs)
from twitter import Api as TwitterApi
# The word to be used as the first search term when searching for tweets
self.random_seed_word = kwargs.get('random_seed_word', 'random')
self.lang = kwargs.get('twitter_lang')
self.api = TwitterApi(
consumer_key=kwargs.get('twitter_consumer_key'),
consumer_secret=kwargs.get('twitter_consumer_secret'),
access_token_key=kwargs.get('twitter_access_token_key'),
access_token_secret=kwargs.get('twitter_access_token_secret')
)
def random_word(self, base_word, lang=None):
"""
Generate a random word using the Twitter API.
Search twitter for recent tweets containing the term 'random'.
Then randomly select one word from those tweets and do another
search with that word. Return a randomly selected word from the
new set of results.
"""
import random
random_tweets = self.api.GetSearch(term=base_word, count=5, lang=lang)
random_words = self.get_words_from_tweets(random_tweets)
random_word = random.choice(list(random_words))
tweets = self.api.GetSearch(term=random_word, count=5, lang=lang)
words = self.get_words_from_tweets(tweets)
word = random.choice(list(words))
return word
def get_words_from_tweets(self, tweets):
"""
Given a list of tweets, return the set of
words from the tweets.
"""
words = set()
for tweet in tweets:
tweet_words = tweet.text.split()
for word in tweet_words:
# If the word contains only letters with a length from 4 to 9
if word.isalpha() and len(word) > 3 and len(word) <= 9:
words.add(word)
return words
def get_statements(self):
"""
Returns list of random statements from the API.
"""
from twitter import TwitterError
statements = []
# Generate a random word
random_word = self.random_word(self.random_seed_word, self.lang)
self.logger.info(u'Requesting 50 random tweets containing the word {}'.format(random_word))
tweets = self.api.GetSearch(term=random_word, count=50, lang=self.lang)
for tweet in tweets:
statement = Statement(tweet.text)
if tweet.in_reply_to_status_id:
try:
status = self.api.GetStatus(tweet.in_reply_to_status_id)
statement.add_response(Response(status.text))
statements.append(statement)
except TwitterError as error:
self.logger.warning(str(error))
self.logger.info('Adding {} tweets with responses'.format(len(statements)))
return statements
def train(self):
for _ in range(0, 10):
statements = self.get_statements()
for statement in statements:
self.storage.update(statement)
class UbuntuCorpusTrainer(Trainer):
"""
Allow chatbots to be trained with the data from
the Ubuntu Dialog Corpus.
"""
def __init__(self, storage, **kwargs):
super(UbuntuCorpusTrainer, self).__init__(storage, **kwargs)
self.data_download_url = kwargs.get(
'ubuntu_corpus_data_download_url',
'http://cs.mcgill.ca/~jpineau/datasets/ubuntu-corpus-1.0/ubuntu_dialogs.tgz'
)
self.data_directory = kwargs.get(
'ubuntu_corpus_data_directory',
'./data/'
)
self.extracted_data_directory = os.path.join(
self.data_directory, 'ubuntu_dialogs'
)
# Create the data directory if it does not already exist
if not os.path.exists(self.data_directory):
os.makedirs(self.data_directory)
def is_downloaded(self, file_path):
"""
Check if the data file is already downloaded.
"""
if os.path.exists(file_path):
self.logger.info('File is already downloaded')
return True
return False
def is_extracted(self, file_path):
"""
Check if the data file is already extracted.
"""
if os.path.isdir(file_path):
self.logger.info('File is already extracted')
return True
return False
def download(self, url, show_status=True):
"""
Download a file from the given url.
Show a progress indicator for the download status.
Based on: http://stackoverflow.com/a/15645088/1547223
"""
import requests
file_name = url.split('/')[-1]
file_path = os.path.join(self.data_directory, file_name)
# Do not download the data if it already exists
if self.is_downloaded(file_path):
return file_path
with open(file_path, 'wb') as open_file:
print('Downloading %s' % url)
response = requests.get(url, stream=True)
total_length = response.headers.get('content-length')
if total_length is None:
# No content length header
open_file.write(response.content)
else:
download = 0
total_length = int(total_length)
for data in response.iter_content(chunk_size=4096):
download += len(data)
open_file.write(data)
if show_status:
done = int(50 * download / total_length)
sys.stdout.write('\r[%s%s]' % ('=' * done, ' ' * (50 - done)))
sys.stdout.flush()
# Add a new line after the download bar
sys.stdout.write('\n')
print('Download location: %s' % file_path)
return file_path
def extract(self, file_path):
"""
Extract a tar file at the specified file path.
"""
import tarfile
print('Extracting {}'.format(file_path))
if not os.path.exists(self.extracted_data_directory):
os.makedirs(self.extracted_data_directory)
def track_progress(members):
sys.stdout.write('.')
for member in members:
# This will be the current file being extracted
yield member
with tarfile.open(file_path) as tar:
tar.extractall(path=self.extracted_data_directory, members=track_progress(tar))
self.logger.info('File extracted to {}'.format(self.extracted_data_directory))
return True
def train(self):
import glob
import csv
# Download and extract the Ubuntu dialog corpus if needed
corpus_download_path = self.download(self.data_download_url)
# Extract if the directory does not already exist
if not self.is_extracted(self.extracted_data_directory):
self.extract(corpus_download_path)
extracted_corpus_path = os.path.join(
self.extracted_data_directory,
'**', '**', '*.tsv'
)
file_kwargs = {}
if sys.version_info[0] > 2:
# Specify the encoding in Python versions 3 and up
file_kwargs['encoding'] = 'utf-8'
# WARNING: This might fail to read a unicode corpus file in Python 2.x
for file in glob.iglob(extracted_corpus_path):
self.logger.info('Training from: {}'.format(file))
with open(file, 'r', **file_kwargs) as tsv:
reader = csv.reader(tsv, delimiter='\t')
previous_statement_text = None
for row in reader:
if len(row) > 0:
text = row[3]
statement = self.get_or_create(text)
print(text, len(row))
statement.add_extra_data('datetime', row[0])
statement.add_extra_data('speaker', row[1])
if row[2].strip():
statement.add_extra_data('addressing_speaker', row[2])
if previous_statement_text:
statement.add_response(
Response(previous_statement_text)
)
previous_statement_text = statement.text
self.storage.update(statement)
|
py | b40c756ba39e79fa67dc72243a4b54c2121b2d3b | import pytest
import matplotlib
matplotlib.use('AGG') # use a non-interactive backend
from matplotlib import pyplot as plt
from lifetimes import plotting
from lifetimes import BetaGeoFitter, ParetoNBDFitter, ModifiedBetaGeoFitter
from lifetimes.datasets import (load_cdnow_summary, load_transaction_data,
load_dataset)
from lifetimes import utils
TOLERANCE_VALUE = 20
bgf = BetaGeoFitter()
cd_data = load_cdnow_summary()
bgf.fit(cd_data['frequency'], cd_data['recency'], cd_data['T'], iterative_fitting=1)
@pytest.mark.plottest
class TestPlotting():
@pytest.mark.mpl_image_compare(tolerance=TOLERANCE_VALUE, style='default')
def test_plot_period_transactions(self):
plt.figure()
plotting.plot_period_transactions(bgf)
return plt.gcf()
@pytest.mark.mpl_image_compare(tolerance=TOLERANCE_VALUE, style='default')
def test_plot_period_transactions_parento(self):
pnbd = ParetoNBDFitter()
pnbd.fit(cd_data['frequency'], cd_data['recency'], cd_data['T'], iterative_fitting=1)
plt.figure()
plotting.plot_period_transactions(pnbd)
return plt.gcf()
@pytest.mark.mpl_image_compare(tolerance=TOLERANCE_VALUE, style='default')
def test_plot_period_transactions_mbgf(self):
mbgf = ModifiedBetaGeoFitter()
mbgf.fit(cd_data['frequency'], cd_data['recency'], cd_data['T'], iterative_fitting=1)
plt.figure()
plotting.plot_period_transactions(mbgf)
return plt.gcf()
@pytest.mark.mpl_image_compare(tolerance=TOLERANCE_VALUE, style='default')
def test_plot_period_transactions_max_frequency(self):
plt.figure()
plotting.plot_period_transactions(bgf, max_frequency=12)
return plt.gcf()
@pytest.mark.mpl_image_compare(tolerance=TOLERANCE_VALUE, style='default')
def test_plot_period_transactions_labels(self):
plt.figure()
plotting.plot_period_transactions(bgf, label=['A', 'B'])
return plt.gcf()
@pytest.mark.mpl_image_compare(tolerance=TOLERANCE_VALUE, style='default')
def test_plot_frequency_recency_matrix(self):
plt.figure()
plotting.plot_frequency_recency_matrix(bgf)
return plt.gcf()
@pytest.mark.mpl_image_compare(tolerance=TOLERANCE_VALUE, style='default')
def test_plot_frequency_recency_matrix_max_recency(self):
plt.figure()
plotting.plot_frequency_recency_matrix(bgf, max_recency=100)
return plt.gcf()
@pytest.mark.mpl_image_compare(tolerance=TOLERANCE_VALUE, style='default')
def test_plot_frequency_recency_matrix_max_frequency(self):
plt.figure()
plotting.plot_frequency_recency_matrix(bgf, max_frequency=100)
return plt.gcf()
@pytest.mark.mpl_image_compare(tolerance=TOLERANCE_VALUE, style='default')
def test_plot_frequency_recency_matrix_max_frequency_max_recency(self):
plt.figure()
plotting.plot_frequency_recency_matrix(bgf, max_frequency=100, max_recency=100)
return plt.gcf()
@pytest.mark.mpl_image_compare(tolerance=TOLERANCE_VALUE, style='default')
def test_plot_probability_alive_matrix(self):
plt.figure()
plotting.plot_probability_alive_matrix(bgf)
return plt.gcf()
@pytest.mark.mpl_image_compare(tolerance=TOLERANCE_VALUE, style='default')
def test_plot_probability_alive_matrix_max_frequency(self):
plt.figure()
plotting.plot_probability_alive_matrix(bgf, max_frequency=100)
return plt.gcf()
@pytest.mark.mpl_image_compare(tolerance=TOLERANCE_VALUE, style='default')
def test_plot_probability_alive_matrix_max_recency(self):
plt.figure()
plotting.plot_probability_alive_matrix(bgf, max_recency=100)
return plt.gcf()
@pytest.mark.mpl_image_compare(tolerance=TOLERANCE_VALUE, style='default')
def test_plot_probability_alive_matrix_max_frequency_max_recency(self):
plt.figure()
plotting.plot_probability_alive_matrix(bgf, max_frequency=100, max_recency=100)
return plt.gcf()
@pytest.mark.mpl_image_compare(tolerance=TOLERANCE_VALUE, style='default')
def test_plot_expected_repeat_purchases(self):
plt.figure()
plotting.plot_expected_repeat_purchases(bgf)
return plt.gcf()
@pytest.mark.mpl_image_compare(tolerance=TOLERANCE_VALUE, style='default')
def test_plot_expected_repeat_purchases_with_label(self):
plt.figure()
plotting.plot_expected_repeat_purchases(bgf, label='test label')
return plt.gcf()
@pytest.mark.mpl_image_compare(tolerance=TOLERANCE_VALUE, style='default')
def test_plot_transaction_rate_heterogeneity(self):
"""Test transactions rate heterogeneity."""
plt.figure()
plotting.plot_transaction_rate_heterogeneity(bgf)
return plt.gcf()
@pytest.mark.mpl_image_compare(tolerance=TOLERANCE_VALUE, style='default')
def test_plot_dropout_rate_heterogeneity(self):
"""Test dropout rate heterogeneity."""
plt.figure()
plotting.plot_dropout_rate_heterogeneity(bgf)
return plt.gcf()
@pytest.mark.mpl_image_compare(tolerance=TOLERANCE_VALUE, style='default')
def test_plot_customer_alive_history(self):
plt.figure()
transaction_data = load_transaction_data()
# yes I know this is using the wrong data, but I'm testing plotting here.
id_user = 35
days_since_birth = 200
sp_trans = transaction_data.loc[transaction_data['id'] == id_user]
plotting.plot_history_alive(bgf, days_since_birth, sp_trans, 'date')
return plt.gcf()
@pytest.mark.mpl_image_compare(tolerance=TOLERANCE_VALUE, style='default')
def test_plot_calibration_purchases_vs_holdout_purchases(self):
transaction_data = load_transaction_data()
summary = utils.calibration_and_holdout_data(transaction_data, 'id', 'date', '2014-09-01', '2014-12-31')
bgf.fit(summary['frequency_cal'], summary['recency_cal'], summary['T_cal'])
plt.figure()
plotting.plot_calibration_purchases_vs_holdout_purchases(bgf, summary)
return plt.gcf()
@pytest.mark.mpl_image_compare(tolerance=TOLERANCE_VALUE, style='default')
def test_plot_calibration_purchases_vs_holdout_purchases_time_since_last_purchase(self):
transaction_data = load_transaction_data()
summary = utils.calibration_and_holdout_data(transaction_data, 'id', 'date', '2014-09-01', '2014-12-31')
bgf.fit(summary['frequency_cal'], summary['recency_cal'], summary['T_cal'])
plt.figure()
plotting.plot_calibration_purchases_vs_holdout_purchases(bgf, summary, kind='time_since_last_purchase')
return plt.gcf()
@pytest.mark.mpl_image_compare(tolerance=TOLERANCE_VALUE, style='default')
def test_plot_cumulative_transactions(self):
"""Test plotting cumultative transactions with CDNOW example."""
transactions = load_dataset('CDNOW_sample.txt', header=None, sep=r'\s+')
transactions.columns = ['id_total', 'id_sample', 'date', 'num_cd_purc',
'total_value']
t = 39
freq = 'W'
transactions_summary = utils.summary_data_from_transaction_data(
transactions, 'id_sample', 'date', datetime_format='%Y%m%d',
observation_period_end='19970930', freq=freq)
bgf = BetaGeoFitter(penalizer_coef=0.01)
bgf.fit(transactions_summary['frequency'],
transactions_summary['recency'], transactions_summary['T'])
plt.figure()
plotting.plot_cumulative_transactions(
bgf, transactions, 'date', 'id_sample', 2 * t, t, freq=freq,
xlabel='week', datetime_format='%Y%m%d')
return plt.gcf()
@pytest.mark.mpl_image_compare(tolerance=TOLERANCE_VALUE, style='default')
def test_plot_incremental_transactions(self):
"""Test plotting incremental transactions with CDNOW example."""
transactions = load_dataset('CDNOW_sample.txt', header=None, sep=r'\s+')
transactions.columns = ['id_total', 'id_sample', 'date', 'num_cd_purc',
'total_value']
t = 39
freq = 'W'
transactions_summary = utils.summary_data_from_transaction_data(
transactions, 'id_sample', 'date', datetime_format='%Y%m%d',
observation_period_end='19970930', freq=freq)
bgf = BetaGeoFitter(penalizer_coef=0.01)
bgf.fit(transactions_summary['frequency'],
transactions_summary['recency'], transactions_summary['T'])
plt.figure()
plotting.plot_incremental_transactions(
bgf, transactions, 'date', 'id_sample', 2 * t, t, freq=freq,
xlabel='week', datetime_format='%Y%m%d')
return plt.gcf()
|
py | b40c7576142a88ed7085f9825c58f0f2b84a538f | """Parser for HA BLE (DIY sensors) advertisements"""
import logging
import struct
_LOGGER = logging.getLogger(__name__)
def to_int(value):
"""Convert to integer"""
return value & 0xFF
def unsigned_to_signed(unsigned, size):
"""Convert unsigned to signed"""
if (unsigned & (1 << size - 1)) != 0:
unsigned = -1 * ((1 << size - 1) - (unsigned & ((1 << size - 1) - 1)))
return unsigned
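# Example of the two's-complement conversion above (values chosen purely for
# illustration): for a 4-bit field, 0b0111 stays 7 while 0b1111 becomes -1.
#
#   unsigned_to_signed(0b0111, 4)   # -> 7
#   unsigned_to_signed(0b1111, 4)   # -> -1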
def to_sfloat(value):
"""Convert sfloat to integer"""
if len(value) != 2:
_LOGGER.debug("conversion to sfloat failed")
return 0
else:
byte_0 = value[0]
byte_1 = value[1]
mantissa = unsigned_to_signed(to_int(byte_0) + ((to_int(byte_1) & 0x0F) << 8), 12)
exponent = unsigned_to_signed(to_int(byte_1) >> 4, 4)
return mantissa * pow(10, exponent)
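# IEEE-11073 SFLOAT sketch: the low 12 bits carry a signed mantissa and the
# high 4 bits a signed base-10 exponent. The bytes below are illustrative only:
#
#   to_sfloat(bytes([0x39, 0xF1]))   # mantissa 0x139 = 313, exponent -1 -> 31.3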
def parse_ha_ble(self, service_data_list, source_mac, rssi):
"""Home Assistant BLE parser"""
firmware = "HA BLE"
device_type = "HA BLE DIY"
ha_ble_mac = source_mac
result = {}
packet_id = None
for service_data in service_data_list:
if len(service_data) == service_data[0] + 1:
meas_type = (service_data[3] << 8) | service_data[2]
xobj = service_data[4:]
if meas_type == 0x2A4D and len(xobj) == 1:
(packet_id,) = struct.Struct("<B").unpack(xobj)
result.update({"packet": packet_id})
elif meas_type == 0x2A19 and len(xobj) == 1:
(batt,) = struct.Struct("<B").unpack(xobj)
result.update({"battery": batt})
elif meas_type == 0x2A6D and len(xobj) == 4:
(press,) = struct.Struct("<I").unpack(xobj)
result.update({"pressure": press * 0.001})
elif meas_type == 0x2A6E and len(xobj) == 2:
(temp,) = struct.Struct("<h").unpack(xobj)
result.update({"temperature": temp * 0.01})
elif meas_type == 0x2A6F and len(xobj) == 2:
(humi,) = struct.Struct("<H").unpack(xobj)
result.update({"humidity": humi * 0.01})
elif meas_type == 0x2A7B and len(xobj) == 1:
(dewp,) = struct.Struct("<b").unpack(xobj)
result.update({"dewpoint": dewp})
elif meas_type == 0x2A98 and len(xobj) == 3:
(flag, weight) = struct.Struct("<bH").unpack(xobj)
if flag & 1 == 0:  # bit 0 of the flags field selects the unit (0 = SI)
weight_unit = "kg"
factor = 0.005
elif flag & 1 == 1:
weight_unit = "lbs"
factor = 0.01
else:
weight_unit = "kg"
factor = 0.005
result.update({"weight": weight * factor, "weight unit": weight_unit})
elif meas_type == 0X2AE2 and len(xobj) == 1:
(value,) = struct.Struct("<B").unpack(xobj)
result.update({"binary": bool(value)})
elif meas_type == 0X2AF2 and len(xobj) == 4:
(enrg,) = struct.Struct("<I").unpack(xobj)
result.update({"energy": enrg * 0.001})
elif meas_type == 0X2AFB and len(xobj) == 3:
illu = int.from_bytes(xobj, "little")
result.update({"illuminance": illu * 0.01})
elif meas_type == 0x2B05 and len(xobj) == 3:
power = int.from_bytes(xobj, "little")
result.update({"power": power * 0.1})
elif meas_type == 0x2B18 and len(xobj) == 2:
(volt,) = struct.Struct("<H").unpack(xobj)
result.update({"voltage": volt / 64})
elif meas_type == 0x2BD6 and len(xobj) == 2:
pm25 = to_sfloat(xobj)
result.update({"pm2.5": pm25})
elif meas_type == 0x2BD7 and len(xobj) == 2:
pm10 = to_sfloat(xobj)
result.update({"pm10": pm10})
else:
_LOGGER.debug(
"Unknown data received from Home Assistant BLE DIY sensor device: %s",
service_data.hex()
)
if not result:
if self.report_unknown == "HA BLE":
_LOGGER.info(
"BLE ADV from UNKNOWN Home Assistant BLE DEVICE: RSSI: %s, MAC: %s, ADV: %s",
rssi,
to_mac(source_mac),
service_data_list
)
return None
# Check for duplicate messages
if packet_id:
try:
prev_packet = self.lpacket_ids[ha_ble_mac]
except KeyError:
# start with empty first packet
prev_packet = None
if prev_packet == packet_id:
# only process new messages
if self.filter_duplicates is True:
return None
self.lpacket_ids[ha_ble_mac] = packet_id
else:
result.update({"packet": "no packet id"})
# check for MAC presence in sensor whitelist, if needed
if self.discovery is False and ha_ble_mac not in self.sensor_whitelist:
_LOGGER.debug("Discovery is disabled. MAC: %s is not whitelisted!", to_mac(ha_ble_mac))
return None
result.update({
"rssi": rssi,
"mac": ''.join(f'{i:02X}' for i in ha_ble_mac),
"type": device_type,
"firmware": firmware,
"data": True
})
return result
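# Worked example of one service data element as parsed above (bytes invented
# for illustration): b'\x05\x16\x6e\x2a\xea\x09' -> byte 0 is the remaining
# length (5), byte 1 is presumably the AD type, bytes 2-3 hold the GATT
# characteristic UUID little-endian (0x2A6E), and the value 0x09EA = 2538 is
# reported as temperature 25.38.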
def to_mac(addr: int):
"""Return formatted MAC address"""
return ':'.join(f'{i:02X}' for i in addr)
|
py | b40c768f5c865403ab3144962f0f500f6a2bf6bf | """
The main QuerySet implementation. This provides the public API for the ORM.
"""
import copy
import operator
import warnings
from collections import OrderedDict, namedtuple
from functools import lru_cache
from itertools import chain
from django.conf import settings
from django.core import exceptions
from django.db import (
DJANGO_VERSION_PICKLE_KEY, IntegrityError, connections, router,
transaction,
)
from django.db.models import DateField, DateTimeField, sql
from django.db.models.constants import LOOKUP_SEP
from django.db.models.deletion import Collector
from django.db.models.expressions import Case, Expression, F, Value, When
from django.db.models.fields import AutoField
from django.db.models.functions import Cast, Trunc
from django.db.models.query_utils import FilteredRelation, InvalidQuery, Q
from django.db.models.sql.constants import CURSOR, GET_ITERATOR_CHUNK_SIZE
from django.db.utils import NotSupportedError
from django.utils import timezone
from django.utils.deprecation import RemovedInDjango30Warning
from django.utils.functional import cached_property, partition
from django.utils.version import get_version
# The maximum number of items to display in a QuerySet.__repr__
REPR_OUTPUT_SIZE = 20
# Pull into this namespace for backwards compatibility.
EmptyResultSet = sql.EmptyResultSet
class BaseIterable:
def __init__(self, queryset, chunked_fetch=False, chunk_size=GET_ITERATOR_CHUNK_SIZE):
self.queryset = queryset
self.chunked_fetch = chunked_fetch
self.chunk_size = chunk_size
class ModelIterable(BaseIterable):
"""Iterable that yields a model instance for each row."""
def __iter__(self):
queryset = self.queryset
db = queryset.db
compiler = queryset.query.get_compiler(using=db)
# Execute the query. This will also fill compiler.select, klass_info,
# and annotations.
results = compiler.execute_sql(chunked_fetch=self.chunked_fetch, chunk_size=self.chunk_size)
select, klass_info, annotation_col_map = (compiler.select, compiler.klass_info,
compiler.annotation_col_map)
model_cls = klass_info['model']
select_fields = klass_info['select_fields']
model_fields_start, model_fields_end = select_fields[0], select_fields[-1] + 1
init_list = [f[0].target.attname
for f in select[model_fields_start:model_fields_end]]
related_populators = get_related_populators(klass_info, select, db)
known_related_objects = [
(field, related_objs, [
operator.attrgetter(
field.attname
if from_field == 'self' else
queryset.model._meta.get_field(from_field).attname
)
for from_field in field.from_fields
]) for field, related_objs in queryset._known_related_objects.items()
]
for row in compiler.results_iter(results):
obj = model_cls.from_db(db, init_list, row[model_fields_start:model_fields_end])
for rel_populator in related_populators:
rel_populator.populate(row, obj)
if annotation_col_map:
for attr_name, col_pos in annotation_col_map.items():
setattr(obj, attr_name, row[col_pos])
# Add the known related objects to the model.
for field, rel_objs, rel_getters in known_related_objects:
# Avoid overwriting objects loaded by, e.g., select_related().
if field.is_cached(obj):
continue
rel_obj_id = tuple([rel_getter(obj) for rel_getter in rel_getters])
try:
rel_obj = rel_objs[rel_obj_id]
except KeyError:
pass # May happen in qs1 | qs2 scenarios.
else:
setattr(obj, field.name, rel_obj)
yield obj
class ValuesIterable(BaseIterable):
"""
Iterable returned by QuerySet.values() that yields a dict for each row.
"""
def __iter__(self):
queryset = self.queryset
query = queryset.query
compiler = query.get_compiler(queryset.db)
# extra(select=...) cols are always at the start of the row.
names = [
*query.extra_select,
*query.values_select,
*query.annotation_select,
]
indexes = range(len(names))
for row in compiler.results_iter(chunked_fetch=self.chunked_fetch, chunk_size=self.chunk_size):
yield {names[i]: row[i] for i in indexes}
class ValuesListIterable(BaseIterable):
"""
Iterable returned by QuerySet.values_list(flat=False) that yields a tuple
for each row.
"""
def __iter__(self):
queryset = self.queryset
query = queryset.query
compiler = query.get_compiler(queryset.db)
if queryset._fields:
# extra(select=...) cols are always at the start of the row.
names = [
*query.extra_select,
*query.values_select,
*query.annotation_select,
]
fields = [*queryset._fields, *(f for f in query.annotation_select if f not in queryset._fields)]
if fields != names:
# Reorder according to fields.
index_map = {name: idx for idx, name in enumerate(names)}
rowfactory = operator.itemgetter(*[index_map[f] for f in fields])
return map(
rowfactory,
compiler.results_iter(chunked_fetch=self.chunked_fetch, chunk_size=self.chunk_size)
)
return compiler.results_iter(tuple_expected=True, chunked_fetch=self.chunked_fetch, chunk_size=self.chunk_size)
class NamedValuesListIterable(ValuesListIterable):
"""
Iterable returned by QuerySet.values_list(named=True) that yields a
namedtuple for each row.
"""
@staticmethod
@lru_cache()
def create_namedtuple_class(*names):
# Cache namedtuple() with @lru_cache() since it's too slow to be
# called for every QuerySet evaluation.
return namedtuple('Row', names)
def __iter__(self):
queryset = self.queryset
if queryset._fields:
names = queryset._fields
else:
query = queryset.query
names = [*query.extra_select, *query.values_select, *query.annotation_select]
tuple_class = self.create_namedtuple_class(*names)
new = tuple.__new__
for row in super().__iter__():
yield new(tuple_class, row)
class FlatValuesListIterable(BaseIterable):
"""
Iterable returned by QuerySet.values_list(flat=True) that yields single
values.
"""
def __iter__(self):
queryset = self.queryset
compiler = queryset.query.get_compiler(queryset.db)
for row in compiler.results_iter(chunked_fetch=self.chunked_fetch, chunk_size=self.chunk_size):
yield row[0]
class QuerySet:
"""Represent a lazy database lookup for a set of objects."""
def __init__(self, model=None, query=None, using=None, hints=None):
self.model = model
self._db = using
self._hints = hints or {}
self.query = query or sql.Query(self.model)
self._result_cache = None
self._sticky_filter = False
self._for_write = False
self._prefetch_related_lookups = ()
self._prefetch_done = False
self._known_related_objects = {} # {rel_field: {pk: rel_obj}}
self._iterable_class = ModelIterable
self._fields = None
def as_manager(cls):
# Address the circular dependency between `QuerySet` and `Manager`.
from django.db.models.manager import Manager
manager = Manager.from_queryset(cls)()
manager._built_with_as_manager = True
return manager
as_manager.queryset_only = True
as_manager = classmethod(as_manager)
########################
# PYTHON MAGIC METHODS #
########################
def __deepcopy__(self, memo):
"""Don't populate the QuerySet's cache."""
obj = self.__class__()
for k, v in self.__dict__.items():
if k == '_result_cache':
obj.__dict__[k] = None
else:
obj.__dict__[k] = copy.deepcopy(v, memo)
return obj
def __getstate__(self):
# Force the cache to be fully populated.
self._fetch_all()
return {**self.__dict__, DJANGO_VERSION_PICKLE_KEY: get_version()}
def __setstate__(self, state):
msg = None
pickled_version = state.get(DJANGO_VERSION_PICKLE_KEY)
if pickled_version:
current_version = get_version()
if current_version != pickled_version:
msg = (
"Pickled queryset instance's Django version %s does not "
"match the current version %s." % (pickled_version, current_version)
)
else:
msg = "Pickled queryset instance's Django version is not specified."
if msg:
warnings.warn(msg, RuntimeWarning, stacklevel=2)
self.__dict__.update(state)
def __repr__(self):
data = list(self[:REPR_OUTPUT_SIZE + 1])
if len(data) > REPR_OUTPUT_SIZE:
data[-1] = "…(remaining elements truncated)…"
return '<%s %r>' % (self.__class__.__name__, data)
def __len__(self):
self._fetch_all()
return len(self._result_cache)
def __iter__(self):
"""
The queryset iterator protocol uses three nested iterators in the
default case:
1. sql.compiler.execute_sql()
- Returns 100 rows at time (constants.GET_ITERATOR_CHUNK_SIZE)
using cursor.fetchmany(). This part is responsible for
doing some column masking, and returning the rows in chunks.
2. sql.compiler.results_iter()
- Returns one row at time. At this point the rows are still just
tuples. In some cases the return values are converted to
Python values at this location.
3. self.iterator()
- Responsible for turning the rows into model objects.
"""
self._fetch_all()
return iter(self._result_cache)
def __bool__(self):
self._fetch_all()
return bool(self._result_cache)
def __getitem__(self, k):
"""Retrieve an item or slice from the set of results."""
if not isinstance(k, (int, slice)):
raise TypeError
assert ((not isinstance(k, slice) and (k >= 0)) or
(isinstance(k, slice) and (k.start is None or k.start >= 0) and
(k.stop is None or k.stop >= 0))), \
"Negative indexing is not supported."
if self._result_cache is not None:
return self._result_cache[k]
if isinstance(k, slice):
qs = self._chain()
if k.start is not None:
start = int(k.start)
else:
start = None
if k.stop is not None:
stop = int(k.stop)
else:
stop = None
qs.query.set_limits(start, stop)
return list(qs)[::k.step] if k.step else qs
qs = self._chain()
qs.query.set_limits(k, k + 1)
qs._fetch_all()
return qs._result_cache[0]
def __and__(self, other):
self._merge_sanity_check(other)
if isinstance(other, EmptyQuerySet):
return other
if isinstance(self, EmptyQuerySet):
return self
combined = self._chain()
combined._merge_known_related_objects(other)
combined.query.combine(other.query, sql.AND)
return combined
def __or__(self, other):
self._merge_sanity_check(other)
if isinstance(self, EmptyQuerySet):
return other
if isinstance(other, EmptyQuerySet):
return self
query = self if self.query.can_filter() else self.model._base_manager.filter(pk__in=self.values('pk'))
combined = query._chain()
combined._merge_known_related_objects(other)
if not other.query.can_filter():
other = other.model._base_manager.filter(pk__in=other.values('pk'))
combined.query.combine(other.query, sql.OR)
return combined
####################################
# METHODS THAT DO DATABASE QUERIES #
####################################
def _iterator(self, use_chunked_fetch, chunk_size):
yield from self._iterable_class(self, chunked_fetch=use_chunked_fetch, chunk_size=chunk_size)
def iterator(self, chunk_size=2000):
"""
An iterator over the results from applying this QuerySet to the
database.
"""
if chunk_size <= 0:
raise ValueError('Chunk size must be strictly positive.')
use_chunked_fetch = not connections[self.db].settings_dict.get('DISABLE_SERVER_SIDE_CURSORS')
return self._iterator(use_chunked_fetch, chunk_size)
def aggregate(self, *args, **kwargs):
"""
Return a dictionary containing the calculations (aggregation)
over the current queryset.
If args is present the expression is passed as a kwarg using
the Aggregate object's default alias.
"""
if self.query.distinct_fields:
raise NotImplementedError("aggregate() + distinct(fields) not implemented.")
self._validate_values_are_expressions((*args, *kwargs.values()), method_name='aggregate')
for arg in args:
# The default_alias property raises TypeError if default_alias
# can't be set automatically or AttributeError if it isn't an
# attribute.
try:
arg.default_alias
except (AttributeError, TypeError):
raise TypeError("Complex aggregates require an alias")
kwargs[arg.default_alias] = arg
query = self.query.chain()
for (alias, aggregate_expr) in kwargs.items():
query.add_annotation(aggregate_expr, alias, is_summary=True)
if not query.annotations[alias].contains_aggregate:
raise TypeError("%s is not an aggregate expression" % alias)
return query.get_aggregation(self.db, kwargs)
def count(self):
"""
Perform a SELECT COUNT() and return the number of records as an
integer.
If the QuerySet is already fully cached, return the length of the
cached results set to avoid multiple SELECT COUNT(*) calls.
"""
if self._result_cache is not None:
return len(self._result_cache)
return self.query.get_count(using=self.db)
def get(self, *args, **kwargs):
"""
Perform the query and return a single object matching the given
keyword arguments.
"""
clone = self.filter(*args, **kwargs)
if self.query.can_filter() and not self.query.distinct_fields:
clone = clone.order_by()
num = len(clone)
if num == 1:
return clone._result_cache[0]
if not num:
raise self.model.DoesNotExist(
"%s matching query does not exist." %
self.model._meta.object_name
)
raise self.model.MultipleObjectsReturned(
"get() returned more than one %s -- it returned %s!" %
(self.model._meta.object_name, num)
)
def create(self, **kwargs):
"""
Create a new object with the given kwargs, saving it to the database
and returning the created object.
"""
obj = self.model(**kwargs)
self._for_write = True
obj.save(force_insert=True, using=self.db)
return obj
def _populate_pk_values(self, objs):
for obj in objs:
if obj.pk is None:
obj.pk = obj._meta.pk.get_pk_value_on_save(obj)
def bulk_create(self, objs, batch_size=None, ignore_conflicts=False):
"""
Insert each of the instances into the database. Do *not* call
save() on each of the instances, do not send any pre/post_save
signals, and do not set the primary key attribute if it is an
autoincrement field (except if features.can_return_ids_from_bulk_insert=True).
Multi-table models are not supported.
"""
# When you bulk insert you don't get the primary keys back (if it's an
# autoincrement, except if can_return_ids_from_bulk_insert=True), so
# you can't insert into the child tables which references this. There
# are two workarounds:
# 1) This could be implemented if you didn't have an autoincrement pk
# 2) You could do it by doing O(n) normal inserts into the parent
# tables to get the primary keys back and then doing a single bulk
# insert into the childmost table.
# We currently set the primary keys on the objects when using
# PostgreSQL via the RETURNING ID clause. It should be possible for
# Oracle as well, but the semantics for extracting the primary keys is
# trickier so it's not done yet.
assert batch_size is None or batch_size > 0
# Check that the parents share the same concrete model with our
# model to detect the inheritance pattern ConcreteGrandParent ->
# MultiTableParent -> ProxyChild. Simply checking self.model._meta.proxy
# would not identify that case as involving multiple tables.
for parent in self.model._meta.get_parent_list():
if parent._meta.concrete_model is not self.model._meta.concrete_model:
raise ValueError("Can't bulk create a multi-table inherited model")
if not objs:
return objs
self._for_write = True
connection = connections[self.db]
fields = self.model._meta.concrete_fields
objs = list(objs)
self._populate_pk_values(objs)
with transaction.atomic(using=self.db, savepoint=False):
objs_with_pk, objs_without_pk = partition(lambda o: o.pk is None, objs)
if objs_with_pk:
self._batched_insert(objs_with_pk, fields, batch_size, ignore_conflicts=ignore_conflicts)
for obj_with_pk in objs_with_pk:
obj_with_pk._state.adding = False
obj_with_pk._state.db = self.db
if objs_without_pk:
fields = [f for f in fields if not isinstance(f, AutoField)]
ids = self._batched_insert(objs_without_pk, fields, batch_size, ignore_conflicts=ignore_conflicts)
if connection.features.can_return_ids_from_bulk_insert and not ignore_conflicts:
assert len(ids) == len(objs_without_pk)
for obj_without_pk, pk in zip(objs_without_pk, ids):
obj_without_pk.pk = pk
obj_without_pk._state.adding = False
obj_without_pk._state.db = self.db
return objs
def bulk_update(self, objs, fields, batch_size=None):
"""
Update the given fields in each of the given objects in the database.
"""
if batch_size is not None and batch_size < 0:
raise ValueError('Batch size must be a positive integer.')
if not fields:
raise ValueError('Field names must be given to bulk_update().')
objs = tuple(objs)
if not all(obj.pk for obj in objs):
raise ValueError('All bulk_update() objects must have a primary key set.')
fields = [self.model._meta.get_field(name) for name in fields]
if any(not f.concrete or f.many_to_many for f in fields):
raise ValueError('bulk_update() can only be used with concrete fields.')
if any(f.primary_key for f in fields):
raise ValueError('bulk_update() cannot be used with primary key fields.')
if not objs:
return
# PK is used twice in the resulting update query, once in the filter
# and once in the WHEN. Each field will also have one CAST.
max_batch_size = connections[self.db].ops.bulk_batch_size(['pk', 'pk'] + fields, objs)
batch_size = min(batch_size, max_batch_size) if batch_size else max_batch_size
requires_casting = connections[self.db].features.requires_casted_case_in_updates
batches = (objs[i:i + batch_size] for i in range(0, len(objs), batch_size))
updates = []
for batch_objs in batches:
update_kwargs = {}
for field in fields:
when_statements = []
for obj in batch_objs:
attr = getattr(obj, field.attname)
if not isinstance(attr, Expression):
attr = Value(attr, output_field=field)
when_statements.append(When(pk=obj.pk, then=attr))
case_statement = Case(*when_statements, output_field=field)
if requires_casting:
case_statement = Cast(case_statement, output_field=field)
update_kwargs[field.attname] = case_statement
updates.append(([obj.pk for obj in batch_objs], update_kwargs))
with transaction.atomic(using=self.db, savepoint=False):
for pks, update_kwargs in updates:
self.filter(pk__in=pks).update(**update_kwargs)
bulk_update.alters_data = True
def get_or_create(self, defaults=None, **kwargs):
"""
Look up an object with the given kwargs, creating one if necessary.
Return a tuple of (object, created), where created is a boolean
specifying whether an object was created.
"""
# The get() needs to be targeted at the write database in order
# to avoid potential transaction consistency problems.
self._for_write = True
try:
return self.get(**kwargs), False
except self.model.DoesNotExist:
params = self._extract_model_params(defaults, **kwargs)
return self._create_object_from_params(kwargs, params)
def update_or_create(self, defaults=None, **kwargs):
"""
Look up an object with the given kwargs, updating one with defaults
if it exists, otherwise create a new one.
Return a tuple (object, created), where created is a boolean
specifying whether an object was created.
"""
defaults = defaults or {}
self._for_write = True
with transaction.atomic(using=self.db):
try:
obj = self.select_for_update().get(**kwargs)
except self.model.DoesNotExist:
params = self._extract_model_params(defaults, **kwargs)
# Lock the row so that a concurrent update is blocked until
# after update_or_create() has performed its save.
obj, created = self._create_object_from_params(kwargs, params, lock=True)
if created:
return obj, created
for k, v in defaults.items():
setattr(obj, k, v() if callable(v) else v)
obj.save(using=self.db)
return obj, False
def _create_object_from_params(self, lookup, params, lock=False):
"""
Try to create an object using passed params. Used by get_or_create()
and update_or_create().
"""
try:
with transaction.atomic(using=self.db):
params = {k: v() if callable(v) else v for k, v in params.items()}
obj = self.create(**params)
return obj, True
except IntegrityError as e:
try:
qs = self.select_for_update() if lock else self
return qs.get(**lookup), False
except self.model.DoesNotExist:
pass
raise e
def _extract_model_params(self, defaults, **kwargs):
"""
Prepare `params` for creating a model instance based on the given
kwargs; for use by get_or_create() and update_or_create().
"""
defaults = defaults or {}
params = {k: v for k, v in kwargs.items() if LOOKUP_SEP not in k}
params.update(defaults)
property_names = self.model._meta._property_names
invalid_params = []
for param in params:
try:
self.model._meta.get_field(param)
except exceptions.FieldDoesNotExist:
# It's okay to use a model's property if it has a setter.
if not (param in property_names and getattr(self.model, param).fset):
invalid_params.append(param)
if invalid_params:
raise exceptions.FieldError(
"Invalid field name(s) for model %s: '%s'." % (
self.model._meta.object_name,
"', '".join(sorted(invalid_params)),
))
return params
def _earliest_or_latest(self, *fields, field_name=None):
"""
Return the latest object, according to the model's
'get_latest_by' option or optional given field_name.
"""
if fields and field_name is not None:
raise ValueError('Cannot use both positional arguments and the field_name keyword argument.')
if field_name is not None:
warnings.warn(
'The field_name keyword argument to earliest() and latest() '
'is deprecated in favor of passing positional arguments.',
RemovedInDjango30Warning,
)
order_by = (field_name,)
elif fields:
order_by = fields
else:
order_by = getattr(self.model._meta, 'get_latest_by')
if order_by and not isinstance(order_by, (tuple, list)):
order_by = (order_by,)
if order_by is None:
raise ValueError(
"earliest() and latest() require either fields as positional "
"arguments or 'get_latest_by' in the model's Meta."
)
assert self.query.can_filter(), \
"Cannot change a query once a slice has been taken."
obj = self._chain()
obj.query.set_limits(high=1)
obj.query.clear_ordering(force_empty=True)
obj.query.add_ordering(*order_by)
return obj.get()
def earliest(self, *fields, field_name=None):
return self._earliest_or_latest(*fields, field_name=field_name)
def latest(self, *fields, field_name=None):
return self.reverse()._earliest_or_latest(*fields, field_name=field_name)
def first(self):
"""Return the first object of a query or None if no match is found."""
for obj in (self if self.ordered else self.order_by('pk'))[:1]:
return obj
def last(self):
"""Return the last object of a query or None if no match is found."""
for obj in (self.reverse() if self.ordered else self.order_by('-pk'))[:1]:
return obj
def in_bulk(self, id_list=None, *, field_name='pk'):
"""
Return a dictionary mapping each of the given IDs to the object with
that ID. If `id_list` isn't provided, evaluate the entire QuerySet.
"""
assert self.query.can_filter(), \
"Cannot use 'limit' or 'offset' with in_bulk"
if field_name != 'pk' and not self.model._meta.get_field(field_name).unique:
raise ValueError("in_bulk()'s field_name must be a unique field but %r isn't." % field_name)
if id_list is not None:
if not id_list:
return {}
filter_key = '{}__in'.format(field_name)
batch_size = connections[self.db].features.max_query_params
id_list = tuple(id_list)
# If the database has a limit on the number of query parameters
# (e.g. SQLite), retrieve objects in batches if necessary.
if batch_size and batch_size < len(id_list):
qs = ()
for offset in range(0, len(id_list), batch_size):
batch = id_list[offset:offset + batch_size]
qs += tuple(self.filter(**{filter_key: batch}).order_by())
else:
qs = self.filter(**{filter_key: id_list}).order_by()
else:
qs = self._chain()
return {getattr(obj, field_name): obj for obj in qs}
def delete(self):
"""Delete the records in the current QuerySet."""
assert self.query.can_filter(), \
"Cannot use 'limit' or 'offset' with delete."
if self._fields is not None:
raise TypeError("Cannot call delete() after .values() or .values_list()")
del_query = self._chain()
# The delete is actually 2 queries - one to find related objects,
# and one to delete. Make sure that the discovery of related
# objects is performed on the same database as the deletion.
del_query._for_write = True
# Disable non-supported fields.
del_query.query.select_for_update = False
del_query.query.select_related = False
del_query.query.clear_ordering(force_empty=True)
collector = Collector(using=del_query.db)
collector.collect(del_query)
deleted, _rows_count = collector.delete()
# Clear the result cache, in case this QuerySet gets reused.
self._result_cache = None
return deleted, _rows_count
delete.alters_data = True
delete.queryset_only = True
def _raw_delete(self, using):
"""
Delete objects found from the given queryset in single direct SQL
query. No signals are sent and there is no protection for cascades.
"""
return sql.DeleteQuery(self.model).delete_qs(self, using)
_raw_delete.alters_data = True
def update(self, **kwargs):
"""
Update all elements in the current QuerySet, setting all the given
fields to the appropriate values.
"""
assert self.query.can_filter(), \
"Cannot update a query once a slice has been taken."
self._for_write = True
query = self.query.chain(sql.UpdateQuery)
query.add_update_values(kwargs)
# Clear any annotations so that they won't be present in subqueries.
query._annotations = None
with transaction.mark_for_rollback_on_error(using=self.db):
rows = query.get_compiler(self.db).execute_sql(CURSOR)
self._result_cache = None
return rows
update.alters_data = True
def _update(self, values):
"""
A version of update() that accepts field objects instead of field names.
Used primarily for model saving and not intended for use by general
code (it requires too much poking around at model internals to be
useful at that level).
"""
assert self.query.can_filter(), \
"Cannot update a query once a slice has been taken."
query = self.query.chain(sql.UpdateQuery)
query.add_update_fields(values)
# Clear any annotations so that they won't be present in subqueries.
query._annotations = None
self._result_cache = None
return query.get_compiler(self.db).execute_sql(CURSOR)
_update.alters_data = True
_update.queryset_only = False
def exists(self):
if self._result_cache is None:
return self.query.has_results(using=self.db)
return bool(self._result_cache)
def _prefetch_related_objects(self):
# This method can only be called once the result cache has been filled.
prefetch_related_objects(self._result_cache, *self._prefetch_related_lookups)
self._prefetch_done = True
def explain(self, *, format=None, **options):
return self.query.explain(using=self.db, format=format, **options)
##################################################
# PUBLIC METHODS THAT RETURN A QUERYSET SUBCLASS #
##################################################
def raw(self, raw_query, params=None, translations=None, using=None):
if using is None:
using = self.db
qs = RawQuerySet(raw_query, model=self.model, params=params, translations=translations, using=using)
qs._prefetch_related_lookups = self._prefetch_related_lookups[:]
return qs
def _values(self, *fields, **expressions):
clone = self._chain()
if expressions:
clone = clone.annotate(**expressions)
clone._fields = fields
clone.query.set_values(fields)
return clone
def values(self, *fields, **expressions):
fields += tuple(expressions)
clone = self._values(*fields, **expressions)
clone._iterable_class = ValuesIterable
return clone
def values_list(self, *fields, flat=False, named=False):
if flat and named:
raise TypeError("'flat' and 'named' can't be used together.")
if flat and len(fields) > 1:
raise TypeError("'flat' is not valid when values_list is called with more than one field.")
field_names = {f for f in fields if not hasattr(f, 'resolve_expression')}
_fields = []
expressions = {}
counter = 1
for field in fields:
if hasattr(field, 'resolve_expression'):
field_id_prefix = getattr(field, 'default_alias', field.__class__.__name__.lower())
while True:
field_id = field_id_prefix + str(counter)
counter += 1
if field_id not in field_names:
break
expressions[field_id] = field
_fields.append(field_id)
else:
_fields.append(field)
clone = self._values(*_fields, **expressions)
clone._iterable_class = (
NamedValuesListIterable if named
else FlatValuesListIterable if flat
else ValuesListIterable
)
return clone
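    # Usage sketch (illustrative only; ``Entry`` is a hypothetical model, not
    # part of this module):
    #
    #   Entry.objects.values('id', 'headline')        # [{'id': 1, 'headline': ...}, ...]
    #   Entry.objects.values_list('id', 'headline')   # [(1, ...), ...]
    #   Entry.objects.values_list('id', flat=True)    # [1, 2, 3, ...]
    #   Entry.objects.values_list('id', 'headline', named=True)   # [Row(id=1, headline=...), ...]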
def dates(self, field_name, kind, order='ASC'):
"""
Return a list of date objects representing all available dates for
the given field_name, scoped to 'kind'.
"""
assert kind in ('year', 'month', 'week', 'day'), \
"'kind' must be one of 'year', 'month', 'week', or 'day'."
assert order in ('ASC', 'DESC'), \
"'order' must be either 'ASC' or 'DESC'."
return self.annotate(
datefield=Trunc(field_name, kind, output_field=DateField()),
plain_field=F(field_name)
).values_list(
'datefield', flat=True
).distinct().filter(plain_field__isnull=False).order_by(('-' if order == 'DESC' else '') + 'datefield')
def datetimes(self, field_name, kind, order='ASC', tzinfo=None):
"""
Return a list of datetime objects representing all available
datetimes for the given field_name, scoped to 'kind'.
"""
assert kind in ('year', 'month', 'week', 'day', 'hour', 'minute', 'second'), \
"'kind' must be one of 'year', 'month', 'week', 'day', 'hour', 'minute', or 'second'."
assert order in ('ASC', 'DESC'), \
"'order' must be either 'ASC' or 'DESC'."
if settings.USE_TZ:
if tzinfo is None:
tzinfo = timezone.get_current_timezone()
else:
tzinfo = None
return self.annotate(
datetimefield=Trunc(field_name, kind, output_field=DateTimeField(), tzinfo=tzinfo),
plain_field=F(field_name)
).values_list(
'datetimefield', flat=True
).distinct().filter(plain_field__isnull=False).order_by(('-' if order == 'DESC' else '') + 'datetimefield')
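    # Usage sketch (illustrative only; ``Entry`` and its ``pub_date`` field are
    # hypothetical): both methods return distinct truncated values, ordered in
    # the requested direction.
    #
    #   Entry.objects.dates('pub_date', 'month')                   # [date(2005, 2, 1), ...]
    #   Entry.objects.datetimes('pub_date', 'hour', order='DESC')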
def none(self):
"""Return an empty QuerySet."""
clone = self._chain()
clone.query.set_empty()
return clone
##################################################################
# PUBLIC METHODS THAT ALTER ATTRIBUTES AND RETURN A NEW QUERYSET #
##################################################################
def all(self):
"""
Return a new QuerySet that is a copy of the current one. This allows a
QuerySet to proxy for a model manager in some cases.
"""
return self._chain()
def filter(self, *args, **kwargs):
"""
Return a new QuerySet instance with the args ANDed to the existing
set.
"""
return self._filter_or_exclude(False, *args, **kwargs)
def exclude(self, *args, **kwargs):
"""
Return a new QuerySet instance with NOT (args) ANDed to the existing
set.
"""
return self._filter_or_exclude(True, *args, **kwargs)
def _filter_or_exclude(self, negate, *args, **kwargs):
if args or kwargs:
assert self.query.can_filter(), \
"Cannot filter a query once a slice has been taken."
clone = self._chain()
if negate:
clone.query.add_q(~Q(*args, **kwargs))
else:
clone.query.add_q(Q(*args, **kwargs))
return clone
def complex_filter(self, filter_obj):
"""
Return a new QuerySet instance with filter_obj added to the filters.
filter_obj can be a Q object or a dictionary of keyword lookup
arguments.
This exists to support framework features such as 'limit_choices_to',
and usually it will be more natural to use other methods.
"""
if isinstance(filter_obj, Q):
clone = self._chain()
clone.query.add_q(filter_obj)
return clone
else:
return self._filter_or_exclude(None, **filter_obj)
def _combinator_query(self, combinator, *other_qs, all=False):
# Clone the query to inherit the select list and everything
clone = self._chain()
# Clear limits and ordering so they can be reapplied
clone.query.clear_ordering(True)
clone.query.clear_limits()
clone.query.combined_queries = (self.query,) + tuple(qs.query for qs in other_qs)
clone.query.combinator = combinator
clone.query.combinator_all = all
return clone
def union(self, *other_qs, all=False):
# If the query is an EmptyQuerySet, combine all nonempty querysets.
if isinstance(self, EmptyQuerySet):
qs = [q for q in other_qs if not isinstance(q, EmptyQuerySet)]
return qs[0]._combinator_query('union', *qs[1:], all=all) if qs else self
return self._combinator_query('union', *other_qs, all=all)
def intersection(self, *other_qs):
# If any query is an EmptyQuerySet, return it.
if isinstance(self, EmptyQuerySet):
return self
for other in other_qs:
if isinstance(other, EmptyQuerySet):
return other
return self._combinator_query('intersection', *other_qs)
def difference(self, *other_qs):
# If the query is an EmptyQuerySet, return it.
if isinstance(self, EmptyQuerySet):
return self
return self._combinator_query('difference', *other_qs)
def select_for_update(self, nowait=False, skip_locked=False, of=()):
"""
Return a new QuerySet instance that will select objects with a
FOR UPDATE lock.
"""
if nowait and skip_locked:
raise ValueError('The nowait option cannot be used with skip_locked.')
obj = self._chain()
obj._for_write = True
obj.query.select_for_update = True
obj.query.select_for_update_nowait = nowait
obj.query.select_for_update_skip_locked = skip_locked
obj.query.select_for_update_of = of
return obj
def select_related(self, *fields):
"""
Return a new QuerySet instance that will select related objects.
If fields are specified, they must be ForeignKey fields and only those
related objects are included in the selection.
If select_related(None) is called, clear the list.
"""
if self._fields is not None:
raise TypeError("Cannot call select_related() after .values() or .values_list()")
obj = self._chain()
if fields == (None,):
obj.query.select_related = False
elif fields:
obj.query.add_select_related(fields)
else:
obj.query.select_related = True
return obj
def prefetch_related(self, *lookups):
"""
Return a new QuerySet instance that will prefetch the specified
Many-To-One and Many-To-Many related objects when the QuerySet is
evaluated.
When prefetch_related() is called more than once, append to the list of
prefetch lookups. If prefetch_related(None) is called, clear the list.
"""
clone = self._chain()
if lookups == (None,):
clone._prefetch_related_lookups = ()
else:
for lookup in lookups:
if isinstance(lookup, Prefetch):
lookup = lookup.prefetch_to
lookup = lookup.split(LOOKUP_SEP, 1)[0]
if lookup in self.query._filtered_relations:
raise ValueError('prefetch_related() is not supported with FilteredRelation.')
clone._prefetch_related_lookups = clone._prefetch_related_lookups + lookups
return clone
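    # Usage sketch (illustrative only; ``Restaurant`` and its ``pizzas`` /
    # ``pizzas__toppings`` relations are hypothetical):
    #
    #   Restaurant.objects.prefetch_related('pizzas__toppings')
    #   Restaurant.objects.prefetch_related('pizzas').prefetch_related(None)  # cleared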
def annotate(self, *args, **kwargs):
"""
Return a query set in which the returned objects have been annotated
with extra data or aggregations.
"""
self._validate_values_are_expressions(args + tuple(kwargs.values()), method_name='annotate')
annotations = OrderedDict() # To preserve ordering of args
for arg in args:
# The default_alias property may raise a TypeError.
try:
if arg.default_alias in kwargs:
raise ValueError("The named annotation '%s' conflicts with the "
"default name for another annotation."
% arg.default_alias)
except TypeError:
raise TypeError("Complex annotations require an alias")
annotations[arg.default_alias] = arg
annotations.update(kwargs)
clone = self._chain()
names = self._fields
if names is None:
names = set(chain.from_iterable(
(field.name, field.attname) if hasattr(field, 'attname') else (field.name,)
for field in self.model._meta.get_fields()
))
for alias, annotation in annotations.items():
if alias in names:
raise ValueError("The annotation '%s' conflicts with a field on "
"the model." % alias)
if isinstance(annotation, FilteredRelation):
clone.query.add_filtered_relation(annotation, alias)
else:
clone.query.add_annotation(annotation, alias, is_summary=False)
for alias, annotation in clone.query.annotations.items():
if alias in annotations and annotation.contains_aggregate:
if clone._fields is None:
clone.query.group_by = True
else:
clone.query.set_group_by()
break
return clone
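    # Usage sketch (illustrative only; ``Blog`` and its ``entry`` relation are
    # hypothetical, Count is django.db.models.Count):
    #
    #   Blog.objects.annotate(entry_count=Count('entry'))   # explicit alias
    #   Blog.objects.annotate(Count('entry'))               # alias defaults to 'entry__count'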
def order_by(self, *field_names):
"""Return a new QuerySet instance with the ordering changed."""
assert self.query.can_filter(), \
"Cannot reorder a query once a slice has been taken."
obj = self._chain()
obj.query.clear_ordering(force_empty=False)
obj.query.add_ordering(*field_names)
return obj
def distinct(self, *field_names):
"""
Return a new QuerySet instance that will select only distinct results.
"""
assert self.query.can_filter(), \
"Cannot create distinct fields once a slice has been taken."
obj = self._chain()
obj.query.add_distinct_fields(*field_names)
return obj
def extra(self, select=None, where=None, params=None, tables=None,
order_by=None, select_params=None):
"""Add extra SQL fragments to the query."""
assert self.query.can_filter(), \
"Cannot change a query once a slice has been taken"
clone = self._chain()
clone.query.add_extra(select, select_params, where, params, tables, order_by)
return clone
def reverse(self):
"""Reverse the ordering of the QuerySet."""
if not self.query.can_filter():
raise TypeError('Cannot reverse a query once a slice has been taken.')
clone = self._chain()
clone.query.standard_ordering = not clone.query.standard_ordering
return clone
def defer(self, *fields):
"""
Defer the loading of data for certain fields until they are accessed.
Add the set of deferred fields to any existing set of deferred fields.
The only exception to this is if None is passed in as the only
        parameter, in which case all deferrals are removed.
"""
if self._fields is not None:
raise TypeError("Cannot call defer() after .values() or .values_list()")
clone = self._chain()
if fields == (None,):
clone.query.clear_deferred_loading()
else:
clone.query.add_deferred_loading(fields)
return clone
def only(self, *fields):
"""
Essentially, the opposite of defer(). Only the fields passed into this
method and that are not already specified as deferred are loaded
immediately when the queryset is evaluated.
"""
if self._fields is not None:
raise TypeError("Cannot call only() after .values() or .values_list()")
if fields == (None,):
# Can only pass None to defer(), not only(), as the rest option.
# That won't stop people trying to do this, so let's be explicit.
raise TypeError("Cannot pass None as an argument to only().")
for field in fields:
field = field.split(LOOKUP_SEP, 1)[0]
if field in self.query._filtered_relations:
raise ValueError('only() is not supported with FilteredRelation.')
clone = self._chain()
clone.query.add_immediate_loading(fields)
return clone
def using(self, alias):
"""Select which database this QuerySet should execute against."""
clone = self._chain()
clone._db = alias
return clone
###################################
# PUBLIC INTROSPECTION ATTRIBUTES #
###################################
@property
def ordered(self):
"""
Return True if the QuerySet is ordered -- i.e. has an order_by()
clause or a default ordering on the model (or is empty).
"""
if isinstance(self, EmptyQuerySet):
return True
if self.query.extra_order_by or self.query.order_by:
return True
elif self.query.default_ordering and self.query.get_meta().ordering:
return True
else:
return False
@property
def db(self):
"""Return the database used if this query is executed now."""
if self._for_write:
return self._db or router.db_for_write(self.model, **self._hints)
return self._db or router.db_for_read(self.model, **self._hints)
###################
# PRIVATE METHODS #
###################
def _insert(self, objs, fields, return_id=False, raw=False, using=None, ignore_conflicts=False):
"""
Insert a new record for the given model. This provides an interface to
the InsertQuery class and is how Model.save() is implemented.
"""
self._for_write = True
if using is None:
using = self.db
query = sql.InsertQuery(self.model, ignore_conflicts=ignore_conflicts)
query.insert_values(fields, objs, raw=raw)
return query.get_compiler(using=using).execute_sql(return_id)
_insert.alters_data = True
_insert.queryset_only = False
def _batched_insert(self, objs, fields, batch_size, ignore_conflicts=False):
"""
Helper method for bulk_create() to insert objs one batch at a time.
"""
if ignore_conflicts and not connections[self.db].features.supports_ignore_conflicts:
raise NotSupportedError('This database backend does not support ignoring conflicts.')
ops = connections[self.db].ops
batch_size = (batch_size or max(ops.bulk_batch_size(fields, objs), 1))
inserted_ids = []
bulk_return = connections[self.db].features.can_return_ids_from_bulk_insert
for item in [objs[i:i + batch_size] for i in range(0, len(objs), batch_size)]:
if bulk_return and not ignore_conflicts:
inserted_id = self._insert(
item, fields=fields, using=self.db, return_id=True,
ignore_conflicts=ignore_conflicts,
)
if isinstance(inserted_id, list):
inserted_ids.extend(inserted_id)
else:
inserted_ids.append(inserted_id)
else:
self._insert(item, fields=fields, using=self.db, ignore_conflicts=ignore_conflicts)
return inserted_ids
def _chain(self, **kwargs):
"""
Return a copy of the current QuerySet that's ready for another
operation.
"""
obj = self._clone()
if obj._sticky_filter:
obj.query.filter_is_sticky = True
obj._sticky_filter = False
obj.__dict__.update(kwargs)
return obj
def _clone(self):
"""
Return a copy of the current QuerySet. A lightweight alternative
to deepcopy().
"""
c = self.__class__(model=self.model, query=self.query.chain(), using=self._db, hints=self._hints)
c._sticky_filter = self._sticky_filter
c._for_write = self._for_write
c._prefetch_related_lookups = self._prefetch_related_lookups[:]
c._known_related_objects = self._known_related_objects
c._iterable_class = self._iterable_class
c._fields = self._fields
return c
def _fetch_all(self):
if self._result_cache is None:
self._result_cache = list(self._iterable_class(self))
if self._prefetch_related_lookups and not self._prefetch_done:
self._prefetch_related_objects()
def _next_is_sticky(self):
"""
Indicate that the next filter call and the one following that should
be treated as a single filter. This is only important when it comes to
determining when to reuse tables for many-to-many filters. Required so
that we can filter naturally on the results of related managers.
This doesn't return a clone of the current QuerySet (it returns
"self"). The method is only used internally and should be immediately
followed by a filter() that does create a clone.
"""
self._sticky_filter = True
return self
def _merge_sanity_check(self, other):
"""Check that two QuerySet classes may be merged."""
if self._fields is not None and (
set(self.query.values_select) != set(other.query.values_select) or
set(self.query.extra_select) != set(other.query.extra_select) or
set(self.query.annotation_select) != set(other.query.annotation_select)):
raise TypeError(
"Merging '%s' classes must involve the same values in each case."
% self.__class__.__name__
)
def _merge_known_related_objects(self, other):
"""
Keep track of all known related objects from either QuerySet instance.
"""
for field, objects in other._known_related_objects.items():
self._known_related_objects.setdefault(field, {}).update(objects)
def resolve_expression(self, *args, **kwargs):
if self._fields and len(self._fields) > 1:
            # A values() queryset can only be used as a nested query
            # if it is set up to select only a single field.
raise TypeError('Cannot use multi-field values as a filter value.')
query = self.query.resolve_expression(*args, **kwargs)
query._db = self._db
return query
resolve_expression.queryset_only = True
def _add_hints(self, **hints):
"""
Update hinting information for use by routers. Add new key/values or
overwrite existing key/values.
"""
self._hints.update(hints)
def _has_filters(self):
"""
Check if this QuerySet has any filtering going on. This isn't
        equivalent to checking if all objects are present in results, for
example, qs[1:]._has_filters() -> False.
"""
return self.query.has_filters()
@staticmethod
def _validate_values_are_expressions(values, method_name):
invalid_args = sorted(str(arg) for arg in values if not hasattr(arg, 'resolve_expression'))
if invalid_args:
raise TypeError(
'QuerySet.%s() received non-expression(s): %s.' % (
method_name,
', '.join(invalid_args),
)
)
class InstanceCheckMeta(type):
def __instancecheck__(self, instance):
return isinstance(instance, QuerySet) and instance.query.is_empty()
class EmptyQuerySet(metaclass=InstanceCheckMeta):
"""
    Marker class for checking whether a queryset is empty via .none():
isinstance(qs.none(), EmptyQuerySet) -> True
"""
def __init__(self, *args, **kwargs):
raise TypeError("EmptyQuerySet can't be instantiated")
class RawQuerySet:
"""
Provide an iterator which converts the results of raw SQL queries into
annotated model instances.
"""
def __init__(self, raw_query, model=None, query=None, params=None,
translations=None, using=None, hints=None):
self.raw_query = raw_query
self.model = model
self._db = using
self._hints = hints or {}
self.query = query or sql.RawQuery(sql=raw_query, using=self.db, params=params)
self.params = params or ()
self.translations = translations or {}
self._result_cache = None
self._prefetch_related_lookups = ()
self._prefetch_done = False
def resolve_model_init_order(self):
"""Resolve the init field names and value positions."""
converter = connections[self.db].introspection.column_name_converter
model_init_fields = [f for f in self.model._meta.fields if converter(f.column) in self.columns]
annotation_fields = [(column, pos) for pos, column in enumerate(self.columns)
if column not in self.model_fields]
model_init_order = [self.columns.index(converter(f.column)) for f in model_init_fields]
model_init_names = [f.attname for f in model_init_fields]
return model_init_names, model_init_order, annotation_fields
def prefetch_related(self, *lookups):
"""Same as QuerySet.prefetch_related()"""
clone = self._clone()
if lookups == (None,):
clone._prefetch_related_lookups = ()
else:
clone._prefetch_related_lookups = clone._prefetch_related_lookups + lookups
return clone
def _prefetch_related_objects(self):
prefetch_related_objects(self._result_cache, *self._prefetch_related_lookups)
self._prefetch_done = True
def _clone(self):
"""Same as QuerySet._clone()"""
c = self.__class__(
self.raw_query, model=self.model, query=self.query, params=self.params,
translations=self.translations, using=self._db, hints=self._hints
)
c._prefetch_related_lookups = self._prefetch_related_lookups[:]
return c
def _fetch_all(self):
if self._result_cache is None:
self._result_cache = list(self.iterator())
if self._prefetch_related_lookups and not self._prefetch_done:
self._prefetch_related_objects()
def __len__(self):
self._fetch_all()
return len(self._result_cache)
def __bool__(self):
self._fetch_all()
return bool(self._result_cache)
def __iter__(self):
self._fetch_all()
return iter(self._result_cache)
def iterator(self):
# Cache some things for performance reasons outside the loop.
db = self.db
compiler = connections[db].ops.compiler('SQLCompiler')(
self.query, connections[db], db
)
query = iter(self.query)
try:
model_init_names, model_init_pos, annotation_fields = self.resolve_model_init_order()
if self.model._meta.pk.attname not in model_init_names:
raise InvalidQuery('Raw query must include the primary key')
model_cls = self.model
fields = [self.model_fields.get(c) for c in self.columns]
converters = compiler.get_converters([
f.get_col(f.model._meta.db_table) if f else None for f in fields
])
if converters:
query = compiler.apply_converters(query, converters)
for values in query:
# Associate fields to values
model_init_values = [values[pos] for pos in model_init_pos]
instance = model_cls.from_db(db, model_init_names, model_init_values)
if annotation_fields:
for column, pos in annotation_fields:
setattr(instance, column, values[pos])
yield instance
finally:
# Done iterating the Query. If it has its own cursor, close it.
if hasattr(self.query, 'cursor') and self.query.cursor:
self.query.cursor.close()
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self.query)
def __getitem__(self, k):
return list(self)[k]
@property
def db(self):
"""Return the database used if this query is executed now."""
return self._db or router.db_for_read(self.model, **self._hints)
def using(self, alias):
"""Select the database this RawQuerySet should execute against."""
return RawQuerySet(
self.raw_query, model=self.model,
query=self.query.chain(using=alias),
params=self.params, translations=self.translations,
using=alias,
)
@cached_property
def columns(self):
"""
A list of model field names in the order they'll appear in the
query results.
"""
columns = self.query.get_columns()
# Adjust any column names which don't match field names
for (query_name, model_name) in self.translations.items():
# Ignore translations for nonexistent column names
try:
index = columns.index(query_name)
except ValueError:
pass
else:
columns[index] = model_name
return columns
@cached_property
def model_fields(self):
"""A dict mapping column names to model field names."""
converter = connections[self.db].introspection.table_name_converter
model_fields = {}
for field in self.model._meta.fields:
name, column = field.get_attname_column()
model_fields[converter(column)] = field
return model_fields
class Prefetch:
def __init__(self, lookup, queryset=None, to_attr=None):
# `prefetch_through` is the path we traverse to perform the prefetch.
self.prefetch_through = lookup
# `prefetch_to` is the path to the attribute that stores the result.
self.prefetch_to = lookup
if queryset is not None and not issubclass(queryset._iterable_class, ModelIterable):
raise ValueError('Prefetch querysets cannot use values().')
if to_attr:
self.prefetch_to = LOOKUP_SEP.join(lookup.split(LOOKUP_SEP)[:-1] + [to_attr])
self.queryset = queryset
self.to_attr = to_attr
def __getstate__(self):
obj_dict = self.__dict__.copy()
if self.queryset is not None:
# Prevent the QuerySet from being evaluated
obj_dict['queryset'] = self.queryset._chain(
_result_cache=[],
_prefetch_done=True,
)
return obj_dict
def add_prefix(self, prefix):
self.prefetch_through = prefix + LOOKUP_SEP + self.prefetch_through
self.prefetch_to = prefix + LOOKUP_SEP + self.prefetch_to
def get_current_prefetch_to(self, level):
return LOOKUP_SEP.join(self.prefetch_to.split(LOOKUP_SEP)[:level + 1])
def get_current_to_attr(self, level):
parts = self.prefetch_to.split(LOOKUP_SEP)
to_attr = parts[level]
as_attr = self.to_attr and level == len(parts) - 1
return to_attr, as_attr
def get_current_queryset(self, level):
if self.get_current_prefetch_to(level) == self.prefetch_to:
return self.queryset
return None
def __eq__(self, other):
return isinstance(other, Prefetch) and self.prefetch_to == other.prefetch_to
def __hash__(self):
return hash((self.__class__, self.prefetch_to))
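# Usage sketch (illustrative only; ``Restaurant``/``Pizza`` are hypothetical):
# a Prefetch object attaches a custom queryset and/or a to_attr to a lookup.
#
#   Restaurant.objects.prefetch_related(
#       Prefetch('pizzas',
#                queryset=Pizza.objects.filter(vegetarian=True),
#                to_attr='vegetarian_pizzas'))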
def normalize_prefetch_lookups(lookups, prefix=None):
"""Normalize lookups into Prefetch objects."""
ret = []
for lookup in lookups:
if not isinstance(lookup, Prefetch):
lookup = Prefetch(lookup)
if prefix:
lookup.add_prefix(prefix)
ret.append(lookup)
return ret
def prefetch_related_objects(model_instances, *related_lookups):
"""
Populate prefetched object caches for a list of model instances based on
the lookups/Prefetch instances given.
"""
if not model_instances:
return # nothing to do
# We need to be able to dynamically add to the list of prefetch_related
# lookups that we look up (see below). So we need some book keeping to
# ensure we don't do duplicate work.
done_queries = {} # dictionary of things like 'foo__bar': [results]
auto_lookups = set() # we add to this as we go through.
followed_descriptors = set() # recursion protection
all_lookups = normalize_prefetch_lookups(reversed(related_lookups))
while all_lookups:
lookup = all_lookups.pop()
if lookup.prefetch_to in done_queries:
if lookup.queryset:
raise ValueError("'%s' lookup was already seen with a different queryset. "
"You may need to adjust the ordering of your lookups." % lookup.prefetch_to)
continue
# Top level, the list of objects to decorate is the result cache
# from the primary QuerySet. It won't be for deeper levels.
obj_list = model_instances
through_attrs = lookup.prefetch_through.split(LOOKUP_SEP)
for level, through_attr in enumerate(through_attrs):
# Prepare main instances
if not obj_list:
break
prefetch_to = lookup.get_current_prefetch_to(level)
if prefetch_to in done_queries:
# Skip any prefetching, and any object preparation
obj_list = done_queries[prefetch_to]
continue
# Prepare objects:
good_objects = True
for obj in obj_list:
# Since prefetching can re-use instances, it is possible to have
# the same instance multiple times in obj_list, so obj might
# already be prepared.
if not hasattr(obj, '_prefetched_objects_cache'):
try:
obj._prefetched_objects_cache = {}
except (AttributeError, TypeError):
# Must be an immutable object from
# values_list(flat=True), for example (TypeError) or
# a QuerySet subclass that isn't returning Model
# instances (AttributeError), either in Django or a 3rd
# party. prefetch_related() doesn't make sense, so quit.
good_objects = False
break
if not good_objects:
break
# Descend down tree
# We assume that objects retrieved are homogeneous (which is the premise
# of prefetch_related), so what applies to first object applies to all.
first_obj = obj_list[0]
to_attr = lookup.get_current_to_attr(level)[0]
prefetcher, descriptor, attr_found, is_fetched = get_prefetcher(first_obj, through_attr, to_attr)
if not attr_found:
raise AttributeError("Cannot find '%s' on %s object, '%s' is an invalid "
"parameter to prefetch_related()" %
(through_attr, first_obj.__class__.__name__, lookup.prefetch_through))
if level == len(through_attrs) - 1 and prefetcher is None:
# Last one, this *must* resolve to something that supports
# prefetching, otherwise there is no point adding it and the
# developer asking for it has made a mistake.
raise ValueError("'%s' does not resolve to an item that supports "
"prefetching - this is an invalid parameter to "
"prefetch_related()." % lookup.prefetch_through)
if prefetcher is not None and not is_fetched:
obj_list, additional_lookups = prefetch_one_level(obj_list, prefetcher, lookup, level)
# We need to ensure we don't keep adding lookups from the
# same relationships to stop infinite recursion. So, if we
# are already on an automatically added lookup, don't add
# the new lookups from relationships we've seen already.
if not (prefetch_to in done_queries and lookup in auto_lookups and descriptor in followed_descriptors):
done_queries[prefetch_to] = obj_list
new_lookups = normalize_prefetch_lookups(reversed(additional_lookups), prefetch_to)
auto_lookups.update(new_lookups)
all_lookups.extend(new_lookups)
followed_descriptors.add(descriptor)
else:
# Either a singly related object that has already been fetched
# (e.g. via select_related), or hopefully some other property
# that doesn't support prefetching but needs to be traversed.
# We replace the current list of parent objects with the list
# of related objects, filtering out empty or missing values so
# that we can continue with nullable or reverse relations.
new_obj_list = []
for obj in obj_list:
if through_attr in getattr(obj, '_prefetched_objects_cache', ()):
# If related objects have been prefetched, use the
# cache rather than the object's through_attr.
new_obj = list(obj._prefetched_objects_cache.get(through_attr))
else:
try:
new_obj = getattr(obj, through_attr)
except exceptions.ObjectDoesNotExist:
continue
if new_obj is None:
continue
# We special-case `list` rather than something more generic
# like `Iterable` because we don't want to accidentally match
# user models that define __iter__.
if isinstance(new_obj, list):
new_obj_list.extend(new_obj)
else:
new_obj_list.append(new_obj)
obj_list = new_obj_list
def get_prefetcher(instance, through_attr, to_attr):
"""
For the attribute 'through_attr' on the given instance, find
an object that has a get_prefetch_queryset().
Return a 4 tuple containing:
(the object with get_prefetch_queryset (or None),
the descriptor object representing this relationship (or None),
a boolean that is False if the attribute was not found at all,
a boolean that is True if the attribute has already been fetched)
"""
prefetcher = None
is_fetched = False
# For singly related objects, we have to avoid getting the attribute
# from the object, as this will trigger the query. So we first try
# on the class, in order to get the descriptor object.
rel_obj_descriptor = getattr(instance.__class__, through_attr, None)
if rel_obj_descriptor is None:
attr_found = hasattr(instance, through_attr)
else:
attr_found = True
if rel_obj_descriptor:
# singly related object, descriptor object has the
# get_prefetch_queryset() method.
if hasattr(rel_obj_descriptor, 'get_prefetch_queryset'):
prefetcher = rel_obj_descriptor
if rel_obj_descriptor.is_cached(instance):
is_fetched = True
else:
# descriptor doesn't support prefetching, so we go ahead and get
# the attribute on the instance rather than the class to
# support many related managers
rel_obj = getattr(instance, through_attr)
if hasattr(rel_obj, 'get_prefetch_queryset'):
prefetcher = rel_obj
if through_attr != to_attr:
# Special case cached_property instances because hasattr
# triggers attribute computation and assignment.
if isinstance(getattr(instance.__class__, to_attr, None), cached_property):
is_fetched = to_attr in instance.__dict__
else:
is_fetched = hasattr(instance, to_attr)
else:
is_fetched = through_attr in instance._prefetched_objects_cache
return prefetcher, rel_obj_descriptor, attr_found, is_fetched
def prefetch_one_level(instances, prefetcher, lookup, level):
"""
Helper function for prefetch_related_objects().
Run prefetches on all instances using the prefetcher object,
assigning results to relevant caches in instance.
Return the prefetched objects along with any additional prefetches that
must be done due to prefetch_related lookups found from default managers.
"""
# prefetcher must have a method get_prefetch_queryset() which takes a list
# of instances, and returns a tuple:
# (queryset of instances of self.model that are related to passed in instances,
# callable that gets value to be matched for returned instances,
# callable that gets value to be matched for passed in instances,
# boolean that is True for singly related objects,
# cache or field name to assign to,
# boolean that is True when the previous argument is a cache name vs a field name).
# The 'values to be matched' must be hashable as they will be used
# in a dictionary.
rel_qs, rel_obj_attr, instance_attr, single, cache_name, is_descriptor = (
prefetcher.get_prefetch_queryset(instances, lookup.get_current_queryset(level)))
# We have to handle the possibility that the QuerySet we just got back
# contains some prefetch_related lookups. We don't want to trigger the
# prefetch_related functionality by evaluating the query. Rather, we need
# to merge in the prefetch_related lookups.
# Copy the lookups in case it is a Prefetch object which could be reused
# later (happens in nested prefetch_related).
additional_lookups = [
copy.copy(additional_lookup) for additional_lookup
in getattr(rel_qs, '_prefetch_related_lookups', ())
]
if additional_lookups:
# Don't need to clone because the manager should have given us a fresh
# instance, so we access an internal instead of using public interface
# for performance reasons.
rel_qs._prefetch_related_lookups = ()
all_related_objects = list(rel_qs)
rel_obj_cache = {}
for rel_obj in all_related_objects:
rel_attr_val = rel_obj_attr(rel_obj)
rel_obj_cache.setdefault(rel_attr_val, []).append(rel_obj)
to_attr, as_attr = lookup.get_current_to_attr(level)
# Make sure `to_attr` does not conflict with a field.
if as_attr and instances:
# We assume that objects retrieved are homogeneous (which is the premise
# of prefetch_related), so what applies to first object applies to all.
model = instances[0].__class__
try:
model._meta.get_field(to_attr)
except exceptions.FieldDoesNotExist:
pass
else:
msg = 'to_attr={} conflicts with a field on the {} model.'
raise ValueError(msg.format(to_attr, model.__name__))
# Whether or not we're prefetching the last part of the lookup.
leaf = len(lookup.prefetch_through.split(LOOKUP_SEP)) - 1 == level
for obj in instances:
instance_attr_val = instance_attr(obj)
vals = rel_obj_cache.get(instance_attr_val, [])
if single:
val = vals[0] if vals else None
if as_attr:
# A to_attr has been given for the prefetch.
setattr(obj, to_attr, val)
elif is_descriptor:
# cache_name points to a field name in obj.
# This field is a descriptor for a related object.
setattr(obj, cache_name, val)
else:
# No to_attr has been given for this prefetch operation and the
# cache_name does not point to a descriptor. Store the value of
# the field in the object's field cache.
obj._state.fields_cache[cache_name] = val
else:
if as_attr:
setattr(obj, to_attr, vals)
else:
manager = getattr(obj, to_attr)
if leaf and lookup.queryset is not None:
qs = manager._apply_rel_filters(lookup.queryset)
else:
qs = manager.get_queryset()
qs._result_cache = vals
# We don't want the individual qs doing prefetch_related now,
# since we have merged this into the current work.
qs._prefetch_done = True
obj._prefetched_objects_cache[cache_name] = qs
return all_related_objects, additional_lookups
class RelatedPopulator:
"""
RelatedPopulator is used for select_related() object instantiation.
The idea is that each select_related() model will be populated by a
different RelatedPopulator instance. The RelatedPopulator instances get
klass_info and select (computed in SQLCompiler) plus the used db as
input for initialization. That data is used to compute which columns
to use, how to instantiate the model, and how to populate the links
between the objects.
    The actual creation of the objects is done in the populate() method. This
method gets row and from_obj as input and populates the select_related()
model instance.
"""
def __init__(self, klass_info, select, db):
self.db = db
# Pre-compute needed attributes. The attributes are:
# - model_cls: the possibly deferred model class to instantiate
# - either:
# - cols_start, cols_end: usually the columns in the row are
# in the same order model_cls.__init__ expects them, so we
# can instantiate by model_cls(*row[cols_start:cols_end])
# - reorder_for_init: When select_related descends to a child
# class, then we want to reuse the already selected parent
# data. However, in this case the parent data isn't necessarily
# in the same order that Model.__init__ expects it to be, so
# we have to reorder the parent data. The reorder_for_init
# attribute contains a function used to reorder the field data
# in the order __init__ expects it.
# - pk_idx: the index of the primary key field in the reordered
# model data. Used to check if a related object exists at all.
# - init_list: the field attnames fetched from the database. For
# deferred models this isn't the same as all attnames of the
# model's fields.
# - related_populators: a list of RelatedPopulator instances if
# select_related() descends to related models from this model.
# - local_setter, remote_setter: Methods to set cached values on
# the object being populated and on the remote object. Usually
# these are Field.set_cached_value() methods.
select_fields = klass_info['select_fields']
from_parent = klass_info['from_parent']
if not from_parent:
self.cols_start = select_fields[0]
self.cols_end = select_fields[-1] + 1
self.init_list = [
f[0].target.attname for f in select[self.cols_start:self.cols_end]
]
self.reorder_for_init = None
else:
attname_indexes = {select[idx][0].target.attname: idx for idx in select_fields}
model_init_attnames = (f.attname for f in klass_info['model']._meta.concrete_fields)
self.init_list = [attname for attname in model_init_attnames if attname in attname_indexes]
self.reorder_for_init = operator.itemgetter(*[attname_indexes[attname] for attname in self.init_list])
self.model_cls = klass_info['model']
self.pk_idx = self.init_list.index(self.model_cls._meta.pk.attname)
self.related_populators = get_related_populators(klass_info, select, self.db)
self.local_setter = klass_info['local_setter']
self.remote_setter = klass_info['remote_setter']
def populate(self, row, from_obj):
if self.reorder_for_init:
obj_data = self.reorder_for_init(row)
else:
obj_data = row[self.cols_start:self.cols_end]
if obj_data[self.pk_idx] is None:
obj = None
else:
obj = self.model_cls.from_db(self.db, self.init_list, obj_data)
for rel_iter in self.related_populators:
rel_iter.populate(row, obj)
self.local_setter(from_obj, obj)
if obj is not None:
self.remote_setter(obj, from_obj)
def get_related_populators(klass_info, select, db):
iterators = []
related_klass_infos = klass_info.get('related_klass_infos', [])
for rel_klass_info in related_klass_infos:
rel_cls = RelatedPopulator(rel_klass_info, select, db)
iterators.append(rel_cls)
return iterators
|
py | b40c779f8bd1f526f74675cd7a497648ea53adb6 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2020 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ...tensor import arithmetic
from ...tensor.fuse import TensorCpFuseChunk
from ...utils import lazy_import, build_fuse_chunk
cp = lazy_import('cupy', globals=globals(), rename='cp')
CP_INSTALLED = cp is not None
CP_ELEMENTWISE_OP = {
arithmetic.TensorSubtract,
arithmetic.TensorMultiply,
arithmetic.TensorTrueDiv,
arithmetic.TensorSqrt
}
CP_OP = CP_ELEMENTWISE_OP
class CpRuntimeOptimizer:
def __init__(self, graph):
self._graph = graph
@classmethod
def is_available(cls):
return CP_INSTALLED
def optimize(self, keys=None):
self.compose(keys=keys)
def _compose_graph(self, composes):
graph = self._graph
composed_nodes = []
for c in composes:
head_node = c[0]
tail_node = c[-1]
composed_chunk = build_fuse_chunk(
c, TensorCpFuseChunk, op_kw={'dtype': tail_node.dtype}).data
graph.add_node(composed_chunk)
for node in graph.iter_successors(tail_node):
graph.add_edge(composed_chunk, node)
for node in graph.iter_predecessors(head_node):
graph.add_edge(node, composed_chunk)
for node in c:
graph.remove_node(node)
composed_nodes.append(composed_chunk)
return composed_nodes
def compose(self, keys=None):
composes = []
explored = set()
keys = set(keys or [])
graph = self._graph
for v in graph.topological_iter():
if type(v.op) not in CP_OP:
continue
if v in explored:
continue
if graph.count_successors(v) != 1:
continue
if v.key in keys:
continue
selected = [v]
# add successors
cur_node = graph.successors(v)[0]
while graph.count_predecessors(cur_node) == 1 \
and type(cur_node.op) in CP_OP \
and cur_node.key not in keys:
selected.append(cur_node)
if graph.count_successors(cur_node) != 1:
break
else:
cur_node = graph.successors(cur_node)[0]
if len(selected) > 1:
explored.update(selected)
composes.append(list(selected))
return self._compose_graph(composes)
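# Sketch of how a caller might drive this optimizer (illustrative only; the
# surrounding chunk-graph plumbing is assumed and not shown here):
#
#   if CpRuntimeOptimizer.is_available():
#       CpRuntimeOptimizer(chunk_graph).optimize(keys=result_chunk_keys)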
|
py | b40c77c8b5387491d42091d589feaa30c5a50e4c | # @copyright@
# Copyright (c) 2006 - 2018 Teradata
# All rights reserved. Stacki(r) v5.x stacki.com
# https://github.com/Teradata/stacki/blob/master/LICENSE.txt
# @copyright@
#
# @rocks@
# Copyright (c) 2000 - 2010 The Regents of the University of California
# All rights reserved. Rocks(r) v5.4 www.rocksclusters.org
# https://github.com/Teradata/stacki/blob/master/LICENSE-ROCKS.txt
# @rocks@
import stack.commands
from stack.exception import ArgRequired
class Command(stack.commands.remove.os.command):
"""
Remove a static route for an OS type.
<arg type='string' name='os' repeat='1'>
The OS type (e.g., 'linux', 'sunos', etc.).
</arg>
<param type='string' name='address' optional='0'>
The address of the static route to remove.
</param>
<example cmd='remove os route linux address=1.2.3.4'>
Remove the static route for the OS 'linux' that has the
network address '1.2.3.4'.
</example>
"""
def run(self, params, args):
if len(args) == 0:
raise ArgRequired(self, 'os')
(address, ) = self.fillParams([ ('address', None, True) ])
for os in self.getOSNames(args):
self.db.execute(
'delete from os_routes where os=%s and network=%s',
(os, address)
)
|
py | b40c78c292c2c330905c1c75f09c1e5b2f973412 | """Sphinx configuration."""
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = "radio-dreams"
copyright = "2021, Aman Chokshi"
author = "Aman Chokshi"
# The full version, including alpha/beta/rc tags
release = "0.1.0"
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "alabaster"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
|
py | b40c7913d5a8f71e87534945c2199b453fab42e4 | from flask import Flask
from project.controllers import api
def create_app():
app = Flask(__name__)
api.init_app(app)
return app
|
py | b40c797271547122e6436277ff07ce966ad3a1d5 | import os
os.system("python src/project2_feature_generator.py data/set_train train 1 278 30,140,40,180,50,140 9 9 9 50 100 1600 > features_9_50.txt")
os.system("python src/project2_feature_generator.py data/set_test test 1 138 30,140,40,180,50,140 9 9 9 50 100 1600 > features_9_50_test.txt")
os.system("python src/project2_submission.py features_9_50.txt features_9_50_test.txt > final_sub.csv")
|
py | b40c7989c226b9a1acee0a1aaaa5418f58abd111 | #!/usr/bin/python
import sys
import argparse
import pickle
from util import www2fb, clean_uri, processed_text
def get_names_for_entities(namespath, withtype=False):
print("getting names map...")
names = {}
with open(namespath, 'r') as f:
for i, line in enumerate(f):
if i % 1000000 == 0:
print("line: {}".format(i))
items = line.strip().split("\t")
if len(items) != 3:
print("ERROR: line - {}".format(line))
continue
entity = items[0]
nametype = items[1]
literal = items[2].strip()
if literal != "":
if entity not in names:
names[entity] = []
if withtype:
names[entity].append((literal, nametype))
else:
names[entity].append(literal)
return names
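# Input sketch (illustrative values only): each line of the names file is
# expected to be "<entity>\t<name type>\t<name literal>", e.g. a line
#   some_entity<TAB>type.object.name<TAB>example name
# yields names['some_entity'] == ['example name'], or
# [('example name', 'type.object.name')] when withtype=True.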
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Create the names index')
parser.add_argument('-n', '--names', dest='names', action='store', required=True,
help='path to the trimmed names file')
parser.add_argument('-p', '--pickle', dest='pickle', action='store', required=True,
help='output file path for the pickle of names index')
args = parser.parse_args()
print("Names file path: {}".format(args.names))
print("Pickle output path: {}".format(args.pickle))
index_names = get_names_for_entities(args.names)
with open(args.pickle, 'wb') as f:
pickle.dump(index_names, f)
print("Created the names index.")
|
py | b40c7aafe1cccade71c4908916895507313d9bab | #!/usr/bin/python
from distutils.core import setup
from distutils.extension import Extension
from Cython.Build import cythonize
import numpy as np
net_ext = Extension(
'net',
['net.pyx', 'aux.pyx', 'cfns.c'],
extra_compile_args=['-fopenmp'],
extra_link_args=['-fopenmp'],
)
setup(name='net',
version='0.1',
description='Fast Neural Net Simulator',
author='Will Noble',
author_email='[email protected]',
include_dirs=[np.get_include()],
ext_modules=cythonize(net_ext)
)
|
py | b40c7ac13d7e66c352cd96ada39486d7bf7feb42 | from os import path, makedirs
FUNCTION_TEMPLATE = """def test_{}():
pass\n\n"""
FILE_TEMPLATE = "{}_test.py"
IMPORT_STRING = "from {} import *\n\n\n"
def create_templates(db, results_path):
"""
This function creates template test files for untested functions.
:param db: the database to get the functions names from
:param results_path: the directory to save the files at
"""
untested_funcs = db.get_last_untested()
output_folder = path.join(results_path, "tests_templates")
if len(untested_funcs) > 0:
if not path.exists(output_folder):
makedirs(output_folder)
files_created = {}
for name, function_name in untested_funcs:
function_name = function_name.strip()
file_name = name.removesuffix('.py')
if file_name in files_created:
files_created[file_name].append(function_name)
else:
files_created[file_name] = [function_name]
for file in files_created:
test_file_name = FILE_TEMPLATE.format(file)
with open(path.join(output_folder, test_file_name), "w+") as test_file:
test_file.write(IMPORT_STRING.format(file))
for function in files_created[file]:
test_file.write(FUNCTION_TEMPLATE.format(function)) |
py | b40c7b1cdbab9a993577d7081a8ef80f6b1af0af | # Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log
from oslo_utils import encodeutils
import six
from six.moves import http_client
import keystone.conf
from keystone.i18n import _
CONF = keystone.conf.CONF
LOG = log.getLogger(__name__)
# Tests use this to make exception message format errors fatal
_FATAL_EXCEPTION_FORMAT_ERRORS = False
def _format_with_unicode_kwargs(msg_format, kwargs):
try:
return msg_format % kwargs
except UnicodeDecodeError:
try:
kwargs = {k: encodeutils.safe_decode(v)
for k, v in kwargs.items()}
except UnicodeDecodeError:
# NOTE(jamielennox): This is the complete failure case
# at least by showing the template we have some idea
# of where the error is coming from
return msg_format
return msg_format % kwargs
class Error(Exception):
"""Base error class.
Child classes should define an HTTP status code, title, and a
message_format.
"""
code = None
title = None
message_format = None
def __init__(self, message=None, **kwargs):
try:
message = self._build_message(message, **kwargs)
except KeyError:
# if you see this warning in your logs, please raise a bug report
if _FATAL_EXCEPTION_FORMAT_ERRORS:
raise
else:
LOG.warning('missing exception kwargs (programmer error)')
message = self.message_format
super(Error, self).__init__(message)
def _build_message(self, message, **kwargs):
"""Build and returns an exception message.
:raises KeyError: given insufficient kwargs
"""
if message:
return message
return _format_with_unicode_kwargs(self.message_format, kwargs)
class ValidationError(Error):
message_format = _("Expecting to find %(attribute)s in %(target)s."
" The server could not comply with the request"
" since it is either malformed or otherwise"
" incorrect. The client is assumed to be in error.")
code = int(http_client.BAD_REQUEST)
title = http_client.responses[http_client.BAD_REQUEST]
class URLValidationError(ValidationError):
message_format = _("Cannot create an endpoint with an invalid URL:"
" %(url)s.")
class PasswordValidationError(ValidationError):
message_format = _("Password validation error: %(detail)s.")
class PasswordRequirementsValidationError(PasswordValidationError):
message_format = _("The password does not match the requirements:"
" %(detail)s.")
class PasswordHistoryValidationError(PasswordValidationError):
message_format = _("The new password cannot be identical to a "
"previous password. The total number which"
"includes the new password must be unique is "
"%(unique_count)s.")
class PasswordAgeValidationError(PasswordValidationError):
message_format = _("You cannot change your password at this time due "
"to the minimum password age. Once you change your "
"password, it must be used for %(min_age_days)d day(s) "
"before it can be changed. Please try again in "
"%(days_left)d day(s) or contact your administrator to "
"reset your password.")
class PasswordSelfServiceDisabled(PasswordValidationError):
message_format = _("You cannot change your password at this time due "
"to password policy disallowing password changes. "
"Please contact your administrator to reset your "
"password.")
class SchemaValidationError(ValidationError):
# NOTE(lbragstad): For whole OpenStack message consistency, this error
# message has been written in a format consistent with WSME.
message_format = _("%(detail)s")
class ValidationTimeStampError(Error):
message_format = _("Timestamp not in expected format."
" The server could not comply with the request"
" since it is either malformed or otherwise"
" incorrect. The client is assumed to be in error.")
code = int(http_client.BAD_REQUEST)
title = http_client.responses[http_client.BAD_REQUEST]
class InvalidOperatorError(ValidationError):
message_format = _("The given operator %(_op)s is not valid."
" It must be one of the following:"
" 'eq', 'neq', 'lt', 'lte', 'gt', or 'gte'.")
class ValidationExpirationError(Error):
message_format = _("The 'expires_at' must not be before now."
" The server could not comply with the request"
" since it is either malformed or otherwise"
" incorrect. The client is assumed to be in error.")
code = int(http_client.BAD_REQUEST)
title = http_client.responses[http_client.BAD_REQUEST]
class StringLengthExceeded(ValidationError):
message_format = _("String length exceeded. The length of"
" string '%(string)s' exceeds the limit"
" of column %(type)s(CHAR(%(length)d)).")
class AmbiguityError(ValidationError):
message_format = _("There are multiple %(resource)s entities named"
" '%(name)s'. Please use ID instead of names to"
" resolve the ambiguity.")
class ApplicationCredentialValidationError(ValidationError):
message_format = _("Invalid application credential: %(detail)s")
class CircularRegionHierarchyError(Error):
message_format = _("The specified parent region %(parent_region_id)s "
"would create a circular region hierarchy.")
code = int(http_client.BAD_REQUEST)
title = http_client.responses[http_client.BAD_REQUEST]
class ForbiddenNotSecurity(Error):
"""When you want to return a 403 Forbidden response but not security.
Use this for errors where the message is always safe to present to the user
and won't give away extra information.
"""
code = int(http_client.FORBIDDEN)
title = http_client.responses[http_client.FORBIDDEN]
class PasswordVerificationError(ForbiddenNotSecurity):
message_format = _("The password length must be less than or equal "
"to %(size)i. The server could not comply with the "
"request because the password is invalid.")
class RegionDeletionError(ForbiddenNotSecurity):
message_format = _("Unable to delete region %(region_id)s because it or "
"its child regions have associated endpoints.")
class ApplicationCredentialLimitExceeded(ForbiddenNotSecurity):
message_format = _("Unable to create additional application credentials, "
"maximum of %(limit)d already exceeded for user.")
class SecurityError(Error):
"""Security error exception.
Avoids exposing details of security errors, unless in insecure_debug mode.
"""
amendment = _('(Disable insecure_debug mode to suppress these details.)')
def __deepcopy__(self):
"""Override the default deepcopy.
Keystone :class:`keystone.exception.Error` accepts an optional message
that will be used when rendering the exception object as a string. If
not provided the object's message_format attribute is used instead.
:class:`keystone.exception.SecurityError` is a little different in
that it only uses the message provided to the initializer when
keystone is in `insecure_debug` mode. Instead it will use its
`message_format`. This is to ensure that sensitive details are not
leaked back to the caller in a production deployment.
This dual mode for string rendering causes some odd behaviour when
combined with oslo_i18n translation. Any object used as a value for
formatting a translated string is deep copied.
The copy causes an issue. The deep copy process actually creates a new
exception instance with the rendered string. Then when that new
instance is rendered as a string to use for substitution a warning is
logged. This is because the code tries to use the `message_format` in
secure mode, but the required kwargs are not in the deep copy.
The end result is not an error because when the KeyError is caught the
instance's ``message`` is used instead and this has the properly
translated message. The only indication that something is wonky is a
message in the warning log.
"""
return self
def _build_message(self, message, **kwargs):
"""Only returns detailed messages in insecure_debug mode."""
if message and CONF.insecure_debug:
if isinstance(message, six.string_types):
# Only do replacement if message is string. The message is
# sometimes a different exception or bytes, which would raise
# TypeError.
message = _format_with_unicode_kwargs(message, kwargs)
return _('%(message)s %(amendment)s') % {
'message': message,
'amendment': self.amendment}
return _format_with_unicode_kwargs(self.message_format, kwargs)
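# Behaviour sketch (illustrative only): with CONF.insecure_debug enabled, a
# SecurityError subclass renders the caller-supplied detail plus the amendment;
# with it disabled, only the generic message_format is rendered, e.g.
#
#   str(Unauthorized('token %(id)s not found', id=token_id))
#   # insecure_debug=True  -> "token ... not found (Disable insecure_debug ...)"
#   # insecure_debug=False -> "The request you have made requires authentication."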
class Unauthorized(SecurityError):
message_format = _("The request you have made requires authentication.")
code = int(http_client.UNAUTHORIZED)
title = http_client.responses[http_client.UNAUTHORIZED]
class InsufficientAuthMethods(Error):
# NOTE(notmorgan): This is not a security error, this is meant to
# communicate real information back to the user.
message_format = _("Insufficient auth methods received for %(user_id)s. "
"Auth Methods Provided: %(methods)s.")
code = 401
title = 'Unauthorized'
class PasswordExpired(Unauthorized):
message_format = _("The password is expired and needs to be changed for "
"user: %(user_id)s.")
class AuthPluginException(Unauthorized):
message_format = _("Authentication plugin error.")
def __init__(self, *args, **kwargs):
super(AuthPluginException, self).__init__(*args, **kwargs)
self.authentication = {}
class UserDisabled(Unauthorized):
message_format = _("The account is disabled for user: %(user_id)s.")
class AccountLocked(Unauthorized):
message_format = _("The account is locked for user: %(user_id)s.")
class AuthMethodNotSupported(AuthPluginException):
message_format = _("Attempted to authenticate with an unsupported method.")
def __init__(self, *args, **kwargs):
super(AuthMethodNotSupported, self).__init__(*args, **kwargs)
self.authentication = {'methods': CONF.auth.methods}
class ApplicationCredentialAuthError(AuthPluginException):
message_format = _(
"Error authenticating with application credential: %(detail)s")
class AdditionalAuthRequired(AuthPluginException):
message_format = _("Additional authentications steps required.")
def __init__(self, auth_response=None, **kwargs):
super(AdditionalAuthRequired, self).__init__(message=None, **kwargs)
self.authentication = auth_response
class Forbidden(SecurityError):
message_format = _("You are not authorized to perform the"
" requested action.")
code = int(http_client.FORBIDDEN)
title = http_client.responses[http_client.FORBIDDEN]
class ForbiddenAction(Forbidden):
message_format = _("You are not authorized to perform the"
" requested action: %(action)s.")
class CrossBackendNotAllowed(Forbidden):
message_format = _("Group membership across backend boundaries is not "
"allowed. Group in question is %(group_id)s, "
"user is %(user_id)s.")
class InvalidPolicyAssociation(Forbidden):
message_format = _("Invalid mix of entities for policy association: "
"only Endpoint, Service, or Region+Service allowed. "
"Request was - Endpoint: %(endpoint_id)s, "
"Service: %(service_id)s, Region: %(region_id)s.")
class InvalidDomainConfig(Forbidden):
message_format = _("Invalid domain specific configuration: %(reason)s.")
class InvalidLimit(Forbidden):
message_format = _("Invalid resource limit: %(reason)s.")
class LimitTreeExceedError(Exception):
def __init__(self, project_id, max_limit_depth):
super(LimitTreeExceedError, self).__init__(_(
"Keystone cannot start due to project hierarchical depth in the "
"current deployment (project_ids: %(project_id)s) exceeds the "
"enforcement model's maximum limit of %(max_limit_depth)s. Please "
"use a different enforcement model to correct the issue."
) % {'project_id': project_id, 'max_limit_depth': max_limit_depth})
class NotFound(Error):
message_format = _("Could not find: %(target)s.")
code = int(http_client.NOT_FOUND)
title = http_client.responses[http_client.NOT_FOUND]
class EndpointNotFound(NotFound):
message_format = _("Could not find endpoint: %(endpoint_id)s.")
class PolicyNotFound(NotFound):
message_format = _("Could not find policy: %(policy_id)s.")
class PolicyAssociationNotFound(NotFound):
message_format = _("Could not find policy association.")
class RoleNotFound(NotFound):
message_format = _("Could not find role: %(role_id)s.")
class ImpliedRoleNotFound(NotFound):
message_format = _("%(prior_role_id)s does not imply %(implied_role_id)s.")
class InvalidImpliedRole(Forbidden):
message_format = _("%(role_id)s cannot be an implied roles.")
class DomainSpecificRoleMismatch(Forbidden):
message_format = _("Project %(project_id)s must be in the same domain "
"as the role %(role_id)s being assigned.")
class DomainSpecificRoleNotWithinIdPDomain(Forbidden):
message_format = _("role: %(role_name)s must be within the same domain as "
"the identity provider: %(identity_provider)s.")
class RoleAssignmentNotFound(NotFound):
message_format = _("Could not find role assignment with role: "
"%(role_id)s, user or group: %(actor_id)s, "
"project, domain, or system: %(target_id)s.")
class RegionNotFound(NotFound):
message_format = _("Could not find region: %(region_id)s.")
class ServiceNotFound(NotFound):
message_format = _("Could not find service: %(service_id)s.")
class DomainNotFound(NotFound):
message_format = _("Could not find domain: %(domain_id)s.")
class ProjectNotFound(NotFound):
message_format = _("Could not find project: %(project_id)s.")
class ProjectTagNotFound(NotFound):
message_format = _("Could not find project tag: %(project_tag)s.")
class TokenNotFound(NotFound):
message_format = _("Could not find token: %(token_id)s.")
class UserNotFound(NotFound):
message_format = _("Could not find user: %(user_id)s.")
class GroupNotFound(NotFound):
message_format = _("Could not find group: %(group_id)s.")
class MappingNotFound(NotFound):
message_format = _("Could not find mapping: %(mapping_id)s.")
class TrustNotFound(NotFound):
message_format = _("Could not find trust: %(trust_id)s.")
class TrustUseLimitReached(Forbidden):
message_format = _("No remaining uses for trust: %(trust_id)s.")
class CredentialNotFound(NotFound):
message_format = _("Could not find credential: %(credential_id)s.")
class VersionNotFound(NotFound):
message_format = _("Could not find version: %(version)s.")
class EndpointGroupNotFound(NotFound):
message_format = _("Could not find Endpoint Group: %(endpoint_group_id)s.")
class IdentityProviderNotFound(NotFound):
message_format = _("Could not find Identity Provider: %(idp_id)s.")
class ServiceProviderNotFound(NotFound):
message_format = _("Could not find Service Provider: %(sp_id)s.")
class FederatedProtocolNotFound(NotFound):
message_format = _("Could not find federated protocol %(protocol_id)s for"
" Identity Provider: %(idp_id)s.")
class PublicIDNotFound(NotFound):
# This is used internally and mapped to either User/GroupNotFound or,
# Assertion before the exception leaves Keystone.
message_format = "%(id)s"
class RegisteredLimitNotFound(NotFound):
message_format = _("Could not find registered limit for %(id)s.")
class LimitNotFound(NotFound):
message_format = _("Could not find limit for %(id)s.")
class NoLimitReference(Forbidden):
message_format = _("Unable to create a limit that has no corresponding "
"registered limit.")
class RegisteredLimitError(ForbiddenNotSecurity):
message_format = _("Unable to update or delete registered limit %(id)s "
"because there are project limits associated with it.")
class DomainConfigNotFound(NotFound):
message_format = _('Could not find %(group_or_option)s in domain '
'configuration for domain %(domain_id)s.')
class ConfigRegistrationNotFound(Exception):
# This is used internally between the domain config backend and the
# manager, so should not escape to the client. If it did, it is a coding
# error on our part, and would end up, appropriately, as a 500 error.
pass
class ApplicationCredentialNotFound(NotFound):
message_format = _("Could not find Application Credential: "
"%(application_credential_id)s.")
class Conflict(Error):
message_format = _("Conflict occurred attempting to store %(type)s -"
" %(details)s.")
code = int(http_client.CONFLICT)
title = http_client.responses[http_client.CONFLICT]
class UnexpectedError(SecurityError):
"""Avoids exposing details of failures, unless in insecure_debug mode."""
message_format = _("An unexpected error prevented the server "
"from fulfilling your request.")
debug_message_format = _("An unexpected error prevented the server "
"from fulfilling your request: %(exception)s.")
def _build_message(self, message, **kwargs):
# Ensure that exception has a value to be extra defensive for
# substitutions and make sure the exception doesn't raise an
# exception.
kwargs.setdefault('exception', '')
return super(UnexpectedError, self)._build_message(
message or self.debug_message_format, **kwargs)
code = int(http_client.INTERNAL_SERVER_ERROR)
title = http_client.responses[http_client.INTERNAL_SERVER_ERROR]
class TrustConsumeMaximumAttempt(UnexpectedError):
debug_message_format = _("Unable to consume trust %(trust_id)s. Unable to "
"acquire lock.")
class CertificateFilesUnavailable(UnexpectedError):
debug_message_format = _("Expected signing certificates are not available "
"on the server. Please check Keystone "
"configuration.")
class MalformedEndpoint(UnexpectedError):
debug_message_format = _("Malformed endpoint URL (%(endpoint)s),"
" see ERROR log for details.")
class MappedGroupNotFound(UnexpectedError):
debug_message_format = _("Group %(group_id)s returned by mapping "
"%(mapping_id)s was not found in the backend.")
class MetadataFileError(UnexpectedError):
debug_message_format = _("Error while reading metadata file: %(reason)s.")
class DirectMappingError(UnexpectedError):
debug_message_format = _("Local section in mapping %(mapping_id)s refers "
"to a remote match that doesn't exist "
"(e.g. {0} in a local section).")
class AssignmentTypeCalculationError(UnexpectedError):
debug_message_format = _(
'Unexpected combination of grant attributes - '
'User: %(user_id)s, Group: %(group_id)s, Project: %(project_id)s, '
'Domain: %(domain_id)s.')
class NotImplemented(Error):
message_format = _("The action you have requested has not"
" been implemented.")
code = int(http_client.NOT_IMPLEMENTED)
title = http_client.responses[http_client.NOT_IMPLEMENTED]
class Gone(Error):
message_format = _("The service you have requested is no"
" longer available on this server.")
code = int(http_client.GONE)
title = http_client.responses[http_client.GONE]
class ConfigFileNotFound(UnexpectedError):
debug_message_format = _("The Keystone configuration file %(config_file)s "
"could not be found.")
class KeysNotFound(UnexpectedError):
debug_message_format = _('No encryption keys found; run keystone-manage '
'fernet_setup to bootstrap one.')
class MultipleSQLDriversInConfig(UnexpectedError):
debug_message_format = _('The Keystone domain-specific configuration has '
'specified more than one SQL driver (only one is '
'permitted): %(source)s.')
class MigrationNotProvided(Exception):
def __init__(self, mod_name, path):
super(MigrationNotProvided, self).__init__(_(
"%(mod_name)s doesn't provide database migrations. The migration"
" repository path at %(path)s doesn't exist or isn't a directory."
) % {'mod_name': mod_name, 'path': path})
class UnsupportedTokenVersionException(UnexpectedError):
debug_message_format = _('Token version is unrecognizable or '
'unsupported.')
class SAMLSigningError(UnexpectedError):
debug_message_format = _('Unable to sign SAML assertion. It is likely '
'that this server does not have xmlsec1 '
'installed or this is the result of '
'misconfiguration. Reason %(reason)s.')
class OAuthHeadersMissingError(UnexpectedError):
debug_message_format = _('No Authorization headers found, cannot proceed '
'with OAuth related calls. If running under '
'HTTPd or Apache, ensure WSGIPassAuthorization '
'is set to On.')
class TokenlessAuthConfigError(ValidationError):
message_format = _('Could not determine Identity Provider ID. The '
'configuration option %(issuer_attribute)s '
'was not found in the request environment.')
class CredentialEncryptionError(Exception):
message_format = _("An unexpected error prevented the server "
"from accessing encrypted credentials.")
class LDAPServerConnectionError(UnexpectedError):
debug_message_format = _('Unable to establish a connection to '
'LDAP Server (%(url)s).')
class LDAPInvalidCredentialsError(UnexpectedError):
message_format = _('Unable to authenticate against Identity backend - '
'Invalid username or password')
class LDAPSizeLimitExceeded(UnexpectedError):
message_format = _('Number of User/Group entities returned by LDAP '
'exceeded size limit. Contact your LDAP '
'administrator.')
class CacheDeserializationError(Exception):
def __init__(self, obj, data):
super(CacheDeserializationError, self).__init__(
_('Failed to deserialize %(obj)s. Data is %(data)s') % {
'obj': obj, 'data': data
}
)
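if __name__ == '__main__':
    # Hedged, illustrative sketch (not part of keystone): shows how the kwargs
    # passed to an exception subclass are substituted into its message_format.
    # Running it requires a working keystone configuration on the import path.
    try:
        raise ProjectNotFound(project_id='example-project-id')
    except ProjectNotFound as exc:
        print(exc)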
|
py | b40c7bb6cf4b9f92eb5ee4af11348b40dec496d9 | """Build and maintain a database of TESS publications.
"""
from __future__ import print_function, division, unicode_literals
# Standard library
import os
import re
import sys
import json
import datetime
import argparse
import collections
import sqlite3 as sql
import numpy as np
try:
import ads
except Exception:
ads = None
# External dependencies
import jinja2
from six.moves import input # needed to support Python 2
from astropy import log
from tqdm import tqdm
from . import plot, PACKAGEDIR
# Where is the default location of the SQLite database?
DEFAULT_DB = os.path.expanduser("~/.tpub.db")
# Which metadata fields do we want to retrieve from the ADS API?
# (basically everything apart from 'aff' and 'body' to reduce data volume)
FIELDS = ['date', 'pub', 'id', 'volume', 'links_data', 'citation', 'doi',
'eid', 'keyword_schema', 'citation_count', 'year', 'identifier',
'keyword_norm', 'reference', 'abstract', 'recid',
'alternate_bibcode', 'arxiv_class', 'bibcode', 'first_author_norm',
'pubdate', 'reader', 'doctype', 'title', 'pub_raw', 'property',
'author', 'email', 'orcid', 'keyword', 'author_norm',
'cite_read_boost', 'database', 'classic_factor', 'ack', 'page',
'first_author', 'read_count', 'indexstamp', 'issue', 'keyword_facet',
'aff']
class Highlight:
"""Defines colors for highlighting words in the terminal."""
RED = "\033[4;31m"
GREEN = "\033[4;32m"
YELLOW = "\033[4;33m"
BLUE = "\033[4;34m"
PURPLE = "\033[4;35m"
CYAN = "\033[4;36m"
END = '\033[0m'
class PublicationDB(object):
"""Class wrapping the SQLite database containing the publications.
Parameters
----------
filename : str
Path to the SQLite database file.
"""
def __init__(self, filename=DEFAULT_DB):
self.filename = filename
self.con = sql.connect(filename)
pubs_table_exists = self.con.execute(
"""
SELECT COUNT(*) FROM sqlite_master
WHERE type='table' AND name='pubs';
""").fetchone()[0]
if not pubs_table_exists:
self.create_table()
def create_table(self):
self.con.execute("""CREATE TABLE pubs(
id UNIQUE,
bibcode UNIQUE,
year,
month,
date,
mission,
science,
metrics)""")
def add(self, article, mission="tess", science="exoplanets"):
"""Adds a single article object to the database.
Parameters
----------
article : `ads.Article` object.
An article object as returned by `ads.SearchQuery`.
"""
log.debug('Ingesting {}'.format(article.bibcode))
# Also store the extra metadata in the json string
month = article.pubdate[0:7]
article._raw['mission'] = mission
article._raw['science'] = science
try:
cur = self.con.execute("INSERT INTO pubs "
"VALUES (?, ?, ?, ?, ?, ?, ?, ?)",
[article.id, article.bibcode,
article.year, month, article.pubdate,
mission, science,
json.dumps(article._raw)])
log.info('Inserted {} row(s).'.format(cur.rowcount))
self.con.commit()
except sql.IntegrityError:
log.warning('{} was already ingested.'.format(article.bibcode))
def add_interactively(self, article, statusmsg=""):
"""Adds an article by prompting the user for the classification.
Parameters
----------
article : `ads.Article` object
"""
# Do not show an article that is already in the database
if article in self:
log.info("{} is already in the database "
"-- skipping.".format(article.bibcode))
return
# First, highlight keywords in the title and abstract
colors = {'TESS': Highlight.BLUE,
'TIC': Highlight.BLUE,
'TOI': Highlight.BLUE,
'PLANET': Highlight.YELLOW,
'TRANSITING': Highlight.YELLOW,
'EXOPLANET': Highlight.YELLOW,
'SURVEY': Highlight.YELLOW,
'SATELLITE': Highlight.YELLOW,}
title = article.title[0]
try:
abstract = article.abstract
except AttributeError:
abstract = ""
for word in colors:
pattern = re.compile(word, re.IGNORECASE)
title = pattern.sub(colors[word] + word + Highlight.END, title)
if abstract is not None:
abstract = pattern.sub(colors[word]+word+Highlight.END, abstract)
# Print paper information to stdout
print(chr(27) + "[2J") # Clear screen
print(statusmsg)
print(title)
print('-'*len(title))
print(abstract)
print('')
print('Authors: ' + ', '.join(article.author))
print('Date: ' + article.pubdate)
print('Status: ' + str(article.property))
print('URL: http://adsabs.harvard.edu/abs/' + article.bibcode)
print('')
# Prompt the user to classify the paper by mission and science
print("=> TESS [1], unrelated [3], or skip [any key]? ",
end="")
prompt = input()
if prompt == "1":
mission = "tess"
elif prompt == "3":
mission = "unrelated"
else:
return
print(mission)
# Now classify by science
science = ""
if mission != "unrelated":
print('=> Exoplanets [1] or Astrophysics [2]? ', end='')
prompt = input()
if prompt == "1":
science = "exoplanets"
elif prompt == "2":
science = "astrophysics"
print(science)
self.add(article, mission=mission, science=science)
def add_by_bibcode(self, bibcode, interactive=False, **kwargs):
if ads is None:
log.error("This action requires the ADS key to be setup.")
return
q = ads.SearchQuery(q="identifier:{}".format(bibcode), fl=FIELDS)
for article in q:
# Print useful warnings
if bibcode != article.bibcode:
log.warning("Requested {} but ADS API returned {}".format(bibcode, article.bibcode))
if article.property is None:
log.warning("{} returned None for article.property.".format(article.bibcode))
elif 'NONARTICLE' in article.property:
# Note: data products are sometimes tagged as NONARTICLE
log.warning("{} is not an article.".format(article.bibcode))
if article in self:
log.warning("{} is already in the db.".format(article.bibcode))
else:
if interactive:
self.add_interactively(article)
else:
self.add(article, **kwargs)
def delete_by_bibcode(self, bibcode):
cur = self.con.execute("DELETE FROM pubs WHERE bibcode = ?;", [bibcode])
log.info('Deleted {} row(s).'.format(cur.rowcount))
self.con.commit()
def __contains__(self, article):
count = self.con.execute("SELECT COUNT(*) FROM pubs WHERE id = ?;",
[article.id]).fetchone()[0]
return bool(count)
def query(self, mission='tess', science=None):
"""Query the database by mission and/or science.
Returns
-------
rows : list
List of SQLite result rows.
"""
# Build the query
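        # Note: passing mission=None restricts the query to the TESS sample
        # rather than returning every stored row.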
if mission is None:
where = "(mission = 'tess') "
else:
where = "(mission = '{}') ".format(mission)
if science is not None:
where += " AND science = '{}' ".format(science)
cur = self.con.execute("SELECT year, month, metrics, bibcode "
"FROM pubs "
"WHERE {} "
"ORDER BY date DESC; ".format(where))
return cur.fetchall()
def to_markdown(self, title="Publications",
group_by_month=False, save_as=None, **kwargs):
"""Returns the publication list in markdown format.
"""
if group_by_month:
group_idx = 1
else:
group_idx = 0 # by year
articles = collections.OrderedDict({})
for row in self.query(**kwargs):
group = row[group_idx]
if group.endswith("-00"):
group = group[:-3] + "-01"
if group not in articles:
articles[group] = []
art = json.loads(row[2])
# The markdown template depends on "property" being iterable
if art["property"] is None:
art["property"] = []
articles[group].append(art)
templatedir = os.path.join(PACKAGEDIR, 'templates')
env = jinja2.Environment(loader=jinja2.FileSystemLoader(templatedir))
template = env.get_template('template.md')
markdown = template.render(title=title, save_as=save_as,
articles=articles)
if sys.version_info >= (3, 0):
return markdown # Python 3
else:
return markdown.encode("utf-8") # Python 2
def save_markdown(self, output_fn, **kwargs):
"""Saves the database to a text file in markdown format.
Parameters
----------
output_fn : str
Path of the file to write.
"""
markdown = self.to_markdown(save_as=output_fn.replace("md", "html"),
**kwargs)
log.info('Writing {}'.format(output_fn))
f = open(output_fn, 'w')
f.write(markdown)
f.close()
def plot(self):
"""Saves beautiful plot of the database."""
for extension in ['pdf', 'png']:
plot.plot_by_year(self,
"tpub-publication-rate.{}".format(extension))
plot.plot_by_year(self,
"tpub-publication-rate-without-extrapolation.{}".format(extension),
extrapolate=False)
plot.plot_science_piechart(self,
"tpub-piechart.{}".format(extension))
def get_metrics(self):
"""Returns a dictionary of overall publication statistics.
The metrics include:
* # of publications since XX.
* # of unique author surnames.
* # of citations.
* # of peer-reviewed pubs.
* # of exoplanet/astrophysics.
"""
metrics = {
"publication_count": 0,
"tess_count": 0,
"exoplanets_count": 0,
"astrophysics_count": 0,
"refereed_count": 0,
"tess_refereed_count": 0,
"citation_count": 0
}
first_authors, authors = [], []
tess_authors = list()
for article in self.query():
api_response = article[2]
js = json.loads(api_response)
metrics["publication_count"] += 1
metrics["{}_count".format(js["mission"])] += 1
try:
metrics["{}_count".format(js["science"])] += 1
except KeyError:
log.warning("{}: no science category".format(js["bibcode"]))
first_authors.append(js["first_author_norm"])
authors.extend(js["author_norm"])
if js["mission"] == 'tess':
tess_authors.extend(js["author_norm"])
else:
tess_authors.extend(js["author_norm"])
try:
if "REFEREED" in js["property"]:
metrics["refereed_count"] += 1
metrics["{}_refereed_count".format(js["mission"])] += 1
            except TypeError:  # property is None
pass
try:
metrics["citation_count"] += js["citation_count"]
except (KeyError, TypeError):
log.warning("{}: no citation_count".format(js["bibcode"]))
metrics["first_author_count"] = np.unique(first_authors).size
metrics["author_count"] = np.unique(authors).size
metrics["tess_author_count"] = np.unique(tess_authors).size
# Also compute fractions
for frac in ["tess", "exoplanets", "astrophysics"]:
metrics[frac+"_fraction"] = metrics[frac+"_count"] / metrics["publication_count"]
return metrics
def get_most_cited(self, mission=None, science=None, top=10):
"""Returns the most-cited publications."""
bibcodes, citations = [], []
articles = self.query(mission=mission, science=science)
for article in articles:
api_response = article[2]
js = json.loads(api_response)
bibcodes.append(article[3])
citations.append(js["citation_count"])
idx_top = np.argsort(citations)[::-1][0:top]
return [json.loads(articles[idx][2]) for idx in idx_top]
def get_most_read(self, mission=None, science=None, top=10):
"""Returns the most-cited publications."""
bibcodes, citations = [], []
articles = self.query(mission=mission, science=science)
for article in articles:
api_response = article[2]
js = json.loads(api_response)
bibcodes.append(article[3])
citations.append(js["read_count"])
idx_top = np.argsort(citations)[::-1][0:top]
return [json.loads(articles[idx][2]) for idx in idx_top]
def get_most_active_first_authors(self, min_papers=6):
"""Returns names and paper counts of the most active first authors."""
articles = self.query()
authors = {}
for article in articles:
api_response = article[2]
js = json.loads(api_response)
first_author = js["first_author_norm"]
try:
authors[first_author] += 1
except KeyError:
authors[first_author] = 1
names = np.array(list(authors.keys()))
paper_count = np.array(list(authors.values()))
idx_top = np.argsort(paper_count)[::-1]
mask = paper_count[idx_top] >= min_papers
        return zip(names[idx_top][mask], paper_count[idx_top][mask])
def get_all_authors(self, top=20):
articles = self.query()
authors = {}
for article in articles:
api_response = article[2]
js = json.loads(api_response)
for auth in js["author_norm"]:
try:
authors[auth] += 1
except KeyError:
authors[auth] = 1
names = np.array(list(authors.keys()))
paper_count = np.array(list(authors.values()))
idx_top = np.argsort(paper_count)[::-1][:top]
return names[idx_top], paper_count[idx_top]
def update(self, month=None,
exclude=['johannes']
# exclude=['keplerian', 'johannes', 'k<sub>2</sub>',
# "kepler equation", "kepler's equation", "xmm-newton",
# "kepler's law", "kepler's third law", "kepler problem",
# "kepler crater", "kepler's supernova", "kepler's snr"]
):
"""Query ADS for new publications.
Parameters
----------
month : str
Of the form "YYYY-MM".
exclude : list of str
Ignore articles if they contain any of the strings given
in this list. (Case-insensitive.)
"""
if ads is None:
log.error("This action requires the ADS key to be setup.")
return
print(Highlight.YELLOW +
"Reminder: did you `git pull` tpub before running "
"this command? [y/n] " +
Highlight.END,
end='')
if input() == 'n':
return
if month is None:
month = datetime.datetime.now().strftime("%Y-%m")
# First show all the papers with the TESS funding message in the ack
log.info("Querying ADS for acknowledgements (month={}).".format(month))
database = "astronomy"
qry = ads.SearchQuery(q="""(ack:"TESS mission"
OR ack:"Transiting Exoplanet Survey Satellite"
OR ack:"TESS team"
OR ack:"TESS")
-ack:"partial support from"
pubdate:"{}"
database:"{}"
""".format(month, database),
fl=FIELDS,
rows=9999999999)
articles = list(qry)
for idx, article in enumerate(articles):
statusmsg = ("Showing article {} out of {} that mentions TESS "
"in the acknowledgements.\n\n".format(
idx+1, len(articles)))
self.add_interactively(article, statusmsg=statusmsg)
# Then search for keywords in the title and abstracts
log.info("Querying ADS for titles and abstracts "
"(month={}).".format(month))
qry = ads.SearchQuery(q="""(
abs:"TESS"
OR abs:"Transiting Exoplanet Survey Satellite"
OR title:"TESS"
OR title:"Transiting Exoplanet Survey Satellite"
OR full:"TESS photometry"
OR full:"TESS lightcurve"
)
pubdate:"{}"
database:"{}"
""".format(month, database),
fl=FIELDS,
rows=9999999999)
articles = list(qry)
for idx, article in enumerate(articles):
# Ignore articles without abstract
if not hasattr(article, 'abstract') or article.abstract is None:
continue
abstract_lower = article.abstract.lower()
ignore = False
# Ignore articles containing any of the excluded terms
for term in exclude:
if term.lower() in abstract_lower:
ignore = True
# Ignore articles already in the database
if article in self:
ignore = True
# Ignore all the unrefereed non-arxiv stuff
try:
if "NOT REFEREED" in article.property and article.pub.lower() != "arxiv e-prints":
ignore = True
except (AttributeError, TypeError, ads.exceptions.APIResponseError):
pass # no .pub attribute
# Ignore proposals and cospar abstracts
if ".prop." in article.bibcode or "cosp.." in article.bibcode:
ignore = True
if not ignore: # Propose to the user
statusmsg = '(Reviewing article {} out of {}.)\n\n'.format(
idx+1, len(articles))
self.add_interactively(article, statusmsg=statusmsg)
log.info('Finished reviewing all articles for {}.'.format(month))
def tpub(args=None):
"""Lists the publications in the database in Markdown format."""
parser = argparse.ArgumentParser(
description="View the TESS publication list in markdown format.")
parser.add_argument('-f', metavar='dbfile',
type=str, default=DEFAULT_DB,
help="Location of the TESS publication list db. "
"Defaults to ~/.tpub.db.")
parser.add_argument('-e', '--exoplanets', action='store_true',
help='Only show exoplanet publications.')
parser.add_argument('-a', '--astrophysics', action='store_true',
help='Only show astrophysics publications.')
parser.add_argument('-m', '--month', action='store_true',
help='Group the papers by month rather than year.')
parser.add_argument('-s', '--save', action='store_true',
help='Save the output and plots in the current directory.')
args = parser.parse_args(args)
db = PublicationDB(args.f)
if args.save:
for bymonth in [True, False]:
if bymonth:
suffix = "-by-month"
title_suffix = " by month"
else:
suffix = ""
title_suffix = ""
output_fn = 'tpub{}.md'.format(suffix)
db.save_markdown(output_fn,
group_by_month=bymonth,
title="TESS publications{}".format(title_suffix))
for science in ['exoplanets', 'astrophysics']:
output_fn = 'tpub-{}{}.md'.format(science, suffix)
db.save_markdown(output_fn,
group_by_month=bymonth,
science=science,
title="TESS {} publications{}".format(science, title_suffix))
# Finally, produce an overview page
templatedir = os.path.join(PACKAGEDIR, 'templates')
env = jinja2.Environment(loader=jinja2.FileSystemLoader(templatedir))
template = env.get_template('template-overview.md')
markdown = template.render(metrics=db.get_metrics(),
most_cited=db.get_most_cited(top=10),
most_active_first_authors=db.get_most_active_first_authors(),
now=datetime.datetime.now())
# most_read=db.get_most_read(20),
filename = 'publications.md'
log.info('Writing {}'.format(filename))
f = open(filename, 'w')
if sys.version_info >= (3, 0):
f.write(markdown) # Python 3
else:
f.write(markdown.encode("utf-8")) # Legacy Python
f.close()
else:
if args.exoplanets and not args.astrophysics:
science = "exoplanets"
elif args.astrophysics and not args.exoplanets:
science = "astrophysics"
else:
science = None
mission = None
output = db.to_markdown(group_by_month=args.month,
mission=mission,
science=science)
from signal import signal, SIGPIPE, SIG_DFL
signal(SIGPIPE, SIG_DFL)
print(output)
def tpub_plot(args=None):
"""Creates beautiful plots of the database."""
parser = argparse.ArgumentParser(
description="Creates beautiful plots of the database.")
parser.add_argument('-f', metavar='dbfile',
type=str, default=DEFAULT_DB,
help="Location of the TESS publication list db. "
"Defaults to ~/.tpub.db.")
args = parser.parse_args(args)
PublicationDB(args.f).plot()
def tpub_update(args=None):
"""Interactively query ADS for new publications."""
parser = argparse.ArgumentParser(
description="Interactively query ADS for new publications.")
parser.add_argument('-f', metavar='dbfile',
type=str, default=DEFAULT_DB,
help="Location of the TESS publication list db. "
"Defaults to ~/.tpub.db.")
parser.add_argument('month', nargs='?', default=None,
help='Month to query, e.g. 2015-06.')
args = parser.parse_args(args)
PublicationDB(args.f).update(month=args.month)
def tpub_add(args=None):
"""Add a publication with a known ADS bibcode."""
parser = argparse.ArgumentParser(
description="Add a paper to the TESS publication list.")
parser.add_argument('-f', metavar='dbfile',
type=str, default=DEFAULT_DB,
help="Location of the TESS publication list db. "
"Defaults to ~/.tpub.db.")
parser.add_argument('bibcode', nargs='+',
help='ADS bibcode that identifies the publication.')
args = parser.parse_args(args)
db = PublicationDB(args.f)
for bibcode in args.bibcode:
db.add_by_bibcode(bibcode, interactive=True)
def tpub_delete(args=None):
"""Deletes a publication using its ADS bibcode."""
parser = argparse.ArgumentParser(
description="Deletes a paper from the TESS publication list.")
parser.add_argument('-f', metavar='dbfile',
type=str, default=DEFAULT_DB,
help="Location of the TESS publication list db. "
"Defaults to ~/.tpub.db.")
parser.add_argument('bibcode', nargs='+',
help='ADS bibcode that identifies the publication.')
args = parser.parse_args(args)
db = PublicationDB(args.f)
for bibcode in args.bibcode:
db.delete_by_bibcode(bibcode)
def tpub_import(args=None):
"""Import publications from a csv file.
The csv file must contain entries of the form "bibcode,mission,science".
The actual metadata of each publication will be grabbed using the ADS API,
hence this routine may take 10-20 minutes to complete.
"""
parser = argparse.ArgumentParser(
description="Batch-import papers into the TESS publication list "
"from a CSV file. The CSV file must have three columns "
"(bibcode,mission,science) separated by commas. "
"For example: '2004ApJ...610.1199G,tess,astrophysics'.")
parser.add_argument('-f', metavar='dbfile',
type=str, default=DEFAULT_DB,
help="Location of the TESS publication list db. "
"Defaults to ~/.tpub.db.")
parser.add_argument('csvfile',
help="Filename of the csv file to ingest.")
args = parser.parse_args(args)
db = PublicationDB(args.f)
for line in tqdm(open(args.csvfile, 'r').readlines()):
col = line.split(',') # Naive csv parsing
db.add_by_bibcode(col[0], mission=col[1], science=col[2].strip())
def tpub_export(args=None):
"""Export the bibcodes and classifications in CSV format."""
parser = argparse.ArgumentParser(
description="Export the TESS publication list in CSV format.")
parser.add_argument('-f', metavar='dbfile',
type=str, default=DEFAULT_DB,
help="Location of the TESS publication list db. "
"Defaults to ~/.tpub.db.")
args = parser.parse_args(args)
db = PublicationDB(args.f)
cur = db.con.execute("SELECT bibcode, mission, science "
"FROM pubs ORDER BY bibcode;")
for row in cur.fetchall():
print('{0},{1},{2}'.format(row[0], row[1], row[2]))
def tpub_spreadsheet(args=None):
"""Export the publication database to an Excel spreadsheet."""
try:
import pandas as pd
    except ImportError:
        print('ERROR: pandas needs to be installed for this feature.')
        sys.exit(1)
parser = argparse.ArgumentParser(
description="Export the TESS publication list in XLS format.")
parser.add_argument('-f', metavar='dbfile',
type=str, default=DEFAULT_DB,
help="Location of the TESS publication list db. "
"Defaults to ~/.tpub.db.")
args = parser.parse_args(args)
db = PublicationDB(args.f)
spreadsheet = []
cur = db.con.execute("SELECT bibcode, year, month, date, mission, science, metrics "
"FROM pubs WHERE mission != 'unrelated' ORDER BY bibcode;")
for row in cur.fetchall():
metrics = json.loads(row[6])
        properties = metrics['property'] or []
        if 'REFEREED' in properties:
            refereed = 'REFEREED'
        elif 'NOT REFEREED' in properties:
            refereed = 'NOT REFEREED'
        else:
            refereed = ''
myrow = collections.OrderedDict([
('bibcode', row[0]),
('year', row[1]),
('date', row[3]),
('mission', row[4]),
('science', row[5]),
('refereed', refereed),
('citation_count', metrics['citation_count']),
('first_author_norm', metrics['first_author_norm']),
('title', metrics['title'][0])])
spreadsheet.append(myrow)
output_fn = 'tess-publications.xls'
print('Writing {}'.format(output_fn))
pd.DataFrame(spreadsheet).to_excel(output_fn, index=False)
if __name__ == '__main__':
pass
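    # Hedged, illustrative sketch: the module is normally driven through the
    # command-line entry points above, but the database wrapper can also be
    # used directly (assumes ~/.tpub.db exists, or lets it be created empty).
    demo_db = PublicationDB()
    print('{} publications currently stored'.format(len(demo_db.query())))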
|
py | b40c7bc24cacbc03844c5291a64661634da308c5 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import unittest
class TestSitemap(unittest.TestCase):
pass
|
py | b40c7c28a6e58b3d2cf0788c0cbcb4176f85254f | import sys
from sqlalchemy import Column, ForeignKey, Integer, String, Text
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
from sqlalchemy import create_engine
Base = declarative_base()
class Bookshelf(Base):
"Define fields for books"
__tablename__ = 'books'
name = Column(String(80), nullable = False)
id = Column(Integer, primary_key = True)
description = Column(Text())
isbn = Column(String(13))
engine = create_engine('sqlite:///bookshelf.db')
Base.metadata.create_all(engine)
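if __name__ == '__main__':
    # Minimal usage sketch (not part of the original module); the book details
    # below are made up for illustration.
    from sqlalchemy.orm import sessionmaker
    Session = sessionmaker(bind=engine)
    session = Session()
    session.add(Bookshelf(name='Example Title',
                          description='A made-up row for demonstration.',
                          isbn='9780000000000'))
    session.commit()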
|
py | b40c7cf3984497959762a9900306fd16f73f5cab | import logging
from madmin_server import InternalServerError, add_handler
from madmin_db import Query, DatabaseError
from madmin_user import hasPermission
log = logging.getLogger(__name__)
def handle_budget(params, json_data):
if 'budget_id' not in params:
return []
budget_data = []
try:
q = Query('SELECT bdgt_id, bdgt_naam, bdgt_minimum, bdgt_current, bdgt_ver_id FROM tblbudget WHERE bdgt_id = %s')
except DatabaseError:
raise InternalServerError
for budget_id in params['budget_id']:
try:
q.run((budget_id,))
cur_result = q.rows()
except DatabaseError:
raise InternalServerError
for row in cur_result:
if not hasPermission(params, 'budget', row[4]):
continue
budget_data.append({
'id': row[0],
'vereniging_id': row[4],
'naam': row[1],
'current': row[3],
'minimum': row[2]})
return budget_data
def handle_budget_vereniging(params, json_data):
if 'vereniging_id' not in params:
return []
try:
q = Query('SELECT bdgt_id, bdgt_naam, bdgt_minimum, bdgt_current FROM tblbudget WHERE bdgt_ver_id = %s')
except DatabaseError:
raise InternalServerError
budget_data = []
for vereniging_id in params['vereniging_id']:
if not hasPermission(params, 'budget', vereniging_id):
continue
try:
q.run((vereniging_id,))
cur_result = q.rows()
except DatabaseError:
raise InternalServerError
for row in cur_result:
budget_data.append({
'id': row[0],
'vereniging_id': vereniging_id,
'naam': row[1],
'current': row[3],
'minimum': row[2]})
return budget_data
def budget_query(budget_id):
try:
q = Query("""SELECT bdgt_id, bdgt_current, bdgt_minimum, bdgt_ver_id, bdgt_naam
FROM tblbudget
WHERE bdgt_id = %s""")
q.run((budget_id,))
rows = q.rows()
except DatabaseError:
raise InternalServerError
result = []
for row in rows:
result.append({
'budget_id': row[0],
'vereniging_id': row[3],
'minimum': row[2],
'current': row[1],
'naam': row[4]
})
return result
def budget_vereniging_query(vereniging_id):
try:
q = Query('SELECT bdgt_id FROM tblbudget WHERE bdgt_ver_id = %s')
q.run((vereniging_id,))
result = q.rows()
except DatabaseError:
raise InternalServerError
budget_ids = []
for row in result:
        budget_ids.append(row[0])
return budget_ids
def budget_update(budget_id, value_change):
try:
q = Query('UPDATE tblbudget SET bdgt_current = bdgt_current + %s WHERE bdgt_id = %s')
q.run((value_change, budget_id))
except DatabaseError:
raise InternalServerError
add_handler('/budget', handle_budget)
add_handler('/budget/vereniging', handle_budget_vereniging)
log.info("Budget module initialized")
|
py | b40c7d9ca2fd0998962f26d171c2402597301625 | print('')
print('WARNING:')
print('This script is to import a json file, taken from Metric,')
print('and convert it to an SQLite3 database. Do not use this')
print('script unless actually necessary.')
print('')
print('To run, you will need to uncomment the remaining lines.')
# Uncomment everything below this line
# import sys
# from PyQt5 import QtCore, QtGui, QtWidgets, uic
# from PyQt5.QtGui import QIcon, QStandardItemModel
# from PyQt5.QtCore import (QSortFilterProxyModel, Qt, QTime)
# from PyQt5.QtWidgets import QMessageBox, QWidget
# import uuid
# from threading import Timer
# import time
# from functions import *
# from objects import Entry, Morphology, Definition, Media
# from actions import *
# import adjustments as adjustments
# import json
#
# with open ('2018-02-02 ailot.json') as f:
# docs = json.load(f)
# rows = docs['rows']
# count = 0
# for item in rows:
# #if count < 5:
# doc = Entry()
# doc.sense = []
# doc.sense.append(Sense())
# doc.sense[0].definition = []
# doc.sense[0].definition.append(Definition())
# doc.sense[0].gloss = Gloss()
# doc.sense[0].pos = []
# doc.sense[0].example = Example()
# lex = ''
# try:
# doc.lexeme = item['doc']['lexeme']
# lex = item['doc']['lexeme']
# except:
# doc.lexeme = ''
# try:
# doc.phonemic = item['doc']['phonemic']
# except:
# doc.phonemic = ''
# try:
# doc.sense[0].pos = []
# doc.sense[0].pos.append(item['doc']['sense'][0]['pos'][0])
# except:
# doc.sense[0].pos = []
# try:
# doc.sense[0].gloss.english = item['doc']['sense'][0]['gloss']['english']
# except:
# doc.sense[0].gloss = ''
# try:
# doc.sense[0].definition[0].english = item['doc']['sense'][0]['definition'][0]['english']
# except:
# doc.sense[0].definition[0].english = ''
# id = str(uuid.uuid4())
# doc.id = id
# sys.stdout.write(lex+' ')
# sys.stdout.flush()
# if lex is not '':
# cursor.execute("""INSERT INTO `edits` (`id`, `data`, `lexeme`) VALUES (?, ?, ?);""", (id, doc.json(), lex))
# conn.commit()
# count = count + 1
|
py | b40c7dcd88728d811cac466846e8a0c3e44d5e65 | import numpy as np
from tensorflow.keras.models import load_model
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Activation, Dense
from tensorflow.keras.optimizers import Adam
import tensorflow as tf
import tensorflow_probability as tfp
from loss import quantile_loss
RANDOM_SEED = 42
np.random.seed(RANDOM_SEED)
class OneClassNeuralNetwork:
def __init__(self, input_dim, hidden_layer_size, r=1.0):
"""
:param input_dim: number of input features
:param hidden_layer_size: number of neurons in the hidden layer
:param r: bias of hyperplane
"""
self.input_dim = input_dim
self.hidden_size = hidden_layer_size
self.r = r
def custom_ocnn_loss(self, nu, w, V):
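        # The returned closure combines L2 regularisation of both weight
        # matrices with a quantile (hinge-style) penalty, and refreshes self.r
        # to the nu-th upper percentile of the network outputs on every call,
        # following the one-class neural network formulation used here.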
def custom_hinge(_, y_pred):
loss = 0.5 * tf.reduce_sum(w ** 2) + 0.5 * tf.reduce_sum(V ** 2) + quantile_loss(self.r, y_pred, nu)
self.r = tfp.stats.percentile(tf.reduce_max(y_pred, axis=1), q=100 * nu)
return loss
return custom_hinge
def build_model(self):
h_size = self.hidden_size
model = Sequential()
input_hidden = Dense(h_size, input_dim=self.input_dim, kernel_initializer="glorot_normal", name="input_hidden")
model.add(input_hidden)
model.add(Activation("linear"))
# Define Dense layer from hidden to output
        hidden_output = Dense(1, name="hidden_output")
        model.add(hidden_output)
model.add(Activation("sigmoid"))
w = input_hidden.get_weights()[0]
        V = hidden_output.get_weights()[0]
return [model, w, V]
def train_model(self, X, epochs=50, nu=1e-2, init_lr=1e-2, save=True):
"""
builds and trains the model on the supplied input data
:param X: input training data
:param epochs: number of epochs to train for (default 50)
:param nu: parameter between [0, 1] controls trade off between maximizing the distance of the hyperplane from
the origin and the number of data points permitted to cross the hyper-plane (false positives) (default 1e-2)
:param init_lr: initial learning rate (default 1e-2)
:param save: flag indicating if the model should be (default True)
:return: trained model and callback history
"""
def r_metric(*args):
return self.r
r_metric.__name__ = 'r'
def quantile_loss_metric(*args):
return quantile_loss(self.r, args[1], nu)
quantile_loss_metric.__name__ = 'quantile_loss'
[model, w, V] = self.build_model()
model.compile(optimizer=Adam(lr=init_lr, decay=init_lr / epochs),
loss=self.custom_ocnn_loss(nu, w, V), metrics=[r_metric, quantile_loss_metric])
# despite the fact that we don't have a ground truth `y`, the fit function requires a label argument,
# so we just supply a dummy vector of 0s
history = model.fit(X, np.zeros((X.shape[0],)),
steps_per_epoch=1,
shuffle=True,
epochs=epochs)
if save:
import os
from datetime import datetime
if not os.path.exists('models'):
os.mkdir('models')
model_dir = f"models/ocnn_{datetime.now().strftime('%Y-%m-%d-%H:%M:%s')}"
os.mkdir(model_dir)
w = model.layers[0].get_weights()[0]
V = model.layers[2].get_weights()[0]
model.save(f"{model_dir}/model.h5")
np.savez(f"{model_dir}/params.npz", w=w, V=V, nu=nu)
return model, history
def load_model(self, model_dir):
"""
loads a pretrained model
:param model_dir: directory where model and model params (w, V, and nu) are saved
:param nu: same as nu described in train_model
:return: loaded model
"""
params = np.load(f'{model_dir}/params.npz')
w = params['w']
V = params['V']
nu = params['nu'].tolist()
model = load_model(f'{model_dir}/model.h5',
custom_objects={'custom_hinge': self.custom_ocnn_loss(nu, w, V)})
return model
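if __name__ == "__main__":
    # Hedged usage sketch: fit the one-class network on random data purely to
    # demonstrate the API above; the feature count, layer size and epoch count
    # are arbitrary choices, not recommendations.
    X_demo = np.random.rand(256, 8).astype("float32")
    ocnn = OneClassNeuralNetwork(input_dim=8, hidden_layer_size=16)
    demo_model, demo_history = ocnn.train_model(X_demo, epochs=2, save=False)
    print(demo_history.history.keys())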
|
py | b40c7ee170e5bdae0df270873b86d198c1c68894 | import storage
import os
def unlock():
storage.remount("/", False)
def lock():
storage.remount("/", True)
def saveFile(data, mode):
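    # data is a flat list of interleaved values (a timestamp followed by each
    # enabled axis reading); mode is a bitmask selecting the recorded axes:
    # bit 0 = X, bit 1 = Y, bit 2 = Z. The header row written below mirrors
    # that selection, and a new records/record<N>.csv is chosen automatically.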
unlock()
fileIndex = 0
fileName = "/records/record{}.csv"
newFile = False
while (not newFile):
try:
os.stat(fileName.format(fileIndex))
fileIndex += 1
except:
newFile = True
try: #will fail if /records/ doesn't exist
file = open(fileName.format(fileIndex), "wt")
except:
os.mkdir("records")
file = open(fileName.format(fileIndex), "wt")
#file = open("records/recordtest.csv", "wt")
file.write("Time")
numCols = 1
if (mode & 1):
file.write(",X")
numCols += 1
if (mode & 2):
file.write(",Y")
numCols += 1
if (mode & 4):
file.write(",Z")
numCols += 1
for i in range(len(data)):
if (i % numCols == 0):
file.write("\n")
file.write(str(data[i]) + ",")
file.close()
lock()
return fileIndex |
py | b40c80130cd808573674d92bdc79b8d7557b30af |
import logging
from gym.envs.registration import register
logger = logging.getLogger(__name__)
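# Four track/observation variants are registered below. Once this package is
# imported they can be created in the usual Gym way, e.g. (illustrative):
#     import gym, gym_racecar
#     env = gym.make('RaceCar-v0')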
register(
id='RaceCar-v0',
entry_point='gym_racecar.envs:RaceCarIdealLineEnv'
)
register(
id='RaceCarRealistic-v0',
entry_point='gym_racecar.envs:RaceCarIdealLineRealisticEnv'
)
register(
id='RaceCarHard-v0',
entry_point='gym_racecar.envs:RaceCarCenterLineEnv',
)
register(
id='RaceCarRealisticHard-v0',
entry_point='gym_racecar.envs:RaceCarCenterLineRealisticEnv',
) |
py | b40c8083ebb61cce866c7f76a5f5b66b667e85a7 | """Command-line application for installing a particular Spark/Hadoop version"""
from contextlib import suppress
import pathlib3x as pathlib
from cleo import Command
from shui.functions import extract_tarball, get_file_details, get_versions
class InstallCommand(Command):
"""
Install a particular Spark and Hadoop version
install
{--latest : Use the latest available version}
{--spark=any : Spark version}
{--hadoop=any : Hadoop version}
{--target=cwd : Directory to install into}
"""
def handle(self):
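        # Overall flow: resolve the requested Spark/Hadoop version, pick and
        # create the installation directory, download the tarball plus its
        # SHA512 file, verify it, extract it, then remove the downloaded files.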
# Get correct Spark/Hadoop version
if self.option("latest"):
selected_version = sorted(get_versions())[-1]
else:
matching_versions = get_versions()
if self.option("spark") != "any":
matching_versions = [
v for v in matching_versions if v.spark == self.option("spark")
]
if self.option("hadoop") != "any":
matching_versions = [
v for v in matching_versions if v.hadoop == self.option("hadoop")
]
if not len(matching_versions) == 1:
self.line(
f"Found {len(matching_versions)} versions matching"
+ f"<comment>Spark</comment> <info>{self.option('spark')}</info>;"
+ f"<comment>Hadoop</comment> <info>{self.option('hadoop')}</info>"
)
for version in matching_versions:
self.line(f" - Found {version}")
raise ValueError("Could not identify version to install!")
selected_version = matching_versions[0]
# Get installation directory, creating it if necessary
if self.option("target") != "cwd":
install_dir = pathlib.Path(self.option("target"))
else:
install_dir = pathlib.Path.cwd()
install_dir = install_dir.expanduser().resolve()
with suppress(OSError):
install_dir.mkdir(parents=True, exist_ok=True)
if not install_dir.is_dir():
raise ValueError(f"{install_dir} is not a valid installation directory!")
self.line(
f"Installing <comment>{selected_version}</comment> in <info>{install_dir}</info>"
)
# Download tarball and checksum
file_with_hash = get_file_details(selected_version, install_dir)
for fileinfo in file_with_hash:
self.line(
f"Downloading <comment>{fileinfo.name}</comment> from <info>{fileinfo.url}</info>"
)
fileinfo.download()
self.line(f"Finished downloading <comment>{fileinfo.name}</comment>")
# Verify tarball
if file_with_hash.verify():
self.line(
f"Verified <comment>{selected_version.filename}</comment> using SHA512 hash"
)
else:
raise IOError(f"Could not verify {selected_version} using SHA512!")
# Extract tarball
self.line(
f"Extracting <comment>{selected_version}</comment> to <info>{install_dir}</info>"
)
installation_path = extract_tarball(file_with_hash.file, install_dir)
self.line(f"Cleaning up downloaded files from <comment>{install_dir}</comment>")
file_with_hash.remove()
self.line(
f"Finished installing <comment>{selected_version}</comment> to <info>{str(installation_path)}</info>"
)
|
py | b40c80c78f5e5de8caede0e4f6fe1fd934df4cda | # Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from srl import movement
class Simulation(object):
'''Tracks the player in a world and implements the rules and rewards.
score is the cumulative score of the player in this run of the
simulation.
'''
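  # Map legend used throughout (inferred from to_array and _valid_move):
  # ' ' empty/impassable, '#' wall, '.' open floor, '^' hazard (terminal, large
  # penalty), '$' goal (terminal, large reward), '@' the player position.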
def __init__(self, generator):
self._generator = generator
# Initialized by reset()
self.state = None
self.world = None
self.reset()
def reset(self):
'''Resets the simulation to the initial state.'''
self.world = self._generator.generate()
self.state = self.world.init_state
self.score = 0
@property
def in_terminal_state(self):
'''Whether the simulation is in a terminal state (stopped.)'''
return self.world.at(self.state) in ['^', '$'] or self.score < -500
@property
def x(self):
'''The x coordinate of the player.'''
return self.state[0]
@property
def y(self):
'''The y coordinate of the player.'''
return self.state[1]
def act(self, action):
'''Performs action and returns the reward from that step.'''
reward = -1
delta = movement.MOVEMENT[action]
new_state = self.x + delta[0], self.y + delta[1]
if self._valid_move(new_state):
ch = self.world.at(new_state)
if ch == '^':
reward = -10000
elif ch == '$':
reward = 10000
self.state = new_state
else:
# Penalty for hitting the walls.
reward -= 5
self.score += reward
return reward
def _valid_move(self, new_state):
'''Gets whether movement to new_state is a valid move.'''
new_x, new_y = new_state
# TODO: Could check that there's no teleportation cheating.
return (0 <= new_x and new_x < self.world.w and
0 <= new_y and new_y < self.world.h and
self.world.at(new_state) in ['.', '^', '$'])
def to_array(self):
'''Converts the state of a simulation to numpy ndarray.
The returned array has numpy.int8 units with the following mapping.
This mapping has no special meaning because these indices are fed
into an embedding layer.
' ' -> 0
'#' -> 1
'$' -> 2
'.' -> 3
'@' -> 4
'^' -> 5
Args:
sim: A simulation.Simulation to externalize the state of.
Returns:
The world map and player position represented as an numpy ndarray.
'''
key = ' #$.@^'
w = np.empty(shape=(self.world.h, self.world.w), dtype=np.int8)
for v in range(self.world.h):
for u in range(self.world.w):
w[v, u] = key.index(self.world.at((u, v)))
w[self.y, self.x] = key.index('@')
return w
|
py | b40c820699b43b9fc33f60627ab36cf704a1e6c0 | import unittest
import uuid
from tests.config import UnsplashTestCase, SKIP_TEST
from unsplash.models import Photo, Collection
class CollectionTest(UnsplashTestCase):
default_collection_id = u"547584"
default_search_query = u"nature"
default_photo_id = u"KSap1iDftvQ"
prefix = u"yakupa"
def test_all(self):
collections = self.api.collection.all(per_page=2)
self.assertIsInstance(collections, list)
self.assertIsInstance(collections[0], Collection)
self.assertEqual(len(collections), 2)
def test_featured(self):
if SKIP_TEST:
return True
featured_collections = self.api.collection.featured(per_page=2)
self.assertIsInstance(featured_collections, list)
self.assertIsInstance(featured_collections[0], Collection)
self.assertEqual(len(featured_collections), 2)
def test_curated(self):
if SKIP_TEST:
return True
curated_collections = self.api.collection.curated(per_page=2)
self.assertIsInstance(curated_collections, list)
self.assertIsInstance(curated_collections[0], Collection)
self.assertEqual(len(curated_collections), 2)
def test_get(self):
if SKIP_TEST:
return True
collection = self.api.collection.get(self.default_collection_id)
self.assertIsNotNone(collection)
self.assertIsInstance(collection, Collection)
def test_get_curated(self):
if SKIP_TEST:
return True
_curated_collection = self.api.collection.curated(per_page=2)[0]
curated_collection = self.api.collection.get_curated(_curated_collection.id)
self.assertIsNotNone(curated_collection)
self.assertIsInstance(curated_collection, Collection)
def test_photos(self):
if SKIP_TEST:
return True
photos = self.api.collection.photos(self.default_collection_id, per_page=2)
self.assertIsNotNone(photos)
self.assertIsInstance(photos, list)
self.assertIsInstance(photos[0], Photo)
def test_curated_photos(self):
if SKIP_TEST:
return True
_curated_collection = self.api.collection.curated(per_page=2)[0]
photos = self.api.collection.curated_photos(_curated_collection.id, per_page=2)
if photos:
self.assertIsNotNone(photos)
self.assertIsInstance(photos, list)
self.assertIsInstance(photos[0], Photo)
def test_related(self):
if SKIP_TEST:
return True
related_collections = self.api.collection.related(self.default_collection_id)
if related_collections:
self.assertIsInstance(related_collections, list)
self.assertIsInstance(related_collections[0], Collection)
def test_create(self):
if SKIP_TEST:
return True
title = "%s-%s" % (self.prefix, uuid.uuid4().get_hex()[0:5])
description = "%s-%s" % (self.prefix,uuid.uuid4().get_hex()[0:5])
collection = self.api.collection.create(title, description)
self.assertIsNotNone(collection)
self.assertIsInstance(collection, Collection)
self.assertEqual(collection.title, title)
self.assertEqual(collection.description, description)
self.api.collection.delete(collection.id)
def test_update(self):
if SKIP_TEST:
return True
title = "%s-%s" % (self.prefix, uuid.uuid4().get_hex()[0:5])
description = "%s-%s" % (self.prefix,uuid.uuid4().get_hex()[0:5])
collection = self.api.collection.update(self.default_collection_id, title=title, description=description)
self.assertIsNotNone(collection)
self.assertIsInstance(collection, Collection)
self.assertEqual(collection.title, title)
self.assertEqual(collection.description, description)
def test_add_photo(self):
if SKIP_TEST:
return True
collection, photo = self.api.collection.add_photo(self.default_collection_id, self.default_photo_id)
self.assertIsNotNone(collection)
self.assertIsInstance(collection, Collection)
photos = self.api.collection.photos(self.default_collection_id)
photo_ids = map(lambda x: x.id, photos)
self.assertIsNotNone(photo)
self.assertIsInstance(photo, Photo)
self.assertIn(photo.id, photo_ids)
def test_remove_photo(self):
if SKIP_TEST:
return True
collection, photo = self.api.collection.remove_photo(self.default_collection_id, self.default_photo_id)
self.assertIsNotNone(collection)
self.assertIsInstance(collection, Collection)
photos = self.api.collection.photos(self.default_collection_id)
photo_ids = map(lambda x: x.id, photos)
self.assertIsNotNone(photo)
self.assertIsInstance(photo, Photo)
self.assertNotIn(photo.id, photo_ids)
def test_delete(self):
if SKIP_TEST:
return True
title = "%s-%s" % (self.prefix, uuid.uuid4().get_hex()[0:5])
description = "%s-%s" % (self.prefix,uuid.uuid4().get_hex()[0:5])
_collection = self.api.collection.create(title, description)
collection = self.api.collection.delete(_collection.id)
self.assertIsNone(collection)
if __name__ == "__main__":
unittest.main()
|
py | b40c824f674a8b94622e201071be38fbe0ba8001 | """
Splice null command example
"""
from threefive import decode
decode(0xFC301100000000000000FFFFFF0000004F253396)
|
py | b40c836def0bfcedcbeaa4eecc90da571fd231ba | import json
from uuid import uuid4
from copy import deepcopy
from six import string_types
import samtranslator.model.eventsources.push
from samtranslator.model import ResourceTypeResolver
from samtranslator.model.exceptions import InvalidEventException, InvalidResourceException
from samtranslator.model.iam import IAMRolePolicies
from samtranslator.model.resource_policies import ResourcePolicies
from samtranslator.model.role_utils import construct_role_for_resource
from samtranslator.model.s3_utils.uri_parser import parse_s3_uri
from samtranslator.model.stepfunctions import StepFunctionsStateMachine
from samtranslator.model.stepfunctions import events
from samtranslator.model.intrinsics import fnJoin
from samtranslator.model.tags.resource_tagging import get_tag_list
from samtranslator.model.intrinsics import is_intrinsic
from samtranslator.utils.cfn_dynamic_references import is_dynamic_reference
class StateMachineGenerator(object):
_SAM_KEY = "stateMachine:createdBy"
_SAM_VALUE = "SAM"
_SUBSTITUTION_NAME_TEMPLATE = "definition_substitution_%s"
_SUBSTITUTION_KEY_TEMPLATE = "${definition_substitution_%s}"
def __init__(
self,
logical_id,
depends_on,
managed_policy_map,
intrinsics_resolver,
definition,
definition_uri,
logging,
name,
policies,
permissions_boundary,
definition_substitutions,
role,
state_machine_type,
events,
event_resources,
event_resolver,
tags=None,
resource_attributes=None,
passthrough_resource_attributes=None,
):
"""
Constructs an State Machine Generator class that generates a State Machine resource
:param logical_id: Logical id of the SAM State Machine Resource
:param depends_on: Any resources that need to be depended on
:param managed_policy_map: Map of managed policy names to the ARNs
:param intrinsics_resolver: Instance of the resolver that knows how to resolve parameter references
:param definition: State Machine definition
:param definition_uri: URI to State Machine definition
:param logging: Logging configuration for the State Machine
:param name: Name of the State Machine resource
:param policies: Policies attached to the execution role
:param definition_substitutions: Variable-to-value mappings to be replaced in the State Machine definition
:param role: Role ARN to use for the execution role
:param state_machine_type: Type of the State Machine
:param events: List of event sources for the State Machine
:param event_resources: Event resources to link
:param event_resolver: Resolver that maps Event types to Event classes
:param tags: Tags to be associated with the State Machine resource
:param resource_attributes: Resource attributes to add to the State Machine resource
:param passthrough_resource_attributes: Attributes such as `Condition` that are added to derived resources
"""
self.logical_id = logical_id
self.depends_on = depends_on
self.managed_policy_map = managed_policy_map
self.intrinsics_resolver = intrinsics_resolver
self.passthrough_resource_attributes = passthrough_resource_attributes
self.resource_attributes = resource_attributes
self.definition = definition
self.definition_uri = definition_uri
self.name = name
self.logging = logging
self.policies = policies
self.permissions_boundary = permissions_boundary
self.definition_substitutions = definition_substitutions
self.role = role
self.type = state_machine_type
self.events = events
self.event_resources = event_resources
self.event_resolver = event_resolver
self.tags = tags
self.state_machine = StepFunctionsStateMachine(
logical_id, depends_on=depends_on, attributes=resource_attributes
)
self.substitution_counter = 1
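        # Counter used to generate unique names for automatically created
        # definition substitutions (see _SUBSTITUTION_NAME_TEMPLATE above)
        # when dynamic values are replaced in the processed definition.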
def to_cloudformation(self):
"""
Constructs and returns the State Machine resource and any additional resources associated with it.
:returns: a list of resources including the State Machine resource.
:rtype: list
"""
resources = [self.state_machine]
# Defaulting to {} will add the DefinitionSubstitutions field on the transform output even when it is not relevant
if self.definition_substitutions:
self.state_machine.DefinitionSubstitutions = self.definition_substitutions
if self.definition and self.definition_uri:
raise InvalidResourceException(
self.logical_id, "Specify either 'Definition' or 'DefinitionUri' property and not both."
)
elif self.definition:
processed_definition = deepcopy(self.definition)
processed_definition = self.intrinsics_resolver.resolve_parameter_refs(processed_definition)
substitutions = self._replace_dynamic_values_with_substitutions(processed_definition)
if len(substitutions) > 0:
if self.state_machine.DefinitionSubstitutions:
self.state_machine.DefinitionSubstitutions.update(substitutions)
else:
self.state_machine.DefinitionSubstitutions = substitutions
self.state_machine.DefinitionString = self._build_definition_string(processed_definition)
elif self.definition_uri:
self.state_machine.DefinitionS3Location = self._construct_definition_uri()
else:
raise InvalidResourceException(
self.logical_id, "Either 'Definition' or 'DefinitionUri' property must be specified."
)
if self.role and self.policies:
raise InvalidResourceException(
self.logical_id, "Specify either 'Role' or 'Policies' property and not both."
)
elif self.role:
self.state_machine.RoleArn = self.role
elif self.policies:
if not self.managed_policy_map:
raise Exception("Managed policy map is empty, but should not be.")
execution_role = self._construct_role()
self.state_machine.RoleArn = execution_role.get_runtime_attr("arn")
resources.append(execution_role)
else:
raise InvalidResourceException(self.logical_id, "Either 'Role' or 'Policies' property must be specified.")
self.state_machine.StateMachineName = self.name
self.state_machine.StateMachineType = self.type
self.state_machine.LoggingConfiguration = self.logging
self.state_machine.Tags = self._construct_tag_list()
event_resources = self._generate_event_resources()
resources.extend(event_resources)
return resources
def _construct_definition_uri(self):
"""
        Constructs the State Machine's `DefinitionS3 property`_ from the SAM State Machine's DefinitionUri property.
:returns: a DefinitionUri dict, containing the S3 Bucket, Key, and Version of the State Machine definition.
:rtype: dict
"""
if isinstance(self.definition_uri, dict):
if not self.definition_uri.get("Bucket", None) or not self.definition_uri.get("Key", None):
# DefinitionUri is a dictionary but does not contain Bucket or Key property
raise InvalidResourceException(
self.logical_id, "'DefinitionUri' requires Bucket and Key properties to be specified."
)
s3_pointer = self.definition_uri
else:
# DefinitionUri is a string
s3_pointer = parse_s3_uri(self.definition_uri)
if s3_pointer is None:
raise InvalidResourceException(
self.logical_id,
"'DefinitionUri' is not a valid S3 Uri of the form "
"'s3://bucket/key' with optional versionId query parameter.",
)
definition_s3 = {"Bucket": s3_pointer["Bucket"], "Key": s3_pointer["Key"]}
if "Version" in s3_pointer:
definition_s3["Version"] = s3_pointer["Version"]
return definition_s3
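        # Illustrative sketch (not from the original source; bucket/key names are made up):
        # a string DefinitionUri such as "s3://my-bucket/state_machine.asl.json?versionId=abc"
        # and a dict DefinitionUri {"Bucket": "my-bucket", "Key": "state_machine.asl.json", "Version": "abc"}
        # both resolve to {"Bucket": "my-bucket", "Key": "state_machine.asl.json", "Version": "abc"},
        # with "Version" included only when a version is present.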
def _build_definition_string(self, definition_dict):
"""
        Builds a CloudFormation definition string from a definition dictionary. The definition string is
        constructed as a Fn::Join intrinsic function to keep it readable.
:param definition_dict: State machine definition as a dictionary
:returns: the state machine definition.
:rtype: dict
"""
# Indenting and then splitting the JSON-encoded string for readability of the state machine definition in the CloudFormation translated resource.
# Separators are passed explicitly to maintain trailing whitespace consistency across Py2 and Py3
definition_lines = json.dumps(definition_dict, sort_keys=True, indent=4, separators=(",", ": ")).split("\n")
definition_string = fnJoin("\n", definition_lines)
return definition_string
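        # Illustrative sketch (assumes fnJoin emits a standard Fn::Join intrinsic; the state
        # name is made up): the returned value looks like
        #   {"Fn::Join": ["\n", ["{", "    \"StartAt\": \"MyState\",", "...", "}"]]}
        # i.e. the pretty-printed JSON split into lines and re-joined with newlines at deploy time.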
def _construct_role(self):
"""
Constructs a State Machine execution role based on this SAM State Machine's Policies property.
:returns: the generated IAM Role
:rtype: model.iam.IAMRole
"""
state_machine_policies = ResourcePolicies(
{"Policies": self.policies},
# No support for policy templates in the "core"
policy_template_processor=None,
)
execution_role = construct_role_for_resource(
resource_logical_id=self.logical_id,
attributes=self.passthrough_resource_attributes,
managed_policy_map=self.managed_policy_map,
assume_role_policy_document=IAMRolePolicies.stepfunctions_assume_role_policy(),
resource_policies=state_machine_policies,
tags=self._construct_tag_list(),
permissions_boundary=self.permissions_boundary,
)
return execution_role
def _construct_tag_list(self):
"""
Transforms the SAM defined Tags into the form CloudFormation is expecting.
:returns: List of Tag Dictionaries
:rtype: list
"""
sam_tag = {self._SAM_KEY: self._SAM_VALUE}
return get_tag_list(sam_tag) + get_tag_list(self.tags)
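        # Illustrative sketch (assumes get_tag_list emits CloudFormation-style {"Key", "Value"}
        # dicts; the "project"/"demo" tag is made up): with self.tags = {"project": "demo"}
        # this returns
        #   [{"Key": "stateMachine:createdBy", "Value": "SAM"}, {"Key": "project", "Value": "demo"}]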
def _generate_event_resources(self):
"""Generates and returns the resources associated with this state machine's event sources.
:returns: a list containing the state machine's event resources
:rtype: list
"""
resources = []
if self.events:
for logical_id, event_dict in self.events.items():
kwargs = {
"intrinsics_resolver": self.intrinsics_resolver,
"permissions_boundary": self.permissions_boundary,
}
try:
eventsource = self.event_resolver.resolve_resource_type(event_dict).from_dict(
self.state_machine.logical_id + logical_id, event_dict, logical_id
)
for name, resource in self.event_resources[logical_id].items():
kwargs[name] = resource
except (TypeError, AttributeError) as e:
raise InvalidEventException(logical_id, "{}".format(e))
resources += eventsource.to_cloudformation(resource=self.state_machine, **kwargs)
return resources
def _replace_dynamic_values_with_substitutions(self, input):
"""
        Replaces the CloudFormation intrinsic functions and dynamic references within the input with substitutions.
        :param input: Input dictionary in which the dynamic values need to be replaced with substitutions
        :returns: Mapping of substitution names to the dynamic values they replace
        :rtype: dict
"""
substitution_map = {}
for path in self._get_paths_to_intrinsics(input):
location = input
for step in path[:-1]:
location = location[step]
sub_name, sub_key = self._generate_substitution()
substitution_map[sub_name] = location[path[-1]]
location[path[-1]] = sub_key
return substitution_map
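        # Illustrative sketch (state and function names are made up): given an input containing
        #   {"Resource": {"Fn::GetAtt": ["MyFunction", "Arn"]}}
        # the intrinsic is replaced in place with "${definition_substitution_1}" and this method
        # returns {"definition_substitution_1": {"Fn::GetAtt": ["MyFunction", "Arn"]}}, which
        # to_cloudformation() merges into the state machine's DefinitionSubstitutions.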
def _get_paths_to_intrinsics(self, input, path=[]):
"""
Returns all paths to dynamic values within a dictionary
:param input: Input dictionary to find paths to dynamic values in
:param path: Optional list to keep track of the path to the input dictionary
        :returns: List of paths, where each path is the list of keys leading to a dynamic value within the input dictionary
        :rtype: list
"""
dynamic_value_paths = []
if isinstance(input, dict):
iterator = input.items()
elif isinstance(input, list):
iterator = enumerate(input)
else:
return dynamic_value_paths
for key, value in sorted(iterator, key=lambda item: item[0]):
if is_intrinsic(value) or is_dynamic_reference(value):
dynamic_value_paths.append(path + [key])
elif isinstance(value, (dict, list)):
dynamic_value_paths.extend(self._get_paths_to_intrinsics(value, path + [key]))
return dynamic_value_paths
def _generate_substitution(self):
"""
Generates a name and key for a new substitution.
:returns: Substitution name and key
:rtype: string, string
"""
substitution_name = self._SUBSTITUTION_NAME_TEMPLATE % self.substitution_counter
substitution_key = self._SUBSTITUTION_KEY_TEMPLATE % self.substitution_counter
self.substitution_counter += 1
return substitution_name, substitution_key
|
py | b40c842ef5399f7bd43a158ab4cbdfe726356178 | import os
from src.commons.table_reference import TableReference
os.environ['SERVER_SOFTWARE'] = 'Development/'
import unittest
import webtest
from google.appengine.ext import testbed
from mock import patch
from src.backup import table_backup_handler
from src.backup.table_backup import TableBackup
class TestTableBackupHandler(unittest.TestCase):
def setUp(self):
patch('googleapiclient.discovery.build').start()
app = table_backup_handler.app
self.under_test = webtest.TestApp(app)
self.testbed = testbed.Testbed()
self.testbed.activate()
self.testbed.init_memcache_stub()
def tearDown(self):
self.testbed.deactivate()
patch.stopall()
@patch.object(TableBackup, 'start')
def test_that_backup_endpoint_call_for_partition_is_properly_parsed(
self, table_backup_start
):
# given
table_reference = TableReference('example-proj-name',
'example-dataset-name',
'example-table-name',
'20171201')
url = '/tasks/backups/table/{}/{}/{}/{}' \
.format(table_reference.get_project_id(),
table_reference.get_dataset_id(),
table_reference.get_table_id(),
table_reference.get_partition_id())
# when
self.under_test.get(url)
# then
table_backup_start.assert_called_with(table_reference)
@patch.object(TableBackup, 'start')
def test_that_backup_endpoint_call_for_non_partition_is_properly_parsed(
self, table_backup_start
):
# given
table_reference = TableReference('example-proj-name',
'example-dataset-name',
'example-table-name'
)
url = '/tasks/backups/table/{}/{}/{}' \
.format(table_reference.get_project_id(),
table_reference.get_dataset_id(),
table_reference.get_table_id())
# when
self.under_test.get(url)
# then
table_backup_start.assert_called_with(table_reference) |
py | b40c84669d28985ca1bd279b41dffdf6857db014 | from __future__ import annotations
from edna.core.configuration import EdnaConfiguration
from edna.ingest import BaseIngest
from edna.process import BaseProcess
from edna.emit import BaseEmit
from abc import ABC
import os
class EdnaContext(ABC):
"""An EdnaContext is the generic context for any EdnaJob. It is an interface for the SimpleStreamingContext and other future contexts.
Attributes:
configuration (EdnaConfiguration): Stores a EdnaConfiguration object to control the job configuration.
Raises:
PrimitiveNotSetException: Raises this exception if `execute()` is called without setting all of the primitives.
Returns:
EdnaContext: Returns an EdnaContext.
"""
configuration: EdnaConfiguration
def __init__(self, dir : str = ".", confpath : str = "ednaconf.yaml", confclass: EdnaConfiguration = EdnaConfiguration):
        Initializes the EdnaContext with a directory, configuration file, and configuration object.
Args:
dir (str, optional): The directory for the job configuration. Defaults to the current directory "."
confpath (str, optional): A YAML configuration file for the job. Job variables are loaded as top-level
fields from this file. Defaults to "ednaconf.yaml".
confclass (EdnaConfiguration, optional): Object to store and interact with the Configuration.
Defaults to edna.core.configuration.StreamingConfiguration.
"""
self.dir = dir
self.configuration_path = os.path.join(self.dir, confpath)
self.configuration = self._getConfiguration(self.configuration_path, confclass)
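        # Illustrative usage sketch: a concrete context such as SimpleStreamingContext
        # (imported at the bottom of this module) builds on this initializer and, per the
        # class docstring, must also have its ingest/process/emit primitives set before
        # execute() is called. Roughly:
        #   context = SimpleStreamingContext(...)  # primitives plus optional dir/confpath
        #   context.execute()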
def _getConfiguration(self, confpath: str, confclass: EdnaConfiguration):
        Sets up an EdnaConfiguration for this context, loading values from the configuration file if it exists.
Args:
confpath (str): Absolute path to the configuration file.
confclass (EdnaConfiguration): The specific class of the resulting EdnaConfiguration, i.e. StreamingConfiguration.
Returns:
EdnaConfiguration: Returns an EdnaConfiguration object.
"""
        # Load from file if it exists; otherwise keep the default configuration created by confclass()
configuration:EdnaConfiguration = confclass()
if os.path.exists(confpath):
configuration.load_from_file(confpath)
return configuration
def getVariable(self, key: str):
"""Returns a value from a key-value pair from the stored EdnaConfiguration
Args:
key (str): The key to search the EdnaConfiguration
Returns:
Object: The value associated with the key in the EdnaConfiguration.
"""
return self.configuration.getVariable(key)
def execute(self):
"""Execute the EdnaContext. This calls the `run()` method, which must be implemented in the inheriting class.
Raises:
PrimitiveNotSetException: Raises this exception if `execute()` is called without setting all of the primitives.
"""
# TODO process optimizations go here in future
self.run()
from .SimpleStreamingContext import SimpleStreamingContext
from .StreamingContext import StreamingContext |
py | b40c86f0841a3ebfbb87a0255355578128a6a81e | import time
import numpy as np
import nibabel as nb
from nipype.pipeline import engine as pe
from nipype.interfaces import nilearn as nl
from .. import images as im
from pathlib import Path
import pytest
@pytest.mark.parametrize('qform_add, sform_add, expectation', [
(0, 0, "no_warn"),
(0, 1e-14, "no_warn"),
(0, 1e-09, "no_warn"),
(1e-6, 0, "warn"),
(0, 1e-6, "warn"),
(1e-5, 0, "warn"),
(0, 1e-5, "warn"),
(1e-3, 1e-3, "no_warn")
])
# just a diagonal of ones in qform and sform and see that this doesn't warn
# only look at the 2 areas of images.py that I added and get code coverage of those
def test_qformsform_warning(tmp_path, qform_add, sform_add, expectation):
fname = str(tmp_path / 'test.nii')
# make a random image
random_data = np.random.random(size=(5, 5, 5) + (5,))
img = nb.Nifti1Image(random_data, np.eye(4) + sform_add)
# set the qform of the image before calling it
img.set_qform(np.eye(4) + qform_add)
img.to_filename(fname)
validate = pe.Node(im.ValidateImage(), name='validate', base_dir=str(tmp_path))
validate.inputs.in_file = fname
res = validate.run()
if expectation == 'warn':
assert "Note on" in Path(res.outputs.out_report).read_text()
assert len(Path(res.outputs.out_report).read_text()) > 0
elif expectation == 'no_warn':
assert len(Path(res.outputs.out_report).read_text()) == 0
@pytest.mark.parametrize('nvols, nmasks, ext, factor', [
(500, 10, '.nii', 2),
(500, 10, '.nii.gz', 5),
(200, 3, '.nii', 1.1),
(200, 3, '.nii.gz', 2),
(200, 10, '.nii', 1.1),
(200, 10, '.nii.gz', 2),
])
def test_signal_extraction_equivalence(tmp_path, nvols, nmasks, ext, factor):
nlsignals = str(tmp_path / 'nlsignals.tsv')
imsignals = str(tmp_path / 'imsignals.tsv')
vol_shape = (64, 64, 40)
img_fname = str(tmp_path / ('img' + ext))
masks_fname = str(tmp_path / ('masks' + ext))
random_data = np.random.random(size=vol_shape + (nvols,)) * 2000
random_mask_data = np.random.random(size=vol_shape + (nmasks,)) < 0.2
nb.Nifti1Image(random_data, np.eye(4)).to_filename(img_fname)
nb.Nifti1Image(random_mask_data.astype(np.uint8), np.eye(4)).to_filename(masks_fname)
se1 = nl.SignalExtraction(in_file=img_fname, label_files=masks_fname,
class_labels=['a%d' % i for i in range(nmasks)],
out_file=nlsignals)
se2 = im.SignalExtraction(in_file=img_fname, label_files=masks_fname,
class_labels=['a%d' % i for i in range(nmasks)],
out_file=imsignals)
tic = time.time()
se1.run()
toc = time.time()
se2.run()
toc2 = time.time()
tab1 = np.loadtxt(nlsignals, skiprows=1)
tab2 = np.loadtxt(imsignals, skiprows=1)
assert np.allclose(tab1, tab2)
t1 = toc - tic
t2 = toc2 - toc
assert t2 < t1 / factor
|
py | b40c8785afc34f0dbe6f7de6ee78ac71673404a8 | from pandas import Series, DataFrame
daeshin = {'open': [11650, 11100, 11200, 11100, 11000],
'high': [12100, 11800, 11200, 11100, 11150],
'low' : [11600, 11050, 10900, 10950, 10900],
'close': [11900, 11600, 11000, 11100, 11050]}
#daeshin_day = DataFrame(daeshin)
daeshin_day = DataFrame(daeshin, columns=['open', 'high', 'low', 'close'])
print(daeshin_day)
|
py | b40c87bef3a1437769ac688f07452b9daed5f901 | from django.contrib import admin
from .models import Room, Topic, Message, User
admin.site.register(Room)
admin.site.register(Topic)
admin.site.register(Message)
admin.site.register(User)
|
py | b40c8848974c8f525e0614f0e440297523ff884d | import os
import sys
import re
from time import sleep
import time
import random
from microMolder import LambdaMolder
from microFront import CloudFrontMolder
from microGateway import ApiGatewayMolder
from microDynamo import DynamoMolder
from microUtils import loadConfig, roleCleaner, serviceID
from MMAnsibleDeployAll import deployStart
# TESTERS...
from microGateway_test import ApiGatewayTester
import awsconnect
from awsconnect import awsConnect
# sudo ansible-playbook -i windows-servers API_Name.yaml -vvvv
dir_path = os.path.dirname(__file__)
real_dir_path = os.path.dirname(os.path.realpath(__file__))
# directory='/path/to/Ansible_Deployer/ansible'
# python Main_DEPLOYER.py -DY dev "test,stage,prod,tpp" "xx_tablename" ENVR.yaml API_Name true
class TemporalDeployer():
def __init__(self, directory=None):
pass
# CREATE DEFINITIONS
def Define(self, type_in, svc_in, origin, global_accts, sendto, config, triggers=None, targetAPI=None, fullUpdate=None):
accID = origin['account']
region = origin['region']
accountRole = global_accts[accID]['role']
print(" ## USING ## %s--> %s, role %s, account originDefinition %s, config %s, copyAnsible to %s" %
(type_in, svc_in, accountRole, accID, config, sendto))
print(" !!! !! to assume <cross_acct_role> ROLE make sure you set 'assume_role' in 'ENVR.yaml' to True or False as needed")
awsconnect.stsClient_init()
sts_client = awsconnect.stsClient
print(" ________________-")
print(" %s" % (accID))
print(" ________________-")
if 'eID' in origin:
eID = origin['eID']
if 'services_map' in origin:
mapfile = origin['services_map']
eID = serviceID(origin['account'], mapfile, origin['all'])
aconnect = awsConnect(
accID, eID, origin['role_definer'], sts_client, region)
aconnect.connect()
results = None
if type_in == "-CF":
cm = CloudFrontMolder("ansible")
acctID, target, acctTitle, ready = cm.cfront_describe(
svc_in, aconnect, origin, global_accts, sendto)
print("CF here")
elif type_in == "-L":
lm = LambdaMolder("ansible")
acctID, target, acctTitle, ready = lm.lambda_describe(
svc_in, aconnect, origin, global_accts, triggers, sendto, targetAPI, fullUpdate)
elif type_in == "-G":
gm = ApiGatewayMolder("ansible")
if targetAPI == svc_in:
acctID, target, acctTitle, ready = gm.describe_GatewayALL(
svc_in, aconnect, origin, global_accts, triggers, sendto, targetAPI, fullUpdate, True)
else:
acctID, target, acctTitle, ready = gm.describe_GwResource(
svc_in, aconnect, origin, global_accts, triggers, sendto, targetAPI, fullUpdate, True)
elif type_in == "-DY":
dy = DynamoMolder("ansible")
acctID, target, acctTitle, ready = dy.define(
svc_in, aconnect, origin, global_accts, sendto)
return acctID, target, acctTitle, ready
# CHECK GATEWAY FOR OPTIONS. LOOK TO SEE IF OPTIONS ARE THERE!!!
def TEST(self, type_in, svc_in, acct, acctName, global_accts, config, targetAPI):
accID = acct
region = 'us-east-1'
accountRole = global_accts[accID]['role']
print(" ## OPTIONS TEST ## %s--> %s, role %s, account originDefinition %s, config %s, copyAnsible to %s" %
(type_in, svc_in, accountRole, accID, config, sendto))
print(" !!! [TEST] !! to assume <cross_acct_role> ROLE make sure you set 'assume_role' in 'ENVR.yaml' to True or False as needed")
awsconnect.stsClient_init()
sts_client = awsconnect.stsClient
eID = 10000010001
if 'eID' in global_accts[accID]:
eID = global_accts[accID]['eID']
aconnect = awsConnect(accID, eID, accountRole, sts_client, region)
aconnect.connect()
results = None
if type_in == "-CF":
cm = CloudFrontMolder("ansible")
print("CF TEST here")
elif type_in == "-L":
lm = LambdaMolder("ansible")
print("LAMBDA TEST here")
elif type_in == "-G":
gm = ApiGatewayTester("ansible")
print("GATEWAY TEST here")
if targetAPI == svc_in:
errors = gm.test_GatewayALL(
svc_in, aconnect, acct, acctName, global_accts, targetAPI)
else:
errors = gm.test_GwResource(
svc_in, aconnect, acct, acctName, global_accts, targetAPI)
elif type_in == "-DY":
dy = DynamoMolder("ansible")
print("DYNAMO TEST here")
return errors
# EXECUTE AGAINST DEFINITIONS
#
#
# PRODUCE RESULTS PASS/FAIL
# python microMolder.py -L xx-LambdaName true ENVR.yaml API_Name true
# python Main_DEPLOYER.py -DY dev "test,stage" xx_tablename ENVR.yaml API_Name true
# python Main_DEPLOYER.py -G dev "stage" API_Name ENVR.yaml API_Name true
# . OR
# python Main_DEPLOYER.py "xx-stage,xx-test" xx_tablename ENVR.yaml
# python Main_Deployer.py "xx-test" xx_tablename ENVR.yaml
#
#
#
if __name__ == "__main__":
# global directory
directory = os.path.join(dir_path, '../../ansible')
found = None
length = 0
tot = len(sys.argv) - 1
SkipDefinition = False
type_in = str(sys.argv[1]).strip()
if 'help' in type_in:
print(" ************************************************************")
print(" Try using the following PSUEDO after *CONFIG.yaml is correct :")
print(' python Main_DEPLOYER.py -L dev "test,stage" * ENVR.yaml API_Name true')
print(
" -[NOTE]--> the above will describe 'dev' and then deploy ALL * to 'test,stage' ")
print(
" -[NOTE]--> the above will describe 'dev' and then deploy to 'test,stage' ")
print(
" -[NOTE]--> the above can also deploy API only using -G , CloudFront using -CF, DynamoDB using -DY ")
print(
' python Main_DEPLOYER.py -G dev "test,stage" activities[*] ENVR.yaml API_Name true')
print(
" -[NOTE]--> the above will describe activities api with all methods * ")
print(
' python Main_DEPLOYER.py -G dev "test,stage" *[*] ENVR.yaml API_Name true')
print(' python Main_DEPLOYER.py -G dev "test,stage" API_Name ENVR.yaml API_Name true')
print(
" -[NOTE]--> the above will deploy all API under API_Name... both rolename(API_Name) and targetAPI MUST be SAME ")
print(" OR to deploy without Defining ")
print(" -[NOTE]--> the above will deploy to stage,test ")
print(" ************************************************************")
exit()
targetAPI = fullUpdate = target_environments = None
if tot < 6:
missing = 6 - tot
totTypeIn = len(type_in)
msg = "[E] %s arguments missing... found:%s needs 6+ arguments" % (
missing, tot)
if "-" in type_in and totTypeIn < 4:
example = "... for example: \n python Main_DEPLOYER.py -L dev 'test,stage' Quickboks_temp ENVR.yaml"
msg = "%s %s" % (msg, example)
raise Exception(msg)
elif totTypeIn > 4:
SkipDefinition = True
if not SkipDefinition:
source_environment = str(sys.argv[2]).strip()
target_environments = str(sys.argv[3]).strip().split(",")
role = str(sys.argv[4]).strip()
config = str(sys.argv[5]).strip() # ENVR.yaml
if '/' in str(sys.argv[6]):
sendto = str(sys.argv[6]).strip() # 'some path'
else:
sendto = os.path.join(dir_path, '../../ansible/roles')
sys.argv.append(sys.argv[7])
sys.argv[7] = sys.argv[6]
roleString = roleCleaner(role)
if not "roles/" in sendto:
sendto = "%s/%s" % (sendto, roleString)
# targetAPI = str(sys.argv[7]).strip() ### API_Name
if len(sys.argv) > 7:
targetAPI = str(sys.argv[7]).strip()
print(sys.argv[7])
if targetAPI.lower() == "none" or targetAPI.lower() == "null" or targetAPI == "*":
targetAPI = None
# fullUpdate = str(sys.argv[8]).strip() ### true
if tot > 8:
fullUpdate = str(sys.argv[8]).strip().lower() # true
if fullUpdate == "none" or fullUpdate == "null" or fullUpdate == "false":
fullUpdate = False
else:
fullUpdate = True
else:
target_environments = type_in.split(",")
role = str(sys.argv[2]).strip()
config = str(sys.argv[3]).strip()
start_time = time.time()
fullpath = "%s/%s" % (real_dir_path, config)
origin, global_accts = loadConfig(fullpath, source_environment)
# if 'eID' in origin:
# eID = origin['eID']
# if 'services_map' in origin:
# mapfile = origin['services_map']
# eID = serviceID(origin['account'], mapfile, origin['all'])
triggers = origin['triggers']
if triggers is None:
raise ValueError(
"[E] config file [ %s ] did not load correctly.. PLEASE check / fix and try again" % (fullpath))
td = TemporalDeployer()
ready = None
if not SkipDefinition:
acctID, target, acctTitle, ready = td.Define(
type_in, role, origin, global_accts, sendto, config, triggers, targetAPI, fullUpdate)
print("-[DEFINED]-- %s seconds ---" % (time.time() - start_time))
# BELOW to skip deployment
# exit()
if ready or SkipDefinition:
deploy_time = time.time()
print("########################################################")
print("########### Ansible DEPLOYMENT START ##################")
print("########################################################")
role = role
results = deployStart(global_accts, target_environments, roleString)
for k, v in results.items():
msg = "%s Account: %s, %s" % (v['name'], k, v['value'])
print(msg)
if "-G" in type_in:
acct = v['value']
acctName = v['name']
print(" GATEWAY releasing ---> checking OPTIONS")
# acctID, target, acctTitle, ready = td.TEST(type_in,role,acct,acctName,global_accts,config,targetAPI)
print("-[DEPLOYED]-- %s seconds ---" % (time.time() - deploy_time))
# print(global_accts)
#print (target_environments)
# //logger.info("Finished")
print("--[FIN]- %s seconds ---" % (time.time() - start_time))
|
py | b40c88c4bc82e2ca24ff3354e21ac776c130f8b8 | from . import voxelgrid
from . import trianglemesh
from . import pointcloud
from . import render
from . import tetmesh
|
py | b40c895b62011656f2933b31f9a0827ba925f72f | """Test Automation config panel."""
import json
from unittest.mock import patch
from homeassistant.bootstrap import async_setup_component
from homeassistant.components import config
from homeassistant.util.yaml import dump
async def test_update_scene(hass, hass_client):
"""Test updating a scene."""
with patch.object(config, "SECTIONS", ["scene"]):
await async_setup_component(hass, "config", {})
client = await hass_client()
orig_data = [{"id": "light_on"}, {"id": "light_off"}]
def mock_read(path):
"""Mock reading data."""
return orig_data
written = []
def mock_write(path, data):
"""Mock writing data."""
data = dump(data)
written.append(data)
with patch("homeassistant.components.config._read", mock_read), patch(
"homeassistant.components.config._write", mock_write
):
resp = await client.post(
"/api/config/scene/config/light_off",
data=json.dumps(
{
"id": "light_off",
"name": "Lights off",
"entities": {"light.bedroom": {"state": "off"}},
}
),
)
assert resp.status == 200
result = await resp.json()
assert result == {"result": "ok"}
assert len(written) == 1
written_yaml = written[0]
assert (
written_yaml
== """- id: light_on
- id: light_off
name: Lights off
entities:
light.bedroom:
state: 'off'
"""
)
async def test_bad_formatted_scene(hass, hass_client):
"""Test that we handle scene without ID."""
with patch.object(config, "SECTIONS", ["scene"]):
await async_setup_component(hass, "config", {})
client = await hass_client()
orig_data = [
{
# No ID
"entities": {"light.bedroom": "on"}
},
{"id": "light_off"},
]
def mock_read(path):
"""Mock reading data."""
return orig_data
written = []
def mock_write(path, data):
"""Mock writing data."""
written.append(data)
with patch("homeassistant.components.config._read", mock_read), patch(
"homeassistant.components.config._write", mock_write
):
resp = await client.post(
"/api/config/scene/config/light_off",
data=json.dumps(
{
"id": "light_off",
"name": "Lights off",
"entities": {"light.bedroom": {"state": "off"}},
}
),
)
assert resp.status == 200
result = await resp.json()
assert result == {"result": "ok"}
# Verify ID added to orig_data
assert "id" in orig_data[0]
assert orig_data[1] == {
"id": "light_off",
"name": "Lights off",
"entities": {"light.bedroom": {"state": "off"}},
}
async def test_delete_scene(hass, hass_client):
"""Test deleting a scene."""
with patch.object(config, "SECTIONS", ["scene"]):
await async_setup_component(hass, "config", {})
client = await hass_client()
orig_data = [{"id": "light_on"}, {"id": "light_off"}]
def mock_read(path):
"""Mock reading data."""
return orig_data
written = []
def mock_write(path, data):
"""Mock writing data."""
written.append(data)
with patch("homeassistant.components.config._read", mock_read), patch(
"homeassistant.components.config._write", mock_write
):
resp = await client.delete("/api/config/scene/config/light_on")
assert resp.status == 200
result = await resp.json()
assert result == {"result": "ok"}
assert len(written) == 1
assert written[0][0]["id"] == "light_off"
|
py | b40c8966d334a580912cb38619b055cf45bf6e76 | from gym_minigrid.extendedminigrid import *
from gym_minigrid.register import register
from configurations import config_grabber as cg
class BigCleanEnv(ExMiniGridEnv):
def __init__(self, size=16):
super().__init__(
grid_size=size,
max_steps=4*size*size,
# Set this to True for maximum speed
see_through_walls=False
)
def _gen_grid(self, width, height):
# Create an empty grid
self.grid = Grid(width, height)
# Generate the surrounding walls
self.grid.wall_rect(0, 0, width, height)
# Place the agent
self.start_pos = (1, 1)
self.start_dir = 0
self.agent_pos = (1, 1)
self.agent_dir = 0
        #self.list_dirt: name of the list that envelopes.py checks to know if the room is clean
# WARNING don't change the name of list_dirt if you want to use the cleaning robot
self.list_dirt = []
#Place dirt
self.number_dirt = 16
for k in range(self.number_dirt):
dirt = Dirt()
x, y = self._rand_pos(2, width-2, 2, height - 2)
            # a Dirt needs the shared list so the environment knows how much dirt remains
while self.grid.get(x,y) is not None:
x, y = self._rand_pos(2, width - 2, 2, height - 2)
self.grid.set(x, y, dirt)
self.list_dirt.append(dirt)
dirt.affect_list(self.list_dirt)
#Place Vase
for i in range(6):
vase = Vase()
x2, y2 = self._rand_pos(2, width - 2, 2, height - 2)
while self.grid.get(x2, y2) is not None:
x2, y2 = self._rand_pos(2, width - 2, 2, height - 2)
            # a Vase needs the grid and its position to change into dirt if the agent breaks it
self.grid.set(x2, y2, vase)
#vase.affect_grid(self.grid,(x2,y2))
vase.list_dirt(self.list_dirt)
# Set start position
self.start_pos = (1, 1)
self.start_dir = 0
self.mission = "Clean the room"
def step(self, action):
reward = 0
info = {}
# Check if the agent clean a dirt
if self.worldobj_in_agent(1, 0) == "dirt" \
and action == self.actions.toggle:
reward = cg.Configuration.grab().rewards.cleaningenv.clean
if self.worldobj_in_agent(1, 0) == "vase" \
and action == self.actions.toggle:
info = "break"
if reward != 0:
obs, useless, done, info = super().step(action)
elif info is not {}:
obs, reward, done, useless = super().step(action)
else:
obs, reward, done, info = super().step(action)
# Check the room is clean
if len(self.list_dirt) == 0:
done = True
reward = reward + cg.Configuration.grab().rewards.standard.goal
self.step_number = 0
info = "goal"
return obs, reward, done, info
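        # Illustrative usage sketch (assumes gym is installed and this module's register()
        # call below has run):
        #   env = gym.make('MiniGrid-BigCleanEnv-16x16-v0')
        #   obs = env.reset()
        #   obs, reward, done, info = env.step(action)  # action drawn from env.action_space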
register(
id='MiniGrid-BigCleanEnv-16x16-v0',
entry_point='gym_minigrid.envs:BigCleanEnv'
) |
py | b40c8972b37b7359774c14f7a49056779a789416 | # pylint: skip-file
# type: ignore
# -*- coding: utf-8 -*-
#
# tests.controllers.test_method.test_method_integration_test.py is part of The
# RAMSTK Project
#
# All rights reserved.
# Copyright since 2007 Doyle "weibullguy" Rowland doyle.rowland <AT> reliaqual <DOT> com
"""Class for testing Test Method integrations."""
# Third Party Imports
import pytest
from pubsub import pub
from treelib import Tree
# RAMSTK Package Imports
from ramstk.models.dbrecords import RAMSTKTestMethodRecord
from ramstk.models.dbtables import RAMSTKTestMethodTable
from tests import (
SystemTestDeleteMethods,
SystemTestGetterSetterMethods,
SystemTestInsertMethods,
SystemTestSelectMethods,
)
@pytest.mark.usefixtures("test_attributes", "integration_test_table_model")
class TestSelectTestMethod(SystemTestSelectMethods):
"""Class for testing Test Method table do_select() and do_select_all() methods."""
__test__ = True
_do_select_msg = "selected_revision"
_record = RAMSTKTestMethodRecord
_select_id = 1
_tag = "test_method"
@pytest.mark.usefixtures("test_attributes", "integration_test_table_model")
class TestInsertTestMethod(SystemTestInsertMethods):
"""Class for testing Test Method table do_insert() method."""
__test__ = True
_insert_id = 4
_record = RAMSTKTestMethodRecord
_tag = "test_method"
@pytest.mark.skip(reason="Test Method records are non-hierarchical.")
def test_do_insert_child(self, test_attributes, integration_test_table_model):
"""Should not run because Test Methods are not hierarchical."""
pass
@pytest.mark.skip(reason="Test Method records are non-hierarchical.")
def test_do_insert_no_parent(self, test_attributes, integration_test_table_model):
"""Should not run because Test Methods are not hierarchical."""
pass
@pytest.mark.usefixtures("integration_test_table_model")
class TestDeleteTestMethod(SystemTestDeleteMethods):
"""Class for testing Test Method table do_delete() method."""
__test__ = True
_delete_id = 3
_record = RAMSTKTestMethodRecord
_tag = "test_method"
@pytest.mark.skip(reason="OpStress records are non-hierarchical.")
def test_do_delete_with_child(self, integration_test_table_model):
"""Should not run because Operating Stresses are not hierarchical."""
pass
@pytest.mark.usefixtures("integration_test_table_model")
class TestUpdateTestMethod:
"""Class for testing Test Method table do_update() and do_update_all() methods."""
__test__ = True
_next_id = 0
_record = RAMSTKTestMethodRecord
_tag = "test_method"
_update_id = 3
def on_succeed_update(self, tree):
"""Listen for succeed_update messages."""
assert isinstance(tree, Tree)
print(f"\033[36m\n\tsucceed_update_{self._tag} topic was broadcast.")
def on_succeed_update_all(self):
"""Listen for succeed_update messages."""
print(
f"\033[36m\n\tsucceed_update_all topic was broadcast on update all "
f"{self._tag}s"
)
def on_fail_update_wrong_data_type(self, logger_name, message):
"""Listen for do_log_debug messages."""
assert logger_name == "DEBUG"
assert message == (
f"The value for one or more attributes for "
f"{self._tag.replace('_', ' ')} ID {self._update_id} was the wrong type."
)
print(
f"\033[35m\n\tfail_update_{self._tag} topic was broadcast on wrong data "
f"type."
)
def on_fail_update_root_node(self, logger_name, message):
"""Listen for do_log_debug messages."""
assert logger_name == "DEBUG"
assert message == "Attempting to update the root node 0."
print(f"\033[35m\n\tfail_update_{self._tag} topic was broadcast on root node.")
def on_fail_update_non_existent_id(self, logger_name, message):
"""Listen for do_log_debug messages."""
assert logger_name == "DEBUG"
assert (
message == f"Attempted to save non-existent "
f"{self._tag.replace('_', ' ')} with"
f" {self._tag.replace('_', ' ')} "
f"ID 100."
)
print(
f"\033[35m\n\tfail_update_{self._tag} topic was broadcast on non-existent "
f"ID."
)
def on_fail_update_no_data_package(self, logger_name, message):
"""Listen for do_log_debug messages."""
assert logger_name == "DEBUG"
assert (
message == f"No data package found for {self._tag.replace('_', ' ')} ID "
f"{self._update_id}."
)
print(
f"\033[35m\n\tfail_update_{self._tag} topic was broadcast on no data "
f"package."
)
@pytest.mark.integration
def test_do_update(self, integration_test_table_model):
"""Should update the attribute value for record ID."""
pub.subscribe(
self.on_succeed_update,
f"succeed_update_{self._tag}",
)
integration_test_table_model.tree.get_node(self._update_id).data[
self._tag
].description = "Big test test_method."
integration_test_table_model.tree.get_node(self._update_id).data[
self._tag
].boundary_conditions = "Big test boundary conditions."
pub.sendMessage(
f"request_update_{self._tag}",
node_id=self._update_id,
)
pub.unsubscribe(
self.on_succeed_update,
f"succeed_update_{self._tag}",
)
@pytest.mark.integration
def test_do_update_all(self, integration_test_table_model):
"""Should update all records in the records tree."""
pub.subscribe(
self.on_succeed_update_all,
f"succeed_update_all_{self._tag}",
)
pub.sendMessage(f"request_update_all_{self._tag}")
pub.unsubscribe(
self.on_succeed_update_all,
f"succeed_update_all_{self._tag}",
)
@pytest.mark.integration
def test_do_update_wrong_data_type(self, integration_test_table_model):
"""Should send the fail message when the wrong data type is assigned."""
pub.subscribe(
self.on_fail_update_wrong_data_type,
"do_log_debug_msg",
)
_test_method = integration_test_table_model.do_select(self._update_id)
_test_method.boundary_conditions = {1: 2}
pub.sendMessage(
f"request_update_{self._tag}",
node_id=self._update_id,
)
pub.unsubscribe(
self.on_fail_update_wrong_data_type,
"do_log_debug_msg",
)
@pytest.mark.integration
def test_do_update_root_node(self, integration_test_table_model):
"""Should send the fail message when attempting to update the root node."""
pub.subscribe(
self.on_fail_update_root_node,
"do_log_debug_msg",
)
pub.sendMessage(
f"request_update_{self._tag}",
node_id=0,
)
pub.unsubscribe(
self.on_fail_update_root_node,
"do_log_debug_msg",
)
@pytest.mark.integration
def test_do_update_non_existent_id(self):
"""Should send the fail message when updating a non-existent record ID."""
pub.subscribe(
self.on_fail_update_non_existent_id,
"do_log_debug_msg",
)
pub.sendMessage(
f"request_update_{self._tag}",
node_id=100,
)
pub.unsubscribe(
self.on_fail_update_non_existent_id,
"do_log_debug_msg",
)
@pytest.mark.integration
def test_do_update_no_data_package(self, integration_test_table_model):
"""Should send the fail message when the record ID has no data package."""
pub.subscribe(
self.on_fail_update_no_data_package,
"do_log_debug_msg",
)
integration_test_table_model.tree.get_node(self._update_id).data.pop(self._tag)
pub.sendMessage(
f"request_update_{self._tag}",
node_id=self._update_id,
)
pub.unsubscribe(
self.on_fail_update_no_data_package,
"do_log_debug_msg",
)
@pytest.mark.usefixtures("integration_test_table_model")
class TestGetterSetterTestMethod(SystemTestGetterSetterMethods):
"""Class for testing Test Method table getter and setter methods."""
__test__ = True
_package = {"boundary_conditions": "Big test boundary condition."}
_record = RAMSTKTestMethodRecord
_tag = "test_method"
_test_id = 1
|
py | b40c89dd8110a6494e33b40d528b8660dce8c157 | #!/usr/bin/python
from elasticsearch import Elasticsearch
from elasticsearch_dsl import Search
import json
import os
import argparse
# Use DSL to query genomic location, subset of fields,
def _to_stdout(index='association'):
# get connection info from env
HOST = [os.environ.get('ES', 'localhost:9200')]
client = Elasticsearch(HOST)
# validate connection
assert(client.info()['version'])
s = Search(using=client, index="associations").params(size=1000)
for hit in s.scan():
_de_stringify(hit)
print json.dumps(hit.to_dict(), separators=(',', ':'))
def _de_stringify(hit):
""" we have stored some fields as json strings, convert back to obj"""
sources = set(['cgi', 'jax', 'civic', 'oncokb', 'molecularmatch_trials',
'molecularmatch', 'pmkb', 'sage', 'brca', 'jax_trials'])
props = set(dir(hit))
source = sources.intersection(props)
if len(source) == 1:
source = list(source)[0]
setattr(hit, source, json.loads(getattr(hit, source)))
if __name__ == '__main__':
argparser = argparse.ArgumentParser()
argparser.add_argument('--index',
help='index to write to stdout',
)
args = argparser.parse_args()
_to_stdout(args.index)
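    # Illustrative invocation (the script file name is hypothetical):
    #   ES=myhost:9200 python es_dump.py --index associations > associations.json
    # Note that _to_stdout() currently queries the hard-coded "associations" index, so the
    # --index argument is effectively ignored.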
|
py | b40c89f6e1cb2d3d8ed63ba6bdb159fb21ddeda7 | boletim = []
med = nota = 0
while True:
print('-' * 50)
alunos = str(input('Nome: '))
nota1 = float(input('Nota 1: '))
nota2 = float(input('Nota 2: '))
med = (nota2 + nota1) / 2
boletim.append([alunos, [nota1, nota2], med])
res = ' '
while res not in 'SN':
res = str(input('Quer continuar? [S/N]: ')).strip().upper()[0]
if res == 'N':
break
print(boletim)
print('=*' * 20)
print(f'{"Nº.":<5}{"NOME":<5}{"MÉDIA":>10}')
print('-' * 30)
for c, a in enumerate(boletim):
print(f'{c:<5}{a[0]:<5}{a[2]:>10.1f}')
while True:
print('-' * 50)
nota = int(input(('Pretende ver a nota de que aluno? [-1 para parar]: ')))
if nota == -1:
break
    if nota <= len(boletim) - 1:
print(f'Notas do {boletim[nota][0]} são: {boletim[nota][1]}')
print('RALA VAGABUNDO')
|
py | b40c8af45de7b82bd931cdc350fc05c83a5e142d | import enum
import logging
from typing import Dict
import typing
import regex
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from karp.domain import errors, repository
from karp.application import config
from karp.services import unit_of_work
from karp.infrastructure.sql.sql_entry_repository import SqlEntryRepository
from karp.infrastructure.sql.sql_resource_repository import SqlResourceRepository
from .sql_index import SqlSearchService
DUPLICATE_PROG = regex.compile(r"Duplicate entry '(.+)' for key '(\w+)'")
_create_new = object()
logger = logging.getLogger("karp")
class SqlUnitOfWork: # (unit_of_work.UnitOfWork):
class State(enum.Enum):
initialized = 0
begun = 1
committed = 2
aborted = 3
def __init__(self): # , repo, *, session=_create_new):
# self._repo = repo
# if session is _create_new:
# self._session = db.SessionLocal()
# self._session_is_created_here = True
# else:
# self._session = session
# self._session_is_created_here = False
# self._repo.set_session(self._session)
self._state = SqlUnitOfWork.State.initialized
self._session = None
# @property
# def repo(self):
# return self._repo
def __enter__(self):
return self.begin()
# def __exit__(self, exc_type, exc_val, exc_tb):
# if exc_type is None:
# # self.commit()
# if self._session_is_created_here:
# self.close()
# return False
# self.abort()
# if self._session_is_created_here:
# self.close()
# return False # re-raise
def begin(self):
self._check_state(expected_state=SqlUnitOfWork.State.initialized)
self._state = SqlUnitOfWork.State.begun
return self
def _check_state(self, expected_state):
if self._state != expected_state:
pass
# logger.warning(
# "State conflict. unit_of_work is in state '%s' and not '%s'",
# self._state,
# expected_state,
# )
# raise RuntimeError(
# f"State conflict. unit_of_work is in state '{self._state!s}' and not '{expected_state!s}'"
# )
def _commit(self):
self._check_state(expected_state=SqlUnitOfWork.State.begun)
# try:
self._session.commit()
# self._state = SqlUnitOfWork.State.initialized
# except db.exc.IntegrityError as err:
# logger.exception(err)
# str_err = str(err)
# print(f"str(err) = {str_err}")
# if "Duplicate entry" in str_err:
# match = DUPLICATE_PROG.search(str_err)
# if match:
# value = match.group(1)
# key = match.group(2)
# if key == "PRIMARY":
# key = self.primary_key()
# else:
# value = "UNKNOWN"
# key = "UNKNOWN"
# raise errors.IntegrityError(key=key, value=value) from err
# raise errors.IntegrityError("Unknown integrity error") from err
def abort(self):
self._check_state(expected_state=SqlUnitOfWork.State.begun)
self._session.rollback()
self._state = SqlUnitOfWork.State.initialized
def rollback(self):
self._check_state(expected_state=SqlUnitOfWork.State.begun)
self._session.rollback()
self._state = SqlUnitOfWork.State.initialized
def close(self):
self._session.close()
# def __getattr__(self, name):
# return self._repo.__getattribute__(name)
DEFAULT_SESSION_FACTORY = sessionmaker(bind=create_engine(config.DB_URL))
class SqlResourceUnitOfWork(SqlUnitOfWork, unit_of_work.ResourceUnitOfWork):
def __init__(self, session_factory=DEFAULT_SESSION_FACTORY):
super().__init__()
self.session_factory = session_factory
self._resources = None
def __enter__(self):
self._session = self.session_factory()
self._resources = SqlResourceRepository(self._session)
return super().__enter__()
@property
def repo(self) -> SqlResourceRepository:
if self._resources is None:
raise RuntimeError("No resources")
return self._resources
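        # Illustrative usage sketch (assumes the base unit-of-work exposes commit(), which
        # delegates to _commit()):
        #   with SqlResourceUnitOfWork() as uow:
        #       ...  # work with uow.repo, a SqlResourceRepository bound to the new session
        #       uow.commit()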
class SqlEntryUnitOfWork(
SqlUnitOfWork,
unit_of_work.EntryUnitOfWork,
entry_repository_type="sql_v1",
is_default=True,
):
def __init__(
self,
repo_settings: Dict,
resource_config: typing.Dict,
session_factory=DEFAULT_SESSION_FACTORY,
):
super().__init__()
self.session_factory = session_factory
self._entries = None
self.repo_settings = repo_settings
self.resource_config = resource_config
def __enter__(self):
self._session = self.session_factory()
self._entries = SqlEntryRepository.from_dict(
self.repo_settings, self.resource_config, session=self._session
)
return super().__enter__()
@property
def repo(self) -> SqlEntryRepository:
if self._entries is None:
raise RuntimeError("No entries")
return self._entries
@classmethod
def from_dict(cls, settings: typing.Dict, resource_config, **kwargs):
return cls(repo_settings=settings, resource_config=resource_config, **kwargs)
def collect_new_events(self) -> typing.Iterable:
if self._entries:
return super().collect_new_events()
else:
return []
class SqlIndexUnitOfWork(unit_of_work.IndexUnitOfWork, index_type="sql_index"):
@classmethod
def from_dict(cls, **kwargs):
print(f"SqlIndexUnitOfWork.from_dict: kwargs = {kwargs}")
return cls()
def __init__(self, session_factory=DEFAULT_SESSION_FACTORY):
self.session_factory = session_factory
self._index = SqlSearchService()
def _commit(self):
pass
def rollback(self):
pass
@property
def repo(self):
if not self._index:
raise RuntimeError()
return self._index
# @unit_of_work.create_entry_unit_of_work.register(SqlEntryRepository)
# def _()
|
py | b40c8b298aff714de50acd10be93f4e9dec21ef0 | import socket
import struct
import sys,os
import mptnUtils as MPTN
import uuid
import ipaddress
from twisted.internet import reactor
from twisted.internet.protocol import DatagramProtocol, ClientCreator, ReconnectingClientFactory
import cjson
import traceback
import xml.dom.minidom
from xml.dom.minidom import parseString
import time
lib_path = "/../../ComponentDefinitions/WuKongStandardLibrary.xml"
class WKPF(DatagramProtocol):
GET_WUCLASS_LIST = 0x90
GET_WUCLASS_LIST_R = 0x91
GET_WUOBJECT_LIST = 0x92
GET_WUOBJECT_LIST_R = 0x93
READ_PROPERTY = 0x94
READ_PROPERTY_R = 0x95
WRITE_PROPERTY = 0x96
WRITE_PROPERTY_R = 0x97
REQUEST_PROPERTY_INIT = 0x98
REQUEST_PROPERTY_INIT_R = 0x99
GET_LOCATION = 0x9A
GET_LOCATION_R = 0x9B
SET_LOCATION = 0x9C
SET_LOCATION_R = 0x9D
GET_FEATURES = 0x9E
GET_FEATURES_R = 0x9F
SET_FEATURE = 0xA0
SET_FEATURE_R = 0xA1
CHANGE_MAP = 0xA2
CHANGE_MAP_R = 0xA3
CHANGE_LINK = 0xA4
CHANGE_LINK_R = 0xA5
ERROR_R = 0xAF
REPROG_OPEN = 0x10
REPROG_OPEN_R = 0x11
REPROG_WRITE = 0x12
REPROG_WRITE_R = 0x13
REPROG_COMMIT = 0x14
REPROG_COMMIT_R = 0x15
REPROG_REBOOT = 0x16
REPROG_REBOOT_R = 0x17
WKREPROG_OK = 0
WKREPROG_REQUEST_RETRANSMIT = 1
WKREPROG_TOOLARGE = 2
WKREPROG_FAILED = 3
LIB_INFUSION = 0
APP_INFUSION = 1
LINK_TABLE = 2
COMPONENT_MAP = 3
INITVALUES_TABLE = 4
DATATYPE_SHORT = 0
DATATYPE_BOOLEAN = 1
DATATYPE_REFRESH = 2
DATATYPE_ARRAY = 3
DATATYPE_STRING = 4
DATATYPE_ThresholdOperator = 10
DATATYPE_LogicalOperator = 11
DATATYPE_MathOperator = 12
DATATYPE_Pin = 13
WK_PROPERTY_ACCESS_READONLY = 1<<7
WK_PROPERTY_ACCESS_WRITEONLY = 1<<6
WK_PROPERTY_ACCESS_READWRITE = (WK_PROPERTY_ACCESS_READONLY+WK_PROPERTY_ACCESS_WRITEONLY)
WKCOMM_MESSAGE_PAYLOAD_SIZE=40
OBJECTS_IN_MESSAGE = (WKCOMM_MESSAGE_PAYLOAD_SIZE-3)/4
def __init__(self,dev,host,port,gtwaddr):
self.host = host
self.port = port
self.device = dev
self._reactor = reactor
self.gtwaddr = gtwaddr
self.mptnaddr = 0
self.nodeid=0
self.location = 'Default'
self.properties= []
for i in range(0,100):
self.properties.append([])
self.tablebin=[]
self.components=[]
self.links=[]
self.seq = 1000
for i in range(0,4096):
self.tablebin.append(0)
self.load()
self.init()
def init(self):
reactor.listenUDP(self.port, self)
reactor.callWhenRunning(self.doInit)
def load(self):
try:
f=open('udpwkpf-%d.json' % self.port)
o = cjson.decode(f.read())
# print o
self.location = o['location']
self.uuid = o['uuid']
self.nodeid = o['nodeid']
self.components=o['components']
self.links=o['links']
self.mptnaddr = o['mptnaddr']
try:
self.properties = o['props']
except:
pass
f.close()
except:
self.uuid = map(ord,str(uuid.uuid4().bytes))
self.save()
return
if o.has_key('uuid') == False:
self.uuid = map(ord,str(uuid.uuid4().bytes))
self.save()
def save(self):
try:
o = {'location': self.location,'uuid':self.uuid,'nodeid':self.nodeid,'components':self.components, 'links':self.links,'mptnaddr':self.mptnaddr,'props':self.properties}
f = open('udpwkpf-%d.json' % self.port,'w')
f.write(cjson.encode(o))
f.close()
except:
traceback.print_exc()
pass
def doInit(self):
payload_length = 0
p = struct.pack('11B', 0xAA,0x55,self.nodeid,self.host&0xff,(self.host>>8)&0xff,(self.host>>16)&0xff,(self.host>>24)&0xff,self.port%256,self.port/256,2,payload_length)
self.transport.write(p,(self.gtwaddr,MPTN.MPTN_UDP_PORT))
self.state = 'WAITID'
def requestID(self):
dest_id = MPTN.MASTER_ID
src_id = 0xffffffff
msg_type = MPTN.MPTN_MSGTYPE_IDREQ
message = MPTN.create_packet_to_str(dest_id, src_id, msg_type, ''.join(map(chr,self.uuid)))
payload_length = len(message)
address = self.host
port = self.port
p = struct.pack('11B', 0xAA,0x55,self.nodeid,address&0xff,(address>>8)&0xff,(address>>16)&0xff,(address>>24)&0xff,port%256,port/256,1,payload_length)
p = p+message
self.transport.write(p,(self.gtwaddr,MPTN.MPTN_UDP_PORT))
self.state = 'WAITADDR'
def datagramReceived(self, data, (host, port)):
s = ''
for d in data:
s = s + '%02x '% ord(d)
# print self.state,s
if self.state == 'WAITID':
if ord(data[0]) == 0xAA and ord(data[1]) == 0x55:
self.nodeid = ord(data[2])
self.save()
# print 'get node id', self.nodeid
self.send(0,'AAAA')
self.state = 'INIT'
self.requestID()
elif self.state == 'WAITRSP':
self.state = 'INIT'
pass
elif self.state == 'WAITADDR':
dest_id, src_id, msg_type, payload = MPTN.extract_packet_from_str(data[11:])
if msg_type == MPTN.MPTN_MSGTYPE_IDACK and src_id == MPTN.MASTER_ID:
src_id = dest_id
print "Your ID is %d of which dotted format is %s" % (src_id, MPTN.ID_TO_STRING(src_id))
self.state = 'INIT'
elif self.state == 'INIT':
dest_id, src_id, msg_type, payload = MPTN.extract_packet_from_str(data[11:])
#print dest_id,src_id,msg_type, map(ord, payload) #payload info
if self.mptnaddr == 0:
self.mptnaddr = dest_id
self.save()
if msg_type == 24:
msg_id = ord(data[20])
seq = ord(data[21])+ord(data[22])*256
self.parseWKPF(src_id,msg_id,seq,data[23:])
def parseWKPF(self,src_id,msgid,seq,payload):
# print 'WKPF ID %x %x %x'%( msgid,src_id,seq)
if msgid == WKPF.GET_LOCATION:
offset = ord(payload[0])
s = self.location[offset:]
if offset == 0:
s = s[:WKPF.WKCOMM_MESSAGE_PAYLOAD_SIZE-4]
msg = chr(len(self.location))+s
else:
s = s[:WKPF.WKCOMM_MESSAGE_PAYLOAD_SIZE-3]
msg = s
p = struct.pack('3B',WKPF.GET_LOCATION_R,seq&255, (seq>>8)&255)+msg
self.send(src_id,p)
pass
elif msgid == WKPF.SET_LOCATION:
# print map(ord,payload)
offset = ord(payload[0])
l = ord(payload[1])
if offset == 0:
self.location = payload[3:]
else:
self.location = self.location + payload[2:]
self.save()
p = struct.pack('3B',WKPF.SET_LOCATION_R,seq&255, (seq>>8)&255)+chr(0)
self.send(src_id,p)
elif msgid == WKPF.GET_WUCLASS_LIST:
n_pack = ord(payload[0])
total = 1
n_item = len(self.device.classes.keys())
msg = struct.pack('3B',0,total,n_item)
p=struct.pack('3B',WKPF.GET_WUCLASS_LIST_R,seq&255, (seq>>8)&255)+msg
for CID in self.device.classes.keys():
CID = int(CID)
p = p + struct.pack('3B', (CID>>8)&0xff, CID&0xff, self.device.classtypes[CID])
self.send(src_id,p)
elif msgid == WKPF.GET_WUOBJECT_LIST:
n_pack = ord(payload[0])
total = 1
n_item = len(self.device.objects)
msg = struct.pack('3B',0,total,n_item)
p=struct.pack('3B',WKPF.GET_WUOBJECT_LIST_R,seq&255, (seq>>8)&255)+msg
for i in range(0,len(self.device.objects)):
obj = self.device.objects[i]
CID = obj.getID()
p = p + struct.pack('4B', obj.port, (CID>>8)&0xff, CID&0xff, 0)
self.send(src_id,p)
elif msgid == WKPF.REPROG_OPEN:
            fileid = ord(payload[0])
#self.openTable(fileid)
msg = struct.pack('3B', WKPF.WKREPROG_OK, 1024 % 256, 1024/256)
p=struct.pack('3B',WKPF.REPROG_OPEN_R,seq&255, (seq>>8)&255)+msg
self.send(src_id,p)
elif msgid == WKPF.REPROG_WRITE:
pos = ord(payload[0])+ord(payload[1])*256
for i in range(2,len(payload)):
self.tablebin[pos+i-2] = ord(payload[i])
msg = chr(WKPF.WKREPROG_OK)
self.size = pos + len(payload)-2
p=struct.pack('3B',WKPF.REPROG_WRITE_R,seq&255, (seq>>8)&255)+msg
self.send(src_id,p)
elif msgid == WKPF.REPROG_COMMIT:
msg = chr(WKPF.WKREPROG_OK)
p=struct.pack('3B',WKPF.REPROG_COMMIT_R,seq&255, (seq>>8)&255)+msg
self.send(src_id,p)
s = ''
for i in range(0,self.size):
s = s + '%02x ' % self.tablebin[i]
# print s
self.parseTables()
elif msgid == WKPF.REPROG_REBOOT:
msg = chr(WKPF.WKREPROG_OK)
p=struct.pack('3B',WKPF.REPROG_REBOOT_R,seq&255, (seq>>8)&255)+msg
self.send(src_id,p)
elif msgid == WKPF.WRITE_PROPERTY:
#print map(ord,payload)
port = ord(payload[0])
cID = ord(payload[1])*256 + ord(payload[2])
if cID == 0:
# The request from the Master will not have componentID in it. It will
# use the port directly.
                cID = self.findComponentByPort(port)
pID = ord(payload[3])
dtype = ord(payload[4])
if dtype == WKPF.DATATYPE_SHORT or dtype == WKPF.DATATYPE_REFRESH:
val = ord(payload[5])*256 + ord(payload[6])
elif dtype == WKPF.DATATYPE_ARRAY:
val = map(ord, payload[5:])
val = val[1:1+val[0]]
elif dtype == WKPF.DATATYPE_STRING:
val = "".join(payload[6:6+ord(payload[5])])
else:
val = True if ord(payload[5]) else False
p=struct.pack('7B',WKPF.WRITE_PROPERTY_R,seq&255, (seq>>8)&255, port, (cID>>8)&0xff, cID&0xff, pID)
self.send(src_id,p)
# print "before WRITE_PROPERTY setProperty"
self.setProperty(port,pID, val)
pass
def parseTables(self):
i = 0
files={}
while i < self.size:
len = self.tablebin[i]+self.tablebin[i+1]*256
type = self.tablebin[i+2]
i += 3
files[type] = self.tablebin[i:i+len]
i += len
# print 'type %d size %d' % (type,len)
if type == WKPF.LINK_TABLE:
self.parseLinkTable(files[type])
elif type == WKPF.COMPONENT_MAP:
self.parseComponentMap(files[type])
elif type == WKPF.INITVALUES_TABLE:
self.parseInitTable(files[type])
self.save()
def parseLinkTable(self,data):
links = data[0]+data[1]*256
# print 'Links(%d):' % links
self.links={}
for i in range(0,links):
p = 2 + 6 * i
src_id = data[p]+data[p+1]*256
s_pID = data[p+2]
dest_id = data[p+3]+data[p+4]*256
d_pID = data[p+5]
# print ' %d.%d ---> %d.%d' % (src_id,s_pID,dest_id,d_pID)
if self.links.has_key('%d.%d' % (src_id,s_pID)):
self.links['%d.%d'%(src_id,s_pID)].append([dest_id,d_pID])
else:
self.links['%d.%d'%(src_id,s_pID)]= [[dest_id,d_pID]]
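        # Link-table wire format read above (descriptive summary of the parsing code):
        #   bytes 0-1: link count (little-endian uint16), then 6 bytes per link:
        #   source component id (2 bytes LE), source property id (1 byte),
        #   destination component id (2 bytes LE), destination property id (1 byte).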
def addProperties(self,port,cls,n=7):
if self.properties[port] != []:
return
self.properties[port] = cls.defaultProps
# props = []
# for i in range(0,n): props.append({'value':0,'dirty':False})
# self.properties[port] = props
def checkDirty(self,port,pID):
for i in range(self.last_dirty_ptr,len(self.properties)):
if self.properties[self.last_dirty_ptr]['dirty']:
self.last_dirty_ptr = i + 1
return self.properties[port][pID]
self.last_dirty_ptr = 0
return None
def getProperty(self,port,pID):
return self.properties[port][pID]['value']
def setProperty(self,port,pID,val):
# print 'setProperty',port,pID,val
# print "\t",self.properties
try:
if self.properties[port][pID]['value'] != val:
self.properties[port][pID]['value'] = val
self.properties[port][pID]['dirty'] = True
self.propagateProperty(port,pID,val)
except Exception as e:
print e
self.properties[port][pID]['value'] = val
self.propagateProperty(port,pID,val)
def remoteSetProperty(self,dest_id,cls,port,pID,val,src_cid,dest_cid):
# print "cls=",cls
# print "dest_id=",dest_id
# print "port=",port
src_id = self.mptnaddr
if type(val) == bool:
p = struct.pack('9B', WKPF.WRITE_PROPERTY, self.seq & 0xff, (self.seq >> 8) & 0xff, port, (cls >> 8) & 0xff, cls & 0xff, pID, WKPF.DATATYPE_BOOLEAN, val & 0xff)
elif type(val) == list:
val_len = len(val)
val = val + [0]*(30 - val_len)
p = struct.pack('39B', WKPF.WRITE_PROPERTY, self.seq & 0xff, (self.seq >> 8) & 0xff, port,
(cls >> 8) & 0xff, cls & 0xff, pID, WKPF.DATATYPE_ARRAY,
val_len, *map(lambda x: x&0xff ,val))
elif type(val) == str:
val_len = len(val)
val = list(val)
val = val + ['0']*(30 - val_len)
p = struct.pack('39B', WKPF.WRITE_PROPERTY, self.seq & 0xff, (self.seq >> 8) & 0xff, port,
(cls >> 8) & 0xff, cls & 0xff, pID, WKPF.DATATYPE_STRING,
val_len, *map(lambda x: ord(x)&0xff ,val))
else:
p = struct.pack('10B', WKPF.WRITE_PROPERTY, self.seq & 0xff, (self.seq >> 8) & 0xff, port, (cls >> 8) & 0xff, cls & 0xff, pID, WKPF.DATATYPE_SHORT, (val >> 8)&0xff, val & 0xff)
msg_type = MPTN.MPTN_MSGTYPE_FWDREQ
self.send(dest_id,p)
self.seq = self.seq + 1
pass
def remoteGetProperty(self,addr,port,pID,cb):
# print 'remote get is not implemented yet'
pass
def findComponentByPort(self, port):
for i in range(0,len(self.components)):
c = self.components[i]
for e in c['ports']:
# print "findComponentByPort", port, "e", e
if e[1] == port and e[0] == self.mptnaddr:
return i
return -1
def getComponent(self,cid):
# print "cid ",cid
# print "self.components ", self.components[:]
return self.components[cid]
def propagateProperty(self,port,pID,val):
dirty_id = self.findComponentByPort(port)
# print "propagateProperty dirty_id ", dirty_id
if dirty_id == -1: return
comp = self.getComponent(dirty_id)
# print 'propagateProperty check propagate', self.links
for l in self.links:
src_id,src_propertyID = l.split('.')
src_id = int(src_id)
src_propertyID = int(src_propertyID)
# print 'propagateProperty check link', src_id,dirty_id,pID,src_propertyID
if src_id == dirty_id and pID == src_propertyID:
target_links = self.links[l]
# print "propagateProperty target_links", target_links
for target in target_links:
try:
# print "propagateProperty target", target
comp = self.getComponent(target[0])
# print "propagateProperty comp", comp
if comp['ports'][0][0] == self.mptnaddr:
# print "propagateProperty setProperty"
self.setProperty(comp['ports'][0][1],target[1],val)
else:
self.remoteSetProperty(comp['ports'][0][0],comp['ID'],comp['ports'][0][1], target[1],val,src_id,target[0])
except:
traceback.print_exc()
pass
def parseInitTable(self,data):
number = data[0]+data[1]*256
i = 2
# print data
while i < len(data):
# print data[i:]
cid = data[i]+data[i+1]*256
pID = data[i+2]
size = data[i+3]
comp = self.getComponent(cid)
# print comp,pID,size
for p in comp['ports']:
# print p,self.mptnaddr
if p[0] == self.mptnaddr:
if size == 1:
v = True if data[i+4] else False
# print 'init prop %d of component %d to be %d' % (pID, cid, v)
self.setProperty(p[1], pID, v)
elif size == 2:
v = data[i+4]+256*data[i+5]
self.setProperty(p[1], pID, v)
# print 'init prop %d of component %d to be %d' % (pID, cid, v)
else:
print 'Unknown value size %d' % size
i += 4 + size
pass
def parseComponentMap(self,data):
n_item = data[0] + data[1]*256
self.components=[]
for i in range(0,n_item):
addr = data[2+i*2]+data[2+i*2+1]*256
n_endpoints = data[addr]
clsid = data[addr+1]+data[addr+2]*256
# print 'component class ID %d' % clsid
com = {'ID':clsid, 'ports': []}
for j in range(0,n_endpoints):
mptnaddr = (data[addr+3+j*5+3]<<24) | (data[addr+3+j*5+2]<<16) | (data[addr+3+j*5+1]<<8) | (data[addr+3+j*5])
port = data[addr+3+j*5+4]
# print ' addr %x at port %d' % (mptnaddr,port)
com['ports'].append([mptnaddr,port])
self.device.checkObject(clsid, port)
self.components.append(com)
def initObjects(self):
for i in range(0,len(self.components)):
com = self.components[i]
for j in range(0,len(com['ports'])):
p = com['ports'][j]
addr = p[0]
port = p[1]
self.device.checkObject(int(com['ID']), port)
def send(self,dest_id,payload):
src_id = self.mptnaddr
msg_type = MPTN.MPTN_MSGTYPE_FWDREQ
# print src_id
message = MPTN.create_packet_to_str(dest_id, src_id, msg_type, payload)
payload_length = len(message)
p = struct.pack('11B', 0xAA,0x55,src_id&0xff,dest_id&0xff,(dest_id>>8)&0xff,(dest_id>>16)&0xff,(dest_id>>24)&0xff,self.port%256,self.port/256,1,payload_length)
p = p+message
self.transport.write(p,(self.gtwaddr,MPTN.MPTN_UDP_PORT))
class WuObject:
def __init__(self,cls):
self.cls = cls
self.port = 0
self.refresh_rate = 0
self.next_scheduled_update = 0
def getID(self):
return self.cls.ID
def setProperty(self,pID,val):
# print 'wuobject setProperty'
self.cls.setProperty(self.port,pID,val)
def getProperty(self,pID):
return self.cls.getProperty(self.port,pID)
class WuClass:
def __init__(self):
self.ID = 0
self.wkpf = None
self.propertyNumber = 0
self.props_datatype_and_access = [] # this follows the definition of properties[] in wuclass_t
self.defaultProps = [] # this is default value list
def update(self,obj,pID,value):
pass
def newObject(self):
return WuObject(self)
def setProperty(self,port,pID,val):
# print 'WuClass setProperty'
self.wkpf.setProperty(port,pID,val)
def getProperty(self,port,pID):
return self.wkpf.getProperty(port,pID)
def getWuClassID(self,name):
for p in sys.path:
path = p+lib_path
if os.path.isfile(path):
break
dom = xml.dom.minidom.parse(path)
for cls in dom.getElementsByTagName("WuClass"):
if cls.attributes['name'].nodeValue == name:
return int(cls.attributes['id'].nodeValue)
print "Can not find class ID for ", name
return -1
def unicode_to_int(self, key):
datatype = {'short':WKPF.DATATYPE_SHORT,
'boolean':WKPF.DATATYPE_BOOLEAN,
'refresh_rate':WKPF.DATATYPE_REFRESH,
'array':WKPF.DATATYPE_ARRAY,
'string':WKPF.DATATYPE_STRING}
datatype_enum = {'ThresholdOperator':WKPF.DATATYPE_ThresholdOperator,
'LogicalOperator':WKPF.DATATYPE_LogicalOperator,
'MathOperator':WKPF.DATATYPE_MathOperator,
'Pin':WKPF.DATATYPE_Pin}
access = {'readonly':WKPF.WK_PROPERTY_ACCESS_READONLY,
'writeonly':WKPF.WK_PROPERTY_ACCESS_WRITEONLY,
'readwrite':WKPF.WK_PROPERTY_ACCESS_READWRITE}
if key in datatype:
return datatype[key]
elif key in datatype_enum:
return datatype_enum[key]
elif key in access:
return access[key]
else:
raise NotImplementedError
def getWuTypedefEnum(self, name):
for p in sys.path:
path = p+lib_path
if os.path.isfile(path):
break
component_string = open(path).read()
dom = parseString(component_string)
wutypedefs_dom = dom.getElementsByTagName("WuTypedef")
wuTypedefs = {}
for wutypedef in wutypedefs_dom:
wuTypedefs[wutypedef.getAttribute('name')] = tuple([element.getAttribute('value') for element in wutypedef.getElementsByTagName('enum')])
enum_val_tuple = wuTypedefs[name]
enum_val_dict = {}
for i in range(len(enum_val_tuple)):
enum_val_dict[str(enum_val_tuple[i])] = i
# print enum_val_dict
return enum_val_dict
def addDefaultProperties(self, DefaultVal):
self.defaultProps.append({'value':DefaultVal,'dirty':False})
def WKPF_IS_READONLY_PROPERTY(self, typeAndAccess):
return ((~typeAndAccess) & WKPF.WK_PROPERTY_ACCESS_WRITEONLY)
def WKPF_IS_WRITEONLY_PROPERTY(self, typeAndAccess):
return ((~typeAndAccess) & WKPF.WK_PROPERTY_ACCESS_READONLY)
def WKPF_GET_PROPERTY_DATATYPE(self, typeAndAccess):
return ((typeAndAccess) & ~WKPF.WK_PROPERTY_ACCESS_READWRITE)
def loadClass(self,name):
for p in sys.path:
path = p+lib_path
if os.path.isfile(path):
break
dom = xml.dom.minidom.parse(path)
obj = self.__class__
for cls in dom.getElementsByTagName("WuClass"):
if cls.attributes['name'].nodeValue == name:
self.ID = int(cls.attributes['id'].nodeValue)
pID_count = 0
self.names=[]
for p in cls.getElementsByTagName('property'):
# create a props_datatype_and_access list
datatype_unicode = p.attributes['datatype'].nodeValue
access_unicode = p.attributes['access'].nodeValue
x = self.unicode_to_int(datatype_unicode) + self.unicode_to_int(access_unicode)
self.props_datatype_and_access.append(x)
# create a defaultProps list to store the default values from standardlibrary.xml
try:
default_unicode = p.attributes['default'].nodeValue
if self.WKPF_GET_PROPERTY_DATATYPE(x) == WKPF.DATATYPE_BOOLEAN:
if default_unicode == 'false':
self.addDefaultProperties(False)
elif default_unicode == 'true':
self.addDefaultProperties(True)
else:
self.addDefaultProperties(bool(default_unicode))
elif self.WKPF_GET_PROPERTY_DATATYPE(x) == WKPF.DATATYPE_SHORT or self.WKPF_GET_PROPERTY_DATATYPE(x) == WKPF.DATATYPE_REFRESH:
self.addDefaultProperties(int(default_unicode))
elif self.WKPF_GET_PROPERTY_DATATYPE(x) == WKPF.DATATYPE_ThresholdOperator or self.WKPF_GET_PROPERTY_DATATYPE(x) == WKPF.DATATYPE_LogicalOperator or self.WKPF_GET_PROPERTY_DATATYPE(x) == WKPF.DATATYPE_MathOperator or self.WKPF_GET_PROPERTY_DATATYPE(x) == WKPF.DATATYPE_Pin:
enum_val_dict = self.getWuTypedefEnum(datatype_unicode)
enum_val = enum_val_dict[str(default_unicode).upper()]
self.addDefaultProperties(enum_val)
else:
raise NotImplementedError
except Exception as e: # if default is not defined, it will fall into here
self.addDefaultProperties(0)
# print e
# count property number
obj.__dict__[p.attributes['name'].nodeValue] = pID_count
self.names.append(p.attributes['name'].nodeValue)
pID_count = pID_count + 1
self.propertyNumber = pID_count
return
print "Can not find class ID for ", name
self.propertyNumber = 0
return
def getPropertyNumber(self):
return self.propertyNumber
def getPropertyName(self,ID):
try:
return self.names[ID]
except:
return '%d' % ID
class Device:
FLAG_APP_CAN_CREATE_INSTANCE = 2
FLAG_VIRTUAL = 1
def __init__(self,addr,localaddr):
tcp_address = localaddr.split(":")
address = MPTN.ID_FROM_STRING(tcp_address[0])
port = int(tcp_address[1])
self.wkpf = WKPF(self,address,port,addr)
self.classes={}
self.classtypes = {}
self.objects= []
self.init()
self.wkpf.initObjects()
reactor.callLater(1,self.updateTheNextDirtyObject)
reactor.callLater(1,self.updateRefreshRateObject)
pass
def getLocation(self):
return self.wkpf.location
def checkObject(self,clsid,port):
i = 0
while i < len(self.objects):
obj = self.objects[i]
if obj.port == port:
break
i = i + 1
if i == len(self.objects):
# If i == len(self.objects), it means that the device only has this wuclass and does not yet have a wuobject.
# This happens because we only use the addWuClass function and do not use the addWuObject function in the python device.
# After we deploy the FBP with this wuclass, the wuobject will be created here.
# print 'add object class %d at port %d' % (clsid,port)
try:
if clsid in self.classes:
cls = self.classes[clsid]
if cls:
obj = cls.newObject()
obj.port = port
self.wkpf.addProperties(obj.port, obj.cls)
self.objects.append(obj)
except:
traceback.print_exc()
print "Can not find class %d" % clsid
def wkpf_schedule_next_update_for_wuobject(self, obj):
for i in range(int(obj.cls.propertyNumber)):
if obj.cls.WKPF_GET_PROPERTY_DATATYPE(obj.cls.props_datatype_and_access[i]) == WKPF.DATATYPE_REFRESH:
p = self.wkpf.properties[obj.port][i]
obj.refresh_rate = p['value']
if obj.refresh_rate == 0:
obj.next_scheduled_update = 0
else:
obj.next_scheduled_update = obj.refresh_rate + int(round(time.time() *1000))
return
def updateRefreshRateObject(self):
for obj in self.objects:
#print obj.port
#print self.wkpf.properties
if obj.refresh_rate > 0 and obj.next_scheduled_update < int(round(time.time() *1000)):
self.wkpf_schedule_next_update_for_wuobject(obj)
obj.cls.update(obj, None, None)
reactor.callLater(0, self.updateRefreshRateObject)
def updateTheNextDirtyObject(self):
for obj in self.objects:
for i in range(0,len(self.wkpf.properties[obj.port])):
p = self.wkpf.properties[obj.port][i]
if p['dirty'] == True:
p['dirty'] = False
try:
obj.cls.update(obj,i,p['value'])
except:
traceback.print_exc()
pass
reactor.callLater(0.3, self.updateTheNextDirtyObject)
def getPortClassID(self,port):
return self.objects[port].cls.ID
def addClass(self,cls,flags):
self.classes[cls.ID] = cls
self.classtypes[cls.ID] = flags
cls.wkpf = self.wkpf
def addObject(self,ID):
cls = self.classes[ID]
if cls:
obj = cls.newObject()
obj.port = len(self.objects)+1
self.wkpf.addProperties(obj.port, obj.cls)
self.objects.append(obj)
self.wkpf_schedule_next_update_for_wuobject(obj)
return obj
return None
def setProperty(self,pID, val):
self.wkpf.setProperty(pID,val)
def getProperty(self,pID):
return self.wkpf.getProperty(port,pID)
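# --- Hedged usage sketch (not part of the original module) -------------------
# The classes above suggest the following wiring: a concrete device subclasses
# Device, registers its WuClasses inside init(), and lets the twisted reactor
# drive updateTheNextDirtyObject/updateRefreshRateObject. Every name marked as
# assumed below is illustrative only and is not defined in this file.
#
# class ThresholdClass(WuClass):                       # assumed class name
#     def __init__(self):
#         WuClass.__init__(self)
#         self.loadClass('Threshold')                  # assumed entry in the component XML
#     def update(self, obj, pID, value):
#         pass                                         # react to dirty properties here
#
# class MyDevice(Device):                              # assumed subclass
#     def init(self):
#         self.addClass(ThresholdClass(), Device.FLAG_APP_CAN_CREATE_INSTANCE)
#
# dev = MyDevice(gateway_addr, '127.0.0.1:9000')       # gateway address and local ip:port are assumptions
# reactor.run()
# -----------------------------------------------------------------------------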
|
py | b40c8b3910eb24ef0e59112e42d036cad71928b5 | """
juypterlab_sparkui_tab setup
"""
import json
import sys
from pathlib import Path
import setuptools
HERE = Path(__file__).parent.resolve()
# The name of the project
name = "juypterlab_sparkui_tab"
lab_path = (HERE / name.replace("-", "_") / "labextension")
# Representative files that should exist after a successful build
ensured_targets = [
str(lab_path / "package.json"),
str(lab_path / "static/style.js")
]
labext_name = "juypterlab-sparkui-tab"
data_files_spec = [
("share/jupyter/labextensions/%s" % labext_name, str(lab_path.relative_to(HERE)), "**"),
("share/jupyter/labextensions/%s" % labext_name, str("."), "install.json"),
("etc/jupyter/jupyter_server_config.d",
"jupyter-config/server-config", "juypterlab_sparkui_tab.json"),
# For backward compatibility with notebook server
("etc/jupyter/jupyter_notebook_config.d",
"jupyter-config/nb-config", "juypterlab_sparkui_tab.json"),
]
long_description = (HERE / "README.md").read_text()
# Get the package info from package.json
pkg_json = json.loads((HERE / "package.json").read_bytes())
setup_args = dict(
name=name,
version=pkg_json["version"],
url=pkg_json["homepage"],
author=pkg_json["author"]["name"],
author_email=pkg_json["author"]["email"],
description=pkg_json["description"],
license=pkg_json["license"],
long_description=long_description,
long_description_content_type="text/markdown",
packages=setuptools.find_packages(),
install_requires=[
"jupyter_server>=1.6,<2"
],
zip_safe=False,
include_package_data=True,
python_requires=">=3.6",
platforms="Linux, Mac OS X, Windows",
keywords=["Jupyter", "JupyterLab", "JupyterLab3"],
classifiers=[
"License :: OSI Approved :: BSD License",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Framework :: Jupyter",
"Framework :: Jupyter :: JupyterLab",
"Framework :: Jupyter :: JupyterLab :: 3",
"Framework :: Jupyter :: JupyterLab :: Extensions",
"Framework :: Jupyter :: JupyterLab :: Extensions :: Prebuilt",
],
)
try:
from jupyter_packaging import (
wrap_installers,
npm_builder,
get_data_files
)
post_develop = npm_builder(
build_cmd="install:extension", source_dir="src", build_dir=lab_path
)
setup_args["cmdclass"] = wrap_installers(post_develop=post_develop, ensured_targets=ensured_targets)
setup_args["data_files"] = get_data_files(data_files_spec)
except ImportError as e:
import logging
logging.basicConfig(format="%(levelname)s: %(message)s")
logging.warning("Build tool `jupyter-packaging` is missing. Install it with pip or conda.")
if not ("--name" in sys.argv or "--version" in sys.argv):
raise e
if __name__ == "__main__":
setuptools.setup(**setup_args)
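# --- Hedged development notes (not part of the original file) ----------------
# For a prebuilt JupyterLab 3 extension laid out like this, the usual local
# development flow is roughly the following; the exact commands are assumptions
# based on the standard extension cookiecutter and may differ for this project:
#
#   pip install -e .
#   jupyter labextension develop . --overwrite
#   jlpm run build        # rebuild the TypeScript sources in src/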
|
py | b40c8b68553c3fb9dc7b2a67f08bd84ee43df78d | # encoding: utf-8
# module renderdoc
# from P:\1-Scripts\_Python\Py-Autocomplete\renderdoc.pyd
# by generator 1.146
# no doc
# imports
import enum as __enum
from .SwigPyObject import SwigPyObject
class D3D11StreamOutBind(SwigPyObject):
""" Describes a binding on the D3D11 stream-out stage. """
def __eq__(self, *args, **kwargs): # real signature unknown
""" Return self==value. """
pass
def __ge__(self, *args, **kwargs): # real signature unknown
""" Return self>=value. """
pass
def __gt__(self, *args, **kwargs): # real signature unknown
""" Return self>value. """
pass
def __hash__(self, *args, **kwargs): # real signature unknown
""" Return hash(self). """
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
def __le__(self, *args, **kwargs): # real signature unknown
""" Return self<=value. """
pass
def __lt__(self, *args, **kwargs): # real signature unknown
""" Return self<value. """
pass
@staticmethod # known case of __new__
def __new__(*args, **kwargs): # real signature unknown
""" Create and return a new object. See help(type) for accurate signature. """
pass
def __ne__(self, *args, **kwargs): # real signature unknown
""" Return self!=value. """
pass
byteOffset = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""The byte offset of the stream-output binding."""
resourceId = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""The :class:`ResourceId` of the buffer."""
this = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
thisown = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
__dict__ = None # (!) real value is ''
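# Hedged usage sketch (not part of the generated stub): with a live replay, the
# stream-out bindings are typically reached through the D3D11 pipeline state,
# e.g. controller.GetD3D11PipelineState(); the exact attribute path below is an
# assumption about the renderdoc API version in use.
#
# state = controller.GetD3D11PipelineState()
# for so_bind in state.streamOut.outputs:
#     print(so_bind.resourceId, so_bind.byteOffset)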
|
py | b40c8cba75a68ecef4eeca385c059573d628405d | import random
def rannum(x,y):
for i in range(0,3):
print(random.randint(x,y),end=" ")
a=int(input("Enter Starting Limit\n"))
b=int(input("Enter End Limit\n"))
print("3 Random Numbers between",a,"and",b,"is :",end=" ")
rannum(a,b)
|
py | b40c8d026516d55310a9823e5058969e3f7c9ca0 | #!/usr/bin/env python3
# Copyright (c) 2014-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the wallet."""
from test_framework.test_framework import TitcoinTestFramework
from test_framework.util import *
class WalletTest(TitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 4
self.setup_clean_chain = True
def setup_network(self):
self.add_nodes(4)
self.start_node(0)
self.start_node(1)
self.start_node(2)
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
self.sync_all([self.nodes[0:3]])
def check_fee_amount(self, curr_balance, balance_with_fee, fee_per_byte, tx_size):
"""Return curr_balance after asserting the fee was in range"""
fee = balance_with_fee - curr_balance
assert_fee_amount(fee, tx_size, fee_per_byte * 1000)
return curr_balance
def get_vsize(self, txn):
return self.nodes[0].decoderawtransaction(txn)['vsize']
def run_test(self):
# Check that there's no UTXO on none of the nodes
assert_equal(len(self.nodes[0].listunspent()), 0)
assert_equal(len(self.nodes[1].listunspent()), 0)
assert_equal(len(self.nodes[2].listunspent()), 0)
self.log.info("Mining blocks...")
self.nodes[0].generate(1)
walletinfo = self.nodes[0].getwalletinfo()
assert_equal(walletinfo['immature_balance'], 69)
assert_equal(walletinfo['balance'], 0)
self.sync_all([self.nodes[0:3]])
self.nodes[1].generate(101)
self.sync_all([self.nodes[0:3]])
assert_equal(self.nodes[0].getbalance(), 69)
assert_equal(self.nodes[1].getbalance(), 69)
assert_equal(self.nodes[2].getbalance(), 0)
# Check that only first and second nodes have UTXOs
utxos = self.nodes[0].listunspent()
assert_equal(len(utxos), 1)
assert_equal(len(self.nodes[1].listunspent()), 1)
assert_equal(len(self.nodes[2].listunspent()), 0)
self.log.info("test gettxout")
confirmed_txid, confirmed_index = utxos[0]["txid"], utxos[0]["vout"]
# First, outputs that are unspent both in the chain and in the
# mempool should appear with or without include_mempool
txout = self.nodes[0].gettxout(txid=confirmed_txid, n=confirmed_index, include_mempool=False)
assert_equal(txout['value'], 69)
txout = self.nodes[0].gettxout(txid=confirmed_txid, n=confirmed_index, include_mempool=True)
assert_equal(txout['value'], 69)
# Send 21 TIT from 0 to 2 using sendtoaddress call.
# Locked memory should use at least 32 bytes to sign each transaction
self.log.info("test getmemoryinfo")
memory_before = self.nodes[0].getmemoryinfo()
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 11)
mempool_txid = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 10)
memory_after = self.nodes[0].getmemoryinfo()
assert(memory_before['locked']['used'] + 64 <= memory_after['locked']['used'])
self.log.info("test gettxout (second part)")
# utxo spent in mempool should be visible if you exclude mempool
# but invisible if you include mempool
txout = self.nodes[0].gettxout(confirmed_txid, confirmed_index, False)
assert_equal(txout['value'], 69)
txout = self.nodes[0].gettxout(confirmed_txid, confirmed_index, True)
assert txout is None
# new utxo from mempool should be invisible if you exclude mempool
# but visible if you include mempool
txout = self.nodes[0].gettxout(mempool_txid, 0, False)
assert txout is None
txout1 = self.nodes[0].gettxout(mempool_txid, 0, True)
txout2 = self.nodes[0].gettxout(mempool_txid, 1, True)
# note the mempool tx will have randomly assigned indices
# but 10 will go to node2 and the rest will go to node0
balance = self.nodes[0].getbalance()
assert_equal(set([txout1['value'], txout2['value']]), set([10, balance]))
walletinfo = self.nodes[0].getwalletinfo()
assert_equal(walletinfo['immature_balance'], 0)
# Have node0 mine a block, thus it will collect its own fee.
self.nodes[0].generate(1)
self.sync_all([self.nodes[0:3]])
# Exercise locking of unspent outputs
unspent_0 = self.nodes[2].listunspent()[0]
unspent_0 = {"txid": unspent_0["txid"], "vout": unspent_0["vout"]}
assert_raises_rpc_error(-8, "Invalid parameter, expected locked output", self.nodes[2].lockunspent, True, [unspent_0])
self.nodes[2].lockunspent(False, [unspent_0])
assert_raises_rpc_error(-8, "Invalid parameter, output already locked", self.nodes[2].lockunspent, False, [unspent_0])
assert_raises_rpc_error(-4, "Insufficient funds", self.nodes[2].sendtoaddress, self.nodes[2].getnewaddress(), 20)
assert_equal([unspent_0], self.nodes[2].listlockunspent())
self.nodes[2].lockunspent(True, [unspent_0])
assert_equal(len(self.nodes[2].listlockunspent()), 0)
assert_raises_rpc_error(-8, "Invalid parameter, unknown transaction",
self.nodes[2].lockunspent, False,
[{"txid": "0000000000000000000000000000000000", "vout": 0}])
assert_raises_rpc_error(-8, "Invalid parameter, vout index out of bounds",
self.nodes[2].lockunspent, False,
[{"txid": unspent_0["txid"], "vout": 999}])
# Have node1 generate 100 blocks (so node0 can recover the fee)
self.nodes[1].generate(100)
self.sync_all([self.nodes[0:3]])
# node0 should end up with 138 TIT in block rewards plus fees, but
# minus the 21 plus fees sent to node2
assert_equal(self.nodes[0].getbalance(), 138-21)
assert_equal(self.nodes[2].getbalance(), 21)
# Node0 should have two unspent outputs.
# Create a couple of transactions to send them to node2, submit them through
# node1, and make sure both node0 and node2 pick them up properly:
node0utxos = self.nodes[0].listunspent(1)
assert_equal(len(node0utxos), 2)
# create both transactions
txns_to_send = []
for utxo in node0utxos:
inputs = []
outputs = {}
inputs.append({ "txid" : utxo["txid"], "vout" : utxo["vout"]})
outputs[self.nodes[2].getnewaddress("from1")] = utxo["amount"] - 3
raw_tx = self.nodes[0].createrawtransaction(inputs, outputs)
txns_to_send.append(self.nodes[0].signrawtransaction(raw_tx))
# Have node 1 (miner) send the transactions
self.nodes[1].sendrawtransaction(txns_to_send[0]["hex"], True)
self.nodes[1].sendrawtransaction(txns_to_send[1]["hex"], True)
# Have node1 mine a block to confirm transactions:
self.nodes[1].generate(1)
self.sync_all([self.nodes[0:3]])
assert_equal(self.nodes[0].getbalance(), 0)
assert_equal(self.nodes[2].getbalance(), 132)
assert_equal(self.nodes[2].getbalance("from1"), 132-21)
# Verify that a spent output cannot be locked anymore
spent_0 = {"txid": node0utxos[0]["txid"], "vout": node0utxos[0]["vout"]}
assert_raises_rpc_error(-8, "Invalid parameter, expected unspent output", self.nodes[0].lockunspent, False, [spent_0])
# Send 10 TIT normal
address = self.nodes[0].getnewaddress("test")
fee_per_byte = Decimal('0.001') / 1000
self.nodes[2].settxfee(fee_per_byte * 1000)
txid = self.nodes[2].sendtoaddress(address, 10, "", "", False)
self.nodes[2].generate(1)
self.sync_all([self.nodes[0:3]])
node_2_bal = self.check_fee_amount(self.nodes[2].getbalance(), Decimal('122'), fee_per_byte, self.get_vsize(self.nodes[2].getrawtransaction(txid)))
assert_equal(self.nodes[0].getbalance(), Decimal('10'))
# Send 10 TIT with subtract fee from amount
txid = self.nodes[2].sendtoaddress(address, 10, "", "", True)
self.nodes[2].generate(1)
self.sync_all([self.nodes[0:3]])
node_2_bal -= Decimal('10')
assert_equal(self.nodes[2].getbalance(), node_2_bal)
node_0_bal = self.check_fee_amount(self.nodes[0].getbalance(), Decimal('20'), fee_per_byte, self.get_vsize(self.nodes[2].getrawtransaction(txid)))
# Sendmany 10 TIT
txid = self.nodes[2].sendmany('from1', {address: 10}, 0, "", [])
self.nodes[2].generate(1)
self.sync_all([self.nodes[0:3]])
node_0_bal += Decimal('10')
node_2_bal = self.check_fee_amount(self.nodes[2].getbalance(), node_2_bal - Decimal('10'), fee_per_byte, self.get_vsize(self.nodes[2].getrawtransaction(txid)))
assert_equal(self.nodes[0].getbalance(), node_0_bal)
# Sendmany 10 TIT with subtract fee from amount
txid = self.nodes[2].sendmany('from1', {address: 10}, 0, "", [address])
self.nodes[2].generate(1)
self.sync_all([self.nodes[0:3]])
node_2_bal -= Decimal('10')
assert_equal(self.nodes[2].getbalance(), node_2_bal)
node_0_bal = self.check_fee_amount(self.nodes[0].getbalance(), node_0_bal + Decimal('10'), fee_per_byte, self.get_vsize(self.nodes[2].getrawtransaction(txid)))
# Test ResendWalletTransactions:
# Create a couple of transactions, then start up a fourth
# node (nodes[3]) and ask nodes[0] to rebroadcast.
# EXPECT: nodes[3] should have those transactions in its mempool.
txid1 = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 1)
txid2 = self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 1)
sync_mempools(self.nodes[0:2])
self.start_node(3)
connect_nodes_bi(self.nodes, 0, 3)
sync_blocks(self.nodes)
relayed = self.nodes[0].resendwallettransactions()
assert_equal(set(relayed), {txid1, txid2})
sync_mempools(self.nodes)
assert(txid1 in self.nodes[3].getrawmempool())
# Exercise balance rpcs
assert_equal(self.nodes[0].getwalletinfo()["unconfirmed_balance"], 1)
assert_equal(self.nodes[0].getunconfirmedbalance(), 1)
#check if we can list zero value tx as available coins
#1. create rawtx
#2. hex-changed one output to 0.0
#3. sign and send
#4. check if recipient (node0) can list the zero value tx
usp = self.nodes[1].listunspent()
inputs = [{"txid":usp[0]['txid'], "vout":usp[0]['vout']}]
outputs = {self.nodes[1].getnewaddress(): 68.998, self.nodes[0].getnewaddress(): 11.11}
rawTx = self.nodes[1].createrawtransaction(inputs, outputs).replace("c0833842", "00000000") #replace 11.11 with 0.0 (int32)
decRawTx = self.nodes[1].decoderawtransaction(rawTx)
signedRawTx = self.nodes[1].signrawtransaction(rawTx)
decRawTx = self.nodes[1].decoderawtransaction(signedRawTx['hex'])
zeroValueTxid= decRawTx['txid']
self.nodes[1].sendrawtransaction(signedRawTx['hex'])
self.sync_all()
self.nodes[1].generate(1) #mine a block
self.sync_all()
unspentTxs = self.nodes[0].listunspent() #zero value tx must be in listunspents output
found = False
for uTx in unspentTxs:
if uTx['txid'] == zeroValueTxid:
found = True
assert_equal(uTx['amount'], Decimal('0'))
assert(found)
#do some -walletbroadcast tests
self.stop_nodes()
self.start_node(0, ["-walletbroadcast=0"])
self.start_node(1, ["-walletbroadcast=0"])
self.start_node(2, ["-walletbroadcast=0"])
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
self.sync_all([self.nodes[0:3]])
txIdNotBroadcasted = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 2)
txObjNotBroadcasted = self.nodes[0].gettransaction(txIdNotBroadcasted)
self.nodes[1].generate(1) #mine a block, tx should not be in there
self.sync_all([self.nodes[0:3]])
assert_equal(self.nodes[2].getbalance(), node_2_bal) #should not be changed because tx was not broadcasted
#now broadcast from another node, mine a block, sync, and check the balance
self.nodes[1].sendrawtransaction(txObjNotBroadcasted['hex'])
self.nodes[1].generate(1)
self.sync_all([self.nodes[0:3]])
node_2_bal += 2
txObjNotBroadcasted = self.nodes[0].gettransaction(txIdNotBroadcasted)
assert_equal(self.nodes[2].getbalance(), node_2_bal)
#create another tx
txIdNotBroadcasted = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 2)
#restart the nodes with -walletbroadcast=1
self.stop_nodes()
self.start_node(0)
self.start_node(1)
self.start_node(2)
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
sync_blocks(self.nodes[0:3])
self.nodes[0].generate(1)
sync_blocks(self.nodes[0:3])
node_2_bal += 2
#tx should be added to balance because after restarting the nodes tx should be broadcast
assert_equal(self.nodes[2].getbalance(), node_2_bal)
#send a tx with value in a string (PR#6380 +)
txId = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), "2")
txObj = self.nodes[0].gettransaction(txId)
assert_equal(txObj['amount'], Decimal('-2'))
txId = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), "0.0001")
txObj = self.nodes[0].gettransaction(txId)
assert_equal(txObj['amount'], Decimal('-0.0001'))
#check if JSON parser can handle scientific notation in strings
txId = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), "1e-4")
txObj = self.nodes[0].gettransaction(txId)
assert_equal(txObj['amount'], Decimal('-0.0001'))
# This will raise an exception because the amount type is wrong
assert_raises_rpc_error(-3, "Invalid amount", self.nodes[0].sendtoaddress, self.nodes[2].getnewaddress(), "1f-4")
# This will raise an exception since generate does not accept a string
assert_raises_rpc_error(-1, "not an integer", self.nodes[0].generate, "2")
# Import address and private key to check correct behavior of spendable unspents
# 1. Send some coins to generate new UTXO
address_to_import = self.nodes[2].getnewaddress()
txid = self.nodes[0].sendtoaddress(address_to_import, 1)
self.nodes[0].generate(1)
self.sync_all([self.nodes[0:3]])
# 2. Import address from node2 to node1
self.nodes[1].importaddress(address_to_import)
# 3. Validate that the imported address is watch-only on node1
assert(self.nodes[1].validateaddress(address_to_import)["iswatchonly"])
# 4. Check that the unspents after import are not spendable
assert_array_result(self.nodes[1].listunspent(),
{"address": address_to_import},
{"spendable": False})
# 5. Import private key of the previously imported address on node1
priv_key = self.nodes[2].dumpprivkey(address_to_import)
self.nodes[1].importprivkey(priv_key)
# 6. Check that the unspents are now spendable on node1
assert_array_result(self.nodes[1].listunspent(),
{"address": address_to_import},
{"spendable": True})
# Mine a block from node0 to an address from node1
cbAddr = self.nodes[1].getnewaddress()
blkHash = self.nodes[0].generatetoaddress(1, cbAddr)[0]
cbTxId = self.nodes[0].getblock(blkHash)['tx'][0]
self.sync_all([self.nodes[0:3]])
# Check that the txid and balance is found by node1
self.nodes[1].gettransaction(cbTxId)
# check if wallet or blockchain maintenance changes the balance
self.sync_all([self.nodes[0:3]])
blocks = self.nodes[0].generate(2)
self.sync_all([self.nodes[0:3]])
balance_nodes = [self.nodes[i].getbalance() for i in range(3)]
block_count = self.nodes[0].getblockcount()
# Check modes:
# - True: unicode escaped as \u....
# - False: unicode directly as UTF-8
for mode in [True, False]:
self.nodes[0].ensure_ascii = mode
# unicode check: Basic Multilingual Plane, Supplementary Plane respectively
for s in [u'рыба', u'𝅘𝅥𝅯']:
addr = self.nodes[0].getaccountaddress(s)
label = self.nodes[0].getaccount(addr)
assert_equal(label, s)
assert(s in self.nodes[0].listaccounts().keys())
self.nodes[0].ensure_ascii = True # restore to default
# maintenance tests
maintenance = [
'-rescan',
'-reindex',
'-zapwallettxes=1',
'-zapwallettxes=2',
# disabled until issue is fixed: https://github.com/bitcoin/bitcoin/issues/7463
# '-salvagewallet',
]
chainlimit = 6
for m in maintenance:
self.log.info("check " + m)
self.stop_nodes()
# set lower ancestor limit for later
self.start_node(0, [m, "-limitancestorcount="+str(chainlimit)])
self.start_node(1, [m, "-limitancestorcount="+str(chainlimit)])
self.start_node(2, [m, "-limitancestorcount="+str(chainlimit)])
if m == '-reindex':
# reindex will leave rpc warm up "early"; Wait for it to finish
wait_until(lambda: [block_count] * 3 == [self.nodes[i].getblockcount() for i in range(3)])
assert_equal(balance_nodes, [self.nodes[i].getbalance() for i in range(3)])
# Exercise listsinceblock with the last two blocks
coinbase_tx_1 = self.nodes[0].listsinceblock(blocks[0])
assert_equal(coinbase_tx_1["lastblock"], blocks[1])
assert_equal(len(coinbase_tx_1["transactions"]), 1)
assert_equal(coinbase_tx_1["transactions"][0]["blockhash"], blocks[1])
assert_equal(len(self.nodes[0].listsinceblock(blocks[1])["transactions"]), 0)
# ==Check that wallet prefers to use coins that don't exceed mempool limits =====
# Get all non-zero utxos together
chain_addrs = [self.nodes[0].getnewaddress(), self.nodes[0].getnewaddress()]
singletxid = self.nodes[0].sendtoaddress(chain_addrs[0], self.nodes[0].getbalance(), "", "", True)
self.nodes[0].generate(1)
node0_balance = self.nodes[0].getbalance()
# Split into two chains
rawtx = self.nodes[0].createrawtransaction([{"txid":singletxid, "vout":0}], {chain_addrs[0]:node0_balance/2-Decimal('0.01'), chain_addrs[1]:node0_balance/2-Decimal('0.01')})
signedtx = self.nodes[0].signrawtransaction(rawtx)
singletxid = self.nodes[0].sendrawtransaction(signedtx["hex"])
self.nodes[0].generate(1)
# Make a long chain of unconfirmed payments without hitting mempool limit
# Each tx we make leaves only one output of change on a chain 1 longer
# Since the amount to send is always much less than the outputs, we only ever need one output
# So we should be able to generate exactly chainlimit txs for each original output
sending_addr = self.nodes[1].getnewaddress()
txid_list = []
for i in range(chainlimit*2):
txid_list.append(self.nodes[0].sendtoaddress(sending_addr, Decimal('0.0001')))
assert_equal(self.nodes[0].getmempoolinfo()['size'], chainlimit*2)
assert_equal(len(txid_list), chainlimit*2)
# Without walletrejectlongchains, we will still generate a txid
# The tx will be stored in the wallet but not accepted to the mempool
extra_txid = self.nodes[0].sendtoaddress(sending_addr, Decimal('0.0001'))
assert(extra_txid not in self.nodes[0].getrawmempool())
assert(extra_txid in [tx["txid"] for tx in self.nodes[0].listtransactions()])
self.nodes[0].abandontransaction(extra_txid)
total_txs = len(self.nodes[0].listtransactions("*",99999))
# Try with walletrejectlongchains
# Double chain limit but require combining inputs, so we pass SelectCoinsMinConf
self.stop_node(0)
self.start_node(0, extra_args=["-walletrejectlongchains", "-limitancestorcount="+str(2*chainlimit)])
# wait for loadmempool
timeout = 10
while (timeout > 0 and len(self.nodes[0].getrawmempool()) < chainlimit*2):
time.sleep(0.5)
timeout -= 0.5
assert_equal(len(self.nodes[0].getrawmempool()), chainlimit*2)
node0_balance = self.nodes[0].getbalance()
# With walletrejectlongchains we will not create the tx and store it in our wallet.
assert_raises_rpc_error(-4, "Transaction has too long of a mempool chain", self.nodes[0].sendtoaddress, sending_addr, node0_balance - Decimal('0.01'))
# Verify nothing new in wallet
assert_equal(total_txs, len(self.nodes[0].listtransactions("*",99999)))
if __name__ == '__main__':
WalletTest().main()
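# Hedged note (not part of the original test): functional tests in this framework
# are normally run either directly (./wallet.py) or through the test runner, e.g.
# test/functional/test_runner.py wallet.py; the exact file name and path in this
# fork are assumptions.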
|
py | b40c8d3aea403b2ae501ce1f00ec16118b5e4c63 | from YourApp.tests import YourAppBaseTestCase, groups, END_TO_END_TESTS_GROUP
class E2ETestCases(YourAppBaseTestCase):
"""Test cases to verify if the endpoints can be used safelly"""
@groups(END_TO_END_TESTS_GROUP)
def test_alive(self):
# When alive endpoint is used
result = self.app.get("/v1/alive")
# Then a 200 is answered
self.assertEqual("200 OK", result.status)
self.assertEqual("Alive", result.data.decode())
@groups(END_TO_END_TESTS_GROUP)
def test_404(self):
# When a fake endpoint is used
result = self.app.post("/v1/fake")
# Then a 404 is answered
self.assertEqual("404 NOT FOUND", result.status)
|
py | b40c8e304c8336d8998e97cbb75a4c4c42d86f74 | #!/usr/bin/env python3
import time
import random
from cereal import car, log
import cereal.messaging as messaging
from common.realtime import DT_CTRL
from selfdrive.car.honda.interface import CarInterface
from selfdrive.controls.lib.events import ET, Events
from selfdrive.controls.lib.alertmanager import AlertManager
from selfdrive.manager.process_config import managed_processes
EventName = car.CarEvent.EventName
def randperc() -> float:
return 100. * random.random()
def cycle_alerts(duration=200, is_metric=False):
# all alerts
#alerts = list(EVENTS.keys())
# this plays each type of audible alert
alerts = [
(EventName.buttonEnable, ET.ENABLE),
(EventName.buttonCancel, ET.USER_DISABLE),
(EventName.wrongGear, ET.NO_ENTRY),
(EventName.vehicleModelInvalid, ET.SOFT_DISABLE),
(EventName.accFaulted, ET.IMMEDIATE_DISABLE),
# DM sequence
(EventName.preDriverDistracted, ET.WARNING),
(EventName.promptDriverDistracted, ET.WARNING),
(EventName.driverDistracted, ET.WARNING),
]
# debug alerts
alerts = [
#(EventName.highCpuUsage, ET.NO_ENTRY),
#(EventName.lowMemory, ET.PERMANENT),
#(EventName.overheat, ET.PERMANENT),
#(EventName.outOfSpace, ET.PERMANENT),
#(EventName.modeldLagging, ET.PERMANENT),
#(EventName.processNotRunning, ET.NO_ENTRY),
#(EventName.commIssue, ET.NO_ENTRY),
#(EventName.calibrationInvalid, ET.PERMANENT),
(EventName.cameraMalfunction, ET.PERMANENT),
(EventName.cameraFrameRate, ET.PERMANENT),
]
cameras = ['roadCameraState', 'wideRoadCameraState', 'driverCameraState']
CS = car.CarState.new_message()
CP = CarInterface.get_params("HONDA CIVIC 2016")
sm = messaging.SubMaster(['deviceState', 'pandaStates', 'roadCameraState', 'modelV2', 'liveCalibration',
'driverMonitoringState', 'longitudinalPlan', 'lateralPlan', 'liveLocationKalman',
'managerState'] + cameras)
pm = messaging.PubMaster(['controlsState', 'pandaStates', 'deviceState'])
events = Events()
AM = AlertManager()
frame = 0
while True:
for alert, et in alerts:
events.clear()
events.add(alert)
sm['deviceState'].freeSpacePercent = randperc()
sm['deviceState'].memoryUsagePercent = int(randperc())
sm['deviceState'].cpuTempC = [randperc() for _ in range(3)]
sm['deviceState'].gpuTempC = [randperc() for _ in range(3)]
sm['deviceState'].cpuUsagePercent = [int(randperc()) for _ in range(8)]
sm['modelV2'].frameDropPerc = randperc()
if random.random() > 0.25:
sm['modelV2'].velocity.x = [random.random(), ]
if random.random() > 0.25:
CS.vEgo = random.random()
procs = [p.get_process_state_msg() for p in managed_processes.values()]
random.shuffle(procs)
for i in range(random.randint(0, 10)):
procs[i].shouldBeRunning = True
sm['managerState'].processes = procs
sm['liveCalibration'].rpyCalib = [-1 * random.random() for _ in range(random.randint(0, 3))]
for s in sm.data.keys():
prob = 0.3 if s in cameras else 0.08
sm.alive[s] = random.random() > prob
sm.valid[s] = random.random() > prob
sm.freq_ok[s] = random.random() > prob
a = events.create_alerts([et, ], [CP, CS, sm, is_metric, 0])
AM.add_many(frame, a)
alert = AM.process_alerts(frame, [])
print(alert)
for _ in range(duration):
dat = messaging.new_message()
dat.init('controlsState')
dat.controlsState.enabled = False
if alert:
dat.controlsState.alertText1 = alert.alert_text_1
dat.controlsState.alertText2 = alert.alert_text_2
dat.controlsState.alertSize = alert.alert_size
dat.controlsState.alertStatus = alert.alert_status
dat.controlsState.alertBlinkingRate = alert.alert_rate
dat.controlsState.alertType = alert.alert_type
dat.controlsState.alertSound = alert.audible_alert
pm.send('controlsState', dat)
dat = messaging.new_message()
dat.init('deviceState')
dat.deviceState.started = True
pm.send('deviceState', dat)
dat = messaging.new_message('pandaStates', 1)
dat.pandaStates[0].ignitionLine = True
dat.pandaStates[0].pandaType = log.PandaState.PandaType.uno
pm.send('pandaStates', dat)
frame += 1
time.sleep(DT_CTRL)
if __name__ == '__main__':
cycle_alerts()
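# Hedged note (not part of the original script): this is a debug helper; running
# it while the openpilot UI process is up lets the published controlsState /
# deviceState / pandaStates messages drive the on-screen alerts, e.g.
#   ./cycle_alerts.py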
|
py | b40c8e9ee287be5fbb7f64f48723edfdeae237b4 | from nose.tools import eq_
import mock
from django.test import TestCase
from airmozilla.manage import scraper
class _Parsed(object):
def __init__(self, content, status=200):
if content:
self.content = {'content': content}
else:
self.content = {}
self.status = status
class _Response(object):
def __init__(self, content, status_code=200):
self.content = self.text = content
self.status_code = status_code
SAMPLE_INTRANET_HTML = u"""<!doctype html>
<head><title>Title</title></head>
<div id="mw-content-text">
<h2>H2 Title</h2>
<p>Test Content</p>
</div>
</body>
</html>"""
class TestScraper(TestCase):
def test_get_urls(self):
text = """
Some junk
http://airmozilla/manage/events/1068/ stuff
https://etherpad.mozilla.org/sumo-mobile
hello, this is madness
https://docs.python.org/2/library/urlparse.html..
madness I say https://github.com/mozilla/airmozilla........
yes http://blog.mozilla.org/devtools/.
"""
urls = list(scraper.get_urls(text))
eq_(
urls,
[
'http://airmozilla/manage/events/1068/',
'https://etherpad.mozilla.org/sumo-mobile',
'https://docs.python.org/2/library/urlparse.html',
'https://github.com/mozilla/airmozilla',
'http://blog.mozilla.org/devtools/'
]
)
@mock.patch('readability.ParserClient')
def test_get_content_readability(self, mocked_parser_client):
parser = mock.Mock()
def mocked_get_article_content(url):
return _Parsed('<p>Test content</p>')
parser.get_article_content = mocked_get_article_content
mocked_parser_client.return_value = parser
url = 'http://doesnotexist/path'
with self.settings(READABILITY_PARSER_KEY='abc123'):
content, status = scraper.get_content_readability(url)
eq_(content, 'Test content')
eq_(status, 200)
# or use the scrape_url()
result = scraper.scrape_urls([url])
eq_(result['text'], 'Test content')
eq_(result['results'][0], {
'worked': True,
'status': 200,
'url': url
})
with self.settings(READABILITY_PARSER_KEY=None):
content, status = scraper.get_content_readability(url)
eq_(content, None)
eq_(status, 'No READABILITY_PARSER_KEY setting set up')
@mock.patch('readability.ParserClient')
def test_get_content_readability_failed(self, mocked_parser_client):
parser = mock.Mock()
def mocked_get_article_content(url):
return _Parsed(None, status=500)
parser.get_article_content = mocked_get_article_content
mocked_parser_client.return_value = parser
url = 'http://doesnotexist/path'
with self.settings(READABILITY_PARSER_KEY='abc123'):
content, status = scraper.get_content_readability(url)
eq_(content, '')
eq_(status, 500)
@mock.patch('requests.get')
def test_get_content_intranet(self, rget):
def mocked_get(url, **options):
return _Response(
SAMPLE_INTRANET_HTML,
200
)
rget.side_effect = mocked_get
url = 'https://intranet.mozilla.org/path'
scrape_credentials = {
('foo', 'bar'): ['intranet.mozilla.org'],
}
with self.settings(SCRAPE_CREDENTIALS=scrape_credentials):
content, status = scraper.get_content_intranet(url)
eq_(status, 200)
eq_(content, 'H2 Title\nTest Content')
# or use the scrape_url()
result = scraper.scrape_urls([url])
eq_(result['text'], 'H2 Title\nTest Content')
eq_(result['results'][0], {
'worked': True,
'status': 200,
'url': url
})
with self.settings(SCRAPE_CREDENTIALS={}):
content, status = scraper.get_content_intranet(url)
eq_(status, 'No credentials set up for intranet.mozilla.org')
eq_(content, None)
@mock.patch('requests.get')
def test_get_content_etherpad(self, rget):
def mocked_get(url, **options):
eq_(
url,
'https://etherpad.mozilla.org/ep/pad/export/foo-bar/latest?'
'format=txt'
)
return _Response(
"Content here",
200
)
rget.side_effect = mocked_get
url = 'http://etherpad.mozilla.org/foo-bar'
content, status = scraper.get_content_etherpad(url)
eq_(status, 200)
eq_(content, 'Content here')
# or use the scrape_url()
result = scraper.scrape_urls([url])
eq_(result['text'], 'Content here')
eq_(result['results'][0], {
'worked': True,
'status': 200,
'url': url
})
|
py | b40c8ee9ada90b97c0399360989a7f58da0b2286 | #
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
class nsrunningconfig_args :
"""Provides additional arguments required for fetching the nsrunningconfig resource."""
def __init__(self) :
self._withdefaults = False
@property
def withdefaults(self) :
"""Include default values of parameters that have not been explicitly configured. If this argument is disabled, such parameters are not included."""
try :
return self._withdefaults
except Exception as e:
raise e
@withdefaults.setter
def withdefaults(self, withdefaults) :
"""Include default values of parameters that have not been explicitly configured. If this argument is disabled, such parameters are not included.
:param withdefaults:
"""
try :
self._withdefaults = withdefaults
except Exception as e:
raise e
|
py | b40c8fadb4fdeace6895864e6b8db54b0e74d711 | """
Gregory Way 2017
PanCancer Classifier
tcga_util.py
Usage: For import only
"""
def get_args():
"""
Get arguments for the main pancancer classifier script
"""
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-g', '--genes',
help='Comma separated string of HUGO gene symbols')
parser.add_argument('-t', '--diseases', default='Auto',
help='Comma sep string of TCGA disease acronyms. '
'If no arguments are passed, filtering will '
'default to options given in --filter_count and '
'--filter_prop.')
parser.add_argument('-f', '--folds', default='5', type=int,
help='Number of cross validation folds to perform')
parser.add_argument('-d', '--drop', action='store_true',
help='Decision to drop input genes from X matrix')
parser.add_argument('-u', '--copy_number', action='store_true',
help='Supplement Y matrix with copy number events')
parser.add_argument('-c', '--filter_count', default=15, type=int,
help='Min number of mutations in diseases to include')
parser.add_argument('-p', '--filter_prop', default=0.05, type=float,
help='Min proportion of positives to include disease')
parser.add_argument('-n', '--num_features', default=8000, type=int,
help='Number of MAD genes to include in classifier')
parser.add_argument('-a', '--alphas', default='0.1,0.15,0.2,0.5,0.8,1',
help='the alphas for parameter sweep')
parser.add_argument('-l', '--l1_ratios', default='0,0.1,0.15,0.18,0.2,0.3',
help='the l1 ratios for parameter sweep')
parser.add_argument('-b', '--alt_genes', default='None',
help='alternative genes to test performance')
parser.add_argument('-s', '--alt_diseases', default="Auto",
help='The alternative diseases to test performance')
parser.add_argument('-i', '--alt_filter_count', default=15, type=int,
help='Min number of mutations in disease to include')
parser.add_argument('-r', '--alt_filter_prop', default=0.05, type=float,
help='Min proportion of positives to include disease')
parser.add_argument('-o', '--alt_folder', default='Auto',
help='Provide an alternative folder to save results')
parser.add_argument('-v', '--remove_hyper', action='store_true',
help='Remove hypermutated samples')
parser.add_argument('-k', '--keep_intermediate', action='store_true',
help='Keep intermediate ROC values for plotting')
parser.add_argument('-x', '--x_matrix', default='raw',
help='Filename of features to use in model')
parser.add_argument('-e', '--shuffled', action='store_true',
help='Shuffle the input gene exprs matrix alongside')
parser.add_argument('--shuffled_before_training', action='store_true',
help='Shuffle the gene exprs matrix before training')
parser.add_argument('-m', '--no_mutation', action='store_false',
help='Remove mutation data from y matrix')
parser.add_argument('-z', '--drop_rasopathy', action='store_true',
help='Decision to drop rasopathy genes from X matrix')
parser.add_argument('-q', '--drop_expression', action='store_true',
help='Decision to drop gene expression values from X')
parser.add_argument('-j', '--drop_covariates', action='store_true',
help='Decision to drop covariate information from X')
args = parser.parse_args()
return args
def get_threshold_metrics(y_true, y_pred, drop_intermediate=False,
disease='all'):
"""
Retrieve true/false positive rates and auroc/aupr for class predictions
Arguments:
y_true - an array of gold standard mutation status
y_pred - an array of predicted mutation status
disease - a string that includes the corresponding TCGA study acronym
Output:
dict of AUROC, AUPR, pandas dataframes of ROC and PR data, and cancer-type
"""
import pandas as pd
from sklearn.metrics import roc_auc_score, roc_curve
from sklearn.metrics import precision_recall_curve, average_precision_score
roc_columns = ['fpr', 'tpr', 'threshold']
pr_columns = ['precision', 'recall', 'threshold']
if drop_intermediate:
roc_items = zip(roc_columns,
roc_curve(y_true, y_pred, drop_intermediate=False))
else:
roc_items = zip(roc_columns, roc_curve(y_true, y_pred))
roc_df = pd.DataFrame.from_dict(dict(roc_items))
prec, rec, thresh = precision_recall_curve(y_true, y_pred)
pr_df = pd.DataFrame.from_records([prec, rec]).T
pr_df = pd.concat([pr_df, pd.Series(thresh)], ignore_index=True, axis=1)
pr_df.columns = pr_columns
auroc = roc_auc_score(y_true, y_pred, average='weighted')
aupr = average_precision_score(y_true, y_pred, average='weighted')
return {'auroc': auroc, 'aupr': aupr, 'roc_df': roc_df,
'pr_df': pr_df, 'disease': disease}
def integrate_copy_number(y, cancer_genes_df, genes, loss_df, gain_df,
include_mutation=True):
"""
Function to integrate copy number data to define gene activation or gene
inactivation events. Copy number loss results in gene inactivation events
and is important for tumor suppressor genes while copy number gain results
in gene activation events and is important for oncogenes.
Arguments:
y - pandas dataframe samples by genes where a 1 indicates event
cancer_genes_df - a dataframe listing bona fide cancer genes as defined by
the 20/20 rule in Vogelstein et al. 2013
genes - the input list of genes to build the classifier for
loss_df - a sample by gene dataframe listing copy number loss events
gain_df - a sample by gene dataframe listing copy number gain events
include_mutation - boolean to decide to include mutation status
"""
# Find if the input genes are in this master list
genes_sub = cancer_genes_df[cancer_genes_df['Gene Symbol'].isin(genes)]
# Add status to the Y matrix depending on if the gene is a tumor suppressor
# or an oncogene. An oncogene can be activated with copy number gains, but
# a tumor suppressor is inactivated with copy number loss
tumor_suppressor = genes_sub[genes_sub['Classification*'] == 'TSG']
oncogene = genes_sub[genes_sub['Classification*'] == 'Oncogene']
copy_loss_sub = loss_df[tumor_suppressor['Gene Symbol']]
copy_gain_sub = gain_df[oncogene['Gene Symbol']]
# Append to column names for visualization
copy_loss_sub.columns = [col + '_loss' for col in copy_loss_sub.columns]
copy_gain_sub.columns = [col + '_gain' for col in copy_gain_sub.columns]
# Add columns to y matrix
y = y.join(copy_loss_sub)
y = y.join(copy_gain_sub)
# Fill missing data with zero (measured mutation but not copy number)
y = y.fillna(0)
y = y.astype(int)
if not include_mutation:
y = y.drop(genes, axis=1)
return y
def shuffle_columns(gene):
"""
To be used in an `apply` pandas func to shuffle columns around a dataframe
Import only
"""
import numpy as np
return np.random.permutation(gene.tolist())
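# Hedged example (not part of the original module, which is import-only): the
# metrics helper can be exercised with a toy prediction vector; with these inputs
# sklearn's ROC computation gives an AUROC of 0.75.
#
# metrics = get_threshold_metrics([0, 0, 1, 1], [0.1, 0.4, 0.35, 0.8], disease='BRCA')
# round(metrics['auroc'], 2)   # 0.75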
|
py | b40c8ff499c15ba97b14feb0dc743c58992b1451 | from .card import Card
class Move:
def __init__(self, card: Card, pile_id: str, top_card: Card):
self.card = card
self.pile_id = pile_id
self.top_card = top_card
self.count_up_pile = 'up' in self.pile_id
self.increment = (self.card - self.top_card) * (1 if self.count_up_pile else -1)
def __str__(self) -> str:
return f'(Card: {self.card.value}, Pile: {self.pile_id}, Inc: {self.increment})'
def __eq__(self, other: "Move") -> bool:
if (
self.card == other.card
and self.pile_id == other.pile_id
and self.increment == other.increment
and self.top_card == other.top_card
):
return True
return False
def __hash__(self):
return hash(str(self))
def __repr__(self) -> str:
return self.__str__()
def is_valid(self):
diff = self.card - self.top_card
if self.increment == 0:
return False
if self.count_up_pile and (diff > 0 or diff == -10):
return True
if not self.count_up_pile and (diff < 0 or diff == 10):
return True
return False
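# Hedged examples (not part of the original module; Card(int) construction and
# integer Card subtraction are assumptions based on how Card is used above):
#
# Move(Card(57), 'up_1', Card(52)).is_valid()   # True: higher card on an "up" pile
# Move(Card(42), 'up_1', Card(52)).is_valid()   # True: exactly ten lower (backwards-ten rule)
# Move(Card(50), 'up_1', Card(52)).is_valid()   # False: going down by 2 is not allowed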
|
py | b40c913f1aef27797fa67888a5170043023d2f6c | import gym
import marlenvs
from marlenvs.wrappers import NormalizeActWrapper, NormalizeObsWrapper, NormalizeRewWrapper
import os
from datetime import datetime
import argparse
import torch
import numpy as np
import runnamegen
from agent import Agents
from networks import Actor, Actor2, Actor3, Actor4, MADDPGCritic, MADDPGCritic2, MADDPGCritic3, MADDPGCritic4
from utils import AverageValueMeter, Parameters, CSVLogger
from noise_generators import GaussianNoise, UniformNoise, OrnsteinUhlenbeckProcess
import time
def train(params):
# choose environment
if params.env == "navigation":
env = gym.make("Navigation-v0", n_agents=params.n_agents, world_size=5, max_steps=100, tau=1.0, hold_steps=100)
continuous = True
elif params.env == "navigation_prepos":
init_landmark_pos = np.array([[(i+1)*(params.n_agents*5-0.5)/params.n_agents, 0.5] for i in range(params.n_agents)])
init_agent_pos = np.array([[(i+1)*(params.n_agents*5-0.5)/params.n_agents, params.n_agents*5 - 0.5] for i in range(params.n_agents)])
env = gym.make("Navigation-v0", n_agents=params.n_agents, world_size=5, max_steps=100, tau=1.0, hold_steps=100, init_agent_pos=init_agent_pos, init_landmark_pos=init_landmark_pos)
continuous = True
elif params.env == "switch":
env = gym.make("Switch-v0", height=5, width=10, view=3, flatten_obs = True)
continuous = False
elif params.env == "two_step":
env = gym.make("TwoStep-v0")
continuous = False
elif params.env == "two_step_cont":
env = gym.make("TwoStepCont-v0", n_agents=params.n_agents)
continuous = True
# normalizations
if params.normalize_actions == "0to1":
env = NormalizeActWrapper(env)
if params.normalize_observations == "0to1":
env = NormalizeObsWrapper(env)
if params.normalize_rewards == "0to1":
env = NormalizeRewWrapper(env)
elif params.normalize_rewards == "-1to0":
env = NormalizeRewWrapper(env, high = 0.0, low = -1.0, random_policy_zero=False)
elif params.normalize_rewards == "random_policy_zero":
env = NormalizeRewWrapper(env, high = 0.0, low = -1.0, random_policy_zero=True)
# get dimensions
act_dim = env.get_act_dim()
obs_dim = env.get_obs_dim()
# networks
if params.actor_type == "shared":
actor = Actor(act_dim=act_dim, obs_dim=obs_dim, history=params.history, hidden_dim=64)
elif params.actor_type == "independent":
actor = Actor2(act_dim=act_dim, obs_dim=obs_dim, history=params.history, hidden_dim=64, n_agents=params.n_agents)
if params.critic_type == "n2n":
critic = MADDPGCritic(n_agents=params.n_agents, act_dim=act_dim, obs_dim=obs_dim, history=params.history, hidden_dim=100)
elif params.critic_type == "n21":
critic = MADDPGCritic2(n_agents=params.n_agents, act_dim=act_dim, obs_dim=obs_dim, history=params.history, hidden_dim=100)
elif params.critic_type == "single_q":
critic = MADDPGCritic3(n_agents=params.n_agents, act_dim=act_dim, obs_dim=obs_dim, history=params.history, hidden_dim=100)
elif params.critic_type == "independent":
critic = MADDPGCritic4(n_agents=params.n_agents, act_dim=act_dim, obs_dim=obs_dim, history=params.history, hidden_dim=100)
if params.optim == "SGD":
optim = torch.optim.SGD
elif params.optim == "Adam":
optim = torch.optim.Adam
if params.noise_type == "gaussian":
noise_generator = GaussianNoise(sigma=params.exploration_noise, shape=(params.n_agents, act_dim))
if params.noise_type == "orn_uhl":
noise_generator = OrnsteinUhlenbeckProcess(sigma=params.exploration_noise, shape=(params.n_agents, act_dim))
if params.actor_type == "independent" and params.critic_type == "independent":
independent = True
else:
independent = False
# make agents
agents = Agents(actor=actor, critic=critic, optim=optim, noise_generator=noise_generator, n_agents=params.n_agents, obs_dim=obs_dim, act_dim=act_dim,
lr_critic=params.lr_critic, lr_actor=params.lr_actor, gamma=params.discount, tau=params.soft_update_tau,
history=params.history, batch_size=params.batch_size, continuous=continuous, independent=independent)
# make directory to log
log_dir = os.path.join("training_runs", params.run_name)
if not os.path.exists(log_dir):
os.makedirs(log_dir)
params.save_to_file(os.path.join(log_dir, "parameters.json"))
print(params)
loss_logger = CSVLogger(os.path.join(log_dir, "losses.csv"), header=["actor loss", "actor loss std", "critic loss",
"critic loss std", "average Q", "average Q std",
"batches trained", "transitions trained",
"episodes gathered", "transitions gathered"], log_time=True)
test_logger = CSVLogger(os.path.join(log_dir, "tests.csv"), header=["average episode return", "avg ep ret std", "batches trained",
"transitions trained", "episodes gathered", "transitions gathered"], log_time=True)
# main experiment part
episode_counter = 0
batch_counter = 0
transitions_gathered_counter = 0
transitions_trained_counter = 0
while batch_counter < params.total_batches:
# gather data -----------------------------------------------------
for episode in range(params.gather_episodes):
done = 0
obs = env.reset()
agents.buffer.history.store(torch.Tensor(obs))
while(not done):
with torch.no_grad():
obs = agents.buffer.history.get_new_obs()
obs = torch.swapaxes(obs, 0, 1)
act = agents.act(obs=obs, deterministic=False)
act = act.reshape((params.n_agents, act_dim))
n_obs, rew, done, _ = env.step(act.numpy())
agents.buffer.store(act, rew, torch.Tensor(n_obs), done)
transitions_gathered_counter += 1
episode_counter += 1
# train ------------------------------------------------------------
# enough transitions for one batch required
if len(agents.buffer) < params.batch_size:
continue
critic_loss = AverageValueMeter()
actor_loss = AverageValueMeter()
avg_Q = AverageValueMeter()
for batch in range(params.train_batches):
agents.train()
c_loss, a_loss, avq = agents.train_batch(optim_actor = True, update_targets = True)
actor_loss + a_loss
critic_loss + c_loss
avg_Q + avq
batch_counter += 1
transitions_trained_counter += params.batch_size
if batch_counter % params.save_weights_freq == 0:
save_dir = os.path.join(log_dir, f"batch_{batch_counter}")
if not os.path.exists(save_dir):
os.makedirs(save_dir)
agents.save(save_dir)
if batch_counter % params.log_loss_freq == 0 or batch_counter % params.test_freq == 0:
print(f"Episodes Gathered: {episode_counter} Batches Trained: {batch_counter}")
if batch_counter % params.log_loss_freq == 0:
print(f"Actor loss: {actor_loss}")
print(f"Critic loss: {critic_loss}")
loss_logger.log([actor_loss.mean(), actor_loss.std(), critic_loss.mean(), critic_loss.std(), avg_Q.mean(), avg_Q.std(),
batch_counter, transitions_trained_counter, episode_counter, transitions_gathered_counter])
actor_loss.reset()
critic_loss.reset()
avg_Q.reset()
# test ----------------------------------------------------------------
if batch_counter % params.test_freq == 0:
agents.eval()
episode_return = AverageValueMeter()
for episode in range(params.test_episodes):
done = 0
e_return = 0
agents.buffer.history.clear()
obs = env.reset()
agents.buffer.history.store(torch.Tensor(obs))
                while not done:
with torch.no_grad():
obs = agents.buffer.history.get_new_obs()
                        obs = torch.swapaxes(obs, 0, 1)
act = agents.act(obs=obs, deterministic=True).squeeze()
act = act.reshape((params.n_agents, act_dim))
n_obs, rew, done, _ = env.step(act.numpy())
agents.buffer.history.store(torch.Tensor(n_obs))
e_return += rew
episode_return + e_return
print(f"Episode return: {episode_return}")
test_logger.log([episode_return.mean(), episode_return.std(), batch_counter, transitions_trained_counter, episode_counter, transitions_gathered_counter])
episode_return.reset()
if batch_counter % params.log_loss_freq == 0 or batch_counter % params.test_freq == 0:
print('\n')
if __name__ == '__main__':
parser = argparse.ArgumentParser("Parser to Initiate Agent Training")
parser.add_argument('--env', type=str, help='Name of the environment', choices=['navigation','navigation_prepos', 'two_step', 'two_step_cont', 'switch'])
    parser.add_argument('--normalize_actions', type=str, help='Whether to normalize actions.', choices=["none", "0to1", "recommended"])
    parser.add_argument('--normalize_observations', type=str, help='Whether to normalize observations.', choices=["none", "0to1", "recommended"])
    parser.add_argument('--normalize_rewards', type=str, help='Whether to normalize rewards and which type of normalization to use.', choices=["none", "0to1", "-1to0", "random_policy_zero"])
parser.add_argument('--critic_type', type=str, help='Critic network type', choices=["n2n", "n21", "single_q", "independent"])
parser.add_argument('--actor_type', type=str, help='Actor network type', choices=["shared", "independent"])
parser.add_argument('--total_batches', type=int, help='Number of batches to train in total.')
parser.add_argument('--n_agents', type=int, help='Number of agents.')
    parser.add_argument('--exploration_noise', type=float, help='Exploration noise of the agent.')
parser.add_argument('--noise_type', type=str, help='Type of noise process.', choices=["gaussian", "orn_uhl"])
parser.add_argument('--lr_critic', type=float, help='Learning rate of critic.')
parser.add_argument('--lr_actor', type=float, help='Learning rate of actor.')
    parser.add_argument('--optim', type=str, help='The optimizer to use.', choices=["SGD", "Adam"])
parser.add_argument('--discount', type=float, help='Discount factor for episode reward.')
parser.add_argument('--soft_update_tau', type=float, help='Soft update parameter.')
parser.add_argument('--history', type=int, help='History length.')
parser.add_argument('--batch_size', type=int, help='Batch size.')
parser.add_argument('--gather_episodes', type=int, help='Number of consecutive episodes to gather data.')
parser.add_argument('--train_batches', type=int, help='Number of consecutive batches trained.')
parser.add_argument('--save_weights_freq', type=int, help='Frequency (batches) of saving the weights during training.')
parser.add_argument('--log_loss_freq', type=int, help='Frequency (batches) of logging the loss.')
parser.add_argument('--test_freq', type=int, help='Frequency (batches) of testing and logging the agent performance.')
    parser.add_argument('--test_episodes', type=int, help='Number of test episodes used to compute the average return.')
parser.add_argument('--cfg', type=str, help='path to json config file',
default='parameter_files/default.json')
args = parser.parse_args()
args.run_name = runnamegen.generate("_")
now = datetime.now()
args.date = now.strftime("%d/%m/%Y")
args.time = now.strftime("%H:%M:%S")
params = Parameters(args.cfg)
params.overload(args, ignore=['cfg'])
params.fix()
train(params)
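
# Example invocation (hypothetical script name and values; any flag omitted on
# the command line is expected to keep the value from the --cfg JSON file):
#   python train.py --cfg parameter_files/default.json --env navigation \
#       --n_agents 2 --batch_size 256 --total_batches 100000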
|
py | b40c916bbebcd631197ff303f3eb4a5b0556d82e | # Image handling imports
from skimage import data, io
from skimage.transform import rescale, resize
from skimage.util import img_as_ubyte
from skimage.color import rgba2rgb
# Excel manipulation imports
from openpyxl import Workbook
from openpyxl.styles import PatternFill, Color
from openpyxl.utils.cell import get_column_letter
# argument parsing imports
import argparse
# parse inputs
parser = argparse.ArgumentParser(description='Transform an image into an Excel workbook with cells as pixels.')
parser.add_argument("--input_image", "-i", type=str, help="Path to image to transform, if not included will run"
" astronaut test image.")
parser.add_argument("--output_f", "-o", type=str, default="./ExcelImage.xlsx",
help="Path to output excel workbook.")
parser.add_argument("--dimensions", "-d", type=int, nargs=2, default=[-1, -1],
help="Optionally specify image dimensions in pixels in format --dimensions Xdimension Ydimension."
" Maximum dimensions are 350 x 350. If not specified image will retain current dimensions"
" or be truncated to obey maximum dimensions.")
args = parser.parse_args()
# constants
x_max = 350
y_max = 350
cell_pixel_dim = 12
# Sort of lazy, but on my monitor at 100% zoom Excel is showing 97*12 pixels vertical, 220*12 horizontal
default_x_zoom = 97 * 12
default_y_zoom = 220 * 12
# input image
if args.input_image:
# read in input image
input_image = io.imread(args.input_image)
else:
input_image = data.astronaut()
# if RGBA, convert to RGB with white background
if input_image.shape[2] > 3:
input_image = rgba2rgb(input_image)
# if dimensions are specified, resize image
new_x, new_y = args.dimensions[0], args.dimensions[1]
if new_x != -1 and new_y != -1:
input_image = resize(input_image, [new_x, new_y], anti_aliasing=True, mode="constant")
# Get X/Y dimensions of image
l_x, l_y = input_image.shape[0], input_image.shape[1]
# If X or Y above max, scale image down maintaining ratio
# x scale factor
if l_x > x_max:
x_sf = x_max / l_x
else:
x_sf = 1
# y scale factor
if l_y > y_max:
y_sf = y_max / l_y
else:
y_sf = 1
# determine worst case scale factor
if x_sf < y_sf:
sf = x_sf
else:
sf = y_sf
# Scale image
if sf < 1:
input_image = img_as_ubyte(rescale(input_image, sf, anti_aliasing=True, multichannel=True))
else:
input_image = img_as_ubyte(input_image)
# get new x/y size
l_x, l_y = input_image.shape[0], input_image.shape[1]
# Convert image matrix to excel matrix
wb = Workbook()
ws = wb.active
ws.title = "converted_image"
set_col_height = False
# Output excel workbook containing cell pixelated image
for row in range(0, l_x):
ws.row_dimensions[row+1].height = 4.5
for col in range(0, l_y):
if not set_col_height:
ws.column_dimensions[get_column_letter(col+1)].width = 0.83
# Determine RGB from image array
cell_hex = "{:02X}".format(input_image[row, col, 0]) + "{:02X}".format(input_image[row, col, 1]) \
+ "{:02X}".format(input_image[row, col, 2])
# Set color using styles, Color takes ARGB hex input as AARRGGBB, or RGB hex input as RRGGBB
cell_color = Color(cell_hex)
# Set cell fill
sel_cell = ws.cell(column=col+1, row=row+1) # , value=1
sel_cell.fill = PatternFill(fill_type="solid", fgColor=cell_color)
set_col_height = True
# Set zoom scale according to dimensions of photo
pixels_x = l_x * cell_pixel_dim
pixels_y = l_y * cell_pixel_dim
zoom_scale_x = int((default_x_zoom / pixels_x) * 100)
zoom_scale_y = int((default_y_zoom / pixels_y) * 100)
if zoom_scale_x < zoom_scale_y:
ws.sheet_view.zoomScale = zoom_scale_x
else:
ws.sheet_view.zoomScale = zoom_scale_y
# Output workbook
wb.save(args.output_f)
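
# Example usage (hypothetical script name):
#   python image_to_excel.py -i photo.png -o ExcelImage.xlsx -d 200 200
# Running without --input_image converts the bundled skimage astronaut test image.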
|
py | b40c9197f4ad183c68f86821e3103421cfcac1d3 | # coding: utf-8
"""
App Center Client
Microsoft Visual Studio App Center API # noqa: E501
OpenAPI spec version: preview
Contact: [email protected]
Project Repository: https://github.com/b3nab/appcenter-sdks
"""
import pprint
import re # noqa: F401
import six
class TestGDPRPipelineTest(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'app_upload_id': 'string',
'test_parameters': 'object'
}
attribute_map = {
'app_upload_id': 'app_upload_id',
'test_parameters': 'test_parameters'
}
def __init__(self, app_upload_id=None, test_parameters=None): # noqa: E501
"""TestGDPRPipelineTest - a model defined in Swagger""" # noqa: E501
self._app_upload_id = None
self._test_parameters = None
self.discriminator = None
if app_upload_id is not None:
self.app_upload_id = app_upload_id
if test_parameters is not None:
self.test_parameters = test_parameters
@property
def app_upload_id(self):
"""Gets the app_upload_id of this TestGDPRPipelineTest. # noqa: E501
:return: The app_upload_id of this TestGDPRPipelineTest. # noqa: E501
:rtype: string
"""
return self._app_upload_id
@app_upload_id.setter
def app_upload_id(self, app_upload_id):
"""Sets the app_upload_id of this TestGDPRPipelineTest.
:param app_upload_id: The app_upload_id of this TestGDPRPipelineTest. # noqa: E501
:type: string
"""
self._app_upload_id = app_upload_id
@property
def test_parameters(self):
"""Gets the test_parameters of this TestGDPRPipelineTest. # noqa: E501
:return: The test_parameters of this TestGDPRPipelineTest. # noqa: E501
:rtype: object
"""
return self._test_parameters
@test_parameters.setter
def test_parameters(self, test_parameters):
"""Sets the test_parameters of this TestGDPRPipelineTest.
:param test_parameters: The test_parameters of this TestGDPRPipelineTest. # noqa: E501
:type: object
"""
self._test_parameters = test_parameters
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, TestGDPRPipelineTest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
py | b40c91e11fee9e94d5145392da7a5b83f771816e | # Break and continue statements
def run():
# Continue
    for counter in range(1000):
        if counter % 2 != 0:
continue
else:
print(counter)
# # Break 1
# for i in range(10000):
# print(i)
# if i == 5678:
# break
# # Break 2
# text = input('Input a text: ')
# for letter in text:
# if letter == 'o':
# break
# print(letter)
if __name__ == '__main__':
run()
|
py | b40c91e522c61d82b485c74c0414d869aaf1b808 | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
import math
from typing import Dict, Optional, Tuple, List
# First-party imports
from gluonts.model.common import Tensor
from gluonts.support.util import erf, erfinv
from gluonts.core.component import validated
# Relative imports
from .distribution import Distribution, _sample_multiple, getF, softplus
from .distribution_output import DistributionOutput
class Gaussian(Distribution):
r"""
Gaussian distribution.
Parameters
----------
mu
Tensor containing the means, of shape `(*batch_shape, *event_shape)`.
    sigma
Tensor containing the standard deviations, of shape
`(*batch_shape, *event_shape)`.
F
"""
is_reparameterizable = True
@validated()
def __init__(self, mu: Tensor, sigma: Tensor, F=None) -> None:
self.mu = mu
self.sigma = sigma
self.F = F if F else getF(mu)
@property
def batch_shape(self) -> Tuple:
return self.mu.shape
@property
def event_shape(self) -> Tuple:
return ()
@property
def event_dim(self) -> int:
return 0
def log_prob(self, x: Tensor) -> Tensor:
F = self.F
mu, sigma = self.mu, self.sigma
return -1.0 * (
F.log(sigma)
+ 0.5 * math.log(2 * math.pi)
+ 0.5 * F.square((x - mu) / sigma)
)
@property
def mean(self) -> Tensor:
return self.mu
@property
def stddev(self) -> Tensor:
return self.sigma
def cdf(self, x):
F = self.F
u = F.broadcast_div(
F.broadcast_minus(x, self.mu), self.sigma * math.sqrt(2.0)
)
return (erf(F, u) + 1.0) / 2.0
def sample(self, num_samples: Optional[int] = None) -> Tensor:
return _sample_multiple(
self.F.sample_normal,
mu=self.mu,
sigma=self.sigma,
num_samples=num_samples,
)
def sample_rep(self, num_samples: Optional[int] = None) -> Tensor:
def s(mu: Tensor, sigma: Tensor) -> Tensor:
raw_samples = self.F.sample_normal(
mu=mu.zeros_like(), sigma=sigma.ones_like()
)
return sigma * raw_samples + mu
return _sample_multiple(
s, mu=self.mu, sigma=self.sigma, num_samples=num_samples
)
def quantile(self, level: Tensor) -> Tensor:
F = self.F
# we consider level to be an independent axis and so expand it
# to shape (num_levels, 1, 1, ...)
for _ in range(self.all_dim):
level = level.expand_dims(axis=-1)
return F.broadcast_add(
self.mu,
F.broadcast_mul(
self.sigma, math.sqrt(2.0) * erfinv(F, 2.0 * level - 1.0)
),
)
@property
def args(self) -> List:
return [self.mu, self.sigma]
class GaussianOutput(DistributionOutput):
args_dim: Dict[str, int] = {"mu": 1, "sigma": 1}
distr_cls: type = Gaussian
@classmethod
def domain_map(cls, F, mu, sigma):
r"""
Maps raw tensors to valid arguments for constructing a Gaussian
distribution.
Parameters
----------
F
mu
Tensor of shape `(*batch_shape, 1)`
sigma
Tensor of shape `(*batch_shape, 1)`
Returns
-------
Tuple[Tensor, Tensor]
Two squeezed tensors, of shape `(*batch_shape)`: the first has the
same entries as `mu` and the second has entries mapped to the
positive orthant.
"""
sigma = softplus(F, sigma)
return mu.squeeze(axis=-1), sigma.squeeze(axis=-1)
@property
def event_shape(self) -> Tuple:
return ()
|
py | b40c920af6bc2f864597035251498d655bbb180d | #-*- coding: utf-8 -*-
# To use MySQL from Python, use the standard Python DB API.
# A MySQL driver module has to be downloaded and installed first: pymysql
import pymysql
# Create a MySQL connection
conn = pymysql.connect(host='192.168.27.128',user='cloud',passwd='Abcdef_12', db='cloud',charset='utf8')
# Create a cursor from the connection
curs = conn.cursor()
# Write the SQL statement and execute it
sql = 'select * from employees'
curs.execute(sql)
# If needed, process the data returned by the executed SQL
rows = curs.fetchall()
for row in rows:
print(row[0],row[1],row[2])
    # print(row['lastName'], row['email']) does not work here: a plain cursor returns tuples, not dicts
# Close the cursor and the connection
curs.close()
conn.close()
# Use a dictionary cursor
conn = pymysql.connect(host='192.168.27.128',port=3306,user='cloud',passwd='Abcdef_12', db='cloud',charset='utf8')
curs = conn.cursor(pymysql.cursors.DictCursor)
sql = 'select * from customers where state=%s and city=%s'
curs.execute(sql, ('NY', 'NYC'))
rows = curs.fetchall()
for row in rows:
print(row['phone'], row['city'], row['state'])
curs.close()
conn.close()
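# The same queries can be written with context-managed cursors; a sketch, shown
# only as a comment and assuming pymysql's cursor context-manager support
# (the cursor is closed automatically on exit):
#
#   conn = pymysql.connect(host='192.168.27.128', user='cloud', passwd='Abcdef_12', db='cloud', charset='utf8')
#   with conn.cursor(pymysql.cursors.DictCursor) as curs:
#       curs.execute('select * from customers where state=%s', ('NY',))
#       for row in curs.fetchall():
#           print(row['phone'], row['city'], row['state'])
#   conn.close()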
#------- Simple CRUD test
#-------delete from index_test
#-------insert into index_test values ('cloud', '98765')
#-------select * from index_test
#-------update index_test set uid = 'clouddd' where uid ='cloud'
#-------select * from index_test
conn = pymysql.connect(host='192.168.27.128',port=3306,user='cloud',passwd='Abcdef_12', db='cloud',charset='utf8')
curs = conn.cursor()
sql = 'delete from INDEX_test'
curs.execute(sql)
curs.close()
conn.commit() #insert update delete
##################################################
curs = conn.cursor()
sql = 'insert into INDEX_test values(%s, %s)'
curs.execute(sql, ('abc', '987654'))
conn.commit()
curs.close()
###############################################
curs = conn.cursor()
sql = 'update INDEX_test set uid =%s where uid =%s'
curs.execute(sql, ('xyz', 'abc'))
rows = curs.fetchall()
curs.close()
conn.commit()
############################################# SELECT query
curs = conn.cursor(pymysql.cursors.DictCursor)
sql ='select * from INDEX_test'
curs.execute(sql)
rows = curs.fetchall()
for row in rows:
print(row['uid'], row['pwd'])
curs.close()
conn.close() |
py | b40c929374d6fe0a821b281542264f00b90e1562 | def largest_elements(l):
for x in l:
maxnum = 0
for x in x:
maxnum = max(x, maxnum)
# The outer loop variable x has now been overwritten by the inner loop.
print "The largest element in the list", x, "is", maxnum
def largest_elements_correct(l):
for x in l:
maxnum = 0
for y in x:
maxnum = max(y, maxnum)
print "The largest element in the list", x, "is", maxnum
|
py | b40c92aafdf82f4ec69ce18c20fd9729550a6968 | from city_air_collector import CityAirCollector
from city_weather_collector import CityWeatherCollector
from influxdb_management.influx_crud import InfluxCRUD
import influxdb_management.influx_setting as ifs
import datetime, time
import urllib3, logging
def log_setup():
formatter = logging.Formatter('%(asctime)s - %(message)s')
termlog_handler = logging.StreamHandler()
termlog_handler.setFormatter(formatter)
logger = logging.getLogger()
logger.addHandler(termlog_handler)
logger.setLevel(logging.INFO)
if __name__ == "__main__":
log_setup()
urllib3.disable_warnings()
delta_hour = -1
logging.info("Start to Collect")
mydb = InfluxCRUD(ifs.host_, ifs.port_, ifs.user_,
ifs.pass_, 'TEST', ifs.protocol)
# Set Each Collector
air_collector = CityAirCollector(ifs.air_api_key,mydb)
weather_collector = CityWeatherCollector(ifs.weather_api_key,mydb)
CITY_DB = {'OUTDOOR_AIR': air_collector, 'OUTDOOR_WEATHER': weather_collector}
city_id_list = ['seoul', 'sangju']
while True:
now = datetime.datetime.now().replace(tzinfo=None)
now_hour = now.hour
now_minute = now.minute
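        # Collect at most once per hour (delta_hour tracks the last hour that
        # was collected) and only after minute 30 of the current hour.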
if delta_hour != now_hour and now_minute > 30:
logging.info("Current HOUR : "+ str(now_hour))
try:
# OUTDOOR
for db_name in CITY_DB.keys():
mydb.change_db(db_name)
for id_ in city_id_list:
CITY_DB[db_name].set_table(id_)
CITY_DB[db_name].collect()
delta_hour = now_hour
except Exception as e:
logging.error("There are some errors : "+str(e))
else:
logging.info("PASS")
time.sleep(600) # 600 seconds
|
py | b40c94e6bd3e1d1d11dd0cf946103ea5e29ce7ba | # -*- coding: utf-8 -*-
# Copyright (C) 2014-2016 Andrey Antukh <[email protected]>
# Copyright (C) 2014-2016 Jesús Espino <[email protected]>
# Copyright (C) 2014-2016 David Barragán <[email protected]>
# Copyright (C) 2014-2016 Alejandro Alonso <[email protected]>
# Copyright (C) 2014-2016 Anler Hernández <[email protected]>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import pytest
from copy import deepcopy
from unittest import mock
from django.core.urlresolvers import reverse
from django.core import mail
from taiga.base.utils import json
from taiga.hooks.gitlab import event_hooks
from taiga.hooks.gitlab.api import GitLabViewSet
from taiga.hooks.exceptions import ActionSyntaxException
from taiga.projects import choices as project_choices
from taiga.projects.epics.models import Epic
from taiga.projects.issues.models import Issue
from taiga.projects.tasks.models import Task
from taiga.projects.userstories.models import UserStory
from taiga.projects.models import Membership
from taiga.projects.history.services import get_history_queryset_by_model_instance, take_snapshot
from taiga.projects.notifications.choices import NotifyLevel
from taiga.projects.notifications.models import NotifyPolicy
from taiga.projects import services
from .. import factories as f
pytestmark = pytest.mark.django_db
push_base_payload = {
"object_kind": "push",
"before": "95790bf891e76fee5e1747ab589903a6a1f80f22",
"after": "da1560886d4f094c3e6c9ef40349f7d38b5d27d7",
"ref": "refs/heads/master",
"checkout_sha": "da1560886d4f094c3e6c9ef40349f7d38b5d27d7",
"user_id": 4,
"user_name": "John Smith",
"user_email": "[email protected]",
"user_avatar": "https://s.gravatar.com/avatar/d4c74594d841139328695756648b6bd6?s=8://s.gravatar.com/avatar/d4c74594d841139328695756648b6bd6?s=80",
"project_id": 15,
"project": {
"name": "Diaspora",
"description": "",
"web_url": "http://example.com/mike/diaspora",
"avatar_url": None,
"git_ssh_url": "[email protected]:mike/diaspora.git",
"git_http_url": "http://example.com/mike/diaspora.git",
"namespace": "Mike",
"visibility_level": 0,
"path_with_namespace": "mike/diaspora",
"default_branch": "master",
"homepage": "http://example.com/mike/diaspora",
"url": "[email protected]:mike/diaspora.git",
"ssh_url": "[email protected]:mike/diaspora.git",
"http_url": "http://example.com/mike/diaspora.git"
},
"repository": {
"name": "Diaspora",
"url": "[email protected]:mike/diaspora.git",
"description": "",
"homepage": "http://example.com/mike/diaspora",
"git_http_url": "http://example.com/mike/diaspora.git",
"git_ssh_url": "[email protected]:mike/diaspora.git",
"visibility_level": 0
},
"commits": [
{
"id": "b6568db1bc1dcd7f8b4d5a946b0b91f9dacd7327",
"message": "Update Catalan translation to e38cb41.",
"timestamp": "2011-12-12T14:27:31+02:00",
"id": "b6568db1bc1dcd7f8b4d5a946b0b91f9dacd7327",
"url": "http://example.com/mike/diaspora/commit/b6568db1bc1dcd7f8b4d5a946b0b91f9dacd7327",
"author": {
"name": "Jordi Mallach",
"email": "[email protected]"
},
"added": ["CHANGELOG"],
"modified": ["app/controller/application.rb"],
"removed": []
},
{
"id": "da1560886d4f094c3e6c9ef40349f7d38b5d27d7",
"message": "fixed readme",
"timestamp": "2012-01-03T23:36:29+02:00",
"url": "http://example.com/mike/diaspora/commit/da1560886d4f094c3e6c9ef40349f7d38b5d27d7",
"author": {
"name": "GitLab dev user",
"email": "gitlabdev@dv6700.(none)"
},
"added": ["CHANGELOG"],
"modified": ["app/controller/application.rb"],
"removed": []
}
],
"total_commits_count": 4
}
new_issue_base_payload = {
"object_kind": "issue",
"user": {
"name": "Administrator",
"username": "root",
"avatar_url": "http://www.gravatar.com/avatar/e64c7d89f26bd1972efa854d13d7dd61?s=40\u0026d=identicon"
},
"project": {
"name": "Gitlab Test",
"description": "Aut reprehenderit ut est.",
"web_url": "http://example.com/gitlabhq/gitlab-test",
"avatar_url": None,
"git_ssh_url": "[email protected]:gitlabhq/gitlab-test.git",
"git_http_url": "http://example.com/gitlabhq/gitlab-test.git",
"namespace": "GitlabHQ",
"visibility_level": 20,
"path_with_namespace": "gitlabhq/gitlab-test",
"default_branch": "master",
"homepage": "http://example.com/gitlabhq/gitlab-test",
"url": "http://example.com/gitlabhq/gitlab-test.git",
"ssh_url": "[email protected]:gitlabhq/gitlab-test.git",
"http_url": "http://example.com/gitlabhq/gitlab-test.git"
},
"repository": {
"name": "Gitlab Test",
"url": "http://example.com/gitlabhq/gitlab-test.git",
"description": "Aut reprehenderit ut est.",
"homepage": "http://example.com/gitlabhq/gitlab-test"
},
"object_attributes": {
"id": 301,
"title": "New API: create/update/delete file",
"assignee_id": 51,
"author_id": 51,
"project_id": 14,
"created_at": "2013-12-03T17:15:43Z",
"updated_at": "2013-12-03T17:15:43Z",
"position": 0,
"branch_name": None,
"description": "Create new API for manipulations with repository",
"milestone_id": None,
"state": "opened",
"iid": 23,
"url": "http://example.com/diaspora/issues/23",
"action": "open"
},
"assignee": {
"name": "User1",
"username": "user1",
"avatar_url": "http://www.gravatar.com/avatar/e64c7d89f26bd1972efa854d13d7dd61?s=40\u0026d=identicon"
}
}
issue_comment_base_payload = {
"object_kind": "note",
"user": {
"name": "Administrator",
"username": "root",
"avatar_url": "http://www.gravatar.com/avatar/e64c7d89f26bd1972efa854d13d7dd61?s=40\u0026d=identicon"
},
"project_id": 5,
"project": {
"name": "Gitlab Test",
"description": "Aut reprehenderit ut est.",
"web_url": "http://example.com/gitlab-org/gitlab-test",
"avatar_url": None,
"git_ssh_url": "[email protected]:gitlab-org/gitlab-test.git",
"git_http_url": "http://example.com/gitlab-org/gitlab-test.git",
"namespace": "Gitlab Org",
"visibility_level": 10,
"path_with_namespace": "gitlab-org/gitlab-test",
"default_branch": "master",
"homepage": "http://example.com/gitlab-org/gitlab-test",
"url": "http://example.com/gitlab-org/gitlab-test.git",
"ssh_url": "[email protected]:gitlab-org/gitlab-test.git",
"http_url": "http://example.com/gitlab-org/gitlab-test.git"
},
"repository": {
"name": "diaspora",
"url": "[email protected]:mike/diaspora.git",
"description": "",
"homepage": "http://example.com/mike/diaspora"
},
"object_attributes": {
"id": 1241,
"note": "Hello world",
"noteable_type": "Issue",
"author_id": 1,
"created_at": "2015-05-17 17:06:40 UTC",
"updated_at": "2015-05-17 17:06:40 UTC",
"project_id": 5,
"attachment": None,
"line_code": None,
"commit_id": "",
"noteable_id": 92,
"system": False,
"st_diff": None,
"url": "http://example.com/gitlab-org/gitlab-test/issues/17#note_1241"
},
"issue": {
"id": 92,
"title": "test",
"assignee_id": None,
"author_id": 1,
"project_id": 5,
"created_at": "2015-04-12 14:53:17 UTC",
"updated_at": "2015-04-26 08:28:42 UTC",
"position": 0,
"branch_name": None,
"description": "test",
"milestone_id": None,
"state": "closed",
"iid": 17
}
}
def test_bad_signature(client):
project = f.ProjectFactory()
f.ProjectModulesConfigFactory(project=project, config={
"gitlab": {
"secret": "tpnIwJDz4e"
}
})
url = reverse("gitlab-hook-list")
url = "{}?project={}&key={}".format(url, project.id, "badbadbad")
data = {}
response = client.post(url, json.dumps(data), content_type="application/json")
response_content = response.data
assert response.status_code == 400
assert "Bad signature" in response_content["_error_message"]
def test_ok_signature(client):
project = f.ProjectFactory()
f.ProjectModulesConfigFactory(project=project, config={
"gitlab": {
"secret": "tpnIwJDz4e",
"valid_origin_ips": ["111.111.111.111"],
}
})
url = reverse("gitlab-hook-list")
url = "{}?project={}&key={}".format(url, project.id, "tpnIwJDz4e")
data = {"test:": "data"}
response = client.post(url,
json.dumps(data),
content_type="application/json",
REMOTE_ADDR="111.111.111.111")
assert response.status_code == 204
def test_ok_empty_payload(client):
project = f.ProjectFactory()
f.ProjectModulesConfigFactory(project=project, config={
"gitlab": {
"secret": "tpnIwJDz4e",
"valid_origin_ips": ["111.111.111.111"],
}
})
url = reverse("gitlab-hook-list")
url = "{}?project={}&key={}".format(url, project.id, "tpnIwJDz4e")
response = client.post(url, "null", content_type="application/json",
REMOTE_ADDR="111.111.111.111")
assert response.status_code == 204
def test_ok_signature_ip_in_network(client):
project = f.ProjectFactory()
f.ProjectModulesConfigFactory(project=project, config={
"gitlab": {
"secret": "tpnIwJDz4e",
"valid_origin_ips": ["111.111.111.0/24"],
}
})
url = reverse("gitlab-hook-list")
url = "{}?project={}&key={}".format(url, project.id, "tpnIwJDz4e")
data = {"test:": "data"}
response = client.post(url, json.dumps(data),
content_type="application/json",
REMOTE_ADDR="111.111.111.112")
assert response.status_code == 204
def test_ok_signature_invalid_network(client):
project = f.ProjectFactory()
f.ProjectModulesConfigFactory(project=project, config={
"gitlab": {
"secret": "tpnIwJDz4e",
"valid_origin_ips": ["131.103.20.160/27;165.254.145.0/26;104.192.143.0/24"],
}
})
url = reverse("gitlab-hook-list")
url = "{}?project={}&key={}".format(url, project.id, "tpnIwJDz4e")
data = json.dumps({"push": {"changes": [{"new": {"target": { "message": "test message"}}}]}})
response = client.post(url,
data,
content_type="application/json",
HTTP_X_EVENT_KEY="repo:push",
REMOTE_ADDR="104.192.143.193")
assert response.status_code == 400
assert "Bad signature" in response.data["_error_message"]
def test_blocked_project(client):
project = f.ProjectFactory(blocked_code=project_choices.BLOCKED_BY_STAFF)
f.ProjectModulesConfigFactory(project=project, config={
"gitlab": {
"secret": "tpnIwJDz4e",
"valid_origin_ips": ["111.111.111.111"],
}
})
url = reverse("gitlab-hook-list")
url = "{}?project={}&key={}".format(url, project.id, "tpnIwJDz4e")
data = {"test:": "data"}
response = client.post(url,
json.dumps(data),
content_type="application/json",
REMOTE_ADDR="111.111.111.111")
assert response.status_code == 451
def test_invalid_ip(client):
project = f.ProjectFactory()
f.ProjectModulesConfigFactory(project=project, config={
"gitlab": {
"secret": "tpnIwJDz4e",
"valid_origin_ips": ["111.111.111.111"],
}
})
url = reverse("gitlab-hook-list")
url = "{}?project={}&key={}".format(url, project.id, "tpnIwJDz4e")
data = {"test:": "data"}
response = client.post(url,
json.dumps(data),
content_type="application/json",
REMOTE_ADDR="111.111.111.112")
assert response.status_code == 400
def test_invalid_origin_ip_settings(client):
project = f.ProjectFactory()
f.ProjectModulesConfigFactory(project=project, config={
"gitlab": {
"secret": "tpnIwJDz4e",
"valid_origin_ips": ["testing"]
}
})
url = reverse("gitlab-hook-list")
url = "{}?project={}&key={}".format(url, project.id, "tpnIwJDz4e")
data = {"test:": "data"}
response = client.post(url,
json.dumps(data),
content_type="application/json",
REMOTE_ADDR="111.111.111.112")
assert response.status_code == 400
def test_valid_local_network_ip(client):
project = f.ProjectFactory()
f.ProjectModulesConfigFactory(project=project, config={
"gitlab": {
"secret": "tpnIwJDz4e",
"valid_origin_ips": ["192.168.1.1"],
}
})
url = reverse("gitlab-hook-list")
url = "{}?project={}&key={}".format(url, project.id, "tpnIwJDz4e")
data = {"test:": "data"}
response = client.post(url,
json.dumps(data),
content_type="application/json",
REMOTE_ADDR="192.168.1.1")
assert response.status_code == 204
def test_not_ip_filter(client):
project = f.ProjectFactory()
f.ProjectModulesConfigFactory(project=project, config={
"gitlab": {
"secret": "tpnIwJDz4e",
"valid_origin_ips": [],
}
})
url = reverse("gitlab-hook-list")
url = "{}?project={}&key={}".format(url, project.id, "tpnIwJDz4e")
data = {"test:": "data"}
response = client.post(url,
json.dumps(data),
content_type="application/json",
REMOTE_ADDR="111.111.111.111")
assert response.status_code == 204
def test_push_event_detected(client):
project = f.ProjectFactory()
url = reverse("gitlab-hook-list")
url = "%s?project=%s" % (url, project.id)
data = deepcopy(push_base_payload)
data["commits"] = [{
"message": "test message",
"id": "b6568db1bc1dcd7f8b4d5a946b0b91f9dacd7327",
"url": "http://example.com/mike/diaspora/commit/b6568db1bc1dcd7f8b4d5a946b0b91f9dacd7327",
}]
data["total_commits_count"] = 1
GitLabViewSet._validate_signature = mock.Mock(return_value=True)
with mock.patch.object(event_hooks.PushEventHook, "process_event") as process_event_mock:
response = client.post(url, json.dumps(data),
HTTP_X_GITHUB_EVENT="push",
content_type="application/json")
assert process_event_mock.call_count == 1
assert response.status_code == 204
def test_push_event_epic_processing(client):
creation_status = f.EpicStatusFactory()
role = f.RoleFactory(project=creation_status.project, permissions=["view_epics"])
f.MembershipFactory(project=creation_status.project, role=role, user=creation_status.project.owner)
new_status = f.EpicStatusFactory(project=creation_status.project)
epic = f.EpicFactory.create(status=creation_status, project=creation_status.project, owner=creation_status.project.owner)
payload = deepcopy(push_base_payload)
payload["commits"] = [{
"message": """test message
test TG-%s #%s ok
bye!
""" % (epic.ref, new_status.slug),
"id": "b6568db1bc1dcd7f8b4d5a946b0b91f9dacd7327",
"url": "http://example.com/mike/diaspora/commit/b6568db1bc1dcd7f8b4d5a946b0b91f9dacd7327",
}]
payload["total_commits_count"] = 1
mail.outbox = []
ev_hook = event_hooks.PushEventHook(epic.project, payload)
ev_hook.process_event()
epic = Epic.objects.get(id=epic.id)
assert epic.status.id == new_status.id
assert len(mail.outbox) == 1
def test_push_event_issue_processing(client):
creation_status = f.IssueStatusFactory()
role = f.RoleFactory(project=creation_status.project, permissions=["view_issues"])
f.MembershipFactory(project=creation_status.project, role=role, user=creation_status.project.owner)
new_status = f.IssueStatusFactory(project=creation_status.project)
issue = f.IssueFactory.create(status=creation_status, project=creation_status.project, owner=creation_status.project.owner)
payload = deepcopy(push_base_payload)
payload["commits"] = [{
"message": """test message
test TG-%s #%s ok
bye!
""" % (issue.ref, new_status.slug),
"id": "b6568db1bc1dcd7f8b4d5a946b0b91f9dacd7327",
"url": "http://example.com/mike/diaspora/commit/b6568db1bc1dcd7f8b4d5a946b0b91f9dacd7327",
}]
payload["total_commits_count"] = 1
mail.outbox = []
ev_hook = event_hooks.PushEventHook(issue.project, payload)
ev_hook.process_event()
issue = Issue.objects.get(id=issue.id)
assert issue.status.id == new_status.id
assert len(mail.outbox) == 1
def test_push_event_task_processing(client):
creation_status = f.TaskStatusFactory()
role = f.RoleFactory(project=creation_status.project, permissions=["view_tasks"])
f.MembershipFactory(project=creation_status.project, role=role, user=creation_status.project.owner)
new_status = f.TaskStatusFactory(project=creation_status.project)
task = f.TaskFactory.create(status=creation_status, project=creation_status.project, owner=creation_status.project.owner)
payload = deepcopy(push_base_payload)
payload["commits"] = [{
"message": """test message
test TG-%s #%s ok
bye!
""" % (task.ref, new_status.slug),
"id": "b6568db1bc1dcd7f8b4d5a946b0b91f9dacd7327",
"url": "http://example.com/mike/diaspora/commit/b6568db1bc1dcd7f8b4d5a946b0b91f9dacd7327",
}]
payload["total_commits_count"] = 1
mail.outbox = []
ev_hook = event_hooks.PushEventHook(task.project, payload)
ev_hook.process_event()
task = Task.objects.get(id=task.id)
assert task.status.id == new_status.id
assert len(mail.outbox) == 1
def test_push_event_user_story_processing(client):
creation_status = f.UserStoryStatusFactory()
role = f.RoleFactory(project=creation_status.project, permissions=["view_us"])
f.MembershipFactory(project=creation_status.project, role=role, user=creation_status.project.owner)
new_status = f.UserStoryStatusFactory(project=creation_status.project)
user_story = f.UserStoryFactory.create(status=creation_status, project=creation_status.project, owner=creation_status.project.owner)
payload = deepcopy(push_base_payload)
payload["commits"] = [{
"message": """test message
test TG-%s #%s ok
bye!
""" % (user_story.ref, new_status.slug),
"id": "b6568db1bc1dcd7f8b4d5a946b0b91f9dacd7327",
"url": "http://example.com/mike/diaspora/commit/b6568db1bc1dcd7f8b4d5a946b0b91f9dacd7327",
}]
payload["total_commits_count"] = 1
mail.outbox = []
ev_hook = event_hooks.PushEventHook(user_story.project, payload)
ev_hook.process_event()
user_story = UserStory.objects.get(id=user_story.id)
assert user_story.status.id == new_status.id
assert len(mail.outbox) == 1
def test_push_event_issue_mention(client):
creation_status = f.IssueStatusFactory()
role = f.RoleFactory(project=creation_status.project, permissions=["view_issues"])
f.MembershipFactory(project=creation_status.project, role=role, user=creation_status.project.owner)
issue = f.IssueFactory.create(status=creation_status, project=creation_status.project, owner=creation_status.project.owner)
take_snapshot(issue, user=creation_status.project.owner)
payload = deepcopy(push_base_payload)
payload["commits"] = [{
"message": """test message
test TG-%s ok
bye!
""" % (issue.ref),
"id": "b6568db1bc1dcd7f8b4d5a946b0b91f9dacd7327",
"url": "http://example.com/mike/diaspora/commit/b6568db1bc1dcd7f8b4d5a946b0b91f9dacd7327",
}]
mail.outbox = []
ev_hook = event_hooks.PushEventHook(issue.project, payload)
ev_hook.process_event()
issue_history = get_history_queryset_by_model_instance(issue)
assert issue_history.count() == 1
assert issue_history[0].comment.startswith("This issue has been mentioned by")
assert len(mail.outbox) == 1
def test_push_event_task_mention(client):
creation_status = f.TaskStatusFactory()
role = f.RoleFactory(project=creation_status.project, permissions=["view_tasks"])
f.MembershipFactory(project=creation_status.project, role=role, user=creation_status.project.owner)
task = f.TaskFactory.create(status=creation_status, project=creation_status.project, owner=creation_status.project.owner)
take_snapshot(task, user=creation_status.project.owner)
payload = deepcopy(push_base_payload)
payload["commits"] = [{
"message": """test message
test TG-%s ok
bye!
""" % (task.ref),
"id": "b6568db1bc1dcd7f8b4d5a946b0b91f9dacd7327",
"url": "http://example.com/mike/diaspora/commit/b6568db1bc1dcd7f8b4d5a946b0b91f9dacd7327",
}]
mail.outbox = []
ev_hook = event_hooks.PushEventHook(task.project, payload)
ev_hook.process_event()
task_history = get_history_queryset_by_model_instance(task)
assert task_history.count() == 1
assert task_history[0].comment.startswith("This task has been mentioned by")
assert len(mail.outbox) == 1
def test_push_event_user_story_mention(client):
creation_status = f.UserStoryStatusFactory()
role = f.RoleFactory(project=creation_status.project, permissions=["view_us"])
f.MembershipFactory(project=creation_status.project, role=role, user=creation_status.project.owner)
user_story = f.UserStoryFactory.create(status=creation_status, project=creation_status.project, owner=creation_status.project.owner)
take_snapshot(user_story, user=creation_status.project.owner)
payload = deepcopy(push_base_payload)
payload["commits"] = [{
"message": """test message
test TG-%s ok
bye!
""" % (user_story.ref),
"id": "b6568db1bc1dcd7f8b4d5a946b0b91f9dacd7327",
"url": "http://example.com/mike/diaspora/commit/b6568db1bc1dcd7f8b4d5a946b0b91f9dacd7327",
}]
mail.outbox = []
ev_hook = event_hooks.PushEventHook(user_story.project, payload)
ev_hook.process_event()
us_history = get_history_queryset_by_model_instance(user_story)
assert us_history.count() == 1
assert us_history[0].comment.startswith("This user story has been mentioned by")
assert len(mail.outbox) == 1
def test_push_event_multiple_actions(client):
creation_status = f.IssueStatusFactory()
role = f.RoleFactory(project=creation_status.project, permissions=["view_issues"])
f.MembershipFactory(project=creation_status.project, role=role, user=creation_status.project.owner)
new_status = f.IssueStatusFactory(project=creation_status.project)
issue1 = f.IssueFactory.create(status=creation_status, project=creation_status.project, owner=creation_status.project.owner)
issue2 = f.IssueFactory.create(status=creation_status, project=creation_status.project, owner=creation_status.project.owner)
payload = deepcopy(push_base_payload)
payload["commits"] = [{
"message": """test message
test TG-%s #%s ok
test TG-%s #%s ok
bye!
""" % (issue1.ref, new_status.slug, issue2.ref, new_status.slug),
"id": "b6568db1bc1dcd7f8b4d5a946b0b91f9dacd7327",
"url": "http://example.com/mike/diaspora/commit/b6568db1bc1dcd7f8b4d5a946b0b91f9dacd7327",
}]
payload["total_commits_count"] = 1
mail.outbox = []
ev_hook1 = event_hooks.PushEventHook(issue1.project, payload)
ev_hook1.process_event()
issue1 = Issue.objects.get(id=issue1.id)
issue2 = Issue.objects.get(id=issue2.id)
assert issue1.status.id == new_status.id
assert issue2.status.id == new_status.id
assert len(mail.outbox) == 2
def test_push_event_processing_case_insensitive(client):
creation_status = f.TaskStatusFactory()
role = f.RoleFactory(project=creation_status.project, permissions=["view_tasks"])
f.MembershipFactory(project=creation_status.project, role=role, user=creation_status.project.owner)
new_status = f.TaskStatusFactory(project=creation_status.project)
task = f.TaskFactory.create(status=creation_status, project=creation_status.project, owner=creation_status.project.owner)
payload = deepcopy(push_base_payload)
payload["commits"] = [{
"message": """test message
test tg-%s #%s ok
bye!
""" % (task.ref, new_status.slug.upper()),
"id": "b6568db1bc1dcd7f8b4d5a946b0b91f9dacd7327",
"url": "http://example.com/mike/diaspora/commit/b6568db1bc1dcd7f8b4d5a946b0b91f9dacd7327",
}]
payload["total_commits_count"] = 1
mail.outbox = []
ev_hook = event_hooks.PushEventHook(task.project, payload)
ev_hook.process_event()
task = Task.objects.get(id=task.id)
assert task.status.id == new_status.id
assert len(mail.outbox) == 1
def test_push_event_task_bad_processing_non_existing_ref(client):
issue_status = f.IssueStatusFactory()
payload = deepcopy(push_base_payload)
payload["commits"] = [{
"message": """test message
test TG-6666666 #%s ok
bye!
""" % (issue_status.slug),
"id": "b6568db1bc1dcd7f8b4d5a946b0b91f9dacd7327",
"url": "http://example.com/mike/diaspora/commit/b6568db1bc1dcd7f8b4d5a946b0b91f9dacd7327",
}]
payload["total_commits_count"] = 1
mail.outbox = []
ev_hook = event_hooks.PushEventHook(issue_status.project, payload)
with pytest.raises(ActionSyntaxException) as excinfo:
ev_hook.process_event()
assert str(excinfo.value) == "The referenced element doesn't exist"
assert len(mail.outbox) == 0
def test_push_event_us_bad_processing_non_existing_status(client):
user_story = f.UserStoryFactory.create()
payload = deepcopy(push_base_payload)
payload["commits"] = [{
"message": """test message
test TG-%s #non-existing-slug ok
bye!
""" % (user_story.ref),
"id": "b6568db1bc1dcd7f8b4d5a946b0b91f9dacd7327",
"url": "http://example.com/mike/diaspora/commit/b6568db1bc1dcd7f8b4d5a946b0b91f9dacd7327",
}]
payload["total_commits_count"] = 1
mail.outbox = []
ev_hook = event_hooks.PushEventHook(user_story.project, payload)
with pytest.raises(ActionSyntaxException) as excinfo:
ev_hook.process_event()
assert str(excinfo.value) == "The status doesn't exist"
assert len(mail.outbox) == 0
def test_push_event_bad_processing_non_existing_status(client):
issue = f.IssueFactory.create()
payload = deepcopy(push_base_payload)
payload["commits"] = [{
"message": """test message
test TG-%s #non-existing-slug ok
bye!
""" % (issue.ref),
"id": "b6568db1bc1dcd7f8b4d5a946b0b91f9dacd7327",
"url": "http://example.com/mike/diaspora/commit/b6568db1bc1dcd7f8b4d5a946b0b91f9dacd7327",
}]
payload["total_commits_count"] = 1
mail.outbox = []
ev_hook = event_hooks.PushEventHook(issue.project, payload)
with pytest.raises(ActionSyntaxException) as excinfo:
ev_hook.process_event()
assert str(excinfo.value) == "The status doesn't exist"
assert len(mail.outbox) == 0
def test_issues_event_opened_issue(client):
issue = f.IssueFactory.create()
issue.project.default_issue_status = issue.status
issue.project.default_issue_type = issue.type
issue.project.default_severity = issue.severity
issue.project.default_priority = issue.priority
issue.project.save()
Membership.objects.create(user=issue.owner, project=issue.project, role=f.RoleFactory.create(project=issue.project), is_admin=True)
notify_policy = NotifyPolicy.objects.get(user=issue.owner, project=issue.project)
notify_policy.notify_level = NotifyLevel.all
notify_policy.save()
payload = deepcopy(new_issue_base_payload)
payload["object_attributes"]["title"] = "test-title"
payload["object_attributes"]["description"] = "test-body"
payload["object_attributes"]["url"] = "http://gitlab.com/test/project/issues/11"
payload["object_attributes"]["action"] = "open"
payload["repository"]["homepage"] = "test"
mail.outbox = []
ev_hook = event_hooks.IssuesEventHook(issue.project, payload)
ev_hook.process_event()
assert Issue.objects.count() == 2
assert len(mail.outbox) == 1
def test_issues_event_other_than_opened_issue(client):
issue = f.IssueFactory.create()
issue.project.default_issue_status = issue.status
issue.project.default_issue_type = issue.type
issue.project.default_severity = issue.severity
issue.project.default_priority = issue.priority
issue.project.save()
payload = deepcopy(new_issue_base_payload)
payload["object_attributes"]["title"] = "test-title"
payload["object_attributes"]["description"] = "test-body"
payload["object_attributes"]["url"] = "http://gitlab.com/test/project/issues/11"
payload["object_attributes"]["action"] = "update"
payload["repository"]["homepage"] = "test"
mail.outbox = []
ev_hook = event_hooks.IssuesEventHook(issue.project, payload)
ev_hook.process_event()
assert Issue.objects.count() == 1
assert len(mail.outbox) == 0
def test_issues_event_bad_issue(client):
issue = f.IssueFactory.create()
issue.project.default_issue_status = issue.status
issue.project.default_issue_type = issue.type
issue.project.default_severity = issue.severity
issue.project.default_priority = issue.priority
issue.project.save()
payload = deepcopy(new_issue_base_payload)
del payload["object_attributes"]["title"]
del payload["object_attributes"]["description"]
del payload["object_attributes"]["url"]
payload["object_attributes"]["action"] = "open"
payload["repository"]["homepage"] = "test"
mail.outbox = []
ev_hook = event_hooks.IssuesEventHook(issue.project, payload)
with pytest.raises(ActionSyntaxException) as excinfo:
ev_hook.process_event()
assert str(excinfo.value) == "Invalid issue information"
assert Issue.objects.count() == 1
assert len(mail.outbox) == 0
def test_issue_comment_event_on_existing_issue_task_and_us(client):
project = f.ProjectFactory()
role = f.RoleFactory(project=project, permissions=["view_tasks", "view_issues", "view_us"])
f.MembershipFactory(project=project, role=role, user=project.owner)
user = f.UserFactory()
issue = f.IssueFactory.create(external_reference=["gitlab", "http://gitlab.com/test/project/issues/11"], owner=project.owner, project=project)
take_snapshot(issue, user=user)
task = f.TaskFactory.create(external_reference=["gitlab", "http://gitlab.com/test/project/issues/11"], owner=project.owner, project=project)
take_snapshot(task, user=user)
us = f.UserStoryFactory.create(external_reference=["gitlab", "http://gitlab.com/test/project/issues/11"], owner=project.owner, project=project)
take_snapshot(us, user=user)
payload = deepcopy(issue_comment_base_payload)
payload["user"]["username"] = "test"
payload["issue"]["iid"] = "11"
payload["issue"]["title"] = "test-title"
payload["object_attributes"]["noteable_type"] = "Issue"
payload["object_attributes"]["note"] = "Test body"
payload["repository"]["homepage"] = "http://gitlab.com/test/project"
mail.outbox = []
assert get_history_queryset_by_model_instance(issue).count() == 0
assert get_history_queryset_by_model_instance(task).count() == 0
assert get_history_queryset_by_model_instance(us).count() == 0
ev_hook = event_hooks.IssueCommentEventHook(issue.project, payload)
ev_hook.process_event()
issue_history = get_history_queryset_by_model_instance(issue)
assert issue_history.count() == 1
assert "Test body" in issue_history[0].comment
task_history = get_history_queryset_by_model_instance(task)
assert task_history.count() == 1
assert "Test body" in issue_history[0].comment
us_history = get_history_queryset_by_model_instance(us)
assert us_history.count() == 1
assert "Test body" in issue_history[0].comment
assert len(mail.outbox) == 3
def test_issue_comment_event_on_not_existing_issue_task_and_us(client):
issue = f.IssueFactory.create(external_reference=["gitlab", "10"])
take_snapshot(issue, user=issue.owner)
task = f.TaskFactory.create(project=issue.project, external_reference=["gitlab", "10"])
take_snapshot(task, user=task.owner)
us = f.UserStoryFactory.create(project=issue.project, external_reference=["gitlab", "10"])
take_snapshot(us, user=us.owner)
payload = deepcopy(issue_comment_base_payload)
payload["user"]["username"] = "test"
payload["issue"]["iid"] = "99999"
payload["issue"]["title"] = "test-title"
payload["object_attributes"]["noteable_type"] = "Issue"
payload["object_attributes"]["note"] = "test comment"
payload["repository"]["homepage"] = "test"
mail.outbox = []
assert get_history_queryset_by_model_instance(issue).count() == 0
assert get_history_queryset_by_model_instance(task).count() == 0
assert get_history_queryset_by_model_instance(us).count() == 0
ev_hook = event_hooks.IssueCommentEventHook(issue.project, payload)
ev_hook.process_event()
assert get_history_queryset_by_model_instance(issue).count() == 0
assert get_history_queryset_by_model_instance(task).count() == 0
assert get_history_queryset_by_model_instance(us).count() == 0
assert len(mail.outbox) == 0
def test_issues_event_bad_comment(client):
issue = f.IssueFactory.create(external_reference=["gitlab", "10"])
take_snapshot(issue, user=issue.owner)
payload = deepcopy(issue_comment_base_payload)
payload["user"]["username"] = "test"
payload["issue"]["iid"] = "10"
payload["issue"]["title"] = "test-title"
payload["object_attributes"]["noteable_type"] = "Issue"
del payload["object_attributes"]["note"]
payload["repository"]["homepage"] = "test"
ev_hook = event_hooks.IssueCommentEventHook(issue.project, payload)
mail.outbox = []
with pytest.raises(ActionSyntaxException) as excinfo:
ev_hook.process_event()
assert str(excinfo.value) == "Invalid issue comment information"
assert Issue.objects.count() == 1
assert len(mail.outbox) == 0
def test_api_get_project_modules(client):
project = f.create_project()
f.MembershipFactory(project=project, user=project.owner, is_admin=True)
url = reverse("projects-modules", args=(project.id,))
client.login(project.owner)
response = client.get(url)
assert response.status_code == 200
content = response.data
assert "gitlab" in content
assert content["gitlab"]["secret"] != ""
assert content["gitlab"]["webhooks_url"] != ""
def test_api_patch_project_modules(client):
project = f.create_project()
f.MembershipFactory(project=project, user=project.owner, is_admin=True)
url = reverse("projects-modules", args=(project.id,))
client.login(project.owner)
data = {
"gitlab": {
"secret": "test_secret",
"url": "test_url",
}
}
response = client.patch(url, json.dumps(data), content_type="application/json")
assert response.status_code == 204
config = services.get_modules_config(project).config
assert "gitlab" in config
assert config["gitlab"]["secret"] == "test_secret"
assert config["gitlab"]["webhooks_url"] != "test_url"
def test_replace_gitlab_references():
ev_hook = event_hooks.BaseGitLabEventHook
assert ev_hook.replace_gitlab_references(None, "project-url", "#2") == "[GitLab#2](project-url/issues/2)"
assert ev_hook.replace_gitlab_references(None, "project-url", "#2 ") == "[GitLab#2](project-url/issues/2) "
assert ev_hook.replace_gitlab_references(None, "project-url", " #2 ") == " [GitLab#2](project-url/issues/2) "
assert ev_hook.replace_gitlab_references(None, "project-url", " #2") == " [GitLab#2](project-url/issues/2)"
assert ev_hook.replace_gitlab_references(None, "project-url", "#test") == "#test"
assert ev_hook.replace_gitlab_references(None, "project-url", None) == ""
|
py | b40c9558e06cd51ab90408d9a3e5d07791878190 | import pygame
from pygame import time
import random
import math
import os
class Dot:
    def __init__(self, x=0, y=0, cXX=0, cXY=0, cYX=0, cYY=0, cXW=0, cYW=0):
if x == 0 and y == 0: self.pos = [random.randrange(0, 1000), random.randrange(0, 1000)]
else: self.pos = [x, y]
self.cXX = cXX
self.cXY = cXY
self.cYX = cYX
self.cYY = cYY
self.cXW = cXW
self.cYW = cYW
self.goal = [500, 500]
self.move = [0, 0]
self.result = 0
self.random_coef = 0.05
def find_move(self):
self.move[0] = (self.pos[0] - self.goal[0]) * self.cXX + (self.pos[1] - self.goal[1]) * self.cXY + self.cXW
self.move[1] = (self.pos[0] - self.goal[0]) * self.cYX + (self.pos[1] - self.goal[1]) * self.cYY + self.cYW
def get_pos(self): return (int(self.pos[0]), int(self.pos[1]))
def update(self):
self.find_move()
dx = (self.goal[0] - self.pos[0])
dy = (self.goal[1] - self.pos[1])
dxm = (self.goal[0] - self.pos[0] - self.move[0])
dym = (self.goal[1] - self.pos[1] - self.move[1])
self.result = (500 * math.sqrt(2) - math.sqrt(dxm * dxm + dym * dym))# * (math.sqrt(dx * dx + dy * dy) - math.sqrt(dxm * dxm + dym * dym))
self.pos[0] += self.move[0]
self.pos[1] += self.move[1]
def get_cXX(self): return self.cXX + self.random_coef * random.randrange(-1000, 1000) / 1000.
def get_cXY(self): return self.cXY + self.random_coef * random.randrange(-1000, 1000) / 1000.
def get_cYX(self): return self.cYX + self.random_coef * random.randrange(-1000, 1000) / 1000.
def get_cYY(self): return self.cYY + self.random_coef * random.randrange(-1000, 1000) / 1000.
def get_cXW(self): return self.cXW + self.random_coef * random.randrange(-1000, 1000) / 1000.
def get_cYW(self): return self.cYW + self.random_coef * random.randrange(-1000, 1000) / 1000.
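
# main() below runs a simple evolutionary search: each generation the dot whose
# linear move rule brought it closest to the goal (highest `result`) is kept,
# and every other dot is respawned with a randomly perturbed copy of its
# coefficients via the get_c* methods above.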
def main():
os.environ['SDL_VIDEO_WINDOW_POS'] = "%d,%d" % (50, 30)
pygame.init()
display = pygame.display.set_mode((1000, 1000))
display.fill((0, 128, 255))
run = True
clock = time.Clock()
number = 100
dot = []
for i in range(number):
dot.append(Dot())
counter = 1
    while run:
counter += 1
if counter < 1000:
if counter % 2 == 0:
best = 0
for i in range(1, number):
if dot[i].result > dot[best].result:
best = i
dot[0] = dot[best]
for i in range(1, number):
dot[i] = Dot(0,0,dot[0].get_cXX(), dot[0].get_cXY(), dot[0].get_cYX(), dot[0].get_cYY(), dot[0].get_cXW(), dot[0].get_cYW())
else:
for i in range(number): dot[i].update()
pygame.display.set_caption(str(counter))
else:
if counter % 100 == 0:
best = 0
for i in range(1, number):
if dot[i].result > dot[best].result:
best = i
dot[0] = dot[best]
for i in range(1, number):
dot[i] = Dot(0,0,dot[0].get_cXX(), dot[0].get_cXY(), dot[0].get_cYX(), dot[0].get_cYY(), dot[0].get_cXW(), dot[0].get_cYW())
else:
for i in range(number): dot[i].update()
pygame.display.set_caption(" XX: " + str(dot[0].cXX)
+ " XY: " + str(dot[0].cXY)
+ " YX: " + str(dot[0].cYX)
+ " YY: " + str(dot[0].cYY)
+ " XW: " + str(dot[0].cXW)
+ " YW: " + str(dot[0].cYW)
)
display.fill((0, 180, 200))
for i in range(number):
pygame.draw.circle(display, (255, 255, 255), dot[i].get_pos(), 2)
pygame.draw.circle(display, (4, 5, 5), dot[i].goal, 7)
pygame.display.flip()
clock.tick(100)
pygame.quit()
if __name__ == '__main__':
main() |
py | b40c97161863a17164527725f6bd731348961b89 | # SPDX-License-Identifier: MIT
# Copyright (C) 2004-2008 Tristan Seligmann and Jonathan Jacobs
# Copyright (C) 2012-2014 Bastian Kleineidam
# Copyright (C) 2015-2020 Tobias Gruetzmacher
from ..scraper import _ParserScraper
from ..helpers import indirectStarter
class GoComics(_ParserScraper):
url = 'https://www.gocomics.com/'
imageSearch = '//picture[d:class("item-comic-image")]/img'
prevSearch = '//a[d:class("js-previous-comic")]'
latestSearch = '//div[d:class("gc-deck--cta-0")]//a'
starter = indirectStarter
help = 'Index format: yyyy/mm/dd'
def __init__(self, name, path, lang=None):
super(GoComics, self).__init__('GoComics/' + name)
self.session.add_throttle('www.gocomics.com', 1.0, 2.0)
self.url = 'https://www.gocomics.com/' + path
self.shortname = name
if lang:
self.lang = lang
def namer(self, image_url, page_url):
prefix, year, month, day = page_url.rsplit('/', 3)
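        # e.g. a page_url ending in ".../calvinandhobbes/2020/01/05" yields
        # "CalvinAndHobbes_20200105.gif"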
return "%s_%s%s%s.gif" % (self.shortname, year, month, day)
def getIndexStripUrl(self, index):
return '{}/{}'.format(self.url, index)
def shouldSkipUrl(self, url, data):
"""Skip pages without images."""
return data.xpath('//img[contains(@src, "content-error-missing")]')
@classmethod
def getmodules(cls):
return (
# old comics removed from the listing
cls('HeavenlyNostrils', 'heavenly-nostrils'),
cls('MyCage', 'mycage'),
# do not edit anything below since these entries are generated from
# scripts/gocomics.py
# START AUTOUPDATE
cls('1AndDone', '1-and-done'),
cls('9ChickweedLane', '9chickweedlane'),
cls('9ChickweedLaneClassics', '9-chickweed-lane-classics'),
cls('9To5', '9to5'),
cls('Aaggghhh', 'Aaggghhh', 'es'),
cls('AdamAtHome', 'adamathome'),
cls('AdultChildren', 'adult-children'),
cls('Agnes', 'agnes'),
cls('AJAndMagnus', 'aj-and-magnus'),
cls('AlisHouse', 'alis-house'),
cls('AlleyOop', 'alley-oop'),
cls('AmandaTheGreat', 'amanda-the-great'),
cls('AmericanChopSuey', 'american-chop-suey'),
cls('Andertoons', 'andertoons'),
cls('AndyCapp', 'andycapp'),
cls('AngryLittleGirls', 'angry-little-girls'),
cls('AnimalCrackers', 'animalcrackers'),
cls('Annie', 'annie'),
cls('AProblemLikeJamal', 'a-problem-like-jamal'),
cls('ArloAndJanis', 'arloandjanis'),
cls('AskACat', 'ask-a-cat'),
cls('AskAPortlySyndicatePerson', 'ask-a-portly-syndicate-person'),
cls('AskShagg', 'askshagg'),
cls('AtTavicat', 'tavicat'),
cls('AuntyAcid', 'aunty-acid'),
cls('BabyTrump', 'baby-trump'),
cls('BackInTheDay', 'backintheday'),
cls('BackToBC', 'back-to-bc'),
cls('Bacon', 'bacon'),
cls('Badlands', 'badlands'),
cls('BadMachinery', 'bad-machinery'),
cls('BadReporter', 'badreporter'),
cls('Baldo', 'baldo'),
cls('BaldoEnEspanol', 'baldoespanol', 'es'),
cls('BallardStreet', 'ballardstreet'),
cls('BananaTriangle', 'banana-triangle'),
cls('BarkeaterLake', 'barkeaterlake'),
cls('BarneyAndClyde', 'barneyandclyde'),
cls('BasicInstructions', 'basicinstructions'),
cls('BatchRejection', 'batch-rejection'),
cls('BC', 'bc'),
cls('BeanieTheBrownie', 'beanie-the-brownie'),
cls('Beardo', 'beardo'),
cls('BearWithMe', 'bear-with-me'),
cls('Ben', 'ben'),
cls('BenitinYEneas', 'muttandjeffespanol', 'es'),
cls('BergerAndWyse', 'berger-and-wyse'),
cls('BerkeleyMews', 'berkeley-mews'),
cls('Betty', 'betty'),
cls('BFGFSyndrome', 'bfgf-syndrome'),
cls('BigNate', 'bignate'),
cls('BigNateFirstClass', 'big-nate-first-class'),
cls('BigTop', 'bigtop'),
cls('BirdAndMoon', 'bird-and-moon'),
cls('Birdbrains', 'birdbrains'),
cls('BleekerTheRechargeableDog', 'bleeker'),
cls('Bliss', 'bliss'),
cls('BloomCounty', 'bloomcounty'),
cls('BloomCounty2019', 'bloom-county'),
cls('BobGorrell', 'bobgorrell'),
cls('BobTheSquirrel', 'bobthesquirrel'),
cls('BoNanas', 'bonanas'),
cls('Boomerangs', 'boomerangs'),
cls('Bottomliners', 'bottomliners'),
cls('BoundAndGagged', 'boundandgagged'),
cls('BrainSquirts', 'brain-squirts'),
cls('BreakingCatNews', 'breaking-cat-news'),
cls('BreakOfDay', 'break-of-day'),
cls('Brevity', 'brevity'),
cls('BrewsterRockit', 'brewsterrockit'),
cls('BrianMcFadden', 'brian-mcfadden'),
cls('BroomHilda', 'broomhilda'),
cls('Bully', 'bully'),
cls('Buni', 'buni'),
cls('BushyTales', 'bushy-tales'),
cls('CalvinAndHobbes', 'calvinandhobbes'),
cls('CalvinAndHobbesEnEspanol', 'calvinandhobbesespanol', 'es'),
cls('Candorville', 'candorville'),
cls('CatanaComics', 'little-moments-of-love'),
cls('CathyClassics', 'cathy'),
cls('CathyCommiserations', 'cathy-commiserations'),
cls('CatsCafe', 'cats-cafe'),
cls('CattitudeDoggonit', 'cattitude-doggonit'),
cls('CestLaVie', 'cestlavie'),
cls('CheapThrillsCuisine', 'cheap-thrills-cuisine'),
cls('CheerUpEmoKid', 'cheer-up-emo-kid'),
cls('ChipBok', 'chipbok'),
cls('ChrisBritt', 'chrisbritt'),
cls('ChuckDrawsThings', 'chuck-draws-things'),
cls('ChuckleBros', 'chucklebros'),
cls('CitizenDog', 'citizendog'),
cls('Claw', 'claw'),
cls('ClayBennett', 'claybennett'),
cls('ClayJones', 'clayjones'),
cls('Cleats', 'cleats'),
cls('CloseToHome', 'closetohome'),
cls('Computoon', 'compu-toon'),
cls('ConnieToTheWonnie', 'connie-to-the-wonnie'),
cls('Cornered', 'cornered'),
cls('CowAndBoyClassics', 'cowandboy'),
cls('CowTown', 'cowtown'),
cls('Crabgrass', 'crabgrass'),
cls('Crumb', 'crumb'),
cls('CulDeSac', 'culdesac'),
cls('DaddysHome', 'daddyshome'),
cls('DanaSummers', 'danasummers'),
cls('DanWasserman', 'danwasserman'),
cls('DarkSideOfTheHorse', 'darksideofthehorse'),
cls('DeepDarkFears', 'deep-dark-fears'),
cls('DeFlocked', 'deflocked'),
cls('DiamondLil', 'diamondlil'),
cls('DickTracy', 'dicktracy'),
cls('DilbertClassics', 'dilbert-classics'),
cls('DilbertEnEspanol', 'dilbert-en-espanol', 'es'),
cls('DinosaurComics', 'dinosaur-comics'),
cls('DogEatDoug', 'dogeatdoug'),
cls('DogsOfCKennel', 'dogsofckennel'),
cls('DomesticAbuse', 'domesticabuse'),
cls('DonBrutus', 'don-brutus', 'es'),
cls('DoodleForFood', 'doodle-for-food'),
cls('DoodleTown', 'doodle-town'),
cls('Doonesbury', 'doonesbury'),
cls('Drabble', 'drabble'),
cls('DrewSheneman', 'drewsheneman'),
cls('Drive', 'drive'),
cls('DumbwichCastle', 'dumbwich-castle'),
cls('EdgeCity', 'edge-city'),
cls('EdgeOfAdventure', 'edge-of-adventure'),
cls('Eek', 'eek'),
cls('ElCafDePoncho', 'el-cafe-de-poncho', 'es'),
cls('EmmyLou', 'emmy-lou'),
cls('Endtown', 'endtown'),
cls('EverydayPeopleCartoons', 'everyday-people-cartoons'),
cls('Eyebeam', 'eyebeam'),
cls('EyebeamClassic', 'eyebeam-classic'),
cls('FalseKnees', 'false-knees'),
cls('FamilyTree', 'familytree'),
cls('Farcus', 'farcus'),
cls('FatCats', 'fat-cats'),
cls('FloAndFriends', 'floandfriends'),
cls('FMinus', 'fminus'),
cls('FoolishMortals', 'foolish-mortals'),
cls('ForBetterOrForWorse', 'forbetterorforworse'),
cls('ForHeavensSake', 'forheavenssake'),
cls('FourEyes', 'four-eyes'),
cls('FowlLanguage', 'fowl-language'),
cls('FoxTrot', 'foxtrot'),
cls('FoxTrotClassics', 'foxtrotclassics'),
cls('FoxTrotEnEspanol', 'foxtrotespanol', 'es'),
cls('Francis', 'francis'),
cls('FrankAndErnest', 'frank-and-ernest'),
cls('Frazz', 'frazz'),
cls('FredBasset', 'fredbasset'),
cls('FredBassetEnEspanol', 'fredbassetespanol', 'es'),
cls('FreeRange', 'freerange'),
cls('FreshlySqueezed', 'freshlysqueezed'),
cls('FrogApplause', 'frogapplause'),
cls('Garfield', 'garfield'),
cls('GarfieldClassics', 'garfield-classics'),
cls('GarfieldEnEspanol', 'garfieldespanol', 'es'),
cls('GaryMarkstein', 'garymarkstein'),
cls('GaryVarvel', 'garyvarvel'),
cls('GasolineAlley', 'gasolinealley'),
cls('Geech', 'geech'),
cls('GetALife', 'getalife'),
cls('GetFuzzy', 'getfuzzy'),
cls('Gil', 'gil'),
cls('GilThorp', 'gilthorp'),
cls('GingerMeggs', 'gingermeggs'),
cls('GingerMeggsEnEspanol', 'gingermeggs-espanol', 'es'),
cls('GlasbergenCartoons', 'glasbergen-cartoons'),
cls('GManWebcomics', 'g-man-webcomics'),
cls('GnomeSyndicate', 'gnome-syndicate'),
cls('Goats', 'goats'),
cls('GoComicsFanArt', 'fan-art'),
cls('GrandAvenue', 'grand-avenue'),
cls('GrayMatters', 'gray-matters'),
cls('GreenHumour', 'green-humour'),
cls('HaircutPractice', 'haircut-practice'),
cls('HalfFull', 'half-full'),
cls('Harley', 'harley'),
cls('HeartOfTheCity', 'heartofthecity'),
cls('Heathcliff', 'heathcliff'),
cls('HeathcliffEnEspanol', 'heathcliffespanol', 'es'),
cls('HenryPayne', 'henrypayne'),
cls('HerbAndJamaal', 'herbandjamaal'),
cls('Herman', 'herman'),
cls('HomeAndAway', 'homeandaway'),
cls('HotComicsForCoolPeople', 'hot-comics-for-cool-people'),
cls('HowToCat', 'how-to-cat'),
cls('HUBRIS', 'hubris'),
cls('HumanCull', 'human-cull'),
cls('HutchOwen', 'hutch-owen'),
cls('ImagineThis', 'imaginethis'),
cls('ImogenQuest', 'imogen-quest'),
cls('InkPen', 'inkpen'),
cls('InSecurity', 'in-security'),
cls('InspectorDangersCrimeQuiz', 'inspector-dangers-crime-quiz'),
cls('InTheBleachers', 'inthebleachers'),
cls('InTheSticks', 'inthesticks'),
cls('InvisibleBread', 'invisible-bread'),
cls('ItsAllAboutYou', 'itsallaboutyou'),
cls('JackOhman', 'jackohman'),
cls('JakeLikesOnions', 'jake-likes-onions'),
cls('JanesWorld', 'janesworld'),
cls('JeffDanziger', 'jeffdanziger'),
cls('JeffStahler', 'jeffstahler'),
cls('JenSorensen', 'jen-sorensen'),
cls('JimBentonCartoons', 'jim-benton-cartoons'),
cls('JimMorin', 'jimmorin'),
cls('JimsJournal', 'jimsjournal'),
cls('JoeHeller', 'joe-heller'),
cls('JoelPett', 'joelpett'),
cls('JoeVanilla', 'joevanilla'),
cls('JoeyAlisonSayersComics', 'joey-alison-sayers-comics'),
cls('JohnDeering', 'johndeering'),
cls('JumpStart', 'jumpstart'),
cls('JunkDrawer', 'junk-drawer'),
cls('JustoYFranco', 'justo-y-franco', 'es'),
cls('JustSayUncle', 'just-say-uncle'),
cls('KenCatalino', 'kencatalino'),
cls('KevinKallaugher', 'kal'),
cls('KevinNecessaryEditorialCartoons', 'kevin-necessary-editorial-cartoons'),
cls('KidBeowulf', 'kid-beowulf'),
cls('KitchenCapers', 'kitchen-capers'),
cls('Kliban', 'kliban'),
cls('KlibansCats', 'klibans-cats'),
cls('LaCucaracha', 'lacucaracha'),
cls('LaCucarachaEnEspanol', 'la-cucaracha-en-espanol', 'es'),
cls('LaloAlcaraz', 'laloalcaraz'),
cls('LaloAlcarazEnEspanol', 'laloenespanol', 'es'),
cls('LardsWorldPeaceTips', 'lards-world-peace-tips'),
cls('LasHermanasStone', 'stonesoup_espanol', 'es'),
cls('LastKiss', 'lastkiss'),
cls('LaughingRedheadComics', 'laughing-redhead-comics'),
cls('LayLines', 'lay-lines'),
cls('LearnToSpeakCat', 'learn-to-speak-cat'),
cls('LibertyMeadows', 'libertymeadows'),
cls('LifeOnEarth', 'life-on-earth'),
cls('LilAbner', 'lil-abner'),
cls('Lio', 'lio'),
cls('LioEnEspanol', 'lioespanol', 'es'),
cls('LisaBenson', 'lisabenson'),
cls('LittleDogLost', 'littledoglost'),
cls('LittleFriedChickenAndSushi', 'little-fried-chicken-and-sushi'),
cls('LittleNemo', 'little-nemo'),
cls('LizClimoCartoons', 'liz-climo-cartoons'),
cls('Lola', 'lola'),
cls('LolaEnEspanol', 'lola-en-espanol', 'es'),
cls('LongStoryShort', 'long-story-short'),
cls('LooksGoodOnPaper', 'looks-good-on-paper'),
cls('LooseParts', 'looseparts'),
cls('LosOsorios', 'los-osorios', 'es'),
cls('LostSheep', 'lostsheep'),
cls('Luann', 'luann'),
cls('LuannAgainn', 'luann-againn'),
cls('LuannEnEspanol', 'luannspanish', 'es'),
cls('LuckyCow', 'luckycow'),
cls('LugNuts', 'lug-nuts'),
cls('Lunarbaboon', 'lunarbaboon'),
cls('M2Bulls', 'm2bulls'),
cls('Magnificatz', 'magnificatz'),
cls('Maintaining', 'maintaining'),
cls('MakingIt', 'making-it'),
cls('MariasDay', 'marias-day'),
cls('Marmaduke', 'marmaduke'),
cls('MarshallRamsey', 'marshallramsey'),
cls('MattBors', 'matt-bors'),
cls('MattDavies', 'mattdavies'),
cls('MattWuerker', 'mattwuerker'),
cls('MediumLarge', 'medium-large'),
cls('MessycowComics', 'messy-cow'),
cls('MexikidStories', 'mexikid-stories'),
cls('MichaelRamirez', 'michaelramirez'),
cls('MikeDuJour', 'mike-du-jour'),
cls('MikeLester', 'mike-lester'),
cls('MikeLuckovich', 'mikeluckovich'),
cls('MissPeach', 'miss-peach'),
cls('Mo', 'mo'),
cls('ModeratelyConfused', 'moderately-confused'),
cls('Momma', 'momma'),
cls('MomsCancer', 'moms-cancer'),
cls('Monty', 'monty'),
cls('MontyDiaros', 'monty-diaros', 'es'),
cls('MotleyClassics', 'motley-classics'),
cls('MrLowe', 'mr-lowe'),
cls('MustardAndBoloney', 'mustard-and-boloney'),
cls('MuttAndJeff', 'muttandjeff'),
cls('MyCageNewAndOld', 'mycage'),
cls('MyDadIsDracula', 'my-dad-is-dracula'),
cls('MythTickle', 'mythtickle'),
cls('Nancy', 'nancy'),
cls('NancyClassics', 'nancy-classics'),
cls('NateElGrande', 'nate-el-grande', 'es'),
cls('NestHeads', 'nestheads'),
cls('NEUROTICA', 'neurotica'),
cls('NewAdventuresOfQueenVictoria', 'thenewadventuresofqueenvictoria'),
cls('NextDoorNeighbors', 'next-door-neighbors'),
cls('NickAnderson', 'nickanderson'),
cls('NickAndZuzu', 'nick-and-zuzu'),
cls('NonSequitur', 'nonsequitur'),
cls('NothingIsNotSomething', 'nothing-is-not-something'),
cls('NotInventedHere', 'not-invented-here'),
cls('NowRecharging', 'now-recharging'),
cls('OffTheMark', 'offthemark'),
cls('OhBrother', 'oh-brother'),
cls('OllieAndQuentin', 'ollie-and-quentin'),
cls('OnAClaireDay', 'onaclaireday'),
cls('OneBigHappy', 'onebighappy'),
cls('OrdinaryBill', 'ordinary-bill'),
cls('OriginsOfTheSundayComics', 'origins-of-the-sunday-comics'),
cls('OurSuperAdventure', 'our-super-adventure'),
cls('Outland', 'outland'),
cls('OutOfTheGenePoolReRuns', 'outofthegenepool'),
cls('Overboard', 'overboard'),
cls('OverboardEnEspanol', 'overboardespanol', 'es'),
cls('OverTheHedge', 'overthehedge'),
cls('OzyAndMillie', 'ozy-and-millie'),
cls('PatOliphant', 'patoliphant'),
cls('PCAndPixel', 'pcandpixel'),
cls('Peanuts', 'peanuts'),
cls('PeanutsBegins', 'peanuts-begins'),
cls('PearlsBeforeSwine', 'pearlsbeforeswine'),
cls('Periquita', 'periquita', 'es'),
cls('PerlasParaLosCerdos', 'perlas-para-los-cerdos', 'es'),
cls('PerryBibleFellowship', 'perry-bible-fellowship'),
cls('PhilHands', 'phil-hands'),
cls('PhoebeAndHerUnicorn', 'phoebe-and-her-unicorn'),
cls('Pibgorn', 'pibgorn'),
cls('PibgornSketches', 'pibgornsketches'),
cls('Pickles', 'pickles'),
cls('Pinkerton', 'pinkerton'),
cls('PirateMike', 'pirate-mike'),
cls('PleaseListenToMe', 'please-listen-to-me'),
cls('Pluggers', 'pluggers'),
cls('PoochCafe', 'poochcafe'),
cls('Poorcraft', 'poorcraft'),
cls('PoorlyDrawnLines', 'poorly-drawn-lines'),
cls('PopCultureShockTherapy', 'pop-culture-shock-therapy'),
cls('PotShots', 'pot-shots'),
cls('PreTeena', 'preteena'),
cls('PricklyCity', 'pricklycity'),
cls('PromisesPromises', 'promises-promises'),
cls('QuestionableQuotebook', 'questionable-quotebook'),
cls('RabbitsAgainstMagic', 'rabbitsagainstmagic'),
cls('RaisingDuncan', 'raising-duncan'),
cls('RandolphItch2Am', 'randolphitch'),
cls('RealityCheck', 'realitycheck'),
cls('RealLifeAdventures', 'reallifeadventures'),
cls('RebeccaHendin', 'rebecca-hendin'),
cls('RedAndRover', 'redandrover'),
cls('RedMeat', 'redmeat'),
cls('RichardsPoorAlmanac', 'richards-poor-almanac'),
cls('RipHaywire', 'riphaywire'),
cls('RipleysAunqueUstedNoLoCrea', 'ripleys-en-espanol', 'es'),
cls('RipleysBelieveItOrNot', 'ripleysbelieveitornot'),
cls('RobbieAndBobby', 'robbie-and-bobby'),
cls('RobertAriail', 'robert-ariail'),
cls('RobRogers', 'robrogers'),
cls('Rosebuds', 'rosebuds'),
cls('RoseIsRose', 'roseisrose'),
cls('Rubes', 'rubes'),
cls('RudyPark', 'rudypark'),
cls('SarahsScribbles', 'sarahs-scribbles'),
cls('SaturdayMorningBreakfastCereal', 'saturday-morning-breakfast-cereal'),
cls('SavageChickens', 'savage-chickens'),
cls('ScaryGary', 'scarygary'),
cls('ScenesFromAMultiverse', 'scenes-from-a-multiverse'),
cls('ScottStantis', 'scottstantis'),
cls('Sheldon', 'sheldon'),
cls('ShenComix', 'shen-comix'),
cls('ShirleyAndSonClassics', 'shirley-and-son-classics'),
cls('Shoe', 'shoe'),
cls('Shoecabbage', 'shoecabbage'),
cls('ShutterbugFollies', 'shutterbug-follies'),
cls('SigneWilkinson', 'signewilkinson'),
cls('SketchsharkComics', 'sketchshark-comics'),
cls('SkinHorse', 'skinhorse'),
cls('Skippy', 'skippy'),
cls('SmallPotatoes', 'small-potatoes'),
cls('SnoopyEnEspanol', 'peanuts-espanol', 'es'),
cls('Snowflakes', 'snowflakes'),
cls('SnowSez', 'snow-sez'),
cls('Speechless', 'speechless'),
cls('SpeedBump', 'speedbump'),
cls('SpiritOfTheStaircase', 'spirit-of-the-staircase'),
cls('SpotTheFrog', 'spot-the-frog'),
cls('Starling', 'starling'),
cls('SteveBenson', 'stevebenson'),
cls('SteveBreen', 'stevebreen'),
cls('SteveKelley', 'stevekelley'),
cls('StickyComics', 'sticky-comics'),
cls('StoneSoup', 'stonesoup'),
cls('StoneSoupClassics', 'stone-soup-classics'),
cls('StrangeBrew', 'strangebrew'),
cls('StuartCarlson', 'stuartcarlson'),
cls('SunnyStreet', 'sunny-street'),
cls('SunshineState', 'sunshine-state'),
cls('SuperFunPakComix', 'super-fun-pak-comix'),
cls('SwanEaters', 'swan-eaters'),
cls('SweetAndSourPork', 'sweet-and-sour-pork'),
cls('Sylvia', 'sylvia'),
cls('TankMcNamara', 'tankmcnamara'),
cls('Tarzan', 'tarzan'),
cls('TarzanEnEspanol', 'tarzan-en-espanol', 'es'),
cls('TedRall', 'ted-rall'),
cls('TenCats', 'ten-cats'),
cls('TextsFromMittens', 'texts-from-mittens'),
cls('Thatababy', 'thatababy'),
cls('ThatIsPriceless', 'that-is-priceless'),
cls('ThatNewCarlSmell', 'that-new-carl-smell'),
cls('TheAcademiaWaltz', 'academiawaltz'),
cls('TheAdventuresOfBusinessCat', 'the-adventures-of-business-cat'),
cls('TheArgyleSweater', 'theargylesweater'),
cls('TheAwkwardYeti', 'the-awkward-yeti'),
cls('TheBarn', 'thebarn'),
cls('TheBentPinky', 'the-bent-pinky'),
cls('TheBigPicture', 'thebigpicture'),
cls('TheBoondocks', 'boondocks'),
cls('TheBornLoser', 'the-born-loser'),
cls('TheBuckets', 'thebuckets'),
cls('TheCity', 'thecity'),
cls('TheComicStripThatHasAFinaleEveryDay', 'the-comic-strip-that-has-a-finale-every-day'),
cls('TheDailyDrawing', 'the-daily-drawing'),
cls('TheDinetteSet', 'dinetteset'),
cls('TheDoozies', 'thedoozies'),
cls('TheDuplex', 'duplex'),
cls('TheElderberries', 'theelderberries'),
cls('TheFlyingMcCoys', 'theflyingmccoys'),
cls('TheFuscoBrothers', 'thefuscobrothers'),
cls('TheGrizzwells', 'thegrizzwells'),
cls('TheHumbleStumble', 'humble-stumble'),
cls('TheKChronicles', 'thekchronicles'),
cls('TheKnightLife', 'theknightlife'),
cls('TheLastMechanicalMonster', 'the-last-mechanical-monster'),
cls('TheLeftyBoscoPictureShow', 'leftyboscopictureshow'),
cls('TheMartianConfederacy', 'the-martian-confederacy'),
cls('TheMeaningOfLila', 'meaningoflila'),
cls('TheMiddleAge', 'the-middle-age'),
cls('TheMiddletons', 'themiddletons'),
cls('TheNorm40', 'the-norm-4-0'),
cls('TheNormClassics', 'thenorm'),
cls('TheOtherCoast', 'theothercoast'),
cls('TheOtherEnd', 'the-other-end'),
cls('TheQuixoteSyndrome', 'the-quixote-syndrome'),
cls('TheUpsideDownWorldOfGustaveVerbeek', 'upside-down-world-of-gustave-verbeek'),
cls('TheWanderingMelon', 'the-wandering-melon'),
cls('TheWizardOfIdSpanish', 'wizardofidespanol', 'es'),
cls('TheWorriedWell', 'the-worried-well'),
cls('think', 'think'),
cls('ThinLines', 'thinlines'),
cls('TimCampbell', 'tim-campbell'),
cls('TimEagan', 'tim-eagan'),
cls('TinySepuku', 'tinysepuku'),
cls('TOBY', 'toby'),
cls('TodaysSzep', 'todays-szep'),
cls('TomTheDancingBug', 'tomthedancingbug'),
cls('TomToles', 'tomtoles'),
cls('TooMuchCoffeeMan', 'toomuchcoffeeman'),
cls('ToughTown', 'tough-town'),
cls('Trivquiz', 'trivquiz'),
cls('Trucutu', 'trucutu', 'es'),
cls('TruthFacts', 'truth-facts'),
cls('Tutelandia', 'tutelandia', 'es'),
cls('TwoPartyOpera', 'two-party-opera'),
cls('UnderpantsAndOverbites', 'underpants-and-overbites'),
cls('UnderstandingChaos', 'understanding-chaos'),
cls('UnstrangePhenomena', 'unstrange-phenomena'),
cls('ViewsAfrica', 'viewsafrica'),
cls('ViewsAmerica', 'viewsamerica'),
cls('ViewsAsia', 'viewsasia'),
cls('ViewsBusiness', 'viewsbusiness'),
cls('ViewsEurope', 'viewseurope'),
cls('ViewsLatinAmerica', 'viewslatinamerica'),
cls('ViewsMidEast', 'viewsmideast'),
cls('ViewsOfTheWorld', 'viewsoftheworld'),
cls('ViiviAndWagner', 'viivi-and-wagner'),
cls('WallaceTheBrave', 'wallace-the-brave'),
cls('WaltHandelsman', 'walthandelsman'),
cls('Warped', 'warped'),
cls('WatchYourHead', 'watchyourhead'),
cls('Wawawiwa', 'wawawiwa'),
cls('WaynoVision', 'waynovision'),
cls('WebcomicName', 'webcomic-name'),
cls('WeePals', 'weepals'),
cls('WhyattCartoons', 'whyatt-cartoons'),
cls('Widdershins', 'widdershins'),
cls('WideOpen', 'wide-open'),
cls('WinLoseDrew', 'drewlitton'),
cls('Winston', 'winston'),
cls('WizardOfId', 'wizardofid'),
cls('WizardOfIdClassics', 'wizard-of-id-classics'),
cls('Wondermark', 'wondermark'),
cls('WorkingDaze', 'working-daze'),
cls('WorkingItOut', 'workingitout'),
cls('WrongHands', 'wrong-hands'),
cls('WTDuck', 'wtduck'),
cls('WuMo', 'wumo'),
cls('WumoEnEspanol', 'wumoespanol', 'es'),
cls('Yaffle', 'yaffle'),
cls('YesImHotInThis', 'yesimhotinthis'),
cls('ZackHill', 'zackhill'),
cls('ZenPencils', 'zen-pencils'),
cls('Ziggy', 'ziggy'),
cls('ZiggyEnEspanol', 'ziggyespanol', 'es'),
# END AUTOUPDATE
)
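# Editor's sketch (not part of the generated list above): how one entry returned
# by getmodules() resolves an index strip. 'Garfield'/'garfield' is taken from the
# list; the date is arbitrary and only illustrates the 'yyyy/mm/dd' index format
# announced in `help`.
def _example_index_url():
    scraper = GoComics('Garfield', 'garfield')
    # getIndexStripUrl('2020/01/01') -> 'https://www.gocomics.com/garfield/2020/01/01'
    return scraper.getIndexStripUrl('2020/01/01')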
|
py | b40c9785e58233d12970429b57d2b0b71a76675c | # -*- coding: utf-8 -*-
"""
Created on Sun Jul 2 16:32:14 2017
@author: pfierens
"""
from os import listdir
from os.path import join, isdir
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
from methods import *
from scipy import ndimage as im
class PCA(object):
trained = False
@staticmethod
def train(type, data):
images_directory = data['images_dir']; area = data['v_size'] * data['h_size']
images_per_person = data['images_quantity_per_person']; number_of_people = data['people_quantity']
training_n = data['training_n']; test_n = data['test_n']
subjects = [f for f in listdir(images_directory) if isdir(join(images_directory, f))]
images = np.zeros([training_n * number_of_people, area])
imagetst = np.zeros([test_n * number_of_people, area])
training_image = 0; test_image = 0; person_image = 0; subject_number = 0; training_names = []; test_names = []
for subject in subjects:
for k in range(1, images_per_person + 1):
a = im.imread(images_directory + '/' + subject + '/{}'.format(k) + '.pgm')
if person_image < training_n:
images[training_image, :] = (np.reshape(a, [1, area]) - 127.5) / 127.5
training_names.append(str(subject))
training_image += 1
else:
imagetst[test_image, :] = (np.reshape(a, [1, area]) - 127.5) / 127.5
test_names.append(str(subject))
test_image += 1
person_image += 1
subject_number += 1
if subject_number > number_of_people - 1:
break
person_image = 0
meanimage = np.mean(images, 0)
fig, axes = plt.subplots(1, 1)
axes.imshow(np.reshape(meanimage, [data['v_size'], data['h_size']]) * 255, cmap='gray')
fig.suptitle('Imagen media')
images = [images[k, :] - meanimage for k in range(images.shape[0])]
imagetst = [imagetst[k, :] - meanimage for k in range(imagetst.shape[0])]
images_matrix = np.asmatrix(images)
S, V = getSingluarValuesAndEigenVectors(images_matrix)
nmax = V.shape[0]
nmax = 100
accs = np.zeros([nmax, 1])
if type == 'test':
print "Testing..."
for neigen in range(1, nmax):
B = V[0:neigen, :]
improy = np.dot(images, np.transpose(B))
imtstproy = np.dot(imagetst, np.transpose(B))
clf = svm.LinearSVC()
clf.fit(improy, training_names)
accs[neigen] = clf.score(imtstproy, test_names)
print('Precisión con {0} autocaras: {1} %\n'.format(neigen, accs[neigen] * 100))
fig, axes = plt.subplots(1, 1)
axes.semilogy(range(nmax), (1 - accs) * 100)
axes.set_xlabel('No. autocaras')
axes.grid(which='Both')
fig.suptitle('Error')
elif type == 'predict':
print "Predicting"
picture = im.imread(data['path'])
fig, axes = plt.subplots(1, 1)
axes.imshow(picture, cmap='gray')
fig.suptitle('Image to predict')
plt.show()
picture = np.reshape((picture - 127.5) / 127.5, [1, data['h_size'] * data['v_size']])
B = V[0:60, :]
improy = np.dot(images, np.transpose(B))
clf = svm.LinearSVC()
clf.fit(improy, training_names)
picture -= meanimage
pictureProy = np.dot(picture, B.T)
sub = clf.predict(pictureProy)[0]
print("Subject is: {} \n".format(sub))
picture = im.imread(images_directory + '/' + sub + '/1.pgm')
fig, axes = plt.subplots(1, 1)
axes.imshow(picture, cmap='gray')
fig.suptitle('Subject Predicted')
plt.show()
else:
print "Error"
@staticmethod
def test(data):
PCA.train('test', data)
@staticmethod
def predict(data):
PCA.train('predict', data)
|
py | b40c984482eb2de4bc207564fc752c92725ca652 | from tex2py import tex2py
from typing import List
import os
from subprocess import Popen, PIPE
import json
from .exception import \
LatexParserException,\
LatexToTextException,\
MaxSectionSizeException,\
DetexBinaryAbsent
from .semantic_parsing import Section
def get_tex_tree(tex_path):
with open(tex_path,'r') as f:
data = f.read()
tex_root_node = tex2py(data)
return tex_root_node
def split_match(split_value:str,splitting_string:str,split_upto=0.5,split_bins=10):
"""split_match
Splits a Keep Splitting a `splitting_string` based on the value of `split_value`.
It does so by removing `split_upto`% of the string until there is a match. or return no match.
`split_upto` specifies the size after which it will stop splitting.
:param split_value: [String]
:param splitting_string: [description]
:param split_upto: [float], defaults to 0.5
:param split_bins: [int] the number bins inside which each `split_value` will fall under.
eg.
split_value = "Deep Learning Techniques for ASD Diagnosis and Rehabilitation'"
split_bins=3,
split_upto=0.5
then the text will be checked for matches against :
- ['Deep Learning Techniques for','Deep Learning Techniques for ASD','Deep Learning Techniques for ASD Diagnosis','Deep Learning Techniques for ASD Diagnosis and' ....]
- The purpose of doing this is to ensure a partial match of a string can help extract the split text
:returns splitted_text : List[String] : [s1,s2] or []
"""
split_value = split_value.split(' ') # This make it remove words instead of the characters.
sb = [i for i in range(split_bins)]
split_mul = (1-split_upto)/split_bins
# Spread the `split_bins` according to the how much the split needs to happen.
split_range = [1-float((i)*split_mul) for i in sb]
# index at which the new `split_value` will be determined. Order is descending to ensure largest match.
slice_indices = [int(len(split_value)*split_val) for split_val in split_range]
# creates the split strings.
split_values_to_checks = [' '.join(split_value[:index]) for index in slice_indices]
for split_val in split_values_to_checks:
        if split_val == '':  # Skip an empty separator.
continue
current_text_split = splitting_string.split(split_val)
if len(current_text_split) > 1:
return current_text_split
return []
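# Editor's worked example (made-up strings, not project data): candidates are
# tried from the longest word prefix of `split_value` down to the shortest, so
# the call below only succeeds on the 3-word prefix 'Deep Learning Techniques'.
def _split_match_example():
    # returns ['preamble text ', ' body text']
    return split_match('Deep Learning Techniques for ASD',
                       'preamble text Deep Learning Techniques body text',
                       split_upto=0.5, split_bins=3)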
class LatexToText():
"""LatexToText
This class will manage the conversion of the latex document into text.
It uses `detex` to extract the text from tex Files.
"""
detex_path = os.path.join(os.path.abspath(os.path.dirname(__file__)),'detex')
def __init__(self,detex_path=None):
# check binary existance.
if not self.binary_exists(self.detex_path):
if detex_path is None:
raise DetexBinaryAbsent()
elif not self.binary_exists(detex_path):
raise DetexBinaryAbsent()
else:
self.detex_path = detex_path
@staticmethod
def binary_exists(detex_path):
try:
os.stat(detex_path)
except:
return False
return True
def __call__(self,latex_document_path):
try:
process = Popen([self.detex_path, latex_document_path], stdout=PIPE)
(output, err) = process.communicate()
exit_code = process.wait()
return output
except Exception as e:
print(e)
raise LatexToTextException()
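# Editor's usage sketch (not part of the original module): converting one .tex
# file to plain text through the bundled opendetex binary. The path argument is
# whatever document the caller has on disk; nothing here ships with the project.
def _latex_to_text_example(tex_path):
    converter = LatexToText()   # raises DetexBinaryAbsent if no detex binary is found
    return converter(tex_path)  # bytes of de-TeX'ed text, as produced by __call__ above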
class LatexInformationParser(object):
"""LatexInformationParser
This is the parent class responsible for extraction of Processed information from
Latex Based Documents. Process follows the below steps:
## `section_extraction`
Use the `section_extraction` method to extract the document information
sections from the single/document Latex setup. This returns a Sequential Tree like structure with sub sequences.
This will use `tex2py` like functions to extract the document structure from the tex documents.
## `text_extraction`:
Will extract text from the Latex File. uses `opendetex` to extract the text from latex.
## `collate_sections` :
This will collate the information text and sections extracted based on the strategy of the extraction.
"""
max_section_limit = 30 # Maximum number of sections to allow for extraction
def __init__(self,max_section_limit=20,detex_path=None):
self.max_section_limit = max_section_limit
self.text_extractor = LatexToText(detex_path=detex_path)
def section_extraction(self,tex_file_path) -> List[Section]:
raise NotImplementedError()
@staticmethod
def get_subsection_names(tex_node):
subsections = []
try:
subsections = list(tex_node.subsections)
subsections = [i.string for i in subsections]
except:
pass
return subsections
def text_extraction(self):
raise NotImplementedError()
def collate_sections(self):
raise NotImplementedError()
def from_arxiv_paper(self,paper):
raise NotImplementedError()
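# Editor's sketch (not part of the original module): the call order a concrete
# subclass is expected to support, following the three steps described in the
# class docstring. `parser` is a hypothetical subclass instance; the collation
# signature is kept exactly as in the stub above.
def _example_parser_flow(parser, tex_file_path):
    sections = parser.section_extraction(tex_file_path)  # 1. structure via tex2py
    raw_text = parser.text_extractor(tex_file_path)       # 2. plain text via opendetex
    collated = parser.collate_sections()                   # 3. subclass-specific merge
    return sections, raw_text, collated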
|
py | b40c9863ef981a27c8dd7586bc830eb84ced1e97 | # coding: utf-8
"""
FlashArray REST API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.10
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flasharray.FA_2_10 import models
class Performance(object):
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'bytes_per_mirrored_write': 'int',
'bytes_per_op': 'int',
'bytes_per_read': 'int',
'bytes_per_write': 'int',
'mirrored_write_bytes_per_sec': 'int',
'mirrored_writes_per_sec': 'int',
'qos_rate_limit_usec_per_mirrored_write_op': 'int',
'qos_rate_limit_usec_per_read_op': 'int',
'qos_rate_limit_usec_per_write_op': 'int',
'queue_usec_per_mirrored_write_op': 'int',
'queue_usec_per_read_op': 'int',
'queue_usec_per_write_op': 'int',
'read_bytes_per_sec': 'int',
'reads_per_sec': 'int',
'san_usec_per_mirrored_write_op': 'int',
'san_usec_per_read_op': 'int',
'san_usec_per_write_op': 'int',
'service_usec_per_mirrored_write_op': 'int',
'service_usec_per_read_op': 'int',
'service_usec_per_write_op': 'int',
'time': 'int',
'usec_per_mirrored_write_op': 'int',
'usec_per_read_op': 'int',
'usec_per_write_op': 'int',
'write_bytes_per_sec': 'int',
'writes_per_sec': 'int',
'service_usec_per_read_op_cache_reduction': 'float'
}
attribute_map = {
'bytes_per_mirrored_write': 'bytes_per_mirrored_write',
'bytes_per_op': 'bytes_per_op',
'bytes_per_read': 'bytes_per_read',
'bytes_per_write': 'bytes_per_write',
'mirrored_write_bytes_per_sec': 'mirrored_write_bytes_per_sec',
'mirrored_writes_per_sec': 'mirrored_writes_per_sec',
'qos_rate_limit_usec_per_mirrored_write_op': 'qos_rate_limit_usec_per_mirrored_write_op',
'qos_rate_limit_usec_per_read_op': 'qos_rate_limit_usec_per_read_op',
'qos_rate_limit_usec_per_write_op': 'qos_rate_limit_usec_per_write_op',
'queue_usec_per_mirrored_write_op': 'queue_usec_per_mirrored_write_op',
'queue_usec_per_read_op': 'queue_usec_per_read_op',
'queue_usec_per_write_op': 'queue_usec_per_write_op',
'read_bytes_per_sec': 'read_bytes_per_sec',
'reads_per_sec': 'reads_per_sec',
'san_usec_per_mirrored_write_op': 'san_usec_per_mirrored_write_op',
'san_usec_per_read_op': 'san_usec_per_read_op',
'san_usec_per_write_op': 'san_usec_per_write_op',
'service_usec_per_mirrored_write_op': 'service_usec_per_mirrored_write_op',
'service_usec_per_read_op': 'service_usec_per_read_op',
'service_usec_per_write_op': 'service_usec_per_write_op',
'time': 'time',
'usec_per_mirrored_write_op': 'usec_per_mirrored_write_op',
'usec_per_read_op': 'usec_per_read_op',
'usec_per_write_op': 'usec_per_write_op',
'write_bytes_per_sec': 'write_bytes_per_sec',
'writes_per_sec': 'writes_per_sec',
'service_usec_per_read_op_cache_reduction': 'service_usec_per_read_op_cache_reduction'
}
required_args = {
}
def __init__(
self,
bytes_per_mirrored_write=None, # type: int
bytes_per_op=None, # type: int
bytes_per_read=None, # type: int
bytes_per_write=None, # type: int
mirrored_write_bytes_per_sec=None, # type: int
mirrored_writes_per_sec=None, # type: int
qos_rate_limit_usec_per_mirrored_write_op=None, # type: int
qos_rate_limit_usec_per_read_op=None, # type: int
qos_rate_limit_usec_per_write_op=None, # type: int
queue_usec_per_mirrored_write_op=None, # type: int
queue_usec_per_read_op=None, # type: int
queue_usec_per_write_op=None, # type: int
read_bytes_per_sec=None, # type: int
reads_per_sec=None, # type: int
san_usec_per_mirrored_write_op=None, # type: int
san_usec_per_read_op=None, # type: int
san_usec_per_write_op=None, # type: int
service_usec_per_mirrored_write_op=None, # type: int
service_usec_per_read_op=None, # type: int
service_usec_per_write_op=None, # type: int
time=None, # type: int
usec_per_mirrored_write_op=None, # type: int
usec_per_read_op=None, # type: int
usec_per_write_op=None, # type: int
write_bytes_per_sec=None, # type: int
writes_per_sec=None, # type: int
service_usec_per_read_op_cache_reduction=None, # type: float
):
"""
Keyword args:
bytes_per_mirrored_write (int): The average I/O size per mirrored write. Measured in bytes.
bytes_per_op (int): The average I/O size for both read and write (all) operations.
bytes_per_read (int): The average I/O size per read. Measured in bytes.
bytes_per_write (int): The average I/O size per write. Measured in bytes.
mirrored_write_bytes_per_sec (int): The number of mirrored bytes written per second.
mirrored_writes_per_sec (int): The number of mirrored writes per second.
qos_rate_limit_usec_per_mirrored_write_op (int): The average time it takes the array to process a mirrored I/O write request. Measured in microseconds.
qos_rate_limit_usec_per_read_op (int): The average time spent waiting due to QoS rate limiting for a read request. Measured in microseconds.
qos_rate_limit_usec_per_write_op (int): The average time that a write I/O request spends waiting as a result of the volume reaching its QoS bandwidth limit. Measured in microseconds.
queue_usec_per_mirrored_write_op (int): The average time that a mirrored write I/O request spends in the array waiting to be served. Measured in microseconds.
queue_usec_per_read_op (int): The average time that a read I/O request spends in the array waiting to be served. Measured in microseconds.
queue_usec_per_write_op (int): The average time that a write I/O request spends in the array waiting to be served. Measured in microseconds.
read_bytes_per_sec (int): The number of bytes read per second.
reads_per_sec (int): The number of read requests processed per second.
san_usec_per_mirrored_write_op (int): The average time required to transfer data from the initiator to the array for a mirrored write request. Measured in microseconds.
san_usec_per_read_op (int): The average time required to transfer data from the array to the initiator for a read request. Measured in microseconds.
san_usec_per_write_op (int): The average time required to transfer data from the initiator to the array for a write request. Measured in microseconds.
service_usec_per_mirrored_write_op (int): The average time required for the array to service a mirrored write request. Measured in microseconds.
service_usec_per_read_op (int): The average time required for the array to service a read request. Measured in microseconds.
service_usec_per_write_op (int): The average time required for the array to service a write request. Measured in microseconds.
time (int): The time when the sample performance data was taken. Measured in milliseconds since the UNIX epoch.
usec_per_mirrored_write_op (int): The average time it takes the array to process a mirrored I/O write request. Measured in microseconds. The average time does not include SAN time, queue time, or QoS rate limit time.
usec_per_read_op (int): The average time it takes the array to process an I/O read request. Measured in microseconds. The average time does not include SAN time, queue time, or QoS rate limit time.
usec_per_write_op (int): The average time it takes the array to process an I/O write request. Measured in microseconds. The average time does not include SAN time, queue time, or QoS rate limit time.
write_bytes_per_sec (int): The number of bytes written per second.
writes_per_sec (int): The number of write requests processed per second.
service_usec_per_read_op_cache_reduction (float): The percentage reduction in `service_usec_per_read_op` due to data cache hits. For example, a value of 0.25 indicates that the value of `service_usec_per_read_op` is 25% lower than it would have been without any data cache hits.
"""
if bytes_per_mirrored_write is not None:
self.bytes_per_mirrored_write = bytes_per_mirrored_write
if bytes_per_op is not None:
self.bytes_per_op = bytes_per_op
if bytes_per_read is not None:
self.bytes_per_read = bytes_per_read
if bytes_per_write is not None:
self.bytes_per_write = bytes_per_write
if mirrored_write_bytes_per_sec is not None:
self.mirrored_write_bytes_per_sec = mirrored_write_bytes_per_sec
if mirrored_writes_per_sec is not None:
self.mirrored_writes_per_sec = mirrored_writes_per_sec
if qos_rate_limit_usec_per_mirrored_write_op is not None:
self.qos_rate_limit_usec_per_mirrored_write_op = qos_rate_limit_usec_per_mirrored_write_op
if qos_rate_limit_usec_per_read_op is not None:
self.qos_rate_limit_usec_per_read_op = qos_rate_limit_usec_per_read_op
if qos_rate_limit_usec_per_write_op is not None:
self.qos_rate_limit_usec_per_write_op = qos_rate_limit_usec_per_write_op
if queue_usec_per_mirrored_write_op is not None:
self.queue_usec_per_mirrored_write_op = queue_usec_per_mirrored_write_op
if queue_usec_per_read_op is not None:
self.queue_usec_per_read_op = queue_usec_per_read_op
if queue_usec_per_write_op is not None:
self.queue_usec_per_write_op = queue_usec_per_write_op
if read_bytes_per_sec is not None:
self.read_bytes_per_sec = read_bytes_per_sec
if reads_per_sec is not None:
self.reads_per_sec = reads_per_sec
if san_usec_per_mirrored_write_op is not None:
self.san_usec_per_mirrored_write_op = san_usec_per_mirrored_write_op
if san_usec_per_read_op is not None:
self.san_usec_per_read_op = san_usec_per_read_op
if san_usec_per_write_op is not None:
self.san_usec_per_write_op = san_usec_per_write_op
if service_usec_per_mirrored_write_op is not None:
self.service_usec_per_mirrored_write_op = service_usec_per_mirrored_write_op
if service_usec_per_read_op is not None:
self.service_usec_per_read_op = service_usec_per_read_op
if service_usec_per_write_op is not None:
self.service_usec_per_write_op = service_usec_per_write_op
if time is not None:
self.time = time
if usec_per_mirrored_write_op is not None:
self.usec_per_mirrored_write_op = usec_per_mirrored_write_op
if usec_per_read_op is not None:
self.usec_per_read_op = usec_per_read_op
if usec_per_write_op is not None:
self.usec_per_write_op = usec_per_write_op
if write_bytes_per_sec is not None:
self.write_bytes_per_sec = write_bytes_per_sec
if writes_per_sec is not None:
self.writes_per_sec = writes_per_sec
if service_usec_per_read_op_cache_reduction is not None:
self.service_usec_per_read_op_cache_reduction = service_usec_per_read_op_cache_reduction
def __setattr__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `Performance`".format(key))
if key == "bytes_per_mirrored_write" and value is not None:
if value < 0:
raise ValueError("Invalid value for `bytes_per_mirrored_write`, must be a value greater than or equal to `0`")
if key == "bytes_per_op" and value is not None:
if value < 0:
raise ValueError("Invalid value for `bytes_per_op`, must be a value greater than or equal to `0`")
if key == "bytes_per_read" and value is not None:
if value < 0:
raise ValueError("Invalid value for `bytes_per_read`, must be a value greater than or equal to `0`")
if key == "bytes_per_write" and value is not None:
if value < 0:
raise ValueError("Invalid value for `bytes_per_write`, must be a value greater than or equal to `0`")
if key == "mirrored_write_bytes_per_sec" and value is not None:
if value < 0:
raise ValueError("Invalid value for `mirrored_write_bytes_per_sec`, must be a value greater than or equal to `0`")
if key == "mirrored_writes_per_sec" and value is not None:
if value < 0:
raise ValueError("Invalid value for `mirrored_writes_per_sec`, must be a value greater than or equal to `0`")
if key == "qos_rate_limit_usec_per_mirrored_write_op" and value is not None:
if value < 0:
raise ValueError("Invalid value for `qos_rate_limit_usec_per_mirrored_write_op`, must be a value greater than or equal to `0`")
if key == "qos_rate_limit_usec_per_read_op" and value is not None:
if value < 0:
raise ValueError("Invalid value for `qos_rate_limit_usec_per_read_op`, must be a value greater than or equal to `0`")
if key == "qos_rate_limit_usec_per_write_op" and value is not None:
if value < 0:
raise ValueError("Invalid value for `qos_rate_limit_usec_per_write_op`, must be a value greater than or equal to `0`")
if key == "queue_usec_per_mirrored_write_op" and value is not None:
if value < 0:
raise ValueError("Invalid value for `queue_usec_per_mirrored_write_op`, must be a value greater than or equal to `0`")
if key == "queue_usec_per_read_op" and value is not None:
if value < 0:
raise ValueError("Invalid value for `queue_usec_per_read_op`, must be a value greater than or equal to `0`")
if key == "queue_usec_per_write_op" and value is not None:
if value < 0:
raise ValueError("Invalid value for `queue_usec_per_write_op`, must be a value greater than or equal to `0`")
if key == "read_bytes_per_sec" and value is not None:
if value < 0:
raise ValueError("Invalid value for `read_bytes_per_sec`, must be a value greater than or equal to `0`")
if key == "reads_per_sec" and value is not None:
if value < 0:
raise ValueError("Invalid value for `reads_per_sec`, must be a value greater than or equal to `0`")
if key == "san_usec_per_mirrored_write_op" and value is not None:
if value < 0:
raise ValueError("Invalid value for `san_usec_per_mirrored_write_op`, must be a value greater than or equal to `0`")
if key == "san_usec_per_read_op" and value is not None:
if value < 0:
raise ValueError("Invalid value for `san_usec_per_read_op`, must be a value greater than or equal to `0`")
if key == "san_usec_per_write_op" and value is not None:
if value < 0:
raise ValueError("Invalid value for `san_usec_per_write_op`, must be a value greater than or equal to `0`")
if key == "service_usec_per_mirrored_write_op" and value is not None:
if value < 0:
raise ValueError("Invalid value for `service_usec_per_mirrored_write_op`, must be a value greater than or equal to `0`")
if key == "service_usec_per_read_op" and value is not None:
if value < 0:
raise ValueError("Invalid value for `service_usec_per_read_op`, must be a value greater than or equal to `0`")
if key == "service_usec_per_write_op" and value is not None:
if value < 0:
raise ValueError("Invalid value for `service_usec_per_write_op`, must be a value greater than or equal to `0`")
if key == "usec_per_mirrored_write_op" and value is not None:
if value < 0:
raise ValueError("Invalid value for `usec_per_mirrored_write_op`, must be a value greater than or equal to `0`")
if key == "usec_per_read_op" and value is not None:
if value < 0:
raise ValueError("Invalid value for `usec_per_read_op`, must be a value greater than or equal to `0`")
if key == "usec_per_write_op" and value is not None:
if value < 0:
raise ValueError("Invalid value for `usec_per_write_op`, must be a value greater than or equal to `0`")
if key == "write_bytes_per_sec" and value is not None:
if value < 0:
raise ValueError("Invalid value for `write_bytes_per_sec`, must be a value greater than or equal to `0`")
if key == "writes_per_sec" and value is not None:
if value < 0:
raise ValueError("Invalid value for `writes_per_sec`, must be a value greater than or equal to `0`")
if key == "service_usec_per_read_op_cache_reduction" and value is not None:
if value > 1.0:
raise ValueError("Invalid value for `service_usec_per_read_op_cache_reduction`, value must be less than or equal to `1.0`")
if value < 0.0:
raise ValueError("Invalid value for `service_usec_per_read_op_cache_reduction`, must be a value greater than or equal to `0.0`")
self.__dict__[key] = value
def __getattribute__(self, item):
value = object.__getattribute__(self, item)
if isinstance(value, Property):
raise AttributeError
else:
return value
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
if hasattr(self, attr):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(Performance, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Performance):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
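# Editor's usage sketch (not part of the generated client): the metric values
# below are invented purely for illustration. Only attributes that were set
# appear in to_dict(); out-of-range values are rejected by __setattr__.
def _example_performance_usage():
    sample = Performance(reads_per_sec=1200, writes_per_sec=800,
                         usec_per_read_op=350)
    as_dict = sample.to_dict()   # contains only the three fields set above
    try:
        sample.reads_per_sec = -1    # negative rates raise ValueError
    except ValueError:
        pass
    return as_dict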
|
py | b40c999945bb34d374542faf2ad78ccd9c906d36 | # Copyright 2018 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import numpy as np
import pytest
import pandas as pd
import cirq
def test_repr():
v = cirq.TrialResult.from_single_parameter_set(
params=cirq.ParamResolver({'a': 2}),
measurements={'xy': np.array([[1, 0], [0, 1]])})
cirq.testing.assert_equivalent_repr(v)
def test_str():
result = cirq.TrialResult.from_single_parameter_set(
params=cirq.ParamResolver({}),
measurements={
'ab': np.array([[0, 1], [0, 1], [0, 1], [1, 0], [0, 1]]),
'c': np.array([[0], [0], [1], [0], [1]])
})
assert str(result) == 'ab=00010, 11101\nc=00101'
result = cirq.TrialResult.from_single_parameter_set(
params=cirq.ParamResolver({}),
measurements={
'ab': np.array([[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]]),
'c': np.array([[0], [1], [2], [3], [4]])
})
assert str(result) == 'ab=13579, 2 4 6 8 10\nc=01234'
def test_df():
result = cirq.TrialResult.from_single_parameter_set(
params=cirq.ParamResolver({}),
measurements={
'ab': np.array([[0, 1], [0, 1], [0, 1], [1, 0], [0, 1]],
dtype=np.bool),
'c': np.array([[0], [0], [1], [0], [1]], dtype=np.bool)
})
remove_end_measurements = pd.DataFrame(data={
'ab': [1, 1, 2],
'c': [0, 1, 0]
},
index=[1, 2, 3])
pd.testing.assert_frame_equal(result.data.iloc[1:-1],
remove_end_measurements)
# Frequency counting.
df = result.data
assert len(df[df['ab'] == 1]) == 4
assert df.c.value_counts().to_dict() == {0: 3, 1: 2}
def test_histogram():
result = cirq.TrialResult.from_single_parameter_set(
params=cirq.ParamResolver({}),
measurements={
'ab': np.array([[0, 1], [0, 1], [0, 1], [1, 0], [0, 1]],
dtype=np.bool),
'c': np.array([[0], [0], [1], [0], [1]], dtype=np.bool)
})
assert result.histogram(key='ab') == collections.Counter({
1: 4,
2: 1
})
assert result.histogram(key='ab', fold_func=tuple) == collections.Counter({
(False, True): 4,
(True, False): 1
})
assert result.histogram(key='ab',
fold_func=lambda e: None) == collections.Counter({
None: 5,
})
assert result.histogram(key='c') == collections.Counter({
0: 3,
1: 2
})
def test_multi_measurement_histogram():
result = cirq.TrialResult.from_single_parameter_set(
params=cirq.ParamResolver({}),
measurements={
'ab': np.array([[0, 1], [0, 1], [0, 1], [1, 0], [0, 1]],
dtype=np.bool),
'c': np.array([[0], [0], [1], [0], [1]], dtype=np.bool)
})
assert result.multi_measurement_histogram(keys=[]) == collections.Counter({
():
5
})
assert (result.multi_measurement_histogram(keys=['ab']) ==
collections.Counter({
(1,): 4,
(2,): 1,
}))
assert (result.multi_measurement_histogram(keys=['c']) ==
collections.Counter({
(0,): 3,
(1,): 2,
}))
assert (result.multi_measurement_histogram(keys=['ab', 'c']) ==
collections.Counter({
(1, 0,): 2,
(1, 1,): 2,
(2, 0,): 1,
}))
assert result.multi_measurement_histogram(keys=[],
fold_func=lambda e: None
) == collections.Counter({
None: 5,
})
assert result.multi_measurement_histogram(keys=['ab'],
fold_func=lambda e: None
) == collections.Counter({
None: 5,
})
assert result.multi_measurement_histogram(keys=['ab', 'c'],
fold_func=lambda e: None
) == collections.Counter({
None: 5,
})
assert result.multi_measurement_histogram(keys=['ab', 'c'],
fold_func=lambda e: tuple(
tuple(f) for f in e)
) == collections.Counter({
((False, True), (False,)): 2,
((False, True), (True,)): 2,
((True, False), (False,)): 1,
})
def test_trial_result_equality():
et = cirq.testing.EqualsTester()
et.add_equality_group(
cirq.TrialResult.from_single_parameter_set(
params=cirq.ParamResolver({}),
measurements={'a': np.array([[0]] * 5)}))
et.add_equality_group(
cirq.TrialResult.from_single_parameter_set(
params=cirq.ParamResolver({}),
measurements={'a': np.array([[0]] * 6)}))
et.add_equality_group(
cirq.TrialResult.from_single_parameter_set(
params=cirq.ParamResolver({}),
measurements={'a': np.array([[1]] * 5)}))
def test_qubit_keys_for_histogram():
a, b, c = cirq.LineQubit.range(3)
circuit = cirq.Circuit(
cirq.measure(a, b),
cirq.X(c),
cirq.measure(c),
)
results = cirq.Simulator().run(program=circuit, repetitions=100)
with pytest.raises(KeyError):
_ = results.histogram(key=a)
assert results.histogram(key=[a, b]) == collections.Counter({0: 100})
assert results.histogram(key=c) == collections.Counter({True: 100})
assert results.histogram(key=[c]) == collections.Counter({1: 100})
def test_text_diagram_jupyter():
result = cirq.TrialResult.from_single_parameter_set(
params=cirq.ParamResolver({}),
measurements={
'ab': np.array([[0, 1], [0, 1], [0, 1], [1, 0], [0, 1]],
dtype=np.bool),
'c': np.array([[0], [0], [1], [0], [1]], dtype=np.bool)
})
    # Test Jupyter console output from _repr_pretty_.
class FakePrinter:
def __init__(self):
self.text_pretty = ''
def text(self, to_print):
self.text_pretty += to_print
p = FakePrinter()
result._repr_pretty_(p, False)
assert p.text_pretty == 'ab=00010, 11101\nc=00101'
# Test cycle handling
p = FakePrinter()
result._repr_pretty_(p, True)
assert p.text_pretty == 'TrialResult(...)'
|
py | b40c99afe9e4faed01ba3f8559e5ca6a7df806c1 | # Generated by Django 3.0.8 on 2020-07-21 20:45
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("api", "0084_merge_20200611_1311"),
("api", "0084_task_reviewers"),
]
operations = []
|
py | b40c99c68f7b7709f288e658b450271248852fbd | import os
import sys
import json
import math
import numpy as np
import torch
from torch import nn
from torch import optim
from torch.optim import lr_scheduler
from opts import parse_opts
from model import generate_model
from mean import get_mean, get_std
from spatial_transforms import (
Compose, Normalize, Scale, CenterCrop, CornerCrop, MultiScaleCornerCrop,
MultiScaleRandomCrop, RandomHorizontalFlip, ToTensor)
from temporal_transforms import LoopPadding, TemporalRandomCrop
from target_transforms import ClassLabel, VideoID
from target_transforms import Compose as TargetCompose
from dataset import get_validation_set
from utils import Logger, ImbalancedDatasetSampler
from tensorboardX import SummaryWriter
def main():
opt = parse_opts()
if opt.root_path != '':
opt.video_path = os.path.join(opt.root_path, opt.video_path)
opt.annotation_path = os.path.join(opt.root_path, opt.annotation_path)
opt.result_path = os.path.join(opt.root_path, opt.result_path)
if opt.resume_path:
opt.resume_path = os.path.join(opt.root_path, opt.resume_path)
if opt.pretrain_path:
opt.pretrain_path = os.path.join(opt.root_path, opt.pretrain_path)
opt.scales = [opt.initial_scale]
for i in range(1, opt.n_scales):
opt.scales.append(opt.scales[-1] * opt.scale_step)
opt.arch = '{}-{}'.format(opt.model, opt.model_depth)
opt.mean = get_mean(opt.norm_value, dataset=opt.mean_dataset)
opt.std = get_std(opt.norm_value)
print(opt)
with open(os.path.join(opt.result_path, 'opts.json'), 'w') as opt_file:
json.dump(vars(opt), opt_file)
torch.manual_seed(opt.manual_seed)
model, _ = generate_model(opt)
if opt.no_mean_norm and not opt.std_norm:
norm_method = Normalize([0, 0, 0], [1, 1, 1])
elif not opt.std_norm:
norm_method = Normalize(opt.mean, [1, 1, 1])
else:
norm_method = Normalize(opt.mean, opt.std)
spatial_transform = Compose([
Scale(int(opt.sample_size / opt.scale_in_test)),
CornerCrop(opt.sample_size, opt.crop_position_in_test),
ToTensor(opt.norm_value), norm_method
])
temporal_transform = LoopPadding(opt.sample_duration)
target_transform = VideoID()
validation_data = get_validation_set(opt, spatial_transform, temporal_transform,
target_transform)
val_loader = torch.utils.data.DataLoader(
validation_data,
batch_size=opt.batch_size,
shuffle=False,
num_workers=opt.n_threads,
pin_memory=True)
dump(val_loader, model, opt, validation_data.class_names)
"""
================================
Feature dump
================================
Based on test.py
Predict 512 dim feature via pretrained model
Then dump to JSON file
"""
def dump(data_loader, model, opt, class_names):
print('Feature dump')
model.eval()
segment_to_feature = {}
segment_index = 0
with torch.no_grad():
for _, (inputs, targets) in enumerate(data_loader):
outputs = model(inputs)
assert outputs.shape[1] == 512
for j in range(outputs.size(0)):
label_video = targets[j]
print(
f'Dump feature for {label_video}, segment_index: {segment_index}')
label, path = label_video.split('/')
folder, file_name = path.split('__')
feature = outputs[j].tolist()
segment_to_feature[f'{label_video}_{segment_index}'] = {
'folder': folder,
'file_name': file_name,
'label': label,
'feature': feature,
'feature_dim': len(feature),
'segment_index': segment_index
}
segment_index += 1
with open(
os.path.join(opt.result_path, 'segment_to_feature_34.json'),
'w') as f:
json.dump(segment_to_feature, f)
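# Editor's sketch (not part of the original script): one way the dumped JSON can
# be consumed downstream. The filename and key layout mirror what dump() writes
# above (folder, file_name, label, feature, feature_dim, segment_index).
def load_segment_features(result_path):
    with open(os.path.join(result_path, 'segment_to_feature_34.json')) as f:
        segment_to_feature = json.load(f)
    features = np.array([v['feature'] for v in segment_to_feature.values()])
    labels = [v['label'] for v in segment_to_feature.values()]
    return features, labels  # features has shape (num_segments, 512)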
if __name__ == '__main__':
main()
|
py | b40c99ef9cfff4190ec4a0d1b5e6a8eb3ad7f66c | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import urllib.request, urllib.parse, urllib.error
import json
import collect_data
from collect_data import style
from collect_data import sinput
import get_wd_bstgs
def catpages(catname):
"""Collect file and category pages from a Wikimedia Commons \
category and return a dictionary with fields for either the file \
or the category title and the full title of the page.
"""
params = urllib.parse.urlencode({'language': 'commons', 'project':
'wikimedia', 'categories': catname, 'ns[6]': '1', 'ns[14]': '1',
'format': 'json', 'doit': '1'})
url = ('https://petscan.wmflabs.org/?' + params)
f = urllib.request.urlopen(url)
pages = json.loads(f.read().decode('utf-8'))
pages = pages['*'][0]['a']['*']
simplepages = []
for i in range(len(pages)):
simplepages.append({'fulltitle': pages[i]['nstext'] + ':' +
pages[i]['title'].replace('_', ' ')})
simplepages[i]['statements'] = {}
if pages[i]['nstext'] == 'File':
simplepages[i]['statements']['P18'] = [{}]
simplepages[i]['statements']['P18'][0]['value'] = (
pages[i]['title'].replace('_', ' '))
elif pages[i]['nstext'] == 'Category':
simplepages[i]['statements']['P373'] = [{}]
simplepages[i]['statements']['P373'][0]['value'] = (
pages[i]['title'].replace('_', ' '))
return simplepages
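# Editor's illustration of the return shape (hypothetical page names): for a
# category containing one file and one sub-category, catpages() yields a list
# like the literal below; P18 marks an image file, P373 a Commons category.
_EXAMPLE_CATPAGES_RESULT = [
    {'fulltitle': 'File:Example painting.jpg',
     'statements': {'P18': [{'value': 'Example painting.jpg'}]}},
    {'fulltitle': 'Category:Example painter',
     'statements': {'P373': [{'value': 'Example painter'}]}},
]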
def invnos_for_cat(catname, writefiles=False):
"""Take the pages collected by catpages, let the user look for \
corresponding inventory numbers and return a dictionary with the \
pairs.
"""
pages = catpages(catname)
augmenteddictlist = []
qsstring = ''
overwritejson = False
overwritejsonplus = False
overwriteqs = False
i = 0
# Add inventory numbers
while i < len(pages):
params = urllib.parse.urlencode({'action': 'raw',
'title': pages[i]['fulltitle']})
url = 'https://commons.wikimedia.org/w/index.php?' + params
f = urllib.request.urlopen(url)
f = f.read().decode('utf-8')
print('\n', pages[i]['fulltitle'], '\n', style.source + f + style.end)
answer = ''
while answer == '': # TODO: use collect_data.add_statement here!
answer = sinput('Please insert the inventory number! (Or input ' +
'"exit" to exit or "skip" to delete the entry!\n')
# Stop looking for inventory numbers
if answer == 'exit':
break
# Remove the page from the list
elif answer == 'skip':
del pages[i]
print('The page has been skipped.')
# Mark the page as not applicable # TODO: new hidden functionality
elif answer == 'na':
newcompletedpage = '"' + pages[i]['fulltitle'] + '", "na"\n'
del pages[i]
# Select from different pages for one object
elif 'P217' in pages[i-1]['statements'] and answer in (
page['statements']['P217'][0]['value'] for page in pages[:i]):
# FIXME: using just the first P217 statement
for j in range(i):
                if answer == pages[j]['statements']['P217'][0]['value']:
print('The following two pages belong to '
'the object with the inventory number ' + answer +
': \n' + style.select + '1 ' + pages[j]['fulltitle'] +
'\n2 ' + pages[i]['fulltitle'] + style.end)
selection = ''
while selection != '1' and selection != '2':
selection = sinput('Please enter the number of the ' +
'preferred one!\n')
selection = int(selection) - 1
duplicates = [pages[j],pages[i]]
print('duplicates: ', duplicates)
commoninvno = pages[j]['statements']['P217']
pages[j] = duplicates[selection]
pages[j]['statements']['P217'] = commoninvno
# TODO: simplify perhaps
# betterdup = duplicates.pop(selection)
# pages[i]['nstext'] = betterdup['nstext']
# pages[i]['title'] = betterdup['title']
# if 'notusedfiles' in pages[j].keys():
# pages[i]['notusedfiles'] += duplicates
# else:
# pages[i]['notusedfiles'] = duplicates
# TODO: improve evtl
print('i (eig neu): ',pages[i],'\nj (eig alt): ',pages[j])
del pages[i]
# Simply add the inventory number and pass info to collect_data.unite
else:
pages[i]['statements']['P217'] = [{'value': answer}]
uniteddict = collect_data.unite(pages[i])
augmenteddictlist.append(uniteddict)
qsstring += collect_data.artworkjson2qs(uniteddict) + '\n'
i += 1
    # Write the JSON, the augmented JSON and QuickStatements
# results to files
if writefiles == True:
filenametrunk = 'data/objects-' + catname
overwritejson = collect_data.try_write_file(filenametrunk +
'.json', json.dumps(pages, ensure_ascii=False, indent=4,
sort_keys=True), overwrite=overwritejson)
overwritejsonplus = collect_data.try_write_file(filenametrunk +
'_plus.json', json.dumps(augmenteddictlist,
ensure_ascii=False, indent=4, sort_keys=True),
overwrite=overwritejsonplus)
overwriteqs = collect_data.try_write_file(filenametrunk +
'_qs.txt', qsstring, overwrite=overwriteqs)
with open('data/commons_completed_pages.csv', 'a') as f:
f.write(newcompletedpage)
return pages, augmenteddictlist, qsstring
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('catname')
args = parser.parse_args()
# Renew Wikidata items
get_wd_bstgs.get_wd_bstgs()
# Fetch data from the Commons category, integrate it and write it to files
invnos_for_cat(args.catname, writefiles=True)
|
py | b40c9a6231f21a81b4a672b5c0d8bcf9a83425c4 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from datetime import datetime
from enum import Enum
def compare_speed(fn1, fn2, times):
start = datetime.now()
for i in range(times):
fn1()
end = datetime.now()
print("fn1", end - start)
start = datetime.now()
for i in range(times):
fn2()
end = datetime.now()
print("fn2", end - start)
def test_enum():
class E(Enum):
a = 'aaa'
b = 2
c = 3
e = E.a
print(e == 'aaa')
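# Note: this prints False. Enum members never compare equal to their raw
# values; use e is E.a or e.value == 'aaa' instead.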
def test_fstr():
import dis
def fstring():
a = None
b = 2
return f'a:{a}, b:{b}'
dis.dis(fstring)
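# The disassembly shows that the f-string compiles to dedicated
# string-formatting opcodes (FORMAT_VALUE / BUILD_STRING on CPython 3.6+)
# rather than a call to str.format().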
def test_logging():
import logging
logging.basicConfig(level=logging.ERROR)
logging.debug('asdfsaf')
logging.info('asdfsaf')
logging.warning('asdfsaf')
logging.error('asdfsaf')
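# With the root logger level set to ERROR, only the error() call above is
# emitted; the debug/info/warning calls are filtered out.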
if __name__ == '__main__':
import tokenize, token
# a = 'a\n b\nc\rd\r\ne'
# print(a.splitlines(keepends=True))
# test_fstr()
# test_func()
# test_logging()
a = 1
while a <= 4:
print(a)
a += 1
else:
print('else', a)
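# The else clause of a while loop runs when the condition becomes false
# without a break, so this prints 1 through 4 followed by 'else 5'.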
print(type(NotImplemented))
|
py | b40c9a9feeeb946a9fa109b4d5d6626784599de6 | from django.shortcuts import render
from wiki.models import Page
from django.views.generic.list import ListView
from django.views.generic.detail import DetailView
class PageList(ListView):
"""
CHALLENGES:
1. On GET, display a homepage that shows all Pages in your wiki.
2. Replace this CHALLENGE text with a descriptive docstring for PageList.
3. Replace pass below with the code to render a template named `list.html`.
"""
model = Page
def get(self, request):
""" Returns a list of wiki pages. """
pages_list = list(Page.objects.all())
return render(request, 'wiki/list.html', {'pages_list': pages_list})
class PageDetailView(DetailView):
"""
CHALLENGES:
1. On GET, render a template named `page.html`.
2. Replace this docstring with a description of what this accomplishes.
STRETCH CHALLENGES:
1. Import the PageForm class from forms.py.
- This ModelForm enables editing of an existing Page object in the database.
2. On GET, render an edit form below the page details.
3. On POST, check if the data in the form is valid.
- If True, save the data, and redirect back to the DetailsView.
- If False, display all the errors in the template, above the form fields.
4. Instead of hard-coding the path to redirect to, use the `reverse` function to return the path.
5. After successfully editing a Page, use Django Messages to "flash" the user a success message
- Message Content: REPLACE_WITH_PAGE_TITLE has been successfully updated.
"""
model = Page
def get(self, request, slug):
""" Returns a specific of wiki page by slug. """
single_page = Page.objects.get(slug=slug)
return render(request, 'wiki/page.html', {'page' : single_page} )
def post(self, request, slug):
pass
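# One possible sketch of the stretch challenges above. PageForm (wiki/forms.py),
# the URL name 'wiki-page-details', and the Page.title field are assumptions not
# defined in this file, and the imports (messages, redirect, reverse, PageForm)
# would still be needed:
# form = PageForm(request.POST, instance=Page.objects.get(slug=slug))
# if form.is_valid():
#     page = form.save()
#     messages.success(request, f'{page.title} has been successfully updated.')
#     return redirect(reverse('wiki-page-details', args=[slug]))
# return render(request, 'wiki/page.html', {'page': Page.objects.get(slug=slug), 'form': form})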
|
py | b40c9ae7e1ca1ae1045d07b937d248557bcc2596 | """
Author: Jagriti Goswami
Date: 30th August 2020
License: MIT License
====================================================================
Checks if given pydb and table already exists or not
param host: host name
param user: user name
param password: password
param db_name: filename to send to pydb
param tb_name: name of the table where data will be stored
"""
# =================================================================
import pymysql
import pandas as pd
import sys
def exists_db(host, user, password, db_name):
"""
Return True if pydb exists, else return False.
:param host: host
:param user: user
:param password: password
:param db_name: pydb name to check if exists or not
:return: True if exists, else return False
"""
# Create a connection object
connection = pymysql.connect(host=host,
user=user,
password=password,
autocommit=True,
charset="utf8mb4",
cursorclass=pymysql.cursors.DictCursor)
# print('Connected to DB: {}'.format(host))
# Create a cursor object
cursor = connection.cursor()
# check if pydb exists
sql_query = "SHOW DATABASES"
cursor.execute(sql_query)
for db in cursor:
# print(db.values())
for val in db.values():
if val == db_name:
return True
return False
def exists_tb(host, user, password, db_name, tb_name):
"""
Return True if table exists, else return False.
:param host: host
:param user: user
:param password: password
:param db_name: name of the pydb
:param tb_name: table name to check if exists or not
:return: True if exists, else return False
"""
# Create a connection object
connection = pymysql.connect(host=host,
user=user,
password=password,
database=db_name,
autocommit=True,
charset="utf8mb4",
cursorclass=pymysql.cursors.DictCursor)
# print('Connected to DB: {}'.format(host))
# Create a cursor object
cursor = connection.cursor()
# check if table exists
sql_query = "SHOW TABLES"
cursor.execute(sql_query)
for tb in cursor:
# print(tb.values())
for val in tb.values():
if val == tb_name:
return True
return False
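# Usage sketch (not part of the original module): the credentials and names
# below are placeholders for a local MySQL server; adjust them to your setup.
# print(exists_db(host="localhost", user="root", password="secret", db_name="mydb"))
# print(exists_tb(host="localhost", user="root", password="secret", db_name="mydb", tb_name="mytable"))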
|
py | b40c9d7099cc91d85d1158cbcc7df0882d91fe90 | import unittest
import utilities.src.latlon as latlon
class LatLonTest(unittest.TestCase):
p1: latlon.Point
p2: latlon.Point
def setUp(self):
self.p1 = latlon.Point(1.14, 1.21)
self.p2 = latlon.Point(1.52, 1.35)
def test_distance(self):
self.assertAlmostEqual(45.03, latlon.distance(self.p1, self.p2), delta=0.6)
def test_get_lanlonkey(self):
expected = [self.p1, self.p2]
keyfunction = latlon.get_lanlonkey(latlon.Point(1, 1))
res = sorted([self.p2, self.p1], key=keyfunction)
self.assertEqual(expected, res)
if __name__ == "__main__":
unittest.main()
|
py | b40c9d989405f21341ab9144da79c88fe21ab52d | #!/usr/bin/env python
# coding: utf-8
import sys, os
sys.path.append(os.pardir) # Setting for importing files from the parent directory
import numpy as np
import matplotlib.pyplot as plt
from deep_convnet import DeepConvNet
from dataset.mnist import load_mnist
(x_train, t_train), (x_test, t_test) = load_mnist(flatten=False)
network = DeepConvNet()
network.load_params("deep_convnet_params.pkl")
sampled = 10000 # for speed
x_test = x_test[:sampled]
t_test = t_test[:sampled]
print("caluculate accuracy (float64) ... ")
print(network.accuracy(x_test, t_test))
# Convert to float16
x_test = x_test.astype(np.float16)
for param in network.params.values():
param[...] = param.astype(np.float16)
print("caluculate accuracy (float16) ... ")
print(network.accuracy(x_test, t_test))
|
py | b40c9effde540859bf96a2816d91fe06e96fe1bc | import firebase_admin
from firebase_admin import credentials
from firebase_admin import firestore
import os
keypath = "./ml-job-finder-2fa3b623837c.json"
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = keypath
project_id = 'ml-job-finder'
# Use app-default credentials
cred = credentials.ApplicationDefault()
firebase_admin.initialize_app(cred, {
'projectId': project_id,
})
db = firestore.client()
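# Example usage sketch (hypothetical collection/document names, not part of the
# original script): write a document and read it back.
# doc_ref = db.collection('jobs').document('example-job')
# doc_ref.set({'title': 'ML Engineer', 'remote': True})
# print(doc_ref.get().to_dict())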
|
py | b40c9f4f09e98b7de181917626d73a4c4c92af9d | from sparse_conv2 import sparse_conv2
import numpy as np
import torch
import torch.nn.functional as F
from torch.autograd import Variable as V
from collections import OrderedDict
from numba import jit
def construct_pyramid(response_maps, upper_bound, kernel_size = 4):
h, w = response_maps[0]
maps = response_maps[1]
maph, mapw = maps[0].shape
pyramid_layer = {'size': (h, w), 'map_size': (maph, mapw) , 'response_maps':maps, 'weights': np.ones(h*w),
'kernel_size': kernel_size, 'step': 1, 'pool_idx': None, 'reverse_idx':None}
pyramid = [pyramid_layer]
while (pyramid[-1]['kernel_size']*2) <= upper_bound:
next_layer = get_next_layer(pyramid[-1])
pyramid.append(next_layer)
print(pyramid[-1]['kernel_size']*2)
return pyramid
def get_next_layer(layer):
step = layer['step']*2 - 1 if layer['step'] > 1 else 2
kernel_size = layer['kernel_size']*2
maps = layer['response_maps']
z, y, x = maps.shape
t = np.zeros((1, z, y, x))
t[0] = maps
tensor_map = torch.from_numpy(t)
pooled_map, pool_idx = F.max_pool2d(V(tensor_map), 3, stride=2, return_indices=True, ceil_mode=True, padding=1)
maps = pooled_map.data.numpy()[0]
layer['response_maps'] = maps # so that we could only store pooled map to save memory usage
layer['pool_idx'] = pool_idx
h, w, maps, weights, reverse_idx = sparse_conv2((layer['size'][0], layer['size'][1], maps), aggregation, layer['weights'], step)
maph, mapw = maps[0].shape
return {'size': (h, w), 'map_size': (maph, mapw), 'response_maps':maps, 'weights': weights, 'kernel_size': kernel_size,
'step': step, 'pool_idx': None, 'reverse_idx': reverse_idx}
def aggregation(imgs, weights):
edge_correction_factor = get_weights_mat(weights, 0.9) #conv.cpp:_sparse_conv
#next_layer = _aggregation_mat(imgs, weights)
next_layer = _aggregation_slice(imgs, weights)
return edge_correction(next_layer, edge_correction_factor)**1.4
def get_weights_mat(weights, inv):
weights_mat = np.zeros((3, 3))
#print(weights)
weights_mat[1:, 1:] = weights[0] #top left
weights_mat[1:, :-1] += weights[1] #top right
weights_mat[:-1, 1:] += weights[2] #bottom left
weights_mat[:-1, :-1] += weights[3] #bottom right
for i in range(3):
for j in range(3):
x = weights_mat[i, j]
if x != 0:
weights_mat[i, j] = (1/x)**inv
return weights_mat
def edge_correction(img, f):
img[0,0] *= f[0,0]
img[0,-1] *= f[0,-1]
img[-1,0] *= f[-1,0]
img[-1,-1] *= f[-1,-1]
img[0,1:-1] *= f[0, 1]
img[-1,1:-1] *= f[-1, 1]
img[1:-1,0] *= f[1, 0]
img[1:-1,-1] *= f[1, -1]
img[2:-1,2:-1] *= f[1, 1]
return img
def _aggregation_mat(imgs, weights):
#print(imgs[0].shape)
h, w = imgs[0].shape
mask = np.zeros((4, h*w))
mask[0, 0:(h-1)*w] = np.array([[weights[0] for n in range(w-1)]+[0] for m in range(h-1)]).flatten()
mask[1, 0:(h-1)*w] = np.roll(np.array([[weights[1] for n in range(w-1)]+[0] for m in range(h-1)]).flatten(), 1)
mask[2, w:h*w] = np.array([[weights[2] for n in range(w-1)]+[0] for m in range(h-1)]).flatten()
mask[3, w:h*w] = np.roll(np.array([[weights[3] for n in range(w-1)]+[0] for m in range(h-1)]).flatten(), 1)
im1 = np.roll(imgs[0].flatten()*mask[0], w+1)
im2 = np.roll(imgs[1].flatten()*mask[1], w-1)
im3 = np.roll(imgs[2].flatten()*mask[2], -(w-1))
im4 = np.roll(imgs[3].flatten()*mask[3], -(w+1))
return np.reshape((im1+im2+im3+im4), (h, w))
def _aggregation_slice(imgs, weights):
res = np.zeros(imgs[0].shape)
res[1:, 1:] += imgs[0][:-1, :-1]*weights[0]
res[1:, :-1] += imgs[1][:-1, 1:]*weights[1]
res[:-1, 1:] += imgs[2][1:, :-1]*weights[2]
res[:-1, :-1] += imgs[3][1:, 1:]*weights[3]
return res
|
py | b40ca004b6c136b9aa23822c89dd6e9e227e3e6f | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests for Volume Code.
"""
from nova import context
from nova import exception
from nova import db
from nova import flags
from nova import log as logging
from nova import test
from nova import utils
FLAGS = flags.FLAGS
LOG = logging.getLogger('nova.tests.volume')
class VolumeTestCase(test.TestCase):
"""Test Case for volumes."""
def setUp(self):
super(VolumeTestCase, self).setUp()
self.compute = utils.import_object(FLAGS.compute_manager)
self.flags(connection_type='fake')
self.volume = utils.import_object(FLAGS.volume_manager)
self.context = context.get_admin_context()
@staticmethod
def _create_volume(size='0'):
"""Create a volume object."""
vol = {}
vol['size'] = size
vol['user_id'] = 'fake'
vol['project_id'] = 'fake'
vol['availability_zone'] = FLAGS.storage_availability_zone
vol['status'] = "creating"
vol['attach_status'] = "detached"
return db.volume_create(context.get_admin_context(), vol)['id']
def test_create_delete_volume(self):
"""Test volume can be created and deleted."""
volume_id = self._create_volume()
self.volume.create_volume(self.context, volume_id)
self.assertEqual(volume_id, db.volume_get(context.get_admin_context(),
volume_id).id)
self.volume.delete_volume(self.context, volume_id)
self.assertRaises(exception.NotFound,
db.volume_get,
self.context,
volume_id)
def test_too_big_volume(self):
"""Ensure failure if a too large of a volume is requested."""
# FIXME(vish): validation needs to move into the data layer in
# volume_create
return True
try:
volume_id = self._create_volume('1001')
self.volume.create_volume(self.context, volume_id)
self.fail("Should have thrown TypeError")
except TypeError:
pass
def test_too_many_volumes(self):
"""Ensure that NoMoreTargets is raised when we run out of volumes."""
vols = []
total_slots = FLAGS.iscsi_num_targets
for _index in xrange(total_slots):
volume_id = self._create_volume()
self.volume.create_volume(self.context, volume_id)
vols.append(volume_id)
volume_id = self._create_volume()
self.assertRaises(db.NoMoreTargets,
self.volume.create_volume,
self.context,
volume_id)
db.volume_destroy(context.get_admin_context(), volume_id)
for volume_id in vols:
self.volume.delete_volume(self.context, volume_id)
def test_run_attach_detach_volume(self):
"""Make sure volume can be attached and detached from instance."""
inst = {}
inst['image_id'] = 'ami-test'
inst['reservation_id'] = 'r-fakeres'
inst['launch_time'] = '10'
inst['user_id'] = 'fake'
inst['project_id'] = 'fake'
inst['instance_type'] = 'm1.tiny'
inst['mac_address'] = utils.generate_mac()
inst['ami_launch_index'] = 0
instance_id = db.instance_create(self.context, inst)['id']
mountpoint = "/dev/sdf"
volume_id = self._create_volume()
self.volume.create_volume(self.context, volume_id)
if FLAGS.fake_tests:
db.volume_attached(self.context, volume_id, instance_id,
mountpoint)
else:
self.compute.attach_volume(self.context,
instance_id,
volume_id,
mountpoint)
vol = db.volume_get(context.get_admin_context(), volume_id)
self.assertEqual(vol['status'], "in-use")
self.assertEqual(vol['attach_status'], "attached")
self.assertEqual(vol['mountpoint'], mountpoint)
instance_ref = db.volume_get_instance(self.context, volume_id)
self.assertEqual(instance_ref['id'], instance_id)
self.assertRaises(exception.Error,
self.volume.delete_volume,
self.context,
volume_id)
if FLAGS.fake_tests:
db.volume_detached(self.context, volume_id)
else:
self.compute.detach_volume(self.context,
instance_id,
volume_id)
vol = db.volume_get(self.context, volume_id)
self.assertEqual(vol['status'], "available")
self.volume.delete_volume(self.context, volume_id)
self.assertRaises(exception.Error,
db.volume_get,
self.context,
volume_id)
db.instance_destroy(self.context, instance_id)
def test_concurrent_volumes_get_different_targets(self):
"""Ensure multiple concurrent volumes get different targets."""
volume_ids = []
targets = []
def _check(volume_id):
"""Make sure targets aren't duplicated."""
volume_ids.append(volume_id)
admin_context = context.get_admin_context()
iscsi_target = db.volume_get_iscsi_target_num(admin_context,
volume_id)
self.assert_(iscsi_target not in targets)
targets.append(iscsi_target)
LOG.debug(_("Target %s allocated"), iscsi_target)
total_slots = FLAGS.iscsi_num_targets
for _index in xrange(total_slots):
volume_id = self._create_volume()
d = self.volume.create_volume(self.context, volume_id)
_check(d)
for volume_id in volume_ids:
self.volume.delete_volume(self.context, volume_id)
def test_multi_node(self):
# TODO(termie): Figure out how to test with two nodes,
# each of them having a different FLAG for storage_node
# This will allow us to test cross-node interactions
pass
|
py | b40ca012abd9719ac8a70c5d82c9655982a4f63b | from pydataweaver.lib.defaults import ENCODING
from pydataweaver.lib.models import Engine
class engine(Engine):
"""Engine instance for PostgreSQL."""
name = "PostgreSQL"
abbreviation = "postgres"
max_int = 2147483647
placeholder = "%s"
required_opts = [
("user", "Enter your PostgreSQL username", "postgres"),
("password", "Enter your password", ""),
("host", "Enter your PostgreSQL host", "localhost"),
("port", "Enter your PostgreSQL port", 5432),
("database", "Enter your PostgreSQL database name", "postgres"),
("database_name", "Format of schema name", "{db}"),
("table_name", "Format of table name", "{db}.{table}"),
]
def create_db_statement(self):
"""In PostgreSQL, the equivalent of a SQL database is a schema.
CREATE SCHEMA table_name;
"""
return Engine.create_db_statement(self).replace("DATABASE", "SCHEMA")
def create_db(self):
"""Create Engine database."""
try:
Engine.create_db(self)
except:
self.connection.rollback()
pass
def drop_statement(self, objecttype, objectname):
"""In PostgreSQL, the equivalent of a SQL database is a schema."""
statement = Engine.drop_statement(self, objecttype, objectname)
statement += " CASCADE;"
return statement.replace(" DATABASE ", " SCHEMA ")
def get_connection(self):
"""
Get db connection.
Please update the encoding lookup table if the required encoding is not present.
"""
import psycopg2 as dbapi
self.get_input()
conn = dbapi.connect(
host=self.opts["host"],
port=int(self.opts["port"]),
user=self.opts["user"],
password=self.opts["password"],
database=self.opts["database"],
)
encoding = ENCODING.lower()
if self.script.encoding:
encoding = self.script.encoding.lower()
encoding_lookup = {"iso-8859-1": "Latin1", "latin-1": "Latin1", "utf-8": "UTF8"}
db_encoding = encoding_lookup.get(encoding)
conn.set_client_encoding(db_encoding)
return conn
|
py | b40ca10cbb51ce85d49c2119c6d9ca05fa03ca28 | import os
import itertools
import platform
import subprocess
import sys
import lit.util
from lit.llvm import llvm_config
from lit.llvm.subst import FindTool
from lit.llvm.subst import ToolSubst
def use_lldb_substitutions(config):
# Set up substitutions for primary tools. These tools must come from config.lldb_tools_dir
# which is basically the build output directory. We do not want to find these in path or
# anywhere else, since they are specifically the programs which are actually being tested.
dsname = 'debugserver' if platform.system() in ['Darwin'] else 'lldb-server'
dsargs = [] if platform.system() in ['Darwin'] else ['gdbserver']
lldbmi = ToolSubst('%lldbmi',
command=FindTool('lldb-mi'),
extra_args=['--synchronous'],
unresolved='ignore')
build_script = os.path.dirname(__file__)
build_script = os.path.join(build_script, 'build.py')
build_script_args = [build_script,
'--compiler=any', # Default to best compiler
'--arch=' + str(config.lldb_bitness)]
if config.lldb_lit_tools_dir:
build_script_args.append('--tools-dir={0}'.format(config.lldb_lit_tools_dir))
if config.lldb_tools_dir:
build_script_args.append('--tools-dir={0}'.format(config.lldb_tools_dir))
lldb_init = os.path.join(config.test_exec_root, 'lit-lldb-init')
primary_tools = [
ToolSubst('%lldb',
command=FindTool('lldb'),
extra_args=['--no-lldbinit', '-S', lldb_init]),
ToolSubst('%lldb-init',
command=FindTool('lldb'),
extra_args=['-S', lldb_init]),
lldbmi,
ToolSubst('%debugserver',
command=FindTool(dsname),
extra_args=dsargs,
unresolved='ignore'),
'lldb-test',
'lldb-instr',
ToolSubst('%build',
command="'" + sys.executable + "'",
extra_args=build_script_args)
]
llvm_config.add_tool_substitutions(primary_tools,
[config.lldb_tools_dir])
# lldb-mi always fails without Python support
if lldbmi.was_resolved and not config.lldb_disable_python:
config.available_features.add('lldb-mi')
def _use_msvc_substitutions(config):
# If running from a Visual Studio Command prompt (e.g. vcvars), this will
# detect the include and lib paths, and find cl.exe and link.exe and create
# substitutions for each of them that explicitly specify /I and /L paths
cl = lit.util.which('cl')
link = lit.util.which('link')
if not cl or not link:
return
cl = '"' + cl + '"'
link = '"' + link + '"'
includes = os.getenv('INCLUDE', '').split(';')
libs = os.getenv('LIB', '').split(';')
config.available_features.add('msvc')
compiler_flags = ['"/I{}"'.format(x) for x in includes if os.path.exists(x)]
linker_flags = ['"/LIBPATH:{}"'.format(x) for x in libs if os.path.exists(x)]
tools = [
ToolSubst('%msvc_cl', command=cl, extra_args=compiler_flags),
ToolSubst('%msvc_link', command=link, extra_args=linker_flags)]
llvm_config.add_tool_substitutions(tools)
return
def use_support_substitutions(config):
# Set up substitutions for support tools. These tools can be overridden at the CMake
# level (by specifying -DLLDB_LIT_TOOLS_DIR), installed, or as a last resort, we can use
# the just-built version.
flags = []
if platform.system() in ['Darwin']:
try:
out = subprocess.check_output(['xcrun', '--show-sdk-path']).strip()
res = 0
except OSError:
res = -1
if res == 0 and out:
sdk_path = lit.util.to_string(out)
llvm_config.lit_config.note('using SDKROOT: %r' % sdk_path)
flags = ['-isysroot', sdk_path]
elif platform.system() in ['OpenBSD', 'Linux']:
flags = ['-pthread']
config.target_shared_library_suffix = '.dylib' if platform.system() in ['Darwin'] else '.so'
config.substitutions.append(('%target-shared-library-suffix', config.target_shared_library_suffix))
# Swift support
swift_args = ['-module-cache-path',
os.path.join(os.path.dirname(config.lldb_libs_dir),
'lldb-test-build.noindex',
'module-cache-clang')]
swift_driver_args = []
if platform.system() in ['Darwin']:
swift_args += ['-sdk', sdk_path]
swift_driver_args += ['-toolchain-stdlib-rpath']
tools = [
ToolSubst(
'%target-swiftc', command=config.swiftc,
extra_args=swift_args + swift_driver_args),
ToolSubst(
'%target-swift-frontend', command=config.swiftc[:-1],
extra_args=(['-frontend'] + swift_args))
]
llvm_config.add_tool_substitutions(tools)
additional_tool_dirs=[]
if config.lldb_lit_tools_dir:
additional_tool_dirs.append(config.lldb_lit_tools_dir)
llvm_config.use_clang(additional_flags=flags,
additional_tool_dirs=additional_tool_dirs,
required=True)
if sys.platform == 'win32':
_use_msvc_substitutions(config)
have_lld = llvm_config.use_lld(additional_tool_dirs=additional_tool_dirs,
required=False)
if have_lld:
config.available_features.add('lld')
support_tools = ['yaml2obj', 'obj2yaml', 'llvm-pdbutil',
'llvm-mc', 'llvm-readobj', 'llvm-objdump',
'llvm-objcopy']
additional_tool_dirs += [config.lldb_tools_dir, config.llvm_tools_dir]
llvm_config.add_tool_substitutions(support_tools, additional_tool_dirs)
|
py | b40ca10d74149595c49bf66b3d3c14366d3699e8 | import pytest
from questionanswering.squad import position_labels, nearest
class TestNearest:
def test_span_not_exists(self):
assert nearest(s="z", t="0123456789", start=1) == -1
def test_single_span_exists(self):
assert nearest(s="4", t="0 1 2 3 4 5 6 7 8 9", start=0) == 8
assert nearest(s="4", t="0 1 2 3 4 5 6 7 8 9", start=18) == 8
assert nearest(s="0", t="0 1 2 3 4 5 6 7 8 9", start=0) == 0
assert nearest(s="0", t="0 1 2 3 4 5 6 7 8 9", start=18) == 0
assert nearest(s="9", t="0 1 2 3 4 5 6 7 8 9", start=0) == 18
assert nearest(s="9", t="0 1 2 3 4 5 6 7 8 9", start=18) == 18
def test_multiple_spans_exist(self):
assert nearest(s="x", t="x 1 2 3 x 5 6 7 8 x", start=0) == 0
assert nearest(s="x", t="x 1 2 3 x 5 6 7 8 x", start=2) == 0
assert nearest(s="x", t="x 1 2 3 x 5 6 7 8 x", start=4) == 0
assert nearest(s="x", t="x 1 2 3 x 5 6 7 8 x", start=6) == 8
assert nearest(s="x", t="x 1 2 3 x 5 6 7 8 x", start=8) == 8
assert nearest(s="x", t="x 1 2 3 x 5 6 7 8 x", start=10) == 8
assert nearest(s="x", t="x 1 2 3 x 5 6 7 8 x", start=12) == 8
assert nearest(s="x", t="x 1 2 3 x 5 6 7 8 x", start=14) == 18
assert nearest(s="x", t="x 1 2 3 x 5 6 7 8 x", start=16) == 18
assert nearest(s="x", t="x 1 2 3 x 5 6 7 8 x", start=18) == 18
class TestPositionLabels:
def test_answer_not_exists(self):
assert (
position_labels(
offset_mapping=[[(0, 1), (0, 1)], [(0, 1), (0, 1)]],
overflow_to_sample_mapping=[0, 1],
answer_start=[-1, -1],
answer_length=[0, 0],
)
== ([0, 0], [0, 0])
)
def test_answer_exists(self):
assert (
position_labels(
offset_mapping=[
[(0, 0), (0, 10), (10, 20), (20, 30), (0, 0)],
[(0, 0), (0, 10), (10, 20), (20, 30), (0, 0)],
[(0, 0), (0, 10), (10, 20), (20, 30), (0, 0)],
[(0, 0), (0, 10), (10, 20), (20, 30), (0, 0)],
[(0, 0), (0, 10), (10, 20), (20, 30), (0, 0)],
[(0, 0), (0, 10), (10, 20), (20, 30), (0, 0)],
],
overflow_to_sample_mapping=[0, 1, 2, 3, 4, 5],
answer_start=[0, 10, 20, 0, 10, 0],
answer_length=[10, 10, 10, 20, 20, 30],
)
== ([1, 2, 3, 1, 2, 1], [1, 2, 3, 2, 3, 3])
)
def test_answer_exists_in_multiple_windows(self):
assert (
position_labels(
offset_mapping=[
[(0, 0), (0, 10), (10, 20), (20, 30), (0, 0)],
[(0, 0), (20, 30), (30, 40), (40, 50), (0, 0)],
[(0, 0), (0, 10), (10, 20), (20, 30), (0, 0)],
[(0, 0), (20, 30), (30, 40), (40, 50), (0, 0)],
[(0, 0), (0, 10), (10, 20), (20, 30), (0, 0)],
[(0, 0), (20, 30), (30, 40), (40, 50), (0, 0)],
],
overflow_to_sample_mapping=[0, 0, 1, 1, 2, 2],
answer_start=[0, 20, 40],
answer_length=[10, 10, 10],
)
== ([1, 0, 3, 1, 0, 3], [1, 0, 3, 1, 0, 3])
)
def test_throw_error_when_end_comes_before_start(self):
with pytest.raises(ValueError, match=r"end must not come before start"):
position_labels(
offset_mapping=[
[(0, 0), (0, 10), (10, 20), (20, 30), (0, 0)],
],
overflow_to_sample_mapping=[0],
answer_start=[20],
answer_length=[-10],
)
def test_throw_error_when_answer_cannot_be_found(self):
with pytest.raises(ValueError, match=r"answer span cannot be found"):
position_labels(
offset_mapping=[
[(0, 0), (0, 10), (10, 20), (20, 30), (0, 0)],
],
overflow_to_sample_mapping=[0],
answer_start=[1],
answer_length=[10],
)
with pytest.raises(ValueError, match=r"answer span cannot be found"):
position_labels(
offset_mapping=[
[(0, 0), (0, 10), (10, 20), (20, 30), (0, 0)],
],
overflow_to_sample_mapping=[0],
answer_start=[0],
answer_length=[9],
)
with pytest.raises(ValueError, match=r"answer span cannot be found"):
position_labels(
offset_mapping=[
[(0, 0), (0, 10), (10, 20), (20, 30), (0, 0)],
[(0, 0), (20, 30), (30, 40), (40, 50), (0, 0)],
],
overflow_to_sample_mapping=[0, 0],
answer_start=[60],
answer_length=[10],
)
|
py | b40ca12a31b7d08b530638ac0ea835092a37a8d8 | test_case = int(input())
for i in range(1, test_case + 1):
number = int(input())
total = 0
for num in range(1, number + 1):
if num % 2:
total += num
else:
total -= num
print('#{} {}'.format(i, total))
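# Worked example: for an input of 5 the loop computes 1 - 2 + 3 - 4 + 5 = 3
# (odd numbers are added, even numbers are subtracted).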
|
py | b40ca1db970db8dd075dceff70dc09f72ede7971 | """PyAlaCarte and PyAlaMode editors."""
__author__ = "Patrick K. O'Brien <[email protected]>"
__cvsid__ = "$Id: editor.py 63478 2010-02-13 22:59:44Z RD $"
__revision__ = "$Revision: 63478 $"[11:-2]
import wx
from buffer import Buffer
import crust
import dispatcher
import editwindow
import frame
from shell import Shell
import version
class EditorFrame(frame.Frame):
"""Frame containing one editor."""
def __init__(self, parent=None, id=-1, title='PyAlaCarte',
pos=wx.DefaultPosition, size=(800, 600),
style=wx.DEFAULT_FRAME_STYLE | wx.NO_FULL_REPAINT_ON_RESIZE,
filename=None):
"""Create EditorFrame instance."""
frame.Frame.__init__(self, parent, id, title, pos, size, style)
self.buffers = {}
self.buffer = None # Current buffer.
self.editor = None
self._defaultText = title + ' - the tastiest Python editor.'
self._statusText = self._defaultText
self.SetStatusText(self._statusText)
self.Bind(wx.EVT_IDLE, self.OnIdle)
self._setup()
if filename:
self.bufferCreate(filename)
def _setup(self):
"""Setup prior to first buffer creation.
Useful for subclasses."""
pass
def setEditor(self, editor):
self.editor = editor
self.buffer = self.editor.buffer
self.buffers[self.buffer.id] = self.buffer
def OnAbout(self, event):
"""Display an About window."""
title = 'About PyAlaCarte'
text = 'Another fine, flaky program.'
dialog = wx.MessageDialog(self, text, title,
wx.OK | wx.ICON_INFORMATION)
dialog.ShowModal()
dialog.Destroy()
def OnClose(self, event):
"""Event handler for closing."""
for buffer in self.buffers.values():
self.buffer = buffer
if buffer.hasChanged():
cancel = self.bufferSuggestSave()
if cancel and event.CanVeto():
event.Veto()
return
self.Destroy()
def OnIdle(self, event):
"""Event handler for idle time."""
self._updateStatus()
if hasattr(self, 'notebook'):
self._updateTabText()
self._updateTitle()
event.Skip()
def _updateStatus(self):
"""Show current status information."""
if self.editor and hasattr(self.editor, 'getStatus'):
status = self.editor.getStatus()
text = 'File: %s | Line: %d | Column: %d' % status
else:
text = self._defaultText
if text != self._statusText:
self.SetStatusText(text)
self._statusText = text
def _updateTabText(self):
"""Show current buffer information on notebook tab."""
## suffix = ' **'
## notebook = self.notebook
## selection = notebook.GetSelection()
## if selection == -1:
## return
## text = notebook.GetPageText(selection)
## window = notebook.GetPage(selection)
## if window.editor and window.editor.buffer.hasChanged():
## if text.endswith(suffix):
## pass
## else:
## notebook.SetPageText(selection, text + suffix)
## else:
## if text.endswith(suffix):
## notebook.SetPageText(selection, text[:len(suffix)])
def _updateTitle(self):
"""Show current title information."""
title = self.GetTitle()
if self.bufferHasChanged():
if title.startswith('* '):
pass
else:
self.SetTitle('* ' + title)
else:
if title.startswith('* '):
self.SetTitle(title[2:])
def hasBuffer(self):
"""Return True if there is a current buffer."""
if self.buffer:
return True
else:
return False
def bufferClose(self):
"""Close buffer."""
if self.bufferHasChanged():
cancel = self.bufferSuggestSave()
if cancel:
return cancel
self.bufferDestroy()
cancel = False
return cancel
def bufferCreate(self, filename=None):
"""Create new buffer."""
self.bufferDestroy()
buffer = Buffer()
self.panel = panel = wx.Panel(parent=self, id=-1)
panel.Bind(wx.EVT_ERASE_BACKGROUND, lambda x: x)
editor = Editor(parent=panel)
panel.editor = editor
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(editor.window, 1, wx.EXPAND)
panel.SetSizer(sizer)
panel.SetAutoLayout(True)
sizer.Layout()
buffer.addEditor(editor)
buffer.open(filename)
self.setEditor(editor)
self.editor.setFocus()
self.SendSizeEvent()
def bufferDestroy(self):
"""Destroy the current buffer."""
if self.buffer:
for editor in self.buffer.editors.values():
editor.destroy()
self.editor = None
del self.buffers[self.buffer.id]
self.buffer = None
self.panel.Destroy()
def bufferHasChanged(self):
"""Return True if buffer has changed since last save."""
if self.buffer:
return self.buffer.hasChanged()
else:
return False
def bufferNew(self):
"""Create new buffer."""
if self.bufferHasChanged():
cancel = self.bufferSuggestSave()
if cancel:
return cancel
self.bufferCreate()
cancel = False
return cancel
def bufferOpen(self):
"""Open file in buffer."""
if self.bufferHasChanged():
cancel = self.bufferSuggestSave()
if cancel:
return cancel
filedir = ''
if self.buffer and self.buffer.doc.filedir:
filedir = self.buffer.doc.filedir
result = openSingle(directory=filedir)
if result.path:
self.bufferCreate(result.path)
cancel = False
return cancel
## def bufferPrint(self):
## """Print buffer."""
## pass
## def bufferRevert(self):
## """Revert buffer to version of file on disk."""
## pass
def bufferSave(self):
"""Save buffer to its file."""
if self.buffer.doc.filepath:
self.buffer.save()
cancel = False
else:
cancel = self.bufferSaveAs()
return cancel
def bufferSaveAs(self):
"""Save buffer to a new filename."""
if self.bufferHasChanged() and self.buffer.doc.filepath:
cancel = self.bufferSuggestSave()
if cancel:
return cancel
filedir = ''
if self.buffer and self.buffer.doc.filedir:
filedir = self.buffer.doc.filedir
result = saveSingle(directory=filedir)
if result.path:
self.buffer.saveAs(result.path)
cancel = False
else:
cancel = True
return cancel
def bufferSuggestSave(self):
"""Suggest saving changes. Return True if user selected Cancel."""
result = messageDialog(parent=None,
message='%s has changed.\n'
'Would you like to save it first'
'?' % self.buffer.name,
title='Save current file?')
if result.positive:
cancel = self.bufferSave()
else:
cancel = result.text == 'Cancel'
return cancel
def updateNamespace(self):
"""Update the buffer namespace for autocompletion and calltips."""
if self.buffer.updateNamespace():
self.SetStatusText('Namespace updated')
else:
self.SetStatusText('Error executing, unable to update namespace')
class EditorNotebookFrame(EditorFrame):
"""Frame containing one or more editors in a notebook."""
def __init__(self, parent=None, id=-1, title='PyAlaMode',
pos=wx.DefaultPosition, size=(800, 600),
style=wx.DEFAULT_FRAME_STYLE | wx.NO_FULL_REPAINT_ON_RESIZE,
filename=None):
"""Create EditorNotebookFrame instance."""
self.notebook = None
EditorFrame.__init__(self, parent, id, title, pos,
size, style, filename)
if self.notebook:
dispatcher.connect(receiver=self._editorChange,
signal='EditorChange', sender=self.notebook)
def _setup(self):
"""Setup prior to first buffer creation.
Called automatically by base class during init."""
self.notebook = EditorNotebook(parent=self)
intro = 'Py %s' % version.VERSION
import imp
module = imp.new_module('__main__')
import __builtin__
module.__dict__['__builtins__'] = __builtin__
namespace = module.__dict__.copy()
self.crust = crust.Crust(parent=self.notebook, intro=intro, locals=namespace)
self.shell = self.crust.shell
# Override the filling so that status messages go to the status bar.
self.crust.filling.tree.setStatusText = self.SetStatusText
# Override the shell so that status messages go to the status bar.
self.shell.setStatusText = self.SetStatusText
# Fix a problem with the sash shrinking to nothing.
self.crust.filling.SetSashPosition(200)
self.notebook.AddPage(page=self.crust, text='*Shell*', select=True)
self.setEditor(self.crust.editor)
self.crust.editor.SetFocus()
def _editorChange(self, editor):
"""Editor change signal receiver."""
self.setEditor(editor)
def OnAbout(self, event):
"""Display an About window."""
title = 'About PyAlaMode'
text = 'Another fine, flaky program.'
dialog = wx.MessageDialog(self, text, title,
wx.OK | wx.ICON_INFORMATION)
dialog.ShowModal()
dialog.Destroy()
def _updateTitle(self):
"""Show current title information."""
pass
## title = self.GetTitle()
## if self.bufferHasChanged():
## if title.startswith('* '):
## pass
## else:
## self.SetTitle('* ' + title)
## else:
## if title.startswith('* '):
## self.SetTitle(title[2:])
def bufferCreate(self, filename=None):
"""Create new buffer."""
buffer = Buffer()
panel = wx.Panel(parent=self.notebook, id=-1)
panel.Bind(wx.EVT_ERASE_BACKGROUND, lambda x: x)
editor = Editor(parent=panel)
panel.editor = editor
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(editor.window, 1, wx.EXPAND)
panel.SetSizer(sizer)
panel.SetAutoLayout(True)
sizer.Layout()
buffer.addEditor(editor)
buffer.open(filename)
self.setEditor(editor)
self.notebook.AddPage(page=panel, text=self.buffer.name, select=True)
self.editor.setFocus()
def bufferDestroy(self):
"""Destroy the current buffer."""
selection = self.notebook.GetSelection()
## print "Destroy Selection:", selection
if selection > 0: # Don't destroy the PyCrust tab.
if self.buffer:
del self.buffers[self.buffer.id]
self.buffer = None # Do this before DeletePage().
self.notebook.DeletePage(selection)
def bufferNew(self):
"""Create new buffer."""
self.bufferCreate()
cancel = False
return cancel
def bufferOpen(self):
"""Open file in buffer."""
filedir = ''
if self.buffer and self.buffer.doc.filedir:
filedir = self.buffer.doc.filedir
result = openMultiple(directory=filedir)
for path in result.paths:
self.bufferCreate(path)
cancel = False
return cancel
class EditorNotebook(wx.Notebook):
"""A notebook containing a page for each editor."""
def __init__(self, parent):
"""Create EditorNotebook instance."""
wx.Notebook.__init__(self, parent, id=-1, style=wx.CLIP_CHILDREN)
self.Bind(wx.EVT_NOTEBOOK_PAGE_CHANGING, self.OnPageChanging, id=self.GetId())
self.Bind(wx.EVT_NOTEBOOK_PAGE_CHANGED, self.OnPageChanged, id=self.GetId())
self.Bind(wx.EVT_IDLE, self.OnIdle)
def OnIdle(self, event):
"""Event handler for idle time."""
self._updateTabText()
event.Skip()
def _updateTabText(self):
"""Show current buffer display name on all but first tab."""
size = 3
changed = ' **'
unchanged = ' --'
selection = self.GetSelection()
if selection < 1:
return
text = self.GetPageText(selection)
window = self.GetPage(selection)
if not window.editor:
return
if text.endswith(changed) or text.endswith(unchanged):
name = text[:-size]
else:
name = text
if name != window.editor.buffer.name:
text = window.editor.buffer.name
if window.editor.buffer.hasChanged():
if text.endswith(changed):
text = None
elif text.endswith(unchanged):
text = text[:-size] + changed
else:
text += changed
else:
if text.endswith(changed):
text = text[:-size] + unchanged
elif text.endswith(unchanged):
text = None
else:
text += unchanged
if text is not None:
self.SetPageText(selection, text)
self.Refresh() # Needed on Win98.
def OnPageChanging(self, event):
"""Page changing event handler."""
event.Skip()
def OnPageChanged(self, event):
"""Page changed event handler."""
new = event.GetSelection()
window = self.GetPage(new)
dispatcher.send(signal='EditorChange', sender=self,
editor=window.editor)
window.SetFocus()
event.Skip()
class EditorShellNotebookFrame(EditorNotebookFrame):
"""Frame containing a notebook containing EditorShellNotebooks."""
def __init__(self, parent=None, id=-1, title='PyAlaModeTest',
pos=wx.DefaultPosition, size=(600, 400),
style=wx.DEFAULT_FRAME_STYLE,
filename=None, singlefile=False):
"""Create EditorShellNotebookFrame instance."""
self._singlefile = singlefile
EditorNotebookFrame.__init__(self, parent, id, title, pos,
size, style, filename)
def _setup(self):
"""Setup prior to first buffer creation.
Called automatically by base class during init."""
if not self._singlefile:
self.notebook = EditorNotebook(parent=self)
def OnAbout(self, event):
"""Display an About window."""
title = 'About PyAlaModePlus'
text = 'Another fine, flaky program.'
dialog = wx.MessageDialog(self, text, title,
wx.OK | wx.ICON_INFORMATION)
dialog.ShowModal()
dialog.Destroy()
def bufferCreate(self, filename=None):
"""Create new buffer."""
if self._singlefile:
self.bufferDestroy()
notebook = EditorShellNotebook(parent=self,
filename=filename)
self.notebook = notebook
else:
notebook = EditorShellNotebook(parent=self.notebook,
filename=filename)
self.setEditor(notebook.editor)
if not self._singlefile:
self.notebook.AddPage(page=notebook, text=self.buffer.name,
select=True)
self.editor.setFocus()
def bufferDestroy(self):
"""Destroy the current buffer."""
if self.buffer:
self.editor = None
del self.buffers[self.buffer.id]
self.buffer = None # Do this before DeletePage().
if self._singlefile:
self.notebook.Destroy()
self.notebook = None
else:
selection = self.notebook.GetSelection()
## print "Destroy Selection:", selection
self.notebook.DeletePage(selection)
def bufferNew(self):
"""Create new buffer."""
if self._singlefile and self.bufferHasChanged():
cancel = self.bufferSuggestSave()
if cancel:
return cancel
self.bufferCreate()
cancel = False
return cancel
def bufferOpen(self):
"""Open file in buffer."""
if self._singlefile and self.bufferHasChanged():
cancel = self.bufferSuggestSave()
if cancel:
return cancel
filedir = ''
if self.buffer and self.buffer.doc.filedir:
filedir = self.buffer.doc.filedir
if self._singlefile:
result = openSingle(directory=filedir)
if result.path:
self.bufferCreate(result.path)
else:
result = openMultiple(directory=filedir)
for path in result.paths:
self.bufferCreate(path)
cancel = False
return cancel
class EditorShellNotebook(wx.Notebook):
"""A notebook containing an editor page and a shell page."""
def __init__(self, parent, filename=None):
"""Create EditorShellNotebook instance."""
wx.Notebook.__init__(self, parent, id=-1)
usePanels = True
if usePanels:
editorparent = editorpanel = wx.Panel(self, -1)
shellparent = shellpanel = wx.Panel(self, -1)
else:
editorparent = self
shellparent = self
self.buffer = Buffer()
self.editor = Editor(parent=editorparent)
self.buffer.addEditor(self.editor)
self.buffer.open(filename)
self.shell = Shell(parent=shellparent, locals=self.buffer.interp.locals,
style=wx.CLIP_CHILDREN | wx.SUNKEN_BORDER)
self.buffer.interp.locals.clear()
if usePanels:
self.AddPage(page=editorpanel, text='Editor', select=True)
self.AddPage(page=shellpanel, text='Shell')
# Setup sizers
editorsizer = wx.BoxSizer(wx.VERTICAL)
editorsizer.Add(self.editor.window, 1, wx.EXPAND)
editorpanel.SetSizer(editorsizer)
editorpanel.SetAutoLayout(True)
shellsizer = wx.BoxSizer(wx.VERTICAL)
shellsizer.Add(self.shell, 1, wx.EXPAND)
shellpanel.SetSizer(shellsizer)
shellpanel.SetAutoLayout(True)
else:
self.AddPage(page=self.editor.window, text='Editor', select=True)
self.AddPage(page=self.shell, text='Shell')
self.editor.setFocus()
self.Bind(wx.EVT_NOTEBOOK_PAGE_CHANGED, self.OnPageChanged, id=self.GetId())
def OnPageChanged(self, event):
"""Page changed event handler."""
selection = event.GetSelection()
if selection == 0:
self.editor.setFocus()
else:
self.shell.SetFocus()
event.Skip()
def SetFocus(self):
wx.Notebook.SetFocus(self)
selection = self.GetSelection()
if selection == 0:
self.editor.setFocus()
else:
self.shell.SetFocus()
class Editor:
"""Editor having an EditWindow."""
def __init__(self, parent, id=-1, pos=wx.DefaultPosition,
size=wx.DefaultSize,
style=wx.CLIP_CHILDREN | wx.SUNKEN_BORDER):
"""Create Editor instance."""
self.window = EditWindow(self, parent, id, pos, size, style)
self.id = self.window.GetId()
self.buffer = None
# Assign handlers for keyboard events.
self.window.Bind(wx.EVT_CHAR, self.OnChar)
self.window.Bind(wx.EVT_KEY_DOWN, self.OnKeyDown)
def _setBuffer(self, buffer, text):
"""Set the editor to a buffer. Private callback called by buffer."""
self.buffer = buffer
self.autoCompleteKeys = buffer.interp.getAutoCompleteKeys()
self.clearAll()
self.setText(text)
self.emptyUndoBuffer()
self.setSavePoint()
def destroy(self):
"""Destroy all editor objects."""
self.window.Destroy()
def clearAll(self):
self.window.ClearAll()
def emptyUndoBuffer(self):
self.window.EmptyUndoBuffer()
def getStatus(self):
"""Return (filepath, line, column) status tuple."""
if self.window:
pos = self.window.GetCurrentPos()
line = self.window.LineFromPosition(pos) + 1
col = self.window.GetColumn(pos)
if self.buffer:
name = self.buffer.doc.filepath or self.buffer.name
else:
name = ''
status = (name, line, col)
return status
else:
return ('', 0, 0)
def getText(self):
"""Return contents of editor."""
return self.window.GetText()
def hasChanged(self):
"""Return True if contents have changed."""
return self.window.GetModify()
def setFocus(self):
"""Set the input focus to the editor window."""
self.window.SetFocus()
def setSavePoint(self):
self.window.SetSavePoint()
def setText(self, text):
"""Set contents of editor."""
self.window.SetText(text)
def OnChar(self, event):
"""Keypress event handler.
Only receives an event if OnKeyDown calls event.Skip() for the
corresponding event."""
key = event.GetKeyCode()
if key in self.autoCompleteKeys:
# Usually the dot (period) key activates auto completion.
if self.window.AutoCompActive():
self.window.AutoCompCancel()
self.window.ReplaceSelection('')
self.window.AddText(chr(key))
text, pos = self.window.GetCurLine()
text = text[:pos]
if self.window.autoComplete:
self.autoCompleteShow(text)
elif key == ord('('):
# The left paren activates a call tip and cancels an
# active auto completion.
if self.window.AutoCompActive():
self.window.AutoCompCancel()
self.window.ReplaceSelection('')
self.window.AddText('(')
text, pos = self.window.GetCurLine()
text = text[:pos]
self.autoCallTipShow(text)
else:
# Allow the normal event handling to take place.
event.Skip()
def OnKeyDown(self, event):
"""Key down event handler."""
key = event.GetKeyCode()
# If the auto-complete window is up let it do its thing.
if self.window.AutoCompActive():
event.Skip()
return
controlDown = event.ControlDown()
altDown = event.AltDown()
shiftDown = event.ShiftDown()
# Let Ctrl-Alt-* get handled normally.
if controlDown and altDown:
event.Skip()
# Increase font size.
elif controlDown and key in (ord(']'),):
dispatcher.send(signal='FontIncrease')
# Decrease font size.
elif controlDown and key in (ord('['),):
dispatcher.send(signal='FontDecrease')
# Default font size.
elif controlDown and key in (ord('='),):
dispatcher.send(signal='FontDefault')
else:
event.Skip()
def autoCompleteShow(self, command):
"""Display auto-completion popup list."""
list = self.buffer.interp.getAutoCompleteList(command,
includeMagic=self.window.autoCompleteIncludeMagic,
includeSingle=self.window.autoCompleteIncludeSingle,
includeDouble=self.window.autoCompleteIncludeDouble)
if list:
options = ' '.join(list)
offset = 0
self.window.AutoCompShow(offset, options)
def autoCallTipShow(self, command):
"""Display argument spec and docstring in a popup window."""
if self.window.CallTipActive():
self.window.CallTipCancel()
(name, argspec, tip) = self.buffer.interp.getCallTip(command)
if tip:
dispatcher.send(signal='Shell.calltip', sender=self, calltip=tip)
if not self.window.autoCallTip:
return
startpos = self.window.GetCurrentPos()
if argspec:
self.window.AddText(argspec + ')')
endpos = self.window.GetCurrentPos()
self.window.SetSelection(startpos, endpos)
if tip:
tippos = startpos - (len(name) + 1)
fallback = startpos - self.window.GetColumn(startpos)
# In case there isn't enough room, only go back to the
# fallback.
tippos = max(tippos, fallback)
self.window.CallTipShow(tippos, tip)
class EditWindow(editwindow.EditWindow):
"""EditWindow based on StyledTextCtrl."""
def __init__(self, editor, parent, id=-1, pos=wx.DefaultPosition,
size=wx.DefaultSize,
style=wx.CLIP_CHILDREN | wx.SUNKEN_BORDER):
"""Create EditWindow instance."""
editwindow.EditWindow.__init__(self, parent, id, pos, size, style)
self.editor = editor
class DialogResults:
"""DialogResults class."""
def __init__(self, returned):
"""Create wrapper for results returned by dialog."""
self.returned = returned
self.positive = returned in (wx.ID_OK, wx.ID_YES)
self.text = self._asString()
def __repr__(self):
return str(self.__dict__)
def _asString(self):
returned = self.returned
if returned == wx.ID_OK:
return "Ok"
elif returned == wx.ID_CANCEL:
return "Cancel"
elif returned == wx.ID_YES:
return "Yes"
elif returned == wx.ID_NO:
return "No"
def fileDialog(parent=None, title='Open', directory='', filename='',
wildcard='All Files (*.*)|*.*',
style=wx.OPEN | wx.MULTIPLE):
"""File dialog wrapper function."""
dialog = wx.FileDialog(parent, title, directory, filename,
wildcard, style)
result = DialogResults(dialog.ShowModal())
if result.positive:
result.paths = dialog.GetPaths()
else:
result.paths = []
dialog.Destroy()
return result
def openSingle(parent=None, title='Open', directory='', filename='',
wildcard='All Files (*.*)|*.*', style=wx.OPEN):
"""File dialog wrapper function."""
dialog = wx.FileDialog(parent, title, directory, filename,
wildcard, style)
result = DialogResults(dialog.ShowModal())
if result.positive:
result.path = dialog.GetPath()
else:
result.path = None
dialog.Destroy()
return result
def openMultiple(parent=None, title='Open', directory='', filename='',
wildcard='All Files (*.*)|*.*',
style=wx.OPEN | wx.MULTIPLE):
"""File dialog wrapper function."""
return fileDialog(parent, title, directory, filename, wildcard, style)
def saveSingle(parent=None, title='Save', directory='', filename='',
wildcard='All Files (*.*)|*.*',
style=wx.SAVE | wx.OVERWRITE_PROMPT):
"""File dialog wrapper function."""
dialog = wx.FileDialog(parent, title, directory, filename,
wildcard, style)
result = DialogResults(dialog.ShowModal())
if result.positive:
result.path = dialog.GetPath()
else:
result.path = None
dialog.Destroy()
return result
def directory(parent=None, message='Choose a directory', path='', style=0,
pos=wx.DefaultPosition, size=wx.DefaultSize):
"""Dir dialog wrapper function."""
dialog = wx.DirDialog(parent, message, path, style, pos, size)
result = DialogResults(dialog.ShowModal())
if result.positive:
result.path = dialog.GetPath()
else:
result.path = None
dialog.Destroy()
return result
def messageDialog(parent=None, message='', title='Message box',
style=wx.YES_NO | wx.CANCEL | wx.CENTRE | wx.ICON_QUESTION,
pos=wx.DefaultPosition):
"""Message dialog wrapper function."""
dialog = wx.MessageDialog(parent, message, title, style, pos)
result = DialogResults(dialog.ShowModal())
dialog.Destroy()
return result
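# Example (assumed usage, mirroring bufferSuggestSave above): ask a yes/no/cancel
# question and branch on the wrapped result.
# result = messageDialog(message='Save changes before closing?', title='Save current file?')
# if result.positive:
#     pass  # save the buffer
# elif result.text == 'Cancel':
#     pass  # abort the close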
|
py | b40ca200f86308c51b429a902f9a40806982ffc0 | from im2mesh.vnn_onet import (
config, generation, training, models
)
__all__ = [
config, generation, training, models
] |
py | b40ca55dbd92b953985ad13e61df56a69c15596c | from dataclasses import dataclass
from typing import List
from sector.types.blockchain_format.coin import Coin
from sector.types.blockchain_format.program import SerializedProgram, INFINITE_COST
from sector.util.chain_utils import additions_for_solution
from sector.util.streamable import Streamable, streamable
@dataclass(frozen=True)
@streamable
class CoinSolution(Streamable):
"""
This is a rather disparate data structure that validates coin transfers. It's generally populated
with data from different sources, since burned coins are identified by name, so it is built up
more often than it is streamed.
"""
coin: Coin
puzzle_reveal: SerializedProgram
solution: SerializedProgram
def additions(self) -> List[Coin]:
return additions_for_solution(self.coin.name(), self.puzzle_reveal, self.solution, INFINITE_COST)
|