the-stack_0_13960 | # Write a program where the user can
# enter _seven numeric values_ and
# store them in a _single list_ that keeps
# the _even_ and _odd_ values separated.
# At the end, show the even and odd values
# in ascending order.
print('Me de sete valores por favor')
valores = [[], []]
for num in range(7):
    valor = int(input(f'Me de valor {num + 1}: '))
    if valor % 2 == 0:
        valores[0].append(valor)
    else:
        valores[1].append(valor)
print(f'Beleza na Veneza? Os pares são: {sorted(valores[0])}')
print(f'Os impares são: {sorted(valores[1])}')
|
the-stack_0_13961 | from enum import Enum
from spectroscope.model.update import Action
from spectroscope.model.database import RaiseUpdateKeys
from spectroscope.module import ConfigOption, Plugin
from spectroscope.constants import enums
import spectroscope
from typing import List
from pymongo import MongoClient, UpdateOne, DeleteOne
from pymongo.results import BulkWriteResult
from pymongo.errors import ConnectionFailure
log = spectroscope.log()
class Mongodb(Plugin):
_consumed_types = [RaiseUpdateKeys]
config_options = [
ConfigOption(
name="uri_endpoint",
param_type=str,
description="Endpoint to database server",
),
ConfigOption(
name="rs_name",
param_type=str,
description="replica set name of the mongodb",
),
ConfigOption(
name="db_name",
param_type=str,
description="Name of database",
),
ConfigOption(
name="col_name",
param_type=str,
description="Name of collection",
),
]
    def __init__(self, uri_endpoint: str, rs_name: str, db_name: str, col_name: str):
        try:
            self._client = MongoClient(uri_endpoint, replicaset=rs_name)
            self._database = self._client[db_name]
            self._collection = self._database[col_name]
        except ConnectionFailure as e:
            log.error("failed to connect to {}. {}".format(uri_endpoint, e))
            raise e
        self._handlers = {RaiseUpdateKeys: self._action}
@classmethod
def register(cls, **kwargs):
return cls(
uri_endpoint=kwargs["uri_endpoint"],
rs_name=kwargs["rs_name"],
db_name=kwargs.get("db_name", "spectroscope"),
col_name=kwargs.get("col_name", "validators"),
)
def _create_updates(self, validator_keys: List[str], status: int):
request = []
for key in validator_keys:
request.append(
UpdateOne(
{"_id": key},
{"$setOnInsert": {"_id": key, "status": status}},
upsert=True,
)
)
return request
def _create_deletions(self, validator_keys: List[str]):
request = []
for key in validator_keys:
request.append(DeleteOne({"_id": key}))
return request
def _add(self, validator_keys: List[str], status: int):
result = self._collection.bulk_write(
self._create_updates(validator_keys, status), ordered=False
)
if not result.acknowledged:
return []
return result.upserted_count
def _up(self, validator_keys: List[str], status: int):
result = self._collection.bulk_write(
self._create_updates(validator_keys, status), ordered=False
)
if not result.acknowledged:
return []
return result.modified_count
def _del(self, validator_keys: List[str], status: int):
result = self._collection.bulk_write(
self._create_deletions(validator_keys), ordered=False
)
if not result.acknowledged:
return []
return result.deleted_count
def _get(self, validator_keys: List[str], status: int):
validators = []
if not validator_keys:
validators = self._collection.find({}, {"validator_key": 1})
else:
validators = self._collection.find(
{"validator_key": {"$in": validator_keys}}, {"validator_key": 1}
)
return [x["validator_key"] for x in validators]
def _action(
self, validator_keys: List[str], status: int, update_type: int, **kwargs
):
if enums.RequestTypes.ADD.value == update_type:
return self._add(validator_keys, status)
elif enums.RequestTypes.UP.value == update_type:
return self._up(validator_keys, status)
elif enums.RequestTypes.DEL.value == update_type:
return self._del(validator_keys, status)
elif enums.RequestTypes.GET.value == update_type:
return self._get(validator_keys, status)
def consume(self, events: List[Action]):
result = []
for event in events:
result.append(self._handlers[type(event)](**event.update.get_dict()))
return result
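
# Illustrative sketch (not part of the plugin above): how the bulk upsert pattern used
# by _create_updates/_add behaves against a plain pymongo collection. The URI, database
# and collection names here are assumptions for demonstration purposes only.
def _example_bulk_upsert(validator_keys: List[str], status: int) -> int:
    client = MongoClient("mongodb://localhost:27017")
    collection = client["spectroscope"]["validators"]
    # Only $setOnInsert is used, so documents that already exist keep their current status.
    requests = [
        UpdateOne(
            {"_id": key},
            {"$setOnInsert": {"_id": key, "status": status}},
            upsert=True,
        )
        for key in validator_keys
    ]
    result = collection.bulk_write(requests, ordered=False)
    return result.upserted_count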
|
the-stack_0_13962 |
from setuptools import setup, find_packages

version = '5.3.4'

setup(
    name="alerta-hipchat",
    version=version,
    description='Alerta plugin for HipChat',
    url='https://github.com/alerta/alerta-contrib',
    license='MIT',
    author='Nick Satterly',
    author_email='[email protected]',
    packages=find_packages(),
    py_modules=['alerta_hipchat'],
    install_requires=[
        'requests',
        'jinja2'
    ],
    include_package_data=True,
    zip_safe=True,
    entry_points={
        'alerta.plugins': [
            'hipchat = alerta_hipchat:SendRoomNotification'
        ]
    }
)
|
the-stack_0_13972 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import logging
import math
from typing import List
import torch
import torch.nn as nn
import torch.nn.init as init
from reagent.models.base import ModelBase
logger = logging.getLogger(__name__)
def gaussian_fill_w_gain(tensor, activation, dim_in, min_std=0.0) -> None:
    """Gaussian initialization with gain."""
    gain = math.sqrt(2) if (activation == "relu" or activation == "leaky_relu") else 1
    init.normal_(tensor, mean=0, std=max(gain * math.sqrt(1 / dim_in), min_std))
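# For example, with activation="relu" and dim_in=256 the fill uses
# std = sqrt(2) * sqrt(1/256) ≈ 0.088 (unless min_std is larger).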
ACTIVATION_MAP = {
"tanh": nn.Tanh,
"relu": nn.ReLU,
"leaky_relu": nn.LeakyReLU,
"linear": nn.Identity,
"sigmoid": nn.Sigmoid,
}
class FullyConnectedNetwork(ModelBase):
    def __init__(
        self,
        layers,
        activations,
        *,
        use_batch_norm=False,
        min_std=0.0,
        dropout_ratio=0.0,
        use_layer_norm=False,
        normalize_output=False,
    ) -> None:
        super().__init__()
        self.input_dim = layers[0]
        modules: List[nn.Module] = []
        assert len(layers) == len(activations) + 1
        for i, ((in_dim, out_dim), activation) in enumerate(
            zip(zip(layers, layers[1:]), activations)
        ):
            # Add BatchNorm1d
            if use_batch_norm:
                modules.append(nn.BatchNorm1d(in_dim))
            # Add Linear
            linear = nn.Linear(in_dim, out_dim)
            gaussian_fill_w_gain(linear.weight, activation, in_dim, min_std=min_std)
            init.constant_(linear.bias, 0)  # type: ignore
            modules.append(linear)
            # Add LayerNorm
            if use_layer_norm and (normalize_output or i < len(activations) - 1):
                modules.append(nn.LayerNorm(out_dim))  # type: ignore
            # Add activation
            if activation in ACTIVATION_MAP:
                modules.append(ACTIVATION_MAP[activation]())
            else:
                # See if it matches any of the nn modules
                modules.append(getattr(nn, activation)())
            # Add Dropout
            if dropout_ratio > 0.0 and (normalize_output or i < len(activations) - 1):
                modules.append(nn.Dropout(p=dropout_ratio))
        self.dnn = nn.Sequential(*modules)  # type: ignore
    def input_prototype(self):
        return torch.randn(1, self.input_dim)

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        """Forward pass for generic feed-forward DNNs.

        Assumes activation names are valid pytorch activation names.

        :param input: input tensor
        """
        return self.dnn(input)
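
# Minimal usage sketch (not part of the original module): build a small network with the
# class above and run a forward pass on random data. Layer sizes and activations below
# are illustrative assumptions.
if __name__ == "__main__":
    net = FullyConnectedNetwork(
        layers=[16, 32, 1],
        activations=["relu", "linear"],
        dropout_ratio=0.1,
    )
    out = net(torch.randn(4, 16))  # batch of 4 samples, 16 features -> shape (4, 1)
    print("output shape:", out.shape)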
|
the-stack_0_13973 | # qubit number=2
# total number=10
import pyquil
from pyquil.api import local_forest_runtime, QVMConnection
from pyquil import Program, get_qc
from pyquil.gates import *
import numpy as np
conn = QVMConnection()
def make_circuit() -> Program:
    prog = Program()  # circuit begin
    prog += H(0)  # number=1
    prog += Y(1)  # number=2
    prog += Y(1)  # number=4
    prog += Y(1)  # number=3
    prog += RX(2.0860175219836226, 1)  # number=7
    prog += X(0)  # number=5
    prog += X(0)  # number=6
    prog += Y(0)  # number=8
    prog += Y(0)  # number=9
    # circuit end
    return prog
def summrise_results(bitstrings) -> dict:
    d = {}
    for l in bitstrings:
        if d.get(l) is None:
            d[l] = 1
        else:
            d[l] = d[l] + 1
    return d
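# Example (illustrative): summrise_results(['01', '01', '10']) -> {'01': 2, '10': 1}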
if __name__ == '__main__':
    prog = make_circuit()
    # The circuit uses qubits 0 and 1, so request a two-qubit QVM.
    qvm = get_qc('2q-qvm')
    results = qvm.run_and_measure(prog, 1024)
    bitstrings = np.vstack([results[i] for i in qvm.qubits()]).T
    bitstrings = [''.join(map(str, l)) for l in bitstrings]
    writefile = open("../data/startPyquil169.csv", "w")
    print(summrise_results(bitstrings), file=writefile)
    writefile.close()
|
the-stack_0_13975 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Setup for GTalk Pyauto tests."""
import os
import sys
def _SetupPaths():
    """Setting path to find pyauto_functional.py."""
    gtalk_dir = os.path.abspath(os.path.dirname(__file__))
    sys.path.append(gtalk_dir)
    sys.path.append(os.path.normpath(os.path.join(gtalk_dir, os.pardir)))


_SetupPaths()

from pyauto_functional import Main

if __name__ == '__main__':
    Main()
|
the-stack_0_13976 | # Copyright (C) 2009 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from google.appengine.ext import webapp
from model.queuestatus import QueueStatus
class PatchStatus(webapp.RequestHandler):
    def get(self, queue_name, attachment_id):
        statuses = QueueStatus.all().filter('queue_name =', queue_name).filter('active_patch_id =', int(attachment_id)).order('-date').fetch(1)
        if not statuses:
            self.error(404)
            return
        self.response.out.write(statuses[0].message)
|
the-stack_0_13978 | """Get info from gce metadata and put it into grains store."""
from __future__ import print_function
from __future__ import unicode_literals
import json
import six
def _decode_list(data):
"""Decode list items from unicode to normal strings."""
ret = []
for item in data:
if isinstance(item, six.text_type):
item = item.encode('utf-8')
elif isinstance(item, list):
item = _decode_list(item)
elif isinstance(item, dict):
item = _decode_dict(item)
ret.append(item)
return ret
def _decode_dict(data):
"""Decode dictionary keys and values from unicode to normal strings."""
ret = {}
for key, value in data.items():
if isinstance(key, six.text_type):
key = key.encode('utf-8')
if isinstance(value, six.text_type):
value = value.encode('utf-8')
if isinstance(key, six.binary_type):
key = key.decode('utf-8')
if isinstance(value, six.binary_type):
value = value.decode('utf-8')
elif isinstance(value, list):
value = _decode_list(value)
elif isinstance(value, dict):
value = _decode_dict(value)
ret[key] = value
return ret
def gce_metadata():
"""
Fetch all metadata from GCE.
Also fills in some legacy grain data
"""
ret = {}
http = six.moves.http_client.HTTPConnection('metadata.google.internal')
http.request('GET', '/computeMetadata/v1/instance/?recursive=true', None,
{'Metadata-Flavor': 'Google'})
resp = http.getresponse()
json_str = resp.read().decode('utf-8')
metadata = json.loads(json_str, object_hook=_decode_dict)
ipv4 = metadata['networkInterfaces'][0]['accessConfigs'][0]['externalIp']
ret['pub_fqdn_ipv4'] = ret['external_ip'] = ipv4
ret['tags'] = ret['roles'] = metadata['tags']
ret['zone'] = metadata['zone']
ret['gce'] = metadata
return ret
if __name__ == '__main__':
print(gce_metadata())
|
the-stack_0_13979 | #!/usr/bin/env python
# encoding: utf-8
import re
import datetime
def time_fix(time_string):
    now_time = datetime.datetime.now()
    if '分钟前' in time_string:
        minutes = re.search(r'^(\d+)分钟', time_string).group(1)
        created_at = now_time - datetime.timedelta(minutes=int(minutes))
        return created_at.strftime('%Y-%m-%d %H:%M')
    if '小时前' in time_string:
        hours = re.search(r'^(\d+)小时', time_string).group(1)
        created_at = now_time - datetime.timedelta(hours=int(hours))
        return created_at.strftime('%Y-%m-%d %H:%M')
    if '今天' in time_string:
        return time_string.replace('今天', now_time.strftime('%Y-%m-%d'))
    if '月' in time_string:
        time_string = time_string.replace('月', '-').replace('日', '')
        time_string = str(now_time.year) + '-' + time_string
        return time_string
    return time_string
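# Examples (illustrative; assume "now" is 2021-06-01 12:00):
#   time_fix('5分钟前')       -> '2021-06-01 11:55'
#   time_fix('今天 08:30')     -> '2021-06-01 08:30'
#   time_fix('3月15日 09:20')  -> '2021-3-15 09:20'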
keyword_re = re.compile('<span class="kt">|</span>|原图|<!-- 是否进行翻译 -->|<span class="cmt">|\[组图共.张\]')
emoji_re = re.compile('<img alt="|" src="//h5\.sinaimg(.*?)/>')
white_space_re = re.compile('<br />')
div_re = re.compile('</div>|<div>')
image_re = re.compile('<img(.*?)/>')
url_re = re.compile('<a href=(.*?)>|</a>')
def extract_weibo_content(weibo_html):
s = weibo_html
if 'class="ctt">' in s:
s = s.split('class="ctt">', maxsplit=1)[1]
s = emoji_re.sub('', s)
s = url_re.sub('', s)
s = div_re.sub('', s)
s = image_re.sub('', s)
if '<span class="ct">' in s:
s = s.split('<span class="ct">')[0]
splits = s.split('赞[')
if len(splits) == 2:
s = splits[0]
if len(splits) == 3:
origin_text = splits[0]
retweet_text = splits[1].split('转发理由:')[1]
s = origin_text + '转发理由:' + retweet_text
s = white_space_re.sub(' ', s)
s = keyword_re.sub('', s)
s = s.replace('\xa0', '')
s = s.strip(':')
s = s.strip()
return s
def extract_comment_content(comment_html):
s = comment_html
if 'class="ctt">' in s:
s = s.split('class="ctt">', maxsplit=1)[1]
s = s.split('举报', maxsplit=1)[0]
s = emoji_re.sub('', s)
s = keyword_re.sub('', s)
s = url_re.sub('', s)
s = div_re.sub('', s)
s = image_re.sub('', s)
s = white_space_re.sub(' ', s)
s = s.replace('\xa0', '')
s = s.strip(':')
s = s.strip()
return s
|
the-stack_0_13980 | import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import re
import glob
def file_to_df(file_path):
    pattern = r"genome=([a-zA-Z0-9]+)_.*_run=(\d+)"
    genome, run = re.search(pattern, file_path).groups()
    df = pd.read_csv(file_path)
    df["fraction_stashed"] = df["nr_stashed"] / max(df["nr_qgrams"])
    df["Time per access (μs)"] = (df["access_time (ns)"] / df["nr_qgrams"]) / (10**3)
    df["Total Runtime (s)"] = df["total_time (ns)"] / (10**9)
    df["Total Access Time (s)"] = df["access_time (ns)"] / (10**9)
    df["h"] = [f"H={h}" for h in df.h]
    df["run"] = int(run)
    df["genome"] = genome
    return df
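# Example of the filename convention assumed by the regex above (hypothetical path):
#   re.search(r"genome=([a-zA-Z0-9]+)_.*_run=(\d+)", "stats/genome=hg38_q=16_run=3.csv").groups()
#   -> ('hg38', '3')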
def get_df():
dfs = []
for index_file in snakemake.input.stats:
dfs.append(file_to_df(index_file))
df = pd.concat(dfs)
df["genome"] = pd.Categorical(df["genome"], ["mxanthus", "pfalciparum", "hops", "hg38"])
df["h"] = pd.Categorical(df["h"], ["H=8", "H=16", "H=24"])
df.sort_values("genome")
return df
def plot():
df = get_df()
df = df.rename(
columns={
"h": "Hopscotch Neighborhood",
"total_time (ns)": "Total Runtime (ns)",
"genome": "Genome",
}
)
sns.set(
style="whitegrid",
font_scale=1.2,
)
sns.despine()
g = sns.catplot(
x="Hopscotch Neighborhood",
y="Time per access (μs)",
row="q",
col="Genome",
hue="hf",
kind="point",
size=5,
aspect=0.8,
data=df,
legend_out=False,
margin_titles=True,
dodge=True,
)
plt.savefig(snakemake.output.access_time_pdf)
if __name__ == "__main__":
plot()
|
the-stack_0_13983 | def output():
    print('\n'"Customer Code: ", a)
    print("Beginning Meter Reading: ", b)
    print("Ending Meter Reading: ", c)
    print("Gallons of Water Used: ", gallons_used)
    print("Amount Billed: $", bill, '\n')
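# Worked example of the residential ('r') branch in the loop below: a beginning reading
# of 100000 and an ending reading of 180000 give gallons = 80000, gallons_used = 8000.0,
# amount = 8000.0 * 0.0005 = 4.0, so the bill is 5 + 4.0 = 9.0 dollars.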
while True:
a = input("Enter code:\n ")
a = a.lower() #Changing the customer code to lower case
if a == 'r' or a == 'c' or a == 'i': #To continue, the customer code should be either r or c or i
b = input("Enter Beginning meter reading:\n ") #Getting Customer's Beginning meter number
if len(b) <= 9: #Checking the length of the Beginning meter number
c = input("Enter Ending meter reading:\n ") #Getting the Custmer's Ending meter number
if len(c) <= 9: #Checking the lenth of Customer's Ending meter number
bmn = int(b) #Converting the beginning and ending meter numbers to integers
emn = int(c)
gallons = emn - bmn #Calculating for the gallons as the difference of the meter numbers
gallons_used = gallons * 0.1 #Determining the gallons used as tenths
gallons_used = round(gallons_used, 2)
if a == 'r': #Considering whether the customer's code was r for residents
amount = gallons_used * 0.0005 #Calculating the amount to be paid for all gallons used
amount_billed = 5 + amount #Totaling the amount to be billed to the customer adding the standard payment of $5.00
bill = float(round(amount_billed, 2))
output() #calling the output function to display the customer's particulars
continue #Using continue to tell the program, that if the customer code was not r, go to the next
elif a == 'c':
if gallons_used <= 4000000:
amount_billed = 1000.00
bill = float(round(amount_billed, 2))
else:
excess_gallons = gallons_used - 4000000
amount = excess_gallons * 0.00025
amount_billed = 1000.00 + amount
bill = float(round(amount_billed, 2))
output()
continue
elif a == 'i':
if gallons_used <= 4000000:
amount_billed = 1000.00
bill = float(round(amount_billed, 2))
elif gallons_used > 4000000 and gallons_used <= 10000000:
amount_billed = 2000.00
bill = float(round(amount_billed, 2))
elif gallons_used > 10000000:
excess_gallons = gallons_used - 10000000
amount = excess_gallons * 0.00025
amount_billed = 2000.00 + amount
bill = float(round(amount_billed, 2))
output()
else:
print("Invalid Ending Meter Reading")
continue
else:
print("Invalid Beginning Meter Number")
continue
else:
print("Invalid Customer Code")
continue
|
the-stack_0_13984 | from django import template
import datetime
from website.models import *
register = template.Library()
# This tag is used in the shopping cart.
@register.simple_tag(takes_context=True)
def get_image_product(context, id_product):
    product = Product.objects.get(id=id_product)
    if product.type_product == False:
        id_origin_product = Link_Type.objects.get(product_id_id=id_product).parent_product
    else:
        id_origin_product = id_product
    image = Product_Image.objects.filter(product_id_id=id_origin_product).order_by('image_id_id').first()
    return '/product' + image.image_id.image_link.url
@register.simple_tag(takes_context=True)
def get_price_discount(context, price, discount):
    return price * (100 - discount) / 100
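# Template usage sketch (the tag library name is an assumption, e.g. {% load cart_tags %}):
#   {% get_image_product item.product_id %}
#   {% get_price_discount item.price item.discount %}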
|
the-stack_0_13985 | '''
Author: alex
Created Time: 2020-08-20 (Thursday) 16:09:37
'''
import cv2
import numpy as np
def remove_watermark(image, thr=200, convol=3):
    """
    Quick-and-dirty watermark removal for PDFs or scanned documents.
    Uses a convolution to speed up the computation.
    :param image: input image, an OpenCV grayscale image
    :param thr: pixel threshold below which a pixel counts as "ink"
    :param convol: size of the convolution window
    :return: image as an np.array
    """
    distance = int((convol - 1) / 2)  # pad the image edges so the convolution can be applied
    # pad the borders with white pixels
    image = cv2.copyMakeBorder(image, distance, distance, distance, distance,
                               cv2.BORDER_CONSTANT, value=255)
    mask = (image < thr).astype(int)  # use the threshold parameter instead of a hard-coded 200
    # box-filter convolution with a unit kernel
    mask = cv2.boxFilter(mask, -1, (convol, convol), normalize=False)
    mask = (mask >= 1).astype(int)  # mask done: >=1 means the window contains a dark pixel
    image[np.where(mask == 0)] = 255  # positions where the mask is 0 become white, removing the watermark
    h, w = image.shape[:2]
    image = image[distance:h - distance, distance:w - distance]
    return image
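# Typical usage sketch (the file path is an assumption):
#   gray = cv2.imread("scan.png", cv2.IMREAD_GRAYSCALE)
#   clean = remove_watermark(gray, thr=200, convol=3)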
def bak_remove_watermark(image, thr=200, distance=1):
    """
    Quick-and-dirty watermark removal for PDFs or scanned documents (pixel-by-pixel version).
    :param image: input image, a PIL Image
    :param thr: pixel threshold below which a pixel counts as "ink"
    :param distance: neighborhood distance (in pixels) to inspect
    :return: the cleaned image as a PIL Image
    """
    w, h = image.size
    rgb_im = image.convert('RGB')
    for x in range(0, w - 1):
        for y in range(0, h - 1):
            if not hasBlackAround(x, y, distance, rgb_im, thr=thr):
                rgb_im.putpixel((x, y), (255, 255, 255))
    return rgb_im
def hasBlackAround(x, y, distance, img, thr=200):
    w, h = img.size
    startX = max(0, x - distance)
    startY = max(0, y - distance)
    endX = min(w - 1, x + distance)
    endY = min(h - 1, y + distance)
    for j in range(startX, endX):
        for k in range(startY, endY):
            r, g, b = img.getpixel((j, k))
            if r < thr and g < thr and b < thr:
                # found a dark pixel that satisfies the threshold
                return True
    return False
if __name__ == '__main__':
    from PIL import Image
    debug = False
    image_path = "gf-png/gf1.png"
    img = Image.open(image_path)
    # The PIL image and the distance argument match the pixel-by-pixel variant above.
    res_img = bak_remove_watermark(img, thr=100, distance=1)
|
the-stack_0_13989 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""configure script to get build parameters from user."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import errno
import glob
import os
import platform
import re
import subprocess
import sys
# pylint: disable=g-import-not-at-top
try:
from shutil import which
except ImportError:
from distutils.spawn import find_executable as which
# pylint: enable=g-import-not-at-top
_DEFAULT_CUDA_VERSION = '10'
_DEFAULT_CUDNN_VERSION = '7'
_DEFAULT_CUDA_COMPUTE_CAPABILITIES = '3.5,7.0'
_DEFAULT_GCC_TOOLCHAIN_PATH = ''
_DEFAULT_GCC_TOOLCHAIN_TARGET = ''
_DEFAULT_PROMPT_ASK_ATTEMPTS = 10
_TF_BAZELRC_FILENAME = '.tf_plugin_configure.bazelrc'
_TF_WORKSPACE_ROOT = ''
_TF_BAZELRC = ''
_TF_CURRENT_BAZEL_VERSION = None
NCCL_LIB_PATHS = [
'lib64/', 'lib/powerpc64le-linux-gnu/', 'lib/x86_64-linux-gnu/', ''
]
class UserInputError(Exception):
pass
def is_windows():
return platform.system() == 'Windows'
def is_linux():
return platform.system() == 'Linux'
def is_macos():
return platform.system() == 'Darwin'
def is_ppc64le():
return platform.machine() == 'ppc64le'
def is_cygwin():
return platform.system().startswith('CYGWIN_NT')
def get_input(question):
try:
try:
answer = raw_input(question)
except NameError:
answer = input(question) # pylint: disable=bad-builtin
except EOFError:
answer = ''
return answer
def symlink_force(target, link_name):
"""Force symlink, equivalent of 'ln -sf'.
Args:
target: items to link to.
link_name: name of the link.
"""
try:
os.symlink(target, link_name)
except OSError as e:
if e.errno == errno.EEXIST:
os.remove(link_name)
os.symlink(target, link_name)
else:
raise e
def sed_in_place(filename, old, new):
"""Replace old string with new string in file.
Args:
filename: string for filename.
old: string to replace.
new: new string to replace to.
"""
with open(filename, 'r') as f:
filedata = f.read()
newdata = filedata.replace(old, new)
with open(filename, 'w') as f:
f.write(newdata)
def write_to_bazelrc(line):
with open(_TF_BAZELRC, 'a') as f:
f.write(line + '\n')
def write_action_env_to_bazelrc(var_name, var):
write_to_bazelrc('build --action_env %s="%s"' % (var_name, str(var)))
def run_shell(cmd, allow_non_zero=False):
if allow_non_zero:
try:
output = subprocess.check_output(cmd)
except subprocess.CalledProcessError as e:
output = e.output
else:
output = subprocess.check_output(cmd)
return output.decode('UTF-8').strip()
def cygpath(path):
"""Convert path from posix to windows."""
return os.path.abspath(path).replace('\\', '/')
def get_python_path(environ_cp, python_bin_path):
"""Get the python site package paths."""
python_paths = []
if environ_cp.get('PYTHONPATH'):
python_paths = environ_cp.get('PYTHONPATH').split(':')
try:
library_paths = run_shell([
python_bin_path, '-c',
'import site; print("\\n".join(site.getsitepackages()))'
]).split('\n')
except subprocess.CalledProcessError:
library_paths = [
run_shell([
python_bin_path, '-c',
'from distutils.sysconfig import get_python_lib;'
'print(get_python_lib())'
])
]
all_paths = set(python_paths + library_paths)
paths = []
for path in all_paths:
if os.path.isdir(path):
paths.append(path)
return paths
def get_python_major_version(python_bin_path):
"""Get the python major version."""
return run_shell([python_bin_path, '-c', 'import sys; print(sys.version[0])'])
def setup_python(environ_cp):
"""Setup python related env variables."""
# Get PYTHON_BIN_PATH, default is the current running python.
default_python_bin_path = sys.executable
ask_python_bin_path = ('Please specify the location of python. [Default is '
'%s]: ') % default_python_bin_path
while True:
python_bin_path = get_from_env_or_user_or_default(environ_cp,
'PYTHON_BIN_PATH',
ask_python_bin_path,
default_python_bin_path)
# Check if the path is valid
if os.path.isfile(python_bin_path) and os.access(python_bin_path, os.X_OK):
break
elif not os.path.exists(python_bin_path):
print('Invalid python path: %s cannot be found.' % python_bin_path)
else:
print('%s is not executable. Is it the python binary?' % python_bin_path)
environ_cp['PYTHON_BIN_PATH'] = ''
# Convert python path to Windows style before checking lib and version
if is_windows() or is_cygwin():
python_bin_path = cygpath(python_bin_path)
# Get PYTHON_LIB_PATH
python_lib_path = environ_cp.get('PYTHON_LIB_PATH')
if not python_lib_path:
python_lib_paths = get_python_path(environ_cp, python_bin_path)
if environ_cp.get('USE_DEFAULT_PYTHON_LIB_PATH') == '1':
python_lib_path = python_lib_paths[0]
else:
print('Found possible Python library paths:\n %s' %
'\n '.join(python_lib_paths))
default_python_lib_path = python_lib_paths[0]
python_lib_path = get_input(
'Please input the desired Python library path to use. '
'Default is [%s]\n' % python_lib_paths[0])
if not python_lib_path:
python_lib_path = default_python_lib_path
environ_cp['PYTHON_LIB_PATH'] = python_lib_path
_ = get_python_major_version(python_bin_path)
# Convert python path to Windows style before writing into bazel.rc
if is_windows() or is_cygwin():
python_lib_path = cygpath(python_lib_path)
# Set-up env variables used by python_configure.bzl
write_action_env_to_bazelrc('PYTHON_BIN_PATH', python_bin_path)
write_action_env_to_bazelrc('PYTHON_LIB_PATH', python_lib_path)
write_to_bazelrc('build --python_path=\"%s"' % python_bin_path)
environ_cp['PYTHON_BIN_PATH'] = python_bin_path
# If choosen python_lib_path is from a path specified in the PYTHONPATH
# variable, need to tell bazel to include PYTHONPATH
if environ_cp.get('PYTHONPATH'):
python_paths = environ_cp.get('PYTHONPATH').split(':')
if python_lib_path in python_paths:
write_action_env_to_bazelrc('PYTHONPATH', environ_cp.get('PYTHONPATH'))
# Write tools/python_bin_path.sh
with open(
os.path.join(_TF_WORKSPACE_ROOT, 'tensorflow_plugin', 'tools', 'python_bin_path.sh'),
'w') as f:
f.write('export PYTHON_BIN_PATH="%s"' % python_bin_path)
def get_python_lib_name(environ_cp):
python_bin_path = environ_cp['PYTHON_BIN_PATH']
path_list = python_bin_path.split(os.sep)[:-2]
path_list.append('lib')
py_lib_path = os.sep.join(path_list)
for _, _, files in os.walk(py_lib_path):
for name in files:
if str(name).startswith('libpython') and str(name).endswith('.so'):
# strip libxxx.so to get xxx
return str(name).strip()[3:-3]
def get_python_link_path(environ_cp):
# TODO(quintin): we need to link libpythonx.y.so for _pywrap_tensorflow_internal.so
# once google change CAPI symbols into libtensorflow.so, we don't need this
python_bin_path = environ_cp['PYTHON_BIN_PATH']
path_list = python_bin_path.split(os.sep)[:-2]
path_list.append('lib')
py_lib_path = os.sep.join(path_list)
return py_lib_path
def create_build_configuration(environ_cp):
tf_header_dir = environ_cp['PYTHON_LIB_PATH'] + "/tensorflow/include"
tf_shared_lib_dir = environ_cp['PYTHON_LIB_PATH'] + "/tensorflow/"
write_action_env_to_bazelrc("TF_HEADER_DIR", tf_header_dir)
write_action_env_to_bazelrc("TF_SHARED_LIBRARY_DIR", tf_shared_lib_dir)
write_action_env_to_bazelrc("TF_CXX11_ABI_FLAG", 1)
write_action_env_to_bazelrc("PYTHON_LINK_LIB_NAME", get_python_lib_name(environ_cp))
write_action_env_to_bazelrc("PYTHON_LINK_PATH", get_python_link_path(environ_cp))
def reset_tf_configure_bazelrc():
"""Reset file that contains customized config settings."""
open(_TF_BAZELRC, 'w').close()
def cleanup_makefile():
"""Delete any leftover BUILD files from the Makefile build.
These files could interfere with Bazel parsing.
"""
makefile_download_dir = os.path.join(_TF_WORKSPACE_ROOT, 'tensorflow',
'contrib', 'makefile', 'downloads')
if os.path.isdir(makefile_download_dir):
for root, _, filenames in os.walk(makefile_download_dir):
for f in filenames:
if f.endswith('BUILD'):
os.remove(os.path.join(root, f))
def get_var(environ_cp,
var_name,
query_item,
enabled_by_default,
question=None,
yes_reply=None,
no_reply=None):
"""Get boolean input from user.
If var_name is not set in env, ask user to enable query_item or not. If the
response is empty, use the default.
Args:
environ_cp: copy of the os.environ.
var_name: string for name of environment variable, e.g. "TF_NEED_CUDA".
query_item: string for feature related to the variable, e.g. "CUDA for
Nvidia GPUs".
enabled_by_default: boolean for default behavior.
question: optional string for how to ask for user input.
yes_reply: optional string for reply when feature is enabled.
no_reply: optional string for reply when feature is disabled.
Returns:
boolean value of the variable.
Raises:
UserInputError: if an environment variable is set, but it cannot be
interpreted as a boolean indicator, assume that the user has made a
scripting error, and will continue to provide invalid input.
Raise the error to avoid infinitely looping.
"""
if not question:
question = 'Do you wish to build TensorFlow plug-in with %s support?' % query_item
if not yes_reply:
yes_reply = '%s support will be enabled for TensorFlow plug-in.' % query_item
if not no_reply:
no_reply = 'No %s' % yes_reply
yes_reply += '\n'
no_reply += '\n'
if enabled_by_default:
question += ' [Y/n]: '
else:
question += ' [y/N]: '
var = environ_cp.get(var_name)
if var is not None:
var_content = var.strip().lower()
true_strings = ('1', 't', 'true', 'y', 'yes')
false_strings = ('0', 'f', 'false', 'n', 'no')
if var_content in true_strings:
var = True
elif var_content in false_strings:
var = False
else:
raise UserInputError(
'Environment variable %s must be set as a boolean indicator.\n'
'The following are accepted as TRUE : %s.\n'
'The following are accepted as FALSE: %s.\n'
'Current value is %s.' %
(var_name, ', '.join(true_strings), ', '.join(false_strings), var))
while var is None:
user_input_origin = get_input(question)
user_input = user_input_origin.strip().lower()
if user_input == 'y':
print(yes_reply)
var = True
elif user_input == 'n':
print(no_reply)
var = False
elif not user_input:
if enabled_by_default:
print(yes_reply)
var = True
else:
print(no_reply)
var = False
else:
print('Invalid selection: %s' % user_input_origin)
return var
def set_build_var(environ_cp,
var_name,
query_item,
option_name,
enabled_by_default,
bazel_config_name=None):
"""Set if query_item will be enabled for the build.
Ask user if query_item will be enabled. Default is used if no input is given.
Set subprocess environment variable and write to .bazelrc if enabled.
Args:
environ_cp: copy of the os.environ.
var_name: string for name of environment variable, e.g. "TF_NEED_CUDA".
query_item: string for feature related to the variable, e.g. "CUDA for
Nvidia GPUs".
option_name: string for option to define in .bazelrc.
enabled_by_default: boolean for default behavior.
bazel_config_name: Name for Bazel --config argument to enable build feature.
"""
var = str(int(get_var(environ_cp, var_name, query_item, enabled_by_default)))
environ_cp[var_name] = var
if var == '1':
write_to_bazelrc('build:%s --define %s=true' %
(bazel_config_name, option_name))
write_to_bazelrc('build --config=%s' % bazel_config_name)
elif bazel_config_name is not None:
# TODO(mikecase): Migrate all users of configure.py to use --config Bazel
# options and not to set build configs through environment variables.
write_to_bazelrc('build:%s --define %s=true' %
(bazel_config_name, option_name))
def set_action_env_var(environ_cp,
var_name,
query_item,
enabled_by_default,
question=None,
yes_reply=None,
no_reply=None,
bazel_config_name=None):
"""Set boolean action_env variable.
Ask user if query_item will be enabled. Default is used if no input is given.
Set environment variable and write to .bazelrc.
Args:
environ_cp: copy of the os.environ.
var_name: string for name of environment variable, e.g. "TF_NEED_CUDA".
query_item: string for feature related to the variable, e.g. "CUDA for
Nvidia GPUs".
enabled_by_default: boolean for default behavior.
question: optional string for how to ask for user input.
yes_reply: optional string for reply when feature is enabled.
no_reply: optional string for reply when feature is disabled.
bazel_config_name: adding config to .bazelrc instead of action_env.
"""
var = int(
get_var(environ_cp, var_name, query_item, enabled_by_default, question,
yes_reply, no_reply))
if not bazel_config_name:
write_action_env_to_bazelrc(var_name, var)
elif var:
write_to_bazelrc('build --config=%s' % bazel_config_name)
environ_cp[var_name] = str(var)
def convert_version_to_int(version):
"""Convert a version number to a integer that can be used to compare.
Version strings of the form X.YZ and X.Y.Z-xxxxx are supported. The
'xxxxx' part, for instance 'homebrew' on OS/X, is ignored.
Args:
version: a version to be converted
Returns:
An integer if converted successfully, otherwise return None.
"""
version = version.split('-')[0]
version_segments = version.split('.')
# Treat "0.24" as "0.24.0"
if len(version_segments) == 2:
version_segments.append('0')
for seg in version_segments:
if not seg.isdigit():
return None
version_str = ''.join(['%03d' % int(seg) for seg in version_segments])
return int(version_str)
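# Examples (illustrative): convert_version_to_int('0.24') treats the input as '0.24.0'
# and returns 24000; convert_version_to_int('3.7.2-homebrew') returns 3007002.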
def check_bazel_version(min_version, max_version):
"""Check installed bazel version is between min_version and max_version.
Args:
min_version: string for minimum bazel version (must exist!).
max_version: string for maximum bazel version (must exist!).
Returns:
The bazel version detected.
"""
if os.path.exists("./.bazelversion"):
curr_version = run_shell(
['cat', '.bazelversion'])
else :
if which('bazel') is None:
print('Cannot find bazel. Please install bazel.')
sys.exit(0)
curr_version = run_shell(
['bazel', '--batch', '--bazelrc=/dev/null', 'version'])
for line in curr_version.split('\n'):
if 'Build label: ' in line:
curr_version = line.split('Build label: ')[1]
break
min_version_int = convert_version_to_int(min_version)
curr_version_int = convert_version_to_int(curr_version)
max_version_int = convert_version_to_int(max_version)
# Check if current bazel version can be detected properly.
if not curr_version_int:
print('WARNING: current bazel installation is not a release version.')
print('Make sure you are running at least bazel %s' % min_version)
return curr_version
print('You have bazel %s installed.' % curr_version)
if curr_version_int < min_version_int:
print('Please upgrade your bazel installation to version %s or higher to '
'build TensorFlow!' % min_version)
sys.exit(1)
if (curr_version_int > max_version_int and
'TF_IGNORE_MAX_BAZEL_VERSION' not in os.environ):
print('Please downgrade your bazel installation to version %s or lower to '
'build TensorFlow! To downgrade: download the installer for the old '
'version (from https://github.com/bazelbuild/bazel/releases) then '
'run the installer.' % max_version)
sys.exit(1)
return curr_version
def set_cc_opt_flags(environ_cp):
"""Set up architecture-dependent optimization flags.
Also append CC optimization flags to bazel.rc..
Args:
environ_cp: copy of the os.environ.
"""
if is_ppc64le():
# gcc on ppc64le does not support -march, use mcpu instead
default_cc_opt_flags = '-mcpu=native'
elif is_windows():
default_cc_opt_flags = '/arch:AVX'
else:
default_cc_opt_flags = '-march=native -Wno-sign-compare'
question = ('Please specify optimization flags to use during compilation when'
' bazel option "--config=opt" is specified [Default is %s]: '
) % default_cc_opt_flags
cc_opt_flags = get_from_env_or_user_or_default(environ_cp, 'CC_OPT_FLAGS',
question, default_cc_opt_flags)
for opt in cc_opt_flags.split():
write_to_bazelrc('build:opt --copt=%s' % opt)
# It should be safe on the same build host.
if not is_ppc64le() and not is_windows():
write_to_bazelrc('build:opt --host_copt=-march=native')
write_to_bazelrc('build:opt --define with_default_optimizations=true')
def set_tf_cuda_clang(environ_cp):
"""set TF_CUDA_CLANG action_env.
Args:
environ_cp: copy of the os.environ.
"""
question = 'Do you want to use clang as CUDA compiler?'
yes_reply = 'Clang will be used as CUDA compiler.'
no_reply = 'nvcc will be used as CUDA compiler.'
set_action_env_var(
environ_cp,
'TF_CUDA_CLANG',
None,
False,
question=question,
yes_reply=yes_reply,
no_reply=no_reply,
bazel_config_name='cuda_clang')
def set_tf_download_clang(environ_cp):
"""Set TF_DOWNLOAD_CLANG action_env."""
question = 'Do you wish to download a fresh release of clang? (Experimental)'
yes_reply = 'Clang will be downloaded and used to compile tensorflow.'
no_reply = 'Clang will not be downloaded.'
set_action_env_var(
environ_cp,
'TF_DOWNLOAD_CLANG',
None,
False,
question=question,
yes_reply=yes_reply,
no_reply=no_reply,
bazel_config_name='download_clang')
def get_from_env_or_user_or_default(environ_cp, var_name, ask_for_var,
var_default):
"""Get var_name either from env, or user or default.
If var_name has been set as environment variable, use the preset value, else
ask for user input. If no input is provided, the default is used.
Args:
environ_cp: copy of the os.environ.
var_name: string for name of environment variable, e.g. "TF_NEED_CUDA".
ask_for_var: string for how to ask for user input.
var_default: default value string.
Returns:
string value for var_name
"""
var = environ_cp.get(var_name)
if not var:
var = get_input(ask_for_var)
print('\n')
if not var:
var = var_default
return var
def set_clang_cuda_compiler_path(environ_cp):
"""Set CLANG_CUDA_COMPILER_PATH."""
default_clang_path = which('clang') or ''
ask_clang_path = ('Please specify which clang should be used as device and '
'host compiler. [Default is %s]: ') % default_clang_path
while True:
clang_cuda_compiler_path = get_from_env_or_user_or_default(
environ_cp, 'CLANG_CUDA_COMPILER_PATH', ask_clang_path,
default_clang_path)
if os.path.exists(clang_cuda_compiler_path):
break
# Reset and retry
print('Invalid clang path: %s cannot be found.' % clang_cuda_compiler_path)
environ_cp['CLANG_CUDA_COMPILER_PATH'] = ''
# Set CLANG_CUDA_COMPILER_PATH
environ_cp['CLANG_CUDA_COMPILER_PATH'] = clang_cuda_compiler_path
write_action_env_to_bazelrc('CLANG_CUDA_COMPILER_PATH',
clang_cuda_compiler_path)
def prompt_loop_or_load_from_env(environ_cp,
var_name,
var_default,
ask_for_var,
check_success,
error_msg,
suppress_default_error=False,
n_ask_attempts=_DEFAULT_PROMPT_ASK_ATTEMPTS):
"""Loop over user prompts for an ENV param until receiving a valid response.
For the env param var_name, read from the environment or verify user input
until receiving valid input. When done, set var_name in the environ_cp to its
new value.
Args:
environ_cp: (Dict) copy of the os.environ.
var_name: (String) string for name of environment variable, e.g. "TF_MYVAR".
var_default: (String) default value string.
ask_for_var: (String) string for how to ask for user input.
check_success: (Function) function that takes one argument and returns a
boolean. Should return True if the value provided is considered valid. May
contain a complex error message if error_msg does not provide enough
information. In that case, set suppress_default_error to True.
error_msg: (String) String with one and only one '%s'. Formatted with each
invalid response upon check_success(input) failure.
suppress_default_error: (Bool) Suppress the above error message in favor of
one from the check_success function.
n_ask_attempts: (Integer) Number of times to query for valid input before
raising an error and quitting.
Returns:
[String] The value of var_name after querying for input.
Raises:
UserInputError: if a query has been attempted n_ask_attempts times without
success, assume that the user has made a scripting error, and will
continue to provide invalid input. Raise the error to avoid infinitely
looping.
"""
default = environ_cp.get(var_name) or var_default
full_query = '%s [Default is %s]: ' % (
ask_for_var,
default,
)
for _ in range(n_ask_attempts):
val = get_from_env_or_user_or_default(environ_cp, var_name, full_query,
default)
if check_success(val):
break
if not suppress_default_error:
print(error_msg % val)
environ_cp[var_name] = ''
else:
raise UserInputError('Invalid %s setting was provided %d times in a row. '
'Assuming to be a scripting mistake.' %
(var_name, n_ask_attempts))
environ_cp[var_name] = val
return val
def create_android_ndk_rule(environ_cp):
"""Set ANDROID_NDK_HOME and write Android NDK WORKSPACE rule."""
if is_windows() or is_cygwin():
default_ndk_path = cygpath('%s/Android/Sdk/ndk-bundle' %
environ_cp['APPDATA'])
elif is_macos():
default_ndk_path = '%s/library/Android/Sdk/ndk-bundle' % environ_cp['HOME']
else:
default_ndk_path = '%s/Android/Sdk/ndk-bundle' % environ_cp['HOME']
def valid_ndk_path(path):
return (os.path.exists(path) and
os.path.exists(os.path.join(path, 'source.properties')))
android_ndk_home_path = prompt_loop_or_load_from_env(
environ_cp,
var_name='ANDROID_NDK_HOME',
var_default=default_ndk_path,
ask_for_var='Please specify the home path of the Android NDK to use.',
check_success=valid_ndk_path,
error_msg=('The path %s or its child file "source.properties" '
'does not exist.'))
write_action_env_to_bazelrc('ANDROID_NDK_HOME', android_ndk_home_path)
write_action_env_to_bazelrc(
'ANDROID_NDK_API_LEVEL',
get_ndk_api_level(environ_cp, android_ndk_home_path))
def create_android_sdk_rule(environ_cp):
"""Set Android variables and write Android SDK WORKSPACE rule."""
if is_windows() or is_cygwin():
default_sdk_path = cygpath('%s/Android/Sdk' % environ_cp['APPDATA'])
elif is_macos():
default_sdk_path = '%s/library/Android/Sdk' % environ_cp['HOME']
else:
default_sdk_path = '%s/Android/Sdk' % environ_cp['HOME']
def valid_sdk_path(path):
return (os.path.exists(path) and
os.path.exists(os.path.join(path, 'platforms')) and
os.path.exists(os.path.join(path, 'build-tools')))
android_sdk_home_path = prompt_loop_or_load_from_env(
environ_cp,
var_name='ANDROID_SDK_HOME',
var_default=default_sdk_path,
ask_for_var='Please specify the home path of the Android SDK to use.',
check_success=valid_sdk_path,
error_msg=('Either %s does not exist, or it does not contain the '
'subdirectories "platforms" and "build-tools".'))
platforms = os.path.join(android_sdk_home_path, 'platforms')
api_levels = sorted(os.listdir(platforms))
api_levels = [x.replace('android-', '') for x in api_levels]
def valid_api_level(api_level):
return os.path.exists(
os.path.join(android_sdk_home_path, 'platforms',
'android-' + api_level))
android_api_level = prompt_loop_or_load_from_env(
environ_cp,
var_name='ANDROID_API_LEVEL',
var_default=api_levels[-1],
ask_for_var=('Please specify the Android SDK API level to use. '
'[Available levels: %s]') % api_levels,
check_success=valid_api_level,
error_msg='Android-%s is not present in the SDK path.')
build_tools = os.path.join(android_sdk_home_path, 'build-tools')
versions = sorted(os.listdir(build_tools))
def valid_build_tools(version):
return os.path.exists(
os.path.join(android_sdk_home_path, 'build-tools', version))
android_build_tools_version = prompt_loop_or_load_from_env(
environ_cp,
var_name='ANDROID_BUILD_TOOLS_VERSION',
var_default=versions[-1],
ask_for_var=('Please specify an Android build tools version to use. '
'[Available versions: %s]') % versions,
check_success=valid_build_tools,
error_msg=('The selected SDK does not have build-tools version %s '
'available.'))
write_action_env_to_bazelrc('ANDROID_BUILD_TOOLS_VERSION',
android_build_tools_version)
write_action_env_to_bazelrc('ANDROID_SDK_API_LEVEL', android_api_level)
write_action_env_to_bazelrc('ANDROID_SDK_HOME', android_sdk_home_path)
def get_ndk_api_level(environ_cp, android_ndk_home_path):
"""Gets the appropriate NDK API level to use for the provided Android NDK path."""
# First check to see if we're using a blessed version of the NDK.
properties_path = '%s/source.properties' % android_ndk_home_path
if is_windows() or is_cygwin():
properties_path = cygpath(properties_path)
with open(properties_path, 'r') as f:
filedata = f.read()
revision = re.search(r'Pkg.Revision = (\d+)', filedata)
if revision:
ndk_version = revision.group(1)
else:
raise Exception('Unable to parse NDK revision.')
if int(ndk_version) not in _SUPPORTED_ANDROID_NDK_VERSIONS:
print('WARNING: The NDK version in %s is %s, which is not '
'supported by Bazel (officially supported versions: %s). Please use '
'another version. Compiling Android targets may result in confusing '
'errors.\n' % (android_ndk_home_path, ndk_version,
_SUPPORTED_ANDROID_NDK_VERSIONS))
# Now grab the NDK API level to use. Note that this is different from the
# SDK API level, as the NDK API level is effectively the *min* target SDK
# version.
platforms = os.path.join(android_ndk_home_path, 'platforms')
api_levels = sorted(os.listdir(platforms))
api_levels = [
x.replace('android-', '') for x in api_levels if 'android-' in x
]
def valid_api_level(api_level):
return os.path.exists(
os.path.join(android_ndk_home_path, 'platforms',
'android-' + api_level))
android_ndk_api_level = prompt_loop_or_load_from_env(
environ_cp,
var_name='ANDROID_NDK_API_LEVEL',
var_default='18', # 18 is required for GPU acceleration.
ask_for_var=('Please specify the (min) Android NDK API level to use. '
'[Available levels: %s]') % api_levels,
check_success=valid_api_level,
error_msg='Android-%s is not present in the NDK path.')
return android_ndk_api_level
def set_gcc_host_compiler_path(environ_cp):
"""Set GCC_HOST_COMPILER_PATH."""
default_gcc_host_compiler_path = which('gcc') or ''
cuda_bin_symlink = '%s/bin/gcc' % environ_cp.get('CUDA_TOOLKIT_PATH')
if os.path.islink(cuda_bin_symlink):
# os.readlink is only available in linux
default_gcc_host_compiler_path = os.path.realpath(cuda_bin_symlink)
gcc_host_compiler_path = prompt_loop_or_load_from_env(
environ_cp,
var_name='GCC_HOST_COMPILER_PATH',
var_default=default_gcc_host_compiler_path,
ask_for_var='Please specify which gcc should be used by nvcc as the host compiler.',
check_success=os.path.exists,
error_msg='Invalid gcc path. %s cannot be found.',
)
write_action_env_to_bazelrc('GCC_HOST_COMPILER_PATH', gcc_host_compiler_path)
def reformat_version_sequence(version_str, sequence_count):
"""Reformat the version string to have the given number of sequences.
For example:
Given (7, 2) -> 7.0
(7.0.1, 2) -> 7.0
(5, 1) -> 5
(5.0.3.2, 1) -> 5
Args:
version_str: String, the version string.
sequence_count: int, an integer.
Returns:
string, reformatted version string.
"""
v = version_str.split('.')
if len(v) < sequence_count:
v = v + (['0'] * (sequence_count - len(v)))
return '.'.join(v[:sequence_count])
def set_tf_cuda_paths(environ_cp):
"""Set TF_CUDA_PATHS."""
ask_cuda_paths = (
'Please specify the comma-separated list of base paths to look for CUDA '
'libraries and headers. [Leave empty to use the default]: ')
tf_cuda_paths = get_from_env_or_user_or_default(environ_cp, 'TF_CUDA_PATHS',
ask_cuda_paths, '')
if tf_cuda_paths:
environ_cp['TF_CUDA_PATHS'] = tf_cuda_paths
def set_tf_cuda_version(environ_cp):
"""Set TF_CUDA_VERSION."""
ask_cuda_version = (
'Please specify the CUDA SDK version you want to use. '
'[Leave empty to default to CUDA %s]: ') % _DEFAULT_CUDA_VERSION
tf_cuda_version = get_from_env_or_user_or_default(environ_cp,
'TF_CUDA_VERSION',
ask_cuda_version,
_DEFAULT_CUDA_VERSION)
environ_cp['TF_CUDA_VERSION'] = tf_cuda_version
def set_tf_cudnn_version(environ_cp):
"""Set TF_CUDNN_VERSION."""
ask_cudnn_version = (
'Please specify the cuDNN version you want to use. '
'[Leave empty to default to cuDNN %s]: ') % _DEFAULT_CUDNN_VERSION
tf_cudnn_version = get_from_env_or_user_or_default(environ_cp,
'TF_CUDNN_VERSION',
ask_cudnn_version,
_DEFAULT_CUDNN_VERSION)
environ_cp['TF_CUDNN_VERSION'] = tf_cudnn_version
def is_cuda_compatible(lib, cuda_ver, cudnn_ver):
"""Check compatibility between given library and cudnn/cudart libraries."""
ldd_bin = which('ldd') or '/usr/bin/ldd'
ldd_out = run_shell([ldd_bin, lib], True)
ldd_out = ldd_out.split(os.linesep)
cudnn_pattern = re.compile('.*libcudnn.so\\.?(.*) =>.*$')
cuda_pattern = re.compile('.*libcudart.so\\.?(.*) =>.*$')
cudnn = None
cudart = None
cudnn_ok = True # assume no cudnn dependency by default
cuda_ok = True # assume no cuda dependency by default
for line in ldd_out:
if 'libcudnn.so' in line:
cudnn = cudnn_pattern.search(line)
cudnn_ok = False
elif 'libcudart.so' in line:
cudart = cuda_pattern.search(line)
cuda_ok = False
if cudnn and len(cudnn.group(1)):
cudnn = convert_version_to_int(cudnn.group(1))
if cudart and len(cudart.group(1)):
cudart = convert_version_to_int(cudart.group(1))
if cudnn is not None:
cudnn_ok = (cudnn == cudnn_ver)
if cudart is not None:
cuda_ok = (cudart == cuda_ver)
return cudnn_ok and cuda_ok
def get_native_cuda_compute_capabilities(environ_cp):
"""Get native cuda compute capabilities.
Args:
environ_cp: copy of the os.environ.
Returns:
string of native cuda compute capabilities, separated by comma.
"""
device_query_bin = os.path.join(
environ_cp.get('CUDA_TOOLKIT_PATH'), 'extras/demo_suite/deviceQuery')
if os.path.isfile(device_query_bin) and os.access(device_query_bin, os.X_OK):
try:
output = run_shell(device_query_bin).split('\n')
pattern = re.compile('[0-9]*\\.[0-9]*')
output = [pattern.search(x) for x in output if 'Capability' in x]
output = ','.join(x.group() for x in output if x is not None)
except subprocess.CalledProcessError:
output = ''
else:
output = ''
return output
def set_tf_cuda_compute_capabilities(environ_cp):
"""Set TF_CUDA_COMPUTE_CAPABILITIES."""
while True:
native_cuda_compute_capabilities = get_native_cuda_compute_capabilities(
environ_cp)
if not native_cuda_compute_capabilities:
default_cuda_compute_capabilities = _DEFAULT_CUDA_COMPUTE_CAPABILITIES
else:
default_cuda_compute_capabilities = native_cuda_compute_capabilities
ask_cuda_compute_capabilities = (
'Please specify a list of comma-separated CUDA compute capabilities '
'you want to build with.\nYou can find the compute capability of your '
'device at: https://developer.nvidia.com/cuda-gpus. Each capability '
'can be specified as "x.y" or "compute_xy" to include both virtual and'
' binary GPU code, or as "sm_xy" to only include the binary '
'code.\nPlease note that each additional compute capability '
'significantly increases your build time and binary size, and that '
'TensorFlow only supports compute capabilities >= 3.5 [Default is: '
'%s]: ' % default_cuda_compute_capabilities)
tf_cuda_compute_capabilities = get_from_env_or_user_or_default(
environ_cp, 'TF_CUDA_COMPUTE_CAPABILITIES',
ask_cuda_compute_capabilities, default_cuda_compute_capabilities)
# Check whether all capabilities from the input is valid
all_valid = True
# Remove all whitespace characters before splitting the string
# that users may insert by accident, as this will result in error
tf_cuda_compute_capabilities = ''.join(tf_cuda_compute_capabilities.split())
for compute_capability in tf_cuda_compute_capabilities.split(','):
m = re.match('[0-9]+.[0-9]+', compute_capability)
if not m:
# We now support sm_35,sm_50,sm_60,compute_70.
sm_compute_match = re.match('(sm|compute)_?([0-9]+[0-9]+)',
compute_capability)
if not sm_compute_match:
print('Invalid compute capability: %s' % compute_capability)
all_valid = False
else:
ver = int(sm_compute_match.group(2))
if ver < 30:
print(
'ERROR: TensorFlow only supports small CUDA compute'
' capabilities of sm_30 and higher. Please re-specify the list'
' of compute capabilities excluding version %s.' % ver)
all_valid = False
if ver < 35:
print('WARNING: XLA does not support CUDA compute capabilities '
'lower than sm_35. Disable XLA when running on older GPUs.')
else:
ver = float(m.group(0))
if ver < 3.0:
print('ERROR: TensorFlow only supports CUDA compute capabilities 3.0 '
'and higher. Please re-specify the list of compute '
'capabilities excluding version %s.' % ver)
all_valid = False
if ver < 3.5:
print('WARNING: XLA does not support CUDA compute capabilities '
'lower than 3.5. Disable XLA when running on older GPUs.')
if all_valid:
break
# Reset and Retry
environ_cp['TF_CUDA_COMPUTE_CAPABILITIES'] = ''
# Set TF_CUDA_COMPUTE_CAPABILITIES
environ_cp['TF_CUDA_COMPUTE_CAPABILITIES'] = tf_cuda_compute_capabilities
write_action_env_to_bazelrc('TF_CUDA_COMPUTE_CAPABILITIES',
tf_cuda_compute_capabilities)
def set_other_cuda_vars(environ_cp):
"""Set other CUDA related variables."""
# If CUDA is enabled, always use GPU during build and test.
if environ_cp.get('TF_CUDA_CLANG') == '1':
write_to_bazelrc('build --config=cuda_clang')
else:
write_to_bazelrc('build --config=cuda')
def set_host_cxx_compiler(environ_cp):
"""Set HOST_CXX_COMPILER."""
default_cxx_host_compiler = which('g++') or ''
host_cxx_compiler = prompt_loop_or_load_from_env(
environ_cp,
var_name='HOST_CXX_COMPILER',
var_default=default_cxx_host_compiler,
ask_for_var=('Please specify which C++ compiler should be used as the '
'host C++ compiler.'),
check_success=os.path.exists,
error_msg='Invalid C++ compiler path. %s cannot be found.',
)
write_action_env_to_bazelrc('HOST_CXX_COMPILER', host_cxx_compiler)
def set_host_c_compiler(environ_cp):
"""Set HOST_C_COMPILER."""
default_c_host_compiler = which('gcc') or ''
host_c_compiler = prompt_loop_or_load_from_env(
environ_cp,
var_name='HOST_C_COMPILER',
var_default=default_c_host_compiler,
ask_for_var=('Please specify which C compiler should be used as the host '
'C compiler.'),
check_success=os.path.exists,
error_msg='Invalid C compiler path. %s cannot be found.',
)
write_action_env_to_bazelrc('HOST_C_COMPILER', host_c_compiler)
def set_opencl_sdk_root(environ_cp):
"""Set OPENCL SDK ROOT"""
def toolkit_exists(toolkit_path):
"""Check if a CL header path is valid."""
if toolkit_path == '':
return True
if is_linux():
cl_header_path = 'opencl/SDK/include/CL/cl.h'
else:
cl_header_path = ''
cl_path_full = os.path.join(toolkit_path, cl_header_path)
exists = os.path.exists(cl_path_full)
if not exists:
print('Invalid OPENCL SDK ROOT path. %s cannot be found' %
(cl_path_full))
return exists
ocl_sdk_root = prompt_loop_or_load_from_env(
environ_cp,
var_name='OCL_SDK_ROOT',
var_default=_DEFAULT_OCL_SDK_ROOT,
ask_for_var=(
'Please specify the location of opencl SDK install path '
'for ocl headers and libOpenCL.so'),
check_success=toolkit_exists,
error_msg='Invalid OPENCL SDK ROOT path.',
suppress_default_error=True)
write_action_env_to_bazelrc('OCL_SDK_ROOT',
ocl_sdk_root)
def set_gcc_toolchain_path(environ_cp):
"""Set GCC_TOOLCHAIN_PATH."""
def no_check(arg):
return True
gcc_toolchain_path = prompt_loop_or_load_from_env(
environ_cp,
var_name='GCC_TOOLCHAIN_PATH',
var_default=_DEFAULT_GCC_TOOLCHAIN_PATH,
ask_for_var=(
'Please specify the location of gcc toolchain used by the compiler'),
check_success=no_check,
error_msg='Invalid GCC_TOOLCHAIN path.',
suppress_default_error=True)
write_action_env_to_bazelrc('GCC_TOOLCHAIN_PATH',
gcc_toolchain_path)
return gcc_toolchain_path
def set_gcc_toolchain_target(environ_cp, gcc_toolchain_path):
"""Set GCC_TOOLCHAIN_TARGET."""
if gcc_toolchain_path == "":
return ""
def toolkit_exists(target):
"""Check if a gcc toolchain-target is valid."""
if is_linux():
if target == '':
gcc_bin_path = 'bin/gcc'
else:
gcc_bin_path = 'bin/' + target + '-gcc'
else:
gcc_bin_path = ''
gcc_bin_path_full = os.path.join(gcc_toolchain_path, gcc_bin_path)
exists = os.path.exists(gcc_bin_path_full)
if not exists:
print('Invalid GCC_TOOLCHAIN path and TARGET. %s cannot be found' %
(gcc_bin_path_full))
return exists
gcc_toolchain_target = prompt_loop_or_load_from_env(
environ_cp,
var_name='GCC_TOOLCHAIN_TARGET',
var_default=_DEFAULT_GCC_TOOLCHAIN_TARGET,
ask_for_var=(
'Please specify the target of gcc toolchain (e.g. x86_64-pc-linux) '
'the compiler will use.'),
check_success=toolkit_exists,
error_msg='Invalid GCC_TOOLCHAIN_TARGET',
suppress_default_error=True)
write_action_env_to_bazelrc('GCC_TOOLCHAIN_TARGET',
gcc_toolchain_target)
def set_mpi_home(environ_cp):
"""Set MPI_HOME."""
default_mpi_home = which('mpirun') or which('mpiexec') or ''
default_mpi_home = os.path.dirname(os.path.dirname(default_mpi_home))
def valid_mpi_path(mpi_home):
exists = (
os.path.exists(os.path.join(mpi_home, 'include')) and
(os.path.exists(os.path.join(mpi_home, 'lib')) or
os.path.exists(os.path.join(mpi_home, 'lib64')) or
os.path.exists(os.path.join(mpi_home, 'lib32'))))
if not exists:
print(
'Invalid path to the MPI Toolkit. %s or %s or %s or %s cannot be found'
% (os.path.join(mpi_home, 'include'),
os.path.join(mpi_home, 'lib'),
os.path.join(mpi_home, 'lib64'),
os.path.join(mpi_home, 'lib32'))))
return exists
_ = prompt_loop_or_load_from_env(
environ_cp,
var_name='MPI_HOME',
var_default=default_mpi_home,
ask_for_var='Please specify the MPI toolkit folder.',
check_success=valid_mpi_path,
error_msg='',
suppress_default_error=True)
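# Illustrative sketch (hypothetical layout): valid_mpi_path() above accepts an
# MPI_HOME such as /usr/lib64/openmpi only if it contains an include/ directory
# plus at least one of lib/, lib64/ or lib32/.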
def set_other_mpi_vars(environ_cp):
"""Set other MPI related variables."""
# Link the MPI header files
mpi_home = environ_cp.get('MPI_HOME')
symlink_force('%s/include/mpi.h' % mpi_home, 'third_party/mpi/mpi.h')
# Determine whether we use OpenMPI or MVAPICH; these require different header
# files to be included here to keep the bazel dependency checker happy
if os.path.exists(os.path.join(mpi_home, 'include/mpi_portable_platform.h')):
symlink_force(
os.path.join(mpi_home, 'include/mpi_portable_platform.h'),
'third_party/mpi/mpi_portable_platform.h')
# TODO(gunan): avoid editing files in configure
sed_in_place('third_party/mpi/mpi.bzl', 'MPI_LIB_IS_OPENMPI=False',
'MPI_LIB_IS_OPENMPI=True')
else:
# MVAPICH / MPICH
symlink_force(
os.path.join(mpi_home, 'include/mpio.h'), 'third_party/mpi/mpio.h')
symlink_force(
os.path.join(mpi_home, 'include/mpicxx.h'), 'third_party/mpi/mpicxx.h')
# TODO(gunan): avoid editing files in configure
sed_in_place('third_party/mpi/mpi.bzl', 'MPI_LIB_IS_OPENMPI=True',
'MPI_LIB_IS_OPENMPI=False')
if os.path.exists(os.path.join(mpi_home, 'lib/libmpi.so')):
symlink_force(
os.path.join(mpi_home, 'lib/libmpi.so'), 'third_party/mpi/libmpi.so')
elif os.path.exists(os.path.join(mpi_home, 'lib64/libmpi.so')):
symlink_force(
os.path.join(mpi_home, 'lib64/libmpi.so'), 'third_party/mpi/libmpi.so')
elif os.path.exists(os.path.join(mpi_home, 'lib32/libmpi.so')):
symlink_force(
os.path.join(mpi_home, 'lib32/libmpi.so'), 'third_party/mpi/libmpi.so')
else:
raise ValueError(
'Cannot find the MPI library file in %s/lib or %s/lib64 or %s/lib32' %
(mpi_home, mpi_home, mpi_home))
def set_system_libs_flag(environ_cp):
syslibs = environ_cp.get('TF_SYSTEM_LIBS', '')
if syslibs:
if ',' in syslibs:
syslibs = ','.join(sorted(syslibs.split(',')))
else:
syslibs = ','.join(sorted(syslibs.split()))
write_action_env_to_bazelrc('TF_SYSTEM_LIBS', syslibs)
if 'PREFIX' in environ_cp:
write_to_bazelrc('build --define=PREFIX=%s' % environ_cp['PREFIX'])
if 'LIBDIR' in environ_cp:
write_to_bazelrc('build --define=LIBDIR=%s' % environ_cp['LIBDIR'])
if 'INCLUDEDIR' in environ_cp:
write_to_bazelrc('build --define=INCLUDEDIR=%s' % environ_cp['INCLUDEDIR'])
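# Illustrative sketch (hypothetical value): with TF_SYSTEM_LIBS='zlib curl png',
# set_system_libs_flag() above writes the sorted, comma-joined action env
# TF_SYSTEM_LIBS=curl,png,zlib to the generated bazelrc.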
def set_windows_build_flags(environ_cp):
"""Set Windows specific build options."""
# The non-monolithic build is not supported yet
write_to_bazelrc('build --config monolithic')
# Suppress warning messages
write_to_bazelrc('build --copt=-w --host_copt=-w')
# Fix winsock2.h conflicts
write_to_bazelrc(
'build --copt=-DWIN32_LEAN_AND_MEAN --host_copt=-DWIN32_LEAN_AND_MEAN '
'--copt=-DNOGDI --host_copt=-DNOGDI')
# Output more verbose information when something goes wrong
write_to_bazelrc('build --verbose_failures')
# The host and target platforms are the same in the Windows build, so we don't
# have to distinguish them. This avoids building the same targets twice.
write_to_bazelrc('build --distinct_host_configuration=false')
if get_var(
environ_cp, 'TF_OVERRIDE_EIGEN_STRONG_INLINE', 'Eigen strong inline',
True, ('Would you like to override eigen strong inline for some C++ '
'compilation to reduce the compilation time?'),
'Eigen strong inline overridden.', 'Not overriding eigen strong inline, '
'some compilations could take more than 20 mins.'):
# Due to a known MSVC compiler issue
# https://github.com/tensorflow/tensorflow/issues/10521
# Overriding eigen strong inline speeds up the compiling of
# conv_grad_ops_3d.cc and conv_ops_3d.cc by 20 minutes,
# but this also hurts the performance. Let users decide what they want.
write_to_bazelrc('build --define=override_eigen_strong_inline=true')
def config_info_line(name, help_text):
"""Helper function to print formatted help text for Bazel config options."""
print('\t--config=%-12s\t# %s' % (name, help_text))
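# Illustrative sketch (hypothetical arguments): config_info_line('mkl', 'Build
# with MKL support.') prints a line like
#     --config=mkl         # Build with MKL support.
# because of the '\t--config=%-12s\t# %s' format string above.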
def validate_cuda_config(environ_cp):
"""Run find_cuda_config.py and return cuda_toolkit_path, or None."""
def maybe_encode_env(env):
"""Encodes unicode in env to str on Windows python 2.x."""
if not is_windows() or sys.version_info[0] != 2:
return env
for k, v in env.items():
if isinstance(k, unicode):
k = k.encode('ascii')
if isinstance(v, unicode):
v = v.encode('ascii')
env[k] = v
return env
cuda_libraries = ['cuda', 'cudnn']
if is_linux():
if int(environ_cp.get('TF_NEED_TENSORRT', False)):
cuda_libraries.append('tensorrt')
if environ_cp.get('TF_NCCL_VERSION', None):
cuda_libraries.append('nccl')
paths = glob.glob('**/third_party/gpus/find_cuda_config.py', recursive=True)
if not paths:
raise FileNotFoundError(
"Can't find 'find_cuda_config.py' script inside working directory")
proc = subprocess.Popen(
[environ_cp['PYTHON_BIN_PATH'], paths[0]] + cuda_libraries,
stdout=subprocess.PIPE,
env=maybe_encode_env(environ_cp))
if proc.wait():
# Errors from find_cuda_config.py were sent to stderr.
print('Asking for detailed CUDA configuration...\n')
return False
config = dict(
tuple(line.decode('ascii').rstrip().split(': ')) for line in proc.stdout)
print('Found CUDA %s in:' % config['cuda_version'])
print(' %s' % config['cuda_library_dir'])
print(' %s' % config['cuda_include_dir'])
print('Found cuDNN %s in:' % config['cudnn_version'])
print(' %s' % config['cudnn_library_dir'])
print(' %s' % config['cudnn_include_dir'])
if 'tensorrt_version' in config:
print('Found TensorRT %s in:' % config['tensorrt_version'])
print(' %s' % config['tensorrt_library_dir'])
print(' %s' % config['tensorrt_include_dir'])
if config.get('nccl_version', None):
print('Found NCCL %s in:' % config['nccl_version'])
print(' %s' % config['nccl_library_dir'])
print(' %s' % config['nccl_include_dir'])
print('\n')
environ_cp['CUDA_TOOLKIT_PATH'] = config['cuda_toolkit_path']
return True
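# Illustrative sketch (hypothetical output): validate_cuda_config() above expects
# find_cuda_config.py to emit 'key: value' lines on stdout, e.g.
#   cuda_version: 10.1
#   cuda_toolkit_path: /usr/local/cuda
# which are parsed into the `config` dict and echoed back to the user.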
def main():
global _TF_WORKSPACE_ROOT
global _TF_BAZELRC
global _TF_CURRENT_BAZEL_VERSION
parser = argparse.ArgumentParser()
parser.add_argument(
'--workspace',
type=str,
default=os.path.abspath(os.path.dirname(__file__)),
help='The absolute path to your active Bazel workspace.')
args = parser.parse_args()
_TF_WORKSPACE_ROOT = args.workspace
_TF_BAZELRC = os.path.join(_TF_WORKSPACE_ROOT, _TF_BAZELRC_FILENAME)
# Make a copy of os.environ so it is clear when functions are getting and
# setting environment variables.
environ_cp = dict(os.environ)
current_bazel_version = check_bazel_version('3.1.0', '3.7.0')
_TF_CURRENT_BAZEL_VERSION = convert_version_to_int(current_bazel_version)
reset_tf_configure_bazelrc()
cleanup_makefile()
setup_python(environ_cp)
create_build_configuration(environ_cp)
if is_windows():
environ_cp['TF_DOWNLOAD_CLANG'] = '0'
environ_cp['TF_NEED_MPI'] = '0'
# The numpy package on ppc64le uses OpenBLAS which has multi-threading
# issues that lead to incorrect answers. Set OMP_NUM_THREADS=1 at
# runtime to allow the Tensorflow testcases which compare numpy
# results to Tensorflow results to succeed.
if is_ppc64le():
write_action_env_to_bazelrc('OMP_NUM_THREADS', 1)
environ_cp['TF_NEED_CUDA'] = str(
int(get_var(environ_cp, 'TF_NEED_CUDA', 'CUDA', False)))
if (environ_cp.get('TF_NEED_CUDA') == '1' and
'TF_CUDA_CONFIG_REPO' not in environ_cp):
environ_save = dict(environ_cp)
for _ in range(_DEFAULT_PROMPT_ASK_ATTEMPTS):
if validate_cuda_config(environ_cp):
cuda_env_names = [
'TF_CUDA_VERSION',
'TF_CUDNN_VERSION',
'TF_CUDA_PATHS',
# Items below are for backwards compatibility when not using
# TF_CUDA_PATHS.
'CUDA_TOOLKIT_PATH',
'CUDNN_INSTALL_PATH',
]
# Note: set_action_env_var above already writes to bazelrc.
for name in cuda_env_names:
if name in environ_cp:
write_action_env_to_bazelrc(name, environ_cp[name])
break
# Restore settings changed below if CUDA config could not be validated.
environ_cp = dict(environ_save)
set_tf_cuda_version(environ_cp)
set_tf_cudnn_version(environ_cp)
if is_linux():
set_tf_tensorrt_version(environ_cp)
set_tf_nccl_version(environ_cp)
set_tf_cuda_paths(environ_cp)
else:
raise UserInputError(
'Invalid CUDA settings were provided %d '
'times in a row. Assuming to be a scripting mistake.' %
_DEFAULT_PROMPT_ASK_ATTEMPTS)
set_tf_cuda_compute_capabilities(environ_cp)
if 'LD_LIBRARY_PATH' in environ_cp and environ_cp.get(
'LD_LIBRARY_PATH') != '1':
write_action_env_to_bazelrc('LD_LIBRARY_PATH',
environ_cp.get('LD_LIBRARY_PATH'))
set_tf_cuda_clang(environ_cp)
if environ_cp.get('TF_CUDA_CLANG') == '1':
# Ask whether we should download the clang toolchain.
set_tf_download_clang(environ_cp)
if environ_cp.get('TF_DOWNLOAD_CLANG') != '1':
# Set up which clang we should use as the cuda / host compiler.
set_clang_cuda_compiler_path(environ_cp)
else:
# Use downloaded LLD for linking.
write_to_bazelrc('build:cuda_clang --config=download_clang_use_lld')
else:
# Set up which gcc nvcc should use as the host compiler
# No need to set this on Windows
if not is_windows():
set_gcc_host_compiler_path(environ_cp)
set_other_cuda_vars(environ_cp)
else:
# CUDA not required. Ask whether we should download the clang toolchain and
# use it for the CPU build.
set_tf_download_clang(environ_cp)
# ROCm / CUDA are mutually exclusive.
# At most 1 GPU platform can be configured.
gpu_platform_count = 0
if environ_cp.get('TF_NEED_CUDA') == '1':
gpu_platform_count += 1
if gpu_platform_count >= 2:
raise UserInputError('CUDA / ROCm are mutually exclusive. '
'At most 1 GPU platform can be configured.')
set_build_var(environ_cp, 'TF_NEED_MPI', 'MPI', 'with_mpi_support', False)
if environ_cp.get('TF_NEED_MPI') == '1':
set_mpi_home(environ_cp)
set_other_mpi_vars(environ_cp)
set_cc_opt_flags(environ_cp)
set_system_libs_flag(environ_cp)
if is_windows():
set_windows_build_flags(environ_cp)
if __name__ == '__main__':
main()
|
the-stack_0_13991 | import os
import localstack_client.config
# LocalStack version
VERSION = '0.10.7'
# constant to represent the "local" region, i.e., local machine
REGION_LOCAL = 'local'
# dev environment
ENV_DEV = 'dev'
# backend service ports, for services that are behind a proxy (counting down from 4566)
DEFAULT_PORT_APIGATEWAY_BACKEND = 4566
DEFAULT_PORT_KINESIS_BACKEND = 4565
DEFAULT_PORT_DYNAMODB_BACKEND = 4564
DEFAULT_PORT_S3_BACKEND = 4563
DEFAULT_PORT_SNS_BACKEND = 4562
DEFAULT_PORT_SQS_BACKEND = 4561
DEFAULT_PORT_ELASTICSEARCH_BACKEND = 4560
DEFAULT_PORT_CLOUDFORMATION_BACKEND = 4559
DEFAULT_PORT_STEPFUNCTIONS_BACKEND = 4558
DEFAULT_PORT_IAM_BACKEND = 4557
DEFAULT_PORT_EC2_BACKEND = 4556
DEFAULT_PORT_KMS_BACKEND = 4555
DEFAULT_PORT_EVENTS_BACKEND = 4554
DEFAULT_PORT_LOGS_BACKEND = 4553
DEFAULT_PORT_WEB_UI = 8080
LOCALHOST = 'localhost'
# version of the Maven dependency with Java utility code
LOCALSTACK_MAVEN_VERSION = '0.2.0'
# map of default service APIs and ports to be spun up (fetch map from localstack_client)
DEFAULT_SERVICE_PORTS = localstack_client.config.get_service_ports()
# host to bind to when starting the services
BIND_HOST = '0.0.0.0'
# AWS user account ID used for tests
if 'TEST_AWS_ACCOUNT_ID' not in os.environ:
os.environ['TEST_AWS_ACCOUNT_ID'] = '000000000000'
TEST_AWS_ACCOUNT_ID = os.environ['TEST_AWS_ACCOUNT_ID']
# root code folder
LOCALSTACK_ROOT_FOLDER = os.path.realpath(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..'))
# virtualenv folder
LOCALSTACK_VENV_FOLDER = os.path.join(LOCALSTACK_ROOT_FOLDER, '.venv')
if not os.path.isdir(LOCALSTACK_VENV_FOLDER):
# assuming this package lives here: <python>/lib/pythonX.X/site-packages/localstack/
LOCALSTACK_VENV_FOLDER = os.path.realpath(os.path.join(LOCALSTACK_ROOT_FOLDER, '..', '..', '..'))
# API Gateway path to indicate a user request sent to the gateway
PATH_USER_REQUEST = '_user_request_'
# name of LocalStack Docker image
DOCKER_IMAGE_NAME = 'localstack/localstack'
# backdoor API path used to retrieve or update config variables
CONFIG_UPDATE_PATH = '/?_config_'
# environment variable name to tag local test runs
ENV_INTERNAL_TEST_RUN = 'LOCALSTACK_INTERNAL_TEST_RUN'
# content types
APPLICATION_AMZ_JSON_1_0 = 'application/x-amz-json-1.0'
APPLICATION_AMZ_JSON_1_1 = 'application/x-amz-json-1.1'
APPLICATION_JSON = 'application/json'
APPLICATION_XML = 'application/xml'
APPLICATION_X_WWW_FORM_URLENCODED = 'application/x-www-form-urlencoded'
# strings to indicate truthy/falsy values
TRUE_STRINGS = ('1', 'true', 'True')
FALSE_STRINGS = ('0', 'false', 'False')
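# Minimal sketch (not part of localstack; hypothetical helper) of how these
# tuples might be used to interpret an environment flag:
#
#   def is_env_true(value):
#       """Return True if `value` matches one of TRUE_STRINGS."""
#       return value in TRUE_STRINGS
#
#   is_env_true(os.environ.get('DEBUG', '0'))  # False unless DEBUG is '1'/'true'/'True'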
# Lambda defaults
LAMBDA_TEST_ROLE = 'arn:aws:iam::%s:role/lambda-test-role' % TEST_AWS_ACCOUNT_ID
# installation constants
ELASTICSEARCH_JAR_URL = 'https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-6.7.0.zip'
# See https://docs.aws.amazon.com/ja_jp/elasticsearch-service/latest/developerguide/aes-supported-plugins.html
ELASTICSEARCH_PLUGIN_LIST = ['analysis-icu', 'ingest-attachment', 'analysis-kuromoji',
'mapper-murmur3', 'mapper-size', 'analysis-phonetic', 'analysis-smartcn', 'analysis-stempel', 'analysis-ukrainian']
# Default ES modules to exclude (saves approx. 66MB in the final image)
ELASTICSEARCH_DELETE_MODULES = ['ingest-geoip']
ELASTICMQ_JAR_URL = 'https://s3-eu-west-1.amazonaws.com/softwaremill-public/elasticmq-server-0.15.2.jar'
STS_JAR_URL = 'https://repo1.maven.org/maven2/com/amazonaws/aws-java-sdk-sts/1.11.14/aws-java-sdk-sts-1.11.14.jar'
STEPFUNCTIONS_ZIP_URL = 'https://s3.amazonaws.com/stepfunctionslocal/StepFunctionsLocal.zip'
KMS_URL_PATTERN = 'https://s3-eu-west-2.amazonaws.com/local-kms/localstack/v3/local-kms.<arch>.bin'
# TODO: Temporarily using a fixed version of DDB in Alpine, as we're hitting a SIGSEGV JVM crash with latest
DYNAMODB_JAR_URL_ALPINE = 'https://github.com/whummer/dynamodb-local/raw/master/etc/DynamoDBLocal.zip'
DYNAMODB_JAR_URL = 'https://s3-us-west-2.amazonaws.com/dynamodb-local/dynamodb_local_latest.zip'
# API endpoint for analytics events
API_ENDPOINT = os.environ.get('API_ENDPOINT') or 'https://api.localstack.cloud/v1'
# environment variable to indicate that this process is running the Web UI
LOCALSTACK_WEB_PROCESS = 'LOCALSTACK_WEB_PROCESS'
LOCALSTACK_INFRA_PROCESS = 'LOCALSTACK_INFRA_PROCESS'
# Hardcoded AWS account ID used by moto
MOTO_ACCOUNT_ID = '123456789012'
# Default lambda registry
DEFAULT_LAMBDA_CONTAINER_REGISTRY = 'lambci/lambda'
|
the-stack_0_13994 | from PySide2.QtWidgets import QDialog, QVBoxLayout, QHBoxLayout, QPushButton, QFileDialog, QWidget, QLabel, \
QListWidget, QListWidgetItem
import os
from custom_src.global_tools.Debugger import Debugger
class SelectPackages_Dialog(QDialog):
def __init__(self, parent, packages):
super(SelectPackages_Dialog, self).__init__(parent)
self.file_paths = []
self.required_packages = packages
self.setLayout(QVBoxLayout())
self.layout().addWidget(QLabel('You need to select the locations of the following required node packages'))
# package lists
required_packages_list_widget = QListWidget()
for p in packages:
package_item = QListWidgetItem()
package_item.setText(p)
required_packages_list_widget.addItem(package_item)
selected_items_widget = QWidget()
selected_items_widget.setLayout(QVBoxLayout())
self.selected_packages_list_widget = QListWidget()
selected_items_widget.layout().addWidget(self.selected_packages_list_widget)
auto_import_button = QPushButton('auto import')
auto_import_button.setFocus()
auto_import_button.clicked.connect(self.auto_import_button_clicked)
selected_items_widget.layout().addWidget(auto_import_button)
add_package_button = QPushButton('add')
add_package_button.clicked.connect(self.add_package_button_clicked)
selected_items_widget.layout().addWidget(add_package_button)
clear_package_list_button = QPushButton('clear')
clear_package_list_button.clicked.connect(self.clear_selected_packages_list)
selected_items_widget.layout().addWidget(clear_package_list_button)
finished_button = QPushButton('OK')
finished_button.clicked.connect(self.finished_button_clicked)
selected_items_widget.layout().addWidget(finished_button)
packages_lists_widget = QWidget()
packages_lists_widget.setLayout(QHBoxLayout())
packages_lists_widget.layout().addWidget(required_packages_list_widget)
packages_lists_widget.layout().addWidget(selected_items_widget)
self.layout().addWidget(packages_lists_widget)
self.setWindowTitle('select required packages')
def auto_import_button_clicked(self):
packages_dir = '../packages'
folders_list = [x[0] for x in os.walk(packages_dir) if
os.path.basename(os.path.normpath(x[0])) in self.required_packages]
required_files = self.required_packages.copy()
for folder in folders_list:
for r_f in required_files:
if r_f + '.rpc' in os.listdir(packages_dir + '/' + folder):
self.file_paths.append(os.path.normpath(packages_dir + '/' + folder + '/' + r_f + '.rpc'))
break
self.rebuild_selected_packages_list_widget()
self.clean_packages_list()
if self.all_required_packages_selected():
self.finished()
def add_package_button_clicked(self):
file_names = \
QFileDialog.getOpenFileNames(self, 'select package files', '../packages', 'Ryven Package(*.rpc)')[0]
for file_name in file_names:
try:
f = open(file_name)
f.close()
self.file_paths.append(file_name)
except FileNotFoundError:
Debugger.debug('couldn\'t open file')
self.rebuild_selected_packages_list_widget()
def rebuild_selected_packages_list_widget(self):
# remove all items
self.selected_packages_list_widget.clear()
for f in self.file_paths:
file_item = QListWidgetItem()
file_item.setText(f)
self.selected_packages_list_widget.addItem(file_item)
def clear_selected_packages_list(self):
self.file_paths.clear()
self.rebuild_selected_packages_list_widget()
def finished_button_clicked(self):
self.clean_packages_list()
if self.all_required_packages_selected():
self.finished()
def clean_packages_list(self):
"""remove duplicates from self.file_paths"""
files_dict = {}
for p in self.file_paths:
filename = os.path.splitext(os.path.basename(p))[0]
files_dict[filename] = p
self.file_paths = list(files_dict.values())
self.rebuild_selected_packages_list_widget()
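# Illustrative example (hypothetical paths): if self.file_paths contains
# '../packages/std/std.rpc' and '../packages/other/std.rpc', clean_packages_list()
# keys both entries by the base name 'std' and keeps only the last one, so
# duplicates of the same package are dropped.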
def all_required_packages_selected(self):
files = [os.path.splitext(os.path.basename(path))[0] for path in self.file_paths]
# search for missing packages
for p in self.required_packages:
if p not in files:
return False
return True
def finished(self):
self.accept() |
the-stack_0_13995 | # Copyright (c) 2015 Ansible, Inc.
# All Rights Reserved.
# Python
import copy
import json
import logging
import re
from collections import OrderedDict
from datetime import timedelta
# OAuth2
from oauthlib import oauth2
from oauthlib.common import generate_token
# Django
from django.conf import settings
from django.contrib.auth import update_session_auth_hash
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ObjectDoesNotExist, ValidationError as DjangoValidationError
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.utils.encoding import force_text
from django.utils.text import capfirst
from django.utils.timezone import now
from django.utils.functional import cached_property
# Django REST Framework
from rest_framework.exceptions import ValidationError, PermissionDenied
from rest_framework.relations import ManyRelatedField
from rest_framework import fields
from rest_framework import serializers
from rest_framework import validators
from rest_framework.utils.serializer_helpers import ReturnList
# Django-Polymorphic
from polymorphic.models import PolymorphicModel
# AWX
from awx.main.access import get_user_capabilities
from awx.main.constants import (
SCHEDULEABLE_PROVIDERS,
ANSI_SGR_PATTERN,
ACTIVE_STATES,
CENSOR_VALUE,
)
from awx.main.models import (
ActivityStream, AdHocCommand, AdHocCommandEvent, Credential, CredentialInputSource,
CredentialType, CustomInventoryScript, Group, Host, Instance,
InstanceGroup, Inventory, InventorySource, InventoryUpdate,
InventoryUpdateEvent, Job, JobEvent, JobHostSummary, JobLaunchConfig,
JobTemplate, Label, Notification, NotificationTemplate,
OAuth2AccessToken, OAuth2Application, Organization, Project,
ProjectUpdate, ProjectUpdateEvent, RefreshToken, Role, Schedule,
SystemJob, SystemJobEvent, SystemJobTemplate, Team, UnifiedJob,
UnifiedJobTemplate, V1Credential, WorkflowJob, WorkflowJobNode,
WorkflowJobTemplate, WorkflowJobTemplateNode, StdoutMaxBytesExceeded
)
from awx.main.models.base import VERBOSITY_CHOICES, NEW_JOB_TYPE_CHOICES
from awx.main.models.rbac import (
get_roles_on_resource, role_summary_fields_generator
)
from awx.main.fields import ImplicitRoleField, JSONBField
from awx.main.utils import (
get_type_for_model, get_model_for_type,
camelcase_to_underscore, getattrd, parse_yaml_or_json,
has_model_field_prefetched, extract_ansible_vars, encrypt_dict,
prefetch_page_capabilities, get_external_account)
from awx.main.utils.filters import SmartFilter
from awx.main.redact import UriCleaner, REPLACE_STR
from awx.main.validators import vars_validate_or_raise
from awx.api.versioning import reverse, get_request_version
from awx.api.fields import (BooleanNullField, CharNullField, ChoiceNullField,
VerbatimField, DeprecatedCredentialField)
logger = logging.getLogger('awx.api.serializers')
# Fields that should be summarized regardless of object type.
DEFAULT_SUMMARY_FIELDS = ('id', 'name', 'description')# , 'created_by', 'modified_by')#, 'type')
# Keys are fields (foreign keys) where, if found on an instance, summary info
# should be added to the serialized data. Values are a tuple of field names on
# the related object to include in the summary data (if the field is present on
# the related object).
SUMMARIZABLE_FK_FIELDS = {
'organization': DEFAULT_SUMMARY_FIELDS,
'user': ('id', 'username', 'first_name', 'last_name'),
'application': ('id', 'name'),
'team': DEFAULT_SUMMARY_FIELDS,
'inventory': DEFAULT_SUMMARY_FIELDS + ('has_active_failures',
'total_hosts',
'hosts_with_active_failures',
'total_groups',
'groups_with_active_failures',
'has_inventory_sources',
'total_inventory_sources',
'inventory_sources_with_failures',
'organization_id',
'kind',
'insights_credential_id',),
'host': DEFAULT_SUMMARY_FIELDS + ('has_active_failures',
'has_inventory_sources'),
'group': DEFAULT_SUMMARY_FIELDS + ('has_active_failures',
'total_hosts',
'hosts_with_active_failures',
'total_groups',
'groups_with_active_failures',
'has_inventory_sources'),
'project': DEFAULT_SUMMARY_FIELDS + ('status', 'scm_type'),
'source_project': DEFAULT_SUMMARY_FIELDS + ('status', 'scm_type'),
'project_update': DEFAULT_SUMMARY_FIELDS + ('status', 'failed',),
'credential': DEFAULT_SUMMARY_FIELDS + ('kind', 'cloud', 'credential_type_id'),
'vault_credential': DEFAULT_SUMMARY_FIELDS + ('kind', 'cloud', 'credential_type_id'),
'job': DEFAULT_SUMMARY_FIELDS + ('status', 'failed', 'elapsed', 'type'),
'job_template': DEFAULT_SUMMARY_FIELDS,
'workflow_job_template': DEFAULT_SUMMARY_FIELDS,
'workflow_job': DEFAULT_SUMMARY_FIELDS,
'schedule': DEFAULT_SUMMARY_FIELDS + ('next_run',),
'unified_job_template': DEFAULT_SUMMARY_FIELDS + ('unified_job_type',),
'last_job': DEFAULT_SUMMARY_FIELDS + ('finished', 'status', 'failed', 'license_error'),
'last_job_host_summary': DEFAULT_SUMMARY_FIELDS + ('failed',),
'last_update': DEFAULT_SUMMARY_FIELDS + ('status', 'failed', 'license_error'),
'current_update': DEFAULT_SUMMARY_FIELDS + ('status', 'failed', 'license_error'),
'current_job': DEFAULT_SUMMARY_FIELDS + ('status', 'failed', 'license_error'),
'inventory_source': ('source', 'last_updated', 'status'),
'custom_inventory_script': DEFAULT_SUMMARY_FIELDS,
'source_script': ('name', 'description'),
'role': ('id', 'role_field'),
'notification_template': DEFAULT_SUMMARY_FIELDS,
'instance_group': {'id', 'name', 'controller_id'},
'insights_credential': DEFAULT_SUMMARY_FIELDS,
'source_credential': DEFAULT_SUMMARY_FIELDS + ('kind', 'cloud', 'credential_type_id'),
'target_credential': DEFAULT_SUMMARY_FIELDS + ('kind', 'cloud', 'credential_type_id'),
}
def reverse_gfk(content_object, request):
'''
Computes a reverse for a GenericForeignKey field.
Returns a dictionary of the form
{ '<type>': reverse(<type detail>) }
for example
{ 'organization': '/api/v1/organizations/1/' }
'''
if content_object is None or not hasattr(content_object, 'get_absolute_url'):
return {}
return {
camelcase_to_underscore(content_object.__class__.__name__): content_object.get_absolute_url(request=request)
}
class CopySerializer(serializers.Serializer):
name = serializers.CharField()
def validate(self, attrs):
name = attrs.get('name')
view = self.context.get('view', None)
obj = view.get_object()
if name == obj.name:
raise serializers.ValidationError(_(
'The original object is already named {}; a copy of'
' it cannot have the same name.'.format(name)
))
return attrs
class BaseSerializerMetaclass(serializers.SerializerMetaclass):
'''
Custom metaclass to enable attribute inheritance from Meta objects on
serializer base classes.
Also allows for inheriting or updating field lists from base class(es):
class Meta:
# Inherit all fields from base class.
fields = ('*',)
# Inherit all fields from base class and add 'foo'.
fields = ('*', 'foo')
# Inherit all fields from base class except 'bar'.
fields = ('*', '-bar')
# Define fields as 'foo' and 'bar'; ignore base class fields.
fields = ('foo', 'bar')
# Extra field kwargs dicts are also merged from base classes.
extra_kwargs = {
'foo': {'required': True},
'bar': {'read_only': True},
}
# If a subclass were to define extra_kwargs as:
extra_kwargs = {
'foo': {'required': False, 'default': ''},
'bar': {'label': 'New Label for Bar'},
}
# The resulting value of extra_kwargs would be:
extra_kwargs = {
'foo': {'required': False, 'default': ''},
'bar': {'read_only': True, 'label': 'New Label for Bar'},
}
# Extra field kwargs cannot be removed in subclasses, only replaced.
'''
@staticmethod
def _is_list_of_strings(x):
return isinstance(x, (list, tuple)) and all([isinstance(y, str) for y in x])
@staticmethod
def _is_extra_kwargs(x):
return isinstance(x, dict) and all([isinstance(k, str) and isinstance(v, dict) for k,v in x.items()])
@classmethod
def _update_meta(cls, base, meta, other=None):
for attr in dir(other):
if attr.startswith('_'):
continue
val = getattr(other, attr)
meta_val = getattr(meta, attr, None)
# Special handling for lists/tuples of strings (field names).
if cls._is_list_of_strings(val) and cls._is_list_of_strings(meta_val or []):
meta_val = meta_val or []
new_vals = []
except_vals = []
if base: # Merge values from all bases.
new_vals.extend([x for x in meta_val])
for v in val:
if not base and v == '*': # Inherit all values from previous base(es).
new_vals.extend([x for x in meta_val])
elif not base and v.startswith('-'): # Except these values.
except_vals.append(v[1:])
else:
new_vals.append(v)
val = []
for v in new_vals:
if v not in except_vals and v not in val:
val.append(v)
val = tuple(val)
# Merge extra_kwargs dicts from base classes.
elif cls._is_extra_kwargs(val) and cls._is_extra_kwargs(meta_val or {}):
meta_val = meta_val or {}
new_val = {}
if base:
for k,v in meta_val.items():
new_val[k] = copy.deepcopy(v)
for k,v in val.items():
new_val.setdefault(k, {}).update(copy.deepcopy(v))
val = new_val
# Any other values are copied in case they are mutable objects.
else:
val = copy.deepcopy(val)
setattr(meta, attr, val)
def __new__(cls, name, bases, attrs):
meta = type('Meta', (object,), {})
for base in bases[::-1]:
cls._update_meta(base, meta, getattr(base, 'Meta', None))
cls._update_meta(None, meta, attrs.get('Meta', meta))
attrs['Meta'] = meta
return super(BaseSerializerMetaclass, cls).__new__(cls, name, bases, attrs)
class BaseSerializer(serializers.ModelSerializer, metaclass=BaseSerializerMetaclass):
class Meta:
fields = ('id', 'type', 'url', 'related', 'summary_fields', 'created',
'modified', 'name', 'description')
summary_fields = ()
summarizable_fields = ()
# add the URL and related resources
type = serializers.SerializerMethodField()
url = serializers.SerializerMethodField()
related = serializers.SerializerMethodField('_get_related')
summary_fields = serializers.SerializerMethodField('_get_summary_fields')
# make certain fields read only
created = serializers.SerializerMethodField()
modified = serializers.SerializerMethodField()
def __init__(self, *args, **kwargs):
super(BaseSerializer, self).__init__(*args, **kwargs)
# The following lines fix the problem of being able to pass JSON dict into PrimaryKeyRelatedField.
data = kwargs.get('data', False)
if data:
for field_name, field_instance in self.fields.items():
if isinstance(field_instance, ManyRelatedField) and not field_instance.read_only:
if isinstance(data.get(field_name, False), dict):
raise serializers.ValidationError(_('Cannot use dictionary for %s' % field_name))
@property
def version(self):
"""
The request version component of the URL as an integer, i.e., 1 or 2
"""
return get_request_version(self.context.get('request')) or 1
def get_type(self, obj):
return get_type_for_model(self.Meta.model)
def get_types(self):
return [self.get_type(None)]
def get_type_choices(self):
type_name_map = {
'job': _('Playbook Run'),
'ad_hoc_command': _('Command'),
'project_update': _('SCM Update'),
'inventory_update': _('Inventory Sync'),
'system_job': _('Management Job'),
'workflow_job': _('Workflow Job'),
'workflow_job_template': _('Workflow Template'),
'job_template': _('Job Template')
}
choices = []
for t in self.get_types():
name = _(type_name_map.get(t, force_text(get_model_for_type(t)._meta.verbose_name).title()))
choices.append((t, name))
return choices
def get_url(self, obj):
if obj is None or not hasattr(obj, 'get_absolute_url'):
return ''
elif isinstance(obj, User):
return self.reverse('api:user_detail', kwargs={'pk': obj.pk})
else:
return obj.get_absolute_url(request=self.context.get('request'))
def filter_field_metadata(self, fields, method):
"""
Filter field metadata based on the request method.
This is intended to be extended by subclasses.
"""
return fields
def _get_related(self, obj):
return {} if obj is None else self.get_related(obj)
def _generate_named_url(self, url_path, obj, node):
url_units = url_path.split('/')
named_url = node.generate_named_url(obj)
url_units[4] = named_url
return '/'.join(url_units)
def get_related(self, obj):
res = OrderedDict()
view = self.context.get('view', None)
if view and (hasattr(view, 'retrieve') or view.request.method == 'POST') and \
type(obj) in settings.NAMED_URL_GRAPH:
original_url = self.get_url(obj)
if not original_url.startswith('/api/v1'):
res['named_url'] = self._generate_named_url(
original_url, obj, settings.NAMED_URL_GRAPH[type(obj)]
)
if getattr(obj, 'created_by', None):
res['created_by'] = self.reverse('api:user_detail', kwargs={'pk': obj.created_by.pk})
if getattr(obj, 'modified_by', None):
res['modified_by'] = self.reverse('api:user_detail', kwargs={'pk': obj.modified_by.pk})
return res
def _get_summary_fields(self, obj):
return {} if obj is None else self.get_summary_fields(obj)
def get_summary_fields(self, obj):
# Return values for certain fields on related objects, to simplify
# displaying lists of items without additional API requests.
summary_fields = OrderedDict()
for fk, related_fields in SUMMARIZABLE_FK_FIELDS.items():
try:
# A few special cases where we don't want to access the field
# because it results in additional queries.
if fk == 'job' and isinstance(obj, UnifiedJob):
continue
if fk == 'project' and (isinstance(obj, InventorySource) or
isinstance(obj, Project)):
continue
try:
fkval = getattr(obj, fk, None)
except ObjectDoesNotExist:
continue
if fkval is None:
continue
if fkval == obj:
continue
summary_fields[fk] = OrderedDict()
for field in related_fields:
if self.version < 2 and field == 'credential_type_id': # TODO: remove version check in 3.3
continue
fval = getattr(fkval, field, None)
if fval is None and field == 'type':
if isinstance(fkval, PolymorphicModel):
fkval = fkval.get_real_instance()
fval = get_type_for_model(fkval)
elif fval is None and field == 'unified_job_type' and isinstance(fkval, UnifiedJobTemplate):
fkval = fkval.get_real_instance()
fval = get_type_for_model(fkval._get_unified_job_class())
if fval is not None:
summary_fields[fk][field] = fval
# Can be raised by the reverse accessor for a OneToOneField.
except ObjectDoesNotExist:
pass
if getattr(obj, 'created_by', None):
summary_fields['created_by'] = OrderedDict()
for field in SUMMARIZABLE_FK_FIELDS['user']:
summary_fields['created_by'][field] = getattr(obj.created_by, field)
if getattr(obj, 'modified_by', None):
summary_fields['modified_by'] = OrderedDict()
for field in SUMMARIZABLE_FK_FIELDS['user']:
summary_fields['modified_by'][field] = getattr(obj.modified_by, field)
# RBAC summary fields
roles = {}
for field in obj._meta.get_fields():
if type(field) is ImplicitRoleField:
roles[field.name] = role_summary_fields_generator(obj, field.name)
if len(roles) > 0:
summary_fields['object_roles'] = roles
# Advance display of RBAC capabilities
if hasattr(self, 'show_capabilities'):
user_capabilities = self._obj_capability_dict(obj)
if user_capabilities:
summary_fields['user_capabilities'] = user_capabilities
return summary_fields
def _obj_capability_dict(self, obj):
"""
Returns the user_capabilities dictionary for a single item.
If inside of a list view, it runs the prefetching algorithm for
the entire current page and saves it into the context.
"""
view = self.context.get('view', None)
parent_obj = None
if view and hasattr(view, 'parent_model') and hasattr(view, 'get_parent_object'):
parent_obj = view.get_parent_object()
if view and view.request and view.request.user:
capabilities_cache = {}
# if the serializer has a parent, it is a ListView; apply the page capabilities prefetch
if self.parent and hasattr(self, 'capabilities_prefetch') and self.capabilities_prefetch:
qs = self.parent.instance
if 'capability_map' not in self.context:
if hasattr(self, 'polymorphic_base'):
model = self.polymorphic_base.Meta.model
prefetch_list = self.polymorphic_base._capabilities_prefetch
else:
model = self.Meta.model
prefetch_list = self.capabilities_prefetch
self.context['capability_map'] = prefetch_page_capabilities(
model, qs, prefetch_list, view.request.user
)
if obj.id in self.context['capability_map']:
capabilities_cache = self.context['capability_map'][obj.id]
return get_user_capabilities(
view.request.user, obj, method_list=self.show_capabilities, parent_obj=parent_obj,
capabilities_cache=capabilities_cache
)
else:
# Contextual information to produce user_capabilities doesn't exist
return {}
def get_created(self, obj):
if obj is None:
return None
elif isinstance(obj, User):
return obj.date_joined
elif hasattr(obj, 'created'):
return obj.created
return None
def get_modified(self, obj):
if obj is None:
return None
elif isinstance(obj, User):
return obj.last_login # Not actually exposed for User.
elif hasattr(obj, 'modified'):
return obj.modified
return None
def get_extra_kwargs(self):
extra_kwargs = super(BaseSerializer, self).get_extra_kwargs()
if self.instance:
read_only_on_update_fields = getattr(self.Meta, 'read_only_on_update_fields', tuple())
for field_name in read_only_on_update_fields:
kwargs = extra_kwargs.get(field_name, {})
kwargs['read_only'] = True
extra_kwargs[field_name] = kwargs
return extra_kwargs
def build_standard_field(self, field_name, model_field):
# DRF 3.3 serializers.py::build_standard_field() -> utils/field_mapping.py::get_field_kwargs() short circuits
# when a Model's editable field is set to False. The short circuit skips choice rendering.
#
# This logic is to force rendering choices on an uneditable field.
# Note: Consider expanding this rendering for more than just choices fields
# Note: This logic works in conjunction with
if hasattr(model_field, 'choices') and model_field.choices:
was_editable = model_field.editable
model_field.editable = True
field_class, field_kwargs = super(BaseSerializer, self).build_standard_field(field_name, model_field)
if hasattr(model_field, 'choices') and model_field.choices:
model_field.editable = was_editable
if was_editable is False:
field_kwargs['read_only'] = True
# Pass model field default onto the serializer field if field is not read-only.
if model_field.has_default() and not field_kwargs.get('read_only', False):
field_kwargs['default'] = field_kwargs['initial'] = model_field.get_default()
# Enforce minimum value of 0 for PositiveIntegerFields.
if isinstance(model_field, (models.PositiveIntegerField, models.PositiveSmallIntegerField)) and 'choices' not in field_kwargs:
field_kwargs['min_value'] = 0
# Use custom boolean field that allows null and empty string as False values.
if isinstance(model_field, models.BooleanField) and not field_kwargs.get('read_only', False):
field_class = BooleanNullField
# Use custom char or choice field that coerces null to an empty string.
if isinstance(model_field, (models.CharField, models.TextField)) and not field_kwargs.get('read_only', False):
if 'choices' in field_kwargs:
field_class = ChoiceNullField
else:
field_class = CharNullField
# Update the message used for the unique validator to use capitalized
# verbose name; keeps unique message the same as with DRF 2.x.
opts = self.Meta.model._meta.concrete_model._meta
for validator in field_kwargs.get('validators', []):
if isinstance(validator, validators.UniqueValidator):
unique_error_message = model_field.error_messages.get('unique', None)
if unique_error_message:
unique_error_message = unique_error_message % {
'model_name': capfirst(opts.verbose_name),
'field_label': capfirst(model_field.verbose_name),
}
validator.message = unique_error_message
return field_class, field_kwargs
def build_relational_field(self, field_name, relation_info):
field_class, field_kwargs = super(BaseSerializer, self).build_relational_field(field_name, relation_info)
# Don't include choices for foreign key fields.
field_kwargs.pop('choices', None)
return field_class, field_kwargs
def get_unique_together_validators(self):
# Allow the model's full_clean method to handle the unique together validation.
return []
def run_validation(self, data=fields.empty):
try:
return super(BaseSerializer, self).run_validation(data)
except ValidationError as exc:
# Avoid bug? in DRF if exc.detail happens to be a list instead of a dict.
raise ValidationError(detail=serializers.as_serializer_error(exc))
def get_validation_exclusions(self, obj=None):
# Borrowed from DRF 2.x - return model fields that should be excluded
# from model validation.
cls = self.Meta.model
opts = cls._meta.concrete_model._meta
exclusions = [field.name for field in opts.fields]
for field_name, field in self.fields.items():
field_name = field.source or field_name
if field_name not in exclusions:
continue
if field.read_only:
continue
if isinstance(field, serializers.Serializer):
continue
exclusions.remove(field_name)
# The clean_ methods cannot be run on many-to-many models
exclusions.extend([field.name for field in opts.many_to_many])
return exclusions
def validate(self, attrs):
attrs = super(BaseSerializer, self).validate(attrs)
try:
# Create/update a model instance and run it's full_clean() method to
# do any validation implemented on the model class.
exclusions = self.get_validation_exclusions(self.instance)
obj = self.instance or self.Meta.model()
for k,v in attrs.items():
if k not in exclusions:
setattr(obj, k, v)
obj.full_clean(exclude=exclusions)
# full_clean may modify values on the instance; copy those changes
# back to attrs so they are saved.
for k in attrs.keys():
if k not in exclusions:
attrs[k] = getattr(obj, k)
except DjangoValidationError as exc:
# DjangoValidationError may contain a list or dict; normalize into a
# dict where the keys are the field name and the values are a list
# of error messages, then raise as a DRF ValidationError. DRF would
# normally convert any DjangoValidationError to a non-field specific
# error message; here we preserve field-specific errors raised from
# the model's full_clean method.
d = exc.update_error_dict({})
for k,v in d.items():
v = v if isinstance(v, list) else [v]
v2 = []
for e in v:
if isinstance(e, DjangoValidationError):
v2.extend(list(e))
elif isinstance(e, list):
v2.extend(e)
else:
v2.append(e)
d[k] = list(map(force_text, v2))
raise ValidationError(d)
return attrs
def reverse(self, *args, **kwargs):
kwargs['request'] = self.context.get('request')
return reverse(*args, **kwargs)
@property
def is_detail_view(self):
if 'view' in self.context:
if 'pk' in self.context['view'].kwargs:
return True
return False
class EmptySerializer(serializers.Serializer):
pass
class UnifiedJobTemplateSerializer(BaseSerializer):
# As a base serializer, the capabilities prefetch is not used directly
_capabilities_prefetch = [
'admin', 'execute',
{'copy': ['jobtemplate.project.use', 'jobtemplate.inventory.use',
'workflowjobtemplate.organization.workflow_admin']}
]
class Meta:
model = UnifiedJobTemplate
fields = ('*', 'last_job_run', 'last_job_failed',
'next_job_run', 'status')
def get_related(self, obj):
res = super(UnifiedJobTemplateSerializer, self).get_related(obj)
if obj.current_job:
res['current_job'] = obj.current_job.get_absolute_url(request=self.context.get('request'))
if obj.last_job:
res['last_job'] = obj.last_job.get_absolute_url(request=self.context.get('request'))
if obj.next_schedule:
res['next_schedule'] = obj.next_schedule.get_absolute_url(request=self.context.get('request'))
return res
def get_types(self):
if type(self) is UnifiedJobTemplateSerializer:
return ['project', 'inventory_source', 'job_template', 'system_job_template', 'workflow_job_template',]
else:
return super(UnifiedJobTemplateSerializer, self).get_types()
def get_sub_serializer(self, obj):
serializer_class = None
if type(self) is UnifiedJobTemplateSerializer:
if isinstance(obj, Project):
serializer_class = ProjectSerializer
elif isinstance(obj, InventorySource):
serializer_class = InventorySourceSerializer
elif isinstance(obj, JobTemplate):
serializer_class = JobTemplateSerializer
elif isinstance(obj, SystemJobTemplate):
serializer_class = SystemJobTemplateSerializer
elif isinstance(obj, WorkflowJobTemplate):
serializer_class = WorkflowJobTemplateSerializer
return serializer_class
def to_representation(self, obj):
serializer_class = self.get_sub_serializer(obj)
if serializer_class:
serializer = serializer_class(instance=obj, context=self.context)
# preserve links for list view
if self.parent:
serializer.parent = self.parent
serializer.polymorphic_base = self
# capabilities prefetch is only valid for these models
if isinstance(obj, (JobTemplate, WorkflowJobTemplate)):
serializer.capabilities_prefetch = self._capabilities_prefetch
else:
serializer.capabilities_prefetch = None
return serializer.to_representation(obj)
else:
return super(UnifiedJobTemplateSerializer, self).to_representation(obj)
class UnifiedJobSerializer(BaseSerializer):
show_capabilities = ['start', 'delete']
event_processing_finished = serializers.BooleanField(
help_text=_('Indicates whether all of the events generated by this '
'unified job have been saved to the database.'),
read_only=True
)
class Meta:
model = UnifiedJob
fields = ('*', 'unified_job_template', 'launch_type', 'status',
'failed', 'started', 'finished', 'elapsed', 'job_args',
'job_cwd', 'job_env', 'job_explanation',
'execution_node', 'controller_node',
'result_traceback', 'event_processing_finished')
extra_kwargs = {
'unified_job_template': {
'source': 'unified_job_template_id',
'label': 'unified job template',
},
'job_env': {
'read_only': True,
'label': 'job_env',
}
}
def get_types(self):
if type(self) is UnifiedJobSerializer:
return ['project_update', 'inventory_update', 'job', 'ad_hoc_command', 'system_job', 'workflow_job',]
else:
return super(UnifiedJobSerializer, self).get_types()
def get_related(self, obj):
res = super(UnifiedJobSerializer, self).get_related(obj)
if obj.unified_job_template:
res['unified_job_template'] = obj.unified_job_template.get_absolute_url(request=self.context.get('request'))
if obj.schedule:
res['schedule'] = obj.schedule.get_absolute_url(request=self.context.get('request'))
if isinstance(obj, ProjectUpdate):
res['stdout'] = self.reverse('api:project_update_stdout', kwargs={'pk': obj.pk})
elif isinstance(obj, InventoryUpdate):
res['stdout'] = self.reverse('api:inventory_update_stdout', kwargs={'pk': obj.pk})
elif isinstance(obj, Job):
res['stdout'] = self.reverse('api:job_stdout', kwargs={'pk': obj.pk})
elif isinstance(obj, AdHocCommand):
res['stdout'] = self.reverse('api:ad_hoc_command_stdout', kwargs={'pk': obj.pk})
if obj.workflow_job_id:
res['source_workflow_job'] = self.reverse('api:workflow_job_detail', kwargs={'pk': obj.workflow_job_id})
return res
def get_summary_fields(self, obj):
summary_fields = super(UnifiedJobSerializer, self).get_summary_fields(obj)
if obj.spawned_by_workflow:
summary_fields['source_workflow_job'] = {}
try:
summary_obj = obj.unified_job_node.workflow_job
except UnifiedJob.unified_job_node.RelatedObjectDoesNotExist:
return summary_fields
for field in SUMMARIZABLE_FK_FIELDS['job']:
val = getattr(summary_obj, field, None)
if val is not None:
summary_fields['source_workflow_job'][field] = val
return summary_fields
def get_sub_serializer(self, obj):
serializer_class = None
if type(self) is UnifiedJobSerializer:
if isinstance(obj, ProjectUpdate):
serializer_class = ProjectUpdateSerializer
elif isinstance(obj, InventoryUpdate):
serializer_class = InventoryUpdateSerializer
elif isinstance(obj, Job):
serializer_class = JobSerializer
elif isinstance(obj, AdHocCommand):
serializer_class = AdHocCommandSerializer
elif isinstance(obj, SystemJob):
serializer_class = SystemJobSerializer
elif isinstance(obj, WorkflowJob):
serializer_class = WorkflowJobSerializer
return serializer_class
def to_representation(self, obj):
serializer_class = self.get_sub_serializer(obj)
if serializer_class:
serializer = serializer_class(instance=obj, context=self.context)
# preserve links for list view
if self.parent:
serializer.parent = self.parent
serializer.polymorphic_base = self
# TODO: restrict models for capabilities prefetch, when it is added
ret = serializer.to_representation(obj)
else:
ret = super(UnifiedJobSerializer, self).to_representation(obj)
if 'elapsed' in ret:
if obj and obj.pk and obj.started and not obj.finished:
td = now() - obj.started
ret['elapsed'] = (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10 ** 6) / (10 ** 6 * 1.0)
ret['elapsed'] = float(ret['elapsed'])
return ret
class UnifiedJobListSerializer(UnifiedJobSerializer):
class Meta:
fields = ('*', '-job_args', '-job_cwd', '-job_env', '-result_traceback', '-event_processing_finished')
def get_field_names(self, declared_fields, info):
field_names = super(UnifiedJobListSerializer, self).get_field_names(declared_fields, info)
# Meta multiple inheritance and -field_name options don't seem to be
# taking effect above, so remove the undesired fields here.
return tuple(x for x in field_names if x not in ('job_args', 'job_cwd', 'job_env', 'result_traceback', 'event_processing_finished'))
def get_types(self):
if type(self) is UnifiedJobListSerializer:
return ['project_update', 'inventory_update', 'job', 'ad_hoc_command', 'system_job', 'workflow_job']
else:
return super(UnifiedJobListSerializer, self).get_types()
def get_sub_serializer(self, obj):
serializer_class = None
if type(self) is UnifiedJobListSerializer:
if isinstance(obj, ProjectUpdate):
serializer_class = ProjectUpdateListSerializer
elif isinstance(obj, InventoryUpdate):
serializer_class = InventoryUpdateListSerializer
elif isinstance(obj, Job):
serializer_class = JobListSerializer
elif isinstance(obj, AdHocCommand):
serializer_class = AdHocCommandListSerializer
elif isinstance(obj, SystemJob):
serializer_class = SystemJobListSerializer
elif isinstance(obj, WorkflowJob):
serializer_class = WorkflowJobListSerializer
return serializer_class
def to_representation(self, obj):
serializer_class = self.get_sub_serializer(obj)
if serializer_class:
serializer = serializer_class(instance=obj, context=self.context)
ret = serializer.to_representation(obj)
else:
ret = super(UnifiedJobListSerializer, self).to_representation(obj)
if 'elapsed' in ret:
ret['elapsed'] = float(ret['elapsed'])
return ret
class UnifiedJobStdoutSerializer(UnifiedJobSerializer):
result_stdout = serializers.SerializerMethodField()
class Meta:
fields = ('result_stdout',)
def get_types(self):
if type(self) is UnifiedJobStdoutSerializer:
return ['project_update', 'inventory_update', 'job', 'ad_hoc_command', 'system_job']
else:
return super(UnifiedJobStdoutSerializer, self).get_types()
class UserSerializer(BaseSerializer):
password = serializers.CharField(required=False, default='', write_only=True,
help_text=_('Write-only field used to change the password.'))
ldap_dn = serializers.CharField(source='profile.ldap_dn', read_only=True)
external_account = serializers.SerializerMethodField(help_text=_('Set if the account is managed by an external service'))
is_system_auditor = serializers.BooleanField(default=False)
show_capabilities = ['edit', 'delete']
class Meta:
model = User
fields = ('*', '-name', '-description', '-modified',
'username', 'first_name', 'last_name',
'email', 'is_superuser', 'is_system_auditor', 'password', 'ldap_dn', 'last_login', 'external_account')
def to_representation(self, obj): # TODO: Remove in 3.3
ret = super(UserSerializer, self).to_representation(obj)
ret.pop('password', None)
if obj and type(self) is UserSerializer or self.version == 1:
ret['auth'] = obj.social_auth.values('provider', 'uid')
return ret
def get_validation_exclusions(self, obj=None):
ret = super(UserSerializer, self).get_validation_exclusions(obj)
ret.extend(['password', 'is_system_auditor'])
return ret
def validate_password(self, value):
if not self.instance and value in (None, ''):
raise serializers.ValidationError(_('Password required for new User.'))
return value
def _update_password(self, obj, new_password):
# For now we're not raising an error, just not saving password for
# users managed by LDAP who already have an unusable password set.
if getattr(settings, 'AUTH_LDAP_SERVER_URI', None):
try:
if obj.pk and obj.profile.ldap_dn and not obj.has_usable_password():
new_password = None
except AttributeError:
pass
if (getattr(settings, 'SOCIAL_AUTH_GOOGLE_OAUTH2_KEY', None) or
getattr(settings, 'SOCIAL_AUTH_GITHUB_KEY', None) or
getattr(settings, 'SOCIAL_AUTH_GITHUB_ORG_KEY', None) or
getattr(settings, 'SOCIAL_AUTH_GITHUB_TEAM_KEY', None) or
getattr(settings, 'SOCIAL_AUTH_SAML_ENABLED_IDPS', None)) and obj.social_auth.all():
new_password = None
if (getattr(settings, 'RADIUS_SERVER', None) or
getattr(settings, 'TACACSPLUS_HOST', None)) and obj.enterprise_auth.all():
new_password = None
if new_password:
obj.set_password(new_password)
obj.save(update_fields=['password'])
# Cycle the session key, but if the requesting user is the same
# as the modified user then inject a session key derived from
# the updated user to prevent logout. This is the logic used by
# the Django admin's own user_change_password view.
update_session_auth_hash(self.context['request'], obj)
elif not obj.password:
obj.set_unusable_password()
obj.save(update_fields=['password'])
def get_external_account(self, obj):
return get_external_account(obj)
def create(self, validated_data):
new_password = validated_data.pop('password', None)
is_system_auditor = validated_data.pop('is_system_auditor', None)
obj = super(UserSerializer, self).create(validated_data)
self._update_password(obj, new_password)
if is_system_auditor is not None:
obj.is_system_auditor = is_system_auditor
return obj
def update(self, obj, validated_data):
new_password = validated_data.pop('password', None)
is_system_auditor = validated_data.pop('is_system_auditor', None)
obj = super(UserSerializer, self).update(obj, validated_data)
self._update_password(obj, new_password)
if is_system_auditor is not None:
obj.is_system_auditor = is_system_auditor
return obj
def get_related(self, obj):
res = super(UserSerializer, self).get_related(obj)
res.update(dict(
teams = self.reverse('api:user_teams_list', kwargs={'pk': obj.pk}),
organizations = self.reverse('api:user_organizations_list', kwargs={'pk': obj.pk}),
admin_of_organizations = self.reverse('api:user_admin_of_organizations_list', kwargs={'pk': obj.pk}),
projects = self.reverse('api:user_projects_list', kwargs={'pk': obj.pk}),
credentials = self.reverse('api:user_credentials_list', kwargs={'pk': obj.pk}),
roles = self.reverse('api:user_roles_list', kwargs={'pk': obj.pk}),
activity_stream = self.reverse('api:user_activity_stream_list', kwargs={'pk': obj.pk}),
access_list = self.reverse('api:user_access_list', kwargs={'pk': obj.pk}),
tokens = self.reverse('api:o_auth2_token_list', kwargs={'pk': obj.pk}),
authorized_tokens = self.reverse('api:user_authorized_token_list', kwargs={'pk': obj.pk}),
personal_tokens = self.reverse('api:user_personal_token_list', kwargs={'pk': obj.pk}),
))
return res
def _validate_ldap_managed_field(self, value, field_name):
if not getattr(settings, 'AUTH_LDAP_SERVER_URI', None):
return value
try:
is_ldap_user = bool(self.instance and self.instance.profile.ldap_dn)
except AttributeError:
is_ldap_user = False
if is_ldap_user:
ldap_managed_fields = ['username']
ldap_managed_fields.extend(getattr(settings, 'AUTH_LDAP_USER_ATTR_MAP', {}).keys())
ldap_managed_fields.extend(getattr(settings, 'AUTH_LDAP_USER_FLAGS_BY_GROUP', {}).keys())
if field_name in ldap_managed_fields:
if value != getattr(self.instance, field_name):
raise serializers.ValidationError(_('Unable to change %s on user managed by LDAP.') % field_name)
return value
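# Illustrative example (hypothetical settings): with AUTH_LDAP_SERVER_URI set and
# AUTH_LDAP_USER_ATTR_MAP mapping 'first_name', an attempt to change first_name
# on an LDAP-backed user (one whose profile has an ldap_dn) raises a
# ValidationError via _validate_ldap_managed_field() above.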
def validate_username(self, value):
return self._validate_ldap_managed_field(value, 'username')
def validate_first_name(self, value):
return self._validate_ldap_managed_field(value, 'first_name')
def validate_last_name(self, value):
return self._validate_ldap_managed_field(value, 'last_name')
def validate_email(self, value):
return self._validate_ldap_managed_field(value, 'email')
def validate_is_superuser(self, value):
return self._validate_ldap_managed_field(value, 'is_superuser')
class UserActivityStreamSerializer(UserSerializer):
"""Changes to system auditor status are shown as separate entries,
so by excluding it from fields here we avoid duplication, which
would carry some unintended consequences.
"""
class Meta:
model = User
fields = ('*', '-is_system_auditor')
class BaseOAuth2TokenSerializer(BaseSerializer):
refresh_token = serializers.SerializerMethodField()
token = serializers.SerializerMethodField()
ALLOWED_SCOPES = ['read', 'write']
class Meta:
model = OAuth2AccessToken
fields = (
'*', '-name', 'description', 'user', 'token', 'refresh_token',
'application', 'expires', 'scope',
)
read_only_fields = ('user', 'token', 'expires', 'refresh_token')
extra_kwargs = {
'scope': {'allow_null': False, 'required': False},
'user': {'allow_null': False, 'required': True}
}
def get_token(self, obj):
request = self.context.get('request', None)
try:
if request.method == 'POST':
return obj.token
else:
return CENSOR_VALUE
except ObjectDoesNotExist:
return ''
def get_refresh_token(self, obj):
request = self.context.get('request', None)
try:
if not obj.refresh_token:
return None
elif request.method == 'POST':
return getattr(obj.refresh_token, 'token', '')
else:
return CENSOR_VALUE
except ObjectDoesNotExist:
return None
def get_related(self, obj):
ret = super(BaseOAuth2TokenSerializer, self).get_related(obj)
if obj.user:
ret['user'] = self.reverse('api:user_detail', kwargs={'pk': obj.user.pk})
if obj.application:
ret['application'] = self.reverse(
'api:o_auth2_application_detail', kwargs={'pk': obj.application.pk}
)
ret['activity_stream'] = self.reverse(
'api:o_auth2_token_activity_stream_list', kwargs={'pk': obj.pk}
)
return ret
def _is_valid_scope(self, value):
if not value or (not isinstance(value, str)):
return False
words = value.split()
for word in words:
if words.count(word) > 1:
return False # do not allow duplicates
if word not in self.ALLOWED_SCOPES:
return False
return True
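# Illustrative examples (hypothetical values): _is_valid_scope('read write') is
# True, while 'read read' (a duplicate) and 'read admin' ('admin' is not in
# ALLOWED_SCOPES) are both rejected.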
def validate_scope(self, value):
if not self._is_valid_scope(value):
raise serializers.ValidationError(_(
'Must be a simple space-separated string with allowed scopes {}.'
).format(self.ALLOWED_SCOPES))
return value
def create(self, validated_data):
validated_data['user'] = self.context['request'].user
try:
return super(BaseOAuth2TokenSerializer, self).create(validated_data)
except oauth2.AccessDeniedError as e:
raise PermissionDenied(str(e))
class UserAuthorizedTokenSerializer(BaseOAuth2TokenSerializer):
class Meta:
extra_kwargs = {
'scope': {'allow_null': False, 'required': False},
'user': {'allow_null': False, 'required': True},
'application': {'allow_null': False, 'required': True}
}
def create(self, validated_data):
current_user = self.context['request'].user
validated_data['token'] = generate_token()
validated_data['expires'] = now() + timedelta(
seconds=settings.OAUTH2_PROVIDER['ACCESS_TOKEN_EXPIRE_SECONDS']
)
obj = super(UserAuthorizedTokenSerializer, self).create(validated_data)
obj.save()
if obj.application and obj.application.authorization_grant_type != 'implicit':
RefreshToken.objects.create(
user=current_user,
token=generate_token(),
application=obj.application,
access_token=obj
)
return obj
class OAuth2TokenSerializer(BaseOAuth2TokenSerializer):
def create(self, validated_data):
current_user = self.context['request'].user
validated_data['token'] = generate_token()
validated_data['expires'] = now() + timedelta(
seconds=settings.OAUTH2_PROVIDER['ACCESS_TOKEN_EXPIRE_SECONDS']
)
obj = super(OAuth2TokenSerializer, self).create(validated_data)
if obj.application and obj.application.user:
obj.user = obj.application.user
obj.save()
if obj.application and obj.application.authorization_grant_type != 'implicit':
RefreshToken.objects.create(
user=current_user,
token=generate_token(),
application=obj.application,
access_token=obj
)
return obj
class OAuth2TokenDetailSerializer(OAuth2TokenSerializer):
class Meta:
read_only_fields = ('*', 'user', 'application')
class UserPersonalTokenSerializer(BaseOAuth2TokenSerializer):
class Meta:
read_only_fields = ('user', 'token', 'expires', 'application')
def create(self, validated_data):
validated_data['token'] = generate_token()
validated_data['expires'] = now() + timedelta(
seconds=settings.OAUTH2_PROVIDER['ACCESS_TOKEN_EXPIRE_SECONDS']
)
validated_data['application'] = None
obj = super(UserPersonalTokenSerializer, self).create(validated_data)
obj.save()
return obj
class OAuth2ApplicationSerializer(BaseSerializer):
show_capabilities = ['edit', 'delete']
class Meta:
model = OAuth2Application
fields = (
'*', 'description', '-user', 'client_id', 'client_secret', 'client_type',
'redirect_uris', 'authorization_grant_type', 'skip_authorization', 'organization'
)
read_only_fields = ('client_id', 'client_secret')
read_only_on_update_fields = ('user', 'authorization_grant_type')
extra_kwargs = {
'user': {'allow_null': True, 'required': False},
'organization': {'allow_null': False},
'authorization_grant_type': {'allow_null': False, 'label': _('Authorization Grant Type')},
'client_secret': {
'label': _('Client Secret')
},
'client_type': {
'label': _('Client Type')
},
'redirect_uris': {
'label': _('Redirect URIs')
},
'skip_authorization': {
'label': _('Skip Authorization')
},
}
def to_representation(self, obj):
ret = super(OAuth2ApplicationSerializer, self).to_representation(obj)
request = self.context.get('request', None)
if request.method != 'POST' and obj.client_type == 'confidential':
ret['client_secret'] = CENSOR_VALUE
if obj.client_type == 'public':
ret.pop('client_secret', None)
return ret
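    # Net effect of to_representation above: a confidential application's client_secret
    # is shown only in the creating POST response and censored afterwards, while public
    # applications never return client_secret at all.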
def get_related(self, obj):
res = super(OAuth2ApplicationSerializer, self).get_related(obj)
res.update(dict(
tokens = self.reverse('api:o_auth2_application_token_list', kwargs={'pk': obj.pk}),
activity_stream = self.reverse(
'api:o_auth2_application_activity_stream_list', kwargs={'pk': obj.pk}
)
))
return res
def get_modified(self, obj):
if obj is None:
return None
return obj.updated
def _summary_field_tokens(self, obj):
token_list = [{'id': x.pk, 'token': CENSOR_VALUE, 'scope': x.scope} for x in obj.oauth2accesstoken_set.all()[:10]]
if has_model_field_prefetched(obj, 'oauth2accesstoken_set'):
token_count = len(obj.oauth2accesstoken_set.all())
else:
if len(token_list) < 10:
token_count = len(token_list)
else:
token_count = obj.oauth2accesstoken_set.count()
return {'count': token_count, 'results': token_list}
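    # Hypothetical shape of the summary built above (ids and scopes are made up;
    # at most ten tokens are listed, and token values are always CENSOR_VALUE):
    #   {'count': 2, 'results': [{'id': 7, 'token': CENSOR_VALUE, 'scope': 'read'},
    #                            {'id': 9, 'token': CENSOR_VALUE, 'scope': 'write'}]}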
def get_summary_fields(self, obj):
ret = super(OAuth2ApplicationSerializer, self).get_summary_fields(obj)
ret['tokens'] = self._summary_field_tokens(obj)
return ret
class OrganizationSerializer(BaseSerializer):
show_capabilities = ['edit', 'delete']
class Meta:
model = Organization
fields = ('*', 'max_hosts', 'custom_virtualenv',)
def get_related(self, obj):
res = super(OrganizationSerializer, self).get_related(obj)
res.update(dict(
projects = self.reverse('api:organization_projects_list', kwargs={'pk': obj.pk}),
inventories = self.reverse('api:organization_inventories_list', kwargs={'pk': obj.pk}),
workflow_job_templates = self.reverse('api:organization_workflow_job_templates_list', kwargs={'pk': obj.pk}),
users = self.reverse('api:organization_users_list', kwargs={'pk': obj.pk}),
admins = self.reverse('api:organization_admins_list', kwargs={'pk': obj.pk}),
teams = self.reverse('api:organization_teams_list', kwargs={'pk': obj.pk}),
credentials = self.reverse('api:organization_credential_list', kwargs={'pk': obj.pk}),
applications = self.reverse('api:organization_applications_list', kwargs={'pk': obj.pk}),
activity_stream = self.reverse('api:organization_activity_stream_list', kwargs={'pk': obj.pk}),
notification_templates = self.reverse('api:organization_notification_templates_list', kwargs={'pk': obj.pk}),
notification_templates_any = self.reverse('api:organization_notification_templates_any_list', kwargs={'pk': obj.pk}),
notification_templates_success = self.reverse('api:organization_notification_templates_success_list', kwargs={'pk': obj.pk}),
notification_templates_error = self.reverse('api:organization_notification_templates_error_list', kwargs={'pk': obj.pk}),
object_roles = self.reverse('api:organization_object_roles_list', kwargs={'pk': obj.pk}),
access_list = self.reverse('api:organization_access_list', kwargs={'pk': obj.pk}),
instance_groups = self.reverse('api:organization_instance_groups_list', kwargs={'pk': obj.pk}),
))
return res
def get_summary_fields(self, obj):
summary_dict = super(OrganizationSerializer, self).get_summary_fields(obj)
counts_dict = self.context.get('related_field_counts', None)
if counts_dict is not None and summary_dict is not None:
if obj.id not in counts_dict:
summary_dict['related_field_counts'] = {
'inventories': 0, 'teams': 0, 'users': 0,
'job_templates': 0, 'admins': 0, 'projects': 0}
else:
summary_dict['related_field_counts'] = counts_dict[obj.id]
return summary_dict
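    # When the view places 'related_field_counts' in the serializer context, the summary
    # gains a block like the following (counts here are hypothetical):
    #   'related_field_counts': {'inventories': 3, 'teams': 1, 'users': 12,
    #                            'job_templates': 5, 'admins': 2, 'projects': 4}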
def validate(self, attrs):
obj = self.instance
view = self.context['view']
obj_limit = getattr(obj, 'max_hosts', None)
api_limit = attrs.get('max_hosts')
if not view.request.user.is_superuser:
if api_limit is not None and api_limit != obj_limit:
# Only allow superusers to edit the max_hosts field
raise serializers.ValidationError(_('Cannot change max_hosts.'))
return super(OrganizationSerializer, self).validate(attrs)
class ProjectOptionsSerializer(BaseSerializer):
class Meta:
fields = ('*', 'local_path', 'scm_type', 'scm_url', 'scm_branch',
'scm_clean', 'scm_delete_on_update', 'credential', 'timeout',)
def get_related(self, obj):
res = super(ProjectOptionsSerializer, self).get_related(obj)
if obj.credential:
res['credential'] = self.reverse('api:credential_detail',
kwargs={'pk': obj.credential.pk})
return res
def validate(self, attrs):
errors = {}
# Don't allow assigning a local_path used by another project.
# Don't allow assigning a local_path when scm_type is set.
valid_local_paths = Project.get_local_path_choices()
if self.instance:
scm_type = attrs.get('scm_type', self.instance.scm_type) or u''
else:
scm_type = attrs.get('scm_type', u'') or u''
if self.instance and not scm_type:
valid_local_paths.append(self.instance.local_path)
if scm_type:
attrs.pop('local_path', None)
if 'local_path' in attrs and attrs['local_path'] not in valid_local_paths:
errors['local_path'] = _('This path is already being used by another manual project.')
if errors:
raise serializers.ValidationError(errors)
return super(ProjectOptionsSerializer, self).validate(attrs)
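    # In short: for SCM-backed projects any submitted local_path is discarded, while a
    # manual project (scm_type == '') must use a local_path from
    # Project.get_local_path_choices() that no other manual project has claimed.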
def to_representation(self, obj):
ret = super(ProjectOptionsSerializer, self).to_representation(obj)
if obj is not None and 'credential' in ret and not obj.credential:
ret['credential'] = None
return ret
class ProjectSerializer(UnifiedJobTemplateSerializer, ProjectOptionsSerializer):
status = serializers.ChoiceField(choices=Project.PROJECT_STATUS_CHOICES, read_only=True)
last_update_failed = serializers.BooleanField(read_only=True)
last_updated = serializers.DateTimeField(read_only=True)
show_capabilities = ['start', 'schedule', 'edit', 'delete', 'copy']
capabilities_prefetch = [
'admin', 'update',
{'copy': 'organization.project_admin'}
]
class Meta:
model = Project
fields = ('*', 'organization', 'scm_update_on_launch',
'scm_update_cache_timeout', 'scm_revision', 'custom_virtualenv',) + \
('last_update_failed', 'last_updated') # Backwards compatibility
def get_related(self, obj):
res = super(ProjectSerializer, self).get_related(obj)
res.update(dict(
teams = self.reverse('api:project_teams_list', kwargs={'pk': obj.pk}),
playbooks = self.reverse('api:project_playbooks', kwargs={'pk': obj.pk}),
inventory_files = self.reverse('api:project_inventories', kwargs={'pk': obj.pk}),
update = self.reverse('api:project_update_view', kwargs={'pk': obj.pk}),
project_updates = self.reverse('api:project_updates_list', kwargs={'pk': obj.pk}),
scm_inventory_sources = self.reverse('api:project_scm_inventory_sources', kwargs={'pk': obj.pk}),
schedules = self.reverse('api:project_schedules_list', kwargs={'pk': obj.pk}),
activity_stream = self.reverse('api:project_activity_stream_list', kwargs={'pk': obj.pk}),
notification_templates_any = self.reverse('api:project_notification_templates_any_list', kwargs={'pk': obj.pk}),
notification_templates_success = self.reverse('api:project_notification_templates_success_list', kwargs={'pk': obj.pk}),
notification_templates_error = self.reverse('api:project_notification_templates_error_list', kwargs={'pk': obj.pk}),
access_list = self.reverse('api:project_access_list', kwargs={'pk': obj.pk}),
object_roles = self.reverse('api:project_object_roles_list', kwargs={'pk': obj.pk}),
))
if self.version > 1:
res['copy'] = self.reverse('api:project_copy', kwargs={'pk': obj.pk})
if obj.organization:
res['organization'] = self.reverse('api:organization_detail',
kwargs={'pk': obj.organization.pk})
# Backwards compatibility.
if obj.current_update:
res['current_update'] = self.reverse('api:project_update_detail',
kwargs={'pk': obj.current_update.pk})
if obj.last_update:
res['last_update'] = self.reverse('api:project_update_detail',
kwargs={'pk': obj.last_update.pk})
return res
def to_representation(self, obj):
ret = super(ProjectSerializer, self).to_representation(obj)
if 'scm_revision' in ret and obj.scm_type == '':
ret['scm_revision'] = ''
return ret
def validate(self, attrs):
def get_field_from_model_or_attrs(fd):
return attrs.get(fd, self.instance and getattr(self.instance, fd) or None)
organization = None
if 'organization' in attrs:
organization = attrs['organization']
elif self.instance:
organization = self.instance.organization
view = self.context.get('view', None)
if not organization and not view.request.user.is_superuser:
# Only allow super users to create orgless projects
raise serializers.ValidationError(_('Organization is missing'))
elif get_field_from_model_or_attrs('scm_type') == '':
for fd in ('scm_update_on_launch', 'scm_delete_on_update', 'scm_clean'):
if get_field_from_model_or_attrs(fd):
raise serializers.ValidationError({fd: _('Update options must be set to false for manual projects.')})
return super(ProjectSerializer, self).validate(attrs)
class ProjectPlaybooksSerializer(ProjectSerializer):
playbooks = serializers.SerializerMethodField(help_text=_('Array of playbooks available within this project.'))
class Meta:
model = Project
fields = ('playbooks',)
def get_playbooks(self, obj):
return obj.playbook_files if obj.scm_type else obj.playbooks
@property
def data(self):
ret = super(ProjectPlaybooksSerializer, self).data
ret = ret.get('playbooks', [])
return ReturnList(ret, serializer=self)
class ProjectInventoriesSerializer(ProjectSerializer):
inventory_files = serializers.ReadOnlyField(help_text=_(
'Array of inventory files and directories available within this project, '
'not comprehensive.'))
class Meta:
model = Project
fields = ('inventory_files',)
@property
def data(self):
ret = super(ProjectInventoriesSerializer, self).data
ret = ret.get('inventory_files', [])
return ReturnList(ret, serializer=self)
class ProjectUpdateViewSerializer(ProjectSerializer):
can_update = serializers.BooleanField(read_only=True)
class Meta:
fields = ('can_update',)
class ProjectUpdateSerializer(UnifiedJobSerializer, ProjectOptionsSerializer):
class Meta:
model = ProjectUpdate
fields = ('*', 'project', 'job_type', '-controller_node')
def get_related(self, obj):
res = super(ProjectUpdateSerializer, self).get_related(obj)
try:
res.update(dict(
project = self.reverse('api:project_detail', kwargs={'pk': obj.project.pk}),
))
except ObjectDoesNotExist:
pass
res.update(dict(
cancel = self.reverse('api:project_update_cancel', kwargs={'pk': obj.pk}),
scm_inventory_updates = self.reverse('api:project_update_scm_inventory_updates', kwargs={'pk': obj.pk}),
notifications = self.reverse('api:project_update_notifications_list', kwargs={'pk': obj.pk}),
events = self.reverse('api:project_update_events_list', kwargs={'pk': obj.pk}),
))
return res
class ProjectUpdateDetailSerializer(ProjectUpdateSerializer):
host_status_counts = serializers.SerializerMethodField(
help_text=_('A count of hosts uniquely assigned to each status.'),
)
playbook_counts = serializers.SerializerMethodField(
help_text=_('A count of all plays and tasks for the job run.'),
)
class Meta:
model = ProjectUpdate
fields = ('*', 'host_status_counts', 'playbook_counts',)
def get_playbook_counts(self, obj):
task_count = obj.project_update_events.filter(event='playbook_on_task_start').count()
play_count = obj.project_update_events.filter(event='playbook_on_play_start').count()
data = {'play_count': play_count, 'task_count': task_count}
return data
def get_host_status_counts(self, obj):
try:
counts = obj.project_update_events.only('event_data').get(event='playbook_on_stats').get_host_status_counts()
except ProjectUpdateEvent.DoesNotExist:
counts = {}
return counts
class ProjectUpdateListSerializer(ProjectUpdateSerializer, UnifiedJobListSerializer):
class Meta:
model = ProjectUpdate
fields = ('*', '-controller_node') # field removal undone by UJ serializer
class ProjectUpdateCancelSerializer(ProjectUpdateSerializer):
can_cancel = serializers.BooleanField(read_only=True)
class Meta:
fields = ('can_cancel',)
class BaseSerializerWithVariables(BaseSerializer):
def validate_variables(self, value):
return vars_validate_or_raise(value)
class InventorySerializer(BaseSerializerWithVariables):
show_capabilities = ['edit', 'delete', 'adhoc', 'copy']
capabilities_prefetch = [
'admin', 'adhoc',
{'copy': 'organization.inventory_admin'}
]
groups_with_active_failures = serializers.IntegerField(
read_only=True,
min_value=0,
help_text=_('This field has been deprecated and will be removed in a future release')
)
class Meta:
model = Inventory
fields = ('*', 'organization', 'kind', 'host_filter', 'variables', 'has_active_failures',
'total_hosts', 'hosts_with_active_failures', 'total_groups',
'groups_with_active_failures', 'has_inventory_sources',
'total_inventory_sources', 'inventory_sources_with_failures',
'insights_credential', 'pending_deletion',)
def get_related(self, obj):
res = super(InventorySerializer, self).get_related(obj)
res.update(dict(
hosts = self.reverse('api:inventory_hosts_list', kwargs={'pk': obj.pk}),
groups = self.reverse('api:inventory_groups_list', kwargs={'pk': obj.pk}),
root_groups = self.reverse('api:inventory_root_groups_list', kwargs={'pk': obj.pk}),
variable_data = self.reverse('api:inventory_variable_data', kwargs={'pk': obj.pk}),
script = self.reverse('api:inventory_script_view', kwargs={'pk': obj.pk}),
tree = self.reverse('api:inventory_tree_view', kwargs={'pk': obj.pk}),
inventory_sources = self.reverse('api:inventory_inventory_sources_list', kwargs={'pk': obj.pk}),
update_inventory_sources = self.reverse('api:inventory_inventory_sources_update', kwargs={'pk': obj.pk}),
activity_stream = self.reverse('api:inventory_activity_stream_list', kwargs={'pk': obj.pk}),
job_templates = self.reverse('api:inventory_job_template_list', kwargs={'pk': obj.pk}),
ad_hoc_commands = self.reverse('api:inventory_ad_hoc_commands_list', kwargs={'pk': obj.pk}),
access_list = self.reverse('api:inventory_access_list', kwargs={'pk': obj.pk}),
object_roles = self.reverse('api:inventory_object_roles_list', kwargs={'pk': obj.pk}),
instance_groups = self.reverse('api:inventory_instance_groups_list', kwargs={'pk': obj.pk}),
))
if self.version > 1:
res['copy'] = self.reverse('api:inventory_copy', kwargs={'pk': obj.pk})
if obj.insights_credential:
res['insights_credential'] = self.reverse('api:credential_detail', kwargs={'pk': obj.insights_credential.pk})
if obj.organization:
res['organization'] = self.reverse('api:organization_detail', kwargs={'pk': obj.organization.pk})
return res
def to_representation(self, obj):
ret = super(InventorySerializer, self).to_representation(obj)
if obj is not None and 'organization' in ret and not obj.organization:
ret['organization'] = None
return ret
def validate_host_filter(self, host_filter):
if host_filter:
try:
for match in JSONBField.get_lookups().keys():
if match == 'exact':
# __exact is allowed
continue
match = '__{}'.format(match)
if re.match(
'ansible_facts[^=]+{}='.format(match),
host_filter
):
raise models.base.ValidationError({
'host_filter': 'ansible_facts does not support searching with {}'.format(match)
})
SmartFilter().query_from_string(host_filter)
except RuntimeError as e:
raise models.base.ValidationError(e)
return host_filter
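    # Illustrative host_filter values, based on the checks above (the full accepted
    # grammar is whatever SmartFilter().query_from_string parses):
    #   'ansible_facts__ansible_distribution="CentOS"'          -> allowed (exact lookup)
    #   'ansible_facts__ansible_distribution__contains="Cent"'  -> rejected (non-exact
    #                                                               lookup on ansible_facts)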
def validate(self, attrs):
kind = None
if 'kind' in attrs:
kind = attrs['kind']
elif self.instance:
kind = self.instance.kind
host_filter = None
if 'host_filter' in attrs:
host_filter = attrs['host_filter']
elif self.instance:
host_filter = self.instance.host_filter
if kind == 'smart' and not host_filter:
raise serializers.ValidationError({'host_filter': _(
'Smart inventories must specify host_filter')})
return super(InventorySerializer, self).validate(attrs)
# TODO: Remove entire serializer in 3.3, replace with normal serializer
class InventoryDetailSerializer(InventorySerializer):
def get_fields(self):
fields = super(InventoryDetailSerializer, self).get_fields()
if self.version == 1:
fields['can_run_ad_hoc_commands'] = serializers.SerializerMethodField()
return fields
def get_can_run_ad_hoc_commands(self, obj):
view = self.context.get('view', None)
return bool(obj and view and view.request and view.request.user and view.request.user.can_access(Inventory, 'run_ad_hoc_commands', obj))
class InventoryScriptSerializer(InventorySerializer):
class Meta:
fields = ()
class HostSerializer(BaseSerializerWithVariables):
show_capabilities = ['edit', 'delete']
capabilities_prefetch = ['inventory.admin']
class Meta:
model = Host
fields = ('*', 'inventory', 'enabled', 'instance_id', 'variables',
'has_active_failures', 'has_inventory_sources', 'last_job',
'last_job_host_summary', 'insights_system_id', 'ansible_facts_modified',)
read_only_fields = ('last_job', 'last_job_host_summary', 'insights_system_id',
'ansible_facts_modified',)
def build_relational_field(self, field_name, relation_info):
field_class, field_kwargs = super(HostSerializer, self).build_relational_field(field_name, relation_info)
# Inventory is read-only unless creating a new host.
if self.instance and field_name == 'inventory':
field_kwargs['read_only'] = True
field_kwargs.pop('queryset', None)
return field_class, field_kwargs
def get_related(self, obj):
res = super(HostSerializer, self).get_related(obj)
res.update(dict(
variable_data = self.reverse('api:host_variable_data', kwargs={'pk': obj.pk}),
groups = self.reverse('api:host_groups_list', kwargs={'pk': obj.pk}),
all_groups = self.reverse('api:host_all_groups_list', kwargs={'pk': obj.pk}),
job_events = self.reverse('api:host_job_events_list', kwargs={'pk': obj.pk}),
job_host_summaries = self.reverse('api:host_job_host_summaries_list', kwargs={'pk': obj.pk}),
activity_stream = self.reverse('api:host_activity_stream_list', kwargs={'pk': obj.pk}),
inventory_sources = self.reverse('api:host_inventory_sources_list', kwargs={'pk': obj.pk}),
smart_inventories = self.reverse('api:host_smart_inventories_list', kwargs={'pk': obj.pk}),
ad_hoc_commands = self.reverse('api:host_ad_hoc_commands_list', kwargs={'pk': obj.pk}),
ad_hoc_command_events = self.reverse('api:host_ad_hoc_command_events_list', kwargs={'pk': obj.pk}),
))
if self.version > 1:
res['insights'] = self.reverse('api:host_insights', kwargs={'pk': obj.pk})
if obj.inventory:
res['inventory'] = self.reverse('api:inventory_detail', kwargs={'pk': obj.inventory.pk})
if obj.last_job:
res['last_job'] = self.reverse('api:job_detail', kwargs={'pk': obj.last_job.pk})
if obj.last_job_host_summary:
res['last_job_host_summary'] = self.reverse('api:job_host_summary_detail', kwargs={'pk': obj.last_job_host_summary.pk})
if self.version > 1:
res.update(dict(
ansible_facts = self.reverse('api:host_ansible_facts_detail', kwargs={'pk': obj.pk}),
))
return res
def get_summary_fields(self, obj):
d = super(HostSerializer, self).get_summary_fields(obj)
try:
d['last_job']['job_template_id'] = obj.last_job.job_template.id
d['last_job']['job_template_name'] = obj.last_job.job_template.name
except (KeyError, AttributeError):
pass
if has_model_field_prefetched(obj, 'groups'):
group_list = sorted([{'id': g.id, 'name': g.name} for g in obj.groups.all()], key=lambda x: x['id'])[:5]
else:
group_list = [{'id': g.id, 'name': g.name} for g in obj.groups.all().order_by('id')[:5]]
group_cnt = obj.groups.count()
d.setdefault('groups', {'count': group_cnt, 'results': group_list})
d.setdefault('recent_jobs', [{
'id': j.job.id,
'name': j.job.job_template.name if j.job.job_template is not None else "",
'status': j.job.status,
'finished': j.job.finished,
} for j in obj.job_host_summaries.select_related('job__job_template').order_by('-created')[:5]])
return d
def _get_host_port_from_name(self, name):
        # Allow the hostname (except IPv6, for now) to specify the port number inline.
port = None
if name.count(':') == 1:
name, port = name.split(':')
try:
port = int(port)
if port < 1 or port > 65535:
raise ValueError
except ValueError:
raise serializers.ValidationError(_(u'Invalid port specification: %s') % force_text(port))
return name, port
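    # Illustrative results of the parsing above:
    #   'web1.example.com:2222' -> ('web1.example.com', 2222)
    #   'web1.example.com'      -> ('web1.example.com', None)
    #   'web1.example.com:abc'  -> ValidationError (port must be an integer 1-65535)
    # IPv6 literals are not handled here, per the comment above.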
def validate_name(self, value):
name = force_text(value or '')
# Validate here only, update in main validate method.
host, port = self._get_host_port_from_name(name)
return value
def validate_inventory(self, value):
if value.kind == 'smart':
raise serializers.ValidationError({"detail": _("Cannot create Host for Smart Inventory")})
return value
def validate_variables(self, value):
return vars_validate_or_raise(value)
def validate(self, attrs):
name = force_text(attrs.get('name', self.instance and self.instance.name or ''))
host, port = self._get_host_port_from_name(name)
if port:
attrs['name'] = host
variables = force_text(attrs.get('variables', self.instance and self.instance.variables or ''))
vars_dict = parse_yaml_or_json(variables)
vars_dict['ansible_ssh_port'] = port
attrs['variables'] = json.dumps(vars_dict)
return super(HostSerializer, self).validate(attrs)
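    # Net effect of validate above: creating a host named 'web1.example.com:2222' stores
    # the name as 'web1.example.com' and merges {"ansible_ssh_port": 2222} into the
    # host's variables.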
def to_representation(self, obj):
ret = super(HostSerializer, self).to_representation(obj)
if not obj:
return ret
if 'inventory' in ret and not obj.inventory:
ret['inventory'] = None
if 'last_job' in ret and not obj.last_job:
ret['last_job'] = None
if 'last_job_host_summary' in ret and not obj.last_job_host_summary:
ret['last_job_host_summary'] = None
return ret
class AnsibleFactsSerializer(BaseSerializer):
class Meta:
model = Host
def to_representation(self, obj):
return obj.ansible_facts
class GroupSerializer(BaseSerializerWithVariables):
capabilities_prefetch = ['inventory.admin', 'inventory.adhoc']
groups_with_active_failures = serializers.IntegerField(
read_only=True,
min_value=0,
help_text=_('This field has been deprecated and will be removed in a future release')
)
class Meta:
model = Group
fields = ('*', 'inventory', 'variables', 'has_active_failures',
'total_hosts', 'hosts_with_active_failures', 'total_groups',
'groups_with_active_failures', 'has_inventory_sources')
@property
def show_capabilities(self): # TODO: consolidate in 3.3
if self.version == 1:
return ['copy', 'edit', 'start', 'schedule', 'delete']
else:
return ['copy', 'edit', 'delete']
def build_relational_field(self, field_name, relation_info):
field_class, field_kwargs = super(GroupSerializer, self).build_relational_field(field_name, relation_info)
# Inventory is read-only unless creating a new group.
if self.instance and field_name == 'inventory':
field_kwargs['read_only'] = True
field_kwargs.pop('queryset', None)
return field_class, field_kwargs
def get_summary_fields(self, obj): # TODO: remove in 3.3
summary_fields = super(GroupSerializer, self).get_summary_fields(obj)
if self.version == 1:
try:
inv_src = obj.deprecated_inventory_source
summary_fields['inventory_source'] = {}
for field in SUMMARIZABLE_FK_FIELDS['inventory_source']:
fval = getattr(inv_src, field, None)
if fval is not None:
summary_fields['inventory_source'][field] = fval
except Group.deprecated_inventory_source.RelatedObjectDoesNotExist:
pass
return summary_fields
def get_related(self, obj):
res = super(GroupSerializer, self).get_related(obj)
res.update(dict(
variable_data = self.reverse('api:group_variable_data', kwargs={'pk': obj.pk}),
hosts = self.reverse('api:group_hosts_list', kwargs={'pk': obj.pk}),
potential_children = self.reverse('api:group_potential_children_list', kwargs={'pk': obj.pk}),
children = self.reverse('api:group_children_list', kwargs={'pk': obj.pk}),
all_hosts = self.reverse('api:group_all_hosts_list', kwargs={'pk': obj.pk}),
job_events = self.reverse('api:group_job_events_list', kwargs={'pk': obj.pk}),
job_host_summaries = self.reverse('api:group_job_host_summaries_list', kwargs={'pk': obj.pk}),
activity_stream = self.reverse('api:group_activity_stream_list', kwargs={'pk': obj.pk}),
inventory_sources = self.reverse('api:group_inventory_sources_list', kwargs={'pk': obj.pk}),
ad_hoc_commands = self.reverse('api:group_ad_hoc_commands_list', kwargs={'pk': obj.pk}),
))
if self.version == 1: # TODO: remove in 3.3
try:
res['inventory_source'] = self.reverse('api:inventory_source_detail',
kwargs={'pk': obj.deprecated_inventory_source.pk})
except Group.deprecated_inventory_source.RelatedObjectDoesNotExist:
pass
if obj.inventory:
res['inventory'] = self.reverse('api:inventory_detail', kwargs={'pk': obj.inventory.pk})
return res
def create(self, validated_data): # TODO: remove in 3.3
instance = super(GroupSerializer, self).create(validated_data)
if self.version == 1: # TODO: remove in 3.3
manual_src = InventorySource(deprecated_group=instance, inventory=instance.inventory)
manual_src.v1_group_name = instance.name
manual_src.save()
return instance
def validate_name(self, value):
if value in ('all', '_meta'):
raise serializers.ValidationError(_('Invalid group name.'))
return value
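    # 'all' and '_meta' are reserved keys in Ansible inventory output, which is why they
    # are rejected as group names above.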
def validate_inventory(self, value):
if value.kind == 'smart':
raise serializers.ValidationError({"detail": _("Cannot create Group for Smart Inventory")})
return value
def to_representation(self, obj):
ret = super(GroupSerializer, self).to_representation(obj)
if obj is not None and 'inventory' in ret and not obj.inventory:
ret['inventory'] = None
return ret
class GroupTreeSerializer(GroupSerializer):
children = serializers.SerializerMethodField()
class Meta:
model = Group
fields = ('*', 'children')
def get_children(self, obj):
if obj is None:
return {}
children_qs = obj.children
children_qs = children_qs.select_related('inventory')
children_qs = children_qs.prefetch_related('inventory_source')
return GroupTreeSerializer(children_qs, many=True).data
class BaseVariableDataSerializer(BaseSerializer):
class Meta:
fields = ('variables',)
def to_representation(self, obj):
if obj is None:
return {}
ret = super(BaseVariableDataSerializer, self).to_representation(obj)
return parse_yaml_or_json(ret.get('variables', '') or '{}')
def to_internal_value(self, data):
data = {'variables': json.dumps(data)}
return super(BaseVariableDataSerializer, self).to_internal_value(data)
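    # The *VariableDataSerializer subclasses below back the .../variable_data endpoints:
    # reads return the stored YAML/JSON string as a parsed object, and writes re-serialize
    # the submitted object to JSON into the 'variables' field.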
class InventoryVariableDataSerializer(BaseVariableDataSerializer):
class Meta:
model = Inventory
class HostVariableDataSerializer(BaseVariableDataSerializer):
class Meta:
model = Host
class GroupVariableDataSerializer(BaseVariableDataSerializer):
class Meta:
model = Group
class CustomInventoryScriptSerializer(BaseSerializer):
script = serializers.CharField(trim_whitespace=False)
show_capabilities = ['edit', 'delete', 'copy']
capabilities_prefetch = [
{'edit': 'admin'}
]
class Meta:
model = CustomInventoryScript
fields = ('*', "script", "organization")
def validate_script(self, value):
if not value.startswith("#!"):
raise serializers.ValidationError(_('Script must begin with a hashbang sequence: i.e.... #!/usr/bin/env python'))
return value
def to_representation(self, obj):
ret = super(CustomInventoryScriptSerializer, self).to_representation(obj)
if obj is None:
return ret
request = self.context.get('request', None)
if request.user not in obj.admin_role and \
not request.user.is_superuser and \
not request.user.is_system_auditor and \
not (obj.organization is not None and request.user in obj.organization.auditor_role):
ret['script'] = None
return ret
def get_related(self, obj):
res = super(CustomInventoryScriptSerializer, self).get_related(obj)
res.update(dict(
object_roles = self.reverse('api:inventory_script_object_roles_list', kwargs={'pk': obj.pk}),
))
if self.version > 1:
res['copy'] = self.reverse('api:inventory_script_copy', kwargs={'pk': obj.pk})
if obj.organization:
res['organization'] = self.reverse('api:organization_detail', kwargs={'pk': obj.organization.pk})
return res
class InventorySourceOptionsSerializer(BaseSerializer):
credential = DeprecatedCredentialField(
help_text=_('Cloud credential to use for inventory updates.')
)
class Meta:
fields = ('*', 'source', 'source_path', 'source_script', 'source_vars', 'credential',
'source_regions', 'instance_filters', 'group_by', 'overwrite', 'overwrite_vars',
'custom_virtualenv', 'timeout', 'verbosity')
def get_related(self, obj):
res = super(InventorySourceOptionsSerializer, self).get_related(obj)
if obj.credential: # TODO: remove when 'credential' field is removed
res['credential'] = self.reverse('api:credential_detail',
kwargs={'pk': obj.credential})
if obj.source_script:
res['source_script'] = self.reverse('api:inventory_script_detail', kwargs={'pk': obj.source_script.pk})
return res
def validate_source_vars(self, value):
ret = vars_validate_or_raise(value)
for env_k in parse_yaml_or_json(value):
if env_k in settings.INV_ENV_VARIABLE_BLACKLIST:
raise serializers.ValidationError(_("`{}` is a prohibited environment variable".format(env_k)))
return ret
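    # e.g. source_vars such as '{"some_var": "value"}' pass, but setting any key that
    # appears in settings.INV_ENV_VARIABLE_BLACKLIST is rejected with the error above.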
def validate(self, attrs):
# TODO: Validate source, validate source_regions
errors = {}
source = attrs.get('source', self.instance and self.instance.source or '')
source_script = attrs.get('source_script', self.instance and self.instance.source_script or '')
if source == 'custom':
if source_script is None or source_script == '':
errors['source_script'] = _("If 'source' is 'custom', 'source_script' must be provided.")
else:
try:
if not self.instance:
dest_inventory = attrs.get('inventory', None)
if not dest_inventory:
errors['inventory'] = _("Must provide an inventory.")
else:
dest_inventory = self.instance.inventory
if dest_inventory and source_script.organization != dest_inventory.organization:
errors['source_script'] = _("The 'source_script' does not belong to the same organization as the inventory.")
except Exception:
errors['source_script'] = _("'source_script' doesn't exist.")
logger.exception('Problem processing source_script validation.')
if errors:
raise serializers.ValidationError(errors)
return super(InventorySourceOptionsSerializer, self).validate(attrs)
# TODO: remove when old 'credential' fields are removed
def get_summary_fields(self, obj):
summary_fields = super(InventorySourceOptionsSerializer, self).get_summary_fields(obj)
all_creds = []
if 'credential' in summary_fields:
cred = obj.get_cloud_credential()
if cred:
summarized_cred = {
'id': cred.id, 'name': cred.name, 'description': cred.description,
'kind': cred.kind, 'cloud': True
}
summary_fields['credential'] = summarized_cred
all_creds.append(summarized_cred)
if self.version > 1:
summary_fields['credential']['credential_type_id'] = cred.credential_type_id
else:
summary_fields.pop('credential')
if self.version > 1:
summary_fields['credentials'] = all_creds
return summary_fields
class InventorySourceSerializer(UnifiedJobTemplateSerializer, InventorySourceOptionsSerializer):
status = serializers.ChoiceField(choices=InventorySource.INVENTORY_SOURCE_STATUS_CHOICES, read_only=True)
last_update_failed = serializers.BooleanField(read_only=True)
last_updated = serializers.DateTimeField(read_only=True)
show_capabilities = ['start', 'schedule', 'edit', 'delete']
capabilities_prefetch = [
{'admin': 'inventory.admin'},
{'start': 'inventory.update'}
]
group = serializers.SerializerMethodField(
help_text=_('Automatic group relationship, will be removed in 3.3'))
class Meta:
model = InventorySource
fields = ('*', 'name', 'inventory', 'update_on_launch', 'update_cache_timeout',
'source_project', 'update_on_project_update') + \
('last_update_failed', 'last_updated', 'group') # Backwards compatibility.
def get_related(self, obj):
res = super(InventorySourceSerializer, self).get_related(obj)
res.update(dict(
update = self.reverse('api:inventory_source_update_view', kwargs={'pk': obj.pk}),
inventory_updates = self.reverse('api:inventory_source_updates_list', kwargs={'pk': obj.pk}),
schedules = self.reverse('api:inventory_source_schedules_list', kwargs={'pk': obj.pk}),
activity_stream = self.reverse('api:inventory_source_activity_stream_list', kwargs={'pk': obj.pk}),
hosts = self.reverse('api:inventory_source_hosts_list', kwargs={'pk': obj.pk}),
groups = self.reverse('api:inventory_source_groups_list', kwargs={'pk': obj.pk}),
notification_templates_any = self.reverse('api:inventory_source_notification_templates_any_list', kwargs={'pk': obj.pk}),
notification_templates_success = self.reverse('api:inventory_source_notification_templates_success_list', kwargs={'pk': obj.pk}),
notification_templates_error = self.reverse('api:inventory_source_notification_templates_error_list', kwargs={'pk': obj.pk}),
))
if obj.inventory:
res['inventory'] = self.reverse('api:inventory_detail', kwargs={'pk': obj.inventory.pk})
if obj.source_project_id is not None:
res['source_project'] = self.reverse('api:project_detail', kwargs={'pk': obj.source_project.pk})
# Backwards compatibility.
if obj.current_update:
res['current_update'] = self.reverse('api:inventory_update_detail',
kwargs={'pk': obj.current_update.pk})
if obj.last_update:
res['last_update'] = self.reverse('api:inventory_update_detail',
kwargs={'pk': obj.last_update.pk})
if self.version == 1: # TODO: remove in 3.3
if obj.deprecated_group:
res['group'] = self.reverse('api:group_detail', kwargs={'pk': obj.deprecated_group.pk})
else:
res['credentials'] = self.reverse('api:inventory_source_credentials_list', kwargs={'pk': obj.pk})
return res
def get_fields(self): # TODO: remove in 3.3
fields = super(InventorySourceSerializer, self).get_fields()
if self.version > 1:
fields.pop('group', None)
return fields
def get_summary_fields(self, obj): # TODO: remove in 3.3
summary_fields = super(InventorySourceSerializer, self).get_summary_fields(obj)
if self.version == 1 and obj.deprecated_group_id:
g = obj.deprecated_group
summary_fields['group'] = {}
for field in SUMMARIZABLE_FK_FIELDS['group']:
fval = getattr(g, field, None)
if fval is not None:
summary_fields['group'][field] = fval
return summary_fields
def get_group(self, obj): # TODO: remove in 3.3
if obj.deprecated_group:
return obj.deprecated_group.id
return None
def build_relational_field(self, field_name, relation_info):
field_class, field_kwargs = super(InventorySourceSerializer, self).build_relational_field(field_name, relation_info)
# SCM Project and inventory are read-only unless creating a new inventory.
if self.instance and field_name == 'inventory':
field_kwargs['read_only'] = True
field_kwargs.pop('queryset', None)
return field_class, field_kwargs
# TODO: remove when old 'credential' fields are removed
def build_field(self, field_name, info, model_class, nested_depth):
        # We have to special-case this field so that DRF does not automatically make it
        # read-only just because it is a property on the model.
if field_name == 'credential':
return self.build_standard_field(field_name, self.credential)
return super(InventorySourceOptionsSerializer, self).build_field(field_name, info, model_class, nested_depth)
def to_representation(self, obj):
ret = super(InventorySourceSerializer, self).to_representation(obj)
if obj is None:
return ret
if 'inventory' in ret and not obj.inventory:
ret['inventory'] = None
return ret
def validate_source_project(self, value):
if value and value.scm_type == '':
raise serializers.ValidationError(_("Cannot use manual project for SCM-based inventory."))
return value
def validate_source(self, value):
if value == '':
raise serializers.ValidationError(_(
"Manual inventory sources are created automatically when a group is created in the v1 API."))
return value
def validate_update_on_project_update(self, value):
if value and self.instance and self.instance.schedules.exists():
raise serializers.ValidationError(_("Setting not compatible with existing schedules."))
return value
def validate_inventory(self, value):
if value and value.kind == 'smart':
raise serializers.ValidationError({"detail": _("Cannot create Inventory Source for Smart Inventory")})
return value
# TODO: remove when old 'credential' fields are removed
def create(self, validated_data):
deprecated_fields = {}
if 'credential' in validated_data:
deprecated_fields['credential'] = validated_data.pop('credential')
obj = super(InventorySourceSerializer, self).create(validated_data)
if deprecated_fields:
self._update_deprecated_fields(deprecated_fields, obj)
return obj
# TODO: remove when old 'credential' fields are removed
def update(self, obj, validated_data):
deprecated_fields = {}
if 'credential' in validated_data:
deprecated_fields['credential'] = validated_data.pop('credential')
obj = super(InventorySourceSerializer, self).update(obj, validated_data)
if deprecated_fields:
self._update_deprecated_fields(deprecated_fields, obj)
return obj
# TODO: remove when old 'credential' fields are removed
def _update_deprecated_fields(self, fields, obj):
if 'credential' in fields:
new_cred = fields['credential']
existing = obj.credentials.all()
if new_cred not in existing:
for cred in existing:
# Remove all other cloud credentials
obj.credentials.remove(cred)
if new_cred:
# Add new credential
obj.credentials.add(new_cred)
def validate(self, attrs):
deprecated_fields = {}
if 'credential' in attrs: # TODO: remove when 'credential' field removed
deprecated_fields['credential'] = attrs.pop('credential')
def get_field_from_model_or_attrs(fd):
return attrs.get(fd, self.instance and getattr(self.instance, fd) or None)
if get_field_from_model_or_attrs('source') != 'scm':
redundant_scm_fields = list(filter(
lambda x: attrs.get(x, None),
['source_project', 'source_path', 'update_on_project_update']
))
if redundant_scm_fields:
raise serializers.ValidationError(
{"detail": _("Cannot set %s if not SCM type." % ' '.join(redundant_scm_fields))}
)
attrs = super(InventorySourceSerializer, self).validate(attrs)
# Check type consistency of source and cloud credential, if provided
if 'credential' in deprecated_fields: # TODO: remove when v2 API is deprecated
cred = deprecated_fields['credential']
attrs['credential'] = cred
if cred is not None:
cred = Credential.objects.get(pk=cred)
view = self.context.get('view', None)
if (not view) or (not view.request) or (view.request.user not in cred.use_role):
raise PermissionDenied()
cred_error = InventorySource.cloud_credential_validation(
get_field_from_model_or_attrs('source'),
cred
)
if cred_error:
raise serializers.ValidationError({"credential": cred_error})
return attrs
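    # e.g. submitting source='ec2' together with source_project or source_path trips the
    # "Cannot set ... if not SCM type." error above; those fields only apply when
    # source == 'scm'.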
class InventorySourceUpdateSerializer(InventorySourceSerializer):
can_update = serializers.BooleanField(read_only=True)
class Meta:
fields = ('can_update',)
class InventoryUpdateSerializer(UnifiedJobSerializer, InventorySourceOptionsSerializer):
custom_virtualenv = serializers.ReadOnlyField()
class Meta:
model = InventoryUpdate
fields = ('*', 'inventory', 'inventory_source', 'license_error', 'org_host_limit_error',
'source_project_update', 'custom_virtualenv', '-controller_node',)
def get_related(self, obj):
res = super(InventoryUpdateSerializer, self).get_related(obj)
try:
res.update(dict(
inventory_source = self.reverse(
'api:inventory_source_detail', kwargs={'pk': obj.inventory_source.pk}
),
))
except ObjectDoesNotExist:
pass
res.update(dict(
cancel = self.reverse('api:inventory_update_cancel', kwargs={'pk': obj.pk}),
notifications = self.reverse('api:inventory_update_notifications_list', kwargs={'pk': obj.pk}),
events = self.reverse('api:inventory_update_events_list', kwargs={'pk': obj.pk}),
))
if obj.source_project_update_id:
res['source_project_update'] = self.reverse('api:project_update_detail',
kwargs={'pk': obj.source_project_update.pk})
if obj.inventory:
res['inventory'] = self.reverse('api:inventory_detail', kwargs={'pk': obj.inventory.pk})
if self.version > 1:
res['credentials'] = self.reverse('api:inventory_update_credentials_list', kwargs={'pk': obj.pk})
return res
class InventoryUpdateDetailSerializer(InventoryUpdateSerializer):
source_project = serializers.SerializerMethodField(
help_text=_('The project used for this job.'),
method_name='get_source_project_id'
)
class Meta:
model = InventoryUpdate
fields = ('*', 'source_project',)
def get_source_project(self, obj):
return getattrd(obj, 'source_project_update.unified_job_template', None)
def get_source_project_id(self, obj):
return getattrd(obj, 'source_project_update.unified_job_template.id', None)
def get_related(self, obj):
res = super(InventoryUpdateDetailSerializer, self).get_related(obj)
source_project_id = self.get_source_project_id(obj)
if source_project_id:
res['source_project'] = self.reverse('api:project_detail', kwargs={'pk': source_project_id})
return res
def get_summary_fields(self, obj):
summary_fields = super(InventoryUpdateDetailSerializer, self).get_summary_fields(obj)
summary_obj = self.get_source_project(obj)
if summary_obj:
summary_fields['source_project'] = {}
for field in SUMMARIZABLE_FK_FIELDS['project']:
value = getattr(summary_obj, field, None)
if value is not None:
summary_fields['source_project'][field] = value
return summary_fields
class InventoryUpdateListSerializer(InventoryUpdateSerializer, UnifiedJobListSerializer):
class Meta:
model = InventoryUpdate
fields = ('*', '-controller_node') # field removal undone by UJ serializer
class InventoryUpdateCancelSerializer(InventoryUpdateSerializer):
can_cancel = serializers.BooleanField(read_only=True)
class Meta:
fields = ('can_cancel',)
class TeamSerializer(BaseSerializer):
show_capabilities = ['edit', 'delete']
class Meta:
model = Team
fields = ('*', 'organization')
def get_related(self, obj):
res = super(TeamSerializer, self).get_related(obj)
res.update(dict(
projects = self.reverse('api:team_projects_list', kwargs={'pk': obj.pk}),
users = self.reverse('api:team_users_list', kwargs={'pk': obj.pk}),
credentials = self.reverse('api:team_credentials_list', kwargs={'pk': obj.pk}),
roles = self.reverse('api:team_roles_list', kwargs={'pk': obj.pk}),
object_roles = self.reverse('api:team_object_roles_list', kwargs={'pk': obj.pk}),
activity_stream = self.reverse('api:team_activity_stream_list', kwargs={'pk': obj.pk}),
access_list = self.reverse('api:team_access_list', kwargs={'pk': obj.pk}),
))
if obj.organization:
res['organization'] = self.reverse('api:organization_detail', kwargs={'pk': obj.organization.pk})
return res
def to_representation(self, obj):
ret = super(TeamSerializer, self).to_representation(obj)
if obj is not None and 'organization' in ret and not obj.organization:
ret['organization'] = None
return ret
class RoleSerializer(BaseSerializer):
class Meta:
model = Role
fields = ('*', '-created', '-modified')
read_only_fields = ('id', 'role_field', 'description', 'name')
def to_representation(self, obj):
ret = super(RoleSerializer, self).to_representation(obj)
if obj.object_id:
content_object = obj.content_object
if hasattr(content_object, 'username'):
ret['summary_fields']['resource_name'] = obj.content_object.username
if hasattr(content_object, 'name'):
ret['summary_fields']['resource_name'] = obj.content_object.name
content_model = obj.content_type.model_class()
ret['summary_fields']['resource_type'] = get_type_for_model(content_model)
ret['summary_fields']['resource_type_display_name'] = content_model._meta.verbose_name.title()
return ret
def get_related(self, obj):
ret = super(RoleSerializer, self).get_related(obj)
ret['users'] = self.reverse('api:role_users_list', kwargs={'pk': obj.pk})
ret['teams'] = self.reverse('api:role_teams_list', kwargs={'pk': obj.pk})
try:
if obj.content_object:
ret.update(reverse_gfk(obj.content_object, self.context.get('request')))
except AttributeError:
            # An AttributeError happens if our content_object points at a model
            # instance that no longer exists. That is dirty data and ideally should
            # not exist, but if it does, avoid blowing up here.
pass
return ret
class RoleSerializerWithParentAccess(RoleSerializer):
show_capabilities = ['unattach']
class ResourceAccessListElementSerializer(UserSerializer):
show_capabilities = [] # Clear fields from UserSerializer parent class
def to_representation(self, user):
'''
With this method we derive "direct" and "indirect" access lists. Contained
in the direct access list are all the roles the user is a member of, and
all of the roles that are directly granted to any teams that the user is a
member of.
The indirect access list is a list of all of the roles that the user is
a member of that are ancestors of any roles that grant permissions to
the resource.
'''
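        # Concretely (hypothetical example): for a credential, a role the user holds on
        # the credential itself, or one granted to a team the user belongs to, shows up
        # under summary_fields['direct_access']; an organization-level role that is only
        # an ancestor of the credential's roles shows up under 'indirect_access'.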
ret = super(ResourceAccessListElementSerializer, self).to_representation(user)
obj = self.context['view'].get_parent_object()
if self.context['view'].request is not None:
requesting_user = self.context['view'].request.user
else:
requesting_user = None
if 'summary_fields' not in ret:
ret['summary_fields'] = {}
def format_role_perm(role):
role_dict = { 'id': role.id, 'name': role.name, 'description': role.description}
try:
role_dict['resource_name'] = role.content_object.name
role_dict['resource_type'] = get_type_for_model(role.content_type.model_class())
role_dict['related'] = reverse_gfk(role.content_object, self.context.get('request'))
except AttributeError:
pass
if role.content_type is not None:
role_dict['user_capabilities'] = {'unattach': requesting_user.can_access(
Role, 'unattach', role, user, 'members', data={}, skip_sub_obj_read_check=False)}
else:
# Singleton roles should not be managed from this view, as per copy/edit rework spec
role_dict['user_capabilities'] = {'unattach': False}
return { 'role': role_dict, 'descendant_roles': get_roles_on_resource(obj, role)}
def format_team_role_perm(naive_team_role, permissive_role_ids):
ret = []
team_role = naive_team_role
if naive_team_role.role_field == 'admin_role':
team_role = naive_team_role.content_object.member_role
for role in team_role.children.filter(id__in=permissive_role_ids).all():
role_dict = {
'id': role.id,
'name': role.name,
'description': role.description,
'team_id': team_role.object_id,
'team_name': team_role.content_object.name,
'team_organization_name': team_role.content_object.organization.name,
}
if role.content_type is not None:
role_dict['resource_name'] = role.content_object.name
role_dict['resource_type'] = get_type_for_model(role.content_type.model_class())
role_dict['related'] = reverse_gfk(role.content_object, self.context.get('request'))
role_dict['user_capabilities'] = {'unattach': requesting_user.can_access(
Role, 'unattach', role, team_role, 'parents', data={}, skip_sub_obj_read_check=False)}
else:
# Singleton roles should not be managed from this view, as per copy/edit rework spec
role_dict['user_capabilities'] = {'unattach': False}
ret.append({ 'role': role_dict, 'descendant_roles': get_roles_on_resource(obj, team_role)})
return ret
team_content_type = ContentType.objects.get_for_model(Team)
content_type = ContentType.objects.get_for_model(obj)
direct_permissive_role_ids = Role.objects.filter(content_type=content_type, object_id=obj.id).values_list('id', flat=True)
all_permissive_role_ids = Role.objects.filter(content_type=content_type, object_id=obj.id).values_list('ancestors__id', flat=True)
direct_access_roles = user.roles \
.filter(id__in=direct_permissive_role_ids).all()
direct_team_roles = Role.objects \
.filter(content_type=team_content_type,
members=user,
children__in=direct_permissive_role_ids)
if content_type == team_content_type:
# When looking at the access list for a team, exclude the entries
# for that team. This exists primarily so we don't list the read role
# as a direct role when a user is a member or admin of a team
direct_team_roles = direct_team_roles.exclude(
children__content_type=team_content_type,
children__object_id=obj.id
)
indirect_team_roles = Role.objects \
.filter(content_type=team_content_type,
members=user,
children__in=all_permissive_role_ids) \
.exclude(id__in=direct_team_roles)
indirect_access_roles = user.roles \
.filter(id__in=all_permissive_role_ids) \
.exclude(id__in=direct_permissive_role_ids) \
.exclude(id__in=direct_team_roles) \
.exclude(id__in=indirect_team_roles)
ret['summary_fields']['direct_access'] \
= [format_role_perm(r) for r in direct_access_roles.distinct()] \
+ [y for x in (format_team_role_perm(r, direct_permissive_role_ids) for r in direct_team_roles.distinct()) for y in x] \
+ [y for x in (format_team_role_perm(r, all_permissive_role_ids) for r in indirect_team_roles.distinct()) for y in x]
ret['summary_fields']['indirect_access'] \
= [format_role_perm(r) for r in indirect_access_roles.distinct()]
return ret
class CredentialTypeSerializer(BaseSerializer):
show_capabilities = ['edit', 'delete']
managed_by_tower = serializers.ReadOnlyField()
class Meta:
model = CredentialType
fields = ('*', 'kind', 'namespace', 'name', 'managed_by_tower', 'inputs',
'injectors')
def validate(self, attrs):
if self.instance and self.instance.managed_by_tower:
raise PermissionDenied(
detail=_("Modifications not allowed for managed credential types")
)
if self.instance and self.instance.credentials.exists():
if 'inputs' in attrs and attrs['inputs'] != self.instance.inputs:
raise PermissionDenied(
detail= _("Modifications to inputs are not allowed for credential types that are in use")
)
ret = super(CredentialTypeSerializer, self).validate(attrs)
if 'kind' in attrs and attrs['kind'] not in ('cloud', 'net'):
raise serializers.ValidationError({
"kind": _("Must be 'cloud' or 'net', not %s") % attrs['kind']
})
fields = attrs.get('inputs', {}).get('fields', [])
for field in fields:
if field.get('ask_at_runtime', False):
raise serializers.ValidationError({"inputs": _("'ask_at_runtime' is not supported for custom credentials.")})
return ret
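    # A hypothetical custom credential type payload that passes the checks above
    # (field and variable names are illustrative):
    #   {'name': 'My API', 'kind': 'cloud',
    #    'inputs': {'fields': [{'id': 'api_token', 'label': 'API Token',
    #                           'type': 'string', 'secret': True}]},
    #    'injectors': {'env': {'MY_API_TOKEN': '{{ api_token }}'}}}
    # A field carrying 'ask_at_runtime' would be rejected.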
def get_related(self, obj):
res = super(CredentialTypeSerializer, self).get_related(obj)
res['credentials'] = self.reverse(
'api:credential_type_credential_list',
kwargs={'pk': obj.pk}
)
res['activity_stream'] = self.reverse(
'api:credential_type_activity_stream_list',
kwargs={'pk': obj.pk}
)
return res
def to_representation(self, data):
value = super(CredentialTypeSerializer, self).to_representation(data)
# translate labels and help_text for credential fields "managed by Tower"
if value.get('managed_by_tower'):
value['name'] = _(value['name'])
for field in value.get('inputs', {}).get('fields', []):
field['label'] = _(field['label'])
if 'help_text' in field:
field['help_text'] = _(field['help_text'])
return value
def filter_field_metadata(self, fields, method):
# API-created/modified CredentialType kinds are limited to
# `cloud` and `net`
if method in ('PUT', 'POST'):
fields['kind']['choices'] = list(filter(
lambda choice: choice[0] in ('cloud', 'net'),
fields['kind']['choices']
))
return fields
# TODO: remove when API v1 is removed
class V1CredentialFields(BaseSerializer, metaclass=BaseSerializerMetaclass):
class Meta:
model = Credential
fields = ('*', 'kind', 'cloud', 'host', 'username',
'password', 'security_token', 'project', 'domain',
'ssh_key_data', 'ssh_key_unlock', 'become_method',
'become_username', 'become_password', 'vault_password',
'subscription', 'tenant', 'secret', 'client', 'authorize',
'authorize_password')
def build_field(self, field_name, info, model_class, nested_depth):
if field_name in V1Credential.FIELDS:
return self.build_standard_field(field_name,
V1Credential.FIELDS[field_name])
return super(V1CredentialFields, self).build_field(field_name, info, model_class, nested_depth)
class V2CredentialFields(BaseSerializer, metaclass=BaseSerializerMetaclass):
class Meta:
model = Credential
fields = ('*', 'credential_type', 'inputs')
extra_kwargs = {
'credential_type': {
'label': _('Credential Type'),
},
}
class CredentialSerializer(BaseSerializer):
show_capabilities = ['edit', 'delete', 'copy', 'use']
capabilities_prefetch = ['admin', 'use']
class Meta:
model = Credential
fields = ('*', 'organization')
def get_fields(self):
fields = super(CredentialSerializer, self).get_fields()
# TODO: remove when API v1 is removed
if self.version == 1:
fields.update(V1CredentialFields().get_fields())
else:
fields.update(V2CredentialFields().get_fields())
return fields
def to_representation(self, data):
value = super(CredentialSerializer, self).to_representation(data)
# TODO: remove when API v1 is removed
if self.version == 1:
if value.get('kind') == 'vault':
value['kind'] = 'ssh'
for field in V1Credential.PASSWORD_FIELDS:
if field in value and force_text(value[field]).startswith('$encrypted$'):
value[field] = '$encrypted$'
if 'inputs' in value:
value['inputs'] = data.display_inputs()
return value
def get_related(self, obj):
res = super(CredentialSerializer, self).get_related(obj)
if obj.organization:
res['organization'] = self.reverse('api:organization_detail', kwargs={'pk': obj.organization.pk})
res.update(dict(
activity_stream = self.reverse('api:credential_activity_stream_list', kwargs={'pk': obj.pk}),
access_list = self.reverse('api:credential_access_list', kwargs={'pk': obj.pk}),
object_roles = self.reverse('api:credential_object_roles_list', kwargs={'pk': obj.pk}),
owner_users = self.reverse('api:credential_owner_users_list', kwargs={'pk': obj.pk}),
owner_teams = self.reverse('api:credential_owner_teams_list', kwargs={'pk': obj.pk}),
))
if self.version > 1:
res['copy'] = self.reverse('api:credential_copy', kwargs={'pk': obj.pk})
res['input_sources'] = self.reverse('api:credential_input_source_sublist', kwargs={'pk': obj.pk})
# TODO: remove when API v1 is removed
if self.version > 1:
res.update(dict(
credential_type = self.reverse('api:credential_type_detail', kwargs={'pk': obj.credential_type.pk}),
))
parents = [role for role in obj.admin_role.parents.all() if role.object_id is not None]
if parents:
res.update({parents[0].content_type.name:parents[0].content_object.get_absolute_url(self.context.get('request'))})
elif len(obj.admin_role.members.all()) > 0:
user = obj.admin_role.members.all()[0]
res.update({'user': self.reverse('api:user_detail', kwargs={'pk': user.pk})})
return res
def get_summary_fields(self, obj):
summary_dict = super(CredentialSerializer, self).get_summary_fields(obj)
summary_dict['owners'] = []
for user in obj.admin_role.members.all():
summary_dict['owners'].append({
'id': user.pk,
'type': 'user',
'name': user.username,
'description': ' '.join([user.first_name, user.last_name]),
'url': self.reverse('api:user_detail', kwargs={'pk': user.pk}),
})
for parent in [role for role in obj.admin_role.parents.all() if role.object_id is not None]:
summary_dict['owners'].append({
'id': parent.content_object.pk,
'type': camelcase_to_underscore(parent.content_object.__class__.__name__),
'name': parent.content_object.name,
'description': parent.content_object.description,
'url': parent.content_object.get_absolute_url(self.context.get('request')),
})
return summary_dict
def get_validation_exclusions(self, obj=None):
# CredentialType is now part of validation; legacy v1 fields (e.g.,
# 'username', 'password') in JSON POST payloads use the
# CredentialType's inputs definition to determine their validity
ret = super(CredentialSerializer, self).get_validation_exclusions(obj)
for field in ('credential_type', 'inputs'):
if field in ret:
ret.remove(field)
return ret
def to_internal_value(self, data):
# TODO: remove when API v1 is removed
if 'credential_type' not in data and self.version == 1:
# If `credential_type` is not provided, assume the payload is a
# v1 credential payload that specifies a `kind` and a flat list
# of field values
#
# In this scenario, we should automatically detect the proper
# CredentialType based on the provided values
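            #
            # For example, a hypothetical v1 payload such as
            #     {'name': 'My Machine Cred', 'kind': 'ssh', 'username': 'admin', 'password': 'secret'}
            # is matched to a CredentialType via from_v1_kind() and its flat field
            # values are then validated against that type's inputs definition.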
kind = data.get('kind', 'ssh')
credential_type = CredentialType.from_v1_kind(kind, data)
if credential_type is None:
raise serializers.ValidationError({"kind": _('"%s" is not a valid choice' % kind)})
data['credential_type'] = credential_type.pk
value = OrderedDict(
list({'credential_type': credential_type}.items()) +
list(super(CredentialSerializer, self).to_internal_value(data).items())
)
# Make a set of the keys in the POST/PUT payload
# - Subtract real fields (name, organization, inputs)
# - Subtract virtual v1 fields defined on the determined credential
# type (username, password, etc...)
# - Any leftovers are invalid for the determined credential type
valid_fields = set(super(CredentialSerializer, self).get_fields().keys())
valid_fields.update(V2CredentialFields().get_fields().keys())
valid_fields.update(['kind', 'cloud'])
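            # For example, a hypothetical stray key such as 'favorite_color' in an
            # ssh-kind payload is rejected here (when it carries a truthy value)
            # because it is neither a serializer field nor one of the credential
            # type's defined fields.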
for field in set(data.keys()) - valid_fields - set(credential_type.defined_fields):
if data.get(field):
raise serializers.ValidationError(
{"detail": _("'{field_name}' is not a valid field for {credential_type_name}").format(
field_name=field, credential_type_name=credential_type.name
)}
)
value.pop('kind', None)
return value
return super(CredentialSerializer, self).to_internal_value(data)
def validate_credential_type(self, credential_type):
if self.instance and credential_type.pk != self.instance.credential_type.pk:
for rel in (
'ad_hoc_commands',
'insights_inventories',
'unifiedjobs',
'unifiedjobtemplates',
'projects',
'projectupdates',
'workflowjobnodes'
):
if getattr(self.instance, rel).count() > 0:
raise ValidationError(
_('You cannot change the credential type of the credential, as it may break the functionality'
' of the resources using it.'),
)
return credential_type
class CredentialSerializerCreate(CredentialSerializer):
user = serializers.PrimaryKeyRelatedField(
queryset=User.objects.all(),
required=False, default=None, write_only=True, allow_null=True,
help_text=_('Write-only field used to add user to owner role. If provided, '
'do not give either team or organization. Only valid for creation.'))
team = serializers.PrimaryKeyRelatedField(
queryset=Team.objects.all(),
required=False, default=None, write_only=True, allow_null=True,
help_text=_('Write-only field used to add team to owner role. If provided, '
'do not give either user or organization. Only valid for creation.'))
organization = serializers.PrimaryKeyRelatedField(
queryset=Organization.objects.all(),
required=False, default=None, allow_null=True,
help_text=_('Inherit permissions from organization roles. If provided on creation, '
'do not give either user or team.'))
class Meta:
model = Credential
fields = ('*', 'user', 'team')
def validate(self, attrs):
owner_fields = set()
for field in ('user', 'team', 'organization'):
if field in attrs:
if attrs[field]:
owner_fields.add(field)
else:
attrs.pop(field)
if not owner_fields:
raise serializers.ValidationError({"detail": _("Missing 'user', 'team', or 'organization'.")})
if attrs.get('team'):
attrs['organization'] = attrs['team'].organization
try:
return super(CredentialSerializerCreate, self).validate(attrs)
except ValidationError as e:
# TODO: remove when API v1 is removed
# If we have an `inputs` error on `/api/v1/`:
# {'inputs': {'username': [...]}}
# ...instead, send back:
# {'username': [...]}
if self.version == 1 and isinstance(e.detail.get('inputs'), dict):
e.detail = e.detail['inputs']
raise e
else:
raise
def create(self, validated_data):
user = validated_data.pop('user', None)
team = validated_data.pop('team', None)
# If our payload contains v1 credential fields, translate to the new
# model
# TODO: remove when API v1 is removed
if self.version == 1:
for attr in (
set(V1Credential.FIELDS) & set(validated_data.keys()) # set intersection
):
validated_data.setdefault('inputs', {})
value = validated_data.pop(attr)
if value:
validated_data['inputs'][attr] = value
credential = super(CredentialSerializerCreate, self).create(validated_data)
if user:
credential.admin_role.members.add(user)
if team:
if not credential.organization or team.organization.id != credential.organization.id:
raise serializers.ValidationError({"detail": _("Credential organization must be set and match before assigning to a team")})
credential.admin_role.parents.add(team.admin_role)
credential.use_role.parents.add(team.member_role)
return credential
class CredentialInputSourceSerializer(BaseSerializer):
show_capabilities = ['delete']
class Meta:
model = CredentialInputSource
fields = (
'*',
'input_field_name',
'metadata',
'target_credential',
'source_credential',
'-name',
)
extra_kwargs = {
'input_field_name': {'required': True},
'target_credential': {'required': True},
'source_credential': {'required': True},
}
def get_related(self, obj):
res = super(CredentialInputSourceSerializer, self).get_related(obj)
res['source_credential'] = obj.source_credential.get_absolute_url(request=self.context.get('request'))
res['target_credential'] = obj.target_credential.get_absolute_url(request=self.context.get('request'))
return res
class UserCredentialSerializerCreate(CredentialSerializerCreate):
class Meta:
model = Credential
fields = ('*', '-team', '-organization')
class TeamCredentialSerializerCreate(CredentialSerializerCreate):
class Meta:
model = Credential
fields = ('*', '-user', '-organization')
class OrganizationCredentialSerializerCreate(CredentialSerializerCreate):
class Meta:
model = Credential
fields = ('*', '-user', '-team')
class LabelsListMixin(object):
def _summary_field_labels(self, obj):
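        # Only the first 10 labels are summarized. If the labels were prefetched,
        # len() on the cached queryset is free; otherwise a COUNT query is only
        # issued when the summary is full, i.e. the total may exceed the slice.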
label_list = [{'id': x.id, 'name': x.name} for x in obj.labels.all()[:10]]
if has_model_field_prefetched(obj, 'labels'):
label_ct = len(obj.labels.all())
else:
if len(label_list) < 10:
label_ct = len(label_list)
else:
label_ct = obj.labels.count()
return {'count': label_ct, 'results': label_list}
def get_summary_fields(self, obj):
res = super(LabelsListMixin, self).get_summary_fields(obj)
res['labels'] = self._summary_field_labels(obj)
return res
# TODO: remove when API v1 is removed
class V1JobOptionsSerializer(BaseSerializer, metaclass=BaseSerializerMetaclass):
class Meta:
model = Credential
fields = ('*', 'cloud_credential', 'network_credential')
V1_FIELDS = ('cloud_credential', 'network_credential',)
def build_field(self, field_name, info, model_class, nested_depth):
if field_name in self.V1_FIELDS:
return (DeprecatedCredentialField, {})
return super(V1JobOptionsSerializer, self).build_field(field_name, info, model_class, nested_depth)
class LegacyCredentialFields(BaseSerializer, metaclass=BaseSerializerMetaclass):
class Meta:
model = Credential
fields = ('*', 'credential', 'vault_credential')
LEGACY_FIELDS = ('credential', 'vault_credential',)
def build_field(self, field_name, info, model_class, nested_depth):
if field_name in self.LEGACY_FIELDS:
return (DeprecatedCredentialField, {})
return super(LegacyCredentialFields, self).build_field(field_name, info, model_class, nested_depth)
class JobOptionsSerializer(LabelsListMixin, BaseSerializer):
class Meta:
fields = ('*', 'job_type', 'inventory', 'project', 'playbook',
'forks', 'limit', 'verbosity', 'extra_vars', 'job_tags',
'force_handlers', 'skip_tags', 'start_at_task', 'timeout',
'use_fact_cache',)
def get_fields(self):
fields = super(JobOptionsSerializer, self).get_fields()
# TODO: remove when API v1 is removed
if self.version == 1:
fields.update(V1JobOptionsSerializer().get_fields())
fields.update(LegacyCredentialFields().get_fields())
return fields
def get_related(self, obj):
res = super(JobOptionsSerializer, self).get_related(obj)
res['labels'] = self.reverse('api:job_template_label_list', kwargs={'pk': obj.pk})
try:
if obj.inventory:
res['inventory'] = self.reverse('api:inventory_detail', kwargs={'pk': obj.inventory.pk})
except ObjectDoesNotExist:
setattr(obj, 'inventory', None)
try:
if obj.project:
res['project'] = self.reverse('api:project_detail', kwargs={'pk': obj.project.pk})
except ObjectDoesNotExist:
setattr(obj, 'project', None)
try:
if obj.credential:
res['credential'] = self.reverse(
'api:credential_detail', kwargs={'pk': obj.credential}
)
except ObjectDoesNotExist:
setattr(obj, 'credential', None)
try:
if obj.vault_credential:
res['vault_credential'] = self.reverse(
'api:credential_detail', kwargs={'pk': obj.vault_credential}
)
except ObjectDoesNotExist:
setattr(obj, 'vault_credential', None)
if self.version > 1:
if isinstance(obj, UnifiedJobTemplate):
res['extra_credentials'] = self.reverse(
'api:job_template_extra_credentials_list',
kwargs={'pk': obj.pk}
)
res['credentials'] = self.reverse(
'api:job_template_credentials_list',
kwargs={'pk': obj.pk}
)
elif isinstance(obj, UnifiedJob):
res['extra_credentials'] = self.reverse('api:job_extra_credentials_list', kwargs={'pk': obj.pk})
res['credentials'] = self.reverse('api:job_credentials_list', kwargs={'pk': obj.pk})
else:
cloud_cred = obj.cloud_credential
if cloud_cred:
res['cloud_credential'] = self.reverse('api:credential_detail', kwargs={'pk': cloud_cred})
net_cred = obj.network_credential
if net_cred:
res['network_credential'] = self.reverse('api:credential_detail', kwargs={'pk': net_cred})
return res
def to_representation(self, obj):
ret = super(JobOptionsSerializer, self).to_representation(obj)
if obj is None:
return ret
if 'inventory' in ret and not obj.inventory:
ret['inventory'] = None
if 'project' in ret and not obj.project:
ret['project'] = None
if 'playbook' in ret:
ret['playbook'] = ''
ret['credential'] = obj.credential
ret['vault_credential'] = obj.vault_credential
if self.version == 1:
ret['cloud_credential'] = obj.cloud_credential
ret['network_credential'] = obj.network_credential
return ret
def create(self, validated_data):
deprecated_fields = {}
for key in ('credential', 'vault_credential', 'cloud_credential', 'network_credential'):
if key in validated_data:
deprecated_fields[key] = validated_data.pop(key)
obj = super(JobOptionsSerializer, self).create(validated_data)
if deprecated_fields: # TODO: remove in 3.3
self._update_deprecated_fields(deprecated_fields, obj)
return obj
def update(self, obj, validated_data):
deprecated_fields = {}
for key in ('credential', 'vault_credential', 'cloud_credential', 'network_credential'):
if key in validated_data:
deprecated_fields[key] = validated_data.pop(key)
obj = super(JobOptionsSerializer, self).update(obj, validated_data)
if deprecated_fields: # TODO: remove in 3.3
self._update_deprecated_fields(deprecated_fields, obj)
return obj
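    # Each deprecated singular field (credential, vault_credential, cloud_credential,
    # network_credential) maps to at most one attached credential of the matching kind,
    # so any existing credentials of that kind are detached before the new one is added
    # (or simply detached when the new value is empty).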
def _update_deprecated_fields(self, fields, obj):
for key, existing in (
('credential', obj.credentials.filter(credential_type__kind='ssh')),
('vault_credential', obj.credentials.filter(credential_type__kind='vault')),
('cloud_credential', obj.cloud_credentials),
('network_credential', obj.network_credentials),
):
if key in fields:
new_cred = fields[key]
if new_cred not in existing:
for cred in existing:
obj.credentials.remove(cred)
if new_cred:
obj.credentials.add(new_cred)
def validate(self, attrs):
v1_credentials = {}
view = self.context.get('view', None)
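        # For each deprecated singular credential field still present in the payload,
        # resolve the pk to a Credential, verify its credential_type kind matches the
        # field, and require the requesting user to hold the credential's use role
        # unless the value is unchanged from the existing instance.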
for attr, kind, error in (
('cloud_credential', 'cloud', _('You must provide a cloud credential.')),
('network_credential', 'net', _('You must provide a network credential.')),
('credential', 'ssh', _('You must provide an SSH credential.')),
('vault_credential', 'vault', _('You must provide a vault credential.')),
):
if kind in ('cloud', 'net') and self.version > 1:
continue # cloud and net deprecated creds are v1 only
if attr in attrs:
v1_credentials[attr] = None
pk = attrs.pop(attr)
if pk:
cred = v1_credentials[attr] = Credential.objects.get(pk=pk)
if cred.credential_type.kind != kind:
raise serializers.ValidationError({attr: error})
if ((not self.instance or cred.pk != getattr(self.instance, attr)) and
view and view.request and view.request.user not in cred.use_role):
raise PermissionDenied()
if 'project' in self.fields and 'playbook' in self.fields:
project = attrs.get('project', self.instance and self.instance.project or None)
playbook = attrs.get('playbook', self.instance and self.instance.playbook or '')
if not project:
raise serializers.ValidationError({'project': _('This field is required.')})
if project and project.scm_type and playbook and force_text(playbook) not in project.playbook_files:
raise serializers.ValidationError({'playbook': _('Playbook not found for project.')})
if project and not project.scm_type and playbook and force_text(playbook) not in project.playbooks:
raise serializers.ValidationError({'playbook': _('Playbook not found for project.')})
if project and not playbook:
raise serializers.ValidationError({'playbook': _('Must select playbook for project.')})
ret = super(JobOptionsSerializer, self).validate(attrs)
ret.update(v1_credentials)
return ret
class JobTemplateMixin(object):
'''
Provide recent jobs and survey details in summary_fields
'''
def _recent_jobs(self, obj):
# Exclude "joblets", jobs that ran as part of a sliced workflow job
uj_qs = obj.unifiedjob_unified_jobs.exclude(job__job_slice_count__gt=1).order_by('-created')
        # We would like to apply .only(), but it does not play well with non_polymorphic
        # .only('id', 'status', 'finished', 'polymorphic_ctype_id')
optimized_qs = uj_qs.non_polymorphic()
return [{
'id': x.id, 'status': x.status, 'finished': x.finished,
# Make type consistent with API top-level key, for instance workflow_job
'type': x.get_real_instance_class()._meta.verbose_name.replace(' ', '_')
} for x in optimized_qs[:10]]
def get_summary_fields(self, obj):
d = super(JobTemplateMixin, self).get_summary_fields(obj)
if obj.survey_spec is not None and ('name' in obj.survey_spec and 'description' in obj.survey_spec):
d['survey'] = dict(title=obj.survey_spec['name'], description=obj.survey_spec['description'])
d['recent_jobs'] = self._recent_jobs(obj)
# TODO: remove in 3.3
if self.version == 1 and 'vault_credential' in d:
if d['vault_credential'].get('kind','') == 'vault':
d['vault_credential']['kind'] = 'ssh'
return d
class JobTemplateSerializer(JobTemplateMixin, UnifiedJobTemplateSerializer, JobOptionsSerializer):
show_capabilities = ['start', 'schedule', 'copy', 'edit', 'delete']
capabilities_prefetch = [
'admin', 'execute',
{'copy': ['project.use', 'inventory.use']}
]
status = serializers.ChoiceField(choices=JobTemplate.JOB_TEMPLATE_STATUS_CHOICES, read_only=True, required=False)
class Meta:
model = JobTemplate
fields = ('*', 'host_config_key', 'ask_diff_mode_on_launch', 'ask_variables_on_launch', 'ask_limit_on_launch', 'ask_tags_on_launch',
'ask_skip_tags_on_launch', 'ask_job_type_on_launch', 'ask_verbosity_on_launch', 'ask_inventory_on_launch',
'ask_credential_on_launch', 'survey_enabled', 'become_enabled', 'diff_mode',
'allow_simultaneous', 'custom_virtualenv', 'job_slice_count')
def get_related(self, obj):
res = super(JobTemplateSerializer, self).get_related(obj)
res.update(dict(
jobs = self.reverse('api:job_template_jobs_list', kwargs={'pk': obj.pk}),
schedules = self.reverse('api:job_template_schedules_list', kwargs={'pk': obj.pk}),
activity_stream = self.reverse('api:job_template_activity_stream_list', kwargs={'pk': obj.pk}),
launch = self.reverse('api:job_template_launch', kwargs={'pk': obj.pk}),
notification_templates_any = self.reverse('api:job_template_notification_templates_any_list', kwargs={'pk': obj.pk}),
notification_templates_success = self.reverse('api:job_template_notification_templates_success_list', kwargs={'pk': obj.pk}),
notification_templates_error = self.reverse('api:job_template_notification_templates_error_list', kwargs={'pk': obj.pk}),
access_list = self.reverse('api:job_template_access_list', kwargs={'pk': obj.pk}),
survey_spec = self.reverse('api:job_template_survey_spec', kwargs={'pk': obj.pk}),
labels = self.reverse('api:job_template_label_list', kwargs={'pk': obj.pk}),
object_roles = self.reverse('api:job_template_object_roles_list', kwargs={'pk': obj.pk}),
instance_groups = self.reverse('api:job_template_instance_groups_list', kwargs={'pk': obj.pk}),
slice_workflow_jobs = self.reverse('api:job_template_slice_workflow_jobs_list', kwargs={'pk': obj.pk}),
))
if self.version > 1:
res['copy'] = self.reverse('api:job_template_copy', kwargs={'pk': obj.pk})
if obj.host_config_key:
res['callback'] = self.reverse('api:job_template_callback', kwargs={'pk': obj.pk})
return res
def validate(self, attrs):
def get_field_from_model_or_attrs(fd):
return attrs.get(fd, self.instance and getattr(self.instance, fd) or None)
inventory = get_field_from_model_or_attrs('inventory')
project = get_field_from_model_or_attrs('project')
if get_field_from_model_or_attrs('host_config_key') and not inventory:
raise serializers.ValidationError({'host_config_key': _(
"Cannot enable provisioning callback without an inventory set."
)})
prompting_error_message = _("Must either set a default value or ask to prompt on launch.")
if project is None:
raise serializers.ValidationError({'project': _("Job Templates must have a project assigned.")})
elif inventory is None and not get_field_from_model_or_attrs('ask_inventory_on_launch'):
raise serializers.ValidationError({'inventory': prompting_error_message})
return super(JobTemplateSerializer, self).validate(attrs)
def validate_extra_vars(self, value):
return vars_validate_or_raise(value)
def get_summary_fields(self, obj):
summary_fields = super(JobTemplateSerializer, self).get_summary_fields(obj)
all_creds = []
# Organize credential data into multitude of deprecated fields
# TODO: remove most of this as v1 is removed
vault_credential = None
credential = None
extra_creds = []
if obj.pk:
for cred in obj.credentials.all():
summarized_cred = {
'id': cred.pk,
'name': cred.name,
'description': cred.description,
'kind': cred.kind,
'cloud': cred.credential_type.kind == 'cloud'
}
if self.version > 1:
summarized_cred['credential_type_id'] = cred.credential_type_id
all_creds.append(summarized_cred)
if cred.credential_type.kind in ('cloud', 'net'):
extra_creds.append(summarized_cred)
elif summarized_cred['kind'] == 'ssh':
credential = summarized_cred
elif summarized_cred['kind'] == 'vault':
vault_credential = summarized_cred
        # Selectively apply those fields, depending on view details
if (self.is_detail_view or self.version == 1) and credential:
summary_fields['credential'] = credential
else:
# Credential could be an empty dictionary in this case
summary_fields.pop('credential', None)
if (self.is_detail_view or self.version == 1) and vault_credential:
summary_fields['vault_credential'] = vault_credential
else:
# vault credential could be empty dictionary
summary_fields.pop('vault_credential', None)
if self.version > 1:
if self.is_detail_view:
summary_fields['extra_credentials'] = extra_creds
summary_fields['credentials'] = all_creds
return summary_fields
class JobTemplateWithSpecSerializer(JobTemplateSerializer):
'''
Used for activity stream entries.
'''
class Meta:
model = JobTemplate
fields = ('*', 'survey_spec')
class JobSerializer(UnifiedJobSerializer, JobOptionsSerializer):
passwords_needed_to_start = serializers.ReadOnlyField()
artifacts = serializers.SerializerMethodField()
class Meta:
model = Job
fields = ('*', 'job_template', 'passwords_needed_to_start',
'allow_simultaneous', 'artifacts', 'scm_revision',
'instance_group', 'diff_mode', 'job_slice_number', 'job_slice_count')
def get_related(self, obj):
res = super(JobSerializer, self).get_related(obj)
res.update(dict(
job_events = self.reverse('api:job_job_events_list', kwargs={'pk': obj.pk}),
job_host_summaries = self.reverse('api:job_job_host_summaries_list', kwargs={'pk': obj.pk}),
activity_stream = self.reverse('api:job_activity_stream_list', kwargs={'pk': obj.pk}),
notifications = self.reverse('api:job_notifications_list', kwargs={'pk': obj.pk}),
labels = self.reverse('api:job_label_list', kwargs={'pk': obj.pk}),
))
try:
if obj.job_template:
res['job_template'] = self.reverse('api:job_template_detail',
kwargs={'pk': obj.job_template.pk})
except ObjectDoesNotExist:
setattr(obj, 'job_template', None)
if (obj.can_start or True) and self.version == 1: # TODO: remove in 3.3
res['start'] = self.reverse('api:job_start', kwargs={'pk': obj.pk})
if obj.can_cancel or True:
res['cancel'] = self.reverse('api:job_cancel', kwargs={'pk': obj.pk})
try:
if obj.project_update:
res['project_update'] = self.reverse(
'api:project_update_detail', kwargs={'pk': obj.project_update.pk}
)
except ObjectDoesNotExist:
pass
if self.version > 1:
res['create_schedule'] = self.reverse('api:job_create_schedule', kwargs={'pk': obj.pk})
res['relaunch'] = self.reverse('api:job_relaunch', kwargs={'pk': obj.pk})
return res
def get_artifacts(self, obj):
if obj:
return obj.display_artifacts()
return {}
def to_internal_value(self, data):
# When creating a new job and a job template is specified, populate any
# fields not provided in data from the job template.
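        # For example, a hypothetical POST of {'job_template': 5, 'limit': 'webservers'}
        # inherits name, inventory, project, playbook, credential, etc. from job
        # template 5, while the explicitly supplied limit wins because setdefault()
        # only fills in missing keys.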
if not self.instance and isinstance(data, dict) and data.get('job_template', False):
try:
job_template = JobTemplate.objects.get(pk=data['job_template'])
except JobTemplate.DoesNotExist:
raise serializers.ValidationError({'job_template': _('Invalid job template.')})
data.setdefault('name', job_template.name)
data.setdefault('description', job_template.description)
data.setdefault('job_type', job_template.job_type)
if job_template.inventory:
data.setdefault('inventory', job_template.inventory.pk)
if job_template.project:
data.setdefault('project', job_template.project.pk)
data.setdefault('playbook', job_template.playbook)
if job_template.credential:
data.setdefault('credential', job_template.credential)
data.setdefault('forks', job_template.forks)
data.setdefault('limit', job_template.limit)
data.setdefault('verbosity', job_template.verbosity)
data.setdefault('extra_vars', job_template.extra_vars)
data.setdefault('job_tags', job_template.job_tags)
data.setdefault('force_handlers', job_template.force_handlers)
data.setdefault('skip_tags', job_template.skip_tags)
data.setdefault('start_at_task', job_template.start_at_task)
return super(JobSerializer, self).to_internal_value(data)
def to_representation(self, obj):
ret = super(JobSerializer, self).to_representation(obj)
if obj is None:
return ret
if 'job_template' in ret and not obj.job_template:
ret['job_template'] = None
if 'extra_vars' in ret:
ret['extra_vars'] = obj.display_extra_vars()
return ret
def get_summary_fields(self, obj):
summary_fields = super(JobSerializer, self).get_summary_fields(obj)
all_creds = []
# Organize credential data into multitude of deprecated fields
# TODO: remove most of this as v1 is removed
vault_credential = None
credential = None
extra_creds = []
if obj.pk:
for cred in obj.credentials.all():
summarized_cred = {
'id': cred.pk,
'name': cred.name,
'description': cred.description,
'kind': cred.kind,
'cloud': cred.credential_type.kind == 'cloud'
}
if self.version > 1:
summarized_cred['credential_type_id'] = cred.credential_type_id
all_creds.append(summarized_cred)
if cred.credential_type.kind in ('cloud', 'net'):
extra_creds.append(summarized_cred)
elif summarized_cred['kind'] == 'ssh':
credential = summarized_cred
elif summarized_cred['kind'] == 'vault':
vault_credential = summarized_cred
        # Selectively apply those fields, depending on view details
if (self.is_detail_view or self.version == 1) and credential:
summary_fields['credential'] = credential
else:
# Credential could be an empty dictionary in this case
summary_fields.pop('credential', None)
if (self.is_detail_view or self.version == 1) and vault_credential:
summary_fields['vault_credential'] = vault_credential
else:
# vault credential could be empty dictionary
summary_fields.pop('vault_credential', None)
if self.version > 1:
if self.is_detail_view:
summary_fields['extra_credentials'] = extra_creds
summary_fields['credentials'] = all_creds
return summary_fields
class JobDetailSerializer(JobSerializer):
host_status_counts = serializers.SerializerMethodField(
help_text=_('A count of hosts uniquely assigned to each status.'),
)
playbook_counts = serializers.SerializerMethodField(
help_text=_('A count of all plays and tasks for the job run.'),
)
custom_virtualenv = serializers.ReadOnlyField()
class Meta:
model = Job
fields = ('*', 'host_status_counts', 'playbook_counts', 'custom_virtualenv')
def get_playbook_counts(self, obj):
task_count = obj.job_events.filter(event='playbook_on_task_start').count()
play_count = obj.job_events.filter(event='playbook_on_play_start').count()
data = {'play_count': play_count, 'task_count': task_count}
return data
def get_host_status_counts(self, obj):
try:
counts = obj.job_events.only('event_data').get(event='playbook_on_stats').get_host_status_counts()
except JobEvent.DoesNotExist:
counts = {}
return counts
class JobCancelSerializer(BaseSerializer):
can_cancel = serializers.BooleanField(read_only=True)
class Meta:
model = Job
fields = ('can_cancel',)
class JobRelaunchSerializer(BaseSerializer):
passwords_needed_to_start = serializers.SerializerMethodField()
retry_counts = serializers.SerializerMethodField()
hosts = serializers.ChoiceField(
required=False, allow_null=True, default='all',
choices=[
('all', _('No change to job limit')),
('failed', _('All failed and unreachable hosts'))
],
write_only=True
)
credential_passwords = VerbatimField(required=True, write_only=True)
class Meta:
model = Job
fields = ('passwords_needed_to_start', 'retry_counts', 'hosts', 'credential_passwords',)
def validate_credential_passwords(self, value):
pnts = self.instance.passwords_needed_to_start
missing = set(pnts) - set(key for key in value if value[key])
if missing:
raise serializers.ValidationError(_(
'Missing passwords needed to start: {}'.format(', '.join(missing))
))
return value
def to_representation(self, obj):
res = super(JobRelaunchSerializer, self).to_representation(obj)
view = self.context.get('view', None)
if hasattr(view, '_raw_data_form_marker'):
password_keys = dict([(p, u'') for p in self.get_passwords_needed_to_start(obj)])
res.update(password_keys)
return res
def get_passwords_needed_to_start(self, obj):
if obj:
return obj.passwords_needed_to_start
return ''
def get_retry_counts(self, obj):
if obj.status in ACTIVE_STATES:
return _('Relaunch by host status not available until job finishes running.')
data = OrderedDict([])
for status in self.fields['hosts'].choices.keys():
data[status] = obj.retry_qs(status).count()
return data
def get_validation_exclusions(self, *args, **kwargs):
r = super(JobRelaunchSerializer, self).get_validation_exclusions(*args, **kwargs)
r.append('credential_passwords')
return r
def validate(self, attrs):
obj = self.instance
if obj.project is None:
raise serializers.ValidationError(dict(errors=[_("Job Template Project is missing or undefined.")]))
if obj.inventory is None or obj.inventory.pending_deletion:
raise serializers.ValidationError(dict(errors=[_("Job Template Inventory is missing or undefined.")]))
attrs = super(JobRelaunchSerializer, self).validate(attrs)
return attrs
class JobCreateScheduleSerializer(BaseSerializer):
can_schedule = serializers.SerializerMethodField()
prompts = serializers.SerializerMethodField()
class Meta:
model = Job
fields = ('can_schedule', 'prompts',)
def get_can_schedule(self, obj):
'''
Need both a job template and job prompts to schedule
'''
return obj.can_schedule
@staticmethod
def _summarize(res_name, obj):
summary = {}
for field in SUMMARIZABLE_FK_FIELDS[res_name]:
summary[field] = getattr(obj, field, None)
return summary
def get_prompts(self, obj):
try:
config = obj.launch_config
ret = config.prompts_dict(display=True)
if 'inventory' in ret:
ret['inventory'] = self._summarize('inventory', ret['inventory'])
if 'credentials' in ret:
all_creds = [self._summarize('credential', cred) for cred in ret['credentials']]
ret['credentials'] = all_creds
return ret
except JobLaunchConfig.DoesNotExist:
            return {'all': _('Unknown, job may have been run before launch configurations were saved.')}
class AdHocCommandSerializer(UnifiedJobSerializer):
class Meta:
model = AdHocCommand
fields = ('*', 'job_type', 'inventory', 'limit', 'credential',
'module_name', 'module_args', 'forks', 'verbosity', 'extra_vars',
'become_enabled', 'diff_mode', '-unified_job_template', '-description')
extra_kwargs = {
'name': {
'read_only': True,
},
}
def get_field_names(self, declared_fields, info):
field_names = super(AdHocCommandSerializer, self).get_field_names(declared_fields, info)
# Meta multiple inheritance and -field_name options don't seem to be
# taking effect above, so remove the undesired fields here.
return tuple(x for x in field_names if x not in ('unified_job_template', 'description'))
def build_standard_field(self, field_name, model_field):
field_class, field_kwargs = super(AdHocCommandSerializer, self).build_standard_field(field_name, model_field)
# Load module name choices dynamically from DB settings.
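        # For example, with a hypothetical setting AD_HOC_COMMANDS = ['command', 'shell'],
        # 'command' becomes the default and the field is optional; if 'command' is not
        # listed, the field is required and no default is offered.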
if field_name == 'module_name':
field_class = serializers.ChoiceField
module_name_choices = [(x, x) for x in settings.AD_HOC_COMMANDS]
module_name_default = 'command' if 'command' in [x[0] for x in module_name_choices] else ''
field_kwargs['choices'] = module_name_choices
field_kwargs['required'] = bool(not module_name_default)
field_kwargs['default'] = module_name_default or serializers.empty
field_kwargs['allow_blank'] = bool(module_name_default)
field_kwargs.pop('max_length', None)
return field_class, field_kwargs
def get_related(self, obj):
res = super(AdHocCommandSerializer, self).get_related(obj)
if obj.inventory_id:
res['inventory'] = self.reverse('api:inventory_detail', kwargs={'pk': obj.inventory_id})
if obj.credential_id:
res['credential'] = self.reverse('api:credential_detail', kwargs={'pk': obj.credential_id})
res.update(dict(
events = self.reverse('api:ad_hoc_command_ad_hoc_command_events_list', kwargs={'pk': obj.pk}),
activity_stream = self.reverse('api:ad_hoc_command_activity_stream_list', kwargs={'pk': obj.pk}),
notifications = self.reverse('api:ad_hoc_command_notifications_list', kwargs={'pk': obj.pk}),
))
res['cancel'] = self.reverse('api:ad_hoc_command_cancel', kwargs={'pk': obj.pk})
res['relaunch'] = self.reverse('api:ad_hoc_command_relaunch', kwargs={'pk': obj.pk})
return res
def to_representation(self, obj):
ret = super(AdHocCommandSerializer, self).to_representation(obj)
if 'inventory' in ret and not obj.inventory_id:
ret['inventory'] = None
if 'credential' in ret and not obj.credential_id:
ret['credential'] = None
# For the UI, only module_name is returned for name, instead of the
# longer module name + module_args format.
if 'name' in ret:
ret['name'] = obj.module_name
return ret
def validate(self, attrs):
ret = super(AdHocCommandSerializer, self).validate(attrs)
return ret
def validate_extra_vars(self, value):
redacted_extra_vars, removed_vars = extract_ansible_vars(value)
if removed_vars:
raise serializers.ValidationError(_(
"{} are prohibited from use in ad hoc commands."
).format(", ".join(sorted(removed_vars, reverse=True))))
return vars_validate_or_raise(value)
class AdHocCommandDetailSerializer(AdHocCommandSerializer):
host_status_counts = serializers.SerializerMethodField(
help_text=_('A count of hosts uniquely assigned to each status.'),
)
class Meta:
model = AdHocCommand
fields = ('*', 'host_status_counts',)
def get_host_status_counts(self, obj):
try:
counts = obj.ad_hoc_command_events.only('event_data').get(event='playbook_on_stats').get_host_status_counts()
except AdHocCommandEvent.DoesNotExist:
counts = {}
return counts
class AdHocCommandCancelSerializer(AdHocCommandSerializer):
can_cancel = serializers.BooleanField(read_only=True)
class Meta:
fields = ('can_cancel',)
class AdHocCommandRelaunchSerializer(AdHocCommandSerializer):
class Meta:
fields = ()
def to_representation(self, obj):
if obj:
return dict([(p, u'') for p in obj.passwords_needed_to_start])
else:
return {}
class SystemJobTemplateSerializer(UnifiedJobTemplateSerializer):
class Meta:
model = SystemJobTemplate
fields = ('*', 'job_type',)
def get_related(self, obj):
res = super(SystemJobTemplateSerializer, self).get_related(obj)
res.update(dict(
jobs = self.reverse('api:system_job_template_jobs_list', kwargs={'pk': obj.pk}),
schedules = self.reverse('api:system_job_template_schedules_list', kwargs={'pk': obj.pk}),
launch = self.reverse('api:system_job_template_launch', kwargs={'pk': obj.pk}),
notification_templates_any = self.reverse('api:system_job_template_notification_templates_any_list', kwargs={'pk': obj.pk}),
notification_templates_success = self.reverse('api:system_job_template_notification_templates_success_list', kwargs={'pk': obj.pk}),
notification_templates_error = self.reverse('api:system_job_template_notification_templates_error_list', kwargs={'pk': obj.pk}),
))
return res
class SystemJobSerializer(UnifiedJobSerializer):
result_stdout = serializers.SerializerMethodField()
class Meta:
model = SystemJob
fields = ('*', 'system_job_template', 'job_type', 'extra_vars', 'result_stdout', '-controller_node',)
def get_related(self, obj):
res = super(SystemJobSerializer, self).get_related(obj)
if obj.system_job_template:
res['system_job_template'] = self.reverse('api:system_job_template_detail',
kwargs={'pk': obj.system_job_template.pk})
res['notifications'] = self.reverse('api:system_job_notifications_list', kwargs={'pk': obj.pk})
if obj.can_cancel or True:
res['cancel'] = self.reverse('api:system_job_cancel', kwargs={'pk': obj.pk})
res['events'] = self.reverse('api:system_job_events_list', kwargs={'pk': obj.pk})
return res
def get_result_stdout(self, obj):
try:
return obj.result_stdout
except StdoutMaxBytesExceeded as e:
return _(
"Standard Output too large to display ({text_size} bytes), "
"only download supported for sizes over {supported_size} bytes.").format(
text_size=e.total, supported_size=e.supported
)
class SystemJobCancelSerializer(SystemJobSerializer):
can_cancel = serializers.BooleanField(read_only=True)
class Meta:
fields = ('can_cancel',)
class WorkflowJobTemplateSerializer(JobTemplateMixin, LabelsListMixin, UnifiedJobTemplateSerializer):
show_capabilities = ['start', 'schedule', 'edit', 'copy', 'delete']
capabilities_prefetch = [
'admin', 'execute',
{'copy': 'organization.workflow_admin'}
]
class Meta:
model = WorkflowJobTemplate
fields = ('*', 'extra_vars', 'organization', 'survey_enabled', 'allow_simultaneous',
'ask_variables_on_launch', 'inventory', 'ask_inventory_on_launch',)
def get_related(self, obj):
res = super(WorkflowJobTemplateSerializer, self).get_related(obj)
res.update(dict(
workflow_jobs = self.reverse('api:workflow_job_template_jobs_list', kwargs={'pk': obj.pk}),
schedules = self.reverse('api:workflow_job_template_schedules_list', kwargs={'pk': obj.pk}),
launch = self.reverse('api:workflow_job_template_launch', kwargs={'pk': obj.pk}),
workflow_nodes = self.reverse('api:workflow_job_template_workflow_nodes_list', kwargs={'pk': obj.pk}),
labels = self.reverse('api:workflow_job_template_label_list', kwargs={'pk': obj.pk}),
activity_stream = self.reverse('api:workflow_job_template_activity_stream_list', kwargs={'pk': obj.pk}),
notification_templates_any = self.reverse('api:workflow_job_template_notification_templates_any_list', kwargs={'pk': obj.pk}),
notification_templates_success = self.reverse('api:workflow_job_template_notification_templates_success_list', kwargs={'pk': obj.pk}),
notification_templates_error = self.reverse('api:workflow_job_template_notification_templates_error_list', kwargs={'pk': obj.pk}),
access_list = self.reverse('api:workflow_job_template_access_list', kwargs={'pk': obj.pk}),
object_roles = self.reverse('api:workflow_job_template_object_roles_list', kwargs={'pk': obj.pk}),
survey_spec = self.reverse('api:workflow_job_template_survey_spec', kwargs={'pk': obj.pk}),
))
if self.version > 1:
res['copy'] = self.reverse('api:workflow_job_template_copy', kwargs={'pk': obj.pk})
if obj.organization:
res['organization'] = self.reverse('api:organization_detail', kwargs={'pk': obj.organization.pk})
return res
def validate_extra_vars(self, value):
return vars_validate_or_raise(value)
class WorkflowJobTemplateWithSpecSerializer(WorkflowJobTemplateSerializer):
'''
Used for activity stream entries.
'''
class Meta:
model = WorkflowJobTemplate
fields = ('*', 'survey_spec')
class WorkflowJobSerializer(LabelsListMixin, UnifiedJobSerializer):
class Meta:
model = WorkflowJob
fields = ('*', 'workflow_job_template', 'extra_vars', 'allow_simultaneous',
'job_template', 'is_sliced_job',
'-execution_node', '-event_processing_finished', '-controller_node',
'inventory',)
def get_related(self, obj):
res = super(WorkflowJobSerializer, self).get_related(obj)
if obj.workflow_job_template:
res['workflow_job_template'] = self.reverse('api:workflow_job_template_detail',
kwargs={'pk': obj.workflow_job_template.pk})
res['notifications'] = self.reverse('api:workflow_job_notifications_list', kwargs={'pk': obj.pk})
if obj.job_template_id:
res['job_template'] = self.reverse('api:job_template_detail', kwargs={'pk': obj.job_template_id})
res['workflow_nodes'] = self.reverse('api:workflow_job_workflow_nodes_list', kwargs={'pk': obj.pk})
res['labels'] = self.reverse('api:workflow_job_label_list', kwargs={'pk': obj.pk})
res['activity_stream'] = self.reverse('api:workflow_job_activity_stream_list', kwargs={'pk': obj.pk})
res['relaunch'] = self.reverse('api:workflow_job_relaunch', kwargs={'pk': obj.pk})
if obj.can_cancel or True:
res['cancel'] = self.reverse('api:workflow_job_cancel', kwargs={'pk': obj.pk})
return res
def to_representation(self, obj):
ret = super(WorkflowJobSerializer, self).to_representation(obj)
if obj is None:
return ret
if 'extra_vars' in ret:
ret['extra_vars'] = obj.display_extra_vars()
return ret
class WorkflowJobListSerializer(WorkflowJobSerializer, UnifiedJobListSerializer):
class Meta:
fields = ('*', '-execution_node', '-controller_node',)
class WorkflowJobCancelSerializer(WorkflowJobSerializer):
can_cancel = serializers.BooleanField(read_only=True)
class Meta:
fields = ('can_cancel',)
class LaunchConfigurationBaseSerializer(BaseSerializer):
job_type = serializers.ChoiceField(allow_blank=True, allow_null=True, required=False, default=None,
choices=NEW_JOB_TYPE_CHOICES)
job_tags = serializers.CharField(allow_blank=True, allow_null=True, required=False, default=None)
limit = serializers.CharField(allow_blank=True, allow_null=True, required=False, default=None)
skip_tags = serializers.CharField(allow_blank=True, allow_null=True, required=False, default=None)
diff_mode = serializers.NullBooleanField(required=False, default=None)
verbosity = serializers.ChoiceField(allow_null=True, required=False, default=None,
choices=VERBOSITY_CHOICES)
exclude_errors = ()
class Meta:
fields = ('*', 'extra_data', 'inventory', # Saved launch-time config fields
                  'job_type', 'job_tags', 'skip_tags', 'limit', 'diff_mode', 'verbosity')
def get_related(self, obj):
res = super(LaunchConfigurationBaseSerializer, self).get_related(obj)
if obj.inventory_id:
res['inventory'] = self.reverse('api:inventory_detail', kwargs={'pk': obj.inventory_id})
res['credentials'] = self.reverse(
'api:{}_credentials_list'.format(get_type_for_model(self.Meta.model)),
kwargs={'pk': obj.pk}
)
return res
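    # Build an unsaved model instance that merges the saved instance's field values
    # with the incoming attrs. Attrs that are not concrete model fields (the prompt
    # pseudo fields) remain set on the mock object and are popped from attrs;
    # validate() later persists them via mock_obj.char_prompts.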
def _build_mock_obj(self, attrs):
mock_obj = self.Meta.model()
if self.instance:
for field in self.instance._meta.fields:
setattr(mock_obj, field.name, getattr(self.instance, field.name))
field_names = set(field.name for field in self.Meta.model._meta.fields)
for field_name, value in list(attrs.items()):
setattr(mock_obj, field_name, value)
if field_name not in field_names:
attrs.pop(field_name)
return mock_obj
def to_representation(self, obj):
ret = super(LaunchConfigurationBaseSerializer, self).to_representation(obj)
if obj is None:
return ret
if 'extra_data' in ret and obj.survey_passwords:
ret['extra_data'] = obj.display_extra_vars()
return ret
def get_summary_fields(self, obj):
summary_fields = super(LaunchConfigurationBaseSerializer, self).get_summary_fields(obj)
# Credential would be an empty dictionary in this case
summary_fields.pop('credential', None)
return summary_fields
def validate(self, attrs):
db_extra_data = {}
if self.instance:
db_extra_data = parse_yaml_or_json(self.instance.extra_data)
attrs = super(LaunchConfigurationBaseSerializer, self).validate(attrs)
ujt = None
if 'unified_job_template' in attrs:
ujt = attrs['unified_job_template']
elif self.instance:
ujt = self.instance.unified_job_template
# build additional field survey_passwords to track redacted variables
password_dict = {}
extra_data = parse_yaml_or_json(attrs.get('extra_data', {}))
if hasattr(ujt, 'survey_password_variables'):
# Prepare additional field survey_passwords for save
for key in ujt.survey_password_variables():
if key in extra_data:
password_dict[key] = REPLACE_STR
# Replace $encrypted$ submissions with db value if exists
if 'extra_data' in attrs:
if password_dict:
if not self.instance or password_dict != self.instance.survey_passwords:
attrs['survey_passwords'] = password_dict.copy()
# Force dict type (cannot preserve YAML formatting if passwords are involved)
# Encrypt the extra_data for save, only current password vars in JT survey
# but first, make a copy or else this is referenced by request.data, and
# user could get encrypted string in form data in API browser
attrs['extra_data'] = extra_data.copy()
encrypt_dict(attrs['extra_data'], password_dict.keys())
# For any raw $encrypted$ string, either
# - replace with existing DB value
# - raise a validation error
# - ignore, if default present
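                # For example, a hypothetical survey password answer submitted as
                #     {'my_secret': '$encrypted$'}
                # keeps the previously saved database value when one exists, raises a
                # validation error when there is neither a stored value nor a survey
                # default, and is otherwise left for the cleanup pass below so the
                # survey default applies.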
for key in password_dict.keys():
if attrs['extra_data'].get(key, None) == REPLACE_STR:
if key not in db_extra_data:
element = ujt.pivot_spec(ujt.survey_spec)[key]
                            # NOTE: validation _of_ the default values of password-type
                            # questions is not done here or on launch; doing so could
                            # leak information about the values, so it should not be added
if not ('default' in element and element['default']):
raise serializers.ValidationError(
{"extra_data": _('Provided variable {} has no database value to replace with.').format(key)})
else:
attrs['extra_data'][key] = db_extra_data[key]
# Build unsaved version of this config, use it to detect prompts errors
mock_obj = self._build_mock_obj(attrs)
accepted, rejected, errors = ujt._accept_or_ignore_job_kwargs(
_exclude_errors=self.exclude_errors, **mock_obj.prompts_dict())
# Remove all unprocessed $encrypted$ strings, indicating default usage
if 'extra_data' in attrs and password_dict:
for key, value in attrs['extra_data'].copy().items():
if value == REPLACE_STR:
if key in password_dict:
attrs['extra_data'].pop(key)
attrs.get('survey_passwords', {}).pop(key, None)
else:
errors.setdefault('extra_vars', []).append(
_('"$encrypted$ is a reserved keyword, may not be used for {var_name}."'.format(key))
)
        # Launch configs name this field 'extra_data' rather than 'extra_vars' for historical reasons
if 'extra_vars' in errors:
errors['extra_data'] = errors.pop('extra_vars')
if errors:
raise serializers.ValidationError(errors)
        # Model `.save` needs the container dict, not the pseudo fields
if mock_obj.char_prompts:
attrs['char_prompts'] = mock_obj.char_prompts
return attrs
class WorkflowJobTemplateNodeSerializer(LaunchConfigurationBaseSerializer):
credential = DeprecatedCredentialField()
success_nodes = serializers.PrimaryKeyRelatedField(many=True, read_only=True)
failure_nodes = serializers.PrimaryKeyRelatedField(many=True, read_only=True)
always_nodes = serializers.PrimaryKeyRelatedField(many=True, read_only=True)
exclude_errors = ('required',) # required variables may be provided by WFJT or on launch
class Meta:
model = WorkflowJobTemplateNode
fields = ('*', 'credential', 'workflow_job_template', '-name', '-description', 'id', 'url', 'related',
'unified_job_template', 'success_nodes', 'failure_nodes', 'always_nodes',)
def get_related(self, obj):
res = super(WorkflowJobTemplateNodeSerializer, self).get_related(obj)
res['success_nodes'] = self.reverse('api:workflow_job_template_node_success_nodes_list', kwargs={'pk': obj.pk})
res['failure_nodes'] = self.reverse('api:workflow_job_template_node_failure_nodes_list', kwargs={'pk': obj.pk})
res['always_nodes'] = self.reverse('api:workflow_job_template_node_always_nodes_list', kwargs={'pk': obj.pk})
if obj.unified_job_template:
res['unified_job_template'] = obj.unified_job_template.get_absolute_url(self.context.get('request'))
try:
res['workflow_job_template'] = self.reverse('api:workflow_job_template_detail', kwargs={'pk': obj.workflow_job_template.pk})
except WorkflowJobTemplate.DoesNotExist:
pass
return res
def build_field(self, field_name, info, model_class, nested_depth):
# have to special-case the field so that DRF will not automagically make it
# read-only because it's a property on the model.
if field_name == 'credential':
return self.build_standard_field(field_name,
self.credential)
return super(WorkflowJobTemplateNodeSerializer, self).build_field(field_name, info, model_class, nested_depth)
def build_relational_field(self, field_name, relation_info):
field_class, field_kwargs = super(WorkflowJobTemplateNodeSerializer, self).build_relational_field(field_name, relation_info)
# workflow_job_template is read-only unless creating a new node.
if self.instance and field_name == 'workflow_job_template':
field_kwargs['read_only'] = True
field_kwargs.pop('queryset', None)
return field_class, field_kwargs
def validate(self, attrs):
deprecated_fields = {}
if 'credential' in attrs: # TODO: remove when v2 API is deprecated
deprecated_fields['credential'] = attrs.pop('credential')
view = self.context.get('view')
attrs = super(WorkflowJobTemplateNodeSerializer, self).validate(attrs)
ujt_obj = None
if 'unified_job_template' in attrs:
ujt_obj = attrs['unified_job_template']
elif self.instance:
ujt_obj = self.instance.unified_job_template
if 'credential' in deprecated_fields: # TODO: remove when v2 API is deprecated
cred = deprecated_fields['credential']
attrs['credential'] = cred
if cred is not None:
if not ujt_obj.ask_credential_on_launch:
raise serializers.ValidationError({"credential": _(
"Related template is not configured to accept credentials on launch.")})
cred = Credential.objects.get(pk=cred)
view = self.context.get('view', None)
if (not view) or (not view.request) or (view.request.user not in cred.use_role):
raise PermissionDenied()
return attrs
def create(self, validated_data): # TODO: remove when v2 API is deprecated
deprecated_fields = {}
if 'credential' in validated_data:
deprecated_fields['credential'] = validated_data.pop('credential')
obj = super(WorkflowJobTemplateNodeSerializer, self).create(validated_data)
if 'credential' in deprecated_fields:
if deprecated_fields['credential']:
obj.credentials.add(deprecated_fields['credential'])
return obj
def update(self, obj, validated_data): # TODO: remove when v2 API is deprecated
deprecated_fields = {}
if 'credential' in validated_data:
deprecated_fields['credential'] = validated_data.pop('credential')
obj = super(WorkflowJobTemplateNodeSerializer, self).update(obj, validated_data)
if 'credential' in deprecated_fields:
existing = obj.credentials.filter(credential_type__kind='ssh')
new_cred = deprecated_fields['credential']
if new_cred not in existing:
for cred in existing:
obj.credentials.remove(cred)
if new_cred:
obj.credentials.add(new_cred)
return obj
class WorkflowJobNodeSerializer(LaunchConfigurationBaseSerializer):
credential = DeprecatedCredentialField()
success_nodes = serializers.PrimaryKeyRelatedField(many=True, read_only=True)
failure_nodes = serializers.PrimaryKeyRelatedField(many=True, read_only=True)
always_nodes = serializers.PrimaryKeyRelatedField(many=True, read_only=True)
class Meta:
model = WorkflowJobNode
fields = ('*', 'credential', 'job', 'workflow_job', '-name', '-description', 'id', 'url', 'related',
'unified_job_template', 'success_nodes', 'failure_nodes', 'always_nodes',
'do_not_run',)
def get_related(self, obj):
res = super(WorkflowJobNodeSerializer, self).get_related(obj)
res['success_nodes'] = self.reverse('api:workflow_job_node_success_nodes_list', kwargs={'pk': obj.pk})
res['failure_nodes'] = self.reverse('api:workflow_job_node_failure_nodes_list', kwargs={'pk': obj.pk})
res['always_nodes'] = self.reverse('api:workflow_job_node_always_nodes_list', kwargs={'pk': obj.pk})
if obj.unified_job_template:
res['unified_job_template'] = obj.unified_job_template.get_absolute_url(self.context.get('request'))
if obj.job:
res['job'] = obj.job.get_absolute_url(self.context.get('request'))
if obj.workflow_job:
res['workflow_job'] = self.reverse('api:workflow_job_detail', kwargs={'pk': obj.workflow_job.pk})
return res
class WorkflowJobNodeListSerializer(WorkflowJobNodeSerializer):
pass
class WorkflowJobNodeDetailSerializer(WorkflowJobNodeSerializer):
pass
class WorkflowJobTemplateNodeDetailSerializer(WorkflowJobTemplateNodeSerializer):
'''
Influence the api browser sample data to not include workflow_job_template
when editing a WorkflowNode.
Note: I was not able to accomplish this through the use of extra_kwargs.
Maybe something to do with workflow_job_template being a relational field?
'''
def build_relational_field(self, field_name, relation_info):
field_class, field_kwargs = super(WorkflowJobTemplateNodeDetailSerializer, self).build_relational_field(field_name, relation_info)
if self.instance and field_name == 'workflow_job_template':
field_kwargs['read_only'] = True
field_kwargs.pop('queryset', None)
return field_class, field_kwargs
class JobListSerializer(JobSerializer, UnifiedJobListSerializer):
pass
class AdHocCommandListSerializer(AdHocCommandSerializer, UnifiedJobListSerializer):
pass
class SystemJobListSerializer(SystemJobSerializer, UnifiedJobListSerializer):
class Meta:
model = SystemJob
fields = ('*', '-controller_node') # field removal undone by UJ serializer
class JobHostSummarySerializer(BaseSerializer):
class Meta:
model = JobHostSummary
fields = ('*', '-name', '-description', 'job', 'host', 'host_name', 'changed',
'dark', 'failures', 'ok', 'processed', 'skipped', 'failed',
'ignored', 'rescued')
def get_related(self, obj):
res = super(JobHostSummarySerializer, self).get_related(obj)
res.update(dict(
job=self.reverse('api:job_detail', kwargs={'pk': obj.job.pk})))
if obj.host is not None:
res.update(dict(
host=self.reverse('api:host_detail', kwargs={'pk': obj.host.pk})
))
return res
def get_summary_fields(self, obj):
d = super(JobHostSummarySerializer, self).get_summary_fields(obj)
try:
d['job']['job_template_id'] = obj.job.job_template.id
d['job']['job_template_name'] = obj.job.job_template.name
except (KeyError, AttributeError):
pass
return d
class JobEventSerializer(BaseSerializer):
event_display = serializers.CharField(source='get_event_display2', read_only=True)
event_level = serializers.IntegerField(read_only=True)
class Meta:
model = JobEvent
fields = ('*', '-name', '-description', 'job', 'event', 'counter',
'event_display', 'event_data', 'event_level', 'failed',
'changed', 'uuid', 'parent_uuid', 'host', 'host_name', 'parent',
'playbook', 'play', 'task', 'role', 'stdout', 'start_line', 'end_line',
'verbosity')
def get_related(self, obj):
res = super(JobEventSerializer, self).get_related(obj)
res.update(dict(
job = self.reverse('api:job_detail', kwargs={'pk': obj.job_id}),
))
if obj.parent_id:
res['parent'] = self.reverse('api:job_event_detail', kwargs={'pk': obj.parent_id})
res['children'] = self.reverse('api:job_event_children_list', kwargs={'pk': obj.pk})
if obj.host_id:
res['host'] = self.reverse('api:host_detail', kwargs={'pk': obj.host_id})
if obj.hosts.exists():
res['hosts'] = self.reverse('api:job_event_hosts_list', kwargs={'pk': obj.pk})
return res
def get_summary_fields(self, obj):
d = super(JobEventSerializer, self).get_summary_fields(obj)
try:
d['job']['job_template_id'] = obj.job.job_template.id
d['job']['job_template_name'] = obj.job.job_template.name
except (KeyError, AttributeError):
pass
return d
def to_representation(self, obj):
ret = super(JobEventSerializer, self).to_representation(obj)
# Show full stdout for event detail view, truncate only for list view.
if hasattr(self.context.get('view', None), 'retrieve'):
return ret
# Show full stdout for playbook_on_* events.
if obj and obj.event.startswith('playbook_on'):
return ret
max_bytes = settings.EVENT_STDOUT_MAX_BYTES_DISPLAY
if max_bytes > 0 and 'stdout' in ret and len(ret['stdout']) >= max_bytes:
ret['stdout'] = ret['stdout'][:(max_bytes - 1)] + u'\u2026'
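            # Truncation may cut off ANSI reset sequences; count SGR "set" vs. "reset"
            # codes in the remaining stdout and append enough resets to close any
            # styling that was left open.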
set_count = 0
reset_count = 0
for m in ANSI_SGR_PATTERN.finditer(ret['stdout']):
if m.string[m.start():m.end()] == u'\u001b[0m':
reset_count += 1
else:
set_count += 1
ret['stdout'] += u'\u001b[0m' * (set_count - reset_count)
return ret
class JobEventWebSocketSerializer(JobEventSerializer):
created = serializers.SerializerMethodField()
modified = serializers.SerializerMethodField()
event_name = serializers.CharField(source='event')
group_name = serializers.SerializerMethodField()
class Meta:
model = JobEvent
fields = ('*', 'event_name', 'group_name',)
def get_created(self, obj):
return obj.created.isoformat()
def get_modified(self, obj):
return obj.modified.isoformat()
def get_group_name(self, obj):
return 'job_events'
class ProjectUpdateEventSerializer(JobEventSerializer):
stdout = serializers.SerializerMethodField()
event_data = serializers.SerializerMethodField()
class Meta:
model = ProjectUpdateEvent
fields = ('*', '-name', '-description', '-job', '-job_id',
'-parent_uuid', '-parent', '-host', 'project_update')
def get_related(self, obj):
res = super(JobEventSerializer, self).get_related(obj)
res['project_update'] = self.reverse(
'api:project_update_detail', kwargs={'pk': obj.project_update_id}
)
return res
def get_stdout(self, obj):
return UriCleaner.remove_sensitive(obj.stdout)
def get_event_data(self, obj):
try:
return json.loads(
UriCleaner.remove_sensitive(
json.dumps(obj.event_data)
)
)
except Exception:
logger.exception("Failed to sanitize event_data")
return {}
class ProjectUpdateEventWebSocketSerializer(ProjectUpdateEventSerializer):
created = serializers.SerializerMethodField()
modified = serializers.SerializerMethodField()
event_name = serializers.CharField(source='event')
group_name = serializers.SerializerMethodField()
class Meta:
model = ProjectUpdateEvent
fields = ('*', 'event_name', 'group_name',)
def get_created(self, obj):
return obj.created.isoformat()
def get_modified(self, obj):
return obj.modified.isoformat()
def get_group_name(self, obj):
return 'project_update_events'
class AdHocCommandEventSerializer(BaseSerializer):
event_display = serializers.CharField(source='get_event_display', read_only=True)
class Meta:
model = AdHocCommandEvent
fields = ('*', '-name', '-description', 'ad_hoc_command', 'event',
'counter', 'event_display', 'event_data', 'failed',
'changed', 'uuid', 'host', 'host_name', 'stdout',
'start_line', 'end_line', 'verbosity')
def get_related(self, obj):
res = super(AdHocCommandEventSerializer, self).get_related(obj)
res.update(dict(
ad_hoc_command = self.reverse('api:ad_hoc_command_detail', kwargs={'pk': obj.ad_hoc_command_id}),
))
if obj.host:
res['host'] = self.reverse('api:host_detail', kwargs={'pk': obj.host.pk})
return res
def to_representation(self, obj):
ret = super(AdHocCommandEventSerializer, self).to_representation(obj)
# Show full stdout for event detail view, truncate only for list view.
if hasattr(self.context.get('view', None), 'retrieve'):
return ret
max_bytes = settings.EVENT_STDOUT_MAX_BYTES_DISPLAY
if max_bytes > 0 and 'stdout' in ret and len(ret['stdout']) >= max_bytes:
ret['stdout'] = ret['stdout'][:(max_bytes - 1)] + u'\u2026'
set_count = 0
reset_count = 0
for m in ANSI_SGR_PATTERN.finditer(ret['stdout']):
if m.string[m.start():m.end()] == u'\u001b[0m':
reset_count += 1
else:
set_count += 1
ret['stdout'] += u'\u001b[0m' * (set_count - reset_count)
return ret
class AdHocCommandEventWebSocketSerializer(AdHocCommandEventSerializer):
created = serializers.SerializerMethodField()
modified = serializers.SerializerMethodField()
event_name = serializers.CharField(source='event')
group_name = serializers.SerializerMethodField()
class Meta:
model = AdHocCommandEvent
fields = ('*', 'event_name', 'group_name',)
def get_created(self, obj):
return obj.created.isoformat()
def get_modified(self, obj):
return obj.modified.isoformat()
def get_group_name(self, obj):
return 'ad_hoc_command_events'
class InventoryUpdateEventSerializer(AdHocCommandEventSerializer):
class Meta:
model = InventoryUpdateEvent
fields = ('*', '-name', '-description', '-ad_hoc_command', '-host',
'-host_name', 'inventory_update')
def get_related(self, obj):
res = super(AdHocCommandEventSerializer, self).get_related(obj)
res['inventory_update'] = self.reverse(
'api:inventory_update_detail', kwargs={'pk': obj.inventory_update_id}
)
return res
class InventoryUpdateEventWebSocketSerializer(InventoryUpdateEventSerializer):
created = serializers.SerializerMethodField()
modified = serializers.SerializerMethodField()
event_name = serializers.CharField(source='event')
group_name = serializers.SerializerMethodField()
class Meta:
model = InventoryUpdateEvent
fields = ('*', 'event_name', 'group_name',)
def get_created(self, obj):
return obj.created.isoformat()
def get_modified(self, obj):
return obj.modified.isoformat()
def get_group_name(self, obj):
return 'inventory_update_events'
class SystemJobEventSerializer(AdHocCommandEventSerializer):
class Meta:
model = SystemJobEvent
fields = ('*', '-name', '-description', '-ad_hoc_command', '-host',
'-host_name', 'system_job')
def get_related(self, obj):
res = super(AdHocCommandEventSerializer, self).get_related(obj)
res['system_job'] = self.reverse(
'api:system_job_detail', kwargs={'pk': obj.system_job_id}
)
return res
class SystemJobEventWebSocketSerializer(SystemJobEventSerializer):
created = serializers.SerializerMethodField()
modified = serializers.SerializerMethodField()
event_name = serializers.CharField(source='event')
group_name = serializers.SerializerMethodField()
class Meta:
model = SystemJobEvent
fields = ('*', 'event_name', 'group_name',)
def get_created(self, obj):
return obj.created.isoformat()
def get_modified(self, obj):
return obj.modified.isoformat()
def get_group_name(self, obj):
return 'system_job_events'
class JobLaunchSerializer(BaseSerializer):
# Representational fields
passwords_needed_to_start = serializers.ReadOnlyField()
can_start_without_user_input = serializers.BooleanField(read_only=True)
variables_needed_to_start = serializers.ReadOnlyField()
credential_needed_to_start = serializers.SerializerMethodField()
inventory_needed_to_start = serializers.SerializerMethodField()
survey_enabled = serializers.SerializerMethodField()
job_template_data = serializers.SerializerMethodField()
defaults = serializers.SerializerMethodField()
# Accepted on launch fields
extra_vars = serializers.JSONField(required=False, write_only=True)
inventory = serializers.PrimaryKeyRelatedField(
queryset=Inventory.objects.all(),
required=False, write_only=True
)
credentials = serializers.PrimaryKeyRelatedField(
many=True, queryset=Credential.objects.all(),
required=False, write_only=True
)
credential_passwords = VerbatimField(required=False, write_only=True)
diff_mode = serializers.BooleanField(required=False, write_only=True)
job_tags = serializers.CharField(required=False, write_only=True, allow_blank=True)
job_type = serializers.ChoiceField(required=False, choices=NEW_JOB_TYPE_CHOICES, write_only=True)
skip_tags = serializers.CharField(required=False, write_only=True, allow_blank=True)
limit = serializers.CharField(required=False, write_only=True, allow_blank=True)
verbosity = serializers.ChoiceField(required=False, choices=VERBOSITY_CHOICES, write_only=True)
class Meta:
model = JobTemplate
fields = ('can_start_without_user_input', 'passwords_needed_to_start',
'extra_vars', 'inventory', 'limit', 'job_tags', 'skip_tags', 'job_type', 'verbosity', 'diff_mode',
'credentials', 'credential_passwords', 'ask_variables_on_launch', 'ask_tags_on_launch',
'ask_diff_mode_on_launch', 'ask_skip_tags_on_launch', 'ask_job_type_on_launch', 'ask_limit_on_launch',
'ask_verbosity_on_launch', 'ask_inventory_on_launch', 'ask_credential_on_launch',
'survey_enabled', 'variables_needed_to_start', 'credential_needed_to_start',
'inventory_needed_to_start', 'job_template_data', 'defaults', 'verbosity')
read_only_fields = (
'ask_diff_mode_on_launch', 'ask_variables_on_launch', 'ask_limit_on_launch', 'ask_tags_on_launch',
'ask_skip_tags_on_launch', 'ask_job_type_on_launch', 'ask_verbosity_on_launch',
'ask_inventory_on_launch', 'ask_credential_on_launch',)
def get_credential_needed_to_start(self, obj):
return False
def get_inventory_needed_to_start(self, obj):
return not (obj and obj.inventory)
def get_survey_enabled(self, obj):
if obj:
return obj.survey_enabled and 'spec' in obj.survey_spec
return False
def get_defaults(self, obj):
defaults_dict = {}
for field_name in JobTemplate.get_ask_mapping().keys():
if field_name == 'inventory':
defaults_dict[field_name] = dict(
name=getattrd(obj, '%s.name' % field_name, None),
id=getattrd(obj, '%s.pk' % field_name, None))
elif field_name == 'credentials':
if self.version > 1:
for cred in obj.credentials.all():
cred_dict = dict(
id=cred.id,
name=cred.name,
credential_type=cred.credential_type.pk,
passwords_needed=cred.passwords_needed
)
if cred.credential_type.managed_by_tower and 'vault_id' in cred.credential_type.defined_fields:
cred_dict['vault_id'] = cred.get_input('vault_id', default=None)
defaults_dict.setdefault(field_name, []).append(cred_dict)
else:
defaults_dict[field_name] = getattr(obj, field_name)
return defaults_dict
def get_job_template_data(self, obj):
return dict(name=obj.name, id=obj.id, description=obj.description)
def validate_extra_vars(self, value):
return vars_validate_or_raise(value)
def validate(self, attrs):
template = self.context.get('template')
accepted, rejected, errors = template._accept_or_ignore_job_kwargs(
_exclude_errors=['prompts'], # make several error types non-blocking
**attrs)
self._ignored_fields = rejected
if template.inventory and template.inventory.pending_deletion is True:
errors['inventory'] = _("The inventory associated with this Job Template is being deleted.")
elif 'inventory' in accepted and accepted['inventory'].pending_deletion:
errors['inventory'] = _("The provided inventory is being deleted.")
# Prohibit providing multiple credentials of the same CredentialType.kind
# or multiples of same vault id
distinct_cred_kinds = []
for cred in accepted.get('credentials', []):
if cred.unique_hash() in distinct_cred_kinds:
errors.setdefault('credentials', []).append(_(
'Cannot assign multiple {} credentials.'
).format(cred.unique_hash(display=True)))
if cred.credential_type.kind not in ('ssh', 'vault', 'cloud', 'net'):
errors.setdefault('credentials', []).append(_(
'Cannot assign a Credential of kind `{}`'
).format(cred.credential_type.kind))
distinct_cred_kinds.append(cred.unique_hash())
# Prohibit removing credentials from the JT list (unsupported for now)
template_credentials = template.credentials.all()
if 'credentials' in attrs:
removed_creds = set(template_credentials) - set(attrs['credentials'])
provided_mapping = Credential.unique_dict(attrs['credentials'])
for cred in removed_creds:
if cred.unique_hash() in provided_mapping.keys():
continue # User replaced credential with new of same type
errors.setdefault('credentials', []).append(_(
'Removing {} credential at launch time without replacement is not supported. '
'Provided list lacked credential(s): {}.'
).format(cred.unique_hash(display=True), ', '.join([str(c) for c in removed_creds])))
# verify that credentials (either provided or existing) don't
# require launch-time passwords that have not been provided
if 'credentials' in accepted:
launch_credentials = accepted['credentials']
else:
launch_credentials = template_credentials
passwords = attrs.get('credential_passwords', {}) # get from original attrs
passwords_lacking = []
for cred in launch_credentials:
for p in cred.passwords_needed:
if p not in passwords:
passwords_lacking.append(p)
else:
accepted.setdefault('credential_passwords', {})
accepted['credential_passwords'][p] = passwords[p]
if len(passwords_lacking):
errors['passwords_needed_to_start'] = passwords_lacking
if errors:
raise serializers.ValidationError(errors)
if 'extra_vars' in accepted:
extra_vars_save = accepted['extra_vars']
else:
extra_vars_save = None
# Validate job against JobTemplate clean_ methods
accepted = super(JobLaunchSerializer, self).validate(accepted)
# Preserve extra_vars as dictionary internally
if extra_vars_save:
accepted['extra_vars'] = extra_vars_save
return accepted
class WorkflowJobLaunchSerializer(BaseSerializer):
can_start_without_user_input = serializers.BooleanField(read_only=True)
defaults = serializers.SerializerMethodField()
variables_needed_to_start = serializers.ReadOnlyField()
survey_enabled = serializers.SerializerMethodField()
extra_vars = VerbatimField(required=False, write_only=True)
inventory = serializers.PrimaryKeyRelatedField(
queryset=Inventory.objects.all(),
required=False, write_only=True
)
workflow_job_template_data = serializers.SerializerMethodField()
class Meta:
model = WorkflowJobTemplate
fields = ('ask_inventory_on_launch', 'can_start_without_user_input', 'defaults', 'extra_vars',
'inventory', 'survey_enabled', 'variables_needed_to_start',
'node_templates_missing', 'node_prompts_rejected',
'workflow_job_template_data', 'survey_enabled', 'ask_variables_on_launch')
read_only_fields = ('ask_inventory_on_launch', 'ask_variables_on_launch')
def get_survey_enabled(self, obj):
if obj:
return obj.survey_enabled and 'spec' in obj.survey_spec
return False
def get_defaults(self, obj):
defaults_dict = {}
for field_name in WorkflowJobTemplate.get_ask_mapping().keys():
if field_name == 'inventory':
defaults_dict[field_name] = dict(
name=getattrd(obj, '%s.name' % field_name, None),
id=getattrd(obj, '%s.pk' % field_name, None))
else:
defaults_dict[field_name] = getattr(obj, field_name)
return defaults_dict
def get_workflow_job_template_data(self, obj):
return dict(name=obj.name, id=obj.id, description=obj.description)
def validate(self, attrs):
template = self.instance
accepted, rejected, errors = template._accept_or_ignore_job_kwargs(**attrs)
self._ignored_fields = rejected
if template.inventory and template.inventory.pending_deletion is True:
errors['inventory'] = _("The inventory associated with this Workflow is being deleted.")
elif 'inventory' in accepted and accepted['inventory'].pending_deletion:
errors['inventory'] = _("The provided inventory is being deleted.")
if errors:
raise serializers.ValidationError(errors)
WFJT_extra_vars = template.extra_vars
WFJT_inventory = template.inventory
super(WorkflowJobLaunchSerializer, self).validate(attrs)
template.extra_vars = WFJT_extra_vars
template.inventory = WFJT_inventory
return accepted
class NotificationTemplateSerializer(BaseSerializer):
show_capabilities = ['edit', 'delete', 'copy']
capabilities_prefetch = [{'copy': 'organization.admin'}]
class Meta:
model = NotificationTemplate
fields = ('*', 'organization', 'notification_type', 'notification_configuration')
type_map = {"string": (str,),
"int": (int,),
"bool": (bool,),
"list": (list,),
"password": (str,),
"object": (dict, OrderedDict)}
def to_representation(self, obj):
ret = super(NotificationTemplateSerializer, self).to_representation(obj)
if 'notification_configuration' in ret:
ret['notification_configuration'] = obj.display_notification_configuration()
return ret
def get_related(self, obj):
res = super(NotificationTemplateSerializer, self).get_related(obj)
res.update(dict(
test = self.reverse('api:notification_template_test', kwargs={'pk': obj.pk}),
notifications = self.reverse('api:notification_template_notification_list', kwargs={'pk': obj.pk}),
))
if self.version > 1:
res['copy'] = self.reverse('api:notification_template_copy', kwargs={'pk': obj.pk})
if obj.organization:
res['organization'] = self.reverse('api:organization_detail', kwargs={'pk': obj.organization.pk})
return res
def _recent_notifications(self, obj):
return [{'id': x.id, 'status': x.status, 'created': x.created} for x in obj.notifications.all().order_by('-created')[:5]]
def get_summary_fields(self, obj):
d = super(NotificationTemplateSerializer, self).get_summary_fields(obj)
d['recent_notifications'] = self._recent_notifications(obj)
return d
def validate(self, attrs):
from awx.api.views import NotificationTemplateDetail
notification_type = None
if 'notification_type' in attrs:
notification_type = attrs['notification_type']
elif self.instance:
notification_type = self.instance.notification_type
else:
notification_type = None
if not notification_type:
raise serializers.ValidationError(_('Missing required fields for Notification Configuration: notification_type'))
notification_class = NotificationTemplate.CLASS_FOR_NOTIFICATION_TYPE[notification_type]
missing_fields = []
incorrect_type_fields = []
error_list = []
if 'notification_configuration' not in attrs:
return attrs
if self.context['view'].kwargs and isinstance(self.context['view'], NotificationTemplateDetail):
object_actual = self.context['view'].get_object()
else:
object_actual = None
for field, params in notification_class.init_parameters.items():
if field not in attrs['notification_configuration']:
if 'default' in params:
attrs['notification_configuration'][field] = params['default']
else:
missing_fields.append(field)
continue
field_val = attrs['notification_configuration'][field]
field_type = params['type']
expected_types = self.type_map[field_type]
if not type(field_val) in expected_types:
incorrect_type_fields.append((field, field_type))
continue
if field_type == "list" and len(field_val) < 1:
error_list.append(_("No values specified for field '{}'").format(field))
continue
if field_type == "password" and field_val == "$encrypted$" and object_actual is not None:
attrs['notification_configuration'][field] = object_actual.notification_configuration[field]
if missing_fields:
error_list.append(_("Missing required fields for Notification Configuration: {}.").format(missing_fields))
if incorrect_type_fields:
for type_field_error in incorrect_type_fields:
error_list.append(_("Configuration field '{}' incorrect type, expected {}.").format(type_field_error[0],
type_field_error[1]))
if error_list:
raise serializers.ValidationError(error_list)
return super(NotificationTemplateSerializer, self).validate(attrs)
class NotificationSerializer(BaseSerializer):
class Meta:
model = Notification
fields = ('*', '-name', '-description', 'notification_template', 'error', 'status', 'notifications_sent',
'notification_type', 'recipients', 'subject')
def get_related(self, obj):
res = super(NotificationSerializer, self).get_related(obj)
res.update(dict(
notification_template = self.reverse('api:notification_template_detail', kwargs={'pk': obj.notification_template.pk}),
))
return res
class LabelSerializer(BaseSerializer):
class Meta:
model = Label
fields = ('*', '-description', 'organization')
def get_related(self, obj):
res = super(LabelSerializer, self).get_related(obj)
if obj.organization:
res['organization'] = self.reverse('api:organization_detail', kwargs={'pk': obj.organization.pk})
return res
class SchedulePreviewSerializer(BaseSerializer):
class Meta:
model = Schedule
fields = ('rrule',)
# We reject rrules if:
# - DTSTART is not included
# - INTERVAL is not included
# - SECONDLY is used
# - TZID is used
# - BYDAY prefixed with a number (MO is good but not 20MO)
# - BYYEARDAY
# - BYWEEKNO
# - Multiple DTSTART or RRULE elements
# - Can't contain both COUNT and UNTIL
# - COUNT > 999
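# Illustrative examples (not from the original source) of rrule strings
# checked against the rules above; dates and frequencies are assumptions:
#   accepted: "DTSTART:20380601T120000Z RRULE:FREQ=DAILY;INTERVAL=1;COUNT=5"
#   rejected: "DTSTART:20380601T120000Z RRULE:FREQ=DAILY;INTERVAL=1;COUNT=5;UNTIL=20390601T120000Z"  (COUNT with UNTIL)
#   rejected: "DTSTART:20380601T120000Z RRULE:FREQ=WEEKLY;INTERVAL=1;BYDAY=20MO"  (numeric BYDAY prefix)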
def validate_rrule(self, value):
rrule_value = value
multi_by_month_day = r".*?BYMONTHDAY[\:\=][0-9]+,-*[0-9]+"
multi_by_month = r".*?BYMONTH[\:\=][0-9]+,[0-9]+"
by_day_with_numeric_prefix = r".*?BYDAY[\:\=][0-9]+[a-zA-Z]{2}"
match_count = re.match(r".*?(COUNT\=[0-9]+)", rrule_value)
match_multiple_dtstart = re.findall(r".*?(DTSTART(;[^:]+)?\:[0-9]+T[0-9]+Z?)", rrule_value)
match_native_dtstart = re.findall(r".*?(DTSTART:[0-9]+T[0-9]+) ", rrule_value)
match_multiple_rrule = re.findall(r".*?(RRULE\:)", rrule_value)
if not len(match_multiple_dtstart):
raise serializers.ValidationError(_('Valid DTSTART required in rrule. Value should start with: DTSTART:YYYYMMDDTHHMMSSZ'))
if len(match_native_dtstart):
raise serializers.ValidationError(_('DTSTART cannot be a naive datetime. Specify ;TZINFO= or YYYYMMDDTHHMMSSZZ.'))
if len(match_multiple_dtstart) > 1:
raise serializers.ValidationError(_('Multiple DTSTART is not supported.'))
if not len(match_multiple_rrule):
raise serializers.ValidationError(_('RRULE required in rrule.'))
if len(match_multiple_rrule) > 1:
raise serializers.ValidationError(_('Multiple RRULE is not supported.'))
if 'interval' not in rrule_value.lower():
raise serializers.ValidationError(_('INTERVAL required in rrule.'))
if 'secondly' in rrule_value.lower():
raise serializers.ValidationError(_('SECONDLY is not supported.'))
if re.match(multi_by_month_day, rrule_value):
raise serializers.ValidationError(_('Multiple BYMONTHDAYs not supported.'))
if re.match(multi_by_month, rrule_value):
raise serializers.ValidationError(_('Multiple BYMONTHs not supported.'))
if re.match(by_day_with_numeric_prefix, rrule_value):
raise serializers.ValidationError(_("BYDAY with numeric prefix not supported."))
if 'byyearday' in rrule_value.lower():
raise serializers.ValidationError(_("BYYEARDAY not supported."))
if 'byweekno' in rrule_value.lower():
raise serializers.ValidationError(_("BYWEEKNO not supported."))
if 'COUNT' in rrule_value and 'UNTIL' in rrule_value:
raise serializers.ValidationError(_("RRULE may not contain both COUNT and UNTIL"))
if match_count:
count_val = match_count.groups()[0].strip().split("=")
if int(count_val[1]) > 999:
raise serializers.ValidationError(_("COUNT > 999 is unsupported."))
try:
Schedule.rrulestr(rrule_value)
except Exception as e:
raise serializers.ValidationError(_("rrule parsing failed validation: {}").format(e))
return value
class ScheduleSerializer(LaunchConfigurationBaseSerializer, SchedulePreviewSerializer):
show_capabilities = ['edit', 'delete']
timezone = serializers.SerializerMethodField()
until = serializers.SerializerMethodField()
class Meta:
model = Schedule
fields = ('*', 'unified_job_template', 'enabled', 'dtstart', 'dtend', 'rrule', 'next_run', 'timezone',
'until')
def get_timezone(self, obj):
return obj.timezone
def get_until(self, obj):
return obj.until
def get_related(self, obj):
res = super(ScheduleSerializer, self).get_related(obj)
res.update(dict(
unified_jobs = self.reverse('api:schedule_unified_jobs_list', kwargs={'pk': obj.pk}),
))
if obj.unified_job_template:
res['unified_job_template'] = obj.unified_job_template.get_absolute_url(self.context.get('request'))
try:
if obj.unified_job_template.project:
res['project'] = obj.unified_job_template.project.get_absolute_url(self.context.get('request'))
except ObjectDoesNotExist:
pass
if obj.inventory:
res['inventory'] = obj.inventory.get_absolute_url(self.context.get('request'))
elif obj.unified_job_template and getattr(obj.unified_job_template, 'inventory', None):
res['inventory'] = obj.unified_job_template.inventory.get_absolute_url(self.context.get('request'))
return res
def get_summary_fields(self, obj):
summary_fields = super(ScheduleSerializer, self).get_summary_fields(obj)
if 'inventory' in summary_fields:
return summary_fields
inventory = None
if obj.unified_job_template and getattr(obj.unified_job_template, 'inventory', None):
inventory = obj.unified_job_template.inventory
else:
return summary_fields
summary_fields['inventory'] = dict()
for field in SUMMARIZABLE_FK_FIELDS['inventory']:
summary_fields['inventory'][field] = getattr(inventory, field, None)
return summary_fields
def validate_unified_job_template(self, value):
if type(value) == InventorySource and value.source not in SCHEDULEABLE_PROVIDERS:
raise serializers.ValidationError(_('Inventory Source must be a cloud resource.'))
elif type(value) == Project and value.scm_type == '':
raise serializers.ValidationError(_('Manual Project cannot have a schedule set.'))
elif type(value) == InventorySource and value.source == 'scm' and value.update_on_project_update:
raise serializers.ValidationError(_(
'Inventory sources with `update_on_project_update` cannot be scheduled. '
'Schedule its source project `{}` instead.'.format(value.source_project.name)))
return value
class InstanceSerializer(BaseSerializer):
consumed_capacity = serializers.SerializerMethodField()
percent_capacity_remaining = serializers.SerializerMethodField()
jobs_running = serializers.IntegerField(
help_text=_('Count of jobs in the running or waiting state that '
'are targeted for this instance'),
read_only=True
)
jobs_total = serializers.IntegerField(
help_text=_('Count of all jobs that target this instance'),
read_only=True
)
class Meta:
model = Instance
read_only_fields = ('uuid', 'hostname', 'version')
fields = ("id", "type", "url", "related", "uuid", "hostname", "created", "modified", 'capacity_adjustment',
"version", "capacity", "consumed_capacity", "percent_capacity_remaining", "jobs_running", "jobs_total",
"cpu", "memory", "cpu_capacity", "mem_capacity", "enabled", "managed_by_policy")
def get_related(self, obj):
res = super(InstanceSerializer, self).get_related(obj)
res['jobs'] = self.reverse('api:instance_unified_jobs_list', kwargs={'pk': obj.pk})
res['instance_groups'] = self.reverse('api:instance_instance_groups_list', kwargs={'pk': obj.pk})
return res
def get_consumed_capacity(self, obj):
return obj.consumed_capacity
def get_percent_capacity_remaining(self, obj):
if not obj.capacity or obj.consumed_capacity >= obj.capacity:
return 0.0
else:
return float("{0:.2f}".format(((float(obj.capacity) - float(obj.consumed_capacity)) / (float(obj.capacity))) * 100))
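# Worked example of the formula above (values are hypothetical): with
# capacity=100 and consumed_capacity=30 the serializer reports
# ((100 - 30) / 100) * 100 = 70.0 percent remaining; anything at or over
# capacity is clamped to 0.0.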
class InstanceGroupSerializer(BaseSerializer):
committed_capacity = serializers.SerializerMethodField()
consumed_capacity = serializers.SerializerMethodField()
percent_capacity_remaining = serializers.SerializerMethodField()
jobs_running = serializers.IntegerField(
help_text=_('Count of jobs in the running or waiting state that '
'are targeted for this instance group'),
read_only=True
)
jobs_total = serializers.IntegerField(
help_text=_('Count of all jobs that target this instance group'),
read_only=True
)
instances = serializers.SerializerMethodField()
is_controller = serializers.BooleanField(
help_text=_('Indicates whether instance group controls any other group'),
read_only=True
)
is_isolated = serializers.BooleanField(
help_text=_('Indicates whether instances in this group are isolated. '
'Isolated groups have a designated controller group.'),
read_only=True
)
# NOTE: help_text is duplicated from field definitions, no obvious way of
# both defining field details here and also getting the field's help_text
policy_instance_percentage = serializers.IntegerField(
default=0, min_value=0, max_value=100, required=False, initial=0,
label=_('Policy Instance Percentage'),
help_text=_("Minimum percentage of all instances that will be automatically assigned to "
"this group when new instances come online.")
)
policy_instance_minimum = serializers.IntegerField(
default=0, min_value=0, required=False, initial=0,
label=_('Policy Instance Minimum'),
help_text=_("Static minimum number of Instances that will be automatically assign to "
"this group when new instances come online.")
)
policy_instance_list = serializers.ListField(
child=serializers.CharField(), required=False,
label=_('Policy Instance List'),
help_text=_("List of exact-match Instances that will be assigned to this group")
)
class Meta:
model = InstanceGroup
fields = ("id", "type", "url", "related", "name", "created", "modified",
"capacity", "committed_capacity", "consumed_capacity",
"percent_capacity_remaining", "jobs_running", "jobs_total",
"instances", "controller", "is_controller", "is_isolated",
"policy_instance_percentage", "policy_instance_minimum", "policy_instance_list")
def get_related(self, obj):
res = super(InstanceGroupSerializer, self).get_related(obj)
res['jobs'] = self.reverse('api:instance_group_unified_jobs_list', kwargs={'pk': obj.pk})
res['instances'] = self.reverse('api:instance_group_instance_list', kwargs={'pk': obj.pk})
if obj.controller_id:
res['controller'] = self.reverse('api:instance_group_detail', kwargs={'pk': obj.controller_id})
return res
def validate_policy_instance_list(self, value):
for instance_name in value:
if value.count(instance_name) > 1:
raise serializers.ValidationError(_('Duplicate entry {}.').format(instance_name))
if not Instance.objects.filter(hostname=instance_name).exists():
raise serializers.ValidationError(_('{} is not a valid hostname of an existing instance.').format(instance_name))
if Instance.objects.get(hostname=instance_name).is_isolated():
raise serializers.ValidationError(_('Isolated instances may not be added or removed from instances groups via the API.'))
if self.instance and self.instance.controller_id is not None:
raise serializers.ValidationError(_('Isolated instance group membership may not be managed via the API.'))
return value
def validate_name(self, value):
if self.instance and self.instance.name == 'tower' and value != 'tower':
raise serializers.ValidationError(_('tower instance group name may not be changed.'))
return value
def get_capacity_dict(self):
# Store capacity values (globally computed) in the context
if 'capacity_map' not in self.context:
ig_qs = None
jobs_qs = UnifiedJob.objects.filter(status__in=('running', 'waiting'))
if self.parent: # Is ListView:
ig_qs = self.parent.instance
self.context['capacity_map'] = InstanceGroup.objects.capacity_values(
qs=ig_qs, tasks=jobs_qs, breakdown=True)
return self.context['capacity_map']
def get_consumed_capacity(self, obj):
return self.get_capacity_dict()[obj.name]['running_capacity']
def get_committed_capacity(self, obj):
return self.get_capacity_dict()[obj.name]['committed_capacity']
def get_percent_capacity_remaining(self, obj):
if not obj.capacity:
return 0.0
consumed = self.get_consumed_capacity(obj)
if consumed >= obj.capacity:
return 0.0
else:
return float("{0:.2f}".format(
((float(obj.capacity) - float(consumed)) / (float(obj.capacity))) * 100)
)
def get_instances(self, obj):
return obj.instances.count()
class ActivityStreamSerializer(BaseSerializer):
changes = serializers.SerializerMethodField()
object_association = serializers.SerializerMethodField()
@cached_property
def _local_summarizable_fk_fields(self):
summary_dict = copy.copy(SUMMARIZABLE_FK_FIELDS)
# Special requests
summary_dict['group'] = summary_dict['group'] + ('inventory_id',)
for key in summary_dict.keys():
if 'id' not in summary_dict[key]:
summary_dict[key] = summary_dict[key] + ('id',)
field_list = list(summary_dict.items())
# Needed related fields that are not in the default summary fields
field_list += [
('workflow_job_template_node', ('id', 'unified_job_template_id')),
('label', ('id', 'name', 'organization_id')),
('notification', ('id', 'status', 'notification_type', 'notification_template_id')),
('o_auth2_access_token', ('id', 'user_id', 'description', 'application_id', 'scope')),
('o_auth2_application', ('id', 'name', 'description')),
('credential_type', ('id', 'name', 'description', 'kind', 'managed_by_tower')),
('ad_hoc_command', ('id', 'name', 'status', 'limit'))
]
return field_list
class Meta:
model = ActivityStream
fields = ('*', '-name', '-description', '-created', '-modified',
'timestamp', 'operation', 'changes', 'object1', 'object2', 'object_association')
def get_fields(self):
ret = super(ActivityStreamSerializer, self).get_fields()
for key, field in list(ret.items()):
if key == 'changes':
field.help_text = _('A summary of the new and changed values when an object is created, updated, or deleted')
if key == 'object1':
field.help_text = _('For create, update, and delete events this is the object type that was affected. '
'For associate and disassociate events this is the object type associated or disassociated with object2.')
if key == 'object2':
field.help_text = _('Unpopulated for create, update, and delete events. For associate and disassociate '
'events this is the object type that object1 is being associated with.')
if key == 'operation':
field.help_text = _('The action taken with respect to the given object(s).')
return ret
def get_changes(self, obj):
if obj is None:
return {}
try:
return json.loads(obj.changes)
except Exception:
logger.warn("Error deserializing activity stream json changes")
return {}
def get_object_association(self, obj):
if not obj.object_relationship_type:
return ""
elif obj.object_relationship_type.endswith('_role'):
# roles: these values look like
# "awx.main.models.inventory.Inventory.admin_role"
# due to historical reasons the UI expects just "role" here
return "role"
# default case: these values look like
# "awx.main.models.organization.Organization_notification_templates_success"
# so instead of splitting on period we have to take after the first underscore
try:
return obj.object_relationship_type.split(".")[-1].split("_", 1)[1]
except Exception:
logger.debug('Failed to parse activity stream relationship type {}'.format(obj.object_relationship_type))
return ""
def get_related(self, obj):
rel = {}
if obj.actor is not None:
rel['actor'] = self.reverse('api:user_detail', kwargs={'pk': obj.actor.pk})
for fk, __ in self._local_summarizable_fk_fields:
if not hasattr(obj, fk):
continue
m2m_list = self._get_rel(obj, fk)
if m2m_list:
rel[fk] = []
id_list = []
for thisItem in m2m_list:
if getattr(thisItem, 'id', None) in id_list:
continue
id_list.append(getattr(thisItem, 'id', None))
if hasattr(thisItem, 'get_absolute_url'):
rel_url = thisItem.get_absolute_url(self.context.get('request'))
else:
view_name = fk + '_detail'
rel_url = self.reverse('api:' + view_name, kwargs={'pk': thisItem.id})
rel[fk].append(rel_url)
if fk == 'schedule':
rel['unified_job_template'] = thisItem.unified_job_template.get_absolute_url(self.context.get('request'))
if obj.setting and obj.setting.get('category', None):
rel['setting'] = self.reverse(
'api:setting_singleton_detail',
kwargs={'category_slug': obj.setting['category']}
)
return rel
def _get_rel(self, obj, fk):
related_model = ActivityStream._meta.get_field(fk).related_model
related_manager = getattr(obj, fk)
if issubclass(related_model, PolymorphicModel) and hasattr(obj, '_prefetched_objects_cache'):
# HACK: manually fill PolymorphicModel caches to prevent running query multiple times
# unnecessary if django-polymorphic issue #68 is solved
if related_manager.prefetch_cache_name not in obj._prefetched_objects_cache:
obj._prefetched_objects_cache[related_manager.prefetch_cache_name] = list(related_manager.all())
return related_manager.all()
def get_summary_fields(self, obj):
summary_fields = OrderedDict()
for fk, related_fields in self._local_summarizable_fk_fields:
try:
if not hasattr(obj, fk):
continue
m2m_list = self._get_rel(obj, fk)
if m2m_list:
summary_fields[fk] = []
for thisItem in m2m_list:
if fk == 'job':
summary_fields['job_template'] = []
job_template_item = {}
job_template_fields = SUMMARIZABLE_FK_FIELDS['job_template']
job_template = getattr(thisItem, 'job_template', None)
if job_template is not None:
for field in job_template_fields:
fval = getattr(job_template, field, None)
if fval is not None:
job_template_item[field] = fval
summary_fields['job_template'].append(job_template_item)
if fk == 'workflow_job_template_node':
summary_fields['workflow_job_template'] = []
workflow_job_template_item = {}
workflow_job_template_fields = SUMMARIZABLE_FK_FIELDS['workflow_job_template']
workflow_job_template = getattr(thisItem, 'workflow_job_template', None)
if workflow_job_template is not None:
for field in workflow_job_template_fields:
fval = getattr(workflow_job_template, field, None)
if fval is not None:
workflow_job_template_item[field] = fval
summary_fields['workflow_job_template'].append(workflow_job_template_item)
if fk == 'schedule':
unified_job_template = getattr(thisItem, 'unified_job_template', None)
if unified_job_template is not None:
summary_fields[get_type_for_model(unified_job_template)] = {'id': unified_job_template.id,
'name': unified_job_template.name}
thisItemDict = {}
for field in related_fields:
fval = getattr(thisItem, field, None)
if fval is not None:
thisItemDict[field] = fval
summary_fields[fk].append(thisItemDict)
except ObjectDoesNotExist:
pass
if obj.actor is not None:
summary_fields['actor'] = dict(id = obj.actor.id,
username = obj.actor.username,
first_name = obj.actor.first_name,
last_name = obj.actor.last_name)
elif obj.deleted_actor:
summary_fields['actor'] = obj.deleted_actor.copy()
summary_fields['actor']['id'] = None
if obj.setting:
summary_fields['setting'] = [obj.setting]
return summary_fields
|
the-stack_0_13999 | import librosa
import os
import numpy as np
import matplotlib.pyplot as plt
from scipy.fftpack import dct
from scipy.signal import spectrogram
import operator
import pickle
import time
import csv
from random import shuffle
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from sklearn import metrics
import logging
################################################# LOG FILE CONFIGURATION ##################################
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(levelname)s %(message)s',
filename='log_file_combined_dict_norm_all_examples.log',
filemode='w')
################################################# MEL PARAMETERS DEFINITION ##################################
class parameters:
def __init__(self):
# specify file path when executing
self.win_size = 1024
self.hop_size = 512
self.min_freq = 80
self.max_freq = 4000
self.num_mel_filts = 40
self.n_dct = 13
param = parameters()
logging.info(param.__dict__)
##################################### FUNCTIONS FOR MEL SPECTRUM CALCULATION ##################################
# converts frequency in Hz to Mel values
# pass a numpy array to the function
def hz2mel(hzval):
melval = 1127.01028*np.log(1+hzval/700)
return melval
# function tested with example
# converts Mel values to Hz
# pass a numpy array to the function
def mel2hz(melval):
hzval = 700*(np.exp(melval/1127.01028)-1)
return hzval
# function tested with example
# f_fft will be the input
# rounding to the values in freq_list
def find_nearest(values,freq_list):
q_freq_ind=[]
for value in values.tolist():
ind = np.argmin(np.abs(value-freq_list))
q_freq_ind.append(ind)
return np.asarray(q_freq_ind)
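# Quick sanity sketch (illustrative only, never called by the pipeline):
# hz2mel and mel2hz are inverses, and find_nearest snaps arbitrary frequencies
# onto a grid of FFT bin centres. The bin grid below assumes nfft=1024 at a
# 16 kHz sample rate.
def _mel_roundtrip_example():
    freqs_hz = np.array([80.0, 1000.0, 4000.0])
    assert np.allclose(mel2hz(hz2mel(freqs_hz)), freqs_hz)
    fft_bin_freqs = np.linspace(0, 8000, 513)  # 1024-point FFT bins at 16 kHz
    return find_nearest(freqs_hz, fft_bin_freqs)  # indices of the closest bins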
def compute_mfcc(filepath,win_size,hop_size,min_freq,max_freq,num_mel_filts,n_dct):
melval = hz2mel(np.array([min_freq,max_freq]))
min_mel = melval[0]
max_mel = melval[1]
step = (max_mel-min_mel)/(num_mel_filts-1)
mel_freq_list = np.linspace(min_mel,max_mel,num_mel_filts)
mel_freq_list = np.concatenate(([mel_freq_list[0]-step],mel_freq_list,[mel_freq_list[-1]+step]))
hz_freq_list = mel2hz(mel_freq_list)
nfft = win_size # number of FFT points for the spectrogram
# make sure librosa is imported
x,Fs = librosa.load(filepath,sr=16000)
f,t,Sxx = spectrogram(x,Fs,nperseg=win_size,noverlap=win_size-hop_size,nfft=nfft)
Sxx = np.square(np.abs(Sxx))
# the spectrogram has to be plotted flipped up-down to make the lower frequencies show at the bottom
fft_freq_indices = find_nearest(hz_freq_list,f)  # map each mel-spaced filter edge (in Hz) to the index of the nearest FFT bin frequency
# print(fft_freq_indices,'len=',fft_freq_indices.shape)
filt_bank = np.zeros((1,int(nfft/2) + 1))
for i in range(1,fft_freq_indices.shape[0]-1):# from sec ele to sec last ele
a = fft_freq_indices[i-1]
b = fft_freq_indices[i]
c = fft_freq_indices[i+1]
t1 = (1/(b-a))*np.linspace(a-a,b-a,b-a+1)
t2 = (-1/(c-b))*np.linspace(b-c,c-c,c-b+1)
filt = np.concatenate((t1,t2[1:]))
filt = filt/(np.sum(filt))
filt_zero_pad = np.zeros((1,int(nfft/2)+1))
filt_zero_pad[0,a:c+1] = filt
filt_bank = np.concatenate((filt_bank,filt_zero_pad),axis=0)
filt_bank = filt_bank[1:,:]
mel_spec = np.dot(filt_bank,Sxx)
mel_spec = np.where(mel_spec == 0, np.finfo(float).eps, mel_spec) # for numerical stability
mel_spec = 20*np.log10(mel_spec)
fs_mfcc = mel_spec.shape[1]
return mel_spec,fs_mfcc # returning the mel_spectrum
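# Usage sketch (the clip path is hypothetical; this mirrors how compute_mfcc is
# called later in the script). Note that n_dct is accepted but unused, since the
# function returns the 40-band log-mel spectrogram rather than MFCCs:
#   spec, fs_spec = compute_mfcc('some_clip.mp3', param.win_size, param.hop_size,
#                                param.min_freq, param.max_freq,
#                                param.num_mel_filts, param.n_dct)
#   # spec.shape -> (40, ~909) for the ~29 s MagnaTagATune clips at 16 kHz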
############################# CREATING COMBINED DICT BY JOINING ANNOTATIONS+CLIP_INFO ##########################
# /Users/nitin/Documents/Music Info Retrieval/project/database/magnatagatune/data_from_trey
f = open('/scratch/nn1174/MIR/data_from_trey/annotations_final.txt', 'r')
reader = csv.reader(f, delimiter='\t')
tags = next(reader)
annotation_dict = {}
while True:
try:
values = next(reader)
annotation_dict[values[0]] = {}  # values[0] is the clip id; each clip id keys a dict of all its tag values
for tagnames, value in zip(tags[1:], values[1:]):
annotation_dict[values[0]][tagnames] = value
except StopIteration:
logging.info('end tag annotations file')
break
ff = open('/scratch/nn1174/MIR/data_from_trey/clip_info_final.txt', 'r')
rreader = csv.reader(ff, delimiter='\t')
metadata = next(rreader)
clip_inf_dict = {}
while True:
try:
values = next(rreader)
# values1 = next(reader, None)
clip_inf_dict[values[0]] = {}
for metdat, val in zip(metadata[1:], values[1:]):
clip_inf_dict[values[0]][metdat] = val
except StopIteration:
logging.info('end clip info file')
break
combined_dict = {}
for key in annotation_dict.keys(): # you can list as many input dicts as you want here
combined_dict[key] = annotation_dict[key].copy()
combined_dict[key].update(clip_inf_dict[key])
# IMPORTANT DECLARATION DEFINING VARIABLE "KEYS"
keys = list(combined_dict.keys())
logging.info('done combining the dictionaries')
logging.info(len(combined_dict.keys()))
logging.info(len(combined_dict['2'].keys()))
################################ LOADING ALL PICKLE FILES NEEDED FOR INDEXING ##################################
with open('train_ind.pickle','rb') as handle:
train_ind = pickle.load(handle)
with open('val_ind.pickle','rb') as handle:
val_ind = pickle.load(handle)
with open('test_ind.pickle','rb') as handle:
test_ind = pickle.load(handle)
### loading sorted tags
with open('sorted_tags.pickle', 'rb') as handle:
sorted_stats = pickle.load(handle)
################################## CALCULATING THE NORMALIZATION COEFFICIENTS ##################################
start_time = time.time()
spec_mat_train = np.zeros((len(train_ind),40,909))
datapath = '/scratch/nn1174/MIR/mp3_all'
logging.info('starting to create spec_mat_train to generate the normalizing coefficients')
for i,ind in enumerate(train_ind):
if keys[ind]=='35644' or keys[ind]=='55753' or keys[ind]=='57881':
spec_mat_train[i,:,:] = np.zeros((40,909))
else:
songpath = os.path.join(datapath,combined_dict[keys[ind]]['mp3_path'])
spec,fs_spec = compute_mfcc(songpath,param.win_size,param.hop_size,param.min_freq,
param.max_freq,param.num_mel_filts,param.n_dct)
spec_mat_train[i,:,:] = spec
if i%20==0:
logging.info(i)
###### normalizing parameters
mn = np.mean(spec_mat_train,axis=0)
stdev = np.std(spec_mat_train,axis=0)
norm_coeff = [mn,stdev]
with open('norm_coeff.pickle','wb') as handle:
pickle.dump(norm_coeff,handle)
######
logging.info('got the mean and std')
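# Sketch of how the saved statistics could be re-applied later (e.g. at
# inference time); this block is illustrative and not executed here:
#   with open('norm_coeff.pickle', 'rb') as handle:
#       mn, stdev = pickle.load(handle)
#   spec_norm = (spec - mn) / stdev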
########################## ADDING MEL SPECTRUM AND OUTPUT FIELDS IN DICTIONARY ##################################
logging.info('appending spectrum+output to validation set')
for i,ind in enumerate(val_ind):
if keys[ind]=='35644' or keys[ind]=='55753' or keys[ind]=='57881':
combined_dict[keys[ind]]['mel_spectrum'] = np.zeros((40,909))
combined_dict[keys[ind]]['output'] = np.zeros((50))
else:
songpath = os.path.join(datapath,combined_dict[keys[ind]]['mp3_path'])
spec,fs_spec = compute_mfcc(songpath,param.win_size,param.hop_size,param.min_freq,
param.max_freq,param.num_mel_filts,param.n_dct)
spec = (spec-mn)/stdev # normalize it
combined_dict[keys[ind]]['mel_spectrum'] = (spec,fs_spec)
output=[]
for j,tag in enumerate(sorted_stats):
if j>49:
break
else:
output.append(int(combined_dict[keys[ind]][tag[0]]))
output = np.array(output)
combined_dict[keys[ind]]['output'] = output
if i%20==0:
logging.info(i)
logging.info('appending spectrum+output to test set')
for i,ind in enumerate(test_ind):
if keys[ind]=='35644' or keys[ind]=='55753' or keys[ind]=='57881':
combined_dict[keys[ind]]['mel_spectrum'] = np.zeros((40,909))
combined_dict[keys[ind]]['output'] = np.zeros((50))
else:
songpath = os.path.join(datapath,combined_dict[keys[ind]]['mp3_path'])
spec,fs_spec = compute_mfcc(songpath,param.win_size,param.hop_size,param.min_freq,
param.max_freq,param.num_mel_filts,param.n_dct)
spec = (spec-mn)/stdev # normalize it
combined_dict[keys[ind]]['mel_spectrum'] = (spec,fs_spec)
output=[]
for j,tag in enumerate(sorted_stats):
if j>49:
break
else:
output.append(int(combined_dict[keys[ind]][tag[0]]))
output = np.array(output)
combined_dict[keys[ind]]['output'] = output
if i%20 == 0:
logging.info(i)
logging.info('appending spectrum+output to train set')
for i,ind in enumerate(train_ind):
if keys[ind]=='35644' or keys[ind]=='55753' or keys[ind]=='57881':
combined_dict[keys[ind]]['mel_spectrum'] = spec_mat_train[i,:,:]
combined_dict[keys[ind]]['output'] = np.zeros((50))
else:
spec = spec_mat_train[i,:,:] # using already calculated spectrograms
spec = (spec-mn)/stdev # normalize it
combined_dict[keys[ind]]['mel_spectrum'] = (spec,909)  # frame count hard-coded, but never used downstream, so it doesn't matter
output=[]
for j,tag in enumerate(sorted_stats):
if j>49:
break
else:
output.append(int(combined_dict[keys[ind]][tag[0]]))
output = np.array(output)
combined_dict[keys[ind]]['output'] = output
if i%20 == 0:
logging.info(i)
logging.info('Done with creating the spec_matrices')
logging.info('done with generating the whole combined_dict')
with open('combined_dict_norm_all_examples.pickle', 'wb') as handle:
pickle.dump(combined_dict, handle)
logging.info('Done with Everything')
|
the-stack_0_14000 | # coding=utf-8
# Copyright 2018 Hao Tan, Mohit Bansal, and the HuggingFace team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch LXMERT model. """
import math
import os
import warnings
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, SmoothL1Loss
from ...activations import ACT2FN, gelu
from ...file_utils import (
ModelOutput,
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
replace_return_docstrings,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_lxmert import LxmertConfig
logger = logging.get_logger(__name__)
_CHECKPOINT_FOR_DOC = "unc-nlp/lxmert-base-uncased"
_CONFIG_FOR_DOC = "LxmertConfig"
_TOKENIZER_FOR_DOC = "LxmertTokenizer"
LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST = [
"unc-nlp/lxmert-base-uncased",
]
class GeLU(nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
return gelu(x)
@dataclass
class LxmertModelOutput(ModelOutput):
"""
Lxmert's outputs that contain the last hidden states, pooled outputs, and attention probabilities for the language,
visual, and cross-modality encoders (note: the visual encoder in Lxmert is referred to as the "relation-ship"
encoder).
Args:
language_output (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the language encoder.
vision_output (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the visual encoder.
pooled_output (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, hidden_size)`):
Last layer hidden-state of the first token of the sequence (classification, CLS, token) further processed
by a Linear layer and a Tanh activation function.
language_hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for input features + one for the output of each cross-modality
layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`.
vision_hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for input features + one for the output of each cross-modality
layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`.
language_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the
weighted average in the self-attention heads.
vision_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the
weighted average in the self-attention heads.
cross_encoder_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the
weighted average in the self-attention heads.
"""
language_output: Optional[torch.FloatTensor] = None
vision_output: Optional[torch.FloatTensor] = None
pooled_output: Optional[torch.FloatTensor] = None
language_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
vision_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
language_attentions: Optional[Tuple[torch.FloatTensor]] = None
vision_attentions: Optional[Tuple[torch.FloatTensor]] = None
cross_encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
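# Hypothetical consumption sketch (the model call and tensor names are
# assumptions at this point in the file; shapes follow the docstring above):
#   out = lxmert_model(input_ids=ids, visual_feats=feats, visual_pos=pos)
#   out.language_output  # (batch, seq_len, hidden_size)
#   out.vision_output    # (batch, num_visual_features, hidden_size)
#   out.pooled_output    # (batch, hidden_size)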
@dataclass
class LxmertForQuestionAnsweringOutput(ModelOutput):
"""
Output type of :class:`~transformers.LxmertForQuestionAnswering`.
Args:
loss (`optional`, returned when ``labels`` is provided, ``torch.FloatTensor`` of shape :obj:`(1,)`):
Total loss as the sum of the masked language modeling loss and the next sequence prediction
(classification) loss.
question_answering_score: (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, n_qa_answers)`, `optional`):
Prediction scores of question answering objective (classification).
language_hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for input features + one for the output of each cross-modality
layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`.
vision_hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for input features + one for the output of each cross-modality
layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`.
language_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the
weighted average in the self-attention heads.
vision_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the
weighted average in the self-attention heads.
cross_encoder_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the
weighted average in the self-attention heads.
"""
loss: Optional[torch.FloatTensor] = None
question_answering_score: Optional[torch.FloatTensor] = None
language_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
vision_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
language_attentions: Optional[Tuple[torch.FloatTensor]] = None
vision_attentions: Optional[Tuple[torch.FloatTensor]] = None
cross_encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
@dataclass
class LxmertForPreTrainingOutput(ModelOutput):
"""
Output type of :class:`~transformers.LxmertForPreTraining`.
Args:
loss (`optional`, returned when ``labels`` is provided, ``torch.FloatTensor`` of shape :obj:`(1,)`):
Total loss as the sum of the masked language modeling loss and the next sequence prediction
(classification) loss.
prediction_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
cross_relationship_score: (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, 2)`):
Prediction scores of the textual matching objective (classification) head (scores of True/False
continuation before SoftMax).
question_answering_score: (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, n_qa_answers)`):
Prediction scores of question answering objective (classification).
language_hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for input features + one for the output of each cross-modality
layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`.
vision_hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for input features + one for the output of each cross-modality
layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`.
language_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the
weighted average in the self-attention heads.
vision_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the
weighted average in the self-attention heads.
cross_encoder_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the
weighted average in the self-attention heads.
"""
loss: Optional[torch.FloatTensor] = None
prediction_logits: Optional[torch.FloatTensor] = None
cross_relationship_score: Optional[torch.FloatTensor] = None
question_answering_score: Optional[torch.FloatTensor] = None
language_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
vision_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
language_attentions: Optional[Tuple[torch.FloatTensor]] = None
vision_attentions: Optional[Tuple[torch.FloatTensor]] = None
cross_encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
def load_tf_weights_in_lxmert(model, config, tf_checkpoint_path):
"""Load tf checkpoints in a pytorch model."""
try:
import re
import numpy as np
import tensorflow as tf
except ImportError:
logger.error(
"Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions."
)
raise
tf_path = os.path.abspath(tf_checkpoint_path)
logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
# Load weights from TF model
init_vars = tf.train.list_variables(tf_path)
names = []
arrays = []
for name, shape in init_vars:
logger.info(f"Loading TF weight {name} with shape {shape}")
array = tf.train.load_variable(tf_path, name)
names.append(name)
arrays.append(array)
for name, array in zip(names, arrays):
name = name.split("/")
# adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v
# which are not required for using pretrained model
if any(
n
in [
"adam_v",
"adam_m",
"AdamWeightDecayOptimizer",
"AdamWeightDecayOptimizer_1",
"global_step",
]
for n in name
):
logger.info(f"Skipping {'/'.join(name)}")
continue
pointer = model
for m_name in name:
if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
scope_names = re.split(r"_(\d+)", m_name)
else:
scope_names = [m_name]
if scope_names[0] == "kernel" or scope_names[0] == "gamma":
pointer = getattr(pointer, "weight")
elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
pointer = getattr(pointer, "bias")
elif scope_names[0] == "output_weights":
pointer = getattr(pointer, "weight")
elif scope_names[0] == "squad":
pointer = getattr(pointer, "classifier")
else:
try:
pointer = getattr(pointer, scope_names[0])
except AttributeError:
logger.info(f"Skipping {'/'.join(name)}")
continue
if len(scope_names) >= 2:
num = int(scope_names[1])
pointer = pointer[num]
if m_name[-11:] == "_embeddings":
pointer = getattr(pointer, "weight")
elif m_name == "kernel":
array = np.transpose(array)
try:
assert pointer.shape == array.shape
except AssertionError as e:
e.args += (pointer.shape, array.shape)
raise
logger.info(f"Initialize PyTorch weight {name}")
pointer.data = torch.from_numpy(array)
return model
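# Hypothetical usage sketch (the checkpoint path is a placeholder; LxmertConfig
# is imported above and the model classes are defined later in this file):
#   config = LxmertConfig()
#   model = LxmertForPreTraining(config)
#   model = load_tf_weights_in_lxmert(model, config, "/path/to/tf_checkpoint")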
class LxmertEmbeddings(nn.Module):
"""Construct the embeddings from word, position and token_type embeddings."""
def __init__(self, config):
super().__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=0)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size, padding_idx=0)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size, padding_idx=0)
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
# any TensorFlow checkpoint file
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, input_ids, token_type_ids=None, inputs_embeds=None):
if input_ids is not None:
input_shape = input_ids.size()
device = input_ids.device
else:
input_shape = inputs_embeds.size()[:-1]
device = inputs_embeds.device
seq_length = input_shape[1]
position_ids = torch.arange(seq_length, dtype=torch.long, device=device)
position_ids = position_ids.unsqueeze(0).expand(input_shape)
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
position_embeddings = self.position_embeddings(position_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = inputs_embeds + position_embeddings + token_type_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
class LxmertAttention(nn.Module):
def __init__(self, config, ctx_dim=None):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
f"heads ({config.num_attention_heads})"
)
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.head_size = self.num_attention_heads * self.attention_head_size
# visual_dim = 2048
if ctx_dim is None:
ctx_dim = config.hidden_size
self.query = nn.Linear(config.hidden_size, self.head_size)
self.key = nn.Linear(ctx_dim, self.head_size)
self.value = nn.Linear(ctx_dim, self.head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (
self.num_attention_heads,
self.attention_head_size,
)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(self, hidden_states, context, attention_mask=None, output_attentions=False):
mixed_query_layer = self.query(hidden_states)
mixed_key_layer = self.key(context)
mixed_value_layer = self.value(context)
query_layer = self.transpose_for_scores(mixed_query_layer)
key_layer = self.transpose_for_scores(mixed_key_layer)
value_layer = self.transpose_for_scores(mixed_value_layer)
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
# Apply the attention mask is (precomputed for all layers in BertModel forward() function)
if attention_mask is not None:
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = nn.functional.softmax(attention_scores, dim=-1)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
return outputs
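# Shape walk-through for the attention above (descriptive comment; the sizes are
# assumed defaults, hidden_size=768 with num_attention_heads=12 -> head size 64):
#   hidden_states (B, L, 768)             -> query_layer (B, 12, L, 64)
#   attention_scores = Q @ K^T / sqrt(64)  -> (B, 12, L_q, L_k)
#   context_layer = softmax(scores) @ V    -> (B, 12, L_q, 64), reshaped back to (B, L_q, 768)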
class LxmertAttentionOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class LxmertCrossAttentionLayer(nn.Module):
def __init__(self, config):
super().__init__()
self.att = LxmertAttention(config)
self.output = LxmertAttentionOutput(config)
def forward(self, input_tensor, ctx_tensor, ctx_att_mask=None, output_attentions=False):
output = self.att(input_tensor, ctx_tensor, ctx_att_mask, output_attentions=output_attentions)
if output_attentions:
attention_probs = output[1]
attention_output = self.output(output[0], input_tensor)
outputs = (attention_output, attention_probs) if output_attentions else (attention_output,)
return outputs
class LxmertSelfAttentionLayer(nn.Module):
def __init__(self, config):
super().__init__()
self.self = LxmertAttention(config)
self.output = LxmertAttentionOutput(config)
def forward(self, input_tensor, attention_mask, output_attentions=False):
# Self attention attends to itself, thus keys and queries are the same (input_tensor).
output = self.self(
input_tensor,
input_tensor,
attention_mask,
output_attentions=output_attentions,
)
if output_attentions:
attention_probs = output[1]
attention_output = self.output(output[0], input_tensor)
outputs = (attention_output, attention_probs) if output_attentions else (attention_output,)
return outputs
class LxmertIntermediate(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
self.intermediate_act_fn = ACT2FN[config.hidden_act]
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
class LxmertOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class LxmertLayer(nn.Module):
def __init__(self, config):
super().__init__()
self.attention = LxmertSelfAttentionLayer(config)
self.intermediate = LxmertIntermediate(config)
self.output = LxmertOutput(config)
def forward(self, hidden_states, attention_mask=None, output_attentions=False):
outputs = self.attention(hidden_states, attention_mask, output_attentions=output_attentions)
attention_output = outputs[0]
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
outputs = (layer_output,) + outputs[1:] # add attentions if we output them
return outputs
class LxmertXLayer(nn.Module):
def __init__(self, config):
super().__init__()
# The cross-attention Layer
self.visual_attention = LxmertCrossAttentionLayer(config)
# Self-attention Layers
self.lang_self_att = LxmertSelfAttentionLayer(config)
self.visn_self_att = LxmertSelfAttentionLayer(config)
# Intermediate and Output Layers (FFNs)
self.lang_inter = LxmertIntermediate(config)
self.lang_output = LxmertOutput(config)
self.visn_inter = LxmertIntermediate(config)
self.visn_output = LxmertOutput(config)
def cross_att(
self,
lang_input,
lang_attention_mask,
visual_input,
visual_attention_mask,
output_x_attentions=False,
):
# Cross Attention
lang_att_output = self.visual_attention(
lang_input,
visual_input,
ctx_att_mask=visual_attention_mask,
output_attentions=output_x_attentions,
)
visual_att_output = self.visual_attention(
visual_input,
lang_input,
ctx_att_mask=lang_attention_mask,
output_attentions=False,
)
return lang_att_output, visual_att_output
def self_att(self, lang_input, lang_attention_mask, visual_input, visual_attention_mask):
# Self Attention
lang_att_output = self.lang_self_att(lang_input, lang_attention_mask, output_attentions=False)
visual_att_output = self.visn_self_att(visual_input, visual_attention_mask, output_attentions=False)
return lang_att_output[0], visual_att_output[0]
def output_fc(self, lang_input, visual_input):
# FC layers
lang_inter_output = self.lang_inter(lang_input)
visual_inter_output = self.visn_inter(visual_input)
# Layer output
lang_output = self.lang_output(lang_inter_output, lang_input)
visual_output = self.visn_output(visual_inter_output, visual_input)
return lang_output, visual_output
def forward(
self,
lang_feats,
lang_attention_mask,
visual_feats,
visual_attention_mask,
output_attentions=False,
):
lang_att_output, visual_att_output = self.cross_att(
lang_input=lang_feats,
lang_attention_mask=lang_attention_mask,
visual_input=visual_feats,
visual_attention_mask=visual_attention_mask,
output_x_attentions=output_attentions,
)
attention_probs = lang_att_output[1:]
lang_att_output, visual_att_output = self.self_att(
lang_att_output[0],
lang_attention_mask,
visual_att_output[0],
visual_attention_mask,
)
lang_output, visual_output = self.output_fc(lang_att_output, visual_att_output)
return (
(
lang_output,
visual_output,
attention_probs[0],
)
if output_attentions
else (lang_output, visual_output)
)
class LxmertVisualFeatureEncoder(nn.Module):
def __init__(self, config):
super().__init__()
feat_dim = config.visual_feat_dim
pos_dim = config.visual_pos_dim
# Object feature encoding
self.visn_fc = nn.Linear(feat_dim, config.hidden_size)
self.visn_layer_norm = nn.LayerNorm(config.hidden_size, eps=1e-12)
# Box position encoding
self.box_fc = nn.Linear(pos_dim, config.hidden_size)
self.box_layer_norm = nn.LayerNorm(config.hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, visual_feats, visual_pos):
x = self.visn_fc(visual_feats)
x = self.visn_layer_norm(x)
y = self.box_fc(visual_pos)
y = self.box_layer_norm(y)
output = (x + y) / 2
output = self.dropout(output)
return output
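# Note on the visual encoder above (descriptive comment): object features and box
# positions are projected to hidden_size separately, layer-normalized, and then
# averaged ((x + y) / 2) rather than summed. Illustrative shapes, assuming the
# default visual_feat_dim=2048, visual_pos_dim=4 and 36 detected regions:
#   visual_feats (B, 36, 2048), visual_pos (B, 36, 4) -> output (B, 36, hidden_size)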
class LxmertEncoder(nn.Module):
def __init__(self, config):
super().__init__()
# Obj-level image embedding layer
self.visn_fc = LxmertVisualFeatureEncoder(config)
self.config = config
# Number of layers
self.num_l_layers = config.l_layers
self.num_x_layers = config.x_layers
self.num_r_layers = config.r_layers
# Layers
# Using self.layer instead of self.l_layer to support loading BERT weights.
self.layer = nn.ModuleList([LxmertLayer(config) for _ in range(self.num_l_layers)])
self.x_layers = nn.ModuleList([LxmertXLayer(config) for _ in range(self.num_x_layers)])
self.r_layers = nn.ModuleList([LxmertLayer(config) for _ in range(self.num_r_layers)])
def forward(
self,
lang_feats,
lang_attention_mask,
visual_feats,
visual_pos,
visual_attention_mask=None,
output_attentions=None,
):
vision_hidden_states = ()
language_hidden_states = ()
vision_attentions = () if output_attentions or self.config.output_attentions else None
language_attentions = () if output_attentions or self.config.output_attentions else None
cross_encoder_attentions = () if output_attentions or self.config.output_attentions else None
visual_feats = self.visn_fc(visual_feats, visual_pos)
# Run language layers
for layer_module in self.layer:
l_outputs = layer_module(lang_feats, lang_attention_mask, output_attentions=output_attentions)
lang_feats = l_outputs[0]
language_hidden_states = language_hidden_states + (lang_feats,)
if language_attentions is not None:
language_attentions = language_attentions + (l_outputs[1],)
# Run relational layers
for layer_module in self.r_layers:
v_outputs = layer_module(visual_feats, visual_attention_mask, output_attentions=output_attentions)
visual_feats = v_outputs[0]
vision_hidden_states = vision_hidden_states + (visual_feats,)
if vision_attentions is not None:
vision_attentions = vision_attentions + (v_outputs[1],)
# Run cross-modality layers
for layer_module in self.x_layers:
x_outputs = layer_module(
lang_feats,
lang_attention_mask,
visual_feats,
visual_attention_mask,
output_attentions=output_attentions,
)
lang_feats, visual_feats = x_outputs[:2]
vision_hidden_states = vision_hidden_states + (visual_feats,)
language_hidden_states = language_hidden_states + (lang_feats,)
if cross_encoder_attentions is not None:
cross_encoder_attentions = cross_encoder_attentions + (x_outputs[2],)
visual_encoder_outputs = (
vision_hidden_states,
vision_attentions if output_attentions else None,
)
lang_encoder_outputs = (
language_hidden_states,
language_attentions if output_attentions else None,
)
return (
visual_encoder_outputs,
lang_encoder_outputs,
cross_encoder_attentions if output_attentions else None,
)
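# Encoder flow summary (descriptive comment, no behavioral change): the language
# stream first runs through `self.layer` (l_layers) and the visual stream through
# `self.r_layers`; both streams are then fused by the cross-modality `self.x_layers`,
# which return updated (lang_feats, visual_feats) and, optionally, the cross-attention
# probabilities collected into `cross_encoder_attentions`.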
class LxmertPooler(nn.Module):
def __init__(self, config):
super(LxmertPooler, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
def forward(self, hidden_states):
# We "pool" the model by simply taking the hidden state corresponding
# to the first token.
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
class LxmertPredictionHeadTransform(nn.Module):
def __init__(self, config):
super(LxmertPredictionHeadTransform, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.transform_act_fn = ACT2FN[config.hidden_act]
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=1e-12)
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.transform_act_fn(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
class LxmertLMPredictionHead(nn.Module):
def __init__(self, config, lxmert_model_embedding_weights):
super(LxmertLMPredictionHead, self).__init__()
self.transform = LxmertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(
lxmert_model_embedding_weights.size(1),
lxmert_model_embedding_weights.size(0),
bias=False,
)
self.decoder.weight = lxmert_model_embedding_weights
self.bias = nn.Parameter(torch.zeros(lxmert_model_embedding_weights.size(0)))
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states) + self.bias
return hidden_states
class LxmertVisualAnswerHead(nn.Module):
def __init__(self, config, num_labels):
super().__init__()
hid_dim = config.hidden_size
self.logit_fc = nn.Sequential(
nn.Linear(hid_dim, hid_dim * 2),
GeLU(),
nn.LayerNorm(hid_dim * 2, eps=1e-12),
nn.Linear(hid_dim * 2, num_labels),
)
def forward(self, hidden_states):
return self.logit_fc(hidden_states)
class LxmertVisualObjHead(nn.Module):
def __init__(self, config):
super().__init__()
self.transform = LxmertPredictionHeadTransform(config)
# Decide the use of visual losses
visual_losses = {}
if config.visual_obj_loss:
visual_losses["obj"] = {"shape": (-1,), "num": config.num_object_labels}
if config.visual_attr_loss:
visual_losses["attr"] = {"shape": (-1,), "num": config.num_attr_labels}
if config.visual_feat_loss:
visual_losses["feat"] = {
"shape": (-1, config.visual_feat_dim),
"num": config.visual_feat_dim,
}
self.visual_losses = visual_losses
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder_dict = nn.ModuleDict(
{key: nn.Linear(config.hidden_size, self.visual_losses[key]["num"]) for key in self.visual_losses}
)
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
output = {}
for key in self.visual_losses:
output[key] = self.decoder_dict[key](hidden_states)
return output
class LxmertPreTrainingHeads(nn.Module):
def __init__(self, config, lxmert_model_embedding_weights):
super(LxmertPreTrainingHeads, self).__init__()
self.predictions = LxmertLMPredictionHead(config, lxmert_model_embedding_weights)
self.seq_relationship = nn.Linear(config.hidden_size, 2)
def forward(self, sequence_output, pooled_output):
prediction_scores = self.predictions(sequence_output)
seq_relationship_score = self.seq_relationship(pooled_output)
return prediction_scores, seq_relationship_score
class LxmertPreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = LxmertConfig
load_tf_weights = load_tf_weights_in_lxmert
base_model_prefix = "lxmert"
def _init_weights(self, module):
"""Initialize the weights"""
if isinstance(module, nn.Linear):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
LXMERT_START_DOCSTRING = r"""
The LXMERT model was proposed in `LXMERT: Learning Cross-Modality Encoder Representations from Transformers
<https://arxiv.org/abs/1908.07490>`__ by Hao Tan and Mohit Bansal. It's a vision and language transformer model,
pretrained on a variety of multi-modal datasets comprising GQA, VQAv2.0, MSCOCO captions, and Visual Genome,
using a combination of masked language modeling, region-of-interest feature regression, cross-entropy loss for
question answering, attribute prediction, and object tag prediction.
This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic
methods the library implements for all its models (such as downloading or saving, resizing the input embeddings,
pruning heads etc.)
This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__
subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to
general usage and behavior.
Parameters:
config (:class:`~transformers.LxmertConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model
weights.
"""
LXMERT_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using :class:`~transformers.LxmertTokenizer`. See
:meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for
details.
`What are input IDs? <../glossary.html#input-ids>`__
visual_feats: (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_visual_features, visual_feat_dim)`):
This input represents visual features. These are ROI-pooled object features obtained from bounding boxes
using a faster R-CNN model.
These are currently not provided by the transformers library.
visual_pos: (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_visual_features, visual_pos_dim)`):
This input represents spatial features corresponding to their relative (via index) visual features. The
pre-trained LXMERT model expects these spatial features to be normalized bounding boxes on a scale of 0 to
1.
These are currently not provided by the transformers library.
attention_mask (:obj:`torch.FloatTensor` of shape :obj:`({0})`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
visual_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`({0})`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
token_type_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0,
1]``:
- 0 corresponds to a `sentence A` token,
- 1 corresponds to a `sentence B` token.
`What are token type IDs? <../glossary.html#token-type-ids>`__
inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`({0}, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert :obj:`input_ids` indices into associated
vectors than the model's internal embedding lookup matrix.
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned
tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for
more detail.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
"""
@add_start_docstrings(
"The bare Lxmert Model transformer outputting raw hidden-states without any specific head on top.",
LXMERT_START_DOCSTRING,
)
class LxmertModel(LxmertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.embeddings = LxmertEmbeddings(config)
self.encoder = LxmertEncoder(config)
self.pooler = LxmertPooler(config)
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, new_embeddings):
self.embeddings.word_embeddings = new_embeddings
@add_start_docstrings_to_model_forward(LXMERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=LxmertModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
visual_feats=None,
visual_pos=None,
attention_mask=None,
visual_attention_mask=None,
token_type_ids=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
if visual_feats is None:
raise ValueError("`visual_feats` cannot be `None`")
if visual_pos is None:
raise ValueError("`visual_pos` cannot be `None`")
device = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
attention_mask = torch.ones(input_shape, device=device)
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
# We create a 3D attention mask from a 2D tensor mask.
# Sizes are [batch_size, 1, 1, to_seq_length]
# So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
# this attention mask is more simple than the triangular masking of causal attention
# used in OpenAI GPT, we just need to prepare the broadcast dimension here.
extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
extended_attention_mask = extended_attention_mask.to(dtype=self.dtype)
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
# Process the visual attention mask
if visual_attention_mask is not None:
extended_visual_attention_mask = visual_attention_mask.unsqueeze(1).unsqueeze(2)
extended_visual_attention_mask = extended_visual_attention_mask.to(dtype=self.dtype)
extended_visual_attention_mask = (1.0 - extended_visual_attention_mask) * -10000.0
else:
extended_visual_attention_mask = None
# Positional Word Embeddings
embedding_output = self.embeddings(input_ids, token_type_ids, inputs_embeds)
# Run Lxmert encoder
encoder_outputs = self.encoder(
embedding_output,
extended_attention_mask,
visual_feats=visual_feats,
visual_pos=visual_pos,
visual_attention_mask=extended_visual_attention_mask,
output_attentions=output_attentions,
)
visual_encoder_outputs, lang_encoder_outputs = encoder_outputs[:2]
vision_hidden_states = visual_encoder_outputs[0]
language_hidden_states = lang_encoder_outputs[0]
all_attentions = ()
if output_attentions:
language_attentions = lang_encoder_outputs[1]
vision_attentions = visual_encoder_outputs[1]
cross_encoder_attentions = encoder_outputs[2]
all_attentions = (
language_attentions,
vision_attentions,
cross_encoder_attentions,
)
hidden_states = (language_hidden_states, vision_hidden_states) if output_hidden_states else ()
visual_output = vision_hidden_states[-1]
lang_output = language_hidden_states[-1]
pooled_output = self.pooler(lang_output)
if not return_dict:
return (lang_output, visual_output, pooled_output) + hidden_states + all_attentions
return LxmertModelOutput(
pooled_output=pooled_output,
language_output=lang_output,
vision_output=visual_output,
language_hidden_states=language_hidden_states if output_hidden_states else None,
vision_hidden_states=vision_hidden_states if output_hidden_states else None,
language_attentions=language_attentions if output_attentions else None,
vision_attentions=vision_attentions if output_attentions else None,
cross_encoder_attentions=cross_encoder_attentions if output_attentions else None,
)
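# Minimal usage sketch for LxmertModel (illustration only; the checkpoint name is an
# assumption and the visual features are random placeholders):
#   from transformers import LxmertTokenizer, LxmertModel
#   tokenizer = LxmertTokenizer.from_pretrained("unc-nlp/lxmert-base-uncased")
#   model = LxmertModel.from_pretrained("unc-nlp/lxmert-base-uncased")
#   inputs = tokenizer("A cat sits on a mat", return_tensors="pt")
#   outputs = model(**inputs, visual_feats=torch.randn(1, 36, 2048), visual_pos=torch.rand(1, 36, 4))
#   outputs.pooled_output.shape  # -> (1, hidden_size)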
@add_start_docstrings(
"""Lxmert Model with a specified pretraining head on top. """,
LXMERT_START_DOCSTRING,
)
class LxmertForPreTraining(LxmertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
# Configuration
self.config = config
self.num_qa_labels = config.num_qa_labels
self.visual_loss_normalizer = config.visual_loss_normalizer
# Use of pretraining tasks
self.task_mask_lm = config.task_mask_lm
self.task_obj_predict = config.task_obj_predict
self.task_matched = config.task_matched
self.task_qa = config.task_qa
# Lxmert backbone
self.lxmert = LxmertModel(config)
# Pre-training heads
self.cls = LxmertPreTrainingHeads(config, self.lxmert.embeddings.word_embeddings.weight)
if self.task_obj_predict:
self.obj_predict_head = LxmertVisualObjHead(config)
if self.task_qa:
self.answer_head = LxmertVisualAnswerHead(config, self.num_qa_labels)
# Weight initialization
# Initialize weights and apply final processing
self.post_init()
# Loss functions
self.loss_fcts = {
"l2": SmoothL1Loss(reduction="none"),
"visual_ce": CrossEntropyLoss(reduction="none"),
"ce": CrossEntropyLoss(),
}
visual_losses = {}
if config.visual_obj_loss:
visual_losses["obj"] = {
"shape": (-1,),
"num": config.num_object_labels,
"loss": "visual_ce",
}
if config.visual_attr_loss:
visual_losses["attr"] = {
"shape": (-1,),
"num": config.num_attr_labels,
"loss": "visual_ce",
}
if config.visual_feat_loss:
visual_losses["feat"] = {
"shape": (-1, config.visual_feat_dim),
"num": config.visual_feat_dim,
"loss": "l2",
}
self.visual_losses = visual_losses
def resize_num_qa_labels(self, num_labels):
"""
Build a resized question answering linear layer Module from a provided new linear layer. Increasing the size
will add newly initialized weights. Reducing the size will remove weights from the end
Args:
num_labels (:obj:`int`, `optional`):
New number of labels in the linear layer weight matrix. Increasing the size will add newly initialized
weights at the end. Reducing the size will remove weights from the end. If not provided or :obj:`None`,
just returns a pointer to the qa labels :obj:`torch.nn.Linear` module of the model without doing
anything.
Return:
:obj:`torch.nn.Linear`: Pointer to the resized Linear layer or the old Linear layer
"""
cur_qa_logit_layer = self.get_qa_logit_layer()
if num_labels is None or cur_qa_logit_layer is None:
return
new_qa_logit_layer = self._resize_qa_labels(num_labels)
self.config.num_qa_labels = num_labels
self.num_qa_labels = num_labels
return new_qa_logit_layer
def _resize_qa_labels(self, num_labels):
cur_qa_logit_layer = self.get_qa_logit_layer()
new_qa_logit_layer = self._get_resized_qa_labels(cur_qa_logit_layer, num_labels)
self._set_qa_logit_layer(new_qa_logit_layer)
return self.get_qa_logit_layer()
def get_qa_logit_layer(self) -> nn.Module:
"""
Returns the linear layer that produces question answering logits.
Returns:
:obj:`nn.Module`: A torch module mapping the question answering prediction hidden states or :obj:`None` if
LXMERT does not have a visual answering head.
"""
if hasattr(self, "answer_head"):
return self.answer_head.logit_fc[-1]
def _set_qa_logit_layer(self, qa_logit_layer):
self.answer_head.logit_fc[-1] = qa_logit_layer
def _get_resized_qa_labels(self, cur_qa_logit_layer, num_labels):
if num_labels is None:
return cur_qa_logit_layer
cur_qa_labels, hidden_dim = cur_qa_logit_layer.weight.size()
if cur_qa_labels == num_labels:
return cur_qa_logit_layer
# Build new linear output
if getattr(cur_qa_logit_layer, "bias", None) is not None:
new_qa_logit_layer = nn.Linear(hidden_dim, num_labels)
else:
new_qa_logit_layer = nn.Linear(hidden_dim, num_labels, bias=False)
new_qa_logit_layer.to(cur_qa_logit_layer.weight.device)
# initialize all new labels
self._init_weights(new_qa_logit_layer)
# Copy labels from the previous weights
num_labels_to_copy = min(cur_qa_labels, num_labels)
new_qa_logit_layer.weight.data[:num_labels_to_copy, :] = cur_qa_logit_layer.weight.data[:num_labels_to_copy, :]
if getattr(cur_qa_logit_layer, "bias", None) is not None:
new_qa_logit_layer.bias.data[:num_labels_to_copy] = cur_qa_logit_layer.bias.data[:num_labels_to_copy]
return new_qa_logit_layer
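# Usage sketch for the resizing helpers above (illustrative; the label count is an
# assumption): `model.resize_num_qa_labels(n)` rebuilds the final Linear layer of the
# answer head, re-initializes it, and copies over min(old, new) rows of weights/bias:
#   model.resize_num_qa_labels(3129)  # e.g. a VQA-style answer vocabulary size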
@add_start_docstrings_to_model_forward(LXMERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@replace_return_docstrings(output_type=LxmertForPreTrainingOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids=None,
visual_feats=None,
visual_pos=None,
attention_mask=None,
visual_attention_mask=None,
token_type_ids=None,
inputs_embeds=None,
labels=None,
obj_labels=None,
matched_label=None,
ans=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
**kwargs,
):
r"""
labels (``torch.LongTensor`` of shape ``(batch_size, sequence_length)``, `optional`):
Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ...,
config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored
(masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``
obj_labels: (``Dict[Str: Tuple[Torch.FloatTensor, Torch.FloatTensor]]``, `optional`):
each key is named after one of the visual losses, and each element of the tuple is of shape
``(batch_size, num_features)`` and ``(batch_size, num_features, visual_feature_dim)`` for the label id
and the label score, respectively
matched_label (``torch.LongTensor`` of shape ``(batch_size,)``, `optional`):
Labels for computing whether or not the text input matches the image (classification) loss. Input
should be a sequence pair (see :obj:`input_ids` docstring) Indices should be in ``[0, 1]``:
- 0 indicates that the sentence does not match the image,
- 1 indicates that the sentence does match the image.
ans: (``Torch.Tensor`` of shape ``(batch_size)``, `optional`):
A one-hot representation of the correct answer.
Returns:
"""
if "masked_lm_labels" in kwargs:
warnings.warn(
"The `masked_lm_labels` argument is deprecated and will be removed in a future version, use `labels` instead.",
FutureWarning,
)
labels = kwargs.pop("masked_lm_labels")
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
device = input_ids.device if input_ids is not None else inputs_embeds.device
lxmert_output = self.lxmert(
input_ids=input_ids,
visual_feats=visual_feats,
visual_pos=visual_pos,
token_type_ids=token_type_ids,
attention_mask=attention_mask,
visual_attention_mask=visual_attention_mask,
inputs_embeds=inputs_embeds,
output_hidden_states=output_hidden_states,
output_attentions=output_attentions,
return_dict=return_dict,
)
lang_output, visual_output, pooled_output = (
lxmert_output[0],
lxmert_output[1],
lxmert_output[2],
)
lang_prediction_scores, cross_relationship_score = self.cls(lang_output, pooled_output)
if self.task_qa:
answer_score = self.answer_head(pooled_output)
else:
answer_score = pooled_output[0][0]
total_loss = (
None
if (labels is None and matched_label is None and obj_labels is None and ans is None)
else torch.tensor(0.0, device=device)
)
if labels is not None and self.task_mask_lm:
masked_lm_loss = self.loss_fcts["ce"](
lang_prediction_scores.view(-1, self.config.vocab_size),
labels.view(-1),
)
total_loss += masked_lm_loss
if matched_label is not None and self.task_matched:
matched_loss = self.loss_fcts["ce"](cross_relationship_score.view(-1, 2), matched_label.view(-1))
total_loss += matched_loss
if obj_labels is not None and self.task_obj_predict:
total_visual_loss = torch.tensor(0.0, device=device)
visual_prediction_scores_dict = self.obj_predict_head(visual_output)
for key, key_info in self.visual_losses.items():
label, mask_conf = obj_labels[key]
output_dim = key_info["num"]
loss_fct_name = key_info["loss"]
label_shape = key_info["shape"]
weight = self.visual_loss_normalizer
visual_loss_fct = self.loss_fcts[loss_fct_name]
visual_prediction_scores = visual_prediction_scores_dict[key]
visual_loss = visual_loss_fct(
visual_prediction_scores.view(-1, output_dim),
label.view(*label_shape),
)
if visual_loss.dim() > 1: # Regression Losses
visual_loss = visual_loss.mean(1)
visual_loss = (visual_loss * mask_conf.view(-1)).mean() * weight
total_visual_loss += visual_loss
total_loss += total_visual_loss
if ans is not None and self.task_qa:
answer_loss = self.loss_fcts["ce"](answer_score.view(-1, self.num_qa_labels), ans.view(-1))
total_loss += answer_loss
if not return_dict:
output = (
lang_prediction_scores,
cross_relationship_score,
answer_score,
) + lxmert_output[3:]
return ((total_loss,) + output) if total_loss is not None else output
return LxmertForPreTrainingOutput(
loss=total_loss,
prediction_logits=lang_prediction_scores,
cross_relationship_score=cross_relationship_score,
question_answering_score=answer_score,
language_hidden_states=lxmert_output.language_hidden_states,
vision_hidden_states=lxmert_output.vision_hidden_states,
language_attentions=lxmert_output.language_attentions,
vision_attentions=lxmert_output.vision_attentions,
cross_encoder_attentions=lxmert_output.cross_encoder_attentions,
)
@add_start_docstrings(
"""Lxmert Model with a visual-answering head on top for downstream QA tasks""",
LXMERT_START_DOCSTRING,
)
class LxmertForQuestionAnswering(LxmertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
# Configuration
self.config = config
self.num_qa_labels = config.num_qa_labels
self.visual_loss_normalizer = config.visual_loss_normalizer
# Lxmert backbone
self.lxmert = LxmertModel(config)
self.answer_head = LxmertVisualAnswerHead(config, self.num_qa_labels)
# Weight initialization
# Initialize weights and apply final processing
self.post_init()
# Loss function
self.loss = CrossEntropyLoss()
def resize_num_qa_labels(self, num_labels):
"""
Build a resized question answering linear layer Module from a provided new linear layer. Increasing the size
will add newly initialized weights. Reducing the size will remove weights from the end
Args:
num_labels (:obj:`int`, `optional`):
New number of labels in the linear layer weight matrix. Increasing the size will add newly initialized
weights at the end. Reducing the size will remove weights from the end. If not provided or :obj:`None`,
just returns a pointer to the qa labels :obj:`torch.nn.Linear` module of the model without doing
anything.
Return:
:obj:`torch.nn.Linear`: Pointer to the resized Linear layer or the old Linear layer
"""
cur_qa_logit_layer = self.get_qa_logit_layer()
if num_labels is None or cur_qa_logit_layer is None:
return
new_qa_logit_layer = self._resize_qa_labels(num_labels)
self.config.num_qa_labels = num_labels
self.num_qa_labels = num_labels
return new_qa_logit_layer
def _resize_qa_labels(self, num_labels):
cur_qa_logit_layer = self.get_qa_logit_layer()
new_qa_logit_layer = self._get_resized_qa_labels(cur_qa_logit_layer, num_labels)
self._set_qa_logit_layer(new_qa_logit_layer)
return self.get_qa_logit_layer()
def get_qa_logit_layer(self) -> nn.Module:
"""
Returns the linear layer that produces question answering logits
Returns:
:obj:`nn.Module`: A torch module mapping the question answering prediction hidden states. :obj:`None`: A
NoneType object if Lxmert does not have the visual answering head.
"""
if hasattr(self, "answer_head"):
return self.answer_head.logit_fc[-1]
def _set_qa_logit_layer(self, qa_logit_layer):
self.answer_head.logit_fc[-1] = qa_logit_layer
def _get_resized_qa_labels(self, cur_qa_logit_layer, num_labels):
if num_labels is None:
return cur_qa_logit_layer
cur_qa_labels, hidden_dim = cur_qa_logit_layer.weight.size()
if cur_qa_labels == num_labels:
return cur_qa_logit_layer
# Build new linear output
if getattr(cur_qa_logit_layer, "bias", None) is not None:
new_qa_logit_layer = nn.Linear(hidden_dim, num_labels)
else:
new_qa_logit_layer = nn.Linear(hidden_dim, num_labels, bias=False)
new_qa_logit_layer.to(cur_qa_logit_layer.weight.device)
# initialize all new labels
self._init_weights(new_qa_logit_layer)
# Copy labels from the previous weights
num_labels_to_copy = min(cur_qa_labels, num_labels)
new_qa_logit_layer.weight.data[:num_labels_to_copy, :] = cur_qa_logit_layer.weight.data[:num_labels_to_copy, :]
if getattr(cur_qa_logit_layer, "bias", None) is not None:
new_qa_logit_layer.bias.data[:num_labels_to_copy] = cur_qa_logit_layer.bias.data[:num_labels_to_copy]
return new_qa_logit_layer
@add_start_docstrings_to_model_forward(LXMERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=LxmertForQuestionAnsweringOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
visual_feats=None,
visual_pos=None,
attention_mask=None,
visual_attention_mask=None,
token_type_ids=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels: (``Torch.Tensor`` of shape ``(batch_size)``, `optional`):
A one-hot representation of the correct answer
Returns:
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
lxmert_output = self.lxmert(
input_ids=input_ids,
visual_feats=visual_feats,
visual_pos=visual_pos,
token_type_ids=token_type_ids,
attention_mask=attention_mask,
visual_attention_mask=visual_attention_mask,
inputs_embeds=inputs_embeds,
output_hidden_states=output_hidden_states,
output_attentions=output_attentions,
return_dict=return_dict,
)
pooled_output = lxmert_output[2]
answer_score = self.answer_head(pooled_output)
loss = None
if labels is not None:
loss = self.loss(answer_score.view(-1, self.num_qa_labels), labels.view(-1))
if not return_dict:
output = (answer_score,) + lxmert_output[3:]
return (loss,) + output if loss is not None else output
return LxmertForQuestionAnsweringOutput(
loss=loss,
question_answering_score=answer_score,
language_hidden_states=lxmert_output.language_hidden_states,
vision_hidden_states=lxmert_output.vision_hidden_states,
language_attentions=lxmert_output.language_attentions,
vision_attentions=lxmert_output.vision_attentions,
cross_encoder_attentions=lxmert_output.cross_encoder_attentions,
)
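# Minimal fine-tuning-style sketch for LxmertForQuestionAnswering (illustration only;
# checkpoint name, label id and feature tensors are assumptions). Note that although
# the docstring mentions a one-hot answer, the CrossEntropyLoss in forward() consumes
# class indices via labels.view(-1):
#   model = LxmertForQuestionAnswering.from_pretrained("unc-nlp/lxmert-base-uncased", num_qa_labels=1000)
#   out = model(**inputs, visual_feats=feats, visual_pos=boxes, labels=torch.tensor([42]))
#   out.loss.backward()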
|
the-stack_0_14002 | # MenuTitle: Make Kerning Display
# -*- coding: utf-8 -*-
__doc__ = """
Open a tab containing kerning strings for the selected glyphs.
"""
import re
from collections import defaultdict, OrderedDict
try:
from itertools import zip_longest
except ImportError:
from itertools import izip_longest as zip_longest
from vanilla import (
Window,
TextBox,
RadioGroup,
Button,
CheckBox,
)
import _kerningStrings
Glyphs.clearLog()
# Glyphs.showMacroWindow()
quotations = [
('/parenleft', '/parenright'),
('/bracketleft', '/bracketright'),
('/braceleft', '/braceright'),
('/quoteleft', '/quoteright'),
('/quotedblleft', '/quotedblright'),
('/quotesinglbase', '/quoteleft'),
('/quotedblbase', '/quotedblleft'),
('/quotedblbase', '/quotedblright'),
('/quoteright', '/quoteright'),
('/guillemetleft', '/guillemetright'),
('/guilsinglleft', '/guilsinglright'),
('/guillemetright', '/guillemetleft'),
('/guilsinglright', '/guilsinglleft')
]
# punctuations = ['period', 'comma', 'colon', 'semicolon', 'hyphen']
punctuations = {}
punctuations['dflt'] = '. , : ; -'.split(' ')
punctuations['greek'] = ["/comma", "/period", "/anoteleia", "/questiongreek"]
punctuations['armenian'] = ["/comma-arm", '/period-arm', '/hyphen-arm', "/emphasis-arm", "/exclam-arm", "/question-arm", "/abbreviation-arm", ]
class makeDisplay(object):
def __init__(self):
self.verboten = {
'right': ['napostrophe', 'Omegadasiavaria'],
'left': ['ldot', 'Ldot', 'ldot.sc', 'sigmafinal'],
'both': ['*.tf', '*.tosf', '.notdef', 'NULL', 'CR']
}
self.category = None
self.messages = []
self.interpolated_fonts = dict()
self.use_real = True
self.use_selection = False
self.ignore_red = False
self.current_glyph = None
self.leftside_kerning_groups = None
self.rightside_kerning_groups = None
self.all_kern_categories = self.get_all_kern_categories()
self.categories_leftside = self.get_categorised_glyphs('left')
self.categories_rightside = self.get_categorised_glyphs('right')
item_height = 24.0
w_width = 300.0
w_height = item_height * (7 + len(self.all_kern_categories))
margin = 10
next_y = margin
col_1_width = w_width - (margin * 2)
item_height = 24
radio_height = item_height * len(self.all_kern_categories)
self.w = Window((w_width, w_height), "Make Kerning Strings")
self.w.text_1 = TextBox((margin, next_y, w_width, item_height), "Kern with:", sizeStyle='regular')
next_y += item_height
self.w.radioCategories = RadioGroup((margin, next_y, col_1_width, radio_height), self.all_kern_categories, sizeStyle='regular')
self.w.radioCategories.set(0)
next_y += radio_height + margin
self.w.use_real = CheckBox((margin, next_y, col_1_width, item_height), "Use real words", value=True, sizeStyle='regular')
next_y += item_height
self.w.use_selected = CheckBox((margin, next_y, col_1_width, item_height), "Use the selected glyphs verbatim", value=False, sizeStyle='regular')
next_y += item_height
self.w.ignore_red = CheckBox((margin, next_y, col_1_width, item_height), "Ignore red marked glyphs", value=False, sizeStyle='regular')
next_y += item_height + margin
self.w.gobutton = Button((margin + (col_1_width / 4), next_y, col_1_width / 2, item_height), 'Make Strings', callback=self.makeitso)
self.w.setDefaultButton(self.w.gobutton)
self.w.center()
self.w.open()
# self.makeitso(None)
def sbuttonCallback(self, sender):
self.s.close()
@staticmethod
def has_smallcaps():
for g in Glyphs.font.glyphs:
if g.subCategory == 'Smallcaps':
return True
return False
def get_all_kern_categories(self):
kcats = [
'Uppercase',
'Lowercase',
]
if self.has_smallcaps():
kcats.append('Smallcaps')
kcats += [
'Quotes',
'Number',
'Punctuation',
'Other',
]
return kcats
def get_canonincal_kerning_glyph(self, layer, pair_side):
g = layer.parent
if self.use_selection:
return g
if pair_side == 'left':
g = Glyphs.font.glyphs[layer.parent.rightKerningGroup] or layer.parent
if pair_side == 'right':
g = Glyphs.font.glyphs[layer.parent.leftKerningGroup] or layer.parent
if g is None:
g = layer.parent
return g
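# Descriptive note (added comment): for the left member of a kerning pair the glyph's
# *right* kerning group is the canonical key, and vice versa; e.g. a selected /oacute
# whose rightKerningGroup is "o" resolves to the /o glyph (assuming that glyph exists),
# so strings are built once per kerning group rather than once per glyph.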
@staticmethod
def make_list_unique(this_list):
unique_list = []
for x in this_list:
if x in unique_list or x is None:
continue
unique_list.append(x)
return unique_list
def get_categorised_glyphs(self, side):
# cats = defaultdict(lambda: defaultdict(list))
cats = dict((k, defaultdict(list)) for k in self.all_kern_categories)
for g in [x for x in Glyphs.font.glyphs if self.is_elligable(x)]:
l = cats.get(g.category, cats.get(g.subCategory, cats['Other']))
l[g.script].append(self.get_canonincal_kerning_glyph(g.layers[0], side))
for cat in cats.keys():
for script in cats[cat].keys():
cats[cat][script] = self.make_list_unique(cats[cat][script])
return cats
def get_string(self, left_g, right_g):
string = None
if self.category == 'Quotes':
cat = left_g.subCategory if left_g.subCategory != 'Other' else left_g.category
pattern = _kerningStrings.patterns.get(left_g.script, _kerningStrings.patterns.get('latin')).get(cat + '-Quotes', '')
strings = [pattern.format(right=right_g.name, left=left_g.name, qL=quote_pair[0], qR=quote_pair[1]).replace(' /', '/') for quote_pair in _kerningStrings.quotations]
string = ' '.join(strings)
if not string and self.use_real:
base_name_left, _, suffix_left = left_g.name.partition('.')
base_name_right, _, suffix_right = right_g.name.partition('.')
potentials = [
base_name_left + base_name_right,
base_name_left + '/' + base_name_right,
'/' + base_name_left + ' ' + base_name_right,
'/' + base_name_left + '/' + base_name_right,
]
for s in potentials:
string = _kerningStrings.strings.get(s)
if string:
break
print(s)
if not string:
pattern = self.get_pattern(left_g, right_g)
string = pattern.format(right=right_g.name, left=left_g.name).replace(' /', '/')
if not string:
string = '/' + left_g.name + '/' + right_g.name
return string
def get_category_for_glyph(self, glyph):
if glyph.category in self.all_kern_categories:
return glyph.category
if glyph.subCategory in self.all_kern_categories:
return glyph.subCategory
if glyph.subCategory == 'Currency':
return 'Number'
return 'Other'
def get_pattern(self, main_glyph, other_glyph):
scripts_patterns = _kerningStrings.patterns.get(main_glyph.script, {})
# print(self.get_category_for_glyph(main_glyph))
# print(self.get_category_for_glyph(main_glyph) + '-' + self.get_category_for_glyph(other_glyph), self.all_kern_categories)
pattern = scripts_patterns.get(self.get_category_for_glyph(main_glyph) + '-' + self.get_category_for_glyph(other_glyph), '')
if self.category == 'Number':
suffix = ''.join(main_glyph.name.partition('.')[1:])
else:
suffix = ''
try:
pattern = pattern.format(
suffix=suffix,
left='{left}',
right='{right}',
)
except KeyError:
pass
return pattern
def is_elligable(self, glyph, side='both'):
if self.ignore_red and glyph.color == 0:
return False
if not glyph.export:
return False
for vgn in self.verboten[side]:
if re.match(vgn.replace('.', '\\.').replace('*', '.*'), glyph.name):
return False
return True
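# Illustrative note on the filter above: wildcard entries in self.verboten are turned
# into regexes, e.g. '*.tf' becomes '.*\.tf' and therefore skips any glyph name ending
# in '.tf' (such as 'one.tf'), while plain entries like 'NULL' or 'CR' match literally.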
def makeitso(self, sender):
try:
self.w.close()
except AttributeError:
pass
self.category = self.all_kern_categories[self.w.radioCategories.get()]
self.use_real = self.w.use_real.get()
self.use_selection = self.w.use_selected.get()
self.ignore_red = self.w.ignore_red.get()
all_strings = []
if self.category == 'Quotes':
left_of_string_glyphs = self.make_list_unique([self.get_canonincal_kerning_glyph(sl, 'right') for sl in Glyphs.font.selectedLayers if self.is_elligable(sl.parent, 'right')])
right_of_string_glyphs = self.make_list_unique([self.get_canonincal_kerning_glyph(sl, 'left') for sl in Glyphs.font.selectedLayers if self.is_elligable(sl.parent, 'left')])
pairs = zip_longest(left_of_string_glyphs, right_of_string_glyphs)
for p in pairs:
gl, gr = p
if gl is None:
gl = gr if gr in left_of_string_glyphs else left_of_string_glyphs[0]
if gr is None:
gr = gl if gl in left_of_string_glyphs else right_of_string_glyphs[0]
kerning_string = self.get_string(gl, gr)
if kerning_string not in all_strings:
all_strings.append(kerning_string)
else:
# Holds kerning key glyphs that have been seen already, to avoid duplicates
processed_main_glyphs_left = OrderedDict()
processed_main_glyphs_right = OrderedDict()
# print([(k, self.categories_rightside[k].keys()) for k in self.categories_rightside.keys()])
for sl in Glyphs.font.selectedLayers:
# Process the selected glyph on the left side
main_g_left = self.get_canonincal_kerning_glyph(sl, 'left')
pair_strings_left = []
if self.is_elligable(main_g_left, 'left'):
if main_g_left.name not in processed_main_glyphs_left.keys():
processed_main_glyphs_left[main_g_left.name] = [sl.parent.name]
try:
if sl.parent.script:
other_glyphs_rightside = self.categories_rightside[self.category].get(sl.parent.script, self.categories_rightside[self.category].get(None))
else:
other_glyphs_rightside = self.categories_rightside[self.category].get(None, self.categories_rightside[self.category].get('latin'))
except KeyError:
other_glyphs_rightside = []
# print(self.category, self.categories_rightside.keys())
print(sl.parent.script, self.category, self.categories_rightside[self.category].keys())
for g in other_glyphs_rightside:
if not self.is_elligable(g, 'right'):
continue
other_g = self.get_canonincal_kerning_glyph(g.layers[sl.associatedMasterId], 'right')
kerning_string_left = self.get_string(main_g_left, other_g)
if kerning_string_left not in pair_strings_left:
pair_strings_left.append(kerning_string_left)
else:
processed_main_glyphs_left[main_g_left.name].append(sl.parent.name)
if pair_strings_left:
pair_strings_left.insert(0, main_g_left.name)
# Process the selected glyph on the right side
main_g_right = self.get_canonincal_kerning_glyph(sl, 'right')
pair_strings_right = []
if self.is_elligable(main_g_right, 'right'):
if main_g_right.name not in processed_main_glyphs_right.keys():
processed_main_glyphs_right[main_g_right.name] = [sl.parent.name]
if self.category == 'Quotes':
other_glyphs_leftside = [main_g_right]
main_g_right = self.get_canonincal_kerning_glyph(sl, 'left')
else:
if sl.parent.script:
other_glyphs_leftside = self.categories_leftside[self.category].get(sl.parent.script, self.categories_leftside[self.category].get(None, []))
else:
other_glyphs_leftside = self.categories_leftside[self.category].get(None, self.categories_leftside[self.category].get('latin', []))
for g in other_glyphs_leftside:
if not self.is_elligable(g, 'left'):
continue
other_g = self.get_canonincal_kerning_glyph(g.layers[sl.associatedMasterId], 'left')
kerning_string_right = self.get_string(other_g, main_g_right)
if kerning_string_right not in pair_strings_right:
pair_strings_right.append(kerning_string_right)
else:
processed_main_glyphs_right[main_g_right.name].append(sl.parent.name)
if pair_strings_right:
pair_strings_right.insert(0, main_g_right.name)
left_string = ' '.join(self.make_list_unique(pair_strings_left))
right_string = ' '.join(self.make_list_unique(pair_strings_right))
if all([left_string, right_string]):
pair_strings = '\n'.join([left_string, right_string])
else:
pair_strings = left_string or right_string
# print(':', pair_strings, ':')
if pair_strings:
all_strings.append(pair_strings)
Glyphs.font.newTab('\n\n'.join(all_strings))
Glyphs.font.currentTab.previewInstances = 'live'
Glyphs.font.currentTab.scale = 0.065
Glyphs.font.currentTab.textCursor = 3
Glyphs.font.tool = 'TextTool'
# Glyphs.showMacroWindow()
makeDisplay()
print('Done.')
|
the-stack_0_14007 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.core import serializers
from django.core.management import call_command
from django.db import migrations, models
import os
fixture_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '../fixtures'))
def load_fixture(fixture_filename):
fixture_file = os.path.join(fixture_dir, fixture_filename)
fixture = open(fixture_file, 'rb')
objects = serializers.deserialize('json', fixture, ignorenonexistent=True)
for obj in objects:
obj.save()
fixture.close()
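# Descriptive note (added comment): ignorenonexistent=True makes deserialization skip
# fields that no longer exist on the current models, so these fixtures keep loading
# after later schema changes; each deserialized object is written with obj.save().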
class Migration(migrations.Migration):
def load_data(apps, schema_editor):
load_fixture("awardees.json")
load_fixture("countries.json")
load_fixture("material_types.json")
load_fixture("languages.json")
load_fixture("institutions.json")
load_fixture("ethnicities.json")
load_fixture("labor_presses.json")
load_fixture("countries.json")
dependencies = [
("core", "0002_auto_20160713_1509"),
]
operations = [
migrations.RunPython(load_data)
]
|
the-stack_0_14008 | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from docutils import nodes
from functools import partial
from sphinx.util.docfields import _is_single_paragraph
from sphinx.util import docfields
from sphinx import directives, addnodes
from sphinx import addnodes
from sphinx.addnodes import desc, desc_signature
from .utils import transform_node as _transform_node
from .nodes import remarks
TYPE_SEP_PATTERN = r'(\[|\]|, |\(|\))'
def _get_desc_data(node):
assert node.tagname == 'desc'
if node.attributes['domain'] != 'py':
print(
'Skipping Domain Object (%s)' % node.attributes['domain']
)
return None, None
try:
module = node[0].attributes['module']
full_name = node[0].attributes['fullname'].split('.')[-1]
except KeyError as e:
print("[docfx_yaml] There maybe some syntax error in docstring near: " + node.astext())
raise e
try:
uid = node[0].attributes['ids'][0]
except Exception:
uid = '{module}.{full_name}'.format(module=module, full_name=full_name)
print('Non-standard id: %s' % uid)
return full_name, uid
def _is_desc_of_enum_class(node):
assert node.tagname == 'desc_content'
if node[0] and node[0].tagname == 'paragraph' and node[0].astext() == 'Bases: enum.Enum':
return True
return False
def _hacked_transform(typemap, node):
"""
Taken from docfields.py from sphinx.
This does all the steps around gathering data,
but doesn't actually do the node transformations.
"""
entries = []
groupindices = {}
types = {}
# step 1: traverse all fields and collect field types and content
for field in node:
fieldname, fieldbody = field
try:
# split into field type and argument
fieldtype, fieldarg = fieldname.astext().split(None, 1)
except ValueError:
# maybe an argument-less field type?
fieldtype, fieldarg = fieldname.astext(), ''
typedesc, is_typefield = typemap.get(fieldtype, (None, None))
# sort out unknown fields
if typedesc is None or typedesc.has_arg != bool(fieldarg):
# either the field name is unknown, or the argument doesn't
# match the spec; capitalize field name and be done with it
new_fieldname = fieldtype[0:1].upper() + fieldtype[1:]
if fieldarg:
new_fieldname += ' ' + fieldarg
fieldname[0] = nodes.Text(new_fieldname)
entries.append(field)
continue
typename = typedesc.name
# collect the content, trying not to keep unnecessary paragraphs
if _is_single_paragraph(fieldbody):
content = fieldbody.children[0].children
else:
content = fieldbody.children
# if the field specifies a type, put it in the types collection
if is_typefield:
# filter out only inline nodes; others will result in invalid
# markup being written out
content = [n for n in content if isinstance(n, nodes.Inline) or
isinstance(n, nodes.Text)]
if content:
types.setdefault(typename, {})[fieldarg] = content
continue
# also support syntax like ``:param type name:``
if typedesc.is_typed:
try:
argtype, argname = fieldarg.split(None, 1)
except ValueError:
pass
else:
types.setdefault(typename, {})[argname] = \
[nodes.Text(argtype)]
fieldarg = argname
translatable_content = nodes.inline(fieldbody.rawsource,
translatable=True)
translatable_content.source = fieldbody.parent.source
translatable_content.line = fieldbody.parent.line
translatable_content += content
# grouped entries need to be collected in one entry, while others
# get one entry per field
if typedesc.is_grouped:
if typename in groupindices:
group = entries[groupindices[typename]]
else:
groupindices[typename] = len(entries)
group = [typedesc, []]
entries.append(group)
entry = typedesc.make_entry(fieldarg, [translatable_content])
group[1].append(entry)
else:
entry = typedesc.make_entry(fieldarg, [translatable_content])
entries.append([typedesc, entry])
return (entries, types)
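# Illustrative outcome of the traversal above (field names assumed): a docstring line
#   :param str name: the user name
# is split into fieldtype 'param' and argument 'str name', so types ends up as
# {'parameter': {'name': [Text('str')]}} and entries holds the typed 'parameter' entry,
# which get_data_structure() in patch_docfields() later turns into docfx 'parameters'.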
def patch_docfields(app):
"""
Grab syntax data from the Sphinx info fields.
This is done by monkeypatching into the DocFieldTransformer,
which is what Sphinx uses to transform the docutils ``nodes.field``
into the sphinx ``docfields.Field`` objects.
See usage in Sphinx
`here <https://github.com/sphinx-doc/sphinx/blob/master/sphinx/directives/__init__.py#L180>`_.
This also performs the RST doctree to Markdown transformation on the content,
using the :class:`docfx_yaml.writers.MarkdownWriter`.
"""
transform_node = partial(_transform_node, app)
def get_data_structure(entries, types, field_object):
"""
Get a proper docfx YAML data structure from the entries & types
"""
data = {
'parameters': [],
'variables': [],
'exceptions': [],
'return': {},
'references': [],
}
def make_param(_id, _description, _type=None, _required=None):
ret = {
'id': _id,
'description': _description.strip(" \n\r\t")
}
if _type:
ret['type'] = _type
if _required is not None:
ret['isRequired'] = _required
return ret
def transform_para(para_field):
if isinstance(para_field, addnodes.pending_xref):
return transform_node(para_field)
else:
return para_field.astext()
def resolve_type(data_type):
# Remove @ ~ and \n for cross reference in parameter/return value type to apply to docfx correctly
data_type = re.sub('[@~\n]', '', data_type)
# Add references for docfx to resolve ref if type contains TYPE_SEP_PATTERN
_spec_list = []
_spec_fullnames = re.split(TYPE_SEP_PATTERN, data_type)
_added_reference = {}
if len(_spec_fullnames) > 1:
_added_reference_name = ''
for _spec_fullname in _spec_fullnames:
if _spec_fullname != '':
_spec = {}
_spec['name'] = _spec_fullname.split('.')[-1]
_spec['fullName'] = _spec_fullname
if re.match(TYPE_SEP_PATTERN, _spec_fullname) is None:
_spec['uid'] = _spec_fullname
_spec_list.append(_spec)
_added_reference_name += _spec['name']
_added_reference = {
'uid': data_type,
'name': _added_reference_name,
'fullName': data_type,
'spec.python': _spec_list
}
return data_type, _added_reference
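# Worked example for resolve_type (illustrative, the type string is an assumption):
# 'list[str]' is split by TYPE_SEP_PATTERN into ['list', '[', 'str', ']', ''], giving
# spec entries with uids 'list' and 'str' (the brackets get no uid) and an added
# reference whose displayed name is the re-joined string 'list[str]'.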
def extract_exception_desc(field_object):
ret = []
if len(field_object) > 0:
for field in field_object:
if 'field_name' == field[0].tagname and field[0].astext() == 'Raises':
assert field[1].tagname == 'field_body'
field_body = field[1]
children = [n for n in field_body
if not isinstance(n, nodes.Invisible)]
for child in children:
if isinstance(child, nodes.paragraph):
pending_xref_index = child.first_child_matching_class(addnodes.pending_xref)
if pending_xref_index is not None:
pending_xref = child[pending_xref_index]
raise_type_index = pending_xref.first_child_matching_class(nodes.literal)
if raise_type_index is not None:
raise_type = pending_xref[raise_type_index]
ret.append({'type': pending_xref['reftarget'], 'desc': raise_type.astext()})
return ret
for entry in entries:
if isinstance(entry, nodes.field):
# pass-through old field
pass
else:
fieldtype, content = entry
fieldtypes = types.get(fieldtype.name, {})
if fieldtype.name == 'exceptions':
for _type, _description in content:
data['exceptions'].append({
'type': _type,
'description': transform_node(_description[0]).strip(" \n\r\t")
})
if fieldtype.name == 'returntype':
for returntype_node in content[1]:
returntype_ret = transform_node(returntype_node)
if returntype_ret:
# Support or in returntype
for returntype in re.split('[ \n]or[ \n]', returntype_ret):
returntype, _added_reference = resolve_type(returntype)
if _added_reference:
if len(data['references']) == 0:
data['references'].append(_added_reference)
elif any(r['uid'] != _added_reference['uid'] for r in data['references']):
data['references'].append(_added_reference)
data['return'].setdefault('type', []).append(returntype)
if fieldtype.name == 'returnvalue':
returnvalue_ret = transform_node(content[1][0])
if returnvalue_ret:
data['return']['description'] = returnvalue_ret.strip(" \n\r\t")
if fieldtype.name in ['parameter', 'variable', 'keyword']:
for field, node_list in content:
_id = field
_description = transform_node(node_list[0])
if field in fieldtypes:
_type = u''.join(transform_para(n) for n in fieldtypes[field])
else:
_type = None
_para_types = []
if fieldtype.name == 'parameter' or fieldtype.name == 'keyword':
if _type:
# Support or in parameter type
for _s_type in re.split('[ \n]or[ \n]', _type):
_s_type, _added_reference = resolve_type(_s_type)
if _added_reference:
if len(data['references']) == 0:
data['references'].append(_added_reference)
elif any(r['uid'] != _added_reference['uid'] for r in data['references']):
data['references'].append(_added_reference)
_para_types.append(_s_type)
_data = make_param(_id=_id, _type=_para_types, _description=_description, _required=False if fieldtype.name == 'keyword' else True)
data['parameters'].append(_data)
if fieldtype.name == 'variable':
if _type:
# Support or in variable type
for _s_type in re.split('[ \n]or[ \n]', _type):
_s_type, _added_reference = resolve_type(_s_type)
if _added_reference:
if len(data['references']) == 0:
data['references'].append(_added_reference)
elif any(r['uid'] != _added_reference['uid'] for r in data['references']):
data['references'].append(_added_reference)
_para_types.append(_s_type)
_data = make_param(_id=_id, _type=_para_types, _description=_description)
data['variables'].append(_data)
ret_list = extract_exception_desc(field_object)
for ret in ret_list:
# only use type in exceptions
data.setdefault('exceptions', []).append({
'type': ret['type']
})
return data
class PatchedDocFieldTransformer(docfields.DocFieldTransformer):
@staticmethod
def type_mapping(type_name):
mapping = {
"staticmethod": "method",
"classmethod": "method",
"exception": "class"
}
return mapping[type_name] if type_name in mapping else type_name
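        # Illustrative behaviour of the mapping above (derived from the dict, not from the Sphinx API):
        #   PatchedDocFieldTransformer.type_mapping('staticmethod')  # -> 'method'
        #   PatchedDocFieldTransformer.type_mapping('exception')     # -> 'class'
        #   PatchedDocFieldTransformer.type_mapping('function')      # -> 'function' (unmapped names pass through)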
def __init__(self, directive):
self.directive = directive
super(PatchedDocFieldTransformer, self).__init__(directive)
def transform_all(self, node):
"""Transform all field list children of a node."""
# don't traverse, only handle field lists that are immediate children
summary = []
data = {}
name, uid = _get_desc_data(node.parent)
for child in node:
if isinstance(child, remarks):
remarks_string = transform_node(child)
data['remarks'] = remarks_string
elif isinstance(child, addnodes.desc):
if child.get('desctype') == 'attribute':
                        attribute_map = {}  # used to detect duplicated attributes in the intermediate data and merge them
for item in child:
if isinstance(item, desc_signature) and any(isinstance(n, addnodes.desc_annotation) for n in item):
# capture attributes data and cache it
data.setdefault('added_attribute', [])
item_ids = item.get('ids', [''])
if len(item_ids) == 0: # find a node with no 'ids' attribute
curuid = item.get('module', '') + '.' + item.get('fullname', '')
# generate its uid by module and fullname
else:
curuid = item_ids[0]
if len(curuid) > 0:
parent = curuid[:curuid.rfind('.')]
name = item.children[0].astext()
if curuid in attribute_map:
if len(item_ids) == 0: # ensure the order of docstring attributes and real attributes is fixed
attribute_map[curuid]['syntax']['content'] += (' ' + item.astext())
# concat the description of duplicated nodes
else:
attribute_map[curuid]['syntax']['content'] = item.astext() + ' ' + attribute_map[curuid]['syntax']['content']
else:
if _is_desc_of_enum_class(node):
addedData = {
'uid': curuid,
'id': name,
'parent': parent,
'langs': ['python'],
'name': name,
'fullName': curuid,
'type': item.parent.get('desctype'),
'module': item.get('module'),
'syntax': {
'content': item.astext(),
'return': {
'type': [parent]
}
}
}
else:
addedData = {
'uid': curuid,
'class': parent,
'langs': ['python'],
'name': name,
'fullName': curuid,
'type': 'attribute',
'module': item.get('module'),
'syntax': {
'content': item.astext()
}
}
attribute_map[curuid] = addedData
else:
raise Exception('ids of node: ' + repr(item) + ' is missing.')
                                # the node has no ids, is not a duplicate, and a uid could not be generated.
if 'added_attribute' in data:
data['added_attribute'].extend(attribute_map.values()) # Add attributes data to a temp list
# Don't recurse into child nodes
continue
elif isinstance(child, nodes.field_list):
(entries, types) = _hacked_transform(self.typemap, child)
_data = get_data_structure(entries, types, child)
data.update(_data)
elif isinstance(child, addnodes.seealso):
data['seealso'] = transform_node(child)
elif isinstance(child, nodes.admonition) and 'Example' in child[0].astext():
# Remove the admonition node
child_copy = child.deepcopy()
child_copy.pop(0)
data['example'] = transform_node(child_copy)
else:
content = transform_node(child)
# skip 'Bases' in summary
if not content.startswith('Bases: '):
summary.append(content)
if "desctype" in node.parent and node.parent["desctype"] == 'class':
data.pop('exceptions', '') # Make sure class doesn't have 'exceptions' field.
if summary:
data['summary'] = '\n'.join(summary)
# Don't include empty data
for key, val in data.copy().items():
if not val:
del data[key]
data['type'] = PatchedDocFieldTransformer.type_mapping(node.parent["desctype"]) if "desctype" in node.parent else 'unknown'
self.directive.env.docfx_info_field_data[uid] = data
super(PatchedDocFieldTransformer, self).transform_all(node)
directives.DocFieldTransformer = PatchedDocFieldTransformer
|
the-stack_0_14010 | import os
GRID_FOLDER = "gpw-v4-national-identifier-grid-rev11_30_sec_asc/"
GRID_LOOKUP = "gpw_v4_national_identifier_grid_rev11_lookup.txt"
DATA_FOLDER = os.path.expanduser("~") + "/.sedac_gpw_parser/"
def id_lookup(searchterm, lookup_file=DATA_FOLDER+GRID_FOLDER+GRID_LOOKUP,
verbose=True):
success = False
names_ids = []
searchterm = searchterm.lower().replace(" ", "")
with open(lookup_file, "r") as infile:
infile.readline()
for line in infile:
line = line.split("\t")
country_id = line[0]
country_name = line[3]
if searchterm.lower() in country_name.lower().replace(" ", ""):
if verbose:
print(country_name, ":", country_id)
success = True
names_ids.append((country_name, int(country_id)))
if not success:
if verbose:
print("No country found for search term:", searchterm)
return names_ids
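# Minimal usage sketch (assumes the GPW v4 grid and lookup file were unpacked into
# DATA_FOLDER beforehand; the returned name/id pair is shown schematically, not taken
# from the actual lookup table):
#   >>> id_lookup("taiw", verbose=False)
#   [('<matching country name>', <its numeric grid id>)]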
|
the-stack_0_14011 | # __author__ = 'ktc312'
# -*- coding: utf-8 -*-
# coding: utf-8
import urllib2 as ul
from bs4 import BeautifulSoup
import csv
import os
import pandas as pd
import time
import data_cleaning
data_path = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'tw_perm_data_analysis/')
# Construct the visadoor search url
base_url = str('http://visadoor.com/greencards/index?country=Taiwan&submit=Search')
def raw_data_rows_to_csv(list_data, file_name):
with open(data_path + "data/" + file_name, "wb") as f:
writer = csv.writer(f)
writer.writerows(list_data)
# get last_year
def get_last_year():
col_names = ['id', 'Decision_Date', 'Employer', 'City_State', 'Case_Status', 'Job_Title', 'Wage_Offer']
tw_perm_df = pd.read_csv(data_path + 'data/TW_PERM.csv', names=col_names, dtype=str, skiprows=1)
data_cleaning.convert_datetime(tw_perm_df, 'Decision_Date')
sorted_df = tw_perm_df.sort_values('Decision_Date', ascending=True)
return str(sorted_df.iloc[[-1]]['Decision_Date']).split('-')[0][-4:]
get_last_year()
# get cases found
def get_cases_found(last_year):
cases_found_in_page = 0
test_search_term = '&year=' + last_year
soup = BeautifulSoup(ul.urlopen(base_url + test_search_term, data=None, timeout=5), "html.parser")
cases_found_class = soup.findAll("div", {"class": "col-sm-5"})
for div in cases_found_class:
cases_found_in_page = int(str(div).split('<h4>')[1].split(' ')[3])
return cases_found_in_page
# get page count
def get_page_count(cases):
if cases <= 1000:
count = 1
else:
count = (cases/1000) + 1
return count
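# Rough worked example (hypothetical numbers): with 2500 matching cases the scraper
# requests 2500/1000 + 1 = 3 pages, assuming visadoor returns at most 1000 rows per page.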
# get data
def scrape_data(last_year, page_count):
i = 0
encode_raw_data = []
while i < page_count:
search_term = '&year=' + last_year + '&page=' + str(i+1)
soup = BeautifulSoup(ul.urlopen(base_url + search_term, data=None, timeout=5), "html.parser")
# get data table
raw_data = []
table = soup.find('table', attrs={'class': 'table table-bordered table-striped table-hover'})
rows = table.findAll('tr')
for row in rows:
cols = row.findAll('td')
cols = [ele.text.strip() for ele in cols]
for col in cols:
raw_data.append(col)
for u_item in raw_data:
encode_raw_data.append(u_item.encode('UTF8'))
time.sleep(5)
i += 1
i = 0
encode_raw_data_rows = []
while i < len(encode_raw_data):
encode_raw_data_rows.append(encode_raw_data[i:i+7])
i += 7
raw_data_rows_to_csv(encode_raw_data_rows, 'temp_new_data.csv')
col_names = ['id', 'Decision_Date', 'Employer', 'City_State', 'Case_Status', 'Job_Title', 'Wage_Offer']
new_df = pd.read_csv(data_path + 'data/temp_new_data.csv', names=col_names, dtype=str, skiprows=1)
return new_df
# get the latest data
def download_data():
last_yr = get_last_year()
pages = get_page_count(get_cases_found(last_yr))
return scrape_data(last_yr, pages)
|
the-stack_0_14012 | # -*- coding: utf-8 -*-
"""
Created on Thu Nov 14 13:46:42 2019
@author: Zaki
"""
from sympy.parsing import sympy_parser
from pint import UnitRegistry
import numpy
import sympy
ureg = UnitRegistry()
Q = ureg.Quantity
LENGTH = '[length]'
INDUCTANCE = '[length] ** 2 * [mass] / [current] ** 2 / [time] ** 2'
CAPACITANCE = '[current] ** 2 * [time] ** 4 / [length] ** 2 / [mass]'
RESISTANCE = '[length] ** 2 * [mass] / [current] ** 2 / [time] ** 3'
DIMENSIONLESS = 'dimensionless'
LENGTH_UNIT = 'meter'
INDUCTANCE_UNIT = 'nH'
CAPACITANCE_UNIT = 'fF'
RESISTANCE_UNIT = 'ohm'
DIMENSIONLESS_UNIT = ''
### List handling
# Useful functions to manipulate to_move entities and ports
def find_last_list(list_entities):
# return the last list of a set of nested lists
if isinstance(list_entities, list):
if len(list_entities)==0:
return list_entities
else:
if isinstance(list_entities[-1], list):
return find_last_list(list_entities[-1])
else:
return list_entities
else:
        raise TypeError('Argument is not a list')
def find_penultimate_list(list_entities):
    # return the penultimate list in the trailing chain of nested lists
    # (the list whose last element is the innermost trailing list), or False
if isinstance(list_entities, list):
if len(list_entities)==0:
return False
else:
if isinstance(list_entities[-1], list):
if len(list_entities[-1])==0:
return list_entities
else:
if isinstance(list_entities[-1][-1], list):
return find_penultimate_list(list_entities[-1])
else:
return list_entities
else:
return False
else:
        raise TypeError('Argument is not a list')
def add_to_corresponding_list(elt, nested_list, added_elt):
    # insert added_elt just after elt in whichever nested list contains elt; return True if elt was found
if isinstance(nested_list, list):
if elt in nested_list:
index = nested_list.index(elt)
nested_list.insert(index+1, added_elt)
return True
else:
for elt_list in nested_list:
if isinstance(elt_list, list):
if add_to_corresponding_list(elt, elt_list, added_elt):
break
else:
return False
return True
else:
        pass  # raise TypeError('Argument is not a list')
def general_remove(elt, nested_list):
# same as list.remove(elt) but for a nested list
if isinstance(nested_list, list):
if elt in nested_list:
nested_list.remove(elt)
return True
else:
for elt_list in nested_list:
if isinstance(elt_list, list):
success = general_remove(elt, elt_list)
if success:
break
else:
raise TypeError('Argument is not a list')
def find_corresponding_list(elt, nested_list):
    # return the nested (sub)list that contains elt, or False if elt is not found
if isinstance(nested_list, list):
if elt in nested_list:
return nested_list
else:
for elt_list in nested_list:
if isinstance(elt_list, list):
found_list = find_corresponding_list(elt, elt_list)
if found_list:
break
else:
return False
return found_list
else:
return None
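# Minimal sketch of how these nested-list helpers behave (illustrative values only):
#   nested = ['a', ['b', ['c']]]
#   find_last_list(nested)                       # -> ['c']
#   add_to_corresponding_list('b', nested, 'x')  # nested becomes ['a', ['b', 'x', ['c']]]
#   find_corresponding_list('x', nested)         # -> ['b', 'x', ['c']]
#   general_remove('x', nested)                  # nested back to ['a', ['b', ['c']]]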
### Naming
def gen_name(name):
# routine to mimic the default naming procedure of HFSS when object
# already exists
end = ''
for ii in name[::-1]:
if ii.isdigit():
end+=ii
else:
break
if end=='':
return name+'1'
number = int(end[::-1])
if number==0:
return name+'1'
else:
prefix = name[:-len(str(number))]
suffix = str(number+1)
return prefix+suffix
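# Illustrative examples of the HFSS-style renaming (not tied to any particular design):
#   gen_name('rect')    # -> 'rect1'
#   gen_name('rect1')   # -> 'rect2'
#   gen_name('rect19')  # -> 'rect20'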
def check_name(_class, name):
end = ''
for ii, char in enumerate(name[::-1]):
if char.isdigit():
end+=char
else:
break
else:
ii += 1
if end == '':
radical = name
number = 0
else:
radical = name[:-ii]
number = int(end[::-1])
new_name = name
while(new_name in _class.dict_instances.keys()):
number+=1
new_name = radical+str(number)
if new_name != name:
print("%s: changed '%s' name into '%s'"%(_class.__name__, name, new_name))
return new_name
### Literal Expressions
def equal_float(float1, float2):
if abs(float1)>1e-10:
rel_diff = abs((float1-float2)/float1)
if rel_diff<1e-5:
return True
else:
return False
elif abs(float2)>1e-10:
rel_diff = abs((float1-float2)/float2)
if rel_diff<1e-5:
return True
else:
return False
else:
return True
def simplify_arith_expr(expr):
try:
out = repr(sympy_parser.parse_expr(str(expr)))
return out
except Exception:
print("Couldn't parse", expr)
raise
def extract_value_unit(expr, units):
"""
:type expr: str
:type units: str
:return: float
"""
try:
return Q(expr).to(units).magnitude
except Exception:
try:
return float(expr)
except Exception:
return expr
def extract_value_dim(expr):
"""
type expr: str
"""
return str(Q(expr).dimensionality)
def parse_entry(*entries, marker=True):
    # accepts arbitrarily nested lists/tuples of int, float or str entries
parsed = []
for entry in entries:
if not isinstance(entry, list) and not isinstance(entry, tuple):
parsed.append(extract_value_unit(entry, LENGTH_UNIT))
else:
if isinstance(entry, list):
if isinstance(entry, Vector):
parsed.append(Vector(parse_entry(*entry, marker=False)))
else:
parsed.append(parse_entry(*entry, marker=False))
elif isinstance(entry, tuple):
parsed.append(tuple(parse_entry(*entry, marker=False)))
else:
raise TypeError('Not foreseen type: %s'%(type(entry)))
if len(parsed)==1 and marker:
return parsed[0]
else:
return parsed
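# Minimal sketch of parse_entry on literal string entries (lengths are converted to meters):
#   parse_entry('10um')           # -> 1e-05
#   parse_entry('1mm', '25um')    # -> [0.001, 2.5e-05]
#   parse_entry(('1mm', '25um'))  # -> (0.001, 2.5e-05)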
def rem_unit(other):
try:
value = extract_value_unit(other, LENGTH_UNIT)
return value
except Exception:
return other
def _val(elt):
if isinstance(elt, (int, float, numpy.int64, numpy.float64, numpy.int32, numpy.float32)):
return elt
else:
return float(elt.evalf(subs=variables))
def val(*entries, marker=True):
    # accepts arbitrarily nested lists/tuples of int, float or str entries
parsed = []
for entry in entries:
if not isinstance(entry, (list, tuple, Vector)):
parsed.append(_val(entry))
else:
if isinstance(entry, Vector):
parsed.append(Vector(val(*entry, marker=False)))
elif isinstance(entry, list):
parsed.append(val(*entry, marker=False))
elif isinstance(entry, tuple):
parsed.append(tuple(val(*entry, marker=False)))
else:
raise TypeError('Not foreseen type: %s'%(type(entry)))
if len(parsed)==1 and marker:
return parsed[0]
else:
return parsed
def way(vec):
if vec[1] != 0:
if abs(vec[0]/vec[1])<1e-2:
if vec[1]>0:
return Vector(0,1)
elif vec[1]<0:
return Vector(0,-1)
if vec[0] != 0 :
if abs(vec[1]/vec[0])<1e-2:
if vec[0]>0:
return Vector(1,0)
elif vec[0]<0:
return Vector(-1,0)
variables = {}
def store_variable(symbol, value):  # store value converted to the module's base units
if isinstance(value, str):
if LENGTH == extract_value_dim(value):
unit = LENGTH_UNIT
if INDUCTANCE == extract_value_dim(value):
unit = INDUCTANCE_UNIT
if CAPACITANCE == extract_value_dim(value):
unit = CAPACITANCE_UNIT
if RESISTANCE == extract_value_dim(value):
unit = RESISTANCE_UNIT
if DIMENSIONLESS == extract_value_dim(value):
unit = DIMENSIONLESS_UNIT
value = extract_value_unit(value, unit)
variables[symbol] = value
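# Minimal sketch of how stored variables feed val(); the symbol name is illustrative:
#   track_w = sympy.Symbol('track_w')
#   store_variable(track_w, '42um')   # stored as 4.2e-05 (converted to meters)
#   val(2 * track_w)                  # -> 8.4e-05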
class Vector(numpy.ndarray):
"""
    Vector is a custom 3D vector class, allowing for operations optimized to
    interface properly with HFSS.
    The class can be instantiated as a 2D vector; however, it will effectively
    create a 3D vector with 0 for the z axis.
"""
def __new__(cls, vec, vec_y=None, vec_z=None):
"""
Init of the 3D vector:
        If vec_y and vec_z are None, then vec must be a len=2 or len=3 iterable.
        If vec_y is not None and vec_z is None, then create a vector [vec, vec_y, 0].
        If vec_y and vec_z are not None, then create a vector [vec, vec_y, vec_z].
"""
if vec_y is not None:
vec = [vec, vec_y, 0]
if(vec_z is not None):
vec[2] = vec_z
try:
if(not (len(vec)==2 or len(vec)==3)):
raise TypeError('vec can only be 2 or 3D, not %iD' % (len(vec)))
except:
raise TypeError('vec must be iterable')
if(len(vec) == 2):
vec = [vec[0], vec[1], 0]
obj = numpy.asarray(vec).view(cls)
return obj
@staticmethod
def check(elt):
"""
        Utility function to check if an element is compatible with vector
        operations. It only requires the element to be iterable and of len=3.
Args:
elt: The element to be tested
Returns:
            Boolean, True if elt is compatible with Vector operations, False
            otherwise.
"""
try:
return len(elt)==3
except:
return False
def __eq__(self, other):
val_self = val(self)
val_other = val(other)
bool_result = (equal_float(val_self[0], val_other[0]) and
equal_float(val_self[1], val_other[1]) and
equal_float(val_self[2], val_other[2]))
return bool_result
def index(self, elt):
val_self = val(self)
val_elt = val(elt)
for ii, item in enumerate(val_self):
if item == val_elt:
break
else:
return -1
return ii
# def __add__(self, other):
# if Vector.check(other):
# return Vector([self[0]+other[0], self[1]+other[1], self[2]+other[2]])
# else:
# try:
# return Vector([self[0]+other, self[1]+other, self[2]+other])
# except:
# raise TypeError('Could not perform add operation')
# def __radd__(self, other):
# return self + other
# def __sub__(self, other) :
# if Vector.check(other):
# return Vector([self[0]-other[0], self[1]-other[1], self[2]-other[2]])
# else:
# try:
# Vector([self[0]-other, self[1]-other, self[2]-other])
# except:
# raise TypeError('Could not perform sub operation')
# def __neg__(self):
# return Vector([-self[0], -self[1], -self[2]])
# def __rsub__(self, other):
# return -self + other
# def __mul__(self, other):
# if Vector.check(other):
# return Vector([self[0]*other[0], self[1]*other[1], self[2]*other[2]])
# else:
# try:
# return Vector([other*self[0], other*self[1], other*self[2]])
# except:
# raise TypeError('Could not perform mul operation')
# def __rmul__(self, other):
# return self * other
# def __truediv__(self, other):
# if Vector.check(other):
# return Vector([self[0]/other[0], self[1]/other[1], self[2]/other[2]])
# else:
# try:
# return Vector([self[0]/other, self[1]/other, self[2]/other])
# except:
# raise TypeError('Could not perform div operation')
# def __rtruediv__(self, other):
# self / other
# def dot(self, other):
# if Vector.check(other):
# return self[0]*other[0]+self[1]*other[1]+self[2]*other[2]
# else:
# raise TypeError('Could not perform dot operation')
def cross(self, other):
"""
        This function returns the cross product between self and other.
Args:
other: of type Vector
Returns:
type Vector, self x other
"""
        if(Vector.check(self) and Vector.check(other)):
return Vector(self[1]*other[2]-self[2]*other[1],
-(self[0]*other[2]-self[2]*other[0]),
self[0]*other[1]-self[1]*other[0])
else:
            raise TypeError('Could not perform cross operation')
def scalar_cross(self, other, ref=None):
"""
This function is a bit cryptic. It computes the signed magnitude of
the cross product between self and other, assuming they both are in
        the plane orthogonal to ref.
Args:
other: a Vector
ref: an other Vector, if None, assumed to be [0, 0, 1]
Returns:
dot((self x other), ref)
"""
if(ref is None):
ref = Vector(0, 0, 1)
if(Vector.check(other) and Vector.check(ref)):
return self.cross(other).dot(ref)
else:
raise TypeError('Could not perform dot operation')
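    # Sign convention sketch (using the default ref = (0, 0, 1)):
    #   Vector(1, 0, 0).scalar_cross(Vector(0, 1, 0))  # -> +1.0
    #   Vector(0, 1, 0).scalar_cross(Vector(1, 0, 0))  # -> -1.0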
def norm(self):
return (self[0]**2+self[1]**2+self[2]**2)**0.5
def abs(self):
return Vector([abs(self[0]), abs(self[1]), abs(self[2])])
def unit(self):
norm = self.norm()
return Vector([self[0]/norm, self[1]/norm, self[2]/norm])
def orth(self):
return Vector([-self[1], self[0]])
# def as_nda(self):
# return numpy.array([self[0], self[1], self[2]], dtype=object)
def rot(self, other, ref=None):
'''
        This function is just completely cryptic; Ulysse wrote it a long time ago.
Here is what it is doing: we assume that self is expressed in x=(100), y=(010), z=(001)
This function returns the coordinates of self in x'=other,y'=-(other x ref), z'=ref
In other words, this function computes a 3D change of coordinates.
Note:
            This function has been written assuming other and ref are orthogonal.
            Hence, if that is not the case, it can behave unexpectedly.
Args:
other: type Vector, the new x reference vector (x')
ref: type Vector, the new z reference vector (z'), if None, taken to be (0,0,1)
Returns:
self expressed in the new coordinate system.
'''
if(ref is None):
ref = Vector([0, 0, 1])
else:
ref = Vector(ref)
other = Vector(other)
if(Vector.check(other) and Vector.check(ref)):
other = Vector(other).unit()
ortho = -other.cross(ref)
return (Vector([self.dot(other.refx()), self.dot(other.orth().refy()), 0])*ref[2] +
Vector([self.dot(other.orth().refx()), 0, self.dot(other.refz())])*ref[1] +
Vector([0, self.dot(other.refy()), self.dot(other.orth().refz())])*ref[0])
else:
raise TypeError('other must be a Vector')
def px(self):
return Vector([self[0], 0, 0])
def py(self):
return Vector([0, self[1], 0])
def pz(self):
return Vector([0, 0, self[2]])
def refx(self, offset=0):
return Vector([self[0], -self[1]+2*offset, self[2]])
def refy(self, offset=0):
return Vector([-self[0]+2*offset, self[1], self[2]])
def refz(self, offset=0):
return Vector([self[0], self[1], -self[2]+2*offset])
# if(__name__ == "__main__"):
# x = Vector([1, 0, 0])
# y = Vector([0, -1, 0])
# print(x.rot(y))
def coor2angle(x, y=None):
if(y is None):
x, y = x
norm = (x**2+y**2)**0.5
if(x != 0 and abs(y/x) < 1):
angle = numpy.arcsin(y/norm)
if(x<0):
angle = numpy.pi - numpy.arcsin(y/norm)
else:
angle = numpy.arccos(x/norm)
if(y<0):
angle = - numpy.arccos(x/norm) + 2*numpy.pi
return angle%(2*numpy.pi)
# if(__name__=="__main__"):
# import matplotlib.pyplot as plt
# plt.figure()
# for theta in numpy.arange(0, 2*numpy.pi, 0.05):
# x, y = numpy.cos(theta), numpy.sin(theta)
# plt.plot(theta, coor2angle(x, y), 'o')
# plt.show() |
the-stack_0_14013 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
"""
General utils_
"""
import contextlib
import glob
import logging
import math
import os
import platform
import random
import re
import shutil
import signal
import time
import urllib
from itertools import repeat
from multiprocessing.pool import ThreadPool
from pathlib import Path
from subprocess import check_output
from zipfile import ZipFile
import cv2
import numpy as np
import pandas as pd
import pkg_resources as pkg
import torch
import torchvision
import yaml
from utils_.downloads import gsutil_getsize
from utils_.metrics import box_iou, fitness
# Settings
FILE = Path(__file__).resolve()
ROOT = FILE.parents[1] # YOLOv5 root directory
NUM_THREADS = min(8, max(1, os.cpu_count() - 1)) # number of YOLOv5 multiprocessing threads
torch.set_printoptions(linewidth=320, precision=5, profile='long')
np.set_printoptions(linewidth=320, formatter={'float_kind': '{:11.5g}'.format}) # format short g, %precision=5
pd.options.display.max_columns = 10
cv2.setNumThreads(0) # prevent OpenCV from multithreading (incompatible with PyTorch DataLoader)
os.environ['NUMEXPR_MAX_THREADS'] = str(NUM_THREADS) # NumExpr max threads
def set_logging(name=None, verbose=True):
# Sets level and returns logger
for h in logging.root.handlers:
logging.root.removeHandler(h) # remove all handlers associated with the root logger object
rank = int(os.getenv('RANK', -1)) # rank in world for Multi-GPU trainings
logging.basicConfig(format="%(message)s", level=logging.INFO if (verbose and rank in (-1, 0)) else logging.WARNING)
return logging.getLogger(name)
LOGGER = set_logging(__name__) # define globally (used in train.py, val.py, detect.py, etc.)
class Profile(contextlib.ContextDecorator):
# Usage: @Profile() decorator or 'with Profile():' context manager
def __enter__(self):
self.start = time.time()
def __exit__(self, type, value, traceback):
print(f'Profile results: {time.time() - self.start:.5f}s')
class Timeout(contextlib.ContextDecorator):
# Usage: @Timeout(seconds) decorator or 'with Timeout(seconds):' context manager
def __init__(self, seconds, *, timeout_msg='', suppress_timeout_errors=True):
self.seconds = int(seconds)
self.timeout_message = timeout_msg
self.suppress = bool(suppress_timeout_errors)
def _timeout_handler(self, signum, frame):
raise TimeoutError(self.timeout_message)
def __enter__(self):
signal.signal(signal.SIGALRM, self._timeout_handler) # Set handler for SIGALRM
signal.alarm(self.seconds) # start countdown for SIGALRM to be raised
def __exit__(self, exc_type, exc_val, exc_tb):
signal.alarm(0) # Cancel SIGALRM if it's scheduled
if self.suppress and exc_type is TimeoutError: # Suppress TimeoutError
return True
class WorkingDirectory(contextlib.ContextDecorator):
# Usage: @WorkingDirectory(dir) decorator or 'with WorkingDirectory(dir):' context manager
def __init__(self, new_dir):
self.dir = new_dir # new dir
self.cwd = Path.cwd().resolve() # current dir
def __enter__(self):
os.chdir(self.dir)
def __exit__(self, exc_type, exc_val, exc_tb):
os.chdir(self.cwd)
def try_except(func):
# try-except function. Usage: @try_except decorator
def handler(*args, **kwargs):
try:
func(*args, **kwargs)
except Exception as e:
print(e)
return handler
def methods(instance):
# Get class/instance methods
return [f for f in dir(instance) if callable(getattr(instance, f)) and not f.startswith("__")]
def print_args(name, opt):
# Print argparser arguments
LOGGER.info(colorstr(f'{name}: ') + ', '.join(f'{k}={v}' for k, v in vars(opt).items()))
def init_seeds(seed=0):
# Initialize random number generator (RNG) seeds https://pytorch.org/docs/stable/notes/randomness.html
# cudnn seed 0 settings are slower and more reproducible, else faster and less reproducible
import torch.backends.cudnn as cudnn
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
cudnn.benchmark, cudnn.deterministic = (False, True) if seed == 0 else (True, False)
def intersect_dicts(da, db, exclude=()):
# Dictionary intersection of matching keys and shapes, omitting 'exclude' keys, using da values
return {k: v for k, v in da.items() if k in db and not any(x in k for x in exclude) and v.shape == db[k].shape}
def get_latest_run(search_dir='.'):
# Return path to most recent 'last.pt' in /runs (i.e. to --resume from)
last_list = glob.glob(f'{search_dir}/**/last*.pt', recursive=True)
return max(last_list, key=os.path.getctime) if last_list else ''
def user_config_dir(dir='Ultralytics', env_var='YOLOV5_CONFIG_DIR'):
# Return path of user configuration directory. Prefer environment variable if exists. Make dir if required.
env = os.getenv(env_var)
if env:
path = Path(env) # use environment variable
else:
cfg = {'Windows': 'AppData/Roaming', 'Linux': '.config', 'Darwin': 'Library/Application Support'} # 3 OS dirs
path = Path.home() / cfg.get(platform.system(), '') # OS-specific config dir
path = (path if is_writeable(path) else Path('/tmp')) / dir # GCP and AWS lambda fix, only /tmp is writeable
path.mkdir(exist_ok=True) # make if required
return path
def is_writeable(dir, test=False):
# Return True if directory has write permissions, test opening a file with write permissions if test=True
if test: # method 1
file = Path(dir) / 'tmp.txt'
try:
with open(file, 'w'): # open file with write permissions
pass
file.unlink() # remove file
return True
except OSError:
return False
else: # method 2
return os.access(dir, os.R_OK) # possible issues on Windows
def is_docker():
# Is environment a Docker container?
return Path('/workspace').exists() # or Path('/.dockerenv').exists()
def is_colab():
# Is environment a Google Colab instance?
try:
import google.colab
return True
except ImportError:
return False
def is_pip():
# Is file in a pip package?
return 'site-packages' in Path(__file__).resolve().parts
def is_ascii(s=''):
# Is string composed of all ASCII (no UTF) characters? (note str().isascii() introduced in python 3.7)
s = str(s) # convert list, tuple, None, etc. to str
return len(s.encode().decode('ascii', 'ignore')) == len(s)
def is_chinese(s='人工智能'):
# Is string composed of any Chinese characters?
return re.search('[\u4e00-\u9fff]', s)
def emojis(str=''):
# Return platform-dependent emoji-safe version of string
return str.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else str
def file_size(path):
# Return file/dir size (MB)
path = Path(path)
if path.is_file():
return path.stat().st_size / 1E6
elif path.is_dir():
return sum(f.stat().st_size for f in path.glob('**/*') if f.is_file()) / 1E6
else:
return 0.0
def check_online():
# Check internet connectivity
import socket
try:
socket.create_connection(("1.1.1.1", 443), 5) # check host accessibility
return True
except OSError:
return False
@try_except
@WorkingDirectory(ROOT)
def check_git_status():
# Recommend 'git pull' if code is out of date
msg = ', for updates see https://github.com/ultralytics/yolov5'
print(colorstr('github: '), end='')
assert Path('.git').exists(), 'skipping check (not a git repository)' + msg
assert not is_docker(), 'skipping check (Docker image)' + msg
assert check_online(), 'skipping check (offline)' + msg
cmd = 'git fetch && git config --get remote.origin.url'
url = check_output(cmd, shell=True, timeout=5).decode().strip().rstrip('.git') # git fetch
branch = check_output('git rev-parse --abbrev-ref HEAD', shell=True).decode().strip() # checked out
n = int(check_output(f'git rev-list {branch}..origin/master --count', shell=True)) # commits behind
if n > 0:
s = f"⚠️ YOLOv5 is out of date by {n} commit{'s' * (n > 1)}. Use `git pull` or `git clone {url}` to update."
else:
s = f'up to date with {url} ✅'
print(emojis(s)) # emoji-safe
def check_python(minimum='3.6.2'):
# Check current python version vs. required python version
check_version(platform.python_version(), minimum, name='Python ', hard=True)
def check_version(current='0.0.0', minimum='0.0.0', name='version ', pinned=False, hard=False, verbose=False):
# Check version vs. required version
current, minimum = (pkg.parse_version(x) for x in (current, minimum))
result = (current == minimum) if pinned else (current >= minimum) # bool
s = f'{name}{minimum} required by YOLOv5, but {name}{current} is currently installed' # string
if hard:
assert result, s # assert min requirements met
if verbose and not result:
LOGGER.warning(s)
return result
@try_except
def check_requirements(requirements=ROOT / 'requirements.txt', exclude=(), install=True):
# Check installed dependencies meet requirements (pass *.txt file or list of packages)
prefix = colorstr('red', 'bold', 'requirements:')
check_python() # check python version
if isinstance(requirements, (str, Path)): # requirements.txt file
file = Path(requirements)
assert file.exists(), f"{prefix} {file.resolve()} not found, check failed."
with file.open() as f:
requirements = [f'{x.name}{x.specifier}' for x in pkg.parse_requirements(f) if x.name not in exclude]
else: # list or tuple of packages
requirements = [x for x in requirements if x not in exclude]
n = 0 # number of packages updates
for r in requirements:
try:
pkg.require(r)
except Exception as e: # DistributionNotFound or VersionConflict if requirements not met
s = f"{prefix} {r} not found and is required by YOLOv5"
if install:
print(f"{s}, attempting auto-update...")
try:
assert check_online(), f"'pip install {r}' skipped (offline)"
print(check_output(f"pip install '{r}'", shell=True).decode())
n += 1
except Exception as e:
print(f'{prefix} {e}')
else:
print(f'{s}. Please install and rerun your command.')
if n: # if packages updated
source = file.resolve() if 'file' in locals() else requirements
s = f"{prefix} {n} package{'s' * (n > 1)} updated per {source}\n" \
f"{prefix} ⚠️ {colorstr('bold', 'Restart runtime or rerun command for updates to take effect')}\n"
print(emojis(s))
def check_img_size(imgsz, s=32, floor=0):
# Verify image size is a multiple of stride s in each dimension
if isinstance(imgsz, int): # integer i.e. img_size=640
new_size = max(make_divisible(imgsz, int(s)), floor)
else: # list i.e. img_size=[640, 480]
new_size = [max(make_divisible(x, int(s)), floor) for x in imgsz]
if new_size != imgsz:
print(f'WARNING: --img-size {imgsz} must be multiple of max stride {s}, updating to {new_size}')
return new_size
def check_imshow():
# Check if environment supports image displays
try:
assert not is_docker(), 'cv2.imshow() is disabled in Docker environments'
assert not is_colab(), 'cv2.imshow() is disabled in Google Colab environments'
cv2.imshow('test', np.zeros((1, 1, 3)))
cv2.waitKey(1)
cv2.destroyAllWindows()
cv2.waitKey(1)
return True
except Exception as e:
print(f'WARNING: Environment does not support cv2.imshow() or PIL Image.show() image displays\n{e}')
return False
def check_suffix(file='yolov5s.pt', suffix=('.pt',), msg=''):
# Check file(s) for acceptable suffix
if file and suffix:
if isinstance(suffix, str):
suffix = [suffix]
for f in file if isinstance(file, (list, tuple)) else [file]:
s = Path(f).suffix.lower() # file suffix
if len(s):
assert s in suffix, f"{msg}{f} acceptable suffix is {suffix}"
def check_yaml(file, suffix=('.yaml', '.yml')):
# Search/download YAML file (if necessary) and return path, checking suffix
return check_file(file, suffix)
def check_file(file, suffix=''):
# Search/download file (if necessary) and return path
check_suffix(file, suffix) # optional
file = str(file) # convert to str()
if Path(file).is_file() or file == '': # exists
return file
elif file.startswith(('http:/', 'https:/')): # download
url = str(Path(file)).replace(':/', '://') # Pathlib turns :// -> :/
file = Path(urllib.parse.unquote(file).split('?')[0]).name # '%2F' to '/', split https://url.com/file.txt?auth
if Path(file).is_file():
print(f'Found {url} locally at {file}') # file already exists
else:
print(f'Downloading {url} to {file}...')
torch.hub.download_url_to_file(url, file)
assert Path(file).exists() and Path(file).stat().st_size > 0, f'File download failed: {url}' # check
return file
else: # search
files = []
for d in 'data', 'models', 'utils_': # search directories
files.extend(glob.glob(str(ROOT / d / '**' / file), recursive=True)) # find file
assert len(files), f'File not found: {file}' # assert file was found
assert len(files) == 1, f"Multiple files match '{file}', specify exact path: {files}" # assert unique
return files[0] # return file
def check_dataset(data, autodownload=True):
# Download and/or unzip dataset if not found locally
# Usage: https://github.com/ultralytics/yolov5/releases/download/v1.0/coco128_with_yaml.zip
# Download (optional)
extract_dir = ''
if isinstance(data, (str, Path)) and str(data).endswith('.zip'): # i.e. gs://bucket/dir/coco128.zip
download(data, dir='../datasets', unzip=True, delete=False, curl=False, threads=1)
data = next((Path('../datasets') / Path(data).stem).rglob('*.yaml'))
extract_dir, autodownload = data.parent, False
# Read yaml (optional)
if isinstance(data, (str, Path)):
with open(data, errors='ignore') as f:
data = yaml.safe_load(f) # dictionary
# Parse yaml
path = extract_dir or Path(data.get('path') or '') # optional 'path' default to '.'
for k in 'train', 'val', 'test':
if data.get(k): # prepend path
data[k] = str(path / data[k]) if isinstance(data[k], str) else [str(path / x) for x in data[k]]
assert 'nc' in data, "Dataset 'nc' key missing."
if 'names' not in data:
data['names'] = [f'class{i}' for i in range(data['nc'])] # assign class names if missing
train, val, test, s = (data.get(x) for x in ('train', 'val', 'test', 'download'))
if val:
val = [Path(x).resolve() for x in (val if isinstance(val, list) else [val])] # val path
if not all(x.exists() for x in val):
print('\nWARNING: Dataset not found, nonexistent paths: %s' % [str(x) for x in val if not x.exists()])
if s and autodownload: # download script
root = path.parent if 'path' in data else '..' # unzip directory i.e. '../'
if s.startswith('http') and s.endswith('.zip'): # URL
f = Path(s).name # filename
print(f'Downloading {s} to {f}...')
torch.hub.download_url_to_file(s, f)
Path(root).mkdir(parents=True, exist_ok=True) # create root
ZipFile(f).extractall(path=root) # unzip
Path(f).unlink() # remove zip
r = None # success
elif s.startswith('bash '): # bash script
print(f'Running {s} ...')
r = os.system(s)
else: # python script
r = exec(s, {'yaml': data}) # return None
print(f"Dataset autodownload {f'success, saved to {root}' if r in (0, None) else 'failure'}\n")
else:
raise Exception('Dataset not found.')
return data # dictionary
def url2file(url):
# Convert URL to filename, i.e. https://url.com/file.txt?auth -> file.txt
url = str(Path(url)).replace(':/', '://') # Pathlib turns :// -> :/
file = Path(urllib.parse.unquote(url)).name.split('?')[0] # '%2F' to '/', split https://url.com/file.txt?auth
return file
def download(url, dir='.', unzip=True, delete=True, curl=False, threads=1):
# Multi-threaded file download and unzip function, used in data.yaml for autodownload
def download_one(url, dir):
# Download 1 file
f = dir / Path(url).name # filename
if Path(url).is_file(): # exists in current path
Path(url).rename(f) # move to dir
elif not f.exists():
print(f'Downloading {url} to {f}...')
if curl:
os.system(f"curl -L '{url}' -o '{f}' --retry 9 -C -") # curl download, retry and resume on fail
else:
torch.hub.download_url_to_file(url, f, progress=True) # torch download
if unzip and f.suffix in ('.zip', '.gz'):
print(f'Unzipping {f}...')
if f.suffix == '.zip':
ZipFile(f).extractall(path=dir) # unzip
elif f.suffix == '.gz':
os.system(f'tar xfz {f} --directory {f.parent}') # unzip
if delete:
f.unlink() # remove zip
dir = Path(dir)
dir.mkdir(parents=True, exist_ok=True) # make directory
if threads > 1:
pool = ThreadPool(threads)
pool.imap(lambda x: download_one(*x), zip(url, repeat(dir))) # multi-threaded
pool.close()
pool.join()
else:
for u in [url] if isinstance(url, (str, Path)) else url:
download_one(u, dir)
def make_divisible(x, divisor):
# Returns nearest x divisible by divisor
if isinstance(divisor, torch.Tensor):
divisor = int(divisor.max()) # to int
return math.ceil(x / divisor) * divisor
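# Worked examples of the rounding behaviour: make_divisible(638, 32) -> 640 and
# make_divisible(100, 32) -> 128 (always rounds up to the next multiple).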
def clean_str(s):
# Cleans a string by replacing special characters with underscore _
return re.sub(pattern="[|@#!¡·$€%&()=?¿^*;:,¨´><+]", repl="_", string=s)
def one_cycle(y1=0.0, y2=1.0, steps=100):
# lambda function for sinusoidal ramp from y1 to y2 https://arxiv.org/pdf/1812.01187.pdf
return lambda x: ((1 - math.cos(x * math.pi / steps)) / 2) * (y2 - y1) + y1
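# Typical usage sketch (the optimizer/epochs names are placeholders, not defined in this module):
#   lf = one_cycle(1, 0.1, epochs)  # cosine ramp from 1.0 down to 0.1 over `epochs` steps
#   scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)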
def colorstr(*input):
# Colors a string https://en.wikipedia.org/wiki/ANSI_escape_code, i.e. colorstr('blue', 'hello world')
*args, string = input if len(input) > 1 else ('blue', 'bold', input[0]) # color arguments, string
colors = {'black': '\033[30m', # basic colors
'red': '\033[31m',
'green': '\033[32m',
'yellow': '\033[33m',
'blue': '\033[34m',
'magenta': '\033[35m',
'cyan': '\033[36m',
'white': '\033[37m',
'bright_black': '\033[90m', # bright colors
'bright_red': '\033[91m',
'bright_green': '\033[92m',
'bright_yellow': '\033[93m',
'bright_blue': '\033[94m',
'bright_magenta': '\033[95m',
'bright_cyan': '\033[96m',
'bright_white': '\033[97m',
'end': '\033[0m', # misc
'bold': '\033[1m',
'underline': '\033[4m'}
return ''.join(colors[x] for x in args) + f'{string}' + colors['end']
def labels_to_class_weights(labels, nc=80):
# Get class weights (inverse frequency) from training labels
if labels[0] is None: # no labels loaded
return torch.Tensor()
labels = np.concatenate(labels, 0) # labels.shape = (866643, 5) for COCO
classes = labels[:, 0].astype(np.int) # labels = [class xywh]
weights = np.bincount(classes, minlength=nc) # occurrences per class
# Prepend gridpoint count (for uCE training)
# gpi = ((320 / 32 * np.array([1, 2, 4])) ** 2 * 3).sum() # gridpoints per image
# weights = np.hstack([gpi * len(labels) - weights.sum() * 9, weights * 9]) ** 0.5 # prepend gridpoints to start
weights[weights == 0] = 1 # replace empty bins with 1
weights = 1 / weights # number of targets per class
weights /= weights.sum() # normalize
return torch.from_numpy(weights)
def labels_to_image_weights(labels, nc=80, class_weights=np.ones(80)):
# Produces image weights based on class_weights and image contents
class_counts = np.array([np.bincount(x[:, 0].astype(np.int), minlength=nc) for x in labels])
image_weights = (class_weights.reshape(1, nc) * class_counts).sum(1)
# index = random.choices(range(n), weights=image_weights, k=1) # weight image sample
return image_weights
def coco80_to_coco91_class(): # converts 80-index (val2014) to 91-index (paper)
# https://tech.amikelive.com/node-718/what-object-categories-labels-are-in-coco-dataset/
# a = np.loadtxt('data/coco.names', dtype='str', delimiter='\n')
# b = np.loadtxt('data/coco_paper.names', dtype='str', delimiter='\n')
# x1 = [list(a[i] == b).index(True) + 1 for i in range(80)] # darknet to coco
# x2 = [list(b[i] == a).index(True) if any(b[i] == a) else None for i in range(91)] # coco to darknet
x = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 28, 31, 32, 33, 34,
35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
64, 65, 67, 70, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 84, 85, 86, 87, 88, 89, 90]
return x
def xyxy2xywh(x):
# Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] where xy1=top-left, xy2=bottom-right
y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
y[:, 0] = (x[:, 0] + x[:, 2]) / 2 # x center
y[:, 1] = (x[:, 1] + x[:, 3]) / 2 # y center
y[:, 2] = x[:, 2] - x[:, 0] # width
y[:, 3] = x[:, 3] - x[:, 1] # height
return y
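# Worked example: a box with corners (10, 20) and (30, 60) becomes center (20, 40)
# with width 20 and height 40, i.e. [[10, 20, 30, 60]] -> [[20, 40, 20, 40]].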
def xywh2xyxy(x):
# Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right
y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
y[:, 0] = x[:, 0] - x[:, 2] / 2 # top left x
y[:, 1] = x[:, 1] - x[:, 3] / 2 # top left y
y[:, 2] = x[:, 0] + x[:, 2] / 2 # bottom right x
y[:, 3] = x[:, 1] + x[:, 3] / 2 # bottom right y
return y
def xywhn2xyxy(x, w=640, h=640, padw=0, padh=0):
# Convert nx4 boxes from [x, y, w, h] normalized to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right
y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
y[:, 0] = w * (x[:, 0] - x[:, 2] / 2) + padw # top left x
y[:, 1] = h * (x[:, 1] - x[:, 3] / 2) + padh # top left y
y[:, 2] = w * (x[:, 0] + x[:, 2] / 2) + padw # bottom right x
y[:, 3] = h * (x[:, 1] + x[:, 3] / 2) + padh # bottom right y
return y
def xyxy2xywhn(x, w=640, h=640, clip=False, eps=0.0):
# Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] normalized where xy1=top-left, xy2=bottom-right
if clip:
clip_coords(x, (h - eps, w - eps)) # warning: inplace clip
y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
y[:, 0] = ((x[:, 0] + x[:, 2]) / 2) / w # x center
y[:, 1] = ((x[:, 1] + x[:, 3]) / 2) / h # y center
y[:, 2] = (x[:, 2] - x[:, 0]) / w # width
y[:, 3] = (x[:, 3] - x[:, 1]) / h # height
return y
def xyn2xy(x, w=640, h=640, padw=0, padh=0):
# Convert normalized segments into pixel segments, shape (n,2)
y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
y[:, 0] = w * x[:, 0] + padw # top left x
y[:, 1] = h * x[:, 1] + padh # top left y
return y
def segment2box(segment, width=640, height=640):
# Convert 1 segment label to 1 box label, applying inside-image constraint, i.e. (xy1, xy2, ...) to (xyxy)
x, y = segment.T # segment xy
inside = (x >= 0) & (y >= 0) & (x <= width) & (y <= height)
    x, y = x[inside], y[inside]
return np.array([x.min(), y.min(), x.max(), y.max()]) if any(x) else np.zeros((1, 4)) # xyxy
def segments2boxes(segments):
# Convert segment labels to box labels, i.e. (cls, xy1, xy2, ...) to (cls, xywh)
boxes = []
for s in segments:
x, y = s.T # segment xy
boxes.append([x.min(), y.min(), x.max(), y.max()]) # cls, xyxy
return xyxy2xywh(np.array(boxes)) # cls, xywh
def resample_segments(segments, n=1000):
# Up-sample an (n,2) segment
for i, s in enumerate(segments):
x = np.linspace(0, len(s) - 1, n)
xp = np.arange(len(s))
segments[i] = np.concatenate([np.interp(x, xp, s[:, i]) for i in range(2)]).reshape(2, -1).T # segment xy
return segments
def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None):
# Rescale coords (xyxy) from img1_shape to img0_shape
if ratio_pad is None: # calculate from img0_shape
gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1]) # gain = old / new
pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2 # wh padding
else:
gain = ratio_pad[0][0]
pad = ratio_pad[1]
coords[:, [0, 2]] -= pad[0] # x padding
coords[:, [1, 3]] -= pad[1] # y padding
coords[:, :4] /= gain
clip_coords(coords, img0_shape)
return coords
def clip_coords(boxes, shape):
# Clip bounding xyxy bounding boxes to image shape (height, width)
if isinstance(boxes, torch.Tensor): # faster individually
boxes[:, 0].clamp_(0, shape[1]) # x1
boxes[:, 1].clamp_(0, shape[0]) # y1
boxes[:, 2].clamp_(0, shape[1]) # x2
boxes[:, 3].clamp_(0, shape[0]) # y2
else: # np.array (faster grouped)
boxes[:, [0, 2]] = boxes[:, [0, 2]].clip(0, shape[1]) # x1, x2
boxes[:, [1, 3]] = boxes[:, [1, 3]].clip(0, shape[0]) # y1, y2
def non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False, multi_label=False,
labels=(), max_det=300):
"""Runs Non-Maximum Suppression (NMS) on inference results
Returns:
list of detections, on (n,6) tensor per image [xyxy, conf, cls]
"""
nc = prediction.shape[2] - 5 # number of classes
xc = prediction[..., 4] > conf_thres # candidates
# Checks
assert 0 <= conf_thres <= 1, f'Invalid Confidence threshold {conf_thres}, valid values are between 0.0 and 1.0'
assert 0 <= iou_thres <= 1, f'Invalid IoU {iou_thres}, valid values are between 0.0 and 1.0'
# Settings
min_wh, max_wh = 2, 4096 # (pixels) minimum and maximum box width and height
max_nms = 30000 # maximum number of boxes into torchvision.ops.nms()
time_limit = 10.0 # seconds to quit after
redundant = True # require redundant detections
multi_label &= nc > 1 # multiple labels per box (adds 0.5ms/img)
merge = False # use merge-NMS
t = time.time()
output = [torch.zeros((0, 6), device=prediction.device)] * prediction.shape[0]
for xi, x in enumerate(prediction): # image index, image inference
# Apply constraints
# x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0 # width-height
x = x[xc[xi]] # confidence
# Cat apriori labels if autolabelling
if labels and len(labels[xi]):
l = labels[xi]
v = torch.zeros((len(l), nc + 5), device=x.device)
v[:, :4] = l[:, 1:5] # box
v[:, 4] = 1.0 # conf
v[range(len(l)), l[:, 0].long() + 5] = 1.0 # cls
x = torch.cat((x, v), 0)
# If none remain process next image
if not x.shape[0]:
continue
# Compute conf
x[:, 5:] *= x[:, 4:5] # conf = obj_conf * cls_conf
# Box (center x, center y, width, height) to (x1, y1, x2, y2)
box = xywh2xyxy(x[:, :4])
# Detections matrix nx6 (xyxy, conf, cls)
if multi_label:
i, j = (x[:, 5:] > conf_thres).nonzero(as_tuple=False).T
x = torch.cat((box[i], x[i, j + 5, None], j[:, None].float()), 1)
else: # best class only
conf, j = x[:, 5:].max(1, keepdim=True)
x = torch.cat((box, conf, j.float()), 1)[conf.view(-1) > conf_thres]
# Filter by class
if classes is not None:
x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)]
# Apply finite constraint
# if not torch.isfinite(x).all():
# x = x[torch.isfinite(x).all(1)]
# Check shape
n = x.shape[0] # number of boxes
if not n: # no boxes
continue
elif n > max_nms: # excess boxes
x = x[x[:, 4].argsort(descending=True)[:max_nms]] # sort by confidence
# Batched NMS
c = x[:, 5:6] * (0 if agnostic else max_wh) # classes
boxes, scores = x[:, :4] + c, x[:, 4] # boxes (offset by class), scores
i = torchvision.ops.nms(boxes, scores, iou_thres) # NMS
if i.shape[0] > max_det: # limit detections
i = i[:max_det]
if merge and (1 < n < 3E3): # Merge NMS (boxes merged using weighted mean)
# update boxes as boxes(i,4) = weights(i,n) * boxes(n,4)
iou = box_iou(boxes[i], boxes) > iou_thres # iou matrix
weights = iou * scores[None] # box weights
x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True) # merged boxes
if redundant:
i = i[iou.sum(1) > 1] # require redundancy
output[xi] = x[i]
if (time.time() - t) > time_limit:
print(f'WARNING: NMS time limit {time_limit}s exceeded')
break # time limit exceeded
return output
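# Minimal usage sketch (`pred` is assumed to be the raw model output of shape (bs, n, nc + 5)):
#   det = non_max_suppression(pred, conf_thres=0.25, iou_thres=0.45)[0]  # (n, 6) [xyxy, conf, cls] for image 0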
def strip_optimizer(f='best.pt', s=''): # from utils_.general import *; strip_optimizer()
# Strip optimizer from 'f' to finalize training, optionally save as 's'
x = torch.load(f, map_location=torch.device('cpu'))
if x.get('ema'):
x['model'] = x['ema'] # replace model with ema
for k in 'optimizer', 'best_fitness', 'wandb_id', 'ema', 'updates': # keys
x[k] = None
x['epoch'] = -1
x['model'].half() # to FP16
for p in x['model'].parameters():
p.requires_grad = False
torch.save(x, s or f)
mb = os.path.getsize(s or f) / 1E6 # filesize
print(f"Optimizer stripped from {f},{(' saved as %s,' % s) if s else ''} {mb:.1f}MB")
def print_mutation(results, hyp, save_dir, bucket):
evolve_csv, results_csv, evolve_yaml = save_dir / 'evolve.csv', save_dir / 'results.csv', save_dir / 'hyp_evolve.yaml'
keys = ('metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/mAP_0.5:0.95',
'val/box_loss', 'val/obj_loss', 'val/cls_loss') + tuple(hyp.keys()) # [results + hyps]
keys = tuple(x.strip() for x in keys)
vals = results + tuple(hyp.values())
n = len(keys)
# Download (optional)
if bucket:
url = f'gs://{bucket}/evolve.csv'
if gsutil_getsize(url) > (os.path.getsize(evolve_csv) if os.path.exists(evolve_csv) else 0):
os.system(f'gsutil cp {url} {save_dir}') # download evolve.csv if larger than local
# Log to evolve.csv
s = '' if evolve_csv.exists() else (('%20s,' * n % keys).rstrip(',') + '\n') # add header
with open(evolve_csv, 'a') as f:
f.write(s + ('%20.5g,' * n % vals).rstrip(',') + '\n')
# Print to screen
print(colorstr('evolve: ') + ', '.join(f'{x.strip():>20s}' for x in keys))
print(colorstr('evolve: ') + ', '.join(f'{x:20.5g}' for x in vals), end='\n\n\n')
# Save yaml
with open(evolve_yaml, 'w') as f:
data = pd.read_csv(evolve_csv)
data = data.rename(columns=lambda x: x.strip()) # strip keys
        i = np.argmax(fitness(data.values[:, :7]))  # index of best fitness row
f.write('# YOLOv5 Hyperparameter Evolution Results\n' +
f'# Best generation: {i}\n' +
f'# Last generation: {len(data) - 1}\n' +
'# ' + ', '.join(f'{x.strip():>20s}' for x in keys[:7]) + '\n' +
'# ' + ', '.join(f'{x:>20.5g}' for x in data.values[i, :7]) + '\n\n')
yaml.safe_dump(hyp, f, sort_keys=False)
if bucket:
os.system(f'gsutil cp {evolve_csv} {evolve_yaml} gs://{bucket}') # upload
def apply_classifier(x, model, img, im0):
# Apply a second stage classifier to YOLO outputs
# Example model = torchvision.models.__dict__['efficientnet_b0'](pretrained=True).to(device).eval()
im0 = [im0] if isinstance(im0, np.ndarray) else im0
for i, d in enumerate(x): # per image
if d is not None and len(d):
d = d.clone()
# Reshape and pad cutouts
b = xyxy2xywh(d[:, :4]) # boxes
b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1) # rectangle to square
b[:, 2:] = b[:, 2:] * 1.3 + 30 # pad
d[:, :4] = xywh2xyxy(b).long()
# Rescale boxes from img_size to im0 size
scale_coords(img.shape[2:], d[:, :4], im0[i].shape)
# Classes
pred_cls1 = d[:, 5].long()
ims = []
for j, a in enumerate(d): # per item
cutout = im0[i][int(a[1]):int(a[3]), int(a[0]):int(a[2])]
im = cv2.resize(cutout, (224, 224)) # BGR
# cv2.imwrite('example%i.jpg' % j, cutout)
im = im[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
im = np.ascontiguousarray(im, dtype=np.float32) # uint8 to float32
im /= 255 # 0 - 255 to 0.0 - 1.0
ims.append(im)
pred_cls2 = model(torch.Tensor(ims).to(d.device)).argmax(1) # classifier prediction
x[i] = x[i][pred_cls1 == pred_cls2] # retain matching class detections
return x
def increment_path(path, exist_ok=False, sep='', mkdir=False):
# Increment file or directory path, i.e. runs/exp --> runs/exp{sep}2, runs/exp{sep}3, ... etc.
path = Path(path) # os-agnostic
if path.exists() and not exist_ok:
path, suffix = (path.with_suffix(''), path.suffix) if path.is_file() else (path, '')
dirs = glob.glob(f"{path}{sep}*") # similar paths
matches = [re.search(rf"%s{sep}(\d+)" % path.stem, d) for d in dirs]
i = [int(m.groups()[0]) for m in matches if m] # indices
n = max(i) + 1 if i else 2 # increment number
path = Path(f"{path}{sep}{n}{suffix}") # increment path
if mkdir:
path.mkdir(parents=True, exist_ok=True) # make directory
return path
# Variables
NCOLS = 0 if is_docker() else shutil.get_terminal_size().columns # terminal window size for tqdm
|
the-stack_0_14015 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# Tom van Steijn, Royal HaskoningDHV
import adopy
import numpy as np
import pytest
import shutil
import os
@pytest.fixture
def steadyflofile(tmpdir):
datadir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data')
flofilename = r'flairs.FLO'
flofile = os.path.join(datadir, flofilename)
testfile = tmpdir.join(flofilename)
shutil.copyfile(flofile, testfile)
return testfile
@pytest.fixture
def transientflofile(tmpdir):
datadir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data')
flofilename = r'flairs1_2007.flo'
flofile = os.path.join(datadir, flofilename)
testfile = tmpdir.join(flofilename)
shutil.copyfile(flofile, testfile)
return testfile
class TestSteadyFloFile(object):
def test_read(self, steadyflofile):
with adopy.open_flo(steadyflofile, transient=False) as src:
flo = src.as_dict()
assert flo['PHI1'].values.shape == (136365,)
class TestTransientFloFile(object):
def test_read(self, transientflofile):
with adopy.open_flo(transientflofile, transient=True) as src:
flo = src.read() |
the-stack_0_14016 | """Test asyncpraw.models.user."""
import pytest
from asynctest import mock
from asyncpraw.exceptions import RedditAPIException
from asyncpraw.models import Multireddit, Redditor, Subreddit
from .. import IntegrationTest
class TestUser(IntegrationTest):
async def test_blocked(self):
self.reddit.read_only = False
with self.use_cassette():
blocked = await self.reddit.user.blocked()
assert len(blocked) > 0
assert all(isinstance(user, Redditor) for user in blocked)
async def test_blocked_fullname(self):
self.reddit.read_only = False
with self.use_cassette():
blocked = next(iter(await self.reddit.user.blocked()))
assert blocked.fullname.startswith("t2_")
assert not blocked.fullname.startswith("t2_t2")
async def test_contributor_subreddits(self):
self.reddit.read_only = False
with self.use_cassette():
count = 0
async for subreddit in self.reddit.user.contributor_subreddits():
assert isinstance(subreddit, Subreddit)
count += 1
assert count > 0
async def test_friends(self):
self.reddit.read_only = False
with self.use_cassette():
friends = await self.reddit.user.friends()
assert len(friends) > 0
assert all(isinstance(friend, Redditor) for friend in friends)
@mock.patch("asyncio.sleep", return_value=None)
async def test_friend_exist(self, _):
self.reddit.read_only = False
with self.use_cassette():
friend = await self.reddit.user.friends(user=await self.reddit.user.me())
assert isinstance(friend, Redditor)
@mock.patch("asyncio.sleep", return_value=None)
async def test_friend_not_exist(self, _):
self.reddit.read_only = False
with self.use_cassette():
with pytest.raises(RedditAPIException):
await self.reddit.user.friends(user="fake__user_user_user")
async def test_karma(self):
self.reddit.read_only = False
with self.use_cassette():
karma = await self.reddit.user.karma()
assert isinstance(karma, dict)
for subreddit in karma:
assert isinstance(subreddit, Subreddit)
keys = sorted(karma[subreddit].keys())
assert ["comment_karma", "link_karma"] == keys
async def test_me(self):
self.reddit.read_only = False
with self.use_cassette():
me = await self.reddit.user.me()
assert isinstance(me, Redditor)
me.praw_is_cached = True
me = await self.reddit.user.me()
assert me.praw_is_cached
@mock.patch("asyncio.sleep", return_value=None)
async def test_me__bypass_cache(self, _):
self.reddit.read_only = False
with self.use_cassette():
me = await self.reddit.user.me()
me.praw_is_cached = True
me = await self.reddit.user.me(use_cache=False)
assert not hasattr(me, "praw_is_cached")
async def test_multireddits(self):
self.reddit.read_only = False
with self.use_cassette():
multireddits = await self.reddit.user.multireddits()
assert isinstance(multireddits, list)
assert multireddits
assert all(isinstance(x, Multireddit) for x in multireddits)
async def test_subreddits(self):
self.reddit.read_only = False
with self.use_cassette():
count = 0
async for subreddit in self.reddit.user.subreddits():
assert isinstance(subreddit, Subreddit)
count += 1
assert count > 0
|
the-stack_0_14019 | #!/usr/bin/env python3
# Connect the ipad (ground station) to your computer and find the dji
# go flight log. Upload that to https://www.phantomhelp.com/LogViewer,
# download as csv and copy that next to the flight movie and srt file.
# extract srt form of subtitles from dji movie (caption setting needs
# to be turned on when movie is recorded)
#
# ffmpeg -txt_format text -i input_file.MOV output_file.srt
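# Example invocation (an illustrative sketch; the file names below are
# hypothetical, but every flag shown is defined by the argparse setup further
# down in this script):
#
#   python3 <this_script>.py --video DJI_0001.MOV \
#       --djicsv DJI_0001_phantomhelp.csv \
#       --camera camera.json --ground 250 --interval 1.0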
import argparse
import cv2
import datetime
import skvideo.io # pip3 install scikit-video
import math
import fractions
import json
from matplotlib import pyplot as plt
import numpy as np
import os
import pyexiv2
import re
import sys
from scipy import interpolate # straight up linear interpolation, nothing fancy
from rcUAS import wgs84
from props import PropertyNode
import props_json
import djilog
parser = argparse.ArgumentParser(description='extract and geotag dji movie frames.')
parser.add_argument('--video', required=True, help='input video')
parser.add_argument('--camera', help='select camera calibration file')
parser.add_argument('--cam-mount', choices=['forward', 'down', 'rear'],
default='down',
help='approximate camera mounting orientation')
parser.add_argument('--interval', type=float, default=1.0, help='extraction interval')
parser.add_argument('--distance', type=float, help='max extraction distance interval')
parser.add_argument('--start-time', type=float, help='begin frame grabbing at this time.')
parser.add_argument('--end-time', type=float, help='end frame grabbing at this time.')
parser.add_argument('--start-counter', type=int, default=1, help='first image counter')
parser.add_argument('--ground', type=float, help='ground altitude in meters')
parser.add_argument('--djicsv', help='name of dji exported csv log file from the flight, see https://www.phantomhelp.com/logviewer/upload/')
args = parser.parse_args()
r2d = 180.0 / math.pi
match_ratio = 0.75
scale = 0.4
filter_method = 'homography'
tol = 3.0
overlap = 0.20
djicsv = djilog.djicsv()
djicsv.load(args.djicsv)
class Fraction(fractions.Fraction):
"""Only create Fractions from floats.
>>> Fraction(0.3)
Fraction(3, 10)
>>> Fraction(1.1)
Fraction(11, 10)
"""
def __new__(cls, value, ignore=None):
"""Should be compatible with Python 2.6, though untested."""
return fractions.Fraction.from_float(value).limit_denominator(99999)
def dms_to_decimal(degrees, minutes, seconds, sign=' '):
"""Convert degrees, minutes, seconds into decimal degrees.
>>> dms_to_decimal(10, 10, 10)
10.169444444444444
>>> dms_to_decimal(8, 9, 10, 'S')
-8.152777777777779
"""
return (-1 if sign[0] in 'SWsw' else 1) * (
float(degrees) +
float(minutes) / 60 +
float(seconds) / 3600
)
def decimal_to_dms(decimal):
"""Convert decimal degrees into degrees, minutes, seconds.
>>> decimal_to_dms(50.445891)
[Fraction(50, 1), Fraction(26, 1), Fraction(113019, 2500)]
>>> decimal_to_dms(-125.976893)
[Fraction(125, 1), Fraction(58, 1), Fraction(92037, 2500)]
"""
remainder, degrees = math.modf(abs(decimal))
remainder, minutes = math.modf(remainder * 60)
return [Fraction(n) for n in (degrees, minutes, remainder * 60)]
# find affine transform between matching keypoints in pixel
# coordinate space. fullAffine=True means unconstrained to
# include best warp/shear. fullAffine=False means limit the
# matrix to only best rotation, translation, and scale.
def findAffine(src, dst, fullAffine=False):
affine_minpts = 7
#print("src:", src)
#print("dst:", dst)
if len(src) >= affine_minpts:
# affine = cv2.estimateRigidTransform(np.array([src]), np.array([dst]), fullAffine)
affine, status = \
cv2.estimateAffinePartial2D(np.array([src]).astype(np.float32),
np.array([dst]).astype(np.float32))
else:
affine = None
#print str(affine)
return affine
def decomposeAffine(affine):
if affine is None:
return (0.0, 0.0, 0.0, 1.0, 1.0)
tx = affine[0][2]
ty = affine[1][2]
a = affine[0][0]
b = affine[0][1]
c = affine[1][0]
d = affine[1][1]
sx = math.sqrt( a*a + b*b )
if a < 0.0:
sx = -sx
sy = math.sqrt( c*c + d*d )
if d < 0.0:
sy = -sy
rotate_deg = math.atan2(-b,a) * 180.0/math.pi
if rotate_deg < -180.0:
rotate_deg += 360.0
if rotate_deg > 180.0:
rotate_deg -= 360.0
return (rotate_deg, tx, ty, sx, sy)
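# Quick illustration of the two helpers above (kept as a comment so the
# pipeline itself is unchanged; the matrix is made-up test data): a pure
# translation of (5, -3) pixels has no rotation and unit scale, so
#
#   example = np.array([[1.0, 0.0,  5.0],
#                       [0.0, 1.0, -3.0]])
#   decomposeAffine(example)   # -> (0.0, 5.0, -3.0, 1.0, 1.0)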
def filterMatches(kp1, kp2, matches):
mkp1, mkp2 = [], []
idx_pairs = []
used = np.zeros(len(kp2), np.bool_)
for m in matches:
if len(m) == 2 and m[0].distance < m[1].distance * match_ratio:
#print " dist[0] = %d dist[1] = %d" % (m[0].distance, m[1].distance)
m = m[0]
# FIXME: ignore the bottom section of movie for feature detection
#if kp1[m.queryIdx].pt[1] > h*0.75:
# continue
if not used[m.trainIdx]:
used[m.trainIdx] = True
mkp1.append( kp1[m.queryIdx] )
mkp2.append( kp2[m.trainIdx] )
idx_pairs.append( (m.queryIdx, m.trainIdx) )
p1 = np.float32([kp.pt for kp in mkp1])
p2 = np.float32([kp.pt for kp in mkp2])
kp_pairs = zip(mkp1, mkp2)
return p1, p2, kp_pairs, idx_pairs, mkp1
def filterFeatures(p1, p2, K, method):
inliers = 0
total = len(p1)
space = ""
status = []
M = None
if len(p1) < 7:
# not enough points
return None, np.zeros(total), [], []
if method == 'homography':
M, status = cv2.findHomography(p1, p2, cv2.LMEDS, tol)
elif method == 'fundamental':
M, status = cv2.findFundamentalMat(p1, p2, cv2.LMEDS, tol)
elif method == 'essential':
M, status = cv2.findEssentialMat(p1, p2, K, cv2.LMEDS, threshold=tol)
elif method == 'none':
M = None
status = np.ones(total)
newp1 = []
newp2 = []
for i, flag in enumerate(status):
if flag:
newp1.append(p1[i])
newp2.append(p2[i])
p1 = np.float32(newp1)
p2 = np.float32(newp2)
inliers = np.sum(status)
total = len(status)
#print '%s%d / %d inliers/matched' % (space, np.sum(status), len(status))
return M, status, np.float32(newp1), np.float32(newp2)
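# Typical use in this script (a sketch): keep only the matched points that are
# consistent with a single homography between consecutive frames, then treat
# the surviving fraction as a rough match-quality measure. 'inlier_ratio' is an
# illustrative name, not something defined elsewhere in this file.
#
#   M, status, in1, in2 = filterFeatures(p1, p2, K, 'homography')
#   inlier_ratio = np.sum(status) / max(len(status), 1)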
# pathname work
abspath = os.path.abspath(args.video)
basename, ext = os.path.splitext(abspath)
srtname = basename + ".srt"
dirname = basename + "_frames"
print("basename:", basename)
print("srtname:", srtname)
print("dirname:", dirname)
local_config = os.path.join(dirname, "camera.json")
config = PropertyNode()
if args.camera:
# seed the camera calibration and distortion coefficients from a
# known camera config
print('Setting camera config from:', args.camera)
props_json.load(args.camera, config)
config.setString('name', args.camera)
props_json.save(local_config, config)
elif os.path.exists(local_config):
# load local config file if it exists
props_json.load(local_config, config)
K_list = []
for i in range(9):
K_list.append( config.getFloatEnum('K', i) )
K = np.copy(np.array(K_list)).reshape(3,3)
dist = []
for i in range(5):
dist.append( config.getFloatEnum("dist_coeffs", i) )
# check for required input files
if not os.path.isfile(args.video):
print("%s doesn't exist, aborting ..." % args.video)
quit()
if os.path.isfile(basename + ".srt"):
srtname = basename + ".srt"
elif os.path.isfile(basename + ".SRT"):
srtname = basename + ".SRT"
else:
print("SRT (caption) file doesn't exist, aborting ...")
quit()
# output directory
os.makedirs(dirname, exist_ok=True)
# setup feature detection
detector = cv2.SIFT_create(nfeatures=1000)
FLANN_INDEX_KDTREE = 1 # bug: flann enums are missing
FLANN_INDEX_LSH = 6
flann_params = { 'algorithm': FLANN_INDEX_KDTREE,
'trees': 5 }
matcher = cv2.FlannBasedMatcher(flann_params, {}) # bug : need to pass empty dict (#1329)
srt = djilog.djisrt()
srt.load(srtname)
# fetch video metadata
metadata = skvideo.io.ffprobe(args.video)
#print(metadata.keys())
#print(json.dumps(metadata["video"], indent=4))
fps_string = metadata['video']['@avg_frame_rate']
(num, den) = fps_string.split('/')
fps = float(num) / float(den)
codec = metadata['video']['@codec_long_name']
w = int(metadata['video']['@width'])
h = int(metadata['video']['@height'])
print('fps:', fps)
print('codec:', codec)
print('output size:', w, 'x', h)
# extract frames
print("Opening ", args.video)
reader = skvideo.io.FFmpegReader(args.video, inputdict={}, outputdict={})
meta = os.path.join(dirname, "image-metadata.txt")
f = open(meta, 'w')
print("writing meta data to", meta)
last_time = -1000000
counter = 0
img_counter = args.start_counter
last_lat = 0
last_lon = 0
kp_list_ref = []
des_list_ref = []
for frame in reader.nextFrame():
frame = frame[:,:,::-1] # convert from RGB to BGR (to make opencv happy)
time = float(counter) / fps
counter += 1
print("frame:", counter, "time:", "%.3f" % time)
if args.start_time and time < args.start_time:
continue
if args.end_time and time > args.end_time:
break
if srt.need_interpolate:
lat_deg = srt.interp_lats(time)
lon_deg = srt.interp_lons(time)
alt_m = srt.interp_heights(time) + args.ground
else:
if counter - 1 >= len(srt.times):
print("MORE FRAMES THAN SRT ENTRIS")
continue
time_str = srt.times[counter - 1]
lat_deg = srt.lats[counter - 1]
lon_deg = srt.lons[counter - 1]
alt_m = srt.heights[counter - 1]
# compute unix version of timestamp (here in local tz)
main_str, t1, t2 = time_str.split(",")
fraction = (float(t1)*1000 + float(t2)) / 1000000
print("dt:", time_str)
date_time_obj = datetime.datetime.strptime(main_str, '%Y-%m-%d %H:%M:%S')
unix_sec = float(date_time_obj.strftime('%s')) + fraction
print("from local:", unix_sec)
record = djicsv.query(unix_sec)
roll = record['roll']
pitch = record['pitch']
yaw = record['yaw']
if yaw < 0: yaw += 360.0
if abs(lat_deg) < 0.001 and abs(lon_deg) < 0.001:
continue
write_frame = False
# by distance camera has moved
(c1, c2, dist_m) = wgs84.geo_inverse(lat_deg, lon_deg, last_lat, last_lon)
print("dist:", dist_m)
#if time >= last_time + args.interval and dist_m >= args.distance:
if args.distance and dist_m >= args.distance:
write_frame = True
# by visual overlap
method = cv2.INTER_AREA
frame_scale = cv2.resize(frame, (0,0), fx=scale, fy=scale,
interpolation=method)
cv2.imshow('frame', frame_scale)
gray = cv2.cvtColor(frame_scale, cv2.COLOR_BGR2GRAY)
(h, w) = gray.shape
kp_list = detector.detect(gray)
kp_list, des_list = detector.compute(gray, kp_list)
if not (des_list_ref is None) and not (des_list is None) and len(des_list_ref) and len(des_list):
matches = matcher.knnMatch(des_list, trainDescriptors=des_list_ref, k=2)
p1, p2, kp_pairs, idx_pairs, mkp1 = filterMatches(kp_list, kp_list_ref, matches)
M, status, newp1, newp2 = filterFeatures(p1, p2, K, filter_method)
filtered = []
for i, flag in enumerate(status):
if flag:
filtered.append(mkp1[i])
affine = findAffine(p2, p1, fullAffine=False)
if affine is None:
write_frame = True
else:
(rot, tx, ty, sx, sy) = decomposeAffine(affine)
xperc = abs(tx) / w
yperc = abs(ty) / h
perc = math.sqrt(xperc*xperc + yperc*yperc)
print("pixel dist:", tx, ty, "%.1f%% %.1f%%" % (xperc*100, yperc*100))
if perc >= overlap:
write_frame = True
else:
# first frame
write_frame = True
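    # Worked example of the overlap test above (hypothetical numbers): with a
    # 4K frame scaled by 0.4 the working size is w=1536, h=864. A shift of
    # tx=300, ty=100 px gives xperc~0.195, yperc~0.116, perc~0.227, which is
    # >= overlap (0.20), so a new frame would be written.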
cv2.waitKey(1)
if write_frame:
print("WRITE FRAME")
file = os.path.join(dirname, "img_%04d" % img_counter + ".jpg")
img_counter += 1
cv2.imwrite(file, frame)
# geotag the image
exif = pyexiv2.ImageMetadata(file)
exif.read()
print(lat_deg, lon_deg, alt_m)
exif['Exif.Image.DateTime'] = time_str
GPS = 'Exif.GPSInfo.GPS'
exif[GPS + 'AltitudeRef'] = '0' if alt_m >= 0 else '1'
exif[GPS + 'Altitude'] = Fraction(alt_m)
exif[GPS + 'Latitude'] = decimal_to_dms(lat_deg)
exif[GPS + 'LatitudeRef'] = 'N' if lat_deg >= 0 else 'S'
exif[GPS + 'Longitude'] = decimal_to_dms(lon_deg)
exif[GPS + 'LongitudeRef'] = 'E' if lon_deg >= 0 else 'W'
exif[GPS + 'MapDatum'] = 'WGS-84'
exif.write()
head, tail = os.path.split(file)
f.write("%s,%.8f,%.8f,%.4f,%.4f,%.4f,%.4f,%.2f\n" % (tail, lat_deg, lon_deg, alt_m, yaw, pitch, roll, time))
# by distance
last_lat = lat_deg
last_lon = lon_deg
# by time
last_time = time
# by overlap
kp_list_ref = kp_list
des_list_ref = des_list
f.close()
|
the-stack_0_14021 | # -*- coding: utf-8 -*-
"""
cannlytics.traceability.utils.utils
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This module contains general cannabis analytics utility functions.
"""
from datetime import datetime, timedelta
from re import sub, findall
def camelcase(string):
"""Turn a given string to CamelCase.
Args:
string (str): A given string to turn to CamelCase.
Returns:
(str): A string in CamelCase.
"""
key = ''.join(x for x in string.title() if not x.isspace())
key = key.replace('_', '').replace('-', '')
return key
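# Illustrative usage (the inputs are made-up keys, not fields used elsewhere):
#
#   camelcase('lab_result')    # -> 'LabResult'
#   camelcase('lab result')    # -> 'LabResult'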
def get_timestamp(past=0, future=0, time_zone='local'):
"""Get an ISO formatted timestamp.
Args:
past (int): Number of minutes in the past to get a timestamp.
future (int): Number of minutes into the future to get a timestamp.
time_zone (str): UNIMPLEMENTED Set a given timezone.
Returns:
(str): An ISO formatted date/time string.
"""
now = datetime.now()
now += timedelta(minutes=future)
now -= timedelta(minutes=past)
if time_zone is None:
return now.isoformat()[:19]
else:
return now.isoformat()
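# Illustrative usage (outputs shown for a hypothetical clock reading):
#
#   get_timestamp(future=30)               # e.g. '2021-04-20T10:30:00.481516'
#   get_timestamp(past=5, time_zone=None)  # trimmed to seconds, e.g. '2021-04-20T09:55:00'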
def snake_case(string):
"""Turn a given string to snake case.
Handles CamelCase, replaces known special characters with
preferred namespaces, replaces spaces with underscores,
and removes all other nuisance characters.
Args:
string (str): The string to turn to snake case.
Returns"
(str): A snake case string.
"""
key = string.replace(' ', '_')
key = key.replace('&', 'and')
key = key.replace('%', 'percent')
key = key.replace('#', 'number')
key = key.replace('$', 'dollars')
key = key.replace('/', '_')
key = key.replace(r'\\', '_')
    key = sub(r'[!@#$%^&*()\[\]{};:,./<>?\\|`~\-=+]', ' ', key)
keys = findall(r'[A-Z]?[a-z]+|[A-Z]{2,}(?=[A-Z][a-z]|\d|\W|$)|\d+', key)
return '_'.join(map(str.lower, keys))
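# Illustrative usage (made-up field names):
#
#   snake_case('TotalTHC')      # -> 'total_thc'
#   snake_case('Total THC %')   # -> 'total_thc_percent'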
|
the-stack_0_14023 | # -*- coding: utf-8 -*-
'''
noxfile
~~~~~~~
Nox configuration script
'''
# Import Python libs
from __future__ import absolute_import, unicode_literals, print_function
import os
import sys
import glob
import json
import pprint
import shutil
import tempfile
if __name__ == '__main__':
sys.stderr.write('Do not execute this file directly. Use nox instead, it will know how to handle this file\n')
sys.stderr.flush()
exit(1)
# Import 3rd-party libs
import nox
from nox.command import CommandFailed
IS_PY3 = sys.version_info > (2,)
# Be verbose when running under a CI context
PIP_INSTALL_SILENT = (os.environ.get('JENKINS_URL') or os.environ.get('CI') or os.environ.get('DRONE')) is None
# Global Path Definitions
REPO_ROOT = os.path.abspath(os.path.dirname(__file__))
SITECUSTOMIZE_DIR = os.path.join(REPO_ROOT, 'tests', 'support', 'coverage')
IS_WINDOWS = sys.platform.lower().startswith('win')
# Python versions to run against
_PYTHON_VERSIONS = ('2', '2.7', '3', '3.4', '3.5', '3.6', '3.7')
# Nox options
# Reuse existing virtualenvs
nox.options.reuse_existing_virtualenvs = True
# Don't fail on missing interpreters
nox.options.error_on_missing_interpreters = False
def _create_ci_directories():
for dirname in ('logs', 'coverage', 'xml-unittests-output'):
path = os.path.join(REPO_ROOT, 'artifacts', dirname)
if not os.path.exists(path):
os.makedirs(path)
def _get_session_python_version_info(session):
try:
version_info = session._runner._real_python_version_info
except AttributeError:
old_install_only_value = session._runner.global_config.install_only
try:
# Force install only to be false for the following chunk of code
# For additional information as to why see:
# https://github.com/theacodes/nox/pull/181
session._runner.global_config.install_only = False
session_py_version = session.run(
'python', '-c'
'import sys; sys.stdout.write("{}.{}.{}".format(*sys.version_info))',
silent=True,
log=False,
)
version_info = tuple(int(part) for part in session_py_version.split('.') if part.isdigit())
session._runner._real_python_version_info = version_info
finally:
session._runner.global_config.install_only = old_install_only_value
return version_info
def _get_session_python_site_packages_dir(session):
try:
site_packages_dir = session._runner._site_packages_dir
except AttributeError:
old_install_only_value = session._runner.global_config.install_only
try:
# Force install only to be false for the following chunk of code
# For additional information as to why see:
# https://github.com/theacodes/nox/pull/181
session._runner.global_config.install_only = False
site_packages_dir = session.run(
'python', '-c'
'import sys; from distutils.sysconfig import get_python_lib; sys.stdout.write(get_python_lib())',
silent=True,
log=False,
)
session._runner._site_packages_dir = site_packages_dir
finally:
session._runner.global_config.install_only = old_install_only_value
return site_packages_dir
def _get_pydir(session):
version_info = _get_session_python_version_info(session)
if version_info < (2, 7):
session.error('Only Python >= 2.7 is supported')
return 'py{}.{}'.format(*version_info)
def _get_distro_info(session):
try:
distro = session._runner._distro
except AttributeError:
# The distro package doesn't output anything for Windows
old_install_only_value = session._runner.global_config.install_only
try:
# Force install only to be false for the following chunk of code
# For additional information as to why see:
# https://github.com/theacodes/nox/pull/181
session._runner.global_config.install_only = False
session.install('--progress-bar=off', 'distro', silent=PIP_INSTALL_SILENT)
output = session.run('distro', '-j', silent=True)
distro = json.loads(output.strip())
session.log('Distro information:\n%s', pprint.pformat(distro))
session._runner._distro = distro
finally:
session._runner.global_config.install_only = old_install_only_value
return distro
def _install_system_packages(session):
'''
Because some python packages are provided by the distribution and cannot
be pip installed, and because we don't want the whole system python packages
on our virtualenvs, we copy the required system python packages into
the virtualenv
'''
system_python_packages = {
'__debian_based_distros__': [
'/usr/lib/python{py_version}/dist-packages/*apt*'
]
}
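    # For example (hypothetical Ubuntu 18.04 / system Python 3.6 layout), the
    # pattern '/usr/lib/python{py_version}/dist-packages/*apt*' typically
    # expands to entries such as '/usr/lib/python3/dist-packages/apt' and
    # '.../apt_pkg.cpython-36m-x86_64-linux-gnu.so', which get copied into the
    # virtualenv's site-packages below.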
for key in ('ubuntu-14.04', 'ubuntu-16.04', 'ubuntu-18.04', 'debian-8', 'debian-9'):
system_python_packages[key] = system_python_packages['__debian_based_distros__']
distro = _get_distro_info(session)
distro_keys = [
'{id}'.format(**distro),
'{id}-{version}'.format(**distro),
'{id}-{version_parts[major]}'.format(**distro)
]
version_info = _get_session_python_version_info(session)
py_version_keys = [
'{}'.format(*version_info),
'{}.{}'.format(*version_info)
]
session_site_packages_dir = _get_session_python_site_packages_dir(session)
for distro_key in distro_keys:
if distro_key not in system_python_packages:
continue
patterns = system_python_packages[distro_key]
for pattern in patterns:
for py_version in py_version_keys:
matches = set(glob.glob(pattern.format(py_version=py_version)))
if not matches:
continue
for match in matches:
src = os.path.realpath(match)
dst = os.path.join(session_site_packages_dir, os.path.basename(match))
if os.path.exists(dst):
                        session.log('Not overwriting already existing %s with %s', dst, src)
continue
session.log('Copying %s into %s', src, dst)
if os.path.isdir(src):
shutil.copytree(src, dst)
else:
shutil.copyfile(src, dst)
def _get_distro_pip_constraints(session, transport):
# Install requirements
distro_constraints = []
if transport == 'tcp':
# The TCP requirements are the exact same requirements as the ZeroMQ ones
transport = 'zeromq'
pydir = _get_pydir(session)
if IS_WINDOWS:
_distro_constraints = os.path.join(REPO_ROOT,
'requirements',
'static',
pydir,
'{}-windows.txt'.format(transport))
if os.path.exists(_distro_constraints):
distro_constraints.append(_distro_constraints)
_distro_constraints = os.path.join(REPO_ROOT,
'requirements',
'static',
pydir,
'windows.txt')
if os.path.exists(_distro_constraints):
distro_constraints.append(_distro_constraints)
else:
_install_system_packages(session)
distro = _get_distro_info(session)
distro_keys = [
'linux',
'{id}'.format(**distro),
'{id}-{version}'.format(**distro),
'{id}-{version_parts[major]}'.format(**distro)
]
for distro_key in distro_keys:
_distro_constraints = os.path.join(REPO_ROOT,
'requirements',
'static',
pydir,
'{}.txt'.format(distro_key))
if os.path.exists(_distro_constraints):
distro_constraints.append(_distro_constraints)
_distro_constraints = os.path.join(REPO_ROOT,
'requirements',
'static',
pydir,
'{}-{}.txt'.format(transport, distro_key))
if os.path.exists(_distro_constraints):
distro_constraints.append(_distro_constraints)
return distro_constraints
def _install_requirements(session, transport, *extra_requirements):
# Install requirements
distro_constraints = _get_distro_pip_constraints(session, transport)
_requirements_files = [
os.path.join(REPO_ROOT, 'requirements', 'base.txt'),
os.path.join(REPO_ROOT, 'requirements', 'zeromq.txt'),
os.path.join(REPO_ROOT, 'requirements', 'pytest.txt')
]
if sys.platform.startswith('linux'):
requirements_files = [
os.path.join(REPO_ROOT, 'requirements', 'static', 'linux.in')
]
elif sys.platform.startswith('win'):
requirements_files = [
os.path.join(REPO_ROOT, 'pkg', 'windows', 'req.txt'),
os.path.join(REPO_ROOT, 'requirements', 'static', 'windows.in')
]
elif sys.platform.startswith('darwin'):
requirements_files = [
os.path.join(REPO_ROOT, 'pkg', 'osx', 'req.txt'),
os.path.join(REPO_ROOT, 'pkg', 'osx', 'req_ext.txt'),
os.path.join(REPO_ROOT, 'requirements', 'static', 'osx.in')
]
while True:
if not requirements_files:
break
requirements_file = requirements_files.pop(0)
if requirements_file not in _requirements_files:
_requirements_files.append(requirements_file)
session.log('Processing {}'.format(requirements_file))
with open(requirements_file) as rfh: # pylint: disable=resource-leakage
for line in rfh:
line = line.strip()
if not line:
continue
if line.startswith('-r'):
reqfile = os.path.join(os.path.dirname(requirements_file), line.strip().split()[-1])
if reqfile in _requirements_files:
continue
_requirements_files.append(reqfile)
continue
for requirements_file in _requirements_files:
install_command = [
'--progress-bar=off', '-r', requirements_file
]
for distro_constraint in distro_constraints:
install_command.extend([
'--constraint', distro_constraint
])
session.install(*install_command, silent=PIP_INSTALL_SILENT)
if extra_requirements:
install_command = [
'--progress-bar=off',
]
for distro_constraint in distro_constraints:
install_command.extend([
'--constraint', distro_constraint
])
install_command += list(extra_requirements)
session.install(*install_command, silent=PIP_INSTALL_SILENT)
def _run_with_coverage(session, *test_cmd):
session.install('--progress-bar=off', 'coverage==4.5.3', silent=PIP_INSTALL_SILENT)
session.run('coverage', 'erase')
python_path_env_var = os.environ.get('PYTHONPATH') or None
if python_path_env_var is None:
python_path_env_var = SITECUSTOMIZE_DIR
else:
python_path_entries = python_path_env_var.split(os.pathsep)
if SITECUSTOMIZE_DIR in python_path_entries:
python_path_entries.remove(SITECUSTOMIZE_DIR)
python_path_entries.insert(0, SITECUSTOMIZE_DIR)
python_path_env_var = os.pathsep.join(python_path_entries)
try:
session.run(
*test_cmd,
env={
# The updated python path so that sitecustomize is importable
'PYTHONPATH': python_path_env_var,
# The full path to the .coverage data file. Makes sure we always write
# them to the same directory
'COVERAGE_FILE': os.path.abspath(os.path.join(REPO_ROOT, '.coverage')),
# Instruct sub processes to also run under coverage
'COVERAGE_PROCESS_START': os.path.join(REPO_ROOT, '.coveragerc')
}
)
finally:
# Always combine and generate the XML coverage report
try:
session.run('coverage', 'combine')
except CommandFailed:
# Sometimes some of the coverage files are corrupt which would trigger a CommandFailed
# exception
pass
session.run('coverage', 'xml', '-o', os.path.join(REPO_ROOT, 'artifacts', 'coverage', 'coverage.xml'))
def _runtests(session, coverage, cmd_args):
# Create required artifacts directories
_create_ci_directories()
try:
if coverage is True:
_run_with_coverage(session, 'coverage', 'run', os.path.join('tests', 'runtests.py'), *cmd_args)
else:
session.run('python', os.path.join('tests', 'runtests.py'), *cmd_args)
except CommandFailed:
# Disabling re-running failed tests for the time being
raise
# pylint: disable=unreachable
names_file_path = os.path.join('artifacts', 'failed-tests.txt')
session.log('Re-running failed tests if possible')
session.install('--progress-bar=off', 'xunitparser==1.3.3', silent=PIP_INSTALL_SILENT)
session.run(
'python',
os.path.join('tests', 'support', 'generate-names-file-from-failed-test-reports.py'),
names_file_path
)
if not os.path.exists(names_file_path):
session.log(
'Failed tests file(%s) was not found. Not rerunning failed tests.',
names_file_path
)
# raise the original exception
raise
with open(names_file_path) as rfh:
contents = rfh.read().strip()
if not contents:
session.log(
'The failed tests file(%s) is empty. Not rerunning failed tests.',
names_file_path
)
# raise the original exception
raise
failed_tests_count = len(contents.splitlines())
if failed_tests_count > 500:
# 500 test failures?! Something else must have gone wrong, don't even bother
session.error(
'Total failed tests({}) > 500. No point on re-running the failed tests'.format(
failed_tests_count
)
)
for idx, flag in enumerate(cmd_args[:]):
if '--names-file=' in flag:
cmd_args.pop(idx)
break
elif flag == '--names-file':
cmd_args.pop(idx) # pop --names-file
cmd_args.pop(idx) # pop the actual names file
break
cmd_args.append('--names-file={}'.format(names_file_path))
if coverage is True:
_run_with_coverage(session, 'coverage', 'run', '-m', 'tests.runtests', *cmd_args)
else:
session.run('python', os.path.join('tests', 'runtests.py'), *cmd_args)
# pylint: enable=unreachable
@nox.session(python=_PYTHON_VERSIONS, name='runtests-parametrized')
@nox.parametrize('coverage', [False, True])
@nox.parametrize('transport', ['zeromq', 'tcp'])
@nox.parametrize('crypto', [None, 'm2crypto', 'pycryptodomex'])
def runtests_parametrized(session, coverage, transport, crypto):
# Install requirements
_install_requirements(session, transport, 'unittest-xml-reporting==2.2.1')
if crypto:
if crypto == 'm2crypto':
session.run('pip', 'uninstall', '-y', 'pycrypto', 'pycryptodome', 'pycryptodomex', silent=True)
else:
session.run('pip', 'uninstall', '-y', 'm2crypto', silent=True)
distro_constraints = _get_distro_pip_constraints(session, transport)
install_command = [
'--progress-bar=off',
]
for distro_constraint in distro_constraints:
install_command.extend([
'--constraint', distro_constraint
])
install_command.append(crypto)
session.install(*install_command, silent=PIP_INSTALL_SILENT)
cmd_args = [
'--tests-logfile={}'.format(
os.path.join(REPO_ROOT, 'artifacts', 'logs', 'runtests.log')
),
'--transport={}'.format(transport)
] + session.posargs
_runtests(session, coverage, cmd_args)
@nox.session(python=_PYTHON_VERSIONS)
@nox.parametrize('coverage', [False, True])
def runtests(session, coverage):
'''
runtests.py session with zeromq transport and default crypto
'''
session.notify(
'runtests-parametrized-{}(coverage={}, crypto=None, transport=\'zeromq\')'.format(
session.python,
coverage
)
)
@nox.session(python=_PYTHON_VERSIONS, name='runtests-tcp')
@nox.parametrize('coverage', [False, True])
def runtests_tcp(session, coverage):
'''
runtests.py session with TCP transport and default crypto
'''
session.notify(
'runtests-parametrized-{}(coverage={}, crypto=None, transport=\'tcp\')'.format(
session.python,
coverage
)
)
@nox.session(python=_PYTHON_VERSIONS, name='runtests-zeromq')
@nox.parametrize('coverage', [False, True])
def runtests_zeromq(session, coverage):
'''
runtests.py session with zeromq transport and default crypto
'''
session.notify(
'runtests-parametrized-{}(coverage={}, crypto=None, transport=\'zeromq\')'.format(
session.python,
coverage
)
)
@nox.session(python=_PYTHON_VERSIONS, name='runtests-m2crypto')
@nox.parametrize('coverage', [False, True])
def runtests_m2crypto(session, coverage):
'''
runtests.py session with zeromq transport and m2crypto
'''
session.notify(
'runtests-parametrized-{}(coverage={}, crypto=\'m2crypto\', transport=\'zeromq\')'.format(
session.python,
coverage
)
)
@nox.session(python=_PYTHON_VERSIONS, name='runtests-tcp-m2crypto')
@nox.parametrize('coverage', [False, True])
def runtests_tcp_m2crypto(session, coverage):
'''
runtests.py session with TCP transport and m2crypto
'''
session.notify(
'runtests-parametrized-{}(coverage={}, crypto=\'m2crypto\', transport=\'tcp\')'.format(
session.python,
coverage
)
)
@nox.session(python=_PYTHON_VERSIONS, name='runtests-zeromq-m2crypto')
@nox.parametrize('coverage', [False, True])
def runtests_zeromq_m2crypto(session, coverage):
'''
runtests.py session with zeromq transport and m2crypto
'''
session.notify(
'runtests-parametrized-{}(coverage={}, crypto=\'m2crypto\', transport=\'zeromq\')'.format(
session.python,
coverage
)
)
@nox.session(python=_PYTHON_VERSIONS, name='runtests-pycryptodomex')
@nox.parametrize('coverage', [False, True])
def runtests_pycryptodomex(session, coverage):
'''
runtests.py session with zeromq transport and pycryptodomex
'''
session.notify(
'runtests-parametrized-{}(coverage={}, crypto=\'pycryptodomex\', transport=\'zeromq\')'.format(
session.python,
coverage
)
)
@nox.session(python=_PYTHON_VERSIONS, name='runtests-tcp-pycryptodomex')
@nox.parametrize('coverage', [False, True])
def runtests_tcp_pycryptodomex(session, coverage):
'''
runtests.py session with TCP transport and pycryptodomex
'''
session.notify(
'runtests-parametrized-{}(coverage={}, crypto=\'pycryptodomex\', transport=\'tcp\')'.format(
session.python,
coverage
)
)
@nox.session(python=_PYTHON_VERSIONS, name='runtests-zeromq-pycryptodomex')
@nox.parametrize('coverage', [False, True])
def runtests_zeromq_pycryptodomex(session, coverage):
'''
runtests.py session with zeromq transport and pycryptodomex
'''
session.notify(
'runtests-parametrized-{}(coverage={}, crypto=\'pycryptodomex\', transport=\'zeromq\')'.format(
session.python,
coverage
)
)
@nox.session(python=_PYTHON_VERSIONS, name='runtests-cloud')
@nox.parametrize('coverage', [False, True])
def runtests_cloud(session, coverage):
# Install requirements
_install_requirements(session, 'zeromq', 'unittest-xml-reporting==2.2.1')
pydir = _get_pydir(session)
cloud_requirements = os.path.join(REPO_ROOT, 'requirements', 'static', pydir, 'cloud.txt')
session.install('--progress-bar=off', '-r', cloud_requirements, silent=PIP_INSTALL_SILENT)
cmd_args = [
'--tests-logfile={}'.format(
os.path.join(REPO_ROOT, 'artifacts', 'logs', 'runtests.log')
),
'--cloud-provider-tests'
] + session.posargs
_runtests(session, coverage, cmd_args)
@nox.session(python=_PYTHON_VERSIONS, name='runtests-tornado')
@nox.parametrize('coverage', [False, True])
def runtests_tornado(session, coverage):
# Install requirements
_install_requirements(session, 'zeromq', 'unittest-xml-reporting==2.2.1')
session.install('--progress-bar=off', 'tornado==5.0.2', silent=PIP_INSTALL_SILENT)
session.install('--progress-bar=off', 'pyzmq==17.0.0', silent=PIP_INSTALL_SILENT)
cmd_args = [
'--tests-logfile={}'.format(
os.path.join(REPO_ROOT, 'artifacts', 'logs', 'runtests.log')
),
] + session.posargs
_runtests(session, coverage, cmd_args)
@nox.session(python=_PYTHON_VERSIONS, name='pytest-parametrized')
@nox.parametrize('coverage', [False, True])
@nox.parametrize('transport', ['zeromq', 'tcp'])
@nox.parametrize('crypto', [None, 'm2crypto', 'pycryptodomex'])
def pytest_parametrized(session, coverage, transport, crypto):
# Install requirements
_install_requirements(session, transport)
if crypto:
if crypto == 'm2crypto':
session.run('pip', 'uninstall', '-y', 'pycrypto', 'pycryptodome', 'pycryptodomex', silent=True)
else:
session.run('pip', 'uninstall', '-y', 'm2crypto', silent=True)
distro_constraints = _get_distro_pip_constraints(session, transport)
install_command = [
'--progress-bar=off',
]
for distro_constraint in distro_constraints:
install_command.extend([
'--constraint', distro_constraint
])
install_command.append(crypto)
session.install(*install_command, silent=PIP_INSTALL_SILENT)
cmd_args = [
'--rootdir', REPO_ROOT,
'--log-file={}'.format(
os.path.join(REPO_ROOT, 'artifacts', 'logs', 'runtests.log')
),
'--no-print-logs',
'-ra',
'-s',
'--transport={}'.format(transport)
] + session.posargs
_pytest(session, coverage, cmd_args)
@nox.session(python=_PYTHON_VERSIONS)
@nox.parametrize('coverage', [False, True])
def pytest(session, coverage):
'''
pytest session with zeromq transport and default crypto
'''
session.notify(
'pytest-parametrized-{}(coverage={}, crypto=None, transport=\'zeromq\')'.format(
session.python,
coverage
)
)
@nox.session(python=_PYTHON_VERSIONS, name='pytest-tcp')
@nox.parametrize('coverage', [False, True])
def pytest_tcp(session, coverage):
'''
pytest session with TCP transport and default crypto
'''
session.notify(
'pytest-parametrized-{}(coverage={}, crypto=None, transport=\'tcp\')'.format(
session.python,
coverage
)
)
@nox.session(python=_PYTHON_VERSIONS, name='pytest-zeromq')
@nox.parametrize('coverage', [False, True])
def pytest_zeromq(session, coverage):
'''
pytest session with zeromq transport and default crypto
'''
session.notify(
'pytest-parametrized-{}(coverage={}, crypto=None, transport=\'zeromq\')'.format(
session.python,
coverage
)
)
@nox.session(python=_PYTHON_VERSIONS, name='pytest-m2crypto')
@nox.parametrize('coverage', [False, True])
def pytest_m2crypto(session, coverage):
'''
pytest session with zeromq transport and m2crypto
'''
session.notify(
'pytest-parametrized-{}(coverage={}, crypto=\'m2crypto\', transport=\'zeromq\')'.format(
session.python,
coverage
)
)
@nox.session(python=_PYTHON_VERSIONS, name='pytest-tcp-m2crypto')
@nox.parametrize('coverage', [False, True])
def pytest_tcp_m2crypto(session, coverage):
'''
pytest session with TCP transport and m2crypto
'''
session.notify(
'pytest-parametrized-{}(coverage={}, crypto=\'m2crypto\', transport=\'tcp\')'.format(
session.python,
coverage
)
)
@nox.session(python=_PYTHON_VERSIONS, name='pytest-zeromq-m2crypto')
@nox.parametrize('coverage', [False, True])
def pytest_zeromq_m2crypto(session, coverage):
'''
pytest session with zeromq transport and m2crypto
'''
session.notify(
'pytest-parametrized-{}(coverage={}, crypto=\'m2crypto\', transport=\'zeromq\')'.format(
session.python,
coverage
)
)
@nox.session(python=_PYTHON_VERSIONS, name='pytest-pycryptodomex')
@nox.parametrize('coverage', [False, True])
def pytest_pycryptodomex(session, coverage):
'''
pytest session with zeromq transport and pycryptodomex
'''
session.notify(
'pytest-parametrized-{}(coverage={}, crypto=\'pycryptodomex\', transport=\'zeromq\')'.format(
session.python,
coverage
)
)
@nox.session(python=_PYTHON_VERSIONS, name='pytest-tcp-pycryptodomex')
@nox.parametrize('coverage', [False, True])
def pytest_tcp_pycryptodomex(session, coverage):
'''
pytest session with TCP transport and pycryptodomex
'''
session.notify(
'pytest-parametrized-{}(coverage={}, crypto=\'pycryptodomex\', transport=\'tcp\')'.format(
session.python,
coverage
)
)
@nox.session(python=_PYTHON_VERSIONS, name='pytest-zeromq-pycryptodomex')
@nox.parametrize('coverage', [False, True])
def pytest_zeromq_pycryptodomex(session, coverage):
'''
pytest session with zeromq transport and pycryptodomex
'''
session.notify(
'pytest-parametrized-{}(coverage={}, crypto=\'pycryptodomex\', transport=\'zeromq\')'.format(
session.python,
coverage
)
)
@nox.session(python=_PYTHON_VERSIONS, name='pytest-cloud')
@nox.parametrize('coverage', [False, True])
def pytest_cloud(session, coverage):
# Install requirements
_install_requirements(session, 'zeromq')
pydir = _get_pydir(session)
cloud_requirements = os.path.join(REPO_ROOT, 'requirements', 'static', pydir, 'cloud.txt')
session.install('--progress-bar=off', '-r', cloud_requirements, silent=PIP_INSTALL_SILENT)
cmd_args = [
'--rootdir', REPO_ROOT,
'--log-file={}'.format(
os.path.join(REPO_ROOT, 'artifacts', 'logs', 'runtests.log')
),
'--no-print-logs',
'-ra',
'-s',
os.path.join(REPO_ROOT, 'tests', 'integration', 'cloud', 'providers')
] + session.posargs
_pytest(session, coverage, cmd_args)
@nox.session(python=_PYTHON_VERSIONS, name='pytest-tornado')
@nox.parametrize('coverage', [False, True])
def pytest_tornado(session, coverage):
# Install requirements
_install_requirements(session, 'zeromq')
session.install('--progress-bar=off', 'tornado==5.0.2', silent=PIP_INSTALL_SILENT)
session.install('--progress-bar=off', 'pyzmq==17.0.0', silent=PIP_INSTALL_SILENT)
cmd_args = [
'--rootdir', REPO_ROOT,
'--log-file={}'.format(
os.path.join(REPO_ROOT, 'artifacts', 'logs', 'runtests.log')
),
'--no-print-logs',
'-ra',
'-s',
] + session.posargs
_pytest(session, coverage, cmd_args)
def _pytest(session, coverage, cmd_args):
# Create required artifacts directories
_create_ci_directories()
try:
if coverage is True:
_run_with_coverage(session, 'coverage', 'run', '-m', 'py.test', *cmd_args)
else:
session.run('py.test', *cmd_args)
except CommandFailed:
# Re-run failed tests
session.log('Re-running failed tests')
cmd_args.append('--lf')
if coverage is True:
_run_with_coverage(session, 'coverage', 'run', '-m', 'py.test', *cmd_args)
else:
session.run('py.test', *cmd_args)
def _lint(session, rcfile, flags, paths):
_install_requirements(session, 'zeromq')
session.install('--progress-bar=off', '-r', 'requirements/static/{}/lint.txt'.format(_get_pydir(session)), silent=PIP_INSTALL_SILENT)
session.run('pylint', '--version')
pylint_report_path = os.environ.get('PYLINT_REPORT')
cmd_args = [
'pylint',
'--rcfile={}'.format(rcfile)
] + list(flags) + list(paths)
stdout = tempfile.TemporaryFile(mode='w+b')
lint_failed = False
try:
session.run(*cmd_args, stdout=stdout)
except CommandFailed:
lint_failed = True
raise
finally:
stdout.seek(0)
contents = stdout.read()
if contents:
if IS_PY3:
contents = contents.decode('utf-8')
else:
contents = contents.encode('utf-8')
sys.stdout.write(contents)
sys.stdout.flush()
if pylint_report_path:
# Write report
with open(pylint_report_path, 'w') as wfh:
wfh.write(contents)
session.log('Report file written to %r', pylint_report_path)
stdout.close()
@nox.session(python='2.7')
def lint(session):
'''
    Run PyLint against Salt and its test suite. Set PYLINT_REPORT to a path to capture output.
'''
session.notify('lint-salt-{}'.format(session.python))
session.notify('lint-tests-{}'.format(session.python))
@nox.session(python='2.7', name='lint-salt')
def lint_salt(session):
'''
Run PyLint against Salt. Set PYLINT_REPORT to a path to capture output.
'''
flags = [
'--disable=I,W1307,C0411,C0413,W8410,str-format-in-logging'
]
if session.posargs:
paths = session.posargs
else:
paths = ['setup.py', 'salt/']
_lint(session, '.testing.pylintrc', flags, paths)
@nox.session(python='2.7', name='lint-tests')
def lint_tests(session):
'''
    Run PyLint against Salt and its test suite. Set PYLINT_REPORT to a path to capture output.
'''
flags = [
'--disable=I,W0232,E1002,W1307,C0411,C0413,W8410,str-format-in-logging'
]
if session.posargs:
paths = session.posargs
else:
paths = ['tests/']
_lint(session, '.testing.pylintrc', flags, paths)
@nox.session(python='3')
def docs(session):
'''
Build Salt's Documentation
'''
pydir = _get_pydir(session)
if pydir == 'py3.4':
session.error('Sphinx only runs on Python >= 3.5')
session.install(
'--progress-bar=off',
'-r', 'requirements/static/{}/docs.txt'.format(pydir),
silent=PIP_INSTALL_SILENT)
os.chdir('doc/')
session.run('make', 'clean', external=True)
session.run('make', 'html', 'SPHINXOPTS=-W', external=True)
session.run('tar', '-czvf', 'doc-archive.tar.gz', '_build/html')
os.chdir('..')
|
the-stack_0_14024 | #
# Copyright (c) 2019-2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
import cudf
import cupy as cp
import numpy as np
from pandas import DataFrame as pdDF
from cuml.common import input_to_cuml_array, CumlArray
from cuml.common import input_to_host_array
from cuml.common import has_cupy
from cuml.common.input_utils import convert_dtype
from cuml.common.memory_utils import _check_array_contiguity
from numba import cuda as nbcuda
###############################################################################
# Parameters #
###############################################################################
test_dtypes_all = [
np.float16, np.float32, np.float64,
np.int8, np.int16, np.int32, np.int64,
np.uint8, np.uint16, np.uint32, np.uint64
]
test_dtypes_acceptable = [
np.float32, np.float64
]
test_input_types = [
'numpy', 'numba', 'cupy', 'cudf', 'pandas', 'cuml'
]
test_num_rows = [1, 100]
test_num_cols = [1, 100]
###############################################################################
# Tests #
###############################################################################
@pytest.mark.parametrize('dtype', test_dtypes_acceptable)
@pytest.mark.parametrize('input_type', test_input_types)
@pytest.mark.parametrize('num_rows', test_num_rows)
@pytest.mark.parametrize('num_cols', test_num_cols)
@pytest.mark.parametrize('order', ['C', 'F', 'K'])
def test_input_to_cuml_array(dtype, input_type, num_rows, num_cols, order):
input_data, real_data = get_input(input_type, num_rows, num_cols,
dtype, order=order)
if input_type == 'cupy' and input_data is None:
pytest.skip('cupy not installed')
X, n_rows, n_cols, res_dtype = input_to_cuml_array(input_data,
order=order)
np.testing.assert_equal(X.to_output('numpy'), real_data)
assert n_rows == num_rows == X.shape[0] == len(X)
assert n_cols == num_cols == X.shape[1]
assert dtype == res_dtype == X.dtype
del input_data
del real_data
@pytest.mark.parametrize('dtype', test_dtypes_acceptable)
@pytest.mark.parametrize('input_type', ['numba', 'cupy'])
@pytest.mark.parametrize('order', ['C', 'F'])
@pytest.mark.parametrize('order_check', ['C', 'F'])
def test_fail_on_order(dtype, input_type, order, order_check):
    # fail_on_order is only exercised here for inputs that are not cudf
    # DataFrames or numpy arrays; those types are converted to the requested
    # order by their respective libraries
input_data, real_data = get_input(input_type, 10, 10, dtype, order=order)
if input_type == 'cupy' and input_data is None:
pytest.skip('cupy not installed')
if order == order_check:
input_to_cuml_array(input_data, fail_on_order=False, order=order)
else:
with pytest.raises(ValueError):
input_to_cuml_array(input_data, fail_on_order=True,
order=order_check)
@pytest.mark.parametrize('dtype', test_dtypes_acceptable)
@pytest.mark.parametrize('input_type', test_input_types)
@pytest.mark.parametrize('from_order', ['C', 'F'])
@pytest.mark.parametrize('to_order', ['C', 'F', 'K'])
def test_convert_matrix_order_cuml_array(dtype, input_type, from_order,
to_order):
input_data, real_data = get_input(input_type, 10, 10, dtype,
order=from_order)
# conv_data = np.array(real_data, order=to_order, copy=True)
if from_order == to_order or to_order == 'K':
conv_data, *_ = input_to_cuml_array(input_data, fail_on_order=False,
order=to_order)
else:
        # A warning would be raised for numpy/cupy/numba inputs, which must be
        # copied into the requested order; cudf and pandas inputs are converted
        # by their respective libraries
if input_type in ['numpy', 'cupy', 'numba']:
# with pytest.warns(UserWarning):
# warning disabled due to using cuml logger, need to
# adapt tests for that.
conv_data, *_ = input_to_cuml_array(input_data,
fail_on_order=False,
order=to_order)
else:
conv_data, *_ = input_to_cuml_array(input_data,
fail_on_order=False,
order=to_order)
if to_order == 'K':
if input_type in ['cudf']:
assert conv_data.order == 'F'
elif input_type in ['pandas']:
assert conv_data.order == 'C'
else:
assert conv_data.order == from_order
else:
assert conv_data.order == to_order
np.testing.assert_equal(real_data, conv_data.to_output('numpy'))
@pytest.mark.parametrize('dtype', test_dtypes_acceptable)
@pytest.mark.parametrize('input_type', test_input_types)
@pytest.mark.parametrize('shape', [(1, 10), (10, 1)])
@pytest.mark.parametrize('from_order', ['C', 'F'])
@pytest.mark.parametrize('to_order', ['C', 'F', 'K'])
def test_convert_vector_order_cuml_array(dtype, input_type, shape, from_order,
to_order):
input_data, real_data = get_input(input_type, shape[0], shape[1], dtype,
order=from_order)
# conv_data = np.array(real_data, order=to_order, copy=True)
conv_data, *_ = input_to_cuml_array(input_data, fail_on_order=False,
order=to_order)
np.testing.assert_equal(real_data, conv_data.to_output('numpy'))
@pytest.mark.parametrize('dtype', test_dtypes_acceptable)
@pytest.mark.parametrize('input_type', test_input_types)
@pytest.mark.parametrize('num_rows', test_num_rows)
@pytest.mark.parametrize('num_cols', test_num_cols)
@pytest.mark.parametrize('order', ['C', 'F'])
def test_input_to_host_array(dtype, input_type, num_rows, num_cols, order):
input_data, real_data = get_input(input_type, num_rows, num_cols, dtype,
order=order)
if input_type == 'cupy' and input_data is None:
pytest.skip('cupy not installed')
    X, X_ptr, n_rows, n_cols, res_dtype = input_to_host_array(input_data,
                                                               order=order)
np.testing.assert_equal(X, real_data)
assert n_rows == num_rows
assert n_cols == num_cols
    assert res_dtype == dtype
del input_data
del real_data
@pytest.mark.parametrize('dtype', test_dtypes_all)
@pytest.mark.parametrize('check_dtype', test_dtypes_all)
@pytest.mark.parametrize('input_type', test_input_types)
@pytest.mark.parametrize('order', ['C', 'F'])
def test_dtype_check(dtype, check_dtype, input_type, order):
if (dtype == np.float16 or check_dtype == np.float16)\
and input_type != 'numpy':
pytest.xfail("float16 not yet supported by numba/cuDF")
if dtype in [np.uint8, np.uint16, np.uint32, np.uint64]:
if input_type in ['cudf', 'pandas']:
pytest.xfail("unsigned int types not yet supported")
input_data, real_data = get_input(input_type, 10, 10, dtype, order=order)
if input_type == 'cupy' and input_data is None:
pytest.skip('cupy not installed')
if dtype == check_dtype:
_, _, _, got_dtype = \
input_to_cuml_array(input_data, check_dtype=check_dtype,
order=order)
assert got_dtype == check_dtype
else:
with pytest.raises(TypeError):
_, _, _, got_dtype = \
input_to_cuml_array(input_data, check_dtype=check_dtype,
order=order)
@pytest.mark.parametrize('num_rows', test_num_rows)
@pytest.mark.parametrize('num_cols', test_num_cols)
@pytest.mark.parametrize('to_dtype', test_dtypes_acceptable)
@pytest.mark.parametrize('from_dtype', test_dtypes_all)
@pytest.mark.parametrize('input_type', test_input_types)
@pytest.mark.parametrize('order', ['C', 'F'])
def test_convert_input_dtype(from_dtype, to_dtype, input_type, num_rows,
num_cols, order):
if from_dtype == np.float16 and input_type != 'numpy':
pytest.xfail("float16 not yet supported by numba/cuDF")
if from_dtype in [np.uint8, np.uint16, np.uint32, np.uint64]:
if input_type == 'cudf':
pytest.xfail("unsigned int types not yet supported by \
cuDF")
elif not has_cupy():
pytest.xfail("unsigned int types not yet supported by \
cuDF and cuPy is not installed.")
input_data, real_data = get_input(input_type, num_rows, num_cols,
from_dtype, out_dtype=to_dtype,
order=order)
if input_type == 'cupy' and input_data is None:
pytest.skip('cupy not installed')
converted_data = convert_dtype(input_data, to_dtype=to_dtype)
if input_type == 'numpy':
np.testing.assert_equal(converted_data, real_data)
elif input_type == 'cudf':
np.testing.assert_equal(converted_data.as_matrix(), real_data)
elif input_type == 'pandas':
np.testing.assert_equal(converted_data.to_numpy(), real_data)
else:
np.testing.assert_equal(converted_data.copy_to_host(), real_data)
if from_dtype == to_dtype:
check_ptr(converted_data, input_data, input_type)
@pytest.mark.parametrize('dtype', test_dtypes_acceptable)
@pytest.mark.parametrize('input_type', ['numpy', 'cupy'])
@pytest.mark.parametrize('order', ['C', 'F'])
@pytest.mark.parametrize('contiguous', [True, False])
@pytest.mark.parametrize('force_contiguous', [True, False])
def test_non_contiguous_to_contiguous_input(dtype, input_type, order,
contiguous, force_contiguous):
input_data, real_data = get_input(input_type, 10, 8, dtype,
order=order)
if not contiguous:
if order == 'F':
data_view = input_data[:-3]
real_data = real_data[:-3]
else:
data_view = input_data[:, :-3]
real_data = real_data[:, :-3]
else:
data_view = input_data
cumlary, *_ = input_to_cuml_array(data_view,
force_contiguous=force_contiguous)
if force_contiguous:
assert(_check_array_contiguity(cumlary))
np.testing.assert_equal(real_data, cumlary.to_output('numpy'))
###############################################################################
# Utility Functions #
###############################################################################
def check_numpy_order(ary, order):
if order == 'F':
return ary.flags.f_contiguous
else:
return ary.flags.c_contiguous
def check_ptr(a, b, input_type):
if input_type == 'cudf':
for (_, col_a), (_, col_b) in zip(a._data.items(), b._data.items()):
assert col_a.base_data.ptr == col_b.base_data.ptr
else:
def get_ptr(x):
try:
return x.__cuda_array_interface__['data'][0]
except AttributeError:
return x.__array_interface__['data'][0]
if input_type == 'pandas':
a = a.values
b = b.values
assert get_ptr(a) == get_ptr(b)
def get_input(type, nrows, ncols, dtype, order='C', out_dtype=False):
rand_mat = (cp.random.rand(nrows, ncols) * 10)
rand_mat = cp.array(rand_mat, dtype=dtype, order=order)
if type == 'numpy':
result = np.array(cp.asnumpy(rand_mat), order=order)
if type == 'cupy':
result = rand_mat
if type == 'numba':
result = nbcuda.as_cuda_array(rand_mat)
if type == 'cudf':
result = cudf.DataFrame(rand_mat)
if type == 'pandas':
result = pdDF(cp.asnumpy(rand_mat))
if type == 'cuml':
result = CumlArray(data=rand_mat)
if out_dtype:
return result, np.array(cp.asnumpy(rand_mat).astype(out_dtype),
order=order)
else:
return result, np.array(cp.asnumpy(rand_mat), order=order)
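# Illustrative call (a sketch of how the parametrized tests above use this
# helper): build a 10x4 float32 cupy matrix in F order together with the numpy
# reference copy used for the assertions.
#
#   cupy_data, np_ref = get_input('cupy', 10, 4, np.float32, order='F')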
|
the-stack_0_14026 | # -*- coding: utf-8 -*-
#
# websockets documentation build configuration file, created by
# sphinx-quickstart on Sun Mar 31 20:48:44 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os, datetime
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.join(os.path.abspath('..'), 'src'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinx.ext.viewcode']
# Spelling check needs an additional module that is not installed by default.
# Add it only if spelling check is requested so docs can be generated without it.
if 'spelling' in sys.argv:
extensions.append('sphinxcontrib.spelling')
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'websockets'
copyright = '2013-{}, Aymeric Augustin'.format(datetime.date.today().year)
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '7.0'
# The full version, including alpha/beta/rc tags.
release = '7.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
'logo': 'websockets.svg',
'description': 'A library for building WebSocket servers and clients in Python with a focus on correctness and simplicity.',
'github_button': True,
'github_user': 'aaugustin',
'github_repo': 'websockets',
}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
'**': [
'about.html',
'navigation.html',
'relations.html',
'searchbox.html',
]
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'websocketsdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'websockets.tex', 'websockets Documentation',
'Aymeric Augustin', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'websockets', 'websockets Documentation',
['Aymeric Augustin'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'websockets', 'websockets Documentation',
   'Aymeric Augustin', 'websockets', 'A library for building WebSocket servers and clients in Python.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/3/': None}
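# Illustrative alternative form (assumption: a Sphinx release new enough to accept
# named intersphinx keys); the named mapping is equivalent but easier to extend:
#intersphinx_mapping = {'python': ('https://docs.python.org/3/', None)}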
|
the-stack_0_14028 | from sqlalchemy.exc import IntegrityError
from .. import auth
from ..base_view import BaseView
from ..collaborator.models import Collaborator
from ..department.models import Department
from ..dependent.models import Dependent
from ..dependent.schemas import DependentsSchema
from .schemas import CollaboratorSchema
class CollaboratorView(BaseView):
schema = CollaboratorSchema
model = Collaborator
@auth.login_required
def get(self, id=None):
if id:
collaborator = self.model.query.filter_by(id=id).first_or_404(
"Collaborator with id not found"
)
return self.jsonify(collaborator), 200
return self.jsonify(self.model.query.all(), many=True), 200
@auth.login_required
def post(self):
try:
super(CollaboratorView, self).post()
data = self.get_data()
department = data.pop("department")
department = Department.query.filter_by(name=department).first_or_404(
"Department with name not found"
)
data["department"] = department
collaborator = self.model(**data)
collaborator.save()
return self.jsonify(collaborator), 201
except IntegrityError:
self.abort(400, "Collaborator already exists")
@auth.login_required
def delete(self, id=None):
collaborator = (
self.model().query.filter_by(id=id).first_or_404("Collaborator not found")
)
collaborator.delete()
return super(CollaboratorView, self).delete()
@auth.login_required
def put(self, id=None):
try:
super(CollaboratorView, self).put()
collaborator = (
self.model()
.query.filter_by(id=id)
.first_or_404("Collaborator not found")
)
data = self.get_data(partial=True)
department = data.get("department")
if department:
department_instance = Department.query.filter_by(
name=department
).first_or_404("Department not found")
collaborator.department = department_instance
collaborator.full_name = data.get("full_name", collaborator.full_name)
collaborator.save()
return self.jsonify(collaborator), 200
except IntegrityError:
self.abort(400, "Collaborator already exists")
class CollaboratorDependents(BaseView):
model = Dependent
@auth.login_required
def post(self, id=None):
collaborator = Collaborator.query.filter_by(id=id).first_or_404(
"Collaborator with id not found"
)
schema = DependentsSchema()
data = self.get_data(schema)
data["collaborator"] = collaborator
dependent = self.model(**data)
dependent.save()
return schema.jsonify(dependent), 201
|
the-stack_0_14029 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# thumbor imaging service
# https://github.com/globocom/thumbor/wiki
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2011 globo.com [email protected]
from thumbor.filters import BaseFilter, filter_method
from thumbor.ext.filters import _saturation
class Filter(BaseFilter):
@filter_method(BaseFilter.DecimalNumber)
def saturation(self, change):
mode, data = self.engine.image_data_as_rgb()
imgdata = _saturation.apply(mode, change, data)
self.engine.set_image_data(imgdata)
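# Usage sketch (assumption: a standard thumbor deployment with this filter enabled);
# filters are applied through the request URL, for example:
#   http://thumbor-host/unsafe/filters:saturation(1.5)/path/to/image.jpg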
|
the-stack_0_14033 | import pyrogram
import asyncio
import os
from pyrogram import Client, filters
from pyrogram.types import Message, User, InlineKeyboardMarkup, InlineKeyboardButton
from donlee_robot.donlee_robot import DonLee_Robot
from config import FORCE_CHANNEL, SAVE_USER, DEV_USERNAME, WELCOME_BUTTON_NAME, CUSTOM_WELCOME_TEXT, CUSTOM_WELCOME
# f"👋Hy {mention} Welcome To {groupname}"
Url = f"t.me/{FORCE_CHANNEL}"
WELCOME_BUTTONS = [[ InlineKeyboardButton(WELCOME_BUTTON_NAME, url=Url)]]
@DonLee_Robot.on_message(filters.command('id') & (filters.private | filters.group))
async def showid(client, message):
chat_type = message.chat.type
if chat_type == "private":
user_id = message.chat.id
await message.reply_text(
f"Your ID : `{user_id}`",
parse_mode="md",
quote=True
)
elif (chat_type == "group") or (chat_type == "supergroup"):
user_id = message.from_user.id
chat_id = message.chat.id
if message.reply_to_message:
reply_id = f"Rᴇᴘʟɪᴇᴅ Usᴇʀ ID : `{message.reply_to_message.from_user.id}`"
else:
reply_id = ""
await message.reply_text(
f"Yᴏᴜʀ ID : `{user_id}`\nTʜɪs Gʀᴏᴜᴘ ID : `{chat_id}`\n\n{reply_id}",
parse_mode="md",
quote=True
)
@DonLee_Robot.on_message(filters.command('info') & (filters.private | filters.group))
async def showinfo(client, message):
try:
cmd, id = message.text.split(" ", 1)
except:
id = False
pass
if id:
if (len(id) == 10 or len(id) == 9):
try:
checkid = int(id)
except:
await message.reply_text("__Enter a valid USER ID__", quote=True, parse_mode="md")
return
else:
await message.reply_text("__Enter a valid USER ID__", quote=True, parse_mode="md")
return
if SAVE_USER == "yes":
name, username, dcid = await find_user(str(id))
else:
try:
user = await client.get_users(int(id))
name = str(user.first_name + (user.last_name or ""))
username = user.username
dcid = user.dc_id
except:
name = False
pass
if not name:
await message.reply_text("__USER Details not found!!__", quote=True, parse_mode="md")
return
else:
if message.reply_to_message:
name = str(message.reply_to_message.from_user.first_name\
+ (message.reply_to_message.from_user.last_name or ""))
id = message.reply_to_message.from_user.id
username = message.reply_to_message.from_user.username
dcid = message.reply_to_message.from_user.dc_id
else:
name = str(message.from_user.first_name\
+ (message.from_user.last_name or ""))
id = message.from_user.id
username = message.from_user.username
dcid = message.from_user.dc_id
if not str(username) == "None":
user_name = f"@{username}"
else:
user_name = "none"
await message.reply_text(
f"<b>👨💼Nᴀᴍᴇ</b> : {name}\n\n"
f"<b>📃Usᴇʀ ID</b> : <code>{id}</code>\n\n"
f"<b>👤Usᴇʀɴᴀᴍᴇ</b> : {user_name}\n\n"
f"<b>🔐Pᴇʀᴍᴀɴᴀɴᴛ USER ʟɪɴᴋ</b> : <a href='tg://user?id={id}'>Click here!</a>\n\n"
f"<b>📑DC ID</b> : {dcid}\n\n",
quote=True,
parse_mode="html"
)
@DonLee_Robot.on_message(filters.group & filters.forwarded)
async def forward(bot, message):
await message.delete()
@DonLee_Robot.on_message(filters.group & filters.via_bot)
async def inline(bot, message):
await message.delete()
@DonLee_Robot.on_message(filters.new_chat_members)
async def auto_welcome(bot: DonLee_Robot, msg: Message):
# from PR0FESS0R-99 import Auto-Welcome-Bot
# from PR0FESS0R-99 import ID-Bot
# first = msg.from_user.first_name
# last = msg.from_user.last_name
# mention = msg.from_user.mention
# username = msg.from_user.username
# id = msg.from_user.id
# group_name = msg.chat.title
# group_username = msg.chat.username
# button_name = os.environ.get("WELCOME_BUTTON_NAME", name_button)
# button_link = os.environ.get("WELCOME_BUTTON_LINK", link_button)
# welcome_text = f"Hey {mention}\nWelcome To {group_name}"
# WELCOME_TEXT = os.environ.get("WELCOME_TEXT", welcome_text)
    print("Welcome message handler activated")
# YES = "True"
# NO = "False"
# HOOOO = CUSTOM_WELCOME
# BUTTON = bool(os.environ.get("CUSTOM_WELCOME"))
if CUSTOM_WELCOME == "yes":
Auto_Delete=await msg.reply_text(text=CUSTOM_WELCOME_TEXT.format(
mention = msg.from_user.mention,
groupname = msg.chat.title
),
reply_markup=InlineKeyboardMarkup(WELCOME_BUTTONS)
)
await asyncio.sleep(60) # in seconds
await Auto_Delete.delete()
else:
await msg.delete()
@DonLee_Robot.on_message((filters.command(["report"]) | filters.regex("@admins") | filters.regex("@admin")) & filters.group)
async def report(bot, message):
if message.reply_to_message:
chat_id = message.chat.id
reporter = str(message.from_user.id)
mention = message.from_user.mention
admins = await bot.get_chat_members(chat_id=chat_id, filter="administrators")
success = False
report = f"Reporter : {mention} ({reporter})" + "\n"
report += f"Message : {message.reply_to_message.link}"
for admin in admins:
try:
reported_post = await message.reply_to_message.forward(admin.user.id)
await reported_post.reply_text(
text=report,
chat_id=admin.user.id,
disable_web_page_preview=True
)
success = True
except:
pass
if success:
await message.reply_text("**Reported to Admins!**")
@DonLee_Robot.on_message(filters.command(["ban"]))
async def ban(bot, message):
chatid = message.chat.id
if message.reply_to_message:
admins_list = await bot.get_chat_members(
chat_id=chatid, filter="administrators"
)
admins = []
for admin in admins_list:
id = admin.user.id
admins.append(id)
userid = message.from_user.id
if userid in admins:
user_to_ban = message.reply_to_message.from_user.id
if user_to_ban in admins:
await message.reply(text="Tʜɪɴᴋ ʜᴇ ɪs Aᴅᴍɪɴ, Cᴀɴ'ᴛ Bᴀɴ Aᴅᴍɪɴs")
else:
try:
await bot.kick_chat_member(chat_id=chatid, user_id=user_to_ban)
await message.reply_text(
f"Bye {message.reply_to_message.from_user.mention}"
)
except Exception as error:
await message.reply_text(f"{error}")
else:
await message.reply_text("Nɪᴄᴇ ᴛʀʏ, Bᴜᴛ ᴡʀᴏɴɢ ᴍᴏᴠᴇ..")
return
else:
return
@DonLee_Robot.on_message(filters.command(["unban"]))
async def unban(bot, message):
chatid = message.chat.id
if message.reply_to_message:
admins_list = await bot.get_chat_members(
chat_id=chatid,
filter="administrators"
)
admins = []
for admin in admins_list:
id = admin.user.id
admins.append(id)
userid = message.from_user.id
if userid in admins:
            user_to_unban = message.reply_to_message.from_user.id
if user_to_unban in admins:
await message.reply(text="Tʜɪɴᴋ ʜᴇ ɪs Aᴅᴍɪɴ, Cᴀɴ'ᴛ Bᴀɴ Aᴅᴍɪɴs")
else:
try:
await bot.unban_chat_member(chat_id=chatid, user_id=user_to_unban)
await message.reply_text(
f"welcome {message.reply_to_message.from_user.mention}"
)
except Exception as error:
await message.reply_text(f"{error}")
else:
await message.reply_text("Nɪᴄᴇ ᴛʀʏ, Bᴜᴛ ᴡʀᴏɴɢ ᴍᴏᴠᴇ..")
return
else:
return
@DonLee_Robot.on_message(filters.channel & (filters.text | filters.media))
async def tag(client, message):
await message.copy(message.chat.id)
|
the-stack_0_14034 | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
#pylint: skip-file
# coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class Dependency(Model):
"""Deployment dependency information.
:param depends_on: Gets the list of dependencies.
:type depends_on: list of :class:`BasicDependency
<Default.models.BasicDependency>`
:param id: Gets or sets the ID of the dependency.
:type id: str
:param resource_type: Gets or sets the dependency resource type.
:type resource_type: str
:param resource_name: Gets or sets the dependency resource name.
:type resource_name: str
"""
_attribute_map = {
'depends_on': {'key': 'dependsOn', 'type': '[BasicDependency]'},
'id': {'key': 'id', 'type': 'str'},
'resource_type': {'key': 'resourceType', 'type': 'str'},
'resource_name': {'key': 'resourceName', 'type': 'str'},
}
def __init__(self, depends_on=None, id=None, resource_type=None, resource_name=None):
self.depends_on = depends_on
self.id = id
self.resource_type = resource_type
self.resource_name = resource_name
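# Minimal usage sketch (the resource values below are illustrative placeholders,
# not taken from a real deployment):
#
#     dep = Dependency(
#         id='/subscriptions/<sub>/resourceGroups/rg/providers/Microsoft.Storage/storageAccounts/mystore',
#         resource_type='Microsoft.Storage/storageAccounts',
#         resource_name='mystore')
#     print(dep.resource_type)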
|
the-stack_0_14037 | # Given a binary search tree and a node in it, find the in-order successor of that node in the BST.
#
# The successor of a node p is the node with the smallest key greater than p.val.
#
# Input: root = [2, 1, 3], p = 1
# Output: 2
# Explanation: 1's in-order successor node is 2. Note that both p and the return value are of TreeNode type.
#
# Input: root = [5,3,6,2,4,null,null,1], p = 6
# Output: null
# Explanation: There is no in-order successor of the current node, so the answer is null.
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
def inorderSuccessor(self, root, p):
if p.right:
curr = p.right
while curr.left:
curr = curr.left
return curr
stack, inorder = [], float("inf")
while stack or root:
while root:
stack.append(root)
root = root.left
root = stack.pop()
if inorder == p.val:
return root
inorder = root.val
root = root.right
return None
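# Quick sanity check (illustrative): build the tree [2, 1, 3] from the first
# example above and ask for the in-order successor of the node holding 1.
if __name__ == "__main__":
    root = TreeNode(2)
    root.left = TreeNode(1)
    root.right = TreeNode(3)
    successor = Solution().inorderSuccessor(root, root.left)
    print(successor.val if successor else None)  # expected: 2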
|
the-stack_0_14038 | """
Copyright (c) Contributors to the Open 3D Engine Project.
For complete copyright and license terms please see the LICENSE at the root of this distribution.
SPDX-License-Identifier: Apache-2.0 OR MIT
"""
import pytest
import os
import sys
from ly_test_tools.o3de.editor_test import EditorTestSuite
sys.path.append(os.path.dirname(os.path.abspath(__file__)) + '/../automatedtesting_shared')
from Tools.LyTestTools.ly_test_tools.environment import process_utils
from Tools.LyTestTools.ly_test_tools.launchers import launcher_helper
from Tools.LyTestTools.ly_test_tools.log.log_monitor import LogMonitor
import Tools.LyTestTools.ly_test_tools.environment.waiter as waiter
@pytest.mark.SUITE_sandbox
@pytest.mark.parametrize("project", ["AutomatedTesting"])
@pytest.mark.parametrize("launcher_platform", ['windows_editor'])
class TestAutomation(EditorTestSuite):
def test_Multiplayer_SimpleGameServerLauncher_ConnectsSuccessfully(self, workspace, launcher_platform):
unexpected_lines = []
expected_lines = ["New outgoing connection to remote address:"]
halt_on_unexpected = False
timeout = 180
# Start the AutomatedTesting.ServerLauncher.exe in hosting mode, no rendering mode, and wait for it to exist
server_launcher = launcher_helper.create_server_launcher(workspace)
server_launcher.args.extend(['+host', '-rhi=Null'])
server_launcher.start()
waiter.wait_for(lambda: process_utils.process_exists(f"AutomatedTesting.ServerLauncher.exe", ignore_extensions=True))
# Start the AutomatedTesting.GameLauncher.exe in client mode, no rendering mode, and wait for it to exist
game_launcher = launcher_helper.create_game_launcher(workspace)
game_launcher.args.extend(['+connect', '-rhi=Null'])
game_launcher.start()
waiter.wait_for(lambda: process_utils.process_exists(f"AutomatedTesting.GameLauncher.exe", ignore_extensions=True))
# Verify that the GameLauncher.exe was able to connect to the ServerLauncher.exe by checking the logs
game_launcher_log_file = os.path.join(game_launcher.workspace.paths.project_log(), 'Game.log')
game_launcher_log_monitor = LogMonitor(game_launcher, game_launcher_log_file)
game_launcher_log_monitor.monitor_log_for_lines(expected_lines, unexpected_lines, halt_on_unexpected, timeout)
|
the-stack_0_14039 | from sympy.core.expr import unchanged
from sympy.sets import (ConditionSet, Intersection, FiniteSet,
EmptySet, Union, Contains, ImageSet)
from sympy.core.function import (Function, Lambda)
from sympy.core.mod import Mod
from sympy.core.numbers import (oo, pi)
from sympy.core.relational import (Eq, Ne)
from sympy.core.singleton import S
from sympy.core.symbol import (Symbol, symbols)
from sympy.functions.elementary.complexes import Abs
from sympy.functions.elementary.trigonometric import (asin, sin)
from sympy.logic.boolalg import And
from sympy.matrices.dense import Matrix
from sympy.matrices.expressions.matexpr import MatrixSymbol
from sympy.sets.sets import Interval
from sympy.testing.pytest import raises, warns_deprecated_sympy
w = Symbol('w')
x = Symbol('x')
y = Symbol('y')
z = Symbol('z')
f = Function('f')
def test_CondSet():
sin_sols_principal = ConditionSet(x, Eq(sin(x), 0),
Interval(0, 2*pi, False, True))
assert pi in sin_sols_principal
assert pi/2 not in sin_sols_principal
assert 3*pi not in sin_sols_principal
assert oo not in sin_sols_principal
assert 5 in ConditionSet(x, x**2 > 4, S.Reals)
assert 1 not in ConditionSet(x, x**2 > 4, S.Reals)
# in this case, 0 is not part of the base set so
# it can't be in any subset selected by the condition
assert 0 not in ConditionSet(x, y > 5, Interval(1, 7))
# since 'in' requires a true/false, the following raises
# an error because the given value provides no information
# for the condition to evaluate (since the condition does
# not depend on the dummy symbol): the result is `y > 5`.
# In this case, ConditionSet is just acting like
# Piecewise((Interval(1, 7), y > 5), (S.EmptySet, True)).
raises(TypeError, lambda: 6 in ConditionSet(x, y > 5,
Interval(1, 7)))
X = MatrixSymbol('X', 2, 2)
matrix_set = ConditionSet(X, Eq(X*Matrix([[1, 1], [1, 1]]), X))
Y = Matrix([[0, 0], [0, 0]])
assert matrix_set.contains(Y).doit() is S.true
Z = Matrix([[1, 2], [3, 4]])
assert matrix_set.contains(Z).doit() is S.false
assert isinstance(ConditionSet(x, x < 1, {x, y}).base_set,
FiniteSet)
raises(TypeError, lambda: ConditionSet(x, x + 1, {x, y}))
raises(TypeError, lambda: ConditionSet(x, x, 1))
I = S.Integers
U = S.UniversalSet
C = ConditionSet
assert C(x, False, I) is S.EmptySet
assert C(x, True, I) is I
assert C(x, x < 1, C(x, x < 2, I)
) == C(x, (x < 1) & (x < 2), I)
assert C(y, y < 1, C(x, y < 2, I)
) == C(x, (x < 1) & (y < 2), I), C(y, y < 1, C(x, y < 2, I))
assert C(y, y < 1, C(x, x < 2, I)
) == C(y, (y < 1) & (y < 2), I)
assert C(y, y < 1, C(x, y < x, I)
) == C(x, (x < 1) & (y < x), I)
assert unchanged(C, y, x < 1, C(x, y < x, I))
assert ConditionSet(x, x < 1).base_set is U
# arg checking is not done at instantiation but this
# will raise an error when containment is tested
assert ConditionSet((x,), x < 1).base_set is U
c = ConditionSet((x, y), x < y, I**2)
assert (1, 2) in c
assert (1, pi) not in c
raises(TypeError, lambda: C(x, x > 1, C((x, y), x > 1, I**2)))
# signature mismatch since only 3 args are accepted
raises(TypeError, lambda: C((x, y), x + y < 2, U, U))
def test_CondSet_intersect():
input_conditionset = ConditionSet(x, x**2 > 4, Interval(1, 4, False,
False))
other_domain = Interval(0, 3, False, False)
output_conditionset = ConditionSet(x, x**2 > 4, Interval(
1, 3, False, False))
assert Intersection(input_conditionset, other_domain
) == output_conditionset
def test_issue_9849():
assert ConditionSet(x, Eq(x, x), S.Naturals
) is S.Naturals
assert ConditionSet(x, Eq(Abs(sin(x)), -1), S.Naturals
) == S.EmptySet
def test_simplified_FiniteSet_in_CondSet():
assert ConditionSet(x, And(x < 1, x > -3), FiniteSet(0, 1, 2)
) == FiniteSet(0)
assert ConditionSet(x, x < 0, FiniteSet(0, 1, 2)) == EmptySet
assert ConditionSet(x, And(x < -3), EmptySet) == EmptySet
y = Symbol('y')
assert (ConditionSet(x, And(x > 0), FiniteSet(-1, 0, 1, y)) ==
Union(FiniteSet(1), ConditionSet(x, And(x > 0), FiniteSet(y))))
assert (ConditionSet(x, Eq(Mod(x, 3), 1), FiniteSet(1, 4, 2, y)) ==
Union(FiniteSet(1, 4), ConditionSet(x, Eq(Mod(x, 3), 1),
FiniteSet(y))))
def test_free_symbols():
assert ConditionSet(x, Eq(y, 0), FiniteSet(z)
).free_symbols == {y, z}
assert ConditionSet(x, Eq(x, 0), FiniteSet(z)
).free_symbols == {z}
assert ConditionSet(x, Eq(x, 0), FiniteSet(x, z)
).free_symbols == {x, z}
assert ConditionSet(x, Eq(x, 0), ImageSet(Lambda(y, y**2),
S.Integers)).free_symbols == set()
def test_bound_symbols():
assert ConditionSet(x, Eq(y, 0), FiniteSet(z)
).bound_symbols == [x]
assert ConditionSet(x, Eq(x, 0), FiniteSet(x, y)
).bound_symbols == [x]
assert ConditionSet(x, x < 10, ImageSet(Lambda(y, y**2), S.Integers)
).bound_symbols == [x]
assert ConditionSet(x, x < 10, ConditionSet(y, y > 1, S.Integers)
).bound_symbols == [x]
def test_as_dummy():
_0, _1 = symbols('_0 _1')
assert ConditionSet(x, x < 1, Interval(y, oo)
).as_dummy() == ConditionSet(_0, _0 < 1, Interval(y, oo))
assert ConditionSet(x, x < 1, Interval(x, oo)
).as_dummy() == ConditionSet(_0, _0 < 1, Interval(x, oo))
assert ConditionSet(x, x < 1, ImageSet(Lambda(y, y**2), S.Integers)
).as_dummy() == ConditionSet(
_0, _0 < 1, ImageSet(Lambda(_0, _0**2), S.Integers))
e = ConditionSet((x, y), x <= y, S.Reals**2)
assert e.bound_symbols == [x, y]
assert e.as_dummy() == ConditionSet((_0, _1), _0 <= _1, S.Reals**2)
assert e.as_dummy() == ConditionSet((y, x), y <= x, S.Reals**2
).as_dummy()
def test_subs_CondSet():
s = FiniteSet(z, y)
c = ConditionSet(x, x < 2, s)
assert c.subs(x, y) == c
assert c.subs(z, y) == ConditionSet(x, x < 2, FiniteSet(y))
assert c.xreplace({x: y}) == ConditionSet(y, y < 2, s)
assert ConditionSet(x, x < y, s
).subs(y, w) == ConditionSet(x, x < w, s.subs(y, w))
# if the user uses assumptions that cause the condition
# to evaluate, that can't be helped from SymPy's end
n = Symbol('n', negative=True)
assert ConditionSet(n, 0 < n, S.Integers) is S.EmptySet
p = Symbol('p', positive=True)
assert ConditionSet(n, n < y, S.Integers
).subs(n, x) == ConditionSet(n, n < y, S.Integers)
raises(ValueError, lambda: ConditionSet(
x + 1, x < 1, S.Integers))
assert ConditionSet(
p, n < x, Interval(-5, 5)).subs(x, p) == Interval(-5, 5), ConditionSet(
p, n < x, Interval(-5, 5)).subs(x, p)
assert ConditionSet(
n, n < x, Interval(-oo, 0)).subs(x, p
) == Interval(-oo, 0)
assert ConditionSet(f(x), f(x) < 1, {w, z}
).subs(f(x), y) == ConditionSet(f(x), f(x) < 1, {w, z})
# issue 17341
k = Symbol('k')
img1 = ImageSet(Lambda(k, 2*k*pi + asin(y)), S.Integers)
img2 = ImageSet(Lambda(k, 2*k*pi + asin(S.One/3)), S.Integers)
assert ConditionSet(x, Contains(
y, Interval(-1,1)), img1).subs(y, S.One/3).dummy_eq(img2)
assert (0, 1) in ConditionSet((x, y), x + y < 3, S.Integers**2)
raises(TypeError, lambda: ConditionSet(n, n < -10, Interval(0, 10)))
def test_subs_CondSet_tebr():
with warns_deprecated_sympy():
assert ConditionSet((x, y), {x + 1, x + y}, S.Reals**2) == \
ConditionSet((x, y), Eq(x + 1, 0) & Eq(x + y, 0), S.Reals**2)
def test_dummy_eq():
C = ConditionSet
I = S.Integers
c = C(x, x < 1, I)
assert c.dummy_eq(C(y, y < 1, I))
assert c.dummy_eq(1) == False
assert c.dummy_eq(C(x, x < 1, S.Reals)) == False
c1 = ConditionSet((x, y), Eq(x + 1, 0) & Eq(x + y, 0), S.Reals**2)
c2 = ConditionSet((x, y), Eq(x + 1, 0) & Eq(x + y, 0), S.Reals**2)
c3 = ConditionSet((x, y), Eq(x + 1, 0) & Eq(x + y, 0), S.Complexes**2)
assert c1.dummy_eq(c2)
assert c1.dummy_eq(c3) is False
assert c.dummy_eq(c1) is False
assert c1.dummy_eq(c) is False
# issue 19496
m = Symbol('m')
n = Symbol('n')
a = Symbol('a')
d1 = ImageSet(Lambda(m, m*pi), S.Integers)
d2 = ImageSet(Lambda(n, n*pi), S.Integers)
c1 = ConditionSet(x, Ne(a, 0), d1)
c2 = ConditionSet(x, Ne(a, 0), d2)
assert c1.dummy_eq(c2)
def test_contains():
assert 6 in ConditionSet(x, x > 5, Interval(1, 7))
assert (8 in ConditionSet(x, y > 5, Interval(1, 7))) is False
# `in` should give True or False; in this case there is not
# enough information for that result
raises(TypeError,
lambda: 6 in ConditionSet(x, y > 5, Interval(1, 7)))
# here, there is enough information but the comparison is
# not defined
raises(TypeError, lambda: 0 in ConditionSet(x, 1/x >= 0, S.Reals))
assert ConditionSet(x, y > 5, Interval(1, 7)
).contains(6) == (y > 5)
assert ConditionSet(x, y > 5, Interval(1, 7)
).contains(8) is S.false
assert ConditionSet(x, y > 5, Interval(1, 7)
).contains(w) == And(Contains(w, Interval(1, 7)), y > 5)
# This returns an unevaluated Contains object
# because 1/0 should not be defined for 1 and 0 in the context of
# reals.
assert ConditionSet(x, 1/x >= 0, S.Reals).contains(0) == \
Contains(0, ConditionSet(x, 1/x >= 0, S.Reals), evaluate=False)
c = ConditionSet((x, y), x + y > 1, S.Integers**2)
assert not c.contains(1)
assert c.contains((2, 1))
assert not c.contains((0, 1))
c = ConditionSet((w, (x, y)), w + x + y > 1, S.Integers*S.Integers**2)
assert not c.contains(1)
assert not c.contains((1, 2))
assert not c.contains(((1, 2), 3))
assert not c.contains(((1, 2), (3, 4)))
assert c.contains((1, (3, 4)))
def test_as_relational():
assert ConditionSet((x, y), x > 1, S.Integers**2).as_relational((x, y)
) == (x > 1) & Contains((x, y), S.Integers**2)
assert ConditionSet(x, x > 1, S.Integers).as_relational(x
) == Contains(x, S.Integers) & (x > 1)
def test_flatten():
"""Tests whether there is basic denesting functionality"""
inner = ConditionSet(x, sin(x) + x > 0)
outer = ConditionSet(x, Contains(x, inner), S.Reals)
assert outer == ConditionSet(x, sin(x) + x > 0, S.Reals)
inner = ConditionSet(y, sin(y) + y > 0)
outer = ConditionSet(x, Contains(y, inner), S.Reals)
assert outer != ConditionSet(x, sin(x) + x > 0, S.Reals)
inner = ConditionSet(x, sin(x) + x > 0).intersect(Interval(-1, 1))
outer = ConditionSet(x, Contains(x, inner), S.Reals)
assert outer == ConditionSet(x, sin(x) + x > 0, Interval(-1, 1))
def test_duplicate():
from sympy.core.function import BadSignatureError
# test coverage for line 95 in conditionset.py, check for duplicates in symbols
dup = symbols('a,a')
raises(BadSignatureError, lambda: ConditionSet(dup, x < 0))
|
the-stack_0_14040 | class Piece:
def __init__(self, piece_type, piece_colour, piece_name, xy = None):
assert piece_colour.lower() in ['black', 'white'], 'Invalid colour'
assert piece_type.lower() in ['pawn', 'bishop', 'rook', 'knight', 'king', 'queen'], 'Invalid piece_type'
self.type = piece_type
self.colour = piece_colour
self.name = piece_name
if xy is None:
print('Warning : xy initialised as None')
else:
xy = parse_xy(xy)
assert xy[0] in range(8) and xy[1] in range(8), 'Piece location out of range'
self.xy = xy
self.open_to_passant = False
self.peace_moves = None
self.kill_moves = None
def set_xy(self, xy):
xy = parse_xy(xy)
        assert xy[0] in range(8) and xy[1] in range(8), 'Piece location out of range'
if self.type == 'pawn':
if self.colour == 'white' and self.xy[0] == 1 and xy[0] == 3:
self.open_to_passant = True
elif self.colour == 'black' and self.xy[0] == 6 and xy[0] == 4:
self.open_to_passant = True
else:
self.open_to_passant = False
self.xy = xy
def update_Moves(self, Board):
x, y = self.xy
peace_moves, kill_moves = [], []
move_functions_dict = get_move_functions(self.type)
if self.type == 'pawn':
if self.colour == 'white':
peace_moves.append((x + 1, y))
kill_moves += [(x + 1, y + 1), (x + 1, y - 1)]
if x == 1:
peace_moves.append((x + 2, y))
else:
peace_moves.append((x - 1, y))
kill_moves += [(x - 1, y + 1), (x - 1, y - 1)]
if x == 6:
peace_moves.append((x - 2, y))
peace_moves = [xy for xy in peace_moves if Board.is_peace_Space(xy)]
kill_moves = [Board.is_kill_Move(new_xy, current_xy = self.xy, is_pawn = True) for new_xy in kill_moves]
kill_moves = [val for val in kill_moves if val[0]]
elif self.type == 'knight':
peace_moves = [xy for xy in list(zip( [x+2, x+2, x+1 , x+1, x-1, x-1, x-2, x-2],
[y+1, y-1, y+2, y-2, y+2, y-2, y+1, y-1]))
if Board.is_peace_Space(xy)]
kill_moves = list(zip( [x+2, x+2, x+1 , x+1, x-1, x-1, x-2, x-2],
[y+1, y-1, y+2, y-2, y+2, y-2, y+1, y-1]))
            kill_moves = [Board.is_kill_Move(new_xy, current_xy = self.xy) for new_xy in kill_moves]  # knights are not pawns, so no en-passant handling
kill_moves = [val for val in kill_moves if val[0]]
elif self.type == 'king':
peace_moves = [xy for xy in list(zip( [x , x , x+1, x+1, x+1, x-1, x-1, x-1],
[y+1, y-1, y , y+1, y-1, y , y+1, y-1]))
if Board.is_peace_Space(xy)]
kill_moves = list(zip( [x , x , x+1, x+1, x+1, x-1, x-1, x-1],
[y+1, y-1, y , y+1, y-1, y , y+1, y-1]))
            kill_moves = [Board.is_kill_Move(new_xy, current_xy = self.xy) for new_xy in kill_moves]  # kings are not pawns, so no en-passant handling
kill_moves = [val for val in kill_moves if val[0]]
elif self.type in ['bishop', 'queen', 'rook']:
for func in move_functions_dict[self.type]:
i = 1
new_xy = func((x, y, i))
while Board.is_peace_Space(new_xy) or Board.is_kill_Move(new_xy, current_xy = self.xy)[0]:
vals = Board.is_kill_Move(new_xy, current_xy = self.xy)
if vals[0]:
kill_moves.append(vals)
break
peace_moves += [new_xy]
i += 1
self.peace_moves = peace_moves
self.kill_moves = kill_moves
return peace_moves, kill_moves
def get_xy(self):
return self.xy
def get_peace_Moves(self, board_coords = True):
if board_coords:
return xy_to_board(self.peace_moves)
else:
return self.peace_moves
def get_kill_Moves(self, board_coords = True):
if board_coords:
return xy_to_board(self.kill_moves)
else:
return self.kill_moves
def __str__(self):
rep = 'Piece(' + str(self.name) + ') at ' + xy_to_board(self.xy)
return rep
def __repr__(self):
return self.__str__()
class Board:
def __init__(self, to_setup = False):
colours = ('black', 'white')
self.board = [[Space((i, j), colours[(i+j)%2]) for j in range(8)] for i in range(8)]
self.live_Pieces = {}
self.dead_Pieces = {}
if to_setup:
self.setup_Game()
def setup_Game(self):
colours = ('white', 'black')
pieces = ('rook', 'knight', 'bishop', 'queen', 'king', 'bishop', 'knight', 'rook')
for colour_no in range(len(colours)):
colour = colours[colour_no]
for y in range(8):
x = colour_no * 5 + 1
self.add_Piece((x, y), 'pawn', colour)
for y in range(len(pieces)):
x = colour_no*7
self.add_Piece((x, y), pieces[y], colour)
self.check_live_Pieces(correct = True)
def get_Space(self, xy):
x, y = parse_xy(xy, True)
return self.board[x][y]
def add_Piece(self, xy, piece_type, piece_colour):
xy = parse_xy(xy)
assert xy is not None, 'Invalid xy'
assert self.xy_is_empty(xy), 'Space not empty'
i = 0
piece_name = piece_colour + '_' + piece_type + str(i)
self.check_live_Pieces(correct = True)
while piece_name in self.live_Pieces.keys() or piece_name in self.dead_Pieces.keys():
i += 1
piece_name = piece_name[:-1] + str(i)
new_Piece = Piece(piece_type, piece_colour, piece_name, xy)
self.get_Space(xy).occupy(new_Piece)
self.live_Pieces[piece_name] = xy
def clear_Space(self, xy, dead = True):
xy = parse_xy(xy)
piece = self.get_Space(xy).get_Piece()
self.get_Space(xy).vacate()
if dead:
self.dead_Pieces[piece.name] = xy
del self.live_Pieces[piece.name]
def get_live_Pieces(self, update = False):
all_Pieces = {}
for row in self.board:
for Space in row:
Piece = Space.get_Piece()
if Piece is not None:
#print(type(Piece), Space)
i = 0
piece_name = Piece.name
all_Pieces[piece_name] = Space.xy
return all_Pieces
def check_live_Pieces(self, correct = False):
correct_live_Pieces = self.get_live_Pieces()
if self.live_Pieces == correct_live_Pieces:
return True
else:
#print("live_Pieces don't match")
if correct:
self.live_Pieces = correct_live_Pieces
print('corrected live_Pieces')
return False
def xy_on_board(self, xy):
return xy[0] in range(8) and xy[1] in range(8)
def xy_is_empty(self, xy):
return xy not in self.live_Pieces.values()
def is_peace_Space(self, xy):
xy = parse_xy(xy)
if xy is None:
#print('Destination xy is not on board')
return False
return self.xy_is_empty(xy)
def is_kill_Move(self, xy, current_xy, is_pawn = False):
xy = parse_xy(xy)
current_xy = parse_xy(current_xy)
if xy is None:
#print('Destination xy is not on board')
return False, None
if current_xy is None:
print('Invalid current_xy. There may be an error.')
return False, None
current_Piece = self.get_Space(current_xy).get_Piece()
if current_Piece is None:
return False, None
if not is_pawn:
opp_Piece = self.get_Space(xy).get_Piece()
if opp_Piece is None:
#print('No Piece at ' + str(xy))
return False, None
else:
if opp_Piece.colour == current_Piece.colour:
return False, None
else:
return xy, xy
else: # if pawn
opp_Piece = self.get_Space(xy).get_Piece()
"""assert ( xy[0] == current_xy[0] + 1 and
current_Piece.colour == 'white') or (
xy[0] == current_xy[0] - 1 and
current_Piece.colour == 'black')"""
x, y = xy
            if opp_Piece is None:
                # en passant: the adjacent square must hold an enemy pawn that just
                # advanced two squares (open_to_passant); guard against empty squares
                if current_Piece.colour == 'white' and current_xy[0] == 4:
                    opp_Piece2 = self.board[x-1][y].get_Piece()
                    if opp_Piece2 is not None and opp_Piece2.type == 'pawn' and opp_Piece2.open_to_passant and opp_Piece2.colour == 'black':
                        return xy, (x-1, y)
                elif current_Piece.colour == 'black' and current_xy[0] == 3:
                    opp_Piece2 = self.board[x+1][y].get_Piece()
                    if opp_Piece2 is not None and opp_Piece2.type == 'pawn' and opp_Piece2.open_to_passant and opp_Piece2.colour == 'white':
                        return xy, (x+1, y)
                return False, None
else:
if opp_Piece.colour == current_Piece.colour:
return False, None
else:
return xy, xy
def update_all_Moves(self):
self.check_live_Pieces(correct = True)
for piece_name, xy in self.live_Pieces.items():
print('checking moves for', piece_name)
self.get_Space(xy).get_Piece().update_Moves(self)
def move_Piece(self, xy_1, xy_2):
p = self.get_Space(xy_1).get_Piece()
self.get_Space(xy_1).vacate()
self.get_Space(xy_2).occupy(p)
def clear_Board(self):
self.__init__()
def __str__(self):
rep = '\t ' + '_'*79+ '\n'
breaker = ['\t|'+''.join([' |*********|' for i in range(4)]) + '\n' +
'\t|'+''.join(['_________|_________|' for i in range(4)]) + '\n',
'\t|'+''.join(['*********| |' for i in range(4)]) + '\n' +
'\t|'+''.join(['_________|_________|' for i in range(4)]) + '\n']
for i in range(len(self.board), 0, -1):
row = self.board[i-1]
rep_row = str(i) + '\t'
for j in range(len(row)):
Space = row[j]
if Space.held_by is not None:
rep_row += '| '+str(Space.held_by.colour[0] + ' ' + Space.held_by.type).ljust(8)
else:
rep_row += '| '+' '.ljust(8)
rep_row += '|\n'
rep += rep_row + breaker[i%2]
rep += ' \t '
rep += ' '.join([l.ljust(9) for l in ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h']])
return rep
def __repr__(self):
return self.__str__()
class Space:
def __init__(self, xy, colour):
assert colour in ['black', 'white'], 'Invalid colour for Space object'
self.colour = colour
x, y = xy
self.x, self.y = x, y
self.xy = (x, y)
self.x_name, self.y_name = xy_to_board(xy)
self.held_by = None
def occupy(self, Piece):
self.held_by = Piece
def vacate(self):
self.held_by = None
def get_Piece(self):
return self.held_by
def __str__(self):
return 'Space '+ str(self.x_name) + str(self.y_name) + ' ('+self.colour+')'
def __repr__(self):
return self.__str__()
class Game:
def __init__(self):
self.gameboard = Board()
self.gameboard.setup_Game()
self.all_piece_types = ('pawn', 'bishop', 'rook', 'knight', 'king', 'queen')
self.all_colours = ('black', 'white')
def __str__(self):
return self.gameboard.__str__()
def __repr__(self):
return self.gameboard.__repr__()
def get_move_functions(piece_type):
bishfuncs = [ lambda xyi : (xyi[0] + xyi[2], xyi[1] + xyi[2]),
lambda xyi : (xyi[0] + xyi[2], xyi[1] - xyi[2]),
lambda xyi : (xyi[0] - xyi[2], xyi[1] + xyi[2]),
lambda xyi : (xyi[0] - xyi[2], xyi[1] - xyi[2])]
rookfuncs = [ lambda xyi : (xyi[0] + xyi[2], xyi[1]),
lambda xyi : (xyi[0] - xyi[2], xyi[1]),
lambda xyi : (xyi[0] , xyi[1] + xyi[2]),
lambda xyi : (xyi[0] , xyi[1] - xyi[2])]
queenfuncs = bishfuncs + rookfuncs
funcs = dict(zip(['bishop', 'rook', 'queen'], [bishfuncs, rookfuncs, queenfuncs]))
return funcs
def xy_to_board(xy):
if type(xy) == tuple and len(xy) == 2:
if xy < (8, 8) and (xy) >= (0, 0):
x, y = xy
return ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h'][y] + str(x+1)
elif type(xy) == list:
return [xy_to_board(xy0) for xy0 in xy]
return None
def parse_xy(xy, report_error = False):
if type(xy) == tuple and len(xy) == 2:
if xy < (8, 8) and xy >= (0, 0):
return xy
elif type(xy) == str and len(xy) == 2:
y, x = xy[0], int(xy[1])
if y in 'abcdefgh' and x in range(1, 9):
y, x = dict(zip('abcdefgh', range(8)))[y], x-1
return (x, y)
if report_error:
print('invalid xy:', xy)
return None
"""
import checkmate as cm
g = cm.Game()
print(g.gameboard)
b = g.gameboard
b.update_all_Moves()
b.move_Piece('b2', 'b4')
b.move_Piece('b4', 'b5')
b.move_Piece('c7', 'c5')
b.update_all_Moves()
p = b.get_Space('b2').get_Piece()
b.move_Piece('b2', 'b4')
b.get_Space('b1').vacate()
b.get_Space('a3').occupy(p)
"""
|
the-stack_0_14042 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tensor utility functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework.ops import Tensor
__all__ = ['assert_same_float_dtype', 'is_numeric_tensor', 'assert_scalar_int']
NUMERIC_TYPES = frozenset([dtypes.float32, dtypes.float64, dtypes.int8,
dtypes.int16, dtypes.int32, dtypes.int64,
dtypes.uint8, dtypes.qint8, dtypes.qint32,
dtypes.quint8, dtypes.complex64])
def is_numeric_tensor(tensor):
return isinstance(tensor, Tensor) and tensor.dtype in NUMERIC_TYPES
def _assert_same_base_type(items, expected_type=None):
"""Asserts all items are of the same base type.
Args:
items: List of graph items (e.g., `Variable`, `Tensor`, `SparseTensor`,
`Operation`, or `IndexedSlices`). Can include `None` elements, which
will be ignored.
expected_type: Expected type. If not specified, assert all items are
of the same base type.
Returns:
Validated type, or none if neither expected_type nor items provided.
Raises:
ValueError: If any types do not match.
"""
original_item_str = None
for item in items:
if item is not None:
item_type = item.dtype.base_dtype
if not expected_type:
expected_type = item_type
original_item_str = item.name if hasattr(item, 'name') else str(item)
elif expected_type != item_type:
raise ValueError('%s, type=%s, must be of the same type (%s)%s.' % (
item.name if hasattr(item, 'name') else str(item),
item_type, expected_type,
(' as %s' % original_item_str) if original_item_str else ''))
return expected_type
def assert_same_float_dtype(tensors=None, dtype=None):
"""Validate and return float type based on `tensors` and `dtype`.
For ops such as matrix multiplication, inputs and weights must be of the
same float type. This function validates that all `tensors` are the same type,
validates that type is `dtype` (if supplied), and returns the type. Type must
be `dtypes.float32` or `dtypes.float64`. If neither `tensors` nor
`dtype` is supplied, default to `dtypes.float32`.
Args:
tensors: Tensors of input values. Can include `None` elements, which will be
ignored.
dtype: Expected type.
Returns:
Validated type.
Raises:
ValueError: if neither `tensors` nor `dtype` is supplied, or result is not
float.
"""
if tensors:
dtype = _assert_same_base_type(tensors, dtype)
if not dtype:
dtype = dtypes.float32
elif not dtype.is_floating:
raise ValueError('Expected float, got %s.' % dtype)
return dtype
def assert_scalar_int(tensor):
"""Assert `tensor` is 0-D, of type `tf.int32` or `tf.int64`.
Args:
tensor: Tensor to test.
Returns:
`tensor`, for chaining.
Raises:
ValueError: if `tensor` is not 0-D, of type `tf.int32` or `tf.int64`.
"""
data_type = tensor.dtype
if data_type.base_dtype not in [dtypes.int32, dtypes.int64]:
raise ValueError('Unexpected type %s for %s.' % (data_type, tensor.name))
shape = tensor.get_shape()
if shape.ndims != 0:
raise ValueError('Unexpected shape %s for %s.' % (shape, tensor.name))
return tensor
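# Usage sketch (illustrative; assumes a TensorFlow build where these internal
# framework modules are importable):
#
#     from tensorflow.python.framework import constant_op
#     x = constant_op.constant([1.0, 2.0])
#     y = constant_op.constant([3.0, 4.0])
#     assert_same_float_dtype([x, y])               # returns float32
#     assert_same_float_dtype([x], dtypes.float64)  # raises ValueError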
|
the-stack_0_14044 | import unittest
from slack_sdk.http_retry import RateLimitErrorRetryHandler
from slack_sdk.scim import SCIMClient
from tests.slack_sdk.scim.mock_web_api_server import (
setup_mock_web_api_server,
cleanup_mock_web_api_server,
)
from ..my_retry_handler import MyRetryHandler
class TestSCIMClient(unittest.TestCase):
def setUp(self):
setup_mock_web_api_server(self)
def tearDown(self):
cleanup_mock_web_api_server(self)
def test_retries(self):
retry_handler = MyRetryHandler(max_retry_count=2)
client = SCIMClient(
base_url="http://localhost:8888/",
token="xoxp-remote_disconnected",
retry_handlers=[retry_handler],
)
try:
client.search_users(start_index=0, count=1)
self.fail("An exception is expected")
except Exception as _:
pass
self.assertEqual(2, retry_handler.call_count)
def test_ratelimited(self):
client = SCIMClient(
base_url="http://localhost:8888/",
token="xoxp-ratelimited",
)
client.retry_handlers.append(RateLimitErrorRetryHandler())
response = client.search_users(start_index=0, count=1)
# Just running retries; no assertions for call count so far
self.assertEqual(429, response.status_code)
|
the-stack_0_14046 | import os
import pyttsx3
import pyaudio
import speech_recognition as sr
assistente = pyttsx3.init()
recon = sr.Recognizer()
inpvoz = ""
def retorno(frase):
assistente.say(frase)
assistente.setProperty("voice", b"brasil")
assistente.setProperty("rate", 210)
assistente.setProperty("volume", 1)
assistente.runAndWait()
def ouvir(source):
recon.adjust_for_ambient_noise(source)
audio = recon.listen(source)
inpvoz = recon.recognize_google(audio, language="pt-BR")
return inpvoz
def continuar(source):
retorno(
"Posso ajudar com algo mais? Responda sim para continuar e não para finalizar!"
)
continuar = ouvir(source)
print(f"Você disse {continuar}")
return continuar
|
the-stack_0_14047 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Subtype assigner for lib2to3 trees.
This module assigns extra type information to the lib2to3 trees. This
information is more specific than whether something is an operator or an
identifier. For instance, it can specify if a node in the tree is part of a
subscript.
AssignSubtypes(): the main function exported by this module.
Annotations:
subtype: The subtype of a pytree token. See 'format_token' module for a list
of subtypes.
"""
from lib2to3 import pytree
from lib2to3.pgen2 import token
from lib2to3.pygram import python_symbols as syms
from yapf.yapflib import format_token
from yapf.yapflib import pytree_utils
from yapf.yapflib import pytree_visitor
from yapf.yapflib import style
def AssignSubtypes(tree):
"""Run the subtype assigner visitor over the tree, modifying it in place.
Arguments:
tree: the top-level pytree node to annotate with subtypes.
"""
subtype_assigner = _SubtypeAssigner()
subtype_assigner.Visit(tree)
# Map tokens in argument lists to their respective subtype.
_ARGLIST_TOKEN_TO_SUBTYPE = {
'=': format_token.Subtype.DEFAULT_OR_NAMED_ASSIGN,
':': format_token.Subtype.TYPED_NAME,
'*': format_token.Subtype.VARARGS_STAR,
'**': format_token.Subtype.KWARGS_STAR_STAR,
}
class _SubtypeAssigner(pytree_visitor.PyTreeVisitor):
"""_SubtypeAssigner - see file-level docstring for detailed description.
The subtype is added as an annotation to the pytree token.
"""
def Visit_dictsetmaker(self, node): # pylint: disable=invalid-name
# dictsetmaker ::= (test ':' test (comp_for |
# (',' test ':' test)* [','])) |
# (test (comp_for | (',' test)* [',']))
for child in node.children:
self.Visit(child)
comp_for = False
dict_maker = False
for child in node.children:
if pytree_utils.NodeName(child) == 'comp_for':
comp_for = True
_AppendFirstLeafTokenSubtype(child,
format_token.Subtype.DICT_SET_GENERATOR)
elif pytree_utils.NodeName(child) in ('COLON', 'DOUBLESTAR'):
dict_maker = True
if not comp_for and dict_maker:
last_was_colon = False
unpacking = False
for child in node.children:
if pytree_utils.NodeName(child) == 'DOUBLESTAR':
_AppendFirstLeafTokenSubtype(child,
format_token.Subtype.KWARGS_STAR_STAR)
if last_was_colon:
if style.Get('INDENT_DICTIONARY_VALUE'):
_InsertPseudoParentheses(child)
else:
_AppendFirstLeafTokenSubtype(child,
format_token.Subtype.DICTIONARY_VALUE)
elif (isinstance(child, pytree.Node) or
(not child.value.startswith('#') and child.value not in '{:,')):
# Mark the first leaf of a key entry as a DICTIONARY_KEY. We
# normally want to split before them if the dictionary cannot exist
# on a single line.
if not unpacking or pytree_utils.FirstLeafNode(child).value == '**':
_AppendFirstLeafTokenSubtype(child,
format_token.Subtype.DICTIONARY_KEY)
_AppendSubtypeRec(child, format_token.Subtype.DICTIONARY_KEY_PART)
last_was_colon = pytree_utils.NodeName(child) == 'COLON'
if pytree_utils.NodeName(child) == 'DOUBLESTAR':
unpacking = True
elif last_was_colon:
unpacking = False
def Visit_expr_stmt(self, node): # pylint: disable=invalid-name
# expr_stmt ::= testlist_star_expr (augassign (yield_expr|testlist)
# | ('=' (yield_expr|testlist_star_expr))*)
for child in node.children:
self.Visit(child)
if isinstance(child, pytree.Leaf) and child.value == '=':
_AppendTokenSubtype(child, format_token.Subtype.ASSIGN_OPERATOR)
def Visit_or_test(self, node): # pylint: disable=invalid-name
# or_test ::= and_test ('or' and_test)*
for child in node.children:
self.Visit(child)
if isinstance(child, pytree.Leaf) and child.value == 'or':
_AppendTokenSubtype(child, format_token.Subtype.BINARY_OPERATOR)
def Visit_and_test(self, node): # pylint: disable=invalid-name
# and_test ::= not_test ('and' not_test)*
for child in node.children:
self.Visit(child)
if isinstance(child, pytree.Leaf) and child.value == 'and':
_AppendTokenSubtype(child, format_token.Subtype.BINARY_OPERATOR)
def Visit_not_test(self, node): # pylint: disable=invalid-name
# not_test ::= 'not' not_test | comparison
for child in node.children:
self.Visit(child)
if isinstance(child, pytree.Leaf) and child.value == 'not':
_AppendTokenSubtype(child, format_token.Subtype.UNARY_OPERATOR)
def Visit_comparison(self, node): # pylint: disable=invalid-name
# comparison ::= expr (comp_op expr)*
# comp_op ::= '<'|'>'|'=='|'>='|'<='|'<>'|'!='|'in'|'not in'|'is'|'is not'
for child in node.children:
self.Visit(child)
if (isinstance(child, pytree.Leaf) and
child.value in {'<', '>', '==', '>=', '<=', '<>', '!=', 'in', 'is'}):
_AppendTokenSubtype(child, format_token.Subtype.BINARY_OPERATOR)
elif pytree_utils.NodeName(child) == 'comp_op':
for grandchild in child.children:
_AppendTokenSubtype(grandchild, format_token.Subtype.BINARY_OPERATOR)
def Visit_star_expr(self, node): # pylint: disable=invalid-name
# star_expr ::= '*' expr
for child in node.children:
self.Visit(child)
if isinstance(child, pytree.Leaf) and child.value == '*':
_AppendTokenSubtype(child, format_token.Subtype.UNARY_OPERATOR)
_AppendTokenSubtype(child, format_token.Subtype.VARARGS_STAR)
def Visit_expr(self, node): # pylint: disable=invalid-name
# expr ::= xor_expr ('|' xor_expr)*
for child in node.children:
self.Visit(child)
if isinstance(child, pytree.Leaf) and child.value == '|':
_AppendTokenSubtype(child, format_token.Subtype.BINARY_OPERATOR)
def Visit_xor_expr(self, node): # pylint: disable=invalid-name
# xor_expr ::= and_expr ('^' and_expr)*
for child in node.children:
self.Visit(child)
if isinstance(child, pytree.Leaf) and child.value == '^':
_AppendTokenSubtype(child, format_token.Subtype.BINARY_OPERATOR)
def Visit_and_expr(self, node): # pylint: disable=invalid-name
# and_expr ::= shift_expr ('&' shift_expr)*
for child in node.children:
self.Visit(child)
if isinstance(child, pytree.Leaf) and child.value == '&':
_AppendTokenSubtype(child, format_token.Subtype.BINARY_OPERATOR)
def Visit_shift_expr(self, node): # pylint: disable=invalid-name
# shift_expr ::= arith_expr (('<<'|'>>') arith_expr)*
for child in node.children:
self.Visit(child)
if isinstance(child, pytree.Leaf) and child.value in {'<<', '>>'}:
_AppendTokenSubtype(child, format_token.Subtype.BINARY_OPERATOR)
def Visit_arith_expr(self, node): # pylint: disable=invalid-name
# arith_expr ::= term (('+'|'-') term)*
for child in node.children:
self.Visit(child)
if _IsAExprOperator(child):
_AppendTokenSubtype(child, format_token.Subtype.BINARY_OPERATOR)
_AppendTokenSubtype(child, format_token.Subtype.A_EXPR_OPERATOR)
if _IsSimpleExpression(node):
for child in node.children:
if _IsAExprOperator(child):
_AppendTokenSubtype(child, format_token.Subtype.SIMPLE_EXPRESSION)
def Visit_term(self, node): # pylint: disable=invalid-name
# term ::= factor (('*'|'/'|'%'|'//'|'@') factor)*
for child in node.children:
self.Visit(child)
if _IsMExprOperator(child):
_AppendTokenSubtype(child, format_token.Subtype.BINARY_OPERATOR)
_AppendTokenSubtype(child, format_token.Subtype.M_EXPR_OPERATOR)
if _IsSimpleExpression(node):
for child in node.children:
if _IsMExprOperator(child):
_AppendTokenSubtype(child, format_token.Subtype.SIMPLE_EXPRESSION)
def Visit_factor(self, node): # pylint: disable=invalid-name
# factor ::= ('+'|'-'|'~') factor | power
for child in node.children:
self.Visit(child)
if isinstance(child, pytree.Leaf) and child.value in '+-~':
_AppendTokenSubtype(child, format_token.Subtype.UNARY_OPERATOR)
def Visit_power(self, node): # pylint: disable=invalid-name
# power ::= atom trailer* ['**' factor]
for child in node.children:
self.Visit(child)
if isinstance(child, pytree.Leaf) and child.value == '**':
_AppendTokenSubtype(child, format_token.Subtype.BINARY_OPERATOR)
def Visit_trailer(self, node): # pylint: disable=invalid-name
for child in node.children:
self.Visit(child)
if isinstance(child, pytree.Leaf) and child.value in '[]':
_AppendTokenSubtype(child, format_token.Subtype.SUBSCRIPT_BRACKET)
def Visit_subscript(self, node): # pylint: disable=invalid-name
# subscript ::= test | [test] ':' [test] [sliceop]
for child in node.children:
self.Visit(child)
if isinstance(child, pytree.Leaf) and child.value == ':':
_AppendTokenSubtype(child, format_token.Subtype.SUBSCRIPT_COLON)
def Visit_sliceop(self, node): # pylint: disable=invalid-name
# sliceop ::= ':' [test]
for child in node.children:
self.Visit(child)
if isinstance(child, pytree.Leaf) and child.value == ':':
_AppendTokenSubtype(child, format_token.Subtype.SUBSCRIPT_COLON)
def Visit_argument(self, node): # pylint: disable=invalid-name
# argument ::=
# test [comp_for] | test '=' test
self._ProcessArgLists(node)
def Visit_arglist(self, node): # pylint: disable=invalid-name
# arglist ::=
# (argument ',')* (argument [',']
# | '*' test (',' argument)* [',' '**' test]
# | '**' test)
self._ProcessArgLists(node)
_SetArgListSubtype(node, format_token.Subtype.DEFAULT_OR_NAMED_ASSIGN,
format_token.Subtype.DEFAULT_OR_NAMED_ASSIGN_ARG_LIST)
def Visit_tname(self, node): # pylint: disable=invalid-name
self._ProcessArgLists(node)
_SetArgListSubtype(node, format_token.Subtype.DEFAULT_OR_NAMED_ASSIGN,
format_token.Subtype.DEFAULT_OR_NAMED_ASSIGN_ARG_LIST)
def Visit_decorator(self, node): # pylint: disable=invalid-name
# decorator ::=
# '@' dotted_name [ '(' [arglist] ')' ] NEWLINE
for child in node.children:
if isinstance(child, pytree.Leaf) and child.value == '@':
_AppendTokenSubtype(child, subtype=format_token.Subtype.DECORATOR)
self.Visit(child)
def Visit_funcdef(self, node): # pylint: disable=invalid-name
# funcdef ::=
# 'def' NAME parameters ['->' test] ':' suite
for child in node.children:
if pytree_utils.NodeName(child) == 'NAME' and child.value != 'def':
_AppendTokenSubtype(child, format_token.Subtype.FUNC_DEF)
break
for child in node.children:
self.Visit(child)
def Visit_parameters(self, node): # pylint: disable=invalid-name
# parameters ::= '(' [typedargslist] ')'
self._ProcessArgLists(node)
if len(node.children) > 2:
_AppendFirstLeafTokenSubtype(node.children[1],
format_token.Subtype.PARAMETER_START)
_AppendLastLeafTokenSubtype(node.children[-2],
format_token.Subtype.PARAMETER_STOP)
def Visit_typedargslist(self, node): # pylint: disable=invalid-name
# typedargslist ::=
# ((tfpdef ['=' test] ',')*
# ('*' [tname] (',' tname ['=' test])* [',' '**' tname]
# | '**' tname)
# | tfpdef ['=' test] (',' tfpdef ['=' test])* [','])
self._ProcessArgLists(node)
_SetArgListSubtype(node, format_token.Subtype.DEFAULT_OR_NAMED_ASSIGN,
format_token.Subtype.DEFAULT_OR_NAMED_ASSIGN_ARG_LIST)
tname = False
if not node.children:
return
_AppendFirstLeafTokenSubtype(node.children[0],
format_token.Subtype.PARAMETER_START)
_AppendLastLeafTokenSubtype(node.children[-1],
format_token.Subtype.PARAMETER_STOP)
tname = pytree_utils.NodeName(node.children[0]) == 'tname'
for i in range(1, len(node.children)):
prev_child = node.children[i - 1]
child = node.children[i]
if pytree_utils.NodeName(prev_child) == 'COMMA':
_AppendFirstLeafTokenSubtype(child,
format_token.Subtype.PARAMETER_START)
elif pytree_utils.NodeName(child) == 'COMMA':
_AppendLastLeafTokenSubtype(prev_child,
format_token.Subtype.PARAMETER_STOP)
if pytree_utils.NodeName(child) == 'tname':
tname = True
_SetArgListSubtype(child, format_token.Subtype.TYPED_NAME,
format_token.Subtype.TYPED_NAME_ARG_LIST)
elif pytree_utils.NodeName(child) == 'COMMA':
tname = False
elif pytree_utils.NodeName(child) == 'EQUAL' and tname:
_AppendTokenSubtype(child, subtype=format_token.Subtype.TYPED_NAME)
tname = False
def Visit_varargslist(self, node): # pylint: disable=invalid-name
# varargslist ::=
# ((vfpdef ['=' test] ',')*
# ('*' [vname] (',' vname ['=' test])* [',' '**' vname]
# | '**' vname)
# | vfpdef ['=' test] (',' vfpdef ['=' test])* [','])
self._ProcessArgLists(node)
for child in node.children:
self.Visit(child)
if isinstance(child, pytree.Leaf) and child.value == '=':
_AppendTokenSubtype(child, format_token.Subtype.VARARGS_LIST)
def Visit_comp_for(self, node): # pylint: disable=invalid-name
# comp_for ::= 'for' exprlist 'in' testlist_safe [comp_iter]
_AppendSubtypeRec(node, format_token.Subtype.COMP_FOR)
# Mark the previous node as COMP_EXPR unless this is a nested comprehension
# as these will have the outer comprehension as their previous node.
attr = pytree_utils.GetNodeAnnotation(node.parent,
pytree_utils.Annotation.SUBTYPE)
if not attr or format_token.Subtype.COMP_FOR not in attr:
_AppendSubtypeRec(node.parent.children[0], format_token.Subtype.COMP_EXPR)
self.DefaultNodeVisit(node)
def Visit_old_comp_for(self, node): # pylint: disable=invalid-name
# Python 3.7
self.Visit_comp_for(node)
def Visit_comp_if(self, node): # pylint: disable=invalid-name
# comp_if ::= 'if' old_test [comp_iter]
_AppendSubtypeRec(node, format_token.Subtype.COMP_IF)
self.DefaultNodeVisit(node)
def Visit_old_comp_if(self, node): # pylint: disable=invalid-name
# Python 3.7
self.Visit_comp_if(node)
def _ProcessArgLists(self, node):
"""Common method for processing argument lists."""
for child in node.children:
self.Visit(child)
if isinstance(child, pytree.Leaf):
_AppendTokenSubtype(
child,
subtype=_ARGLIST_TOKEN_TO_SUBTYPE.get(child.value,
format_token.Subtype.NONE))
def _SetArgListSubtype(node, node_subtype, list_subtype):
"""Set named assign subtype on elements in a arg list."""
def HasSubtype(node):
"""Return True if the arg list has a named assign subtype."""
if isinstance(node, pytree.Leaf):
return node_subtype in pytree_utils.GetNodeAnnotation(
node, pytree_utils.Annotation.SUBTYPE, set())
for child in node.children:
node_name = pytree_utils.NodeName(child)
if node_name not in {'atom', 'arglist', 'power'}:
if HasSubtype(child):
return True
return False
if not HasSubtype(node):
return
for child in node.children:
node_name = pytree_utils.NodeName(child)
if node_name not in {'atom', 'COMMA'}:
_AppendFirstLeafTokenSubtype(child, list_subtype)
def _AppendTokenSubtype(node, subtype):
"""Append the token's subtype only if it's not already set."""
pytree_utils.AppendNodeAnnotation(node, pytree_utils.Annotation.SUBTYPE,
subtype)
def _AppendFirstLeafTokenSubtype(node, subtype):
"""Append the first leaf token's subtypes."""
if isinstance(node, pytree.Leaf):
_AppendTokenSubtype(node, subtype)
return
_AppendFirstLeafTokenSubtype(node.children[0], subtype)
def _AppendLastLeafTokenSubtype(node, subtype):
"""Append the last leaf token's subtypes."""
if isinstance(node, pytree.Leaf):
_AppendTokenSubtype(node, subtype)
return
_AppendLastLeafTokenSubtype(node.children[-1], subtype)
def _AppendSubtypeRec(node, subtype, force=True):
"""Append the leafs in the node to the given subtype."""
if isinstance(node, pytree.Leaf):
_AppendTokenSubtype(node, subtype)
return
for child in node.children:
_AppendSubtypeRec(child, subtype, force=force)
def _InsertPseudoParentheses(node):
"""Insert pseudo parentheses so that dicts can be formatted correctly."""
comment_node = None
if isinstance(node, pytree.Node):
if node.children[-1].type == token.COMMENT:
comment_node = node.children[-1].clone()
node.children[-1].remove()
first = pytree_utils.FirstLeafNode(node)
last = pytree_utils.LastLeafNode(node)
if first == last and first.type == token.COMMENT:
# A comment was inserted before the value, which is a pytree.Leaf.
# Encompass the dictionary's value into an ATOM node.
last = first.next_sibling
last_clone = last.clone()
new_node = pytree.Node(syms.atom, [first.clone(), last_clone])
for orig_leaf, clone_leaf in zip(last.leaves(), last_clone.leaves()):
pytree_utils.CopyYapfAnnotations(orig_leaf, clone_leaf)
if hasattr(orig_leaf, 'is_pseudo'):
clone_leaf.is_pseudo = orig_leaf.is_pseudo
node.replace(new_node)
node = new_node
last.remove()
first = pytree_utils.FirstLeafNode(node)
last = pytree_utils.LastLeafNode(node)
lparen = pytree.Leaf(
token.LPAR, u'(', context=('', (first.get_lineno(), first.column - 1)))
last_lineno = last.get_lineno()
if last.type == token.STRING and '\n' in last.value:
last_lineno += last.value.count('\n')
if last.type == token.STRING and '\n' in last.value:
last_column = len(last.value.split('\n')[-1]) + 1
else:
last_column = last.column + len(last.value) + 1
rparen = pytree.Leaf(
token.RPAR, u')', context=('', (last_lineno, last_column)))
lparen.is_pseudo = True
rparen.is_pseudo = True
if isinstance(node, pytree.Node):
node.insert_child(0, lparen)
node.append_child(rparen)
if comment_node:
node.append_child(comment_node)
_AppendFirstLeafTokenSubtype(node, format_token.Subtype.DICTIONARY_VALUE)
else:
clone = node.clone()
for orig_leaf, clone_leaf in zip(node.leaves(), clone.leaves()):
pytree_utils.CopyYapfAnnotations(orig_leaf, clone_leaf)
new_node = pytree.Node(syms.atom, [lparen, clone, rparen])
node.replace(new_node)
_AppendFirstLeafTokenSubtype(clone, format_token.Subtype.DICTIONARY_VALUE)
def _IsAExprOperator(node):
return isinstance(node, pytree.Leaf) and node.value in {'+', '-'}
def _IsMExprOperator(node):
return isinstance(node,
pytree.Leaf) and node.value in {'*', '/', '%', '//', '@'}
def _IsSimpleExpression(node):
"""A node with only leafs as children."""
return all(isinstance(child, pytree.Leaf) for child in node.children)
|
the-stack_0_14051 | """
snp2counts.py - count number SNPs in geneset
============================================
:Tags: Python
Purpose
-------
read a list of genomic point locations (SNPs) and count the number
of SNPs falling in pre-defined windows.
The windows are given in gtf format.
.. note::
The script will be able to count snps in disjoint segments
using the gene_id field in gtf format. It will not check
if these segments are non-overlapping.
In case of a gene set, make sure to first flatten the gene set by combining
all transcripts/exons per gene.
Usage
-----
Type::
python <script_name>.py --help
for command line help.
Command line options
--------------------
.. note::
The script currently uses ``variant`` in two meanings:
1. a variable site (SNP/INDEL)
2. a transcript variant (a transcript sequence that differs from the wild type)
   I have started calling the latter ``allele``, though this is not yet
   consistent throughout the script. The output, however, is consistent and
   calls the former ``variant_site`` and the latter ``allele``.
"""
import sys
import collections
import numpy
import CGAT.Experiment as E
import CGAT.IOTools as IOTools
import pysam
import CGAT.IndexedFasta as IndexedFasta
import CGAT.IndexedGenome as IndexedGenome
import CGAT.Genomics as Genomics
import CGAT.GTF as GTF
import alignlib_lite
CdsResult = collections.namedtuple('CdsResult',
'''strand, start, end,
exon_id, exon_start, exon_end,
cds_start, cds_end, cds_phase,
intron_id, intron_start, intron_end,
prev_exon_end, next_exon_start,
cds_seq, cds_seq_start, cds_seq_end,
nc_seq, nc_start, nc_end, exon_skipping
''')
Variant = collections.namedtuple('Variant',
'code,sequence,start,end')
SpliceEffect = collections.namedtuple('SpliceEffect',
'''exon_id, orig_seq, variant_seq''' )
SpliceChange = collections.namedtuple('SpliceChange',
'''exon_id,
is_frameshift,
orig_name, orig_seq5, orig_seq3,
variant_name, variant_seq5, variant_seq3''')
CdsEffect = collections.namedtuple('CdsEffect',
'''exon_id, orig_seq, variant_seq,
codon_orig_seq, codon_variant_seq''' )
CdsVariant = collections.namedtuple('CdsVariant',
'''transcript_id,
cds_start, cds_end,
code,
reference_seq,variant_seq,
is_homozygous''' )
SpliceVariant = collections.namedtuple('SpliceVariant',
'''transcript_id,
intron_id,
nc_start, nc_end,
code,
reference_seq,variant_seq,
is_homozygous''' )
TranscriptVariant = collections.namedtuple('TranscriptVariant',
'''cds_variants,splice_variants''')
TranslationEffect = collections.namedtuple( 'TranslationEffect', '''
ncodons,
ninserted_bases,
ninserted_codons,
ndeleted_bases,
ndeleted_codons,
nincomplete_codons,
noframe_codons,
nwrong_frames,
ncorrected_frames,
first_stop,
nstops,
nunaffected_codons,
nsynonymous_codons,
nnonsynonymous_codons,
nstop_codons''' )
SplicingEffect = collections.namedtuple('SplicingEffect',
'''nintrons,
ncanonical,
nframeshifts,
nnoncanonical,
nunchanged_frames,
ncorrected_frames,
nuncorrected_frames,
nunchanged,
nsynonymous,
nnonsynonymous,
ndisrupted,
nnovel,
                                           nunknown,
ninserted_codons,
codes,
last_exon''' )
def iterateOverFrames(variant_seq):
'''return tuples of segments within/without
frame.
Yields only coordinates in frame (multiples of 3)
Everything that is out-of-frame is yielded together.
'''
frame_at_start = len("".join(variant_seq[:3])) % 3
frame_at_end = 0
start = 0
for x in range(3, len(variant_seq), 3):
var_codon = "".join(variant_seq[x:x + 3]).upper()
lvar = len(var_codon)
frame_at_end = (frame_at_start + lvar) % 3
# print x, frame_at_start, frame_at_end, start
# check for frame change
if frame_at_end != frame_at_start:
if frame_at_start == 0:
# exclude current codon
yield((True, start, x))
start = x
elif frame_at_start != 0 and frame_at_end == 0:
# include current codon
yield((False, start, x + 3))
start = x + 3
else:
# nothing to be done if frame changes
# between out-of-frame frames
pass
frame_at_start = frame_at_end
if start != len(variant_seq):
yield((frame_at_end == 0, start, len(variant_seq)))
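# --- Illustrative sketch (not part of the original script) -------------------
# Shows the behaviour of iterateOverFrames for a variant sequence with a
# single-base deletion in the second codon: the deletion knocks the rest of
# the sequence out of frame, so one in-frame block is followed by one
# out-of-frame block. The helper name below is hypothetical.
def _example_iterate_over_frames():
    # reference "ATGGCCAAA" with the fifth base deleted
    example_var_seq = ["A", "T", "G", "G", "", "C", "A", "A", "A"]
    segments = list(iterateOverFrames(example_var_seq))
    # expected: [(True, 0, 3), (False, 3, 9)]
    return segments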
def countEffectsOnTranscript(var_seq, ref_seq,
is_seleno=False):
'''count effects on transcript.
*var_seq* is a list of characters according to a known cds and
thus determines the reference frame.
Insertions contain more than one character at a position, deletions
are empty.
    The function returns a namedtuple of type TranslationEffect. Counts are in terms of bases/codons
    in the reference sequence.
Counting will continue after a stop-codon is encountered.
Note that codons inserted within a codon do not count as a frame shift. Instead
these will be recorded as an inserted codon.
ncodons
number of codons in transcript
ninserted_bases
number of inserted bases
ninserted_codons
number of fully inserted codons
ndeleted_bases
number of deleted bases
nincomplete_codons
number of incomplete codons at the end
ndeleted_codons
number of fully deleted codons
noframe_codons
number of codons that are out-of-frame. This will include all codons where
at least one base is out-of-frame. In case of an in+del, the codon will
still be out-of-frame.
nwrong_frames
number of times the sequence gets out of frame
ncorrected_frames
number of times the frame is recovered
nstops
number of stop codons in translation
nsynonymous_codons:
number of reference codons that have synonymous mutations
nnonsynonymous_codons:
       number of reference codons that have non-synonymous mutations
nstop_codons
number of reference codons that now encode for a stop
nunaffected_codons
number of reference codons that are still the same
first_stop
codon position of first stop codon in variant sequence
'''
assert len(var_seq) == len(ref_seq)
# values to fill
ncodons = 0
ninserted_bases, ndeleted_bases = 0, 0
ninserted_codons, ndeleted_codons = 0, 0
nincomplete_codons = 0
noframe_bases, noframe_codons = 0, 0
nwrong_frames, ncorrected_frames = 0, 0
nsynonymous_codons, nnonsynonymous_codons, nunaffected_codons = 0, 0, 0
nstop_codons = 0
last_exon_start = 0
# build sequences
var_seq_na = "".join(var_seq).upper()
ref_seq_na = "".join(ref_seq).upper()
lrefseq = len(ref_seq)
lvarseq = len(var_seq_na)
ncodons = lrefseq // 3
# truncate incomplete base at end of reference sequence
if lvarseq % 3 != 0:
var_seq_na = var_seq_na[:-(lvarseq % 3)]
var_seq_aa = Genomics.translate(var_seq_na,
is_seleno=is_seleno)
# check protein coding sequence for the first stop
nstops = 0
first_stop = len(var_seq_aa)
ntruncated_codons_stop = 0
for pos, c in enumerate(var_seq_aa):
if c == "X":
nstops += 1
first_stop = min(pos, first_stop)
# start position for out-of-frame region
var_pos = 0
map_ref2var = alignlib_lite.py_makeAlignmentVector()
alignator = alignlib_lite.py_makeAlignatorDPFull(alignlib_lite.py_ALIGNMENT_GLOBAL,
-10.0,
-2.0)
was_in_frame = True
for in_frame, start, end in iterateOverFrames(var_seq):
varseq = "".join(var_seq[start:end]).upper()
refseq = "".join(ref_seq[start:end]).upper()
# print in_frame, start, end
if in_frame:
for x in range(start, end, 3):
# ignore incomplete codons at the end:
if x + 3 > lrefseq:
break
var_codon = "".join(var_seq[x:x + 3]).upper()
assert len(var_codon) % 3 == 0
ref_codon = "".join(ref_seq[x:x + 3]).upper()
assert len(ref_codon) == 3
d = len(var_codon) - 3
for y in var_seq[x:x + 3]:
if y == "":
ndeleted_bases += 1
if len(y) > 1:
ninserted_bases += len(y) - 1
if var_codon == "":
ndeleted_codons -= d // 3
elif len(var_codon) > len(ref_codon):
# deal with in-frame inserted codons
ninserted_codons += d // 3
nunaffected_codons += 1
elif var_codon == ref_codon:
nunaffected_codons += 1
else:
var_aa = Genomics.translate(var_codon)
ref_aa = Genomics.translate(ref_codon)
if var_aa == "X":
nstop_codons += 1
elif ref_aa == var_aa:
nsynonymous_codons += 1
else:
nnonsynonymous_codons += 1
var_pos += len(var_codon)
else:
# count deletions/insertions in the variant
for x in range(start, end, 3):
var_codon = "".join(var_seq[x:x + 3]).upper()
# count insertion and deletion separately to avoid them
# compensating
for y in var_seq[x:x + 3]:
if y == "":
ndeleted_bases += 1
if len(y) > 1:
ninserted_bases += len(y) - 1
ninserted_codons += (len(y) - 1) // 3
# completely deleted codons
if var_codon == "":
ndeleted_codons += 1
else:
noframe_codons += 1
# count effect on protein coding sequence
var_frag_aa = Genomics.translate(varseq)
ref_frag_aa = Genomics.translate(refseq)
# count effect on protein coding sequence
var_s = alignlib_lite.py_makeSequence(var_frag_aa)
ref_s = alignlib_lite.py_makeSequence(ref_frag_aa)
diff_length = abs(len(ref_frag_aa) - len(var_frag_aa))
            # very heuristic - might lead to strange effects
alignment_band = max(10, diff_length * 2)
iterator = alignlib_lite.py_makeIterator2DBanded(
-alignment_band, +alignment_band)
alignlib_lite.py_setDefaultIterator2D(iterator)
E.debug("alignment: reference(%i) with variant(%i) (diff=%i) within diagonals %i and %i" %
(len(ref_frag_aa), len(var_frag_aa), diff_length, -alignment_band, alignment_band))
alignator.align(map_ref2var, ref_s, var_s)
# print alignlib_lite.py_AlignmentFormatExplicit( map_ref2var,
# ref_s, var_s )
for x, ref_aa in enumerate(ref_frag_aa):
p = map_ref2var.mapRowToCol(x)
if p < 0:
continue
var_aa = var_frag_aa[p]
if var_aa == ref_aa:
nsynonymous_codons += 1
else:
nnonsynonymous_codons += 1
nwrong_frames += 1
ncorrected_frames += 1
was_in_frame = in_frame
# if var_pos > first_stop * 3: break
if lvarseq % 3 != 0:
nincomplete_codons += 1
# reduce corrected frames by one if we do not end on frame
if not was_in_frame and lvarseq % 3 != 0:
ncorrected_frames -= 1
return TranslationEffect._make((ncodons,
ninserted_bases,
ninserted_codons,
ndeleted_bases,
ndeleted_codons,
nincomplete_codons,
noframe_codons,
nwrong_frames,
ncorrected_frames,
first_stop,
nstops,
nunaffected_codons,
nsynonymous_codons,
nnonsynonymous_codons,
nstop_codons))
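# --- Illustrative sketch (not part of the original script) -------------------
# A minimal way to exercise countEffectsOnTranscript: both sequences are lists
# of single characters (an insertion would be a multi-character entry, a
# deletion an empty string). No exact counts are asserted here, as they depend
# on the bookkeeping above; the helper name is hypothetical.
def _example_count_effects_on_transcript():
    ref = list("ATGGCCAAATGA")
    var = list("ATGGCGAAATGA")  # single base change in the second codon
    effect = countEffectsOnTranscript(var, ref)
    return effect.ncodons, effect.nsynonymous_codons, effect.nnonsynonymous_codons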
def getCDSPosition(exons, start, end, fasta=None, lcontig=None):
'''return cds information for a (positive strand) genomic position.
exons is a list of exons in GTF format.
start, end: are the coordinates of the variant in forward strand coordinates.
    if the first exon is not in frame, cds_start and cds_end will not be
    multiples of 3, but that is correct, as cds_start and cds_end
start counting from 0.
If a region spans a whole intron, the region will be treated as
a single coding sequence variant. Such deletions usually concern
short frame-shifting introns.
'''
strand = exons[0].strand
contig = exons[0].contig
is_positive_strand = Genomics.IsPositiveStrand(strand)
if is_positive_strand:
coordinates = [(e.start, e.end, int(e.frame)) for e in exons]
else:
if not fasta and not lcontig:
raise ValueError(
"no fasta or lcontig option given for a negative strand transcript")
if fasta:
# convert all to negative strand coordinates
lcontig = fasta.getLength(contig)
start, end = lcontig - end, lcontig - start
coordinates = [(lcontig - e.end, lcontig - e.start, int(e.frame))
for e in exons]
coordinates.sort()
# phase is the complement to frame (i.e. position in codon, not base to
# next codon)
cds_start, cds_end, cds_phase = None, None, None
# coordinates for previous/next exons for snps spanning splice sites
prev_exon_end, next_exon_start = None, None
# intron positions
intron_start, intron_end = None, None
# start, end of feature within coding sequence
# sequence is a sequence of all codons that cover the feature
cds_seq_start, cds_seq_end, cds_seq = None, None, None
# flag to denote exon skipping
exon_skipping = False
# start, end of feature within non-coding sequence
nc_seq_start, nc_seq_end, nc_seq = None, None, None
exon_id = None
# empty result in case of no overlap
if end <= coordinates[0][0] or start >= coordinates[-1][1]:
return None
intron_id, exon_id = None, 0
nexons = len(coordinates)
last_exon = nexons - 1
start_phase = (3 - coordinates[0][2]) % 3
# start within frame
cds_start = start_phase
# find exon overlapping the region or exon immediately after it
while exon_id < len(coordinates):
exon_start, exon_end, exon_frame = coordinates[exon_id]
if start < exon_end:
break
cds_start += exon_end - exon_start
exon_id += 1
if end <= exon_start:
# overlap with intron only
cds_start, cds_end = None, None
if exon_id > 0:
intron_start, intron_end = coordinates[exon_id - 1][1], exon_start
nc_seq_start, nc_seq_end = start, end
intron_id = exon_id - 1
else:
# overlap with exon
#
# position of first complete codon in this feature:
first_full_codon_start = exon_start + exon_frame
        # correction of frame at end of exon due to frame-shifting codons
frame_correction = False
# special treatment if region spans the complete intron
if exon_id < last_exon and end > coordinates[exon_id + 1][0]:
if end > coordinates[exon_id + 1][1]:
raise ValueError(
"can not deal with variants spanning multiple exons")
# simply increase the current exon
exon_end = coordinates[exon_id + 1][1]
# in order to adjust the frame, add the intron towards the exon
frame_correction = True
cds_x, cds_y = max(start, exon_start), min(end, exon_end)
cds_start += cds_x - exon_start
cds_end = cds_start + cds_y - cds_x
cds_phase = (3 - exon_frame + cds_x - exon_start) % 3
# print "exon_id=", exon_id, "start=", start, "end=", end, \
# "codon_start", cds_x, "codon_end", cds_y, \
# "cds_phase=", cds_phase, "cds_start", cds_start, "cds_end", cds_end, \
# "exon_start=",exon_start, "exon_end=", exon_end, "exon_frame=", exon_frame
# incomplete bases in a codon at 3' end of this feature:
# = frame of next feature
# (3 - ( exon_end - exon_start - exon_frame) % 3 ) % 3
if exon_id < last_exon:
last_full_codon_end = exon_end - \
(3 - coordinates[exon_id + 1][2]) % 3
if not frame_correction:
assert (3 - ( exon_end - exon_start - exon_frame + frame_correction ) % 3 ) % 3 == coordinates[exon_id + 1][2], \
"frame mismatch between exons %i and %i" % (
exon_id, exon_id + 1)
else:
last_full_codon_end = 0
# link to previous next exons in the case of split exons
is_split_start = exon_start <= cds_x < first_full_codon_start and exon_id > 0
is_split_end = last_full_codon_end < cds_y <= exon_end and exon_id < last_exon
if is_split_start:
prev_exon_end = coordinates[exon_id - 1][1]
if is_split_end:
next_exon_start = coordinates[exon_id + 1][0]
next_frame = coordinates[exon_id + 1][2]
# sort out the sequence
cds_seq = []
# position of variant in cds_sequence
cds_seq_start, cds_seq_end = cds_phase, cds_y - cds_x + cds_phase
if fasta:
# link to previous/next exons in the case of split codons
if is_split_start:
codon_start = prev_exon_end - (3 - exon_frame) % 3
codon_end = prev_exon_end
cds_seq.insert(0, fasta.getSequence(contig,
strand,
codon_start,
codon_end))
codon_start = cds_x - cds_phase
codon_end = cds_y + (3 - (cds_end % 3)) % 3
# deal with incomplete codon at start
if codon_start < exon_start and exon_id == 0:
assert exon_frame != 0
cds_seq.extend(list("X" * (exon_start - codon_start)))
# print "exon_id=", exon_id, "start=", start, "end=", end, "codon_start", codon_start, "codon_end", codon_end, "cdsx", cds_x, "cdsy", cds_y, cds_phase, "cds_start", cds_start, "cds_end", cds_end, \
# "exon_start=",exon_start, "exon_end=", exon_end, "start_phase=", start_phase, "first_start", first_full_codon_start, "last_end", last_full_codon_end, \
# "split_start", is_split_start, "split_end", is_split_end
cds_seq.extend(list(fasta.getSequence(contig,
strand,
max(exon_start,
codon_start),
min(exon_end, codon_end))))
# fill up, if last codon is incomplete
if codon_end > exon_end and exon_id == last_exon:
cds_seq.extend(list("X" * (codon_end - exon_end)))
if is_split_end:
cds_seq.append(fasta.getSequence(contig,
strand,
next_exon_start,
next_exon_start + next_frame
))
cds_seq = "".join(cds_seq)
lnoncoding = (end - start) - (cds_y - cds_x)
if start <= exon_start and end >= exon_end:
# special treatment if region spans the complete exon
if exon_id < nexons - 1:
nc_seq_start, nc_seq_end = start, end
intron_start = prev_exon_end
intron_end = next_exon_start
intron_id = exon_id - 1
else:
                # unless it is the last exon - truncate, but only
                # if it extends into the intron
                if start < exon_start:
                    intron_start, intron_end = prev_exon_end, exon_start
nc_seq_start, nc_seq_end = start, exon_start
intron_id = exon_id - 1
exon_skipping = True
elif start < exon_start and exon_id > 0:
# disrupted intronic sequence
intron_start, intron_end = coordinates[exon_id - 1][1], exon_start
nc_seq_start, nc_seq_end = exon_start - lnoncoding, exon_start
intron_id = exon_id - 1
elif end > exon_end and exon_id < nexons - 1:
# disrupted intronic sequence
intron_start, intron_end = exon_end, coordinates[exon_id + 1][0]
nc_seq_start, nc_seq_end = exon_end, exon_end + lnoncoding
intron_id = exon_id
if fasta and nc_seq_start is not None:
nc_seq = fasta.getSequence(contig, strand, nc_seq_start, nc_seq_end)
# subtract starting frame
if cds_start is not None:
cds_start -= start_phase
cds_end -= start_phase
return CdsResult._make((strand, start, end,
exon_id, exon_start, exon_end,
cds_start, cds_end, cds_phase,
intron_id, intron_start, intron_end,
prev_exon_end, next_exon_start,
cds_seq, cds_seq_start, cds_seq_end,
nc_seq, nc_seq_start, nc_seq_end,
exon_skipping))
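# --- Illustrative sketch (not part of the original script) -------------------
# getCDSPosition only needs exon-like objects exposing contig, strand, start,
# end and frame, so a small stand-in class is used here instead of a real GTF
# entry; all names are hypothetical. Without a fasta, the returned CdsResult
# carries coordinates but no sequence.
def _example_get_cds_position():
    class _FakeExon(object):
        def __init__(self, contig, strand, start, end, frame):
            self.contig, self.strand = contig, strand
            self.start, self.end, self.frame = start, end, frame
    exons = [_FakeExon("chr1", "+", 100, 160, 0),
             _FakeExon("chr1", "+", 200, 260, 0)]
    # a SNP at position 105 lies in the first exon, 5 bases into the CDS,
    # so the result is expected to have cds_start == 5 and cds_end == 6
    return getCDSPosition(exons, 105, 106)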
class Counter(object):
'''annotator for single bases in the genome.'''
mHeader = ()
def __init__(self, fasta=None, pattern="%s", *args, **kwargs):
self.mFasta = fasta
self.mFilenamePattern = pattern
def __str__(self):
return ""
def getHeader(self):
'''return header'''
return "\t".join(self.mHeader)
class CounterGenes(Counter):
    '''count SNPs per gene that they overlap with.'''
mHeader = ["exons_%s" % x for x in ("ntranscripts", "nused", "pos")]
def __init__(self, filename_exons, *args, **kwargs):
Counter.__init__(self, *args, **kwargs)
exons = IndexedGenome.IndexedGenome()
nexons = 0
inf = IOTools.openFile(filename_exons, "r")
for g in GTF.iterator(inf):
exons.add(g.contig, g.start, g.end, g)
nexons += 1
inf.close()
self.mExons = exons
E.info("indexed %i exons on %i contigs" % (nexons, len(exons)))
# create counter
self.mCounts = collections.defaultdict(int)
def update(self, snp):
'''update with snp.'''
exons = list(self.mExons.get(snp.chromosome, snp.pos, snp.pos + 1))
if exons:
for start, end, gtf in exons:
self.mCounts[gtf.gene_id] += 1
def writeTable(self, outfile):
outfile.write("gene_id\tnsnps\n")
for key in sorted(self.mCounts.keys()):
outfile.write("\t".join((key, str(self.mCounts[key]))) + "\n")
class CounterTranscripts(Counter):
'''count SNPs per transcripts that it overlaps with.
Variants are not phased, so is not always clear which of the two allelles of a transcript
is affected. Thus, the following heuristic is adopted:
1 Only homozygous variants: locus flagged as homozygous. Both alleles are assumed to be the same and
different from the wild type.
2 Only heterozygous variants: locus flagged as heterozygous. One allele is assumed to be the wild type,
the other one is a variant.
3 Mixture of homo- and heterozygous variants: locus flagged as mixture. A mixed allele is constructed with
all variants.
Columns
transcript_id
the transcript_id
cds_len
length of cds in bases
ncodons
number of codons in wild type
last_exon_start
start (cds coordinates) of last exon (useful for detecting nonsense-mediated decay)
max_variants
maximum number of variants per site
nvariant_sites
number of variable sites within the ntranscript
genotype
the genotype
nalleles
number of variants (1 = either homozygote variant or heterozygote variant/wild type)
stop_min
number of codons truncated either due to disrupted splice signal and/or stop codon.
This is the minimum between two transcripts. If the wildtype is still present,
this value will be 0.
stop_max
number of codons truncated either due to disrupted splice signal or stop codon.
This is the maximum between two transcripts.
Columns are prefixed with ``cds_`` and ``splice_`` for cds and splice variants,
    respectively. Without a prefix, a column refers to the combined effects of cds and
    splice variants.
'''
# outfile.write(
mHeader = ["transcript_id",
"cds_len",
"ncodons",
"last_exon_start",
"cds_max_variants", "cds_nvariant_sites", "cds_genotype", "cds_nalleles",
"cds_stop_min", "cds_stop_max",
"splice_max_variants", "splice_nvariant_sites", "splice_genotype", "splice_nalleles",
"splice_stop_min", "splice_stop_max",
"max_vars", "nvariant_sites", "genotype", "nalleles",
"stop_min", "stop_max"]
# add this area to check for overlap with splice signals
# This should be larger than the longest deletion.
mSize = 500
# introns smaller than this size are considered to be frameshifts
mMinIntronSize = 5
def __init__(self, filename_exons, seleno, *args, **kwargs):
Counter.__init__(self, *args, **kwargs)
transcripts = IndexedGenome.IndexedGenome()
self.mExons = {}
nexons = 0
ntranscripts = 0
inf = IOTools.openFile(filename_exons, "r")
for gtfs in GTF.transcript_iterator(GTF.iterator(inf)):
start, end = min([x.start for x in gtfs]), max(
[x.end for x in gtfs])
transcripts.add(gtfs[0].contig, start, end, gtfs)
nexons += len(gtfs)
ntranscripts += 1
self.mExons[gtfs[0].transcript_id] = gtfs
inf.close()
self.mTranscripts = transcripts
self.mSeleno = seleno
E.info("indexed %i transcripts and %i exons on %i contigs" %
(ntranscripts, nexons, len(transcripts)))
E.info("received %i selenoprotein transcripts" % (len(self.mSeleno)))
# create counter
self.mCounts = collections.defaultdict(int)
self.mOutfileIntron = IOTools.openFile(
self.mFilenamePattern % "intron", "w")
self.mOutfileIntron.write(
"transcript_id\tcontig\tsnp_position\tvariant_type\tvariant_code\tvariant_seq\texon_id\tnexon\tcode\torig_name\torig_seq5\torig_seq3\tvariant_name\tvariant_seq5\tvariant_seq3\tintron_start\tintron_end\tstrand\tnc_start\tnc_end\n")
self.mOutfileCds = IOTools.openFile(self.mFilenamePattern % "cds", "w")
self.mOutfileCds.write("\t".join((
"transcript_id",
"contig",
"snp_position",
"reference",
"variant_type",
"variant_code",
"variant_bases",
"exon_id",
"nexons",
"code",
"orig_seq",
"orig_na",
"orig_codons",
"variant_seq",
"variant_na",
"variant_codons",
"cds_phase",
"cds_start",
"cds_end",
"cds_len")) + "\n")
self.mOutfileTranscripts = IOTools.openFile(
self.mFilenamePattern % "translation", "w")
self.mOutfileTranscripts.write(
"transcript_id\tvariant_id\tlast_exon_start\t%s\tseq_na\tseq_aa\n" % "\t".join(TranslationEffect._fields))
self.mOutfileSplicing = IOTools.openFile(
self.mFilenamePattern % "splicing", "w")
self.mOutfileSplicing.write(
"transcript_id\tvariant_id\t%s\n" % "\t".join(SplicingEffect._fields))
self.mTranscriptVariants = {}
def getVariantRange(self, snp):
'''return effective range of a variant.
The effective range is a single base in case of a SNP.
        For an insertion, the range covers the two bases around the insertion
        point (relevant when the insertion falls into coding sequence).
        For a deletion, the range covers the deleted bases.
'''
contig = snp.chromosome
lcontig = self.mFasta.getLength(contig)
reference_base = snp.reference_base
start, end = lcontig, 0
# process according to variant type
# indels need to be treated differently from SNPs as
# they have larger effects
if reference_base == "*":
variants = snp.genotype.split("/")
for variant in variants:
if variant[0] == "*":
continue
elif variant[0] == "+":
start = min(start, snp.pos)
end = max(end, snp.pos + 2)
elif variant[0] == "-":
# deletions are after the base denoted by snp.pos
start = min(start, snp.pos + 1)
# pos + 1 + len(var) - 1 = pos + len(var)
end = max(end, snp.pos + len(variant))
else:
raise ValueError("unknown variant sign '%s'" % variant[0])
else:
# a single base SNP
start = min(start, snp.pos)
end = max(end, snp.pos + 1)
start, end = max(0, start), min(end, lcontig)
if start == end:
return None, None
else:
return start, end
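    # Illustrative note (not part of the original script): for a substitution
    # at position p the effective range is (p, p + 1); for an insertion it is
    # (p, p + 2); for a deletion of k bases it is (p + 1, p + 1 + k), because
    # deleted bases follow the reported position.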
def getSequence(self, snp, r, variant):
'''return sequence of snp taking into account strandedness of transcript.'''
contig = snp.chromosome
# collect sequences (resolving strandedness)
reference_base = snp.reference_base
if reference_base != "*":
variant_bases = Genomics.resolveAmbiguousNA(variant.sequence)
assert len(variant_bases) == 1
else:
variant_bases = []
variant_seq = variant.sequence
if not Genomics.IsPositiveStrand(r.strand):
variant_seq = Genomics.complement(variant_seq)
variant_bases = [
Genomics.complement(base) for base in variant_bases]
reference_base = Genomics.complement(reference_base)
return reference_base, variant_seq, variant_bases
def collectSplicingEffects(self, snp, r, variant, reference_base, variant_seq, variant_bases):
'''compute effects of a variant on a transcript.
The effects are independent of any other variants.
return a list of splicing effects.
'''
intron_effects, intron_changes = [], []
# collect splicing effects only
if r.nc_start is None:
return intron_effects, intron_changes
contig = snp.chromosome
lvariant = len(variant_seq)
intron_seq = self.mFasta.getSequence(
contig, r.strand, r.intron_start, r.intron_end).upper()
is_frameshift = len(intron_seq) < self.mMinIntronSize
intron_name, intron_seq5, intron_seq3 = Genomics.GetIntronType(
intron_seq)
variant_introns = []
if (r.nc_start - r.intron_start) >= len(intron_seq5) and (r.intron_end - r.nc_end) >= len(intron_seq3):
# intronic variant - ignore if not actually overlapping with splice
# site
pass
else:
E.debug("cds=%s, variant=%s" % (str(r), str(snp)))
variant_intron_seq = list(intron_seq)
x, y = r.nc_start - r.intron_start, r.nc_end - r.intron_start
if variant.code == "=":
# add SNP
assert y - x == 1, "expect only single base substitutions"
if intron_seq[x:y] != reference_base:
raise ValueError("expected=%s, got=%s:%s:%s, snp=%s, cds=%s" %
(reference_base,
intron_seq[x - 3:x],
intron_seq[x:y],
intron_seq[y:y + 3],
str(snp), str(r)))
# record multiple substitutions
for base in variant_bases:
if base != reference_base:
variant_intron_seq[x:y] = base
variant_introns.append("".join(variant_intron_seq))
intron_effects.append(SpliceEffect._make((r.intron_id,
reference_base,
base)))
elif variant.code == "+":
# add insertion
# If the insertion is at an intron/exon boundary
# y -x = 1. In this case attribute this to a
# coding sequence change and ignore
if y - x == 2:
# python inserts before the index
variant_intron_seq[y:y] = list(variant_seq)
variant_introns.append("".join(variant_intron_seq))
intron_effects.append(SpliceEffect._make((r.intron_id,
"",
variant_seq)))
else:
if y - x != 1:
raise ValueError(
"expected an insert of length 1 or 2, got %i for %s" % (y - x, str(snp)))
elif variant.code == "-":
# add deletion
if x == 0 and y == r.intron_end - r.intron_start:
# deletion covers full length of intron
if r.intron_id < r.exon_id:
# truncate from start if intron preceding exon
xx, yy = 0, y - x
else:
                        # truncate from end if intron succeeding exon
xx, yy = lvariant - (y - x), lvariant
elif x == 0:
# deletion at 3' end of intron: truncate from the end
xx, yy = lvariant - (y - x), lvariant
else:
xx, yy = 0, y - x
if intron_seq[x:y] != variant_seq[xx:yy]:
raise ValueError("expected=%s, got=%s:%s:%s, %i:%i, %i:%i, snp=%s, cds=%s" %
(variant_seq[xx:yy],
intron_seq[x - 3:x],
intron_seq[x:y],
intron_seq[y:y + 3],
x, y,
xx, yy,
str(snp), str(r)))
intron_effects.append(SpliceEffect._make((r.intron_id,
variant_intron_seq[
x:y],
"")))
del variant_intron_seq[x:y]
variant_introns.append("".join(variant_intron_seq))
for variant_intron_seq in variant_introns:
variant_intron_name, variant_intron_seq5, variant_intron_seq3 = Genomics.GetIntronType(
variant_intron_seq)
# if intron is a frameshift, the full intron seq is returned
#if is_frameshift: reference_seq, variant_seq = intron_seq, variant_inseq
intron_changes.append(SpliceChange._make((r.exon_id - 1,
is_frameshift,
intron_name, intron_seq5, intron_seq3,
variant_intron_name, variant_intron_seq5, variant_intron_seq3)))
return intron_effects, intron_changes
def collectCodingEffects(self, snp, r, variant, reference_base, variant_seq, variant_bases):
'''compute effects of a variant on a transcript.
The effects are independent of any other variants.
return a list of cds effects
'''
coding_effects = []
# process coding effects, return empty if none
if r.cds_start is None:
return coding_effects
contig = snp.chromosome
lvariant = len(variant_seq)
cds_seq = r.cds_seq.upper()
variant_cds_seq = list(cds_seq)
x, y = r.cds_seq_start, r.cds_seq_end
if len(cds_seq) % 3 != 0:
raise ValueError("expected codon sequence, got=%s (%i), %s:%s:%s, %i:%i, snp=%s, cds=%s" %
(cds_seq,
len(cds_seq),
cds_seq[:x],
cds_seq[x:y],
cds_seq[y:],
x, y,
str(snp), str(r)))
if variant.code == "=":
# process substitution
assert y - x == 1, "expect only single base substitutions"
if cds_seq[x:y] != reference_base:
raise ValueError("expected=%s, got=%s:%s:%s, %i:%i, snp=%s, cds=%s" %
(reference_base,
cds_seq[:x],
cds_seq[x:y],
cds_seq[y:],
x, y,
str(snp), str(r)))
# record multiple substitutions
for base in variant_bases:
if base != reference_base:
variant_cds_seq[x] = base
coding_effects.append(CdsEffect._make((r.exon_id,
reference_base,
base,
cds_seq,
"".join(
variant_cds_seq),
)))
elif variant.code == "+":
# add insertion - python inserts before index
variant_cds_seq[y:y] = variant_seq
coding_effects.append(CdsEffect._make((r.exon_id,
"",
variant_seq,
cds_seq,
"".join(variant_cds_seq))))
elif variant.code == "-":
# add deletion
if r.exon_skipping:
xx, yy = r.exon_start - r.nc_start, lvariant - \
(r.nc_end - r.exon_end)
elif r.nc_start is not None:
# deletion at exon boundary
if r.intron_id < r.exon_id:
# deletion at 5' end of exon, take only 3' bases of variant
xx, yy = lvariant - (y - x), lvariant
else:
# deletion at 3' end of exon, take only 5' bases of variant
xx, yy = 0, y - x
# removed the following condition: "and r.nc_start != r.intron_start:"
# deletion at 3' end of intron boundary - delete last bases
# xx, yy = lvariant - (y-x), lvariant
elif r.cds_start == 0:
# deletion at first codon - take only 3' bases of variant
xx, yy = lvariant - (y - x), lvariant
else:
# deletion after - delete last bases
xx, yy = 0, y - x
if cds_seq[x:y] != variant_seq[xx:yy]:
raise ValueError("expected=%s, got=%s:%s:%s, %i:%i, %i:%i, snp=%s, cds=%s" %
(variant_seq[xx:yy],
cds_seq[:x],
cds_seq[x:y],
cds_seq[y:],
x, y,
xx, yy,
str(snp), str(r)))
del variant_cds_seq[x:y]
coding_effects.append(CdsEffect._make((r.exon_id,
cds_seq[x:y],
"",
cds_seq,
"".join(variant_cds_seq))))
return coding_effects
def update(self, snp):
'''update with snp.'''
# get effective range of snp
snp_start, snp_end = self.getVariantRange(snp)
# ignore snps that are out-of-range
if snp_start is None:
return
contig = snp.chromosome
transcripts = list(
self.mTranscripts.get(snp.chromosome, snp_start, snp_end))
if not transcripts:
return
reference_base = snp.reference_base
# collect all variants at this position
# indels and deletions might effect more than this
# position
variants_to_test = []
variant_types = []
is_homozygous = True
if reference_base == "*":
variants = snp.genotype.split("/")
codes = [x[0] for x in variants]
            # variant is heterozygous if the wildtype is present or if the
            # codes/sequences of the variants are not identical.
if ("*" in codes) or (variants[0] != variants[1]):
is_homozygous = False
# note that I found an inconsistency between the genotype field and the second-allele field
# genotype='-GGG/-GGG', first_allelle='-GGG', second_allele='-GGGG'
# In other cases it is correct, even with longer deletions.
for variant in set(variants):
if variant[0] == "*":
variant_types.append("W")
elif variant[0] == "+":
variant_types.append("I")
# insertions affect the base before and after the insertion
variants_to_test.append(
Variant._make((variant[0], variant[1:], snp.pos, snp.pos + 1)))
elif variant[0] == "-":
variant_types.append("D")
# deletions are after the base denoted by snp.pos
start = snp.pos + 1
# pos + 1 + len(var) - 1 = pos + len(var)
end = snp.pos + len(variant)
variants_to_test.append(
Variant._make((variant[0], variant[1:], start, end)))
else:
if snp.genotype in 'ACGTacgt':
# homozygous substitution
variant_types.append("O")
else:
# heterozygous substitution
variant_types.append("E")
is_homozygous = False
for base in Genomics.resolveAmbiguousNA(snp.genotype).upper():
if base == snp.reference_base:
continue
variants_to_test.append(
Variant._make(("=", base, snp.pos, snp.pos + 1)))
self.mVariantTypes = variant_types
E.debug("snp: %s:%i variants_to_test=%i, transcripts=%i, is_homozygous=%s" %
(snp.chromosome, snp.pos,
len(variants_to_test), len(transcripts), str(is_homozygous)))
counts = E.Counter()
# intersect all transcripts in the gene with the possible substitutions
for transcript_start, transcript_end, exons in transcripts:
transcript_id = exons[0].transcript_id
all_splice_changes, all_splice_effects, all_cds_effects = [
], [], []
for variant in variants_to_test:
E.debug("snp: %s:%i variant=%i:%i:%s:%s, transcript=%s" % (snp.chromosome, snp.pos,
variant.start,
variant.end,
variant.code,
variant.sequence,
transcript_id))
r = getCDSPosition(exons,
variant.start, variant.end,
self.mFasta)
if not r:
continue
reference_base, variant_seq, variant_bases = self.getSequence(
snp, r, variant)
# assert variant_seq.lower() in r.cds_seq.lower(), \
# "variant sequence %s not in cds seq %s: %s" % (variant_seq, r.cds_seq, str(r))
cds_effects = self.collectCodingEffects(snp, r, variant,
reference_base, variant_seq, variant_bases)
splice_effects, splice_changes = self.collectSplicingEffects(snp, r, variant,
reference_base, variant_seq, variant_bases)
if len(splice_effects) + len(cds_effects) == 0:
counts.no_effect += 1
continue
all_splice_effects.extend(splice_effects)
all_cds_effects.extend(cds_effects)
all_splice_changes.extend(splice_changes)
if all_splice_changes:
self.outputSpliceEffects(
snp, exons, variant, all_splice_changes, r)
if all_cds_effects:
self.outputCDSEffects(snp, exons, variant, all_cds_effects, r)
if len(all_splice_effects) + len(all_cds_effects) == 0:
continue
self.updateVariantTranscripts(transcript_id, snp,
exons, variant,
all_splice_effects,
all_cds_effects,
r, is_homozygous)
def updateVariantTranscripts(self, transcript_id, snp, exons, variant, splice_effects, cds_effects, r, is_homozygous):
'''collect variation for each transcript.
'''
if transcript_id not in self.mTranscriptVariants:
self.mTranscriptVariants[
transcript_id] = TranscriptVariant._make(([], []))
v = self.mTranscriptVariants[transcript_id]
for e in cds_effects:
# splice variants cause all residues after a modified splice site
# to be deleted
v.cds_variants.append(
CdsVariant._make((transcript_id,
r.cds_start, r.cds_end,
variant.code,
e.orig_seq, e.variant_seq,
is_homozygous)))
for e in splice_effects:
# for splice effects save the full snps to sort out the intron sequence later.
# due to deletions, etc, the resolving might be difficult.
v.splice_variants.append(
SpliceVariant._make((transcript_id,
e.exon_id,
r.nc_start - r.intron_start, r.nc_end -
r.intron_start,
variant.code,
e.orig_seq, e.variant_seq,
is_homozygous)))
def getSpliceCode(self, splice_name, new_splice_name):
'''assign one-letter code to a splice-signal change.'''
if splice_name == "unknown" and new_splice_name == "unknown":
# unknown splice signal
code = "U"
elif new_splice_name == "unknown":
# disrupted splice site
code = "D"
elif splice_name == "unknown":
# newly created splice site
code = "C"
elif splice_name == new_splice_name:
# synonymous change
code = "S"
elif splice_name != new_splice_name:
# non-synonymous change
code = "N"
return code
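    # Illustrative note (not part of the original script): if both names are
    # "unknown" the code is "U"; a known signal turning into "unknown" gives
    # "D" (disrupted); "unknown" turning into a known signal gives "C" (newly
    # created); identical known names give "S" and differing known names "N".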
def outputSpliceEffects(self, snp, exons, variant, splice_effects, r):
'''output effects of variants affecting splice sites.'''
for e in splice_effects:
self.mOutfileIntron.write("%s\n" % "\t".join(
(exons[0].transcript_id,
snp.chromosome,
"%i" % snp.pos,
",".join(self.mVariantTypes),
variant.code,
variant.sequence,
"%i" % e.exon_id,
"%i" % len(exons),
self.getSpliceCode(e.orig_name, e.variant_name),
str(e.orig_name),
e.orig_seq5,
e.orig_seq3,
str(e.variant_name),
e.variant_seq5,
e.variant_seq3,
"%i" % r.intron_start,
"%i" % r.intron_end,
r.strand,
"%i" % r.nc_start,
"%i" % r.nc_end,
)))
def getSubstitutionCode(self, original_codons, variant_codons):
        '''assign a one-letter code to a codon change.
'''
if variant_codons == "!":
# variant creates a frameshift
code = "F"
elif original_codons == variant_codons:
# a synonymous substitution
code = "S"
elif "X" in variant_codons:
# variant creates a stop codon
code = "X"
elif "U" in variant_codons:
            # variant creates a stop codon - that might be a selenocysteine
code = "U"
elif original_codons in variant_codons:
# a synonymous insertion
code = "I"
elif len(variant_codons) == 0 or variant_codons in original_codons:
# a synonymous deletion
code = "D"
else:
            # a non-synonymous variant (substitution or indel)
# removing the original codon and replacing it with others
code = "N"
return code
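    # Illustrative note (not part of the original script): for translated codon
    # strings, ("MA", "MA") -> "S", ("MA", "MV") -> "N", ("MA", "MX") -> "X",
    # ("MA", "MAV") -> "I" (the original codons are contained in the variant),
    # ("MA", "") -> "D", and ("MA", "!") -> "F" for frameshifts.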
def outputCDSEffects(self, snp, exons, variant, cds_effects, r):
cds_len = sum([x.end - x.start for x in exons])
is_seleno = exons[0].transcript_id in self.mSeleno
for e in cds_effects:
assert len(e.codon_orig_seq) % 3 == 0
assert e.codon_orig_seq != e.codon_variant_seq
orig_codons = Genomics.translate(e.codon_orig_seq,
is_seleno=is_seleno)
if len(e.codon_variant_seq) % 3 == 0:
variant_codons = Genomics.translate(e.codon_variant_seq,
is_seleno=is_seleno)
else:
variant_codons = "!"
self.mOutfileCds.write("%s\n" % "\t".join(
(exons[0].transcript_id,
snp.chromosome,
"%i" % snp.pos,
snp.reference_base,
",".join(self.mVariantTypes),
variant.code,
variant.sequence,
"%i" % e.exon_id,
"%i" % len(exons),
self.getSubstitutionCode(orig_codons, variant_codons),
str(e.orig_seq),
str(e.codon_orig_seq),
orig_codons,
str(e.variant_seq),
str(e.codon_variant_seq),
variant_codons,
"%i" % r.cds_phase,
"%i" % r.cds_start,
"%i" % r.cds_end,
"%i" % cds_len)))
def buildCDSVariantsPerPosition(self, transcript_id, cds_variants, cds_len):
'''count the number of variants.
'''
variants_per_position = numpy.zeros(cds_len)
ncds_variants = len(cds_variants)
for v in cds_variants:
assert v.cds_end <= cds_len
variants_per_position[v.cds_start:v.cds_end] += 1
return variants_per_position
def buildIntronsVariantsPerPosition(self, transcript_id, variants, intron_seqs):
        '''count the number of variants within introns
(variants have already been filtered to only include those that
affect splicing).
'''
s = self.mSize
lengths = [len(x) for x in intron_seqs]
# only count 2 * s positions within intron
variants_per_position = numpy.zeros(2 * s * len(lengths))
nvar = len(variants)
for v in variants:
offset = v.intron_id * 2 * s
l = lengths[v.intron_id]
start, end = v.nc_start, v.nc_end
if start < s:
assert end < s, "variant (%i) larger than mSize (%i)" % (
end, s)
elif l - end < s:
assert l - \
start < s, "variant (%i) larger than mSize (%i)" % (
l - start, s)
offset += s
start, end = l - end, l - start
else:
raise ValueError("count out of range")
variants_per_position[offset + start:offset + end] += 1
return variants_per_position
def getGenotype(self, variants, variants_per_position, counts):
'''compute the genotype and number of variants.
*variants_per_position* is a vector of variants affecting a position.
returns a genotype and the number of variants.
'''
max_variants_per_position = max(variants_per_position)
nvar = len(variants)
homo = [x.is_homozygous for x in variants]
nhomo = len([x for x in homo if x])
nhetero = len(homo) - nhomo
if nhomo == nvar and max_variants_per_position == 1:
# all are homozygous, one variant only
genotype = "O"
counts.is_homozygous += 1
counts.is_resolvable += 1
nvariants = 1
elif nhomo == 0 and nvar == 1 and max_variants_per_position == 1:
# one heterozygous position, rest is wild type
genotype = "W"
counts.is_heterozygous += 1
counts.is_resolvable += 1
nvariants = 1
elif nhomo == nvar - 1 and max_variants_per_position == 1:
# one heterozygous allowed if the rest are homozygous
genotype = "E"
counts.is_heterozygous += 1
counts.is_resolvable += 1
nvariants = 2
elif nvar == 1 and max_variants_per_position == 2:
# if there is only one heterozygous variant, which does not include
# the wild type
genotype = "E"
counts.is_heterozygous += 1
counts.is_resolvable += 1
nvariants = 2
elif nhetero == nvar and max_variants_per_position == 1:
# if all are heterozygous and one allele is always the wild type
# resolve towards one allele, though it might be ambiguous
genotype = "V"
counts.is_heterozygous += 1
counts.is_ambiguous += 1
nvariants = 1
elif max_variants_per_position == 1:
# if there is only one variant at each position but more than two
# heterozygous variants in total
# resolve towards two alleles
genotype = "v"
counts.is_heterozygous += 1
counts.is_ambiguous += 1
nvariants = 2
else:
genotype = "M"
counts.is_mixture += 1
counts.is_unresolvable += 1
nvariants = 2
return genotype, nvariants, max_variants_per_position
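    # Illustrative note (not part of the original script): for example, only
    # homozygous variants with at most one variant per position give genotype
    # "O" with a single allele; a single heterozygous variant against the
    # wildtype gives "W"; anything that cannot be resolved onto at most two
    # alleles falls through to "M" and is counted as unresolvable.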
def buildCDSVariants(self,
transcript_id,
cds_variants,
reference_seq_na,
offset,
nvariants):
'''build variants for the coding sequence.
offset: offset to correct for starting frame != 0
'''
variant_cds_seqs = []
# the following code works with two variants at most
assert 0 < nvariants <= 2, "expected 1 or 2 variants, got %i" % nvariants
for x in range(nvariants):
variant_cds_seqs.append(list(reference_seq_na))
n = 0
for v in cds_variants:
# ignore variants at incomplete codons
if v.cds_start + offset < 0:
E.warn("skipping variant in %s in first out-frame codon: %s." %
(transcript_id, str(v)))
continue
if v.is_homozygous:
toupdate = list(range(nvariants))
else:
toupdate = (0,)
if v.code == "=":
assert len(v.variant_seq) == 1
assert reference_seq_na[v.cds_start + offset] == v.reference_seq.lower(), "transcript %s: base mismatch: %s != %s at %i, %s" %\
(transcript_id, reference_seq_na[
v.cds_start + offset], v.reference_seq.lower(), v.cds_start, str(v))
for x in toupdate:
variant_cds_seqs[x][v.cds_start + offset] = v.variant_seq
elif v.code == "+":
# indels are done without disrupting the frame
# prepend.
for x in toupdate:
variant_cds_seqs[x][
v.cds_start + offset] = v.variant_seq + variant_cds_seqs[x][v.cds_start + offset]
elif v.code == "-":
# indels are done without disrupting the frame
for x in toupdate:
for y in range(v.cds_start, v.cds_end):
variant_cds_seqs[x][y + offset] = ""
n += 1
if E.global_options.loglevel >= 10:
for x in range(nvariants):
Genomics.printPrettyAlignment(reference_seq_na,
variant_cds_seqs[x])
return variant_cds_seqs
def buildIntronVariants(self, transcript_id, splice_variants,
reference_seqs_na, nvariants):
'''build all intron variants.
Returns a list of variants. Each variant is a list of introns. Introns that are unchanged
are None.
The first entry in the list is the wildtype.
returns a list of variants.
'''
variant_intron_seqs = []
# the following code works with one or two variants
assert 0 < nvariants <= 2, "expected 1 or 2 variants, got %i" % nvariants
nintrons = len(reference_seqs_na)
for x in range(nvariants):
variant_intron_seqs.append([None for y in reference_seqs_na])
n = 0
for v in splice_variants:
E.debug("transcript_id=%s: splice=%s" % (transcript_id, str(v)))
if v.is_homozygous:
toupdate = list(range(nvariants))
else:
toupdate = (0,)
intron_id = v.intron_id
assert 0 <= intron_id < len(
reference_seqs_na), "intron id `%i` out of range" % intron_id
# instantiate intron sequence
for x in toupdate:
if variant_intron_seqs[x][intron_id] is None:
variant_intron_seqs[x][intron_id] = list(
reference_seqs_na[intron_id])
if v.code == "=":
assert len(v.variant_seq) == 1
assert reference_seqs_na[intron_id][v.nc_start] == v.reference_seq.lower(), \
"transcript %s: base mismatch: %s != %s at %i:%i" %\
                    (transcript_id, reference_seqs_na[intron_id][v.nc_start], v.reference_seq.lower(),
v.intron_id, v.nc_start)
for x in toupdate:
variant_intron_seqs[x][intron_id][
v.nc_start] = v.variant_seq
elif v.code == "+":
# indels are done without disrupting the frame
# prepend to second residue
assert (v.nc_end - v.nc_start) == 2
for x in toupdate:
variant_intron_seqs[x][intron_id][v.nc_end] = v.variant_seq + \
variant_intron_seqs[x][intron_id][v.nc_end]
elif v.code == "-":
# indels are done without disrupting the frame
for x in toupdate:
for y in range(v.nc_start, v.nc_end):
variant_intron_seqs[x][intron_id][y] = ""
n += 1
return variant_intron_seqs
def countEffectsOnSplicing(self, variant_intron_seqs, reference_intron_seqs, min_intron_size=5):
'''collect all effects per intron
return a count for each intron.
'''
nintrons = len(reference_intron_seqs)
ncorrected_frames, nsynonymous, nnonsynonymous, ncanonical = 0, 0, 0, 0
ndisrupted, nunknown, nunchanged, nnovel = 0, 0, 0, 0
ncorrected_frames, nuncorrected_frames = 0, 0
nframeshifts, ninserted_codons, nunchanged_frames = 0, 0, 0
nnoncanonical = 0
codes = []
last_exon = nintrons + 1
for intron_id, reference_seq in enumerate(reference_intron_seqs):
reference_name, reference_seq5, reference_seq3 = Genomics.GetIntronType(
reference_seq)
e = 0
variant_seq = variant_intron_seqs[intron_id]
# process frameshift introns
if len(reference_seq) < min_intron_size:
nframeshifts += 1
if variant_seq is None:
variant_name, variant_seq5, variant_seq3 = reference_name, reference_seq5, reference_seq3
nunchanged_frames += 1
codes.append(".")
continue
variant_seq = "".join(variant_seq)
# there might be both sequences of mod 3 and not
fullseq = "".join(variant_seq)
if len(variant_seq) % 3 == 0:
# a fixed frame shift
ncorrected_frames += 1
                # note that the inserted codon sequence might contain stops
# needs to be tested with the other exons as it might not be
# in frame.
code = "F"
ninserted_codons += len(variant_seq) // 3
else:
code = "P"
nuncorrected_frames += 1
# process real introns
else:
if reference_name != "unknown":
ncanonical += 1
else:
nnoncanonical += 1
if variant_seq is None:
variant_name, variant_seq5, variant_seq3 = reference_name, reference_seq5, reference_seq3
nunchanged += 1
codes.append(".")
continue
variant_seq = "".join(variant_seq)
variant_name, variant_seq5, variant_seq3 = Genomics.GetIntronType(
variant_seq)
code = self.getSpliceCode(reference_name, variant_name)
if code == "D":
last_exon = min(last_exon, intron_id)
ndisrupted += 1
elif code == "C":
nnovel += 1
elif code == "N":
nnonsynonymous += 1
elif code == "S":
nsynonymous += 1
elif code == "U":
nunknown += 1
codes.append(code)
return SplicingEffect._make((nintrons,
ncanonical,
nframeshifts,
nnoncanonical,
nunchanged_frames,
ncorrected_frames,
nuncorrected_frames,
nunchanged,
nsynonymous,
nnonsynonymous,
ndisrupted,
nnovel,
nunknown,
ninserted_codons,
"".join(codes),
last_exon))
def getTruncatedCodons(self, is_homozygous, stops, ncodons):
'''return codons that are truncated due to stop codons.
Note that if two variants are present and there is
a homozygous variant causing a stop codon, both variants
will have the same stop codon registered automatically.
        Returns (stop_min, stop_max): the smallest and largest number of truncated codons across the two alleles.
'''
if len(stops) == 0:
return 0, 0
# one stop - one variant
if len(stops) == 1:
# if homozygous: both allelles have the stop
if is_homozygous:
stop_min = stop_max = ncodons - stops[0]
else: # wildtype still present
stop_min, stop_max = 0, ncodons - stops[0]
else:
stop_min, stop_max = ncodons - max(stops), ncodons - min(stops)
return max(0, stop_min), max(0, stop_max)
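    # Illustrative note (not part of the original script): with ncodons=100, a
    # single heterozygous stop at codon 10 gives (stop_min, stop_max) = (0, 90)
    # because one wildtype allele remains, the same stop in a homozygous
    # variant gives (90, 90), and two stops at codons 10 and 40 give (60, 90).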
def fixCDSTermini(self, variant_cds_seqs, contig, strand, start, end):
'''if the first codon in a sequence has been deleted, add
sequence from the UTR.
Not implemented yet - needs to take into account indels in
the UTR as well.
'''
return variant_cds_seqs
        for variant_cds_seq in variant_cds_seqs:
x = 0
# find first base that is not deleted
while x < len(variant_cds_seq) and variant_cds_seq[x] == "":
x += 1
# note: to be correct, this should take into account indels as
# well.
extra_seq = self.mFasta.getSequence(
contig, strand, start - x, start)
for xx in range(0, xx):
pass
def writeTable(self, outfile):
'''output summary for each transcript.
Output three tables;
1. mOutfileTranscripts: translation information
2. mOutfileSplicing: splicing
3. mOutfile: counts
'''
cds_counts = E.Counter()
splice_counts = E.Counter()
all_counts = E.Counter()
# TODO: the current code is not consistent when it comes
# to counting premature stop codons as it also includes
# the wild type as variant 0.
for transcript_id, exons in self.mExons.items():
###################################################
###################################################
###################################################
# sort out exons and get some chromosomal coordinates
exons = self.mExons[transcript_id]
exons.sort(key=lambda x: x.start)
cds_len = sum([x.end - x.start for x in exons])
ncodons = cds_len // 3
contig = exons[0].contig
lcontig = self.mFasta.getLength(contig)
strand = exons[0].strand
is_positive_strand = Genomics.IsPositiveStrand(strand)
# obtain cds sequences
reference_seq_na = GTF.toSequence(exons, self.mFasta).lower()
# obtain intron sequences
intron_intervals = GTF.toIntronIntervals(exons)
if not is_positive_strand:
intron_intervals = [(lcontig - end, lcontig - start)
for start, end in intron_intervals]
intron_intervals.reverse()
intron_sequences = [self.mFasta.getSequence(
contig, strand, x[0], x[1]).lower() for x in intron_intervals]
nintrons = len(intron_intervals)
is_seleno = transcript_id in self.mSeleno
# result variables - set to wildtype
all_genotype, all_nalleles, all_max_variants = "", 0, 0
cds_genotype, cds_nalleles, cds_max_variants = "", 0, 0
splice_genotype, splice_nalleles, splice_max_variants = "", 0, 0
cds_nvariant_positions, splice_nvariant_positions = 0, 0
variant_intron_seqs, splice_variants_per_position = [], []
variant_cds_seqs, cds_variants_per_position = [], []
exon2cds = []
if is_positive_strand:
frame = int(exons[0].frame)
cds_pos = frame
for x in exons:
exon2cds.append(cds_pos)
cds_pos += x.end - x.start
else:
frame = int(exons[-1].frame)
cds_pos = frame
for x in exons[::-1]:
exon2cds.append(cds_pos)
cds_pos += x.end - x.start
last_exon_start = exon2cds[-1]
exon2cds.append(cds_len)
if transcript_id in self.mTranscriptVariants:
variants = self.mTranscriptVariants[transcript_id]
E.debug("processing %s with %i cds effects and %i splice effects started" %
(transcript_id, len(variants.cds_variants), len(variants.splice_variants)))
# we should have some variants
assert len(variants.cds_variants) + \
len(variants.splice_variants) > 0
# correct for frame at start - truncate the reference_seq_na
if frame != 0:
E.debug("transcript_id %s - correcting frame %i" %
(transcript_id, frame))
reference_seq_na = reference_seq_na[frame:]
# all coordinates need to modified by this amount
offset = -frame
else:
offset = 0
reference_seq_aa = Genomics.translate(reference_seq_na,
is_seleno=is_seleno)
cds_nvariant_positions = len(variants.cds_variants)
splice_nvariant_positions = len(variants.splice_variants)
###################################################
###################################################
###################################################
# build coding sequence variants
if len(variants.cds_variants) > 0:
###################################################
###################################################
###################################################
# decide what variants to build
# 1. homozygous: 1 variant per position, all also flagged as homozygous
# 2. heterozygous + wildtype: only 1 variant per position, all flagged as heterozygous
# 3. heterozygous: 2 variants per position, but only if there is only one position modified
# 4: mixture: rest
cds_variants_per_position = self.buildCDSVariantsPerPosition(
transcript_id,
variants.cds_variants,
cds_len)
cds_genotype, cds_nalleles, cds_max_variants = self.getGenotype(
variants.cds_variants,
cds_variants_per_position,
cds_counts)
variant_cds_seqs = self.buildCDSVariants(transcript_id,
variants.cds_variants,
reference_seq_na,
offset,
cds_nalleles)
###################################################
###################################################
###################################################
# build intron variants
###################################################
###################################################
###################################################
# collect all intron sequences
if len(variants.splice_variants) > 0:
###################################################
###################################################
###################################################
# collect genotype and variants to build
splice_variants_per_position = self.buildIntronsVariantsPerPosition(
transcript_id,
variants.splice_variants,
intron_sequences)
splice_genotype, splice_nalleles, splice_max_variants = self.getGenotype(
variants.splice_variants,
splice_variants_per_position,
splice_counts)
variant_intron_seqs = self.buildIntronVariants(
transcript_id,
variants.splice_variants,
intron_sequences,
splice_nalleles)
###################################################
###################################################
###################################################
# collect overall genotype
all_genotype, all_nalleles, all_max_variants = self.getGenotype(
variants.cds_variants + variants.splice_variants,
numpy.concatenate(
(cds_variants_per_position, splice_variants_per_position)),
all_counts)
###################################################
###################################################
###################################################
# add the wild type at top of both cds and intron variants
#
# This is necessary so that stop codons originally present
# in the sequence will be taken into account.
#
# Note that this invalidates the cds_stop_min below.
#
# A better way would be to merge variants and only
            # add the wild type if there is only one variant allele.
#
# Then, treat the wildtype separately to get numbers for
# for the wildtype.
if len(variant_cds_seqs) == 0:
variant_cds_seqs = [list(reference_seq_na),
list(reference_seq_na)]
elif len(variant_cds_seqs) == 1:
if cds_genotype == "O":
# is homozygous - duplicate allele
variant_cds_seqs.append(variant_cds_seqs[0])
else:
# add wildtype
variant_cds_seqs[0:0] = [list(reference_seq_na), ]
if len(variant_intron_seqs) == 0:
variant_intron_seqs = [[None for x in range(nintrons)],
[None for x in range(nintrons)]]
elif len(variant_intron_seqs) == 1:
if splice_genotype == "O":
# is homozygous - duplicate allele
variant_intron_seqs.append(variant_intron_seqs[0])
else:
# add wildtype
variant_intron_seqs[0:0] = [
[None for x in range(nintrons)], ]
assert len(variant_cds_seqs) == 2
assert len(variant_intron_seqs) == 2
###################################################
###################################################
###################################################
# output information on splice/cds variants per transcript
# output also the wild type (variant_id = 0)
###################################################
cds_stops, splice_stops = [], []
for variant_id, variant_seq in enumerate(variant_intron_seqs):
variant_result = self.countEffectsOnSplicing(variant_seq,
intron_sequences)
self.mOutfileSplicing.write("%s\t%i\t%s\n" %
(transcript_id,
variant_id,
"\t".join(map(str, variant_result))))
splice_stops.append(exon2cds[variant_result.last_exon] // 3)
# estimate effect on protein coding sequence for each variant and
# output
for variant_id, variant_seq in enumerate(variant_cds_seqs):
variant_result = countEffectsOnTranscript(variant_seq,
reference_seq_na,
is_seleno=is_seleno)
s = "".join(variant_seq)
self.mOutfileTranscripts.write(
"%s\t%i\t%i\t%s\t%s\t%s\n" %
(transcript_id,
variant_id,
last_exon_start,
"\t".join(map(str, variant_result)),
"".join(s),
Genomics.translate(s, is_seleno=is_seleno),
))
cds_stops.append(variant_result.first_stop)
###################################################
###################################################
###################################################
# compute the shortest transcript variants
# due to splicing and cds changes separately and
# combined.
###################################################
if splice_nalleles > 0:
splice_stop_min, splice_stop_max = \
self.getTruncatedCodons(
splice_genotype == "O", splice_stops, ncodons)
else:
splice_stop_min, splice_stop_max = 0, 0
if cds_nalleles > 0:
cds_stop_min, cds_stop_max = \
self.getTruncatedCodons(
cds_genotype == "O", cds_stops, ncodons)
else:
cds_stop_min, cds_stop_max = 0, 0
# combine stops between cds and splice variants
# the two variants will have the overall maxima
all_stop_min, all_stop_max = (max(splice_stop_min, cds_stop_min),
max(splice_stop_max, cds_stop_max))
###################################################
###################################################
###################################################
# output stats per transcript
###################################################
outfile.write("%s\n" % "\t".join((
transcript_id,
"%i" % cds_len,
"%i" % ncodons,
"%i" % last_exon_start,
"%i" % cds_max_variants,
"%i" % cds_nvariant_positions,
"%s" % cds_genotype,
"%i" % cds_nalleles,
"%i" % cds_stop_min,
"%i" % cds_stop_max,
"%i" % splice_max_variants,
"%i" % splice_nvariant_positions,
"%s" % splice_genotype,
"%i" % splice_nalleles,
"%i" % splice_stop_min,
"%i" % splice_stop_max,
"%i" % all_max_variants,
"%i" % (cds_nvariant_positions + splice_nvariant_positions),
"%s" % all_genotype,
"%i" % all_nalleles,
"%i" % all_stop_min,
"%i" % all_stop_max,
)))
E.debug("finished processing %s with %i cds effects and %i splice effects" %
(transcript_id, cds_nvariant_positions, splice_nvariant_positions))
E.info("cds counts: %s" % (str(cds_counts)))
E.info("splice counts: %s" % (str(splice_counts)))
E.info("combined counts: %s" % (str(all_counts)))
class CounterContigs(Counter):
'''count variants across the genome per chromosome.'''
mHeader = ["genome_%s" % x for x in ("ntranscripts", "nused", "pos")]
def __init__(self, *args, **kwargs):
Counter.__init__(self, *args, **kwargs)
# create counter
self.mCountsSNPs = collections.defaultdict(int)
self.mCountsIndels = collections.defaultdict(int)
def update(self, snp):
'''update with snp.'''
if snp.reference_base == "*":
self.mCountsIndels[snp.chromosome] += 1
else:
self.mCountsSNPs[snp.chromosome] += 1
def writeTable(self, outfile):
outfile.write("contig\tsize\tnindels\tnsnps\n")
total_snps, total_indels, total_length = 0, 0, 0
for key in sorted(self.mCountsSNPs.keys()):
total_snps += self.mCountsSNPs[key]
total_indels += self.mCountsIndels[key]
total_length += self.mFasta.getLength(key)
outfile.write("\t".join((key,
"%i" % self.mFasta.getLength(key),
"%i" % self.mCountsIndels[key],
"%i" % self.mCountsSNPs[key])) + "\n")
outfile.write("\t".join(("total",
"%i" % total_length,
"%i" % total_indels,
"%i" % total_snps)) + "\n")
def main(argv=None):
"""script main.
parses command line options in sys.argv, unless *argv* is given.
"""
if not argv:
argv = sys.argv
# setup command line parser
parser = E.OptionParser(
version="%prog version: $Id: snp2counts.py 2872 2010-03-03 10:21:13Z andreas $", usage=globals()["__doc__"])
parser.add_option("-g", "--genome-file", dest="genome_file", type="string",
help="filename with genome [default=%default].")
parser.add_option("-f", "--exons-file", dest="filename_exons", type="string",
help="filename with exon information (gtf formatted file) [default=%default].")
parser.add_option("-s", "--seleno-tsv-file", dest="filename_seleno", type="string",
help="filename of a list of transcript ids that are selenoproteins [default=%default].")
parser.add_option("-c", "--vcf-file", dest="filename_vcf", type="string",
help="vcf file to parse [default=%default].")
parser.add_option("-m", "--module", dest="modules", type="choice", action="append",
choices=(
"gene-counts", "transcript-effects", "contig-counts"),
help="modules to apply [default=%default].")
parser.add_option("-i", "--input-format", dest="input_format", type="choice",
choices=("pileup", "vcf"),
help="input format [default=%default].")
parser.add_option("--vcf-sample", dest="vcf_sample", type="string",
help="sample id in vcf file to analyse [default=%default].")
parser.set_defaults(
genome_file=None,
filename_exons=None,
filename_seleno=None,
filename_vcf=None,
modules=[],
input_format="pileup",
vcf_sample=None,
)
# add common options (-h/--help, ...) and parse command line
(options, args) = E.Start(parser, argv=argv, add_output_options=True)
ninput, nskipped, noutput = 0, 0, 0
################################
if options.genome_file:
fasta = IndexedFasta.IndexedFasta(options.genome_file)
else:
fasta = None
if options.filename_seleno:
seleno = set(
IOTools.readList(IOTools.openFile(options.filename_seleno, "r")))
else:
seleno = set()
# setup iterator
if options.input_format == "pileup":
iterator = pysam.Pileup.iterate(options.stdin)
elif options.input_format == "vcf":
if not options.vcf_sample:
raise ValueError(
"vcf format requires sample id (--vcf-sample) to be set")
if not options.filename_vcf:
raise ValueError(
"reading from vcf requires vcf filename (--filename-vcf) to be set)")
iterator = pysam.Pileup.iterate_from_vcf(
options.filename_vcf, options.vcf_sample)
################################
modules = []
for module in options.modules:
if module == "gene-counts":
if not options.filename_exons:
raise ValueError(
"please supply exon information (--filename-exons)")
modules.append(CounterGenes(options.filename_exons, fasta=fasta))
elif module == "transcript-effects":
if not options.filename_exons:
raise ValueError(
"please supply exon information (--filename-exons)")
modules.append(CounterTranscripts(options.filename_exons, fasta=fasta,
pattern=options.output_filename_pattern,
seleno=seleno))
elif module == "contig-counts":
modules.append(CounterContigs(fasta=fasta))
options.stdout.write("\t".join([x.getHeader() for x in modules]) + "\n")
for snp in iterator:
ninput += 1
# translate chromosome according to fasta
if fasta:
snp = snp._replace(chromosome=fasta.getToken(snp.chromosome))
for module in modules:
module.update(snp)
# if ninput > 1000: break
for module in modules:
module.writeTable(options.stdout)
E.info("ninput=%i, noutput=%i, nskipped=%i" % (ninput, noutput, nskipped))
# write footer and output benchmark information.
E.Stop()
if __name__ == "__main__":
sys.exit(main(sys.argv))
|
the-stack_0_14052 | # coding=utf-8
__author__ = "Dimitrios Karkalousos"
from typing import Union
import torch
from torch import nn
from mridc import ifft2c, complex_mul, complex_conj
from .e2evn import SensitivityModel
from .rim.rim_block import RIMBlock
from ..data.transforms import center_crop_to_smallest
class CIRIM(nn.Module):
"""Cascades of RIM blocks."""
def __init__(
self,
recurrent_layer: str = "IndRNN",
conv_filters=None,
conv_kernels=None,
conv_dilations=None,
conv_bias=None,
recurrent_filters=None,
recurrent_kernels=None,
recurrent_dilations=None,
recurrent_bias=None,
depth: int = 2,
time_steps: int = 8,
conv_dim: int = 2,
loss_fn: Union[nn.Module, str] = "l1",
num_cascades: int = 1,
no_dc: bool = False,
keep_eta: bool = False,
use_sens_net: bool = False,
sens_chans: int = 8,
sens_pools: int = 4,
sens_normalize: bool = True,
sens_mask_type: str = "2D",
fft_type: str = "orthogonal",
output_type: str = "SENSE",
):
"""
Args:
recurrent_layer: Recurrent Layer selected from rnn_cells
conv_filters: Number of filters in the convolutional layers
conv_kernels: Kernel size in the convolutional layers
conv_dilations: Dilation in the convolutional layers
conv_bias: Whether to use bias in the convolutional layers
recurrent_filters: Number of filters in the recurrent layers
recurrent_kernels: Kernel size in the recurrent layers
recurrent_dilations: Dilation in the recurrent layers
recurrent_bias: Whether to use bias in the recurrent layers
depth: Number of layers in the network
time_steps: Number of time steps in the input
conv_dim: Dimension of the input
loss_fn: Loss function to use
num_cascades: Number of cascades
no_dc: Whether to remove the DC component
keep_eta: Whether to keep the eta term
use_sens_net: Whether to use the sensitivity network
sens_chans: Number of channels in the sensitivity network
sens_pools: Number of pools in the sensitivity network
sens_normalize: Whether to normalize the sensitivity network
sens_mask_type: Type of mask to use for the sensitivity network, 1D or 2D
fft_type: Type of FFT to use, data/orthogonal or numpy-like
output_type: Type of output to use, SENSE or RSS
"""
super(CIRIM, self).__init__()
# Initialize the cascades with RIM blocks
if recurrent_bias is None:
recurrent_bias = [True, True, False]
if recurrent_dilations is None:
recurrent_dilations = [1, 1, 0]
if recurrent_kernels is None:
recurrent_kernels = [1, 1, 0]
if recurrent_filters is None:
recurrent_filters = [64, 64, 0]
if conv_bias is None:
conv_bias = [True, True, False]
if conv_dilations is None:
conv_dilations = [1, 2, 1]
if conv_kernels is None:
conv_kernels = [5, 3, 3]
if conv_filters is None:
conv_filters = [64, 64, 2]
self.fft_type = fft_type
self.no_dc = no_dc
self.time_steps = time_steps
self.cascades = nn.ModuleList(
[
RIMBlock(
recurrent_layer=recurrent_layer,
conv_filters=conv_filters,
conv_kernels=conv_kernels,
conv_dilations=conv_dilations,
conv_bias=conv_bias,
recurrent_filters=recurrent_filters,
recurrent_kernels=recurrent_kernels,
recurrent_dilations=recurrent_dilations,
recurrent_bias=recurrent_bias,
depth=depth,
time_steps=self.time_steps,
conv_dim=conv_dim,
no_dc=self.no_dc,
fft_type=self.fft_type,
)
for _ in range(num_cascades)
]
)
# Initialize the sensitivity network if use_sens_net is True
self.use_sens_net = use_sens_net
if self.use_sens_net:
self.sens_net = SensitivityModel(
sens_chans, sens_pools, fft_type=self.fft_type, mask_type=sens_mask_type, normalize=sens_normalize
)
self.loss_fn = loss_fn
# Initialize data consistency term
self.dc_weight = nn.Parameter(torch.ones(1))
# Keep estimation through the cascades if keep_eta is True or re-estimate it if False.
self.keep_eta = keep_eta
# Initialize the output layer
self.output_type = output_type
# TODO: replace print with logger
print("No of parameters: {:,d}".format(self.get_num_params()))
def get_num_params(self):
"""
Get the number of parameters in the model.
Returns:
Number of parameters in the model.
"""
return sum(p.numel() for p in self.parameters() if p.requires_grad)
def forward(
self,
masked_kspace: torch.Tensor,
sense: torch.Tensor,
mask: torch.Tensor,
eta: torch.Tensor = None,
hx: torch.Tensor = None,
target: torch.Tensor = None,
max_value: float = 1.0,
sigma: float = 1.0,
accumulate_loss: bool = False,
) -> torch.Tensor:
"""
Forward pass of the network.
Args:
masked_kspace: torch.Tensor, shape [batch_size, n_coils, n_x, n_y, 2], masked kspace data
sense: torch.Tensor, shape [batch_size, n_coils, n_x, n_y, 2], coil sensitivity maps
mask: torch.Tensor, shape [1, 1, n_x, n_y, 1], sampling mask
eta: torch.Tensor, shape [batch_size, n_x, n_y, 2], initial guess for eta
hx: torch.Tensor, shape [batch_size, n_x, n_y, 2], initial guess for hx
target: torch.Tensor, shape [batch_size, n_x, n_y, 2], target data
max_value: float, maximum value of the data
sigma: float, noise level
accumulate_loss: bool, accumulate loss or not
Returns:
pred: torch.Tensor, shape [batch_size, n_x, n_y], magnitude of the reconstructed image (returned when accumulate_loss is False)
loss: torch.Tensor, loss accumulated over cascades and time-steps (yielded when accumulate_loss is True)
"""
sense = self.sens_net(masked_kspace, mask) if self.use_sens_net and self.sens_net is not None else sense
pred = masked_kspace.clone()
# Accumulate loss over cascades
cascade_time_steps_loss = []
for i, cascade in enumerate(self.cascades):
# Forward pass through cascade
pred, hx = cascade(
pred, masked_kspace, sense, mask, eta, hx, sigma, keep_eta=False if i == 0 else self.keep_eta
)
# Accumulate loss over time steps
if accumulate_loss:
time_steps_loss = []
for p in pred:
if self.no_dc is False and self.keep_eta is False:
p = ifft2c(p, fft_type=self.fft_type)
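# SENSE combines coils with the conjugate sensitivity maps; RSS takes the root-sum-of-squares over the coil dimension.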
if self.output_type == "SENSE":
p = complex_mul(p, complex_conj(sense)).sum(dim=1)
elif self.output_type == "RSS":
p = torch.sqrt((p ** 2).sum(dim=1))
else:
raise ValueError("Output type not supported.")
output = torch.view_as_complex(p)
target, output = center_crop_to_smallest(target, output)
loss = (
self.loss_fn(output.unsqueeze(1), target.unsqueeze(1), data_range=max_value) # type: ignore
if "ssim" in str(self.loss_fn).lower()
else self.loss_fn(output, target) # type: ignore
)
time_steps_loss.append(loss)
# Add weighted loss for each cascade. Loss is weighted for total number of time-steps on range 0-1.
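# torch.logspace(-1, 0, steps=time_steps) yields weights growing geometrically from 0.1 to 1.0, so later time-steps contribute more.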
_loss = [
x * torch.logspace(-1, 0, steps=self.time_steps).to(time_steps_loss[0]) for x in time_steps_loss
]
# Take average of all time-steps loss
cascade_time_steps_loss.append(sum(sum(_loss) / self.time_steps)) # type: ignore
# Take average of all cascades loss
if accumulate_loss:
loss = sum(list(cascade_time_steps_loss)) / len(self.cascades)
yield loss
else:
if isinstance(pred, list):
# Use the prediction of the last time-step.
pred = pred[-1].detach()
if self.no_dc is False and self.keep_eta is False:
pred = ifft2c(pred, fft_type=self.fft_type)
if self.output_type == "SENSE":
pred = complex_mul(pred, complex_conj(sense)).sum(dim=1)
elif self.output_type == "RSS":
pred = torch.sqrt((pred ** 2).sum(dim=1))
else:
raise ValueError("Output type not supported.")
pred = torch.view_as_complex(pred)
pred = torch.abs(pred / torch.max(torch.abs(pred)))
return pred
def inference(
self,
masked_kspace: torch.Tensor,
sense: torch.Tensor,
mask: torch.Tensor,
eta: torch.Tensor = None,
hx: torch.Tensor = None,
sigma: float = 1.0,
accumulate_estimates: bool = False,
) -> torch.Tensor:
"""
Inference step of the model.
Args:
masked_kspace: torch.Tensor, shape [batch_size, n_coils, n_x, n_y, 2], masked kspace data
sense: torch.Tensor, shape [batch_size, n_coils, n_x, n_y, 2], coil sensitivity maps
mask: torch.Tensor, shape [1, 1, n_x, n_y, 1], sampling mask
eta: torch.Tensor, shape [batch_size, n_x, n_y, 2], initial guess for eta
hx: torch.Tensor, shape [batch_size, n_x, n_y, 2], initial guess for hx
sigma: float, noise level
accumulate_estimates: bool, if True, accumulate estimates for all time-steps
Returns
-------
pred: reconstructed image estimate(s); a list of per-cascade estimates is yielded when accumulate_estimates is True,
otherwise a complex-valued torch.Tensor of shape [batch_size, n_x, n_y] is returned
"""
sense = self.sens_net(masked_kspace, mask) if self.use_sens_net and self.sens_net is not None else sense
preds = []
pred = masked_kspace.clone()
for i, cascade in enumerate(self.cascades):
pred, hx = cascade(
pred, masked_kspace, sense, mask, eta, hx, sigma, keep_eta=False if i == 0 else self.keep_eta
)
if self.no_dc is False and self.keep_eta is False:
output = []
for p in pred:
p = ifft2c(p, fft_type=self.fft_type)
if self.output_type == "SENSE":
p = complex_mul(p, complex_conj(sense)).sum(dim=1)
elif self.output_type == "RSS":
p = torch.sqrt((p ** 2).sum(dim=1))
else:
raise ValueError("Output type not supported.")
output.append(p)
pred = output
if accumulate_estimates:
preds.append(pred)
pred = pred[-1].detach()
if accumulate_estimates:
yield preds
else:
return torch.view_as_complex(pred)
|
the-stack_0_14053 | #!/usr/bin/env python
##############################################################################
# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
'''This file implements the system bandwidth test runner.
It has two parts: the first runs the test script, the second is the
algorithm that decides when a bottleneck has been reached.
The test uses yardstick as the tool that drives the measurements.'''
import os
import time
import uuid
import json
import utils.logger as log
from utils.parser import Parser as conf_parser
import utils.env_prepare.stack_prepare as stack_prepare
import testsuites.posca.testcase_dashboard.system_bandwidth as DashBoard
import utils.infra_setup.runner.docker_env as docker_env
# --------------------------------------------------
# logging configuration
# --------------------------------------------------
LOG = log.Logger(__name__).getLogger()
test_dict = {
"action": "runTestCase",
"args": {
"opts": {
"task-args": {}
},
"testcase": "netperf_bottlenecks"
}
}
testfile = os.path.basename(__file__)
testcase, file_format = os.path.splitext(testfile)
def env_pre(con_dic):
LOG.info("yardstick environment prepare!")
stack_prepare._prepare_env_daemon(True)
def config_to_result(test_config, test_result):
testdata = {}
parser_result = test_result["benchmark"]["data"]
test_result.update(test_config)
test_result.update(parser_result)
test_result["throughput"] = float(test_result["throughput"])
test_result["remote_cpu_util"] = float(test_result["remote_cpu_util"])
test_result["local_cpu_util"] = float(test_result["local_cpu_util"])
test_result["mean_latency"] = float(test_result["mean_latency"])
testdata["data_body"] = test_result
testdata["testcase"] = testcase
return testdata
def testcase_parser(out_file="yardstick.out", **parameter_info):
cmd = ('yardstick task start /home/opnfv/repos/yardstick/'
'samples/netperf_bottlenecks.yaml --output-file ' + out_file)
cmd = cmd + " --task-args " + '"' + str(parameter_info) + '"'
LOG.info("yardstick test cmd is: %s" % cmd)
return cmd
def do_test(test_config, Use_Dashboard, context_conf):
yardstick_container = docker_env.yardstick_info['container']
out_file = ("/tmp/yardstick_" + str(uuid.uuid4()) + ".out")
cmd = testcase_parser(out_file=out_file, **test_config)
stdout = docker_env.docker_exec_cmd(yardstick_container, cmd)
LOG.info(stdout)
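# Poll the yardstick output file every 2 seconds (up to ~2 minutes) until the task reports success (status 1) or failure (status 2).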
loop_value = 0
while loop_value < 60:
time.sleep(2)
loop_value = loop_value + 1
with open(out_file) as f:
data = json.load(f)
if data["status"] == 1:
LOG.info("yardstick run success")
break
elif data["status"] == 2:
LOG.error("yardstick error exit")
exit()
save_data = config_to_result(test_config, data['result'][1])
if Use_Dashboard is True:
DashBoard.dashboard_send_data(context_conf, save_data)
return save_data["data_body"]
def run(test_config):
con_dic = test_config["load_manager"]
Use_Dashboard = False
env_pre(None)
if test_config["contexts"]["yardstick_ip"] is None:
con_dic["contexts"]["yardstick_ip"] =\
conf_parser.ip_parser("yardstick_test_ip")
if "dashboard" in test_config["contexts"].keys():
if test_config["contexts"]["dashboard_ip"] is None:
test_config["contexts"]["dashboard_ip"] =\
conf_parser.ip_parser("dashboard")
LOG.info("Create Dashboard data")
Use_Dashboard = True
DashBoard.dashboard_system_bandwidth(test_config["contexts"])
data = {}
rx_pkt_a = con_dic['scenarios']['rx_pkt_sizes'].split(',')
tx_pkt_a = con_dic['scenarios']['tx_pkt_sizes'].split(',')
data["rx_pkt_sizes"] = rx_pkt_a
data["tx_pkt_sizes"] = tx_pkt_a
con_dic["result_file"] = os.path.dirname(
os.path.abspath(__file__)) + "/test_case/result"
cur_role_result = 1
pre_role_result = 1
pre_reply = {}
data_return = {}
data_max = {}
data_return["throughput"] = 1
for test_x in data["tx_pkt_sizes"]:
data_max["throughput"] = 1
bandwidth_tmp = 1
for test_y in data["rx_pkt_sizes"]:
case_config = {
"tx_msg_size": float(test_x),
"rx_msg_size": float(test_y),
"test_time": con_dic['scenarios']['test_times'],
"pod_info": conf_parser.bottlenecks_config["pod_info"]
}
data_reply = do_test(case_config, Use_Dashboard,
test_config["contexts"])
conf_parser.result_to_file(data_reply, test_config["out_file"])
bandwidth = data_reply["throughput"]
if (data_max["throughput"] < bandwidth):
data_max = data_reply
if (abs(bandwidth_tmp - bandwidth) / bandwidth_tmp < 0.025):
LOG.info("this group of data has reached top output")
break
else:
pre_reply = data_reply
bandwidth_tmp = bandwidth
cur_role_result = float(pre_reply["throughput"])
if (abs(pre_role_result - cur_role_result) / pre_role_result < 0.025):
LOG.info("The performance increases slowly")
if data_return["throughput"] < data_max["throughput"]:
data_return = data_max
pre_role_result = cur_role_result
LOG.info("Find bottlenecks of this config")
LOG.info("The max data is %d", data_return["throughput"])
return data_return
|
the-stack_0_14054 | # -*- coding: utf-8 -*-
import unittest
from openprocurement.api.constants import SANDBOX_MODE
from openprocurement.api.tests.base import snitch
from openprocurement.tender.belowthreshold.tests.base import test_organization
from openprocurement.tender.belowthreshold.tests.contract import (
TenderContractResourceTestMixin,
TenderContractDocumentResourceTestMixin,
)
from openprocurement.tender.limited.tests.base import (
BaseTenderContentWebTest,
test_lots,
test_tender_data,
test_tender_negotiation_data,
test_tender_negotiation_quick_data,
)
from openprocurement.tender.limited.tests.contract_blanks import (
# TenderNegotiationQuickAccelerationTest
create_tender_contract_negotiation_quick,
# TenderNegotiationLot2ContractResourceTest
sign_second_contract,
create_two_contract,
# TenderNegotiationLotContractResourceTest
lot_items,
lot_award_id_change_is_not_allowed,
activate_contract_cancelled_lot,
# TenderNegotiationContractResourceTest
patch_tender_negotiation_contract,
tender_negotiation_contract_signature_date,
items,
# TenderContractResourceTest
create_tender_contract,
patch_tender_contract,
tender_contract_signature_date,
award_id_change_is_not_allowed,
create_tender_contract_document,
patch_tender_contract_document,
put_tender_contract_document,
)
from openprocurement.tender.belowthreshold.tests.contract_blanks import (
patch_tender_contract_value_vat_not_included,
patch_tender_contract_value,
)
class TenderContractResourceTest(BaseTenderContentWebTest, TenderContractResourceTestMixin):
initial_status = "active"
initial_data = test_tender_data
initial_bids = None # test_bids
def create_award(self):
# Create award
response = self.app.post_json(
"/tenders/{}/awards?acc_token={}".format(self.tender_id, self.tender_token),
{
"data": {
"suppliers": [test_organization],
"status": "pending",
"qualified": True,
"value": {"amount": 469, "currency": "UAH", "valueAddedTaxIncluded": True},
}
},
)
award = response.json["data"]
self.award_id = award["id"]
response = self.app.patch_json(
"/tenders/{}/awards/{}?acc_token={}".format(self.tender_id, self.award_id, self.tender_token),
{"data": {"status": "active"}},
)
def setUp(self):
super(TenderContractResourceTest, self).setUp()
self.create_award()
test_create_tender_contract = snitch(create_tender_contract)
test_patch_tender_contract = snitch(patch_tender_contract)
test_patch_tender_contract_value = snitch(patch_tender_contract_value)
test_tender_contract_signature_date = snitch(tender_contract_signature_date)
test_award_id_change_is_not_allowed = snitch(award_id_change_is_not_allowed)
class TenderContractVATNotIncludedResourceTest(BaseTenderContentWebTest, TenderContractResourceTestMixin):
initial_status = "active"
initial_data = test_tender_data
initial_bids = None
def create_award(self):
response = self.app.post_json(
"/tenders/{}/awards?acc_token={}".format(self.tender_id, self.tender_token),
{
"data": {
"suppliers": [test_organization],
"status": "pending",
"qualified": True,
"value": {"amount": 469, "currency": "UAH", "valueAddedTaxIncluded": False},
}
},
)
self.award_id = response.json["data"]["id"]
self.app.patch_json(
"/tenders/{}/awards/{}?acc_token={}".format(self.tender_id, self.award_id, self.tender_token),
{"data": {"status": "active"}},
)
def setUp(self):
super(TenderContractVATNotIncludedResourceTest, self).setUp()
self.create_award()
test_patch_tender_contract_value_vat_not_included = snitch(patch_tender_contract_value_vat_not_included)
class TenderNegotiationContractResourceTest(TenderContractResourceTest):
initial_data = test_tender_negotiation_data
stand_still_period_days = 10
test_patch_tender_contract = snitch(patch_tender_negotiation_contract)
test_patch_tender_contract_value = snitch(patch_tender_contract_value)
test_tender_contract_signature_date = snitch(tender_negotiation_contract_signature_date)
test_items = snitch(items)
class TenderNegotiationContractVATNotIncludedResourceTest(TenderContractVATNotIncludedResourceTest):
initial_data = test_tender_negotiation_data
class TenderNegotiationLotContractResourceTest(TenderNegotiationContractResourceTest):
initial_data = test_tender_negotiation_data
stand_still_period_days = 10
def create_award(self):
self.app.patch_json(
"/tenders/{}?acc_token={}".format(self.tender_id, self.tender_token),
{"data": {"items": self.initial_data["items"]}},
)
# create lot
response = self.app.post_json(
"/tenders/{}/lots?acc_token={}".format(self.tender_id, self.tender_token), {"data": test_lots[0]}
)
self.assertEqual(response.status, "201 Created")
self.assertEqual(response.content_type, "application/json")
lot1 = response.json["data"]
self.lot1 = lot1
self.app.patch_json(
"/tenders/{}?acc_token={}".format(self.tender_id, self.tender_token),
{"data": {"items": [{"relatedLot": lot1["id"]}]}},
)
# Create award
response = self.app.post_json(
"/tenders/{}/awards?acc_token={}".format(self.tender_id, self.tender_token),
{
"data": {
"suppliers": [test_organization],
"status": "pending",
"qualified": True,
"value": {"amount": 469, "currency": "UAH", "valueAddedTaxIncluded": True},
"lotID": lot1["id"],
}
},
)
award = response.json["data"]
self.award_id = award["id"]
response = self.app.patch_json(
"/tenders/{}/awards/{}?acc_token={}".format(self.tender_id, self.award_id, self.tender_token),
{"data": {"status": "active"}},
)
test_items = snitch(lot_items)
test_award_id_change_is_not_allowed = snitch(lot_award_id_change_is_not_allowed)
test_activate_contract_cancelled_lot = snitch(activate_contract_cancelled_lot)
class TenderNegotiationLot2ContractResourceTest(BaseTenderContentWebTest):
initial_data = test_tender_negotiation_data
stand_still_period_days = 10
def setUp(self):
super(TenderNegotiationLot2ContractResourceTest, self).setUp()
self.create_award()
def create_award(self):
self.app.patch_json(
"/tenders/{}?acc_token={}".format(self.tender_id, self.tender_token),
{"data": {"items": self.initial_data["items"] * 2}},
)
# create lot
response = self.app.post_json(
"/tenders/{}/lots?acc_token={}".format(self.tender_id, self.tender_token), {"data": test_lots[0]}
)
self.assertEqual(response.status, "201 Created")
self.assertEqual(response.content_type, "application/json")
lot1 = response.json["data"]
self.lot1 = lot1
response = self.app.post_json(
"/tenders/{}/lots?acc_token={}".format(self.tender_id, self.tender_token), {"data": test_lots[0]}
)
self.assertEqual(response.status, "201 Created")
self.assertEqual(response.content_type, "application/json")
lot2 = response.json["data"]
self.lot2 = lot2
self.app.patch_json(
"/tenders/{}?acc_token={}".format(self.tender_id, self.tender_token),
{"data": {"items": [{"relatedLot": lot1["id"]}, {"relatedLot": lot2["id"]}]}},
)
# Create award
response = self.app.post_json(
"/tenders/{}/awards?acc_token={}".format(self.tender_id, self.tender_token),
{
"data": {
"suppliers": [test_organization],
"status": "pending",
"qualified": True,
"value": {"amount": 469, "currency": "UAH", "valueAddedTaxIncluded": True},
"lotID": lot1["id"],
}
},
)
award = response.json["data"]
self.award1_id = award["id"]
response = self.app.patch_json(
"/tenders/{}/awards/{}?acc_token={}".format(self.tender_id, self.award1_id, self.tender_token),
{"data": {"status": "active"}},
)
# Create another award
response = self.app.post_json(
"/tenders/{}/awards?acc_token={}".format(self.tender_id, self.tender_token),
{
"data": {
"suppliers": [test_organization],
"status": "pending",
"qualified": True,
"value": {"amount": 469, "currency": "UAH", "valueAddedTaxIncluded": True},
"lotID": lot2["id"],
}
},
)
award = response.json["data"]
self.award2_id = award["id"]
self.app.patch_json(
"/tenders/{}/awards/{}?acc_token={}".format(self.tender_id, self.award2_id, self.tender_token),
{"data": {"status": "active"}},
)
test_sign_second_contract = snitch(sign_second_contract)
test_create_two_contract = snitch(create_two_contract)
class TenderNegotiationQuickContractResourceTest(TenderNegotiationContractResourceTest):
initial_data = test_tender_negotiation_quick_data
stand_still_period_days = 5
class TenderNegotiationQuickLotContractResourceTest(TenderNegotiationLotContractResourceTest):
initial_data = test_tender_negotiation_quick_data
stand_still_period_days = 5
class TenderNegotiationQuickAccelerationTest(BaseTenderContentWebTest):
initial_data = test_tender_negotiation_quick_data
stand_still_period_days = 5
accelerator = "quick,accelerator=172800" # 5 days=432000 sec; 432000/172800=2.5 sec
time_sleep_in_sec = 3 # time which reduced
def create_award(self):
# Create award
response = self.app.post_json(
"/tenders/{}/awards?acc_token={}".format(self.tender_id, self.tender_token),
{"data": {"suppliers": [test_organization], "status": "pending"}},
)
award = response.json["data"]
self.award_id = award["id"]
self.app.patch_json(
"/tenders/{}/awards/{}?acc_token={}".format(self.tender_id, self.award_id, self.tender_token),
{
"data": {
"status": "active",
"qualified": True,
"value": {"amount": 469, "currency": "UAH", "valueAddedTaxIncluded": True},
}
},
)
def setUp(self):
super(TenderNegotiationQuickAccelerationTest, self).setUp()
if SANDBOX_MODE:
response = self.app.patch_json(
"/tenders/{}?acc_token={}".format(self.tender_id, self.tender_token),
{"data": {"procurementMethodDetails": self.accelerator}},
)
self.assertEqual(response.status, "200 OK")
self.create_award()
test_create_tender_contract_negotiation_quick = snitch(create_tender_contract_negotiation_quick)
class TenderNegotiationQuickLotAccelerationTest(TenderNegotiationQuickAccelerationTest):
initial_data = test_tender_negotiation_quick_data
stand_still_period_days = 5
accelerator = "quick,accelerator=172800" # 5 days=432000 sec; 432000/172800=2.5 sec
time_sleep_in_sec = 3 # time which reduced
def create_award(self):
self.app.patch_json(
"/tenders/{}?acc_token={}".format(self.tender_id, self.tender_token),
{"data": {"items": self.initial_data["items"] * 2}},
)
# create lot
response = self.app.post_json(
"/tenders/{}/lots?acc_token={}".format(self.tender_id, self.tender_token), {"data": test_lots[0]}
)
self.assertEqual(response.status, "201 Created")
self.assertEqual(response.content_type, "application/json")
lot1 = response.json["data"]
self.lot1 = lot1
self.app.patch_json(
"/tenders/{}?acc_token={}".format(self.tender_id, self.tender_token),
{"data": {"items": [{"relatedLot": lot1["id"]}]}},
)
# Create award
response = self.app.post_json(
"/tenders/{}/awards?acc_token={}".format(self.tender_id, self.tender_token),
{
"data": {
"suppliers": [test_organization],
"status": "pending",
"qualified": True,
"value": {"amount": 469, "currency": "UAH", "valueAddedTaxIncluded": True},
"lotID": lot1["id"],
}
},
)
award = response.json["data"]
self.award_id = award["id"]
response = self.app.patch_json(
"/tenders/{}/awards/{}?acc_token={}".format(self.tender_id, self.award_id, self.tender_token),
{"data": {"status": "active"}},
)
class TenderNegotiationAccelerationTest(TenderNegotiationQuickAccelerationTest):
stand_still_period_days = 10
time_sleep_in_sec = 6
class TenderContractDocumentResourceTest(BaseTenderContentWebTest, TenderContractDocumentResourceTestMixin):
initial_status = "active"
initial_bids = None
def create_award(self):
# Create award
response = self.app.post_json(
"/tenders/{}/awards?acc_token={}".format(self.tender_id, self.tender_token),
{"data": {"suppliers": [test_organization], "status": "pending"}},
)
award = response.json["data"]
self.award_id = award["id"]
self.app.patch_json(
"/tenders/{}/awards/{}?acc_token={}".format(self.tender_id, self.award_id, self.tender_token),
{
"data": {
"status": "active",
"qualified": True,
"value": {"amount": 469, "currency": "UAH", "valueAddedTaxIncluded": True},
}
},
)
def setUp(self):
super(TenderContractDocumentResourceTest, self).setUp()
self.create_award()
response = self.app.get("/tenders/{}/contracts".format(self.tender_id))
self.contract_id = response.json["data"][0]["id"]
test_create_tender_contract_document = snitch(create_tender_contract_document)
test_patch_tender_contract_document = snitch(patch_tender_contract_document)
test_put_tender_contract_document = snitch(put_tender_contract_document)
class TenderContractNegotiationDocumentResourceTest(TenderContractDocumentResourceTest):
initial_data = test_tender_negotiation_data
class TenderContractNegotiationLotDocumentResourceTest(TenderContractDocumentResourceTest):
initial_data = test_tender_negotiation_data
def create_award(self):
self.app.patch_json(
"/tenders/{}?acc_token={}".format(self.tender_id, self.tender_token),
{"data": {"items": self.initial_data["items"] * 2}},
)
# create lot
response = self.app.post_json(
"/tenders/{}/lots?acc_token={}".format(self.tender_id, self.tender_token), {"data": test_lots[0]}
)
self.assertEqual(response.status, "201 Created")
self.assertEqual(response.content_type, "application/json")
lot1 = response.json["data"]
self.lot1 = lot1
self.app.patch_json(
"/tenders/{}?acc_token={}".format(self.tender_id, self.tender_token),
{"data": {"items": [{"relatedLot": lot1["id"]}]}},
)
# Create award
response = self.app.post_json(
"/tenders/{}/awards?acc_token={}".format(self.tender_id, self.tender_token),
{
"data": {
"suppliers": [test_organization],
"status": "pending",
"qualified": True,
"value": {"amount": 469, "currency": "UAH", "valueAddedTaxIncluded": True},
"lotID": lot1["id"],
}
},
)
award = response.json["data"]
self.award_id = award["id"]
response = self.app.patch_json(
"/tenders/{}/awards/{}?acc_token={}".format(self.tender_id, self.award_id, self.tender_token),
{"data": {"status": "active"}},
)
class TenderContractNegotiationQuickDocumentResourceTest(TenderContractNegotiationDocumentResourceTest):
initial_data = test_tender_negotiation_quick_data
class TenderContractNegotiationQuickLotDocumentResourceTest(TenderContractNegotiationLotDocumentResourceTest):
initial_data = test_tender_negotiation_quick_data
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(TenderContractResourceTest))
suite.addTest(unittest.makeSuite(TenderContractDocumentResourceTest))
return suite
if __name__ == "__main__":
unittest.main(defaultTest="suite")
|
the-stack_0_14055 | """expand content column
Revision ID: 6dd556a95d2b
Revises: 599d269adf7f
Create Date: 2020-10-19 18:21:14.384304
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
# revision identifiers, used by Alembic.
revision = '6dd556a95d2b'
down_revision = '599d269adf7f'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.alter_column('twitch_command_text', 'content',
existing_type=mysql.VARCHAR(collation='utf8_bin', length=200),
type_=sa.String(length=4000, collation='utf8_bin'),
existing_nullable=True)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.alter_column('twitch_command_text', 'content',
existing_type=sa.String(length=4000, collation='utf8_bin'),
type_=mysql.VARCHAR(collation='utf8_bin', length=200),
existing_nullable=True)
# ### end Alembic commands ###
|
the-stack_0_14056 | import csv
import logging
import zipfile
from sqlalchemy.orm import sessionmaker
from opennem.db import db_connect
from opennem.utils.pipelines import check_spider_pipeline
logger = logging.getLogger(__name__)
class TableRecordSplitter(object):
@check_spider_pipeline
def process_item(self, item, spider):
if "tables" not in item:
logger.error(item)
raise Exception("No tables passed to pipeline")
tables = item["tables"]
table = tables.pop()
records = table["records"]
for record in records:
yield record
class UnzipSingleFilePipeline(object):
@check_spider_pipeline
def process_item(self, item, spider):
if "body_stream" not in item:
return item
rs = item["body_stream"]
content = ""
with zipfile.ZipFile(rs) as zf:
zip_files = zf.namelist()
if len(zip_files) == 1:
content = zf.open(zip_files[0])
return {"file_handle": content, **item}
if len(zip_files) != 1:
raise Exception(
"Zero or more than one file in zip file. Have {}".format(
len(zip_files)
)
)
class ReadStringHandle(object):
@check_spider_pipeline
def process_item(self, item, spider):
if "file_handle" not in item:
return item
fh = item["file_handle"]
content = fh.read()
if type(content) is bytes:
content = content.decode("utf-8")
return {"content": content, **item}
class ExtractCSV(object):
@check_spider_pipeline
def process_item(self, item, spider):
if not item:
logger.error("No item to parse")
return None
if "content" not in item:
logger.error("No content in item to parse")
return item
content = item["content"]
del item["content"]
item["tables"] = {}
table = {"name": None}
content_split = content.splitlines()
datacsv = csv.reader(content_split)
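# Rows are keyed by their first column: 'C' appears to carry control/metadata records,
# 'I' introduces a new table (name and field headers) and 'D' holds a data row for the current table.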
for row in datacsv:
if not row or type(row) is not list or len(row) < 1:
continue
record_type = row[0]
if record_type == "C":
# @TODO csv meta stored in table
if table["name"] is not None:
table_name = table["name"]
if table_name in item["tables"]:
item["tables"][table_name]["records"] += table[
"records"
]
else:
item["tables"][table_name] = table
elif record_type == "I":
if table["name"] is not None:
table_name = table["name"]
if table_name in item["tables"]:
item["tables"][table_name]["records"] += table[
"records"
]
else:
item["tables"][table_name] = table
table = {}
table["name"] = "{}_{}".format(row[1], row[2])
table["fields"] = fields = row[4:]
table["records"] = []
elif record_type == "D":
values = row[4:]
record = dict(zip(table["fields"], values))
table["records"].append(record)
return item
class DatabaseStore(object):
def __init__(self):
engine = db_connect()
self.session = sessionmaker(bind=engine)
|
the-stack_0_14059 | import argparse
import os
from PIL import Image
import numpy as np
import torch
from torchvision.transforms import Compose, Resize, ToTensor, Normalize
# import lung_segmentation.importAndProcess as iap
import importAndProcess as iap
from ..models import model as model
from ..models.unet_models import unet11, unet16
def save_mask(mask, out_dir, filename):
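# Collapse per-class scores to a label map via argmax, binarise to 0/255 and stack it into a 3-channel image before saving.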
filter = np.asarray(np.argmax(mask, axis=0))
filter = (filter > 0).astype('uint8')
filter = filter*255
filter = np.stack((filter, filter, filter))
pil = Image.fromarray(filter)
pil.save(f"{out_dir}/{filename}")
OUTDIR = '/home/dxtien/dxtien_research/COVID/CXR8_Segmentation'
parser = argparse.ArgumentParser()
parser.add_argument('img_path')
parser.add_argument('-m', '--model', choices=['unet11', 'unet16', 'resnet'], default='unet16')
parser.add_argument('-r', '--resume-from', help='resume from a specific savepoint', required=True)
parser.add_argument('-t', '--input-type', choices=['dicom', 'png'], default='dicom')
parser.add_argument('--non-montgomery', action='store_true', help='toggle this flag if you are working on a non-montgomery dataset')
parser.add_argument('--no-normalize', action='store_true')
args = parser.parse_args()
normalize = Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
if args.model == 'resnet':
model = model.segmentNetwork().cuda()
resize_dim = (400, 400)
convert_to = 'L'
elif args.model == 'unet11':
model = unet11(out_filters=3).cuda()
resize_dim = (224, 224)
convert_to = 'RGB'
elif args.model == 'unet16':
model = unet16(out_filters=3).cuda()
resize_dim = (224, 224)
convert_to = 'RGB'
if args.no_normalize:
transforms = Compose([Resize(resize_dim),ToTensor()])
else:
transforms = Compose([Resize(resize_dim),ToTensor(),normalize])
convert_to = 'RGB'
if args.input_type == 'dicom':
dataset = iap.DicomSegment(args.img_path, transforms, convert_to)
elif args.input_type == 'png' and args.non_montgomery:
#dataset = iap.LungTest(args.img_path, transforms, convert_to)
dataset = iap.MyLungTest(args.img_path, transforms, convert_to)
elif args.input_type == 'png':
dataset = iap.lungSegmentDataset(
os.path.join(args.img_path, "CXR_png"),
os.path.join(args.img_path, "ManualMask/leftMask/"),
os.path.join(args.img_path, "ManualMask/rightMask/"),
imagetransform=transforms,
labeltransform=Compose([Resize((224, 224)),ToTensor()]),
convert_to='RGB',
)
dataloader = torch.utils.data.DataLoader(dataset,batch_size=1,shuffle=False)
model = torch.nn.DataParallel(model)
model.load_state_dict(torch.load(args.resume_from))
#show = iap.visualize(dataset)
with torch.no_grad():
for i, sample in enumerate(dataloader):
img = torch.autograd.Variable(sample['image']).cuda()
mask = model(img)
# if not args.non_montgomery:
# show.ImageWithGround(i,True,True,save=True)
# show.ImageWithMask(i, sample['filename'][0], mask.squeeze().cpu().numpy(), True, True, save=True)
mask_np = mask.squeeze().cpu().numpy()
# the DataLoader wraps each filename in a batch-sized list
filename = sample['filename'][0]
filename = filename.split('/')[-1]
filename = filename[:-4]
save_mask(mask_np, OUTDIR, filename=filename+'_mask.png')
|
the-stack_0_14060 | # Copyright 2020-2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Define the grad rules of neural network related operations."""
import os
from mindspore.ops.primitive import constexpr
from mindspore.common.tensor import Tensor
from mindspore.ops.operations import nn_ops as nps
from .grad_base import bprop_getters
from .. import functional as F
from .. import operations as P
from ...common import dtype as mstype
from ..composite.multitype_ops.zeros_like_impl import zeros_like
from ..operations import _grad_ops as G
from ..operations import _inner_ops as inner
from ... import context
env_force_bprop_seq = os.getenv("ENV_FORCE_BPROP_SEQ")
@bprop_getters.register(P.BiasAdd)
def get_bprop_bias_add(self):
"""Grad definition for `BiasAdd` operation."""
bias_grad = G.BiasAddGrad(self.data_format)
def bprop(x, w, out, dout):
return dout, bias_grad(dout)
return bprop
@bprop_getters.register(P.Conv2D)
def get_bprop_conv2d(self):
"""Grad definition for `Conv2D` operation."""
input_grad = P.Conv2DBackpropInput(
self.out_channel, self.kernel_size, self.pad_mode, self.pad, self.pad_list, mode=self.mode,
dilation=self.dilation, stride=self.stride, group=self.group, data_format=self.format
)
filter_grad = G.Conv2DBackpropFilter(
self.out_channel, self.kernel_size, self.pad_mode, self.pad, self.pad_list, mode=self.mode,
dilation=self.dilation, stride=self.stride, group=self.group, data_format=self.format
)
get_shape = P.Shape()
def bprop(x, w, out, dout):
dx = input_grad(dout, w, get_shape(x))
if env_force_bprop_seq == '1':
x = F.depend(x, dx)
dw = filter_grad(dout, x, get_shape(w))
return dx, dw
return bprop
@bprop_getters.register(nps.Conv3D)
def get_bprop_conv3d(self):
"""Grad definition for `Conv3D` operation."""
input_grad = nps.Conv3DBackpropInput(
self.out_channel, self.kernel_size, self.mode, pad_mode=self.pad_mode,
pad=self.pad, stride=self.stride, dilation=self.dilation, group=self.group, data_format=self.data_format
)
filter_grad = G.Conv3DBackpropFilter(
self.out_channel, self.kernel_size, self.mode, pad_mode=self.pad_mode,
pad=self.pad, stride=self.stride, dilation=self.dilation, group=self.group, data_format=self.data_format
)
get_shape = P.Shape()
def bprop(x, w, out, dout):
dx = input_grad(w, dout, get_shape(x))
dw = filter_grad(x, dout, get_shape(w))
return dx, dw
return bprop
@bprop_getters.register(nps.Conv3DTranspose)
def get_bprop_conv3d_transpose(self):
"""Grad definition for `Conv3DTranspose` operation."""
stride = (self.stride[2], self.stride[3], self.stride[4])
dilation = (self.dilation[2], self.dilation[3], self.dilation[4])
input_grad = nps.Conv3D(
out_channel=self.in_channel, kernel_size=self.kernel_size, mode=self.mode, pad_mode="pad",
pad=self.pad_list, stride=stride, dilation=dilation, group=self.group, data_format=self.data_format
)
filter_grad = G.Conv3DBackpropFilter(
out_channel=self.in_channel, kernel_size=self.kernel_size, mode=self.mode, pad_mode="pad",
pad=self.pad_list, stride=self.stride, dilation=self.dilation, group=self.group, data_format=self.data_format
)
def bprop(x, w, out, dout):
dx = input_grad(dout, w)
dw = filter_grad(dout, x, F.shape(w))
return dx, dw, zeros_like(out)
return bprop
@bprop_getters.register(inner.ExtractImagePatches)
def get_bprop_extract_image_patches(self):
"""Grad definition for `ExtractImagePatches` operation."""
get_shape = P.Shape()
reshape = P.Reshape()
extract_image_patches = inner.ExtractImagePatches(ksizes=self.ksizes,
strides=self.strides,
rates=self.rates,
padding=self.padding)
concat = P.Concat(axis=-1)
expand_dims = P.ExpandDims()
scatter_nd = P.ScatterNd()
dtype = P.DType()
fill = P.Fill()
slice_op = P.Slice()
transpose = P.Transpose()
cast = P.Cast()
matmul = P.MatMul()
_, _, ksizes_row, ksizes_col = self.ksizes
def bprop(x, out, dout):
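# Map every input pixel index to each patch slot it lands in, scatter that map into a 0/1 matrix of
# shape (input_pixels, patch_slots), then matmul it with the reshaped output gradient to accumulate dx.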
x_shape = get_shape(x)
x_batch, x_depth, x_row, x_col = x_shape
x_indices_num = x_row * x_col + 1
x_idx = cast(F.tuple_to_array(range(1, x_indices_num)), mstype.float32)
x_idx = reshape(x_idx, (1, 1, x_row, x_col))
x_idx_patch = cast(extract_image_patches(x_idx), mstype.int32)
x_idx_patch = transpose(x_idx_patch, (0, 2, 3, 1))
out_shape = get_shape(out)
_, _, out_row, out_col = out_shape
out_indices_num = out_row * out_col * ksizes_row * ksizes_col
out_idx = F.tuple_to_array(range(out_indices_num))
out_idx = reshape(out_idx, (1, out_row, out_col, ksizes_row * ksizes_col))
idx_tensor = concat((expand_dims(x_idx_patch, -1), expand_dims(out_idx, -1)))
idx_tensor = reshape(idx_tensor, (-1, 2))
sp_shape = (x_indices_num, out_indices_num)
sp_tensor = scatter_nd(idx_tensor, fill(dtype(dout), (out_indices_num,), 1), sp_shape)
sp_tensor = slice_op(sp_tensor, (1, 0), (x_indices_num - 1, out_indices_num))
grad = transpose(dout, (0, 2, 3, 1))
grad = reshape(grad, (x_batch, out_row, out_col, ksizes_row, ksizes_col, x_depth))
grad = transpose(grad, (1, 2, 3, 4, 0, 5))
grad = reshape(grad, (-1, x_batch * x_depth))
jac = matmul(sp_tensor, grad)
dx = reshape(jac, (x_row, x_col, x_batch, x_depth))
dx = transpose(dx, (2, 3, 0, 1))
return (dx,)
return bprop
@bprop_getters.register(P.DepthwiseConv2dNative)
def get_bprop_depthwise_conv2d_native(self):
"""Grad definition for `DepthwiseConv2dNative` operation."""
input_grad = G.DepthwiseConv2dNativeBackpropInput(
self.channel_multiplier, self.kernel_size, self.pad_mode, self.pad, self.pad_list, self.mode, self.stride,
self.dilation, self.group
)
filter_grad = G.DepthwiseConv2dNativeBackpropFilter(
self.channel_multiplier, self.kernel_size, self.pad_mode, self.pad, self.pad_list, self.mode, self.stride,
self.dilation, self.group
)
get_shape = P.Shape()
def bprop(x, w, out, dout):
dx = input_grad(get_shape(x), w, dout)
if env_force_bprop_seq == '1':
x = F.depend(x, dx)
dw = filter_grad(x, get_shape(w), dout)
return dx, dw
return bprop
@bprop_getters.register(P.MaxPoolWithArgmax)
def get_bprop_max_pool_with_argmax(self):
"""Grad definition for `MaxPoolWithArgmax` operation."""
maxpool_grad = G.MaxPoolGradWithArgmax(
kernel_size=self.kernel_size,
strides=self.strides,
pad_mode=self.pad_mode)
def bprop(x, out, dout):
dx = maxpool_grad(x, dout[0], out[1])
return (dx,)
return bprop
@bprop_getters.register(G.MaxPoolGrad)
def get_bprop_max_pool_grad_grad(self):
"""Grad definition for `MaxPoolGrad` operation."""
maxpool_grad_grad = G.MaxPoolGradGrad(
kernel_size=self.kernel_size,
strides=self.strides,
pad_mode=self.pad_mode)
def bprop(x1, x2, grad, out, dout):
dx1 = zeros_like(x1)
dx2 = zeros_like(x2)
dgrad = maxpool_grad_grad(x1, x2, dout)
return (dx1, dx2, dgrad)
return bprop
@bprop_getters.register(G.MaxPoolGradGrad)
def get_bprop_max_pool_grad_grad_grad(self):
"""Grad definition for `MaxPoolGradGrad` operation."""
maxpool_grad = G.MaxPoolGrad(
kernel_size=self.kernel_size,
strides=self.strides,
pad_mode=self.pad_mode)
def bprop(x1, x2, grad, out, dout):
dx1 = zeros_like(x1)
dx2 = zeros_like(x2)
dgrad = maxpool_grad(x1, x2, dout)
return (dx1, dx2, dgrad)
return bprop
@bprop_getters.register(P.MaxPool)
def get_bprop_max_pool_grad(self):
"""Grad definition for `MaxPool` operation."""
maxpool_grad = G.MaxPoolGrad(
kernel_size=self.kernel_size,
strides=self.strides,
pad_mode=self.pad_mode,
data_format=self.format)
def bprop(x, out, dout):
dx = maxpool_grad(x, out, dout)
return (dx,)
return bprop
@bprop_getters.register(P.MaxPool3D)
def get_bprop_max_pool3d_grad(self):
"""Grad definition for `MaxPool3D` operation."""
max_pool3d_grad = G.MaxPool3DGrad(
kernel_size=self.kernel_size,
strides=self.strides,
pad_mode=self.pad_mode,
data_format=self.data_format)
def bprop(x, out, dout):
dx = max_pool3d_grad(x, out, dout)
return (dx,)
return bprop
@bprop_getters.register(G.MaxPool3DGrad)
def get_bprop_max_pool3d_grad_grad(self):
"""Grad definition for `MaxPool3Grad` operation."""
max_pool3d_grad_grad = G.MaxPool3DGradGrad(
kernel_size=self.kernel_size,
strides=self.strides,
pad_mode=self.pad_mode,
data_format=self.data_format)
def bprop(x, y, grad, out, dout):
dgrad = max_pool3d_grad_grad(x, y, dout)
return zeros_like(x), zeros_like(y), dgrad
return bprop
@bprop_getters.register(G.MaxPool3DGradGrad)
def get_bprop_max_pool3d_grad_grad_grad(self):
"""Grad definition for `MaxPool3GradGrad` operation."""
max_pool3d_grad = G.MaxPool3DGrad(
kernel_size=self.kernel_size,
strides=self.strides,
pad_mode=self.pad_mode,
data_format=self.data_format)
def bprop(x, y, grad, out, dout):
dgrad = max_pool3d_grad(x, y, dout)
return zeros_like(x), zeros_like(y), dgrad
return bprop
@bprop_getters.register(P.AvgPool)
def get_bprop_avg_pool_grad(self):
"""Grad definition for `AvgPool` operation."""
avgpool_grad = G.AvgPoolGrad(
kernel_size=self.kernel_size,
strides=self.strides,
pad_mode=self.pad_mode,
data_format=self.format)
def bprop(x, out, dout):
dx = avgpool_grad(x, out, dout)
return (dx,)
return bprop
@bprop_getters.register(P.DropoutGenMask)
def get_bprop_dropout_gen_mask(self):
"""Grad definition for `DropoutGenMask` operation."""
def bprop(shape, keep_prob, out, dout):
return (zeros_like(shape), zeros_like(keep_prob))
return bprop
@bprop_getters.register(P.DropoutDoMask)
def get_bprop_dropout_do_mask(self):
"""Grad definition for `DropoutDoMask` operation."""
do_mask = P.DropoutDoMask()
def bprop(x, y, keep_prob, out, dout):
return (do_mask(dout, y, keep_prob), zeros_like(y), zeros_like(keep_prob))
return bprop
@bprop_getters.register(P.Mish)
def get_bprop_mish(self):
"""Grad definition for `Mish` operation."""
tanh = P.Tanh()
tanh_grad = G.TanhGrad()
softplus = P.Softplus()
softplus_grad = G.SoftplusGrad()
def bprop(x, out, dout):
dx1 = tanh(softplus(x))
dx2 = softplus_grad(tanh_grad(dx1, x * dout), x)
dx = (dx1 * dout + dx2)
return (dx,)
return bprop
@bprop_getters.register(P.SeLU)
def get_bprop_selu(self):
"""Grad definition for `SeLU` operation."""
scale = 1.0507009873554804934193349852946
elu_grad = G.EluGrad()
def bprop(x, out, dout):
dx = elu_grad(dout, out) * scale
return (dx,)
return bprop
@bprop_getters.register(P.MulNoNan)
def get_bprop_mul_no_nan(self):
"""Grad definition for `MulNoNan` operation."""
mul_no_nan = P.MulNoNan()
reduce_sum = P.ReduceSum()
reshape = P.Reshape()
def bprop(x, y, out, dout):
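# Sum gradients over the broadcast axes so that dx and dy match the original shapes of x and y.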
x_shape = F.shape(x)
y_shape = F.shape(y)
dx = mul_no_nan(dout, y)
dy = mul_no_nan(x, dout)
broadcast_x, broadcast_y = F.broadcast_gradient_args(x_shape, y_shape)
if broadcast_x != ():
dx = reshape(reduce_sum(dx, broadcast_x), x_shape)
if broadcast_y != ():
dy = reshape(reduce_sum(dy, broadcast_y), y_shape)
return dx, dy
return bprop
@bprop_getters.register(P.ReLU)
def get_bprop_relu(self):
"""Grad definition for `ReLU` operation."""
input_grad = G.ReluGrad()
def bprop(x, out, dout):
dx = input_grad(dout, out)
return (dx,)
return bprop
@bprop_getters.register(G.ReluGrad)
def get_bprop_relu_grad(self):
"""Grad definition for `ReLUGrad` operation."""
input_grad = G.ReluGrad()
def bprop(grad, y, out, dout):
dgrad = input_grad(dout, y)
return dgrad, zeros_like(y)
return bprop
@bprop_getters.register(P.ReLU6)
def get_bprop_relu6(self):
"""Grad definition for `ReLU6` operation."""
input_grad = G.ReLU6Grad()
def bprop(x, out, dout):
dx = input_grad(dout, x)
return (dx,)
return bprop
@bprop_getters.register(P.ReLUV2)
def get_bprop_relu_v2(self):
"""Grad definition for `ReLUV2` operation."""
input_grad = G.ReluGradV2()
def bprop(x, out, dout):
mask = out[1]
dx = input_grad(dout[0], mask)
return (dx,)
return bprop
@bprop_getters.register(P.HSwish)
def get_bprop_hswish(self):
"""Grad definition for `HSwish` operation."""
input_grad = G.HSwishGrad()
def bprop(x, out, dout):
dx = input_grad(dout, x)
return (dx,)
return bprop
@bprop_getters.register(P.HSigmoid)
def get_bprop_hsigmoid(self):
"""Grad definition for `HSigmoid` operation."""
input_grad = G.HSigmoidGrad()
def bprop(x, out, dout):
dx = input_grad(dout, x)
return (dx,)
return bprop
@bprop_getters.register(P.Elu)
def get_bprop_elu(self):
"""Grad definition for `Elu` operation."""
input_grad = G.EluGrad()
def bprop(x, out, dout):
dx = input_grad(dout, out)
return (dx,)
return bprop
@bprop_getters.register(P.Sigmoid)
def get_bprop_sigmoid(self):
"""Grad definition for `Sigmoid` operation."""
input_grad = G.SigmoidGrad()
def bprop(x, out, dout):
dx = input_grad(out, dout)
return (dx,)
return bprop
@bprop_getters.register(G.SigmoidGrad)
def get_bprop_sigmoid_grad(self):
"""Grad definition for `SigmoidGrad` operation."""
sigmoid_grad = G.SigmoidGrad()
def bprop(y, grad, out, dout):
dy = dout * grad * (1. - 2 * y)
dgrad = sigmoid_grad(y, dout)
return dy, dgrad
return bprop
@constexpr
def _get_transpose_axis(x_shp, axis):
rank = len(x_shp)
if axis < 0:
axis += rank
reverse_axis = [i for i in range(rank)]
reverse_axis[axis] = rank - 1
reverse_axis[rank - 1] = axis
return tuple(reverse_axis)
@bprop_getters.register(P.Softmax)
def get_bprop_softmax(self):
"""Grad definition for `Softmax` operation."""
sum_func = P.ReduceSum(keep_dims=True)
sub = P.Sub()
mul = P.Mul()
get_shape = P.Shape()
transpose = P.Transpose()
axis = self.axis
if not isinstance(axis, int):
axis = axis[0]
def bprop(x, out, dout):
# dx = (dout - sum(dout * out)) * out
# This formula is correct only when the `axis` is the last dimension.
# In order to support the scenario where the `axis` is other values,
# we transpose the data of the `axis` dimension to the last dimension for calculation,
# and then transpose it back after the calculation.
reverse_axis = _get_transpose_axis(get_shape(x), axis)
out = transpose(out, reverse_axis)
dout = transpose(dout, reverse_axis)
dx = mul(out, sub(dout, sum_func(mul(out, dout), -1)))
dx = transpose(dx, reverse_axis)
return (dx,)
return bprop
@bprop_getters.register(P.LogSoftmax)
def get_bprop_log_softmax(self):
"""Grad definition for `LogSoftmax` operation."""
logsoftmax_grad = G.LogSoftmaxGrad(self.axis)
def bprop(x, out, dout):
dx = logsoftmax_grad(out, dout)
return (dx,)
return bprop
@bprop_getters.register(P.Softplus)
def get_bprop_softplus(self):
"""Grad definition for `Softplus` operation."""
softplus_grad = G.SoftplusGrad()
def bprop(x, out, dout):
dx = softplus_grad(dout, x)
return (dx,)
return bprop
@bprop_getters.register(P.Softsign)
def get_bprop_softsign(self):
"""Grad definition for `Softsign` operation."""
mul = P.Mul()
absolute = P.Abs()
div = P.Div()
square = P.Square()
def bprop(x, out, dout):
dx = mul(dout, div(1, square(1 + absolute(x))))
return (dx,)
return bprop
@bprop_getters.register(P.Tanh)
def get_bprop_tanh(self):
"""Grad definition for `Tanh` operation."""
tanh_grad = G.TanhGrad()
def bprop(x, out, dout):
dx = tanh_grad(out, dout)
return (dx,)
return bprop
@bprop_getters.register(G.TanhGrad)
def get_bprop_tanh_grad(self):
"""Grad definition for `TanhGrad` operation."""
tanh_grad = G.TanhGrad()
def bprop(y, grad, out, dout):
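        # TanhGrad(y, g) computes g * (1 - y^2); its derivative w.r.t. y is -2 * g * y (the dy
        # term below), and its derivative w.r.t. g is (1 - y^2), reused via TanhGrad(y, dout).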
dy = dout * -2.0 * grad * y
dgrad = tanh_grad(y, dout)
return dy, dgrad
return bprop
@bprop_getters.register(P.GeLU)
def get_bprop_gelu(self):
"""Grad definition for `GeLU` operation."""
input_grad = G.GeLUGrad()
def bprop(x, out, dout):
dx = input_grad(dout, x, out)
return (dx,)
return bprop
@bprop_getters.register(P.Gelu)
def get_bprop_gelu_2(self):
"""Grad definition for `GeLU` operation."""
input_grad = G.GeLUGrad()
def bprop(x, out, dout):
dx = input_grad(dout, x, out)
return (dx,)
return bprop
@bprop_getters.register(P.FastGeLU)
def get_bprop_fast_gelu(self):
"""Grad definition for `FastGeLU` operation."""
input_grad = G.FastGeLUGrad()
def bprop(x, out, dout):
dx = input_grad(dout, x)
return (dx,)
return bprop
@bprop_getters.register(P.FastGelu)
def get_bprop_fast_gelu_2(self):
"""Grad definition for `FastGeLU` operation."""
input_grad = G.FastGeLUGrad()
def bprop(x, out, dout):
dx = input_grad(dout, x)
return (dx,)
return bprop
@bprop_getters.register(P.InstanceNorm)
def get_bprop_instance_norm(self):
"""Grad definition for `InstanceNorm` operation."""
input_grad = G.InstanceNormGrad(self.epsilon, self.momentum)
def bprop(x, gamma, beta, mean, variance, out, dout):
saved_mean = out[1]
saved_variance = out[2]
out = input_grad(dout[0], x, gamma, saved_mean, saved_variance)
dx = out[0]
dgamma = out[1]
dbeta = out[2]
return dx, dgamma, dbeta, zeros_like(mean), zeros_like(variance)
return bprop
@bprop_getters.register(P.BatchNorm)
def get_bprop_batch_norm(self):
"""Grad definition for `BatchNorm` operation."""
is_training = self.is_training
input_grad = G.BatchNormGrad(is_training, self.epsilon, self.data_format)
def bprop(x, scale, b, mean, variance, out, dout):
if is_training:
saved_mean = out[3]
saved_variance = out[4]
reserve = out[2]
else:
saved_mean = mean
saved_variance = variance
reserve = out[2]
out = input_grad(dout[0], x, scale, saved_mean, saved_variance, reserve)
dx = out[0]
dscale = out[1]
dbias = out[2]
return dx, dscale, dbias, zeros_like(mean), zeros_like(variance)
return bprop
@bprop_getters.register(P.LayerNorm)
def get_bprop_layer_norm(self):
"""Grad definition for `LayerNorm` operation."""
layer_norm_grad = G.LayerNormGrad(self.begin_norm_axis, self.begin_params_axis)
def bprop(x, gamma, beta, out, dout):
dx, d_gamma, d_beta = layer_norm_grad(
x, dout[0], out[2], out[1], gamma)
return dx, d_gamma, d_beta
return bprop
@bprop_getters.register(G.LayerNormGrad)
def get_bprop_layer_norm_grad(self):
"""Grad definition for `LayerNormGrad` operation."""
layer_norm_grad_grad = G.LayerNormGradGrad(self.begin_norm_axis, self.begin_params_axis)
def bprop(x, dy, variance, mean, gamma, out, dout):
d_x, d_dy, d_gamma = layer_norm_grad_grad(
x, dy, variance, mean, gamma, dout[0], dout[1], dout[2])
return d_x, d_dy, zeros_like(variance), zeros_like(mean), d_gamma
return bprop
@bprop_getters.register(P.L2Normalize)
def get_bprop_l2normalize(self):
"""Grad definition for `L2Normalize` operation."""
input_grad = G.L2NormalizeGrad(self.axis, self.epsilon)
def bprop(x, out, dout):
dx = input_grad(x, out, dout)
return (dx,)
return bprop
@bprop_getters.register(P.SoftmaxCrossEntropyWithLogits)
def get_bprop_softmax_cross_entropy_with_logits(self):
"""Grad definition for `SoftmaxCrossEntropyWithLogits` operation."""
expand = P.ExpandDims()
def bprop(logits, labels, out, dout):
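        # The forward op returns (loss, backprop); the cached per-sample gradient in out[1] only
        # needs to be scaled by the incoming loss gradient dout[0] (expanded over the class axis).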
grad = out[1]
grad = grad * expand(dout[0], -1)
return grad, zeros_like(labels)
return bprop
@bprop_getters.register(P.NLLLoss)
def get_bprop_nll_loss(self):
"""Grad definition for `NLLLoss` operation."""
nll_loss_grad = G.NLLLossGrad(reduction=self.reduction)
def bprop(x, target, weight, out, dout):
total_weight = out[1]
dout_x = dout[0]
dx = nll_loss_grad(x, dout_x, target, weight, total_weight)
return dx, zeros_like(target), zeros_like(weight)
return bprop
@bprop_getters.register(P.SparseSoftmaxCrossEntropyWithLogits)
def get_bprop_sparse_softmax_cross_entropy_with_logits(self):
"""Grad definition for `SparseSoftmaxCrossEntropyWithLogits` operation."""
is_grad = self.is_grad
grad_op = P.SparseSoftmaxCrossEntropyWithLogits(is_grad=True)
def bprop(logits, labels, out, dout):
grad = out[0]
if not is_grad:
            # is_grad=False means the forward call returned the loss, so recompute the logits gradient here
grad = grad_op(logits, labels)
grad = F.depend(grad, out)
grad = grad * dout
return grad, zeros_like(labels)
return bprop
@bprop_getters.register(P.ResizeBilinear)
def get_bprop_resize_bilinear(self):
"""Grad definition for `ResizeBilinear` operation."""
resize_grad = G.ResizeBilinearGrad(self.align_corners)
def bprop(x, out, dout):
dx = resize_grad(dout, x)
return (dx,)
return bprop
@bprop_getters.register(P.OneHot)
def get_bprop_onehot(self):
"""Grad definition for `OneHot` operation."""
def bprop(indices, depth, on_value, off_value, out, dout):
return zeros_like(indices), zeros_like(depth), zeros_like(on_value), zeros_like(off_value)
return bprop
@constexpr
def _range_op(start, limit, delta, dtype):
"""helper function for Grad TopK"""
output_tensor = Tensor(list(range(start, limit, delta)), dtype)
return output_tensor
@constexpr
def _get_1d_shape(in_shape):
"""helper function for Grad TopK"""
out_shape = 1
for i in in_shape:
out_shape *= i
return (out_shape,)
@bprop_getters.register(P.TopK)
def get_bprop_top_kv2(self):
"""Grad definition for `TopK` operation."""
scatter = P.ScatterNd()
expand_dims = P.ExpandDims()
shape_op = P.Shape()
reshape_op = P.Reshape()
dtype = P.DType()
def bprop(input_x, k, out, dout):
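        # Approach (shapes assumed for illustration): for input_x of shape (2, 4) and k=2, the
        # saved indices hold per-row top-k column ids; adding the per-row offsets [0, 4] turns
        # them into flat positions in the flattened (8,) input, so a single ScatterNd can place
        # dout at the selected entries while every other entry keeps a zero gradient.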
in_shape = shape_op(input_x)
in_lastdim = in_shape[-1]
indices = out[1]
ind_shape = shape_op(indices)
ind_lastdim = ind_shape[-1]
ind_2d = reshape_op(indices, (-1, ind_lastdim))
outerdim = shape_op(ind_2d)[0]
        # row offsets into the flattened input: [0, in_lastdim, 2*in_lastdim, ..., (outerdim-1)*in_lastdim]
indices_dtype = dtype(indices)
range_flatten_index = _range_op(0, outerdim * in_lastdim, in_lastdim, indices_dtype)
        # expand_dims to (outerdim, 1), then broadcast-add against the (outerdim, ind_lastdim) indices
ind = reshape_op(ind_2d + expand_dims(range_flatten_index, -1), (-1,))
in_shape_1d = _get_1d_shape(in_shape)
out_grad = reshape_op(
scatter(
expand_dims(ind, -1),
reshape_op(dout[0], (-1,)),
in_shape_1d),
in_shape)
return out_grad, zeros_like(k)
return bprop
@bprop_getters.register(P.SmoothL1Loss)
def get_bprop_smooth_l1_loss(self):
"""Grad definition for `SmoothL1Loss` operation."""
grad = G.SmoothL1LossGrad(self.beta)
def bprop(prediction, target, out, dout):
dx = grad(prediction, target, dout)
dy = grad(target, prediction, dout)
return dx, dy
return bprop
@bprop_getters.register(P.L2Loss)
def get_bprop_l2_loss(self):
"""Grad definition for `L2Loss` operation."""
def bprop(x, out, dout):
dx = x * dout
return (dx,)
return bprop
@bprop_getters.register(P.RNNTLoss)
def get_bprop_rnnt_loss(self):
"""Grad definition for `RNNTLoss` operation."""
def bprop(acts, labels, act_lens, label_lens, out, dout):
grad = out[1]
return grad, zeros_like(labels), zeros_like(act_lens), zeros_like(label_lens)
return bprop
@bprop_getters.register(P.PReLU)
def get_bprop_prelu(self):
"""Grad definition for `PReLU` operation."""
grad = G.PReLUGrad()
def bprop(x, w, out, dout):
dx, dw = grad(dout, x, w)
return dx, dw
return bprop
@bprop_getters.register(P.LSTM)
def get_bprop_lstm(self):
"""Grad definition for `LSTM` operation."""
lstm_grad_data = G.LSTMGradData(
input_size=self.input_size,
hidden_size=self.hidden_size,
num_layers=self.num_layers,
has_bias=self.has_bias,
bidirectional=self.bidirectional,
dropout=self.dropout
)
lstm_grad_weight = G.LSTMGradWeight(
input_size=self.input_size,
hidden_size=self.hidden_size,
num_layers=self.num_layers,
has_bias=self.has_bias,
bidirectional=self.bidirectional,
dropout=self.dropout
)
lstm_grad = G.LSTMGrad(
input_size=self.input_size,
hidden_size=self.hidden_size,
num_layers=self.num_layers,
has_bias=self.has_bias,
bidirectional=self.bidirectional,
dropout=self.dropout
)
def bprop(x, hx, cx, w, out, dout):
y, _, _, reserve, state = out
dy, dhy, dcy, _, _ = dout
dx, dhx, dcx = lstm_grad_data(y, dy, dhy, dcy, w, hx, cx, reserve, state)
dw = lstm_grad_weight(F.depend(x, dx), hx, y, reserve, state)
return dx, dhx, dcx, dw
    # CPU uses the fused LSTMGrad kernel below; other backends combine LSTMGradData and LSTMGradWeight
def bprop_cpu(x, hx, cx, w, out, dout):
y, hy, cy, reserve, _ = out
dy, dhy, dcy, _, _ = dout
dx, dhx, dcx, dw = lstm_grad(x, hx, cx, w, y, hy, cy, dy, dhy, dcy, reserve)
return dx, dhx, dcx, dw
if context.get_context('device_target') == "CPU":
return bprop_cpu
return bprop
@bprop_getters.register(P.DynamicRNN)
def get_bprop_dynamic_rnn(self):
"""Grad definition for `DynamicRNN` operation."""
dynamic_rnn_grad = G.DynamicRNNGrad(cell_type=self.cell_type,
direction=self.direction,
cell_depth=self.cell_depth,
use_peephole=self.use_peephole,
keep_prob=self.keep_prob,
cell_clip=self.cell_clip,
num_proj=self.num_proj,
time_major=self.time_major,
forget_bias=self.forget_bias)
expand_dims = P.ExpandDims()
def bprop(x, w, b, seq_length, init_h, init_c, out, dout):
dy, dh, dc, _, _, _, _, _, = dout
dh = dh[-1]
dc = dc[-1]
y, h, c, i, j, f, o, tanhct = out
dw, db, dx, dh_prev, dc_prev = dynamic_rnn_grad(x, w, b, y, init_h[0], init_c[0], h,
c, dy, dh, dc, i, j, f, o, tanhct)
dh_prev = expand_dims(dh_prev, 0)
dc_prev = expand_dims(dc_prev, 0)
return dx, dw, db, (0), dh_prev, dc_prev
return bprop
@bprop_getters.register(P.DynamicGRUV2)
def get_bprop_dynamic_gru_v2(self):
"""Grad definition for `DynamicGRUV2` operation."""
dynamic_gru_v2_grad = G.DynamicGRUV2Grad(self.direction, self.cell_depth, self.keep_prob, self.cell_clip,
self.num_proj, self.time_major, self.gate_order,
self.reset_after)
def bprop(x, winput, whidden, binput, bhidden, seq, init_h, out, dout):
y, out_h, update, reset, new, hidden_new = out
dy, dout_h, _, _, _, _ = dout
dw_input, dw_hidden, db_input, db_hidden, dx, dh_prev = dynamic_gru_v2_grad(x, winput, whidden, y, init_h,
out_h, dy, dout_h[-1], update,
reset, new, hidden_new, None, None)
return dx, dw_input, dw_hidden, db_input, db_hidden, (0), dh_prev
return bprop
@bprop_getters.register(P.SigmoidCrossEntropyWithLogits)
def get_bprop_sigmoid_crossentropy_with_logits(self):
"""Grad definition for `SigmoidCrossEntropyWithLogits` operation."""
op = G.SigmoidCrossEntropyWithLogitsGrad()
def bprop(x, y, out, dout):
dx = op(x, y, dout)
return (dx, zeros_like(y))
return bprop
@bprop_getters.register(P.Pad)
def get_bprop_pad(self):
"""Grad definition for `Pad` operation."""
shape_op = P.Shape()
paddings = self.paddings
def bprop(x, out, dout):
begin = ()
for item in paddings:
begin += (item[0],)
shp = shape_op(x)
dx = P.Slice()(dout, begin, shp)
return (dx,)
return bprop
@bprop_getters.register(P.MirrorPad)
def get_bprop_mirror_pad(self):
"""Grad definition for `MirrorPad` operation."""
mirror_pad_grad = G.MirrorPadGrad(self.mode)
def bprop(x, paddings, out, dout):
dx = mirror_pad_grad(dout, paddings)
return (dx, zeros_like(paddings))
return bprop
@bprop_getters.register(P.ROIAlign)
def get_bprop_roi_align(self):
"""Grad definition for `ROIAlign` operation."""
shape_op = P.Shape()
pooled_height = self.pooled_height
pooled_width = self.pooled_width
spatial_scale = self.spatial_scale
sample_num = self.sample_num
def bprop(inputs, rois, out, dout):
inputs_shape = shape_op(inputs)
dx = G.ROIAlignGrad(inputs_shape,
pooled_height,
pooled_width,
spatial_scale,
sample_num,
)(dout, rois)
return dx, zeros_like(rois)
return bprop
@bprop_getters.register(P.Conv2DBackpropInput)
def get_bprop_conv2d_backprop_input(self):
"""Grad definition for `Conv2DBackpropInput` operation."""
filter_grad = G.Conv2DBackpropFilter(
self.out_channel, self.kernel_size, self.pad_mode, self.pad, self.pad_list, mode=self.mode,
dilation=self.dilation, stride=self.stride, group=self.group, data_format=self.format
)
input_grad = P.Conv2D(
self.out_channel, self.kernel_size, pad_mode=self.pad_mode.lower(), pad=self.pad,
dilation=self.dilation, stride=self.stride, group=self.group, data_format=self.format
)
def bprop(x, w, f_sizes, out, dout):
dx = input_grad(dout, w)
if env_force_bprop_seq == '1':
x = F.depend(x, dx)
dw = filter_grad(x, dout, F.shape(w))
return dx, dw, zeros_like(f_sizes)
return bprop
@bprop_getters.register(P.BinaryCrossEntropy)
def get_bprop_binary_cross_entropy(self):
"""Grad definition for `BinaryCrossEntropy` operation."""
grad = G.BinaryCrossEntropyGrad(self.reduction)
def bprop(x, y, weight, out, dout):
dx = grad(x, y, dout, weight)
return dx, zeros_like(y), zeros_like(weight)
return bprop
@bprop_getters.register(P.BCEWithLogitsLoss)
def get_bprop_ce_with_logits_loss(self):
"""Grad definition for `BCEWithLogitsLoss` operation."""
reduction = self.reduction
mul = P.Mul()
sigmoid = P.Sigmoid()
add = P.Add()
sub = P.Sub()
size = P.Size()
neg = P.Neg()
log = P.Log()
def bprop(predict, target, weight, pos_weight, out, dout):
sigmoid_input = sigmoid(predict)
if pos_weight is not None:
t = mul(target, pos_weight)
dx = mul(sub(mul(sub(add(t, 1), target), sigmoid_input), t), dout)
grad_target = mul(sub(log(sub(1, sigmoid_input)), mul(pos_weight, log(sigmoid_input))), dout)
else:
dx = mul((sigmoid_input - target), dout)
grad_target = mul(predict, neg(dout))
if weight is not None:
dx = mul(dx, weight)
grad_target = mul(grad_target, weight)
if reduction == 'mean':
dx = dx / size(dx)
grad_target = grad_target / size(target)
return dx, grad_target, zeros_like(weight), zeros_like(pos_weight)
return bprop
@bprop_getters.register(P.KLDivLoss)
def get_bprop_kl_div_loss(self):
"""Grad definition for `KLDivLoss` operation."""
grad = G.KLDivLossGrad(self.reduction)
def bprop(x, y, out, dout):
dx, dy = grad(x, y, dout)
return dx, dy
return bprop
@bprop_getters.register(P.Dropout)
def get_bprop_dropout(self):
"""Grad definition for `Dropout` operation."""
grad = G.DropoutGrad(self.keep_prob)
def bprop(x, out, dout):
_, mask = out
dy, _ = dout
dx = grad(dy, mask)
return (dx,)
return bprop
@bprop_getters.register(P.Dropout2D)
def get_bprop_dropout2d(self):
"""Grad definition for `Dropout2D` operation."""
dtype = P.DType()
cast = P.Cast()
mul = P.Mul()
keep_prob = self.keep_prob
def bprop(x, out, dout):
_, mask = dout
y = cast(mask, mstype.float32)
if keep_prob != 0:
y = y * (1 / keep_prob)
y = mul(x, y)
y = cast(y, dtype(x))
return (y,)
return bprop
@bprop_getters.register(P.Dropout3D)
def get_bprop_dropout3d(self):
"""Grad definition for `Dropout3D` operation."""
dtype = P.DType()
cast = P.Cast()
mul = P.Mul()
keep_prob = self.keep_prob
def bprop(x, out, dout):
_, mask = dout
y = cast(mask, mstype.float32)
if keep_prob != 0:
y = y * (1 / keep_prob)
y = mul(x, y)
y = cast(y, dtype(x))
return (y,)
return bprop
@bprop_getters.register(P.CTCLoss)
def get_bprop_ctc_loss(self):
"""Grad definition for `CTCLoss` operation"""
expand = P.ExpandDims()
def bprop(inputs, labels_indices, labels_values, sequence_length, out, dout):
grad_loss = out[1]
grad = grad_loss * expand(dout[0], -1)
return grad, zeros_like(labels_indices), zeros_like(labels_values), zeros_like(sequence_length)
return bprop
@bprop_getters.register(P.BasicLSTMCell)
def get_bprop_basic_lstm_cell(self):
"""Grad definition for `BasicLSTMCell` operation."""
basic_lstm_cell_cstate_grad = G.BasicLSTMCellCStateGrad(
forget_bias=self.forget_bias,
activation=self.activation
)
basic_lstm_cell_weight_grad = G.BasicLSTMCellWeightGrad()
basic_lstm_cell_input_grad = G.BasicLSTMCellInputGrad(keep_prob=self.keep_prob)
def bprop(x, h, c, w, b, out, dout):
_, _, it, jt, ft, ot, tanhct = out
dct, dht, _, _, _, _, _ = dout
dgate, dct_1 = basic_lstm_cell_cstate_grad(c, dht, dct, it, jt, ft, ot, tanhct)
dxt, dht = basic_lstm_cell_input_grad(dgate, w)
dw, db = basic_lstm_cell_weight_grad(F.depend(x, dxt), h, dgate)
return dxt, dht, dct_1, dw, db
return bprop
@bprop_getters.register(P.LRN)
def get_bprop_lrn(self):
"""Grad definition for `LRN` operation."""
grad = G.LRNGrad(self.depth_radius, self.bias, self.alpha, self.beta)
def bprop(x, out, dout):
dx = grad(dout, x, out)
return (dx,)
return bprop
the-stack_0_14064 | import re
import copy
from epjson_handler import EPJSON
from expand_objects import ExpandObjects, ExpandThermostat, ExpandZone, ExpandSystem, ExpandPlantLoop, \
ExpandPlantEquipment
from custom_exceptions import InvalidTemplateException, InvalidEpJSONException, PyExpandObjectsYamlStructureException
class HVACTemplate(EPJSON):
"""
Handle HVACTemplate conversion process and connect created objects together.
Attributes:
templates: HVACTemplate objects from epJSON file
base_objects: Non-HVACTemplate objects from epJSON file
templates_zones: HVACTemplate:Zone: objects
templates_systems: HVACTemplate:System: objects
templates_plant_equipment: HVACTemplate:Plant equipment objects
templates_plant_loops: HVACTemplate:Plant: loop objects
expanded_*: List of class objects for each template type
epjson: epJSON used to store connection objects
"""
def __init__(
self,
no_schema=False,
logger_level='WARNING',
logger_name='console_only_logger',
reset_stream=True):
"""
:param no_schema: Boolean flag for skipping schema validation
"""
super().__init__(no_schema=no_schema, logger_level=logger_level, logger_name=logger_name,
reset_stream=reset_stream)
self.logger_level = logger_level
self.logger_name = logger_name
self.templates = {}
self.base_objects = {}
self.templates_systems = {}
self.templates_zones = {}
self.templates_plant_equipment = {}
self.templates_plant_loops = {}
self.templates_thermostats = {}
self.expanded_thermostats = {}
self.expanded_zones = {}
self.expanded_systems = {}
self.expanded_plant_loops = {}
self.expanded_plant_equipment = {}
self.epjson = {}
return
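    # Processing overview: _hvac_template_preprocess sorts the incoming epJSON into the
    # templates_* attributes and self.base_objects, _expand_templates turns each template group
    # into Expand* class objects, and helpers such as _create_zonecontrol_thermostat and
    # _create_system_path_connection_objects build the epJSON objects that tie them together.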
def _hvac_template_preprocess(self, epjson):
"""
Organize epJSON and assign template objects to specific class attributes
:param epjson: Input epJSON object
        :return: None. Template objects are organized into self.templates and the templates_* class
            attributes; all non-template objects are stored in self.base_objects
"""
self.logger.info('##### HVACTemplate #####')
for object_type, object_structure in epjson.items():
if re.match('^HVACTemplate:*', object_type):
if re.match('^HVACTemplate:Thermostat$', object_type):
self.merge_epjson(
super_dictionary=self.templates_thermostats,
object_dictionary={object_type: object_structure},
unique_name_override=False)
elif re.match('^HVACTemplate:Zone:('
'ConstantVolume|BaseboardHeat|FanCoil|IdealLoadsAirSystem|PTAC|PTHP|WaterToAirHeatPump|'
'VRF|Unitary|VAV|VAV:FanPowered|VAV:HeatAndCool|DualDuct)$',
object_type):
zone_default_map = {
'HVACTemplate:Zone:BaseboardHeat': {
'baseboard_heating_type': 'HotWater',
'outdoor_air_method': 'Flow/Person'
},
'HVACTemplate:Zone:ConstantVolume': {
'outdoor_air_method': 'Flow/Person',
'zone_cooling_design_supply_air_temperature_input_method': 'SystemSupplyAirTemperature',
'zone_cooling_design_supply_air_temperature': 12.8,
'zone_cooling_design_supply_air_temperature_difference': 11.11,
'zone_heating_design_supply_air_temperature_input_method': 'SupplyAirTemperature',
'zone_heating_design_supply_air_temperature': 50.0,
'zone_heating_design_supply_air_temperature_difference': 30.0
},
'HVACTemplate:Zone:FanCoil': {
'cooling_coil_type': 'ChilledWater',
'heating_coil_type': 'HotWater',
'outdoor_air_method': 'Flow/Person',
'supply_fan_delta_pressure': 75,
'cooling_coil_design_setpoint': 14.0,
'heating_coil_design_setpoint': 50.0,
'zone_cooling_design_supply_air_temperature_input_method': 'SupplyAirTemperature',
'zone_cooling_design_supply_air_temperature_difference': 11.11,
'zone_heating_design_supply_air_temperature_input_method': 'SupplyAirTemperature',
'zone_heating_design_supply_air_temperature_difference': 30.0
},
'HVACTemplate:Zone:PTAC': {
'outdoor_air_method': 'Flow/Person',
'supply_fan_placement': 'DrawThrough',
'cooling_coil_type': 'SingleSpeedDX',
'supply_fan_total_efficiency': 0.7,
'cooling_coil_gross_rated_cooling_cop': 3.0,
'heating_coil_type': 'Electric',
'zone_cooling_design_supply_air_temperature_input_method': 'SupplyAirTemperature',
'zone_cooling_design_supply_air_temperature': 14.0,
'zone_cooling_design_supply_air_temperature_difference': 11.0,
'zone_heating_design_supply_air_temperature_input_method': 'SupplyAirTemperature',
'zone_heating_design_supply_air_temperature': 50.0,
'zone_heating_design_supply_air_temperature_difference': 30.0
},
'HVACTemplate:Zone:PTHP': {
'outdoor_air_method': 'Flow/Person',
'supply_fan_placement': 'DrawThrough',
'cooling_coil_type': 'SingleSpeedDX',
'cooling_coil_gross_rated_cop': 3.0,
'supply_fan_total_efficiency': 0.7,
'heat_pump_heating_coil_type': 'SingleSpeedDXHeatPump',
'heat_pump_heating_coil_gross_rated_cop': 2.75,
'heat_pump_heating_minimum_outdoor_dry_bulb_temperature': -8.0,
'heat_pump_defrost_maximum_outdoor_dry_bulb_temperature': 5.0,
'heat_pump_defrost_strategy': 'ReverseCycle',
'heat_pump_defrost_control': 'Timed',
'supplemental_heating_coil_type': 'Electric',
'supplemental_heating_coil_maximum_outdoor_dry_bulb_temperature': 21.0,
'zone_cooling_design_supply_air_temperature_input_method': 'SupplyAirTemperature',
'zone_cooling_design_supply_air_temperature': 14.0,
'zone_cooling_design_supply_air_temperature_difference': 11.11,
'zone_heating_design_supply_air_temperature_input_method': 'SupplyAirTemperature',
'zone_heating_design_supply_air_temperature': 50.0,
'zone_heating_design_supply_air_temperature_difference': 30.0
},
'HVACTemplate:Zone:Unitary': {
'outdoor_air_method': 'Flow/Person',
'zone_cooling_design_supply_air_temperature_input_method': 'SystemSupplyAirTemperature',
'zone_cooling_design_supply_air_temperature': 12.8,
'zone_cooling_design_supply_air_temperature_difference': 11.11,
'zone_heating_design_supply_air_temperature_input_method': 'SystemSupplyAirTemperature',
'zone_heating_design_supply_air_temperature': 50.0,
'zone_heating_design_supply_air_temperature_difference': 30.0
},
'HVACTemplate:Zone:VRF': {
'outdoor_air_method': 'Flow/Person',
'supply_air_fan_placement': 'BlowThrough',
'cooling_coil_type': 'VariableRefrigerantFlowDX',
'supply_fan_total_efficiency': 0.7,
'heating_coil_type': 'VariableRefrigerantFlowDX',
'zone_cooling_design_supply_air_temperature_input_method': 'SupplyAirTemperature',
'zone_cooling_design_supply_air_temperature': 14.0,
'zone_cooling_design_supply_air_temperature_difference': 11.11,
'zone_heating_design_supply_air_temperature_input_method': 'SupplyAirTemperature',
'zone_heating_design_supply_air_temperature': 50.0,
'zone_heating_design_supply_air_temperature_difference': 30.0
},
'HVACTemplate:Zone:WaterToAirHeatPump': {
'outdoor_air_method': 'Flow/Person',
'supply_fan_placement': 'DrawThrough',
'cooling_coil_type': 'Coil:Cooling:WaterToAirHeatPump:EquationFit',
'cooling_coil_gross_rated_cop': 3.5,
# todo_eo: The template and ZoneHVAC:WaterToAirHeatPump defaults are mismatched for this
                            # field. This is not the default efficiency for Fan:OnOff.
'supply_fan_total_efficiency': 0.7,
'heat_pump_heating_coil_type': 'Coil:Heating:WaterToAirHeatPump:EquationFit',
'heat_pump_heating_coil_gross_rated_cop': 4.2,
'maximum_cycling_rate': 2.5,
'supplemental_heating_coil_type': 'Electric',
'zone_cooling_design_supply_air_temperature_input_method': 'SupplyAirTemperature',
'zone_cooling_design_supply_air_temperature': 14.0,
'zone_cooling_design_supply_air_temperature_difference': 11.11,
'zone_heating_design_supply_air_temperature_input_method': 'SupplyAirTemperature',
'zone_heating_design_supply_air_temperature': 50.0,
'zone_heating_design_supply_air_temperature_difference': 30.0,
'heat_pump_coil_water_flow_mode': 'Cycling'
}
}
for object_name, object_fields in object_structure.items():
# set defaults
selected_default_map = zone_default_map.get(object_type)
if selected_default_map:
for field, default_value in selected_default_map.items():
if not object_fields.get(field):
object_fields[field] = default_value
# set a mapping of zone template type to look up parent system
zone_template_map = {
('HVACTemplate:Zone:ConstantVolume', ):
(
'template_constant_volume_system_name',
['HVACTemplate:System:ConstantVolume', ]),
('HVACTemplate:Zone:BaseboardHeat', 'HVACTemplate:Zone:FanCoil', 'HVACTemplate:Zone:PTAC',
'HVACTemplate:Zone:PTHP', 'HVACTemplate:Zone:WaterToAirHeatPump', 'HVACTemplate:Zone:VRF', ):
(
'dedicated_outdoor_air_system_name',
['HVACTemplate:System:DedicatedOutdoorAir', ]),
('HVACTemplate:Zone:Unitary', ):
(
'template_unitary_system_name',
['HVACTemplate:System:Unitary', 'HVACTemplate:System:UnitaryHeatPump',
'HVACTemplate:System:UnitaryHeatPump:AirToAir', 'HVACTemplate:System:UnitarySystem']),
('HVACTemplate:Zone:VAV', 'HVACTemplate:Zone:VAVFanPowered'):
(
'template_vav_system_name',
['HVACTemplate:System:VAV', 'HVACTemplate:System:PackagedVAV']),
('HVACTemplate:Zone:DualDuct', ):
(
'template_dual_duct_system_name',
['HVACTemplate:System:DualDuct', ]),
('HVACTemplate:Zone:vrf', ):
(
'template_vrf_system_name',
['HVACTemplate:System:VRF', ])}
                    # Check the referenced system against the epjson and raise an error if it isn't found
system_check_list = [v for k, v in zone_template_map.items() if object_type in k]
if system_check_list:
system_check_list = system_check_list[0]
for object_name, object_fields in object_structure.items():
system_name = object_fields.get(system_check_list[0])
if not system_name and system_check_list[0] == 'dedicated_outdoor_air_system_name':
continue
else:
template_system_name = None
for system_type in system_check_list[1]:
system_group = epjson.get(system_type)
if system_group:
template_system_name = True if system_name in system_group else False
if template_system_name:
break
if not template_system_name:
raise InvalidTemplateException(
'Error: In {} ({}) Could not find air handler name referenced ({})'
.format(object_type, object_name, system_name))
# check fields
for object_name, object_fields in object_structure.items():
# check for required info
if not object_fields.get('template_thermostat_name', None):
self.logger.info(
'In {} ({}) template thermostat name not provided'
.format(object_type, object_name))
# check baseboard settings
if object_fields.get('baseboard_heating_type', None) == 'HotWater' and (
not epjson.get('HVACTemplate:Plant:HotWaterLoop') or not
epjson.get('HVACTemplate:Plant:Boiler')):
self.logger.warning(
'Warning: Both a HVACTemplate:Plant:HotWaterLoop and a HVACTemplate:Plant:Boiler are '
'needed when using hot water baseboards. Template name: {}'.format(object_name))
# fan coil capacity control with doas
if object_type == 'HVACTemplate:Zone:FanCoil':
if object_fields.get('capacity_control_method') == 'ConstantFanVariableFlow' and \
object_fields.get('dedicated_outdoor_air_system_name', '') != '':
self.logger.warning(
'Warning: In {} ({})'
' the Capacity Control Method is {}'
' and the zone is served by a dedicated outdoor air system.'
.format(object_type, object_name, object_fields.get('capacity_control_method')))
# IdealLoads input check
if object_type == 'HVACTemplate:Zone:IdealLoadsAirSystem':
heating_limit = object_fields.get('heating_limit')
maximum_heating_air_flow_rate = object_fields.get('maximum_heating_air_flow_rate', '')
maximum_sensible_heating_capacity = \
object_fields.get('maximum_sensible_heating_capacity', '')
cooling_limit = object_fields.get('cooling_limit')
maximum_cooling_air_flow_rate = object_fields.get('maximum_cooling_air_flow_rate', '')
maximum_total_cooling_capacity = \
object_fields.get('maximum_total_cooling_capacity', '')
if heating_limit == 'LimitFlowRate' and maximum_heating_air_flow_rate == '':
raise InvalidTemplateException(
'Error: In {} ({})'
' the Heating Limit field is {} but the Maximum Heating Air Flow Rate field is '
'blank. Enter a value or autosize in this field.'
.format(object_type, object_name, object_fields.get('heating_limit')))
elif heating_limit == 'LimitCapacity' and maximum_sensible_heating_capacity == '':
raise InvalidTemplateException(
'Error: In {} ({})'
' the Heating Limit field is {} but the Maximum Sensible Heating Capacity field is '
'blank. Enter a value or autosize in this field.'
.format(object_type, object_name, object_fields.get('heating_limit')))
elif heating_limit == 'LimitFlowRateAndCapacity' and (
maximum_heating_air_flow_rate == '' or maximum_sensible_heating_capacity == ''):
msg = []
if maximum_heating_air_flow_rate == '':
msg.append('the Maximum Heating Air Flow Rate field is blank')
if maximum_sensible_heating_capacity == '':
msg.append('the Maximum Sensible Heating Capacity field is blank')
raise InvalidTemplateException(
'Error: In {} ({})'
' the Heating Limit field is {} but {}. Enter a value or autosize in this field.'
.format(
object_type,
object_name,
object_fields.get('heating_limit'),
' and '.join(msg)))
if cooling_limit == 'LimitFlowRate' and maximum_cooling_air_flow_rate == '':
raise InvalidTemplateException(
'Error: In {} ({})'
                                    ' the Cooling Limit field is {} but the Maximum Cooling Air Flow Rate field is '
'blank. Enter a value or autosize in this field.'
.format(object_type, object_name, object_fields.get('cooling_limit')))
elif cooling_limit == 'LimitCapacity' and maximum_total_cooling_capacity == '':
raise InvalidTemplateException(
'Error: In {} ({})'
' the Cooling Limit field is {} but the Maximum Total Cooling Capacity field is '
'blank. Enter a value or autosize in this field.'
.format(object_type, object_name, object_fields.get('cooling_limit')))
elif cooling_limit == 'LimitFlowRateAndCapacity' and (
maximum_cooling_air_flow_rate == '' or maximum_total_cooling_capacity == ''):
msg = []
if maximum_cooling_air_flow_rate == '':
msg.append('the Maximum Cooling Air Flow Rate field is blank')
if maximum_total_cooling_capacity == '':
msg.append('the Maximum Total Cooling Capacity field is blank')
raise InvalidTemplateException(
'Error: In {} ({})'
' the Cooling Limit field is {} but {}. Enter a value or autosize in this field.'
.format(
object_type,
object_name,
object_fields.get('cooling_limit'),
' and '.join(msg)))
self.merge_epjson(
super_dictionary=self.templates_zones,
object_dictionary={object_type: object_structure},
unique_name_override=False)
elif re.match('^HVACTemplate:System:('
'VRF|Unitary|UnitaryHeatPump:AirToAir|UnitarySystem|VAV|PackagedVAV|'
'ConstantVolume|DualDuct|DedicatedOutdoorAir'
')$', object_type):
# check for individual template issues
system_default_map = {
'HVACTemplate:System:ConstantVolume': {
'cooling_coil_type': 'ChilledWater',
'cooling_coil_design_setpoint_temperature': 12.8,
'cooling_coil_setpoint_at_outdoor_dry_bulb_low': 15.6,
'cooling_coil_reset_outdoor_dry_bulb_low': 15.6,
'cooling_coil_setpoint_at_outdoor_dry_bulb_high': 12.8,
'cooling_coil_reset_outdoor_dry_bulb_high': 23.3,
'economizer_type': 'NoEconomizer',
'heating_coil_type': 'HotWater',
'heating_coil_design_setpoint': 10,
'heating_coil_setpoint_at_outdoor_dry_bulb_low': 15.0,
'heating_coil_reset_outdoor_dry_bulb_low': 7.8,
'heating_coil_setpoint_at_outdoor_dry_bulb_high': 12.2,
'heating_coil_reset_outdoor_dry_bulb_high': 12.2
},
'HVACTemplate:System:DedicatedOutdoorAir': {
'air_outlet_type': 'DirectIntoZone',
'cooling_coil_type': 'ChilledWater',
'cooling_coil_design_setpoint_temperature': 12.8,
'cooling_coil_setpoint_at_outdoor_dry_bulb_low': 15.6,
'cooling_coil_reset_outdoor_dry_bulb_low': 15.6,
'cooling_coil_setpoint_at_outdoor_dry_bulb_high': 12.8,
'cooling_coil_reset_outdoor_dry_bulb_high': 23.3,
'dx_cooling_coil_gross_rated_cop': 3.0,
'heating_coil_type': 'HotWater',
'heating_coil_design_setpoint': 12.2,
'heating_coil_setpoint_at_outdoor_dry_bulb_low': 15.0,
'heating_coil_reset_outdoor_dry_bulb_low': 7.8,
'heating_coil_setpoint_at_outdoor_dry_bulb_high': 12.2,
'heating_coil_reset_outdoor_dry_bulb_high': 12.2,
'humidifier_rated_capacity': 1e-06,
'humidifier_constant_setpoint': 0.003
},
'HVACTemplate:System:DualDuct': {
'system_configuration_type': 'SingleFanConstantVolume',
'main_supply_fan_minimum_flow_fraction': 0.2,
'cold_duct_supply_fan_minimum_flow_fraction': 0.2,
'cold_duct_supply_fan_placement': 'DrawThrough',
'hot_duct_supply_fan_minimum_flow_fraction': 0.2,
'hot_duct_supply_fan_placement': 'DrawThrough',
'cooling_coil_type': 'ChilledWater',
'cooling_coil_setpoint_control_type': 'FixedSetpoint',
'cooling_coil_design_setpoint_temperature': 12.8,
'cooling_coil_setpoint_at_outdoor_dry_bulb_low': 15.6,
'cooling_coil_reset_outdoor_dry_bulb_low': 15.6,
'cooling_coil_setpoint_at_outdoor_dry_bulb_high': 12.8,
'cooling_coil_reset_outdoor_dry_bulb_high': 23.3,
'heating_coil_type': 'HotWater',
'heating_coil_setpoint_control_type': 'FixedSetpoint',
'heating_coil_design_setpoint': 50,
'heating_coil_setpoint_at_outdoor_dry_bulb_low': 50,
'heating_coil_reset_outdoor_dry_bulb_low': 7.8,
'heating_coil_setpoint_at_outdoor_dry_bulb_high': 26,
'heating_coil_reset_outdoor_dry_bulb_high': 12.2,
'preheat_coil_design_setpoint': 7.2
},
'HVACTemplate:System:PackagedVAV': {
'cooling_coil_type': 'TwoSpeedDX',
'cooling_coil_design_setpoint': 12.8,
'cooling_coil_gross_rated_cop': 3.0,
'heating_coil_design_setpoint': 10
},
'HVACTemplate:System:Unitary': {
'cooling_coil_type': 'SingleSpeedDX',
'cooling_design_supply_air_temperature': 12.8,
'cooling_coil_gross_rated_cop': 3.0,
'heating_design_supply_air_temperature': 50.0,
'economizer_type': 'NoEconomizer',
'economizer_lockout': 'NoLockout',
'supply_fan_placement': 'BlowThrough',
'dehumidification_setpoint': 60.0,
'humidifier_rated_capacity': 1e-06,
'humidifier_setpoint': 30.0
},
'HVACTemplate:System:UnitarySystem': {
'control_type': 'Load',
'supply_fan_placement': 'BlowThrough',
'cooling_coil_type': 'SingleSpeedDX',
'number_of_speeds_for_cooling': 1,
'dx_cooling_coil_gross_rated_cop': 3.0,
'heating_coil_type': 'Gas',
'number_of_speeds_or_stages_for_heating': 1,
'heat_pump_heating_coil_gross_rated_cop': 2.75,
'heat_pump_heating_minimum_outdoor_dry_bulb_temperature': -8.0,
'heat_pump_defrost_maximum_outdoor_dry_bulb_temperature': 5.0,
'heat_pump_defrost_strategy': 'ReverseCycle',
'heat_pump_defrost_control': 'Timed',
'supplemental_heating_or_reheat_coil_type': 'None',
'supplemental_heating_or_reheat_coil_maximum_outdoor_dry_bulb_temperature': 21.0,
'economizer_type': 'NoEconomizer',
'economizer_lockout': 'NoLockout',
'heat_recovery_frost_control_type': 'None',
'dehumidification_control_type': 'None',
'dehumidification_relative_humidity_setpoint': 60.0,
'humidifier_type': 'None',
'humidifier_rated_capacity': 1e-06,
'humidifier_relative_humidity_setpoint': 30.0,
'sizing_option': 'NonCoincident',
'return_fan': 'No'
},
'HVACTemplate:System:VAV': {
'cooling_coil_type': 'ChilledWater',
'cooling_coil_design_setpoint': 12.8,
'heating_coil_type': 'None',
'heating_coil_design_setpoint': 10,
'preheat_coil_design_setpoint': 7.2,
'humidifier_rated_capacity': 1e-06
}
}
for object_name, object_fields in object_structure.items():
# set defaults
selected_default_map = system_default_map.get(object_type)
if selected_default_map:
for field, default_value in selected_default_map.items():
if not object_fields.get(field):
object_fields[field] = default_value
try:
zone_system_field = self._get_zone_template_field_from_system_type(object_type)
except InvalidTemplateException:
continue
system_names = [
zone_fields.get(zone_system_field) for zone_type, zone_structure in epjson.items()
if re.match(r'HVACTemplate:Zone:.*', zone_type)
for zone_template_name, zone_fields in zone_structure.items()]
if object_name not in system_names:
raise InvalidTemplateException(
'Error: In {} ({}) Did not find any HVACTemplate:Zone objects connected to system.'
'There must be at least one zone object which specifies '
'this system as the Template Unitary System Name.'
.format(object_type, object_name))
if object_fields.get('night_cycle_control', 'None') == 'CycleOnControlZone' and \
object_fields.get('night_cycle_control_zone_name', 'None') == 'None':
self.logger.warning('Warning: A zone name must be specified when Night Cycle Control is '
'set to Cycle on Control Zone for {} with unique name {}'
.format(object_type, object_name))
# check for control zones
if object_type in ['HVACTemplate:System:Unitary',
'HVACTemplate:System:ConstantVolume',
'HVACTemplate:System:UnitarySystem']:
for object_name, object_fields in object_structure.items():
try:
zone_system_field = self._get_zone_template_field_from_system_type(object_type)
except InvalidTemplateException:
continue
try:
zones_served = [
zone_fields.get('zone_name') for zone_type, zone_structure in epjson.items()
if re.match(r'HVACTemplate:Zone:.*', zone_type)
for zone_template_name, zone_fields in zone_structure.items()
if zone_fields.get(zone_system_field) == object_name]
except AttributeError:
raise InvalidTemplateException(
'Error: In {} ({}) No HVACTemplate:Zone template objects reference'
' the system object'
.format(object_type, object_name))
if object_type in ['HVACTemplate:System:Unitary', 'HVACTemplate:System:UnitarySystem'] and \
object_fields.get('control_zone_or_thermostat_location_name') and \
object_fields.get('control_zone_or_thermostat_location_name') not in zones_served:
raise InvalidTemplateException(
'Error: In {} ({}) for the field control_zone_or_thermostat_location_name could '
'not find a matching HVACTemplate:Zone:Unitary named {}'
.format(
object_type,
object_name,
object_fields.get('control_zone_or_thermostat_location_name')))
elif object_type in ['HVACTemplate:System:Unitary',
'HVACTemplate:System:UnitarySystem'] and \
not object_fields.get('control_zone_or_thermostat_location_name'):
raise InvalidTemplateException(
'Error: control_zone_or_thermostat_location_name must '
'be specified for {} which is a {}'.format(object_name, object_type))
elif object_type == 'HVACTemplate:System:ConstantVolume' and \
object_fields.get('cooling_coil_control_zone_name') and \
object_fields.get('cooling_coil_control_zone_name') not in zones_served:
raise InvalidTemplateException(
'Error: In {} named {} for the field cooling_coil_control_zone_name could '
                                    'not find a matching HVACTemplate:Zone:ConstantVolume named {}'
.format(
object_type,
object_name,
object_fields.get('cooling_coil_control_zone_name')))
elif object_type == 'HVACTemplate:System:ConstantVolume' and \
object_fields.get('heating_coil_control_zone_name') and \
object_fields.get('heating_coil_control_zone_name') not in zones_served:
raise InvalidTemplateException(
'Error: In {} named {} for the field heating_coil_control_zone_name could '
                                    'not find a matching HVACTemplate:Zone:ConstantVolume named {}'
.format(
object_type,
object_name,
object_fields.get('heating_coil_control_zone_name')))
# check vrf master thermostat referenced zone
if object_type in ['HVACTemplate:System:VRF', ]:
for object_name, object_fields in object_structure.items():
try:
zone_system_field = self._get_zone_template_field_from_system_type(object_type)
except InvalidTemplateException:
continue
try:
zones_served = [
zone_fields.get('zone_name') for zone_type, zone_structure in epjson.items()
if re.match(r'HVACTemplate:Zone:.*', zone_type)
for zone_template_name, zone_fields in zone_structure.items()
if zone_fields.get(zone_system_field) == object_name]
except AttributeError:
                                raise InvalidTemplateException('Error: No HVACTemplate:Zone:VRF template objects reference'
' the {} object'.format(object_type))
if object_fields.get('master_thermostat_priority_control_type') == \
'MasterThermostatPriority' and \
object_fields.get('zone_name_for_master_thermostat_location') not in zones_served:
raise InvalidTemplateException(
'Error: In {} ({}) for the field Zone Name for '
'Master Thermostat Location could not find a matching '
'HVACTemplate:Zone:VRF named: {}'
.format(
object_type,
object_name,
object_fields.get('zone_name_for_master_thermostat_location')))
if object_fields.get('master_thermostat_priority_control_type') == 'Scheduled' and \
not object_fields.get('thermostat_priority_schedule_name'):
raise InvalidTemplateException(
'Error: In {} ({}) the Master Thermostat '
'Priority Control Type = Scheduled, but the Thermostat Priority Schedule Name '
'is blank.'.format(object_type, object_name))
self.merge_epjson(
super_dictionary=self.templates_systems,
object_dictionary={object_type: object_structure},
unique_name_override=False)
elif re.match('^HVACTemplate:Plant:(ChilledWater|HotWater|MixedWater)Loop$', object_type):
if len(object_structure.keys()) > 1:
self.logger.warning('Warning: Only one {} allowed per file.'.format(object_type))
plant_loop_default_map = {
'HVACTemplate:Plant:ChilledWaterLoop': {
'chilled_water_design_setpoint': 7.22,
'condenser_water_design_setpoint': 29.4,
'chilled_water_pump_configuration': 'ConstantPrimaryNoSecondary',
'chilled_water_setpoint_at_outdoor_dry_bulb_low': 12.2,
'chilled_water_reset_outdoor_dry_bulb_low': 15.6,
'chilled_water_setpoint_at_outdoor_dry_bulb_high': 6.7,
'chilled_water_reset_outdoor_dry_bulb_high': 26.7
},
'HVACTemplate:Plant:HotWaterLoop': {
'hot_water_design_setpoint': 82,
'hot_water_pump_configuration': 'ConstantFlow',
'hot_water_setpoint_at_outdoor_dry_bulb_low': 82.2,
'hot_water_reset_outdoor_dry_bulb_low': -6.7,
'hot_water_setpoint_at_outdoor_dry_bulb_high': 65.6,
'hot_water_reset_outdoor_dry_bulb_high': 10
},
'HVACTemplate:Plant:MixedWaterLoop': {
'high_temperature_design_setpoint': 33,
'low_temperature_design_setpoint': 20,
'water_pump_configuration': 'ConstantFlow'
}
}
for object_name, object_fields in object_structure.items():
# set defaults
selected_default_map = plant_loop_default_map.get(object_type)
if selected_default_map:
for field, default_value in selected_default_map.items():
if not object_fields.get(field):
object_fields[field] = default_value
if object_type == 'HVACTemplate:Plant:HotWaterLoop':
loop_system_list = [
'HVACTemplate:System:VAV', 'HVACTemplate:Zone:FanCoil', 'HVACTemplate:Zone:Unitary',
'HVACTemplate:Zone:PTAC', 'HVACTemplate:Zone:PTHP', 'HVACTemplate:Zone:WaterToAirHeatPump',
'HVACTemplate:System:UnitaryHeatPump:AirToAir', 'HVACTemplate:System:PackagedVAV',
'HVACTemplate:System:DedicatedOutdoorAir', 'HVACTemplate:System:ConstantVolume',
'HVACTemplate:System:DualDuct', 'HVACTemplate:Zone:BaseboardHeat',
'HVACTemplate:System:UnitarySystem', 'HVACTemplate:System:VRF']
if not any(hwlst in loop_system_list for hwlst in epjson.keys()):
self.logger.warning(
'Warning: You must specify at least one {} '
'if a HVACTemplate:Plant:HotWaterLoop is defined.'
.format(' or '.join(loop_system_list)))
if object_type == 'HVACTemplate:Plant:ChilledWaterLoop':
loop_system_list = [
'HVACTemplate:System:VAV', 'HVACTemplate:Zone:FanCoil',
'HVACTemplate:System:DedicatedOutdoorAir', 'HVACTemplate:System:ConstantVolume',
'HVACTemplate:System:DualDuct', 'HVACTemplate:System:UnitarySystem']
if not any(hwlst in loop_system_list for hwlst in epjson.keys()):
self.logger.warning(
'Warning: You must specify at least one {} '
'if a HVACTemplate:Plant:ChilledWaterLoop is defined.'
.format(' or '.join(loop_system_list)))
if object_type == 'HVACTemplate:Plant:MixedWaterLoop':
loop_system_list = [
'HVACTemplate:Zone:WaterToAirHeatPump', 'HVACTemplate:System:VRF',
'HVACTemplate:System:UnitarySystem']
if not any(hwlst in loop_system_list for hwlst in epjson.keys()):
self.logger.warning(
'Warning: You must specify at least one {} '
'if a HVACTemplate:Plant:MixedWaterLoop is defined.'
.format(' or '.join(loop_system_list)))
if 'HVACTemplate:Plant:HotWaterLoop' in epjson.keys():
self.logger.warning(
'Warning: In {}'
' an HVACTemplate:Plant:HotWaterLoop is also present. All boilers with blank Template '
'Loop Type field will be connected to the Hot Water Loop.'
.format(object_type))
self.merge_epjson(
super_dictionary=self.templates_plant_loops,
object_dictionary={object_type: object_structure},
unique_name_override=False)
elif re.match('^HVACTemplate:Plant:(Chiller|Tower|Boiler)(:ObjectReference)*$', object_type):
boiler_default_map = {
'HVACTemplate:Plant:Boiler': {
'fuel_type': 'NaturalGas',
'priority': '1',
'efficiency': 0.8,
'water_outlet_upper_temperature_limit': 100.0
},
'HVACTemplate:Plant:Boiler:ObjectReference': {
'boiler_object_type': 'Boiler:HotWater',
'priority': '1'
},
'HVACTemplate:Plant:Chiller': {
'condenser_type': 'WaterCooled'
},
'HVACTemplate:Plant:Chiller:ObjectReference': {
'chiller_object_type': 'Chiller:Electric:EIR',
'priority': '1'
},
'HVACTemplate:Plant:Tower:ObjectReference': {
'cooling_tower_object_type': 'CoolingTower:SingleSpeed'
},
}
for object_name, object_fields in object_structure.items():
# set defaults
selected_default_map = boiler_default_map.get(object_type)
if selected_default_map:
for field, default_value in selected_default_map.items():
if not object_fields.get(field):
object_fields[field] = default_value
# Check boiler inputs
if object_type == 'HVACTemplate:Plant:Boiler':
for object_name, object_fields in object_structure.items():
if not object_fields.get('fuel_type') and \
object_fields.get('boiler_type') != 'DistrictHotWater':
raise InvalidTemplateException(
'Error: In {} ({}) fuel_type must be specified when boiler_type is not '
'DistrictHotWater'.format(object_type, object_name))
# Check tower inputs
if object_type == 'HVACTemplate:Plant:Tower':
for object_name, object_fields in object_structure.items():
high_speed_nominal_capacity = object_fields.get('high_speed_nominal_capacity', 'Autosize')
free_convection_capacity = object_fields.get('free_convection_capacity', 'Autosize')
if (str(high_speed_nominal_capacity).lower() == 'autosize' and str(
free_convection_capacity).lower() != 'autosize') or \
(str(high_speed_nominal_capacity).lower() != 'autosize' and str(
free_convection_capacity).lower() == 'autosize'):
raise InvalidTemplateException(
'Error: In {} ({}) For a {} tower the high speed capacity and free '
'convection capacity both need to be specified or set to autosize.'
.format(object_type, object_name, object_fields.get('tower_type')))
# for plant equipment object references, add the referenced object to epjson for complex input resolution
# later on. For chiller objects, also identify condenser type and make it a template attribute.
elif object_type == 'HVACTemplate:Plant:Boiler:ObjectReference':
for object_name, object_fields in object_structure.items():
reference_object_structure = epjson.get(object_fields['boiler_object_type'])
if not reference_object_structure:
raise InvalidTemplateException(
'Error: In {} ({}) Referenced boiler not found: {}'
.format(object_type, object_name, object_fields))
for reference_object_name, reference_object_fields in reference_object_structure.items():
if reference_object_name == object_fields['boiler_name']:
if reference_object_fields.get('boiler_water_inlet_node_name', '') in ['', 'None']:
raise InvalidTemplateException(
'Error: In {} ({}) Blank Inlet Node Name found in referenced boiler: {}'
.format(object_type, object_name, object_fields))
if reference_object_fields.get('boiler_water_outlet_node_name', '') in ['', 'None']:
raise InvalidTemplateException(
'Error: In {} ({}) Blank Outlet Node Name found in referenced boiler: {}'
.format(object_type, object_name, object_fields))
if reference_object_fields.get('boiler_water_inlet_node_name') == \
reference_object_fields.get('boiler_water_outlet_node_name'):
raise InvalidTemplateException(
'Error: in {} ({}) Duplicate hot water node name found in '
'referenced boiler. All boiler inlet and outlet node names '
'must be unique'
.format(object_type, object_name))
object_structure[object_name]['epjson'] = \
{object_fields['boiler_object_type']: {reference_object_name: reference_object_fields}}
break
if not object_structure[object_name].get('epjson'):
raise InvalidTemplateException(
'Error: In {} ({}) Referenced boiler not found: {}'
.format(object_type, object_name, object_fields))
elif object_type == 'HVACTemplate:Plant:Chiller:ObjectReference':
for object_name, object_fields in object_structure.items():
reference_object_structure = epjson.get(object_fields['chiller_object_type'])
if not reference_object_structure:
raise InvalidTemplateException(
'Error: In {} ({}) Referenced chiller not found: {}'
.format(object_type, object_name, object_fields))
for reference_object_name, reference_object_fields in reference_object_structure.items():
if reference_object_name == object_fields['chiller_name']:
if reference_object_fields.get('chilled_water_inlet_node_name', '') in ['', 'None']:
raise InvalidTemplateException(
'Error: In {} ({}) Blank chilled water Inlet Node Name found in '
'referenced chiller: {}'
.format(object_type, object_name, object_fields))
if reference_object_fields.get('chilled_water_outlet_node_name', '') in ['', 'None']:
raise InvalidTemplateException(
'Error: In {} ({}) Blank chilled water Outlet Node Name found in '
'referenced chiller: {}'
.format(object_type, object_name, object_fields))
if reference_object_fields.get('chilled_water_inlet_node_name') == \
reference_object_fields.get('chilled_water_outlet_node_name'):
raise InvalidTemplateException(
'Error: in {} ({}) Duplicate chilled water node name found in '
'referenced chiller. All chiller inlet and outlet node names '
'must be unique'
.format(object_type, object_name))
try:
object_structure[object_name]['condenser_type'] = reference_object_fields['condenser_type']
except (KeyError, AttributeError):
object_structure[object_name]['condenser_type'] = 'WaterCooled'
if object_structure[object_name]['condenser_type'] == 'WaterCooled':
if reference_object_fields.get('condenser_inlet_node_name', '') in ['', 'None']:
raise InvalidTemplateException(
'Error: In {} ({}) Blank condenser water Inlet Node Name found in '
'referenced chiller: {}'
.format(object_type, object_name, object_fields))
if reference_object_fields.get('condenser_outlet_node_name', '') in ['', 'None']:
raise InvalidTemplateException(
'Error: In {} ({}) Blank condenser water Outlet Node Name found in '
'referenced chiller: {}'
.format(object_type, object_name, object_fields))
if reference_object_fields.get('condenser_inlet_node_name') == \
reference_object_fields.get('condenser_outlet_node_name'):
raise InvalidTemplateException(
'Error: in {} ({}) Duplicate condenser water node name found in '
'referenced chiller. All chiller inlet and outlet node names '
'must be unique'
.format(object_type, object_name))
object_structure[object_name]['epjson'] = \
{object_fields['chiller_object_type']: {reference_object_name: reference_object_fields}}
break
if not object_structure[object_name].get('epjson'):
raise InvalidTemplateException(
'Error: In {} ({}) Referenced chiller not found: {}'
.format(object_type, object_name, object_fields))
elif object_type == 'HVACTemplate:Plant:Tower:ObjectReference':
for object_name, object_fields in object_structure.items():
reference_object_structure = epjson.get(object_fields['cooling_tower_object_type'])
if not reference_object_structure:
raise InvalidTemplateException(
'Error: In {} ({}) Referenced tower not found: {}'
.format(object_type, object_name, object_fields))
for reference_object_name, reference_object_fields in reference_object_structure.items():
if reference_object_name == object_fields['cooling_tower_name']:
if reference_object_fields.get('water_inlet_node_name', '') in ['', 'None']:
raise InvalidTemplateException(
'Error: In {} ({}) Blank Inlet Node Name found in '
                                            'referenced tower: {}'
.format(object_type, object_name, object_fields))
if reference_object_fields.get('water_outlet_node_name', '') in ['', 'None']:
raise InvalidTemplateException(
'Error: In {} ({}) Blank Outlet Node Name found in '
                                            'referenced tower: {}'
.format(object_type, object_name, object_fields))
if reference_object_fields.get('water_inlet_node_name') == \
reference_object_fields.get('water_outlet_node_name'):
raise InvalidTemplateException(
'Error: in {} ({}) Duplicate node name found in referenced tower. '
'All tower inlet and outlet node names must be unique'
.format(object_type, object_name))
object_structure[object_name]['epjson'] = \
{object_fields['cooling_tower_object_type']: {reference_object_name: reference_object_fields}}
break
if not object_structure[object_name].get('epjson'):
raise InvalidTemplateException(
'Error: In {} ({}) Referenced tower not found: {}'
.format(object_type, object_name, object_fields))
self.merge_epjson(
super_dictionary=self.templates_plant_equipment,
object_dictionary={object_type: object_structure},
unique_name_override=False)
else:
raise InvalidTemplateException(
'Error: Template object type {} was not recognized'.format(object_type))
# store original templates into dictionary
self.merge_epjson(
super_dictionary=self.templates,
object_dictionary={object_type: object_structure},
unique_name_override=False)
else:
# store all non-template objects into a base epjson object.
self.merge_epjson(
super_dictionary=self.base_objects,
object_dictionary={object_type: object_structure},
unique_name_override=False)
return
def _expand_templates(self, templates, expand_class, **kwargs):
"""
Run Expand operations on multiple templates
:param templates: dictionary of HVACTemplate:.* objects
:param expand_class: ExpandObjects child class to operate on template (e.g. ExpandZone).
:return: dictionary of expanded objects with unique name as key
"""
expanded_template_dictionary = {}
templates = self.epjson_genexp(templates)
for template in templates:
(_, template_structure), = template.items()
(template_name, template_fields), = template_structure.items()
external_epjson_objects = template_fields.pop('epjson', None)
expanded_template = expand_class(
template=template,
epjson=external_epjson_objects,
logger_level=self.logger_level,
logger_name=self.logger_name,
**kwargs).run()
expanded_template_dictionary[template_name] = expanded_template
return expanded_template_dictionary
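    # Example call (sketch using names defined in this module): passing self.templates_zones with
    # expand_class=ExpandZone yields a {template_name: ExpandZone} mapping, one entry per
    # HVACTemplate:Zone:* object collected during preprocessing.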
def _create_zonecontrol_thermostat(self, zone_class_object):
"""
        Create ZoneControl:Thermostat objects. This operation is performed outside of ExpandObjects because it
        requires cross-referencing between HVACTemplate:Zone and HVACTemplate:Thermostat objects.
        :param zone_class_object: ExpandZone object
        :return: the created Schedule:Compact and ZoneControl:Thermostat objects. These objects are also added
            to the class self.epjson dictionary.
"""
        # Retrieve the thermostat object
try:
thermostat_template_name = getattr(zone_class_object, 'template_thermostat_name')
except AttributeError:
self.logger.info(
'In {} ({}) Zone object does not reference a thermostat class object'
.format(zone_class_object.template_type, zone_class_object.unique_name))
return
except ValueError:
raise InvalidTemplateException('Error: Zone template ({}) is improperly formatted.'
.format(zone_class_object.unique_name))
try:
thermostat_object = self.expanded_thermostats[thermostat_template_name]
except (ValueError, KeyError):
raise InvalidTemplateException('Error: Thermostat object does not exist ({}) but is reference by '
'zone template {}'
.format(thermostat_template_name, zone_class_object.unique_name))
# Evaluate the thermostat type in the thermostat object and format the output object accordingly
try:
zone_name = getattr(zone_class_object, 'zone_name')
thermostat_epjson = {t_type: t_struct for t_type, t_struct
in thermostat_object.epjson.items()
if re.match(r'^ThermostatSetpoint.*', t_type)}
(thermostat_type, thermostat_structure), = thermostat_epjson.items()
(thermostat_name, _), = thermostat_structure.items()
# create control schedule based on thermostat type
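            # (EnergyPlus control type schedule values: 1 = single heating setpoint, 2 = single
            # cooling setpoint, 4 = dual setpoint; the ALWAYS_VAL compact schedule is filled with
            # that constant via build_compact_schedule)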
if thermostat_type == "ThermostatSetpoint:SingleHeating":
control_schedule = ExpandObjects(logger_level=self.logger_level, logger_name=self.logger_name)\
.build_compact_schedule(
structure_hierarchy=['Objects', 'Common', 'Objects', 'Schedule', 'Compact', 'ALWAYS_VAL'],
insert_values=[1, ])
elif thermostat_type == "ThermostatSetpoint:SingleCooling":
control_schedule = ExpandObjects(logger_level=self.logger_level, logger_name=self.logger_name)\
.build_compact_schedule(
structure_hierarchy=['Objects', 'Common', 'Objects', 'Schedule', 'Compact', 'ALWAYS_VAL'],
insert_values=[2, ])
elif thermostat_type == "ThermostatSetpoint:DualSetpoint":
control_schedule = ExpandObjects(logger_level=self.logger_level, logger_name=self.logger_name)\
.build_compact_schedule(
structure_hierarchy=['Objects', 'Common', 'Objects', 'Schedule', 'Compact', 'ALWAYS_VAL'],
insert_values=[4, ])
else:
raise InvalidTemplateException("Error: {} ({}) Invalid thermostat type set in ExpandThermostat"
.format(thermostat_type, thermostat_object.unique_name))
# create zonecontrol object
(_, schedule_structure), = control_schedule.items()
(schedule_name, _), = schedule_structure.items()
zonecontrol_thermostat = {
"ZoneControl:Thermostat": {
"{} Thermostat".format(zone_name): {
"control_1_name": thermostat_name,
"control_1_object_type": thermostat_type,
"control_type_schedule_name": schedule_name,
"zone_or_zonelist_name": "{}".format(zone_name)
}
}
}
self.merge_epjson(
super_dictionary=self.epjson,
object_dictionary=dict(control_schedule, **zonecontrol_thermostat),
unique_name_override=True
)
return dict(control_schedule, **zonecontrol_thermostat)
except (ValueError, AttributeError, KeyError):
raise InvalidTemplateException(
"Error: HVACTemplate failed to build ZoneControl:Thermostat from zone template "
"{}".format(zone_class_object.unique_name)) # pragma: no cover - catchall
@staticmethod
def _get_zone_template_field_from_system_type(template_type):
"""
Retrieve the corresponding zone field name for a system template type
:param template_type: HVACTemplate:System object type
:return: zone field name
"""
# get the zone field_name that will identify the system template name
if re.match(r'HVACTemplate:System:ConstantVolume', template_type):
zone_system_template_field_name = 'template_constant_volume_system_name'
elif re.match(r'HVACTemplate:System:DedicatedOutdoorAir', template_type):
zone_system_template_field_name = 'dedicated_outdoor_air_system_name'
elif re.match(r'HVACTemplate:System:DualDuct', template_type):
zone_system_template_field_name = 'template_dual_duct_system_name'
elif re.match(r'HVACTemplate:System:Unitary.*', template_type):
zone_system_template_field_name = 'template_unitary_system_name'
elif re.match(r'HVACTemplate:System:.*VAV$', template_type):
zone_system_template_field_name = 'template_vav_system_name'
elif re.match(r'HVACTemplate:System:VRF', template_type):
zone_system_template_field_name = 'template_vrf_system_name'
else:
raise InvalidTemplateException(
"Error: Invalid system type passed to supply path creation function: {}".format(template_type))
return zone_system_template_field_name
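    # Illustrative mapping (assumed example inputs, following the regex branches above):
    #     _get_zone_template_field_from_system_type('HVACTemplate:System:VAV')
    #         -> 'template_vav_system_name'
    #     _get_zone_template_field_from_system_type('HVACTemplate:System:DualDuct')
    #         -> 'template_dual_duct_system_name'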
def _create_system_path_connection_objects(self, system_class_object, expanded_zones):
"""
Create objects connecting system supply air to zone objects. An AirLoopHVAC:SupplyPath object is created with
either an AirLoopHVAC:SupplyPlenum or an AirLoopHVAC:ZoneSplitter object. The same is true for
AirLoopHVAC:ReturnPath and AirLoopHVAC:ReturnPlenum/AirLoopHVAC:ZoneMixer.
:param system_class_object: Expanded HVACTemplate:System:.* class object
:param expanded_zones: dictionary of ExpandZone objects
:return: system supply air connection objects. AirLoopHVAC:SupplyPath object and either
        AirLoopHVAC:SupplyPlenum or AirLoopHVAC:ZoneSplitter object as well as AirLoopHVAC:ReturnPath and either
AirLoopHVAC:ReturnPlenum or AirLoopHVAC:ZoneMixer.
"""
zone_system_template_field_name = \
self._get_zone_template_field_from_system_type(template_type=system_class_object.template_type)
# iterate over inlet node name types. For DualDuct, this is two entries (hot/cold). For all other systems,
# this is a single value
if system_class_object.template_type == 'HVACTemplate:System:DualDuct':
inlet_nodes = ['cold_air_inlet_node_name', 'hot_air_inlet_node_name']
else:
inlet_nodes = ['air_inlet_node_name', ]
# create ExpandObjects class object to use some yaml and epjson functions
eo = ExpandObjects(logger_level=self.logger_level, logger_name=self.logger_name)
eo.unique_name = getattr(system_class_object, 'template_name')
# iterate over expanded zones and if the system reference field exists, and is for the referenced system,
# append them in the splitter and mixer lists
zone_return_plenums = []
zone_induced_air_nodes = []
for node_idx, inlet_node in enumerate(inlet_nodes):
zone_splitters = []
zone_mixers = []
zone_supply_plenums = []
for _, ez in expanded_zones.items():
if getattr(ez, zone_system_template_field_name, None) == system_class_object.template_name:
if getattr(ez, 'flow_type', None) in ['SeriesFromPlenum', 'ParallelFromPlenum']:
zone_induced_air_node = ez.unique_name
else:
zone_induced_air_node = None
if getattr(ez, 'supply_plenum_name', None) or (
getattr(ez, 'cold_supply_plenum_name', None) and inlet_node == 'cold_air_inlet_node_name') or (
getattr(ez, 'hot_supply_plenum_name', None) and inlet_node == 'hot_air_inlet_node_name'):
try:
zone_supply_equipment = {'AirLoopHVAC:SupplyPlenum': ez.epjson['AirLoopHVAC:SupplyPlenum']}
except (KeyError, AttributeError):
raise InvalidTemplateException(
'Error: supply_plenum_name indicated for zone template {} but '
'AirLoopHVAC:SupplyPlenum was not created'.format(ez.unique_name))
else:
zone_supply_equipment = self.get_epjson_objects(
epjson=ez.epjson,
object_type_regexp=r'^AirTerminal:.*')
try:
(zone_supply_equipment_type, zone_supply_equipment_structure), = zone_supply_equipment.items()
(zone_supply_equipment_name, zone_supply_equipment_fields), = zone_supply_equipment_structure.items()
if zone_supply_equipment_type == 'AirLoopHVAC:SupplyPlenum':
outlet_node_name = zone_supply_equipment_fields['inlet_node_name']
zone_supply_plenums.append({
'component_name': zone_supply_equipment_name,
'component_object_type': zone_supply_equipment_type
})
elif zone_supply_equipment_type in ['AirTerminal:SingleDuct:SeriesPIU:Reheat',
'AirTerminal:SingleDuct:ParallelPIU:Reheat']:
# Raise error if inlet node name is overridden for multi-inlet node systems (DualDuct)
if len(inlet_nodes) > 1:
raise InvalidTemplateException(
'Error: Series or Parallel PIU is being referenced '
'by an invalid system {}'.format(system_class_object.template_type))
outlet_node_name = zone_supply_equipment_fields['supply_air_inlet_node_name']
else:
outlet_node_name = zone_supply_equipment_fields[inlet_node]
except (KeyError, AttributeError, ValueError):
raise InvalidTemplateException(
'Error: Search for zone equipment from Supply Path creation failed for '
'outlet node. system {}, zone {}, zone equipment {}'
.format(system_class_object.template_name, ez.unique_name, zone_supply_equipment))
if getattr(ez, 'return_plenum_name', None):
try:
zone_return_equipment = {'AirLoopHVAC:ReturnPlenum': ez.epjson['AirLoopHVAC:ReturnPlenum']}
except (KeyError, AttributeError):
raise InvalidTemplateException(
'Error: return_plenum_name indicated for zone template {} but '
'AirLoopHVAC:ReturnPlenum was not created'.format(ez.unique_name))
else:
try:
zone_return_equipment = {'ZoneHVAC:EquipmentConnections': ez.epjson['ZoneHVAC:EquipmentConnections']}
except (KeyError, AttributeError, ValueError):
raise InvalidTemplateException(
'Error: Search for ZoneHVAC:EquipmentConnections object from Supply '
'Path creation failed for inlet node. system {}, zone {}'
.format(system_class_object.template_name, ez.unique_name))
try:
(zone_return_equipment_type, zone_return_equipment_structure), = zone_return_equipment.items()
(zone_return_equipment_name, zone_return_equipment_fields), = zone_return_equipment_structure.items()
if zone_return_equipment_type == 'AirLoopHVAC:ReturnPlenum':
inlet_node_name = zone_return_equipment_fields['outlet_node_name']
# use node_idx to prevent multiple zone_return_plenum objects from being created in dualduct zones
if node_idx == 0:
zone_return_plenums.append({
'component_name': zone_return_equipment_name,
'component_object_type': zone_return_equipment_type
})
else:
inlet_node_name = zone_return_equipment_fields['zone_return_air_node_or_nodelist_name']
except (KeyError, AttributeError, ValueError):
raise InvalidTemplateException(
'Error: Search for zone equipment from Return Path creation failed for '
'inlet node. system {}, zone {}, zone equipment {}'
.format(system_class_object.template_name, ez.unique_name, zone_return_equipment))
zone_splitters.append(
{
"outlet_node_name": outlet_node_name
}
)
zone_mixers.append(
{
"inlet_node_name": inlet_node_name
}
)
if zone_induced_air_node:
# This is for PIU objects that use SeriesFromPlenum or ParallelFromPlenum
zone_induced_air_nodes.append(
{
"node_name": '{} Return'.format(zone_induced_air_node)
}
)
            # create plenums or splitters/mixers, depending on template inputs
supply_object = None
supply_plenum_name = getattr(system_class_object, 'supply_plenum_name', None)
cold_supply_plenum_name = getattr(system_class_object, 'cold_supply_plenum_name', None)
hot_supply_plenum_name = getattr(system_class_object, 'hot_supply_plenum_name', None)
if system_class_object.template_type == 'HVACTemplate:System:DualDuct' and \
cold_supply_plenum_name and inlet_node.startswith('cold_air'):
eo.cold_supply_plenum_name = cold_supply_plenum_name
cold_supply_object = eo.get_structure(structure_hierarchy=[
'AutoCreated', 'System', 'AirLoopHVAC', 'SupplyPlenum', 'DualDuct', 'Cold'])
cold_supply_object['nodes'] = zone_splitters
supply_object = {'AirLoopHVAC:SupplyPlenum': cold_supply_object}
elif system_class_object.template_type == 'HVACTemplate:System:DualDuct' and \
hot_supply_plenum_name and inlet_node.startswith('hot_air'):
eo.hot_supply_plenum_name = hot_supply_plenum_name
hot_supply_object = eo.get_structure(structure_hierarchy=[
'AutoCreated', 'System', 'AirLoopHVAC', 'SupplyPlenum', 'DualDuct', 'Hot'])
hot_supply_object['nodes'] = zone_splitters
supply_object = {'AirLoopHVAC:SupplyPlenum': hot_supply_object}
elif supply_plenum_name:
                # set supply plenum name attribute for transition and mapping processing
eo.supply_plenum_name = supply_plenum_name
supply_object = eo.get_structure(structure_hierarchy=[
'AutoCreated', 'System', 'AirLoopHVAC', 'SupplyPlenum', 'Base'])
supply_object['nodes'] = zone_splitters
supply_object = {'AirLoopHVAC:SupplyPlenum': supply_object}
else:
supply_object = eo.get_structure(structure_hierarchy=[
'AutoCreated', 'System', 'AirLoopHVAC', 'ZoneSplitter', 'Base'])
supply_object['nodes'] = zone_splitters
supply_object = {'AirLoopHVAC:ZoneSplitter': supply_object}
# Add Path objects
supply_path_object = {'AirLoopHVAC:SupplyPath':
eo.get_structure(structure_hierarchy=[
'AutoCreated', 'System', 'AirLoopHVAC', 'SupplyPath', 'Base'])}
# add zone supply plenums if they were created
if zone_supply_plenums:
(_, supply_path_object_fields), = supply_path_object.items()
supply_path_object_fields['components'].extend(zone_supply_plenums)
# Rename objects if multi-inlet node system is used
if system_class_object.template_type == 'HVACTemplate:System:DualDuct':
(_, supply_object_fields), = supply_object.items()
(_, supply_path_object_fields), = supply_path_object.items()
if inlet_node.startswith('cold_air'):
supply_object_fields['name'] = supply_object_fields['name'].replace('{}', '{} Cold')
supply_object_fields['inlet_node_name'] = supply_object_fields['inlet_node_name'].replace('{}', '{} Cold')
supply_path_object_fields['name'] = supply_path_object_fields['name'].replace('{}', '{} Cold')
if inlet_node.startswith('hot_air'):
supply_object_fields['name'] = supply_object_fields['name'].replace('{}', '{} Hot')
supply_object_fields['inlet_node_name'] = supply_object_fields['inlet_node_name'].replace('{}', '{} Hot')
supply_path_object_fields['name'] = supply_path_object_fields['name'].replace('{}', '{} Hot')
path_dictionary = eo.yaml_list_to_epjson_dictionaries(
yaml_list=[supply_object, supply_path_object])
resolved_path_dictionary = eo.resolve_objects(epjson=path_dictionary)
            # save output to class epjson
self.merge_epjson(
super_dictionary=self.epjson,
object_dictionary=resolved_path_dictionary)
# Create return objects
return_plenum_name = getattr(system_class_object, 'return_plenum_name', None)
return_nodelist = {}
if return_plenum_name:
# set return plenum name attribute for transition and mapping processing
eo.return_plenum_name = return_plenum_name
return_object = eo.get_structure(structure_hierarchy=[
'AutoCreated', 'System', 'AirLoopHVAC', 'ReturnPlenum', 'Base'])
return_object['nodes'] = zone_mixers
return_object = {'AirLoopHVAC:ReturnPlenum': return_object}
if zone_induced_air_nodes:
return_object['AirLoopHVAC:ReturnPlenum']['induced_air_outlet_node_or_nodelist_name'] = \
'{} Induced Air Nodes'.format(system_class_object.template_name)
return_nodelist = {
'NodeList': {
'name': '{} Induced Air Nodes'.format(system_class_object.template_name),
"nodes": zone_induced_air_nodes
}
}
else:
return_object = eo.get_structure(structure_hierarchy=[
'AutoCreated', 'System', 'AirLoopHVAC', 'ZoneMixer', 'Base'])
return_object['nodes'] = zone_mixers
return_object = {'AirLoopHVAC:ZoneMixer': return_object}
# Add Path objects
return_path_object = {
'AirLoopHVAC:ReturnPath':
eo.get_structure(structure_hierarchy=[
'AutoCreated', 'System', 'AirLoopHVAC', 'ReturnPath', 'Base'])}
# add zone return plenums if they were created
if zone_return_plenums:
(_, return_path_object_fields), = return_path_object.items()
# only take the first item, subsequent items are only duplicates from dualduct zone templates
return_path_object_fields['components'] = zone_return_plenums + return_path_object_fields['components']
path_dictionary = eo.yaml_list_to_epjson_dictionaries(
yaml_list=[return_object, return_path_object, return_nodelist])
resolved_path_dictionary = eo.resolve_objects(epjson=path_dictionary)
        # save output to class epjson
self.merge_epjson(
super_dictionary=self.epjson,
object_dictionary=resolved_path_dictionary)
return resolved_path_dictionary
def _create_system_vrf_path_connection_objects(self, system_class_object, expanded_zones):
"""
Create objects connecting VRF system to zone objects.
:param system_class_object: Expanded HVACTemplate:System:.* class object
:param expanded_zones: dictionary of ExpandZone objects
        :return: None. A ZoneTerminalUnitList object connecting the VRF system to its zone terminal units is
        created and added to the class epJSON dictionary.
"""
# create ExpandObjects class object to use some yaml and epjson functions
eo = ExpandObjects(logger_level=self.logger_level, logger_name=self.logger_name)
eo.unique_name = getattr(system_class_object, 'template_name')
vrf_object_name_list = []
zone_system_template_field_name = \
self._get_zone_template_field_from_system_type(template_type=system_class_object.template_type)
for _, ez in expanded_zones.items():
if getattr(ez, zone_system_template_field_name, None) == system_class_object.template_name:
try:
vrf_object = ez.epjson['ZoneHVAC:TerminalUnit:VariableRefrigerantFlow']
(vrf_object_name, _), = vrf_object.items()
except (KeyError, AttributeError):
raise InvalidTemplateException(
"Error: VRF zone template {} expanded with no "
"ZoneHVAC:TerminalUnit:VariableRefrigerantFlow object".format(ez.unique_name))
except ValueError:
raise InvalidTemplateException(
'ZoneHVAC:TerminalUnit:VariableRefrigerantFlow object incorrectly formatted: {}'
.format(ez.epjson.get('ZoneHVAC:TerminalUnit:VariableRefrigerantFlow', 'None')))
vrf_object_name_list.append({'zone_terminal_unit_name': vrf_object_name})
if vrf_object_name_list:
vrf_terminal_object = eo.get_structure(structure_hierarchy=[
'AutoCreated', 'System', 'ZoneTerminalUnitList', 'Base'])
vrf_terminal_object['terminal_units'] = vrf_object_name_list
path_dictionary = eo.yaml_list_to_epjson_dictionaries(
yaml_list=[{'ZoneTerminalUnitList': vrf_terminal_object}, ])
resolved_path_dictionary = eo.resolve_objects(epjson=path_dictionary)
            # save output to class epjson
self.merge_epjson(
super_dictionary=self.epjson,
object_dictionary=resolved_path_dictionary)
else:
raise InvalidTemplateException(
'Error: Failed to create VRF terminal unit list for {}'.format(system_class_object.template_name))
return
def _create_templates_from_plant_equipment(self, plant_equipment_class_object, expanded_plant_loops):
"""
        Create plant loop and plant equipment templates from ExpandPlantEquipment object attributes.
        These outputs will be used as inputs to initialize new ExpandPlantLoop and ExpandPlantEquipment classes.
        This process must be performed because ExpandPlantLoop must be
        run before ExpandPlantEquipment; however, certain equipment inputs can cause new loops to be created.
:param plant_equipment_class_object: ExpandPlantEquipment class object
:param expanded_plant_loops: ExpandPlantLoop objects
        :return: Tuple of dictionaries of HVACTemplate:Plant loop and equipment template objects used to create
            ExpandPlantLoop and ExpandPlantEquipment objects
"""
# create dictionary to store plant loops
plant_loop_dictionary = {}
plant_equipment_dictionary = {}
# get each loop type specified in the existing plant loop class objects
plant_loops = [getattr(pl, 'template_type').lower() for pl in expanded_plant_loops.values()]
# create condenser water loop for water cooled condensers
if getattr(plant_equipment_class_object, 'template_type', None).lower() in \
['hvactemplate:plant:chiller', 'hvactemplate:plant:chiller:objectreference'] \
and getattr(plant_equipment_class_object, 'condenser_type', 'WaterCooled').lower() == 'watercooled' \
and 'hvactemplate:plant:condenserwaterloop' not in plant_loops \
and getattr(plant_equipment_class_object, 'chiller_type', None) != 'DistrictChilledWater':
# try to get the chilled water loop attributes to transition to condenser water
chw_loop = [
pl for pl
in expanded_plant_loops.values()
if getattr(pl, 'template_type').lower() == 'hvactemplate:plant:chilledwaterloop']
cndw_attributes = {}
# transfer ChilledWaterLoop attributes to CondenserWaterLoop
if chw_loop:
for cndw_attribute, chw_attribute in zip(
['condenser_water_pump_rated_head', 'condenser_water_design_setpoint',
'condenser_plant_operation_scheme_type', 'condenser_equipment_operation_schemes_name',
'condenser_water_temperature_control_type', 'condenser_water_setpoint_schedule_name',
'pump_schedule_name', 'pump_control_type', 'condenser_water_pump_type',
'condenser_water_supply_side_bypass_pipe', 'condenser_water_demand_side_bypass_pipe',
'condenser_water_load_distribution_scheme'],
['condenser_water_pump_rated_head', 'condenser_water_design_setpoint',
'condenser_plant_operation_scheme_type', 'condenser_equipment_operation_schemes_name',
'condenser_water_temperature_control_type', 'condenser_water_setpoint_schedule_name',
'pump_schedule_name', 'pump_control_type', 'condenser_water_pump_type',
'condenser_water_supply_side_bypass_pipe', 'condenser_water_demand_side_bypass_pipe',
'condenser_water_load_distribution_scheme']):
try:
cndw_attributes[cndw_attribute] = getattr(chw_loop[0], chw_attribute)
except AttributeError:
self.logger.debug('Chilled water attribute {} not set by user, using default for '
'condenser water'.format(chw_attribute))
cndw_attributes['template_plant_loop_type'] = 'CondenserWaterLoop'
self.merge_epjson(
super_dictionary=plant_loop_dictionary,
object_dictionary={
'HVACTemplate:Plant:CondenserWaterLoop': {
'Condenser Water Loop': cndw_attributes
}
})
# append plant loop to list to prevent another one being added.
plant_loops.append('hvactemplate:plant:condenserwaterloop')
return plant_loop_dictionary, plant_equipment_dictionary
def _create_additional_plant_loops_and_equipment_from_equipment(
self,
expanded_plant_equipment,
expanded_plant_loops):
"""
Create additional HVACTemplate:Plant:.*Loops based on HVACTemplate:Plant:(Chiller|Tower|Boiler) inputs
:param expanded_plant_equipment: ExpandPlantEquipment objects
:param expanded_plant_loops: ExpandPlantLoop objects
:return: Additional plant loop and equipment templates and objects added to expanded classes attributes
"""
# create deepcopy to iterate over because the expanded_plant_equipment object may change size during iteration
epe = copy.deepcopy(expanded_plant_equipment)
for epl_name, epl in epe.items():
plant_loop_template, plant_equipment_template = self._create_templates_from_plant_equipment(
plant_equipment_class_object=epl,
expanded_plant_loops=expanded_plant_loops)
# If a plant loop was created, reprocess it here.
if plant_loop_template:
# add new plant loop to the templates
for tmpl in [self.templates, self.templates_plant_loops]:
self.merge_epjson(
super_dictionary=tmpl,
object_dictionary=plant_loop_template
)
# Expand new plant loop and add to the class objects
additional_plant_loops = self._expand_templates(
templates=plant_loop_template,
expand_class=ExpandPlantLoop
)
try:
for expanded_name, expanded_object in additional_plant_loops.items():
if expanded_name not in expanded_plant_loops.keys():
expanded_plant_loops[expanded_name] = expanded_object
except (AttributeError, ValueError):
                    raise InvalidTemplateException(
'Error: A Plant loop was specified to be created from a plant equipment object '
'{}, but the process failed to attach the created objects'.format(epl_name))
# if a plant equipment template was created, process it here
if plant_equipment_template:
# add new plant equipment to the templates
for tmpl in [self.templates, self.templates_plant_equipment]:
self.merge_epjson(
super_dictionary=tmpl,
object_dictionary=plant_equipment_template
)
# Expand new plant equipment and add to the class objects
# pass updated expanded_plant_loops to the class initialization as well.
additional_plant_equipment = self._expand_templates(
templates=plant_equipment_template,
expand_class=ExpandPlantEquipment,
plant_loop_class_objects=expanded_plant_loops
)
try:
for expanded_name, expanded_object in additional_plant_equipment.items():
                        if expanded_name not in expanded_plant_equipment.keys():
expanded_plant_equipment[expanded_name] = expanded_object
except (AttributeError, ValueError):
raise InvalidTemplateException(
'Error: A Plant equipment was specified to be created from a plant '
                        'equipment object {}, but the process failed to attach the created objects'.format(epl_name))
return
@staticmethod
def _get_plant_equipment_waterloop_branches_by_loop_type(
plant_loop_class_object,
expanded_plant_equipment):
"""
Extract plant equipment branches by loop type and store in epJSON formatted dictionary
:param plant_loop_class_object: ExpandPlantLoop object
:param expanded_plant_equipment: dictionary of ExpandPlantEquipment objects
:return: epJSON formatted dictionary of branch objects for loop connections
"""
branch_dictionary = {}
for pe in expanded_plant_equipment.values():
branch_objects = copy.deepcopy(pe.epjson.get('Branch', {}))
for branch_name, branch_structure in branch_objects.items():
components = branch_structure.get('components')
if not components:
raise InvalidTemplateException(
'Error: In {} ({}) A branch object failed to create component fields {}'
.format(pe.template_type, pe.template_name, branch_name))
# Special handling for chillers with condenser water and chilled water branches
# todo_eo: Currently the chilled and condenser water branches are separated by parsing the names. A more
# robust solution should be investigated.
if pe.template_type in ['HVACTemplate:Plant:Chiller', 'HVACTemplate:Plant:Chiller:ObjectReference'] \
and getattr(pe, 'condenser_type', 'WaterCooled') == 'WaterCooled':
for branch_name, branch_structure in branch_objects.items():
if 'chilledwater' in plant_loop_class_object.template_type.lower() and 'chw' in branch_name.lower():
branch_dictionary.update({branch_name: branch_objects[branch_name]})
if 'condenserwater' in plant_loop_class_object.template_type.lower() and 'cnd' in branch_name.lower():
branch_dictionary.update({branch_name: branch_objects[branch_name]})
# typical handling when all plant equipment branches belong in one loop
elif pe.template_plant_loop_type in plant_loop_class_object.template_type:
branch_dictionary.update(branch_objects)
if branch_dictionary:
return {'Branch': branch_dictionary}
else:
return None
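    # Illustrative return shape when matching branches are found (hypothetical branch names):
    #     {'Branch': {'Main Chiller ChW Branch': {'components': [...]}, ...}}
    # None is returned when no plant equipment branches belong to the given loop type.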
@staticmethod
def _get_zone_system_waterloop_branches_by_loop_type(
plant_loop_class_object,
expanded_zones,
expanded_systems):
"""
Extract zone and system branch objects by loop type and store in epJSON formatted dictionary
:param plant_loop_class_object: ExpandPlantLoop class object
:param expanded_zones: ExpandZone objects
:param expanded_systems: ExpandSystem objects
:return: epJSON formatted dictionary of branch objects
"""
# create list of regex matches for the given loop
if 'chilledwater' in plant_loop_class_object.template_type.lower():
branch_rgx = ['^Coil:Cooling:Water($|:DetailedGeometry)+', ]
elif 'hotwater' in plant_loop_class_object.template_type.lower():
branch_rgx = ['^Coil:Heating:Water($|:DetailedGeometry)+', '^ZoneHVAC:Baseboard.*Water']
elif 'mixedwater' in plant_loop_class_object.template_type.lower():
branch_rgx = ['^Coil:.*HeatPump.*', '^AirConditioner:VariableRefrigerantFlow$']
elif 'condenserwater' in plant_loop_class_object.template_type.lower():
return None
else:
            raise InvalidTemplateException('Error: an invalid loop type was specified when creating plant loop connections: {}'
.format(plant_loop_class_object.template_type))
branch_dictionary = {}
object_list = [expanded_zones or {}, expanded_systems or {}]
for class_object in object_list:
for co in class_object.values():
branch_objects = copy.deepcopy(co.epjson.get('Branch', {}))
for branch_name, branch_structure in branch_objects.items():
# the regex check for 'main branch' is to avoid DualDuct main branches from accidentally being
                    # included, since they have coil objects in them as well. The typical main branch is never accidentally
# caught because the coil objects are never in the 0th position.
for br in branch_rgx:
if re.match(br, branch_structure['components'][0]['component_object_type']) and not \
re.match('.*main branch$', branch_name.lower()):
branch_dictionary.update({branch_name: branch_objects[branch_name]})
if branch_dictionary:
return {'Branch': branch_dictionary}
else:
return None
def _split_supply_and_demand_side_branches(
self,
plant_loop_class_object,
expanded_plant_equipment,
expanded_systems,
expanded_zones):
"""
Separate plant equipment, zone, and system branches into supply and demand sides for a given ExpandPlantLoop
object.
:param plant_loop_class_object: ExpandPlantLoop class object
:param expanded_plant_equipment: expanded dictionary of ExpandPlantEquipment objects
:param expanded_systems: expanded dictionary of ExpandSystem objects
:param expanded_zones: expanded dictionary of ExpandZone objects
:return: tuple of demand and supply side branches for processing
"""
# Get plant equipment, zone, and system branches
plant_equipment_branch_dictionary = self._get_plant_equipment_waterloop_branches_by_loop_type(
plant_loop_class_object=plant_loop_class_object,
expanded_plant_equipment=expanded_plant_equipment
)
zone_system_branch_dictionary = self._get_zone_system_waterloop_branches_by_loop_type(
plant_loop_class_object=plant_loop_class_object,
expanded_zones=expanded_zones,
expanded_systems=expanded_systems
)
# get branches in the loop
demand_branches = {}
# Special handling for condenser water loop where the chiller objects are the demand side.
if 'condenserwater' in plant_loop_class_object.template_type.lower():
pebd = copy.deepcopy(plant_equipment_branch_dictionary)
for object_name, object_structure in plant_equipment_branch_dictionary['Branch'].items():
try:
if re.match(r'Chiller:.*', object_structure['components'][0]['component_object_type']):
demand_branches.update({object_name: pebd['Branch'].pop(object_name)})
except (AttributeError, KeyError):
raise InvalidTemplateException(
'Error: Branch object is incorrectly formatted: {}'.format(plant_equipment_branch_dictionary))
supply_branches = pebd['Branch']
else:
demand_branches = zone_system_branch_dictionary.get('Branch') if zone_system_branch_dictionary else None
supply_branches = plant_equipment_branch_dictionary.get('Branch') \
if plant_equipment_branch_dictionary else None
return demand_branches, supply_branches
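    # Illustrative return value (hypothetical names): a tuple of epJSON 'Branch' sub-dictionaries,
    #     (demand_branches, supply_branches) -> ({'Zone Heating Coil Branch': {...}}, {'Boiler Branch': {...}})
    # Either element may be None or empty when no matching branches exist for the loop.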
def _create_water_loop_connectors_and_nodelist(
self,
plant_loop_class_object,
expanded_plant_equipment,
expanded_zones=None,
expanded_systems=None):
"""
        Create BranchList, Connector, ConnectorList, and supply NodeList objects that connect the PlantLoop to supply
and demand water objects. This operation is performed outside of ExpandObjects because it requires outputs
from ExpandPlantEquipment, ExpandZone, and ExpandSystem objects.
:param plant_loop_class_object: ExpandPlantLoop class object
:param expanded_plant_equipment: expanded dictionary of ExpandPlantEquipment objects
:param expanded_systems: expanded dictionary of ExpandSystem objects
:param expanded_zones: expanded dictionary of ExpandZone objects
        :return: Updated class epjson attribute with BranchList, Connector, and ConnectorList objects.
"""
# Get plant equipment, zone, and system branches. Split them into demand and supply sides
demand_branches, supply_branches = self._split_supply_and_demand_side_branches(
plant_loop_class_object=plant_loop_class_object,
expanded_plant_equipment=expanded_plant_equipment,
expanded_systems=expanded_systems,
expanded_zones=expanded_zones
)
# check to make sure loops aren't empty
if demand_branches:
if plant_loop_class_object.template_type == 'HVACTemplate:Plant:ChilledWaterLoop':
try:
equipment_types = [
(component[-1]['component_name'], component[-1]['component_object_type']) for
object_name, object_structure in supply_branches.items()
for component in object_structure.values()]
except AttributeError:
raise PyExpandObjectsYamlStructureException(
                        'Error: In {} ({}) No supply branches found for plant loop object'
.format(plant_loop_class_object.template_type, plant_loop_class_object.unique_name))
chillers = [i for i in equipment_types if re.match(r'Chiller:.*', i[1])]
towers = [i for i in equipment_types if re.match(r'CoolingTower:.*', i[1])]
                # For water-cooled chillers, the tower is in the condenser water loop, so that loop needs to be
                # checked instead of the chilled water loop
if 'CondenserWaterLoop' in [
ep_structure.template_plant_loop_type for ep_name, ep_structure in expanded_plant_equipment.items()
if ep_structure.template_type in ['HVACTemplate:Plant:Tower',
'HVACTemplate:Plant:Tower:ObjectReference']]:
towers = True
if chillers and not towers and 'CondenserWaterLoop' in [
ep_structure.template_plant_loop_type
for ep_name, ep_structure in expanded_plant_equipment.items()]:
raise InvalidTemplateException(
'Error: In {} ({})'
' there is one or more water cooled chiller(s) but there are no towers serving this loop.'
.format(plant_loop_class_object.template_type, plant_loop_class_object.unique_name))
if not demand_branches or not supply_branches:
msg = []
if not demand_branches:
msg.append('There is no demand-side equipment connected to this loop.')
if not supply_branches:
msg.append('There is no supply-side equipment serving this loop.')
raise InvalidTemplateException(
'Error: in {} ({}). {}'
.format(plant_loop_class_object.template_type, plant_loop_class_object.unique_name,
' '.join(msg)))
# Use ExpandObjects class for helper functions
eo = ExpandObjects(logger_level=self.logger_level, logger_name=self.logger_name)
eo.unique_name = getattr(plant_loop_class_object, 'template_name')
# create connector objects based on template attributes
if (plant_loop_class_object.template_type == 'HVACTemplate:Plant:ChilledWaterLoop' and getattr(
plant_loop_class_object, 'chilled_water_supply_side_bypass_pipe', 'Yes') == 'No') or \
(plant_loop_class_object.template_type == 'HVACTemplate:Plant:CondenserWaterLoop' and getattr(
plant_loop_class_object, 'condenser_water_supply_side_bypass_pipe', 'Yes') == 'No') or \
(plant_loop_class_object.template_type == 'HVACTemplate:Plant:HotWaterLoop' and getattr(
plant_loop_class_object, 'supply_side_bypass_pipe', 'Yes') == 'No') or \
(plant_loop_class_object.template_type == 'HVACTemplate:Plant:MixedWaterLoop' and getattr(
plant_loop_class_object, 'supply_side_bypass_pipe', 'Yes') == 'No'):
supply_branchlist = eo.get_structure(
structure_hierarchy=['AutoCreated', 'PlantLoop', 'BranchList', 'SupplyNoBypass'])
connector_supply_mixer = eo.get_structure(
structure_hierarchy=['AutoCreated', 'PlantLoop', 'Connector', 'Mixer', 'SupplyNoBypass'])
connector_supply_splitter = eo.get_structure(
structure_hierarchy=['AutoCreated', 'PlantLoop', 'Connector', 'Splitter', 'SupplyNoBypass'])
# set the 'branches' value type to list if it's none
if not connector_supply_mixer['branches']:
connector_supply_splitter['branches'] = []
if not connector_supply_splitter['branches']:
connector_supply_mixer['branches'] = []
else:
supply_branchlist = eo.get_structure(
structure_hierarchy=['AutoCreated', 'PlantLoop', 'BranchList', 'Supply'])
connector_supply_mixer = eo.get_structure(
structure_hierarchy=['AutoCreated', 'PlantLoop', 'Connector', 'Mixer', 'Supply'])
connector_supply_splitter = eo.get_structure(
structure_hierarchy=['AutoCreated', 'PlantLoop', 'Connector', 'Splitter', 'Supply'])
if (plant_loop_class_object.template_type == 'HVACTemplate:Plant:ChilledWaterLoop' and getattr(
plant_loop_class_object, 'chilled_water_demand_side_bypass_pipe', 'Yes') == 'No') or \
(plant_loop_class_object.template_type == 'HVACTemplate:Plant:CondenserWaterLoop' and getattr(
plant_loop_class_object, 'condenser_water_demand_side_bypass_pipe', 'Yes') == 'No') or \
(plant_loop_class_object.template_type == 'HVACTemplate:Plant:HotWaterLoop' and getattr(
plant_loop_class_object, 'demand_side_bypass_pipe', 'Yes') == 'No') or \
(plant_loop_class_object.template_type == 'HVACTemplate:Plant:MixedWaterLoop' and getattr(
plant_loop_class_object, 'demand_side_bypass_pipe', 'Yes') == 'No'):
demand_branchlist = eo.get_structure(
structure_hierarchy=['AutoCreated', 'PlantLoop', 'BranchList', 'DemandNoBypass'])
connector_demand_splitter = eo.get_structure(
structure_hierarchy=['AutoCreated', 'PlantLoop', 'Connector', 'Splitter', 'DemandNoBypass'])
connector_demand_mixer = eo.get_structure(
structure_hierarchy=['AutoCreated', 'PlantLoop', 'Connector', 'Mixer', 'DemandNoBypass'])
# set the 'branches' value type to list if it's none
if not connector_demand_mixer['branches']:
connector_demand_splitter['branches'] = []
if not connector_demand_splitter['branches']:
connector_demand_mixer['branches'] = []
else:
demand_branchlist = eo.get_structure(
structure_hierarchy=['AutoCreated', 'PlantLoop', 'BranchList', 'Demand'])
connector_demand_splitter = eo.get_structure(
structure_hierarchy=['AutoCreated', 'PlantLoop', 'Connector', 'Splitter', 'Demand'])
connector_demand_mixer = eo.get_structure(
structure_hierarchy=['AutoCreated', 'PlantLoop', 'Connector', 'Mixer', 'Demand'])
# create supply nodelist
supply_nodelist = eo.get_structure(
structure_hierarchy=['AutoCreated', 'PlantLoop', 'NodeList', 'Supply'])
# apply branches
try:
for branch in demand_branches:
demand_branchlist['branches'].insert(1, {'branch_name': branch})
connector_demand_splitter['branches'].append({'outlet_branch_name': branch})
connector_demand_mixer['branches'].append({'inlet_branch_name': branch})
for branch in supply_branches:
supply_branchlist['branches'].insert(1, {'branch_name': branch})
connector_supply_splitter['branches'].insert(-1, {'outlet_branch_name': branch})
connector_supply_mixer['branches'].insert(-1, {'inlet_branch_name': branch})
supply_nodelist['nodes'].insert(
0,
{'node_name': supply_branches[branch]['components'][-1]['component_outlet_node_name']})
except (KeyError, AttributeError):
raise PyExpandObjectsYamlStructureException(
'Error: In {} AutoCreated PlantLoop Connector YAML object was '
'improperly formatted'.format(plant_loop_class_object.template_type))
# add connector list
demand_connectorlist = eo.get_structure(
structure_hierarchy=['AutoCreated', 'PlantLoop', 'ConnectorList', 'Demand']
)
supply_connectorlist = eo.get_structure(
structure_hierarchy=['AutoCreated', 'PlantLoop', 'ConnectorList', 'Supply']
)
# format yaml objects into epJSON dictionaries, resolve, and output
connector_dictionary = eo.yaml_list_to_epjson_dictionaries(
yaml_list=[
{'BranchList': demand_branchlist},
{'BranchList': supply_branchlist},
{'Connector:Splitter': connector_demand_splitter},
{'Connector:Splitter': connector_supply_splitter},
{'Connector:Mixer': connector_demand_mixer},
{'Connector:Mixer': connector_supply_mixer},
{'ConnectorList': demand_connectorlist},
{'ConnectorList': supply_connectorlist},
{'NodeList': supply_nodelist}
])
resolved_path_dictionary = eo.resolve_objects(epjson=connector_dictionary)
        # save output to class epjson
self.merge_epjson(
super_dictionary=self.epjson,
object_dictionary=resolved_path_dictionary
)
return
def _create_plant_equipment_lists(
self,
plant_loop_class_object,
expanded_plant_equipment):
"""
Create PlantEquipmentList and CondenserEquipmentList for a given ExpandPlantLoop class object.
This operation is performed outside of ExpandObjects because it requires outputs from
ExpandPlantEquipment objects.
:param plant_loop_class_object: ExpandPlantLoop class object
:param expanded_plant_equipment: expanded dictionary of ExpandPlantEquipment objects
        :return: Updated class epjson attribute with PlantEquipmentList or CondenserEquipmentList.
"""
# Get plant equipment, zone, and system branches. Split them into demand and supply sides
_, supply_branches = self._split_supply_and_demand_side_branches(
plant_loop_class_object=plant_loop_class_object,
expanded_plant_equipment=expanded_plant_equipment,
expanded_systems=None,
expanded_zones=None
)
equipment = []
# Extract priority from each equipment object referenced by the branch and use it to order the equipment list
supply_branches_with_priority = []
for sb in supply_branches.values():
for equipment_name, equipment_class in expanded_plant_equipment.items():
if equipment_class.template_type == 'HVACTemplate:Plant:Boiler:ObjectReference':
equipment_name = equipment_class.boiler_name
elif equipment_class.template_type == 'HVACTemplate:Plant:Chiller:ObjectReference':
equipment_name = equipment_class.chiller_name
elif equipment_class.template_type == 'HVACTemplate:Plant:Tower:ObjectReference':
equipment_name = equipment_class.cooling_tower_name
if sb['components'][-1]['component_name'] == equipment_name:
# make tuple of (object, priority)
# if priority isn't set, use infinity to push it to the end when sorted
supply_branches_with_priority.append((sb, getattr(equipment_class, 'priority', float('inf'))))
supply_branches_ordered = [
branch for branch, priority
in sorted(supply_branches_with_priority, key=lambda s: s[1])]
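        # Ordering behavior (hypothetical example): equipment with priority 1, 2, ... is listed first;
        # equipment without a 'priority' attribute was assigned float('inf') above, so it sorts to the end.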
for sb in supply_branches_ordered:
equipment.append({
'equipment_name': sb['components'][-1]['component_name'],
'equipment_object_type': sb['components'][-1]['component_object_type']
})
# use ExpandObjects functions
eo = ExpandObjects(logger_level=self.logger_level, logger_name=self.logger_name)
eo.unique_name = getattr(plant_loop_class_object, 'template_name')
if 'hotwater' in plant_loop_class_object.template_type.lower() or \
'chilledwater' in plant_loop_class_object.template_type.lower():
list_dictionary = \
eo.get_structure(structure_hierarchy=['AutoCreated', 'PlantLoop', 'PlantEquipmentList'])
list_dictionary['equipment'] = equipment
equipment_list_dictionary = [{'PlantEquipmentList': list_dictionary}, ]
elif 'mixedwater' in plant_loop_class_object.template_type.lower():
heating_equipment = [i for i in equipment if re.match(r'Boiler:.*', i['equipment_object_type'])]
heating_list_dictionary = \
eo.get_structure(structure_hierarchy=['AutoCreated', 'PlantLoop', 'PlantEquipmentListMixedWaterHeating'])
heating_list_dictionary['equipment'] = heating_equipment
cooling_equipment = [i for i in equipment if re.match(r'CoolingTower:.*', i['equipment_object_type'])]
cooling_list_dictionary = \
eo.get_structure(structure_hierarchy=['AutoCreated', 'PlantLoop', 'PlantEquipmentListMixedWaterCooling'])
cooling_list_dictionary['equipment'] = cooling_equipment
equipment_list_dictionary = [
{'PlantEquipmentList': cooling_list_dictionary},
{'PlantEquipmentList': heating_list_dictionary}]
elif 'condenserwater' in plant_loop_class_object.template_type.lower():
list_dictionary = \
eo.get_structure(structure_hierarchy=['AutoCreated', 'PlantLoop', 'CondenserEquipmentList'])
list_dictionary['equipment'] = equipment
equipment_list_dictionary = [{'CondenserEquipmentList': list_dictionary}, ]
else:
raise InvalidTemplateException(
'Error: an invalid loop type was specified when creating plant loop connections: {}'
.format(plant_loop_class_object.template_type))
equipment_list_formatted_dictionary = eo.yaml_list_to_epjson_dictionaries(
yaml_list=equipment_list_dictionary)
resolved_path_dictionary = eo.resolve_objects(epjson=equipment_list_formatted_dictionary)
        # save output to class epjson
self.merge_epjson(
super_dictionary=self.epjson,
object_dictionary=resolved_path_dictionary)
return
def run(self, input_epjson=None):
"""
Execute HVAC Template process workflow
:param input_epjson: input epJSON file
:return: epJSON containing expanded objects from templates
"""
if not input_epjson:
if self.input_epjson:
input_epjson = self.input_epjson
else:
raise InvalidEpJSONException("No epJSON file loaded or provided to HVACTemplate processor")
self.epjson_process(epjson_ref=input_epjson)
self.logger.info('##### PreProcessing Data #####')
self._hvac_template_preprocess(epjson=self.input_epjson)
self.logger.info('##### Processing Thermostats #####')
self.expanded_thermostats = self._expand_templates(
templates=self.templates_thermostats,
expand_class=ExpandThermostat)
self.logger.info('##### Processing Systems #####')
self.expanded_systems = self._expand_templates(
templates=self.templates_systems,
expand_class=ExpandSystem)
self.logger.info('##### Processing Zones #####')
self.expanded_zones = self._expand_templates(
templates=self.templates_zones,
expand_class=ExpandZone,
system_class_objects=self.expanded_systems)
self.logger.info('##### Building Zone-Thermostat Connections #####')
for _, zone_class_object in self.expanded_zones.items():
self._create_zonecontrol_thermostat(zone_class_object=zone_class_object)
self.logger.info('##### Building System-Zone Connections #####')
for _, system_class_object in self.expanded_systems.items():
# VRF systems do not connect via air paths, and need a separate function.
if system_class_object.template_type == 'HVACTemplate:System:VRF':
self._create_system_vrf_path_connection_objects(
system_class_object=system_class_object,
expanded_zones=self.expanded_zones)
else:
self._create_system_path_connection_objects(
system_class_object=system_class_object,
expanded_zones=self.expanded_zones)
self.logger.info('##### Processing Plant Loops #####')
self.expanded_plant_loops = self._expand_templates(
templates=self.templates_plant_loops,
expand_class=ExpandPlantLoop)
self.logger.info('##### Processing Plant Equipment #####')
self.expanded_plant_equipment = self._expand_templates(
templates=self.templates_plant_equipment,
expand_class=ExpandPlantEquipment,
plant_loop_class_objects=self.expanded_plant_loops)
# Pass through expanded plant equipment objects to create additional plant loops and equipment if necessary
self._create_additional_plant_loops_and_equipment_from_equipment(
expanded_plant_equipment=self.expanded_plant_equipment,
expanded_plant_loops=self.expanded_plant_loops)
self.logger.info('##### Building Plant-Plant Equipment Connections #####')
for expanded_pl in self.expanded_plant_loops.values():
self._create_water_loop_connectors_and_nodelist(
plant_loop_class_object=expanded_pl,
expanded_plant_equipment=self.expanded_plant_equipment,
expanded_systems=self.expanded_systems,
expanded_zones=self.expanded_zones)
self._create_plant_equipment_lists(
plant_loop_class_object=expanded_pl,
expanded_plant_equipment=self.expanded_plant_equipment)
self.logger.info('##### Creating epJSON #####')
# Merge each set of epJSON dictionaries
merge_list = [
self.epjson,
self.base_objects,
*[j.epjson for i, j in self.expanded_thermostats.items()],
*[j.epjson for i, j in self.expanded_zones.items()],
*[j.epjson for i, j in self.expanded_systems.items()],
*[j.epjson for i, j in self.expanded_plant_loops.items()],
*[j.epjson for i, j in self.expanded_plant_equipment.items()]]
output_epjson = {}
# The unique_name_override option is enabled here due to ObjectReference templates having the base equipment
# in them as well as being present in the base epjson. A better solution should be investigated so that this
# option can be turned back off.
for merge_dictionary in merge_list:
self.merge_epjson(
super_dictionary=output_epjson,
object_dictionary=merge_dictionary,
unique_name_override=True)
# Use this for file debugging
# import json
# with open('test.epJSON', 'w') as base_file:
# json.dump(output_epjson, base_file, indent=4, sort_keys=True)
# Create output format
output_epjson = {
"epJSON": output_epjson,
"epJSON_base": self.base_objects,
"epJSON_hvac_templates": self.templates,
'Output:PreprocessorMessage': self.stream.getvalue()
}
return output_epjson
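# Illustrative usage sketch for the run() workflow above (hedged: the class name and the epJSON
# loading step are assumptions; only the run() signature and the returned keys come from this file):
#     processor = HVACTemplate()                      # hypothetical class name
#     output = processor.run(input_epjson=loaded_epjson_dictionary)
#     expanded_epjson = output['epJSON']
#     messages = output['Output:PreprocessorMessage']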
|
the-stack_0_14065 | from __future__ import print_function
import pandas as pd
from sklearn.model_selection import train_test_split
from keras_text_summarization.library.utility.plot_utils import plot_and_save_history
from keras_text_summarization.library.seq2seq import Seq2SeqSummarizer
from keras_text_summarization.library.applications.fake_news_loader import fit_text
import numpy as np
import os
import tensorflow as tf
LOAD_EXISTING_WEIGHTS = False
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "0,1,2,3"
sess = tf.Session(config=tf.ConfigProto(log_device_placement=True))
def main():
np.random.seed(42)
data_dir_path = './data'
report_dir_path = './reports'
model_dir_path = './models'
    print('loading data files ...')
#df = pd.read_csv(data_dir_path + "/fake_or_real_news.csv")
print('extract configuration from input texts ...')
with open(data_dir_path + '/train_preprocessed.en') as f:
        X = f.read().split('\n')
with open(data_dir_path + '/train_preprocessed.de') as f:
        Y = f.read().split('\n')
config = fit_text(X, Y)
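    # Note: 'config' is assumed to hold the vocabulary and maximum sequence-length settings that
    # fit_text() derives from the source texts (X) and target summaries (Y); it is passed directly
    # to the Seq2SeqSummarizer constructor below.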
summarizer = Seq2SeqSummarizer(config)
if LOAD_EXISTING_WEIGHTS:
summarizer.load_weights(weight_file_path=Seq2SeqSummarizer.get_weight_file_path(model_dir_path=model_dir_path))
Xtrain, Xtest, Ytrain, Ytest = train_test_split(X, Y, test_size=0.2, random_state=42)
    print('training size: ', len(Xtrain))
print('testing size: ', len(Xtest))
print('start fitting ...')
history = summarizer.fit(Xtrain, Ytrain, Xtest, Ytest, epochs=100)
history_plot_file_path = report_dir_path + '/' + Seq2SeqSummarizer.model_name + '-history.png'
if LOAD_EXISTING_WEIGHTS:
history_plot_file_path = report_dir_path + '/' + Seq2SeqSummarizer.model_name + '-history-v' + str(summarizer.version) + '.png'
plot_and_save_history(history, summarizer.model_name, history_plot_file_path, metrics={'loss', 'acc'})
if __name__ == '__main__':
main()
|
the-stack_0_14067 | # -*- coding: utf-8 -*-
#
# Copyright 2016 SUSE LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
:codeauthor: :email:`Bo Maryniuk <[email protected]>`
'''
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
import os
# Import Salt Testing Libs
from tests.support.unit import TestCase, skipIf
from tests.support.helpers import no_symlinks
from tests.support.mock import (
MagicMock,
patch,
NO_MOCK,
NO_MOCK_REASON
)
# Import salt libs
from salt.modules.inspectlib.collector import Inspector
@skipIf(NO_MOCK, NO_MOCK_REASON)
@skipIf(no_symlinks(), "Git missing 'core.symlinks=true' config")
class InspectorCollectorTestCase(TestCase):
'''
Test inspectlib:collector:Inspector
'''
def setUp(self):
patcher = patch("os.mkdir", MagicMock())
patcher.start()
self.addCleanup(patcher.stop)
def test_env_loader(self):
'''
        Verify that the cache and pid file paths are assembled from the configured directories.
:return:
'''
cachedir = os.sep + os.sep.join(['foo', 'cache'])
piddir = os.sep + os.sep.join(['foo', 'pid'])
inspector = Inspector(cachedir=cachedir, piddir=piddir, pidfilename='bar.pid')
self.assertEqual(
inspector.dbfile,
os.sep + os.sep.join(['foo', 'cache', '_minion_collector.db']))
self.assertEqual(
inspector.pidfile,
os.sep + os.sep.join(['foo', 'pid', 'bar.pid']))
def test_file_tree(self):
'''
Test file tree.
:return:
'''
inspector = Inspector(cachedir=os.sep + 'test',
piddir=os.sep + 'test',
pidfilename='bar.pid')
tree_root = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'inspectlib', 'tree_test')
expected_tree = ([os.sep + os.sep.join(['a', 'a', 'dummy.a']),
os.sep + os.sep.join(['a', 'b', 'dummy.b']),
os.sep + os.sep.join(['b', 'b.1']),
os.sep + os.sep.join(['b', 'b.2']),
os.sep + os.sep.join(['b', 'b.3'])],
[os.sep + 'a',
os.sep + os.sep.join(['a', 'a']),
os.sep + os.sep.join(['a', 'b']),
os.sep + os.sep.join(['a', 'c']),
os.sep + 'b',
os.sep + 'c'],
[os.sep + os.sep.join(['a', 'a', 'dummy.ln.a']),
os.sep + os.sep.join(['a', 'b', 'dummy.ln.b']),
os.sep + os.sep.join(['a', 'c', 'b.1']),
os.sep + os.sep.join(['b', 'b.4']),
os.sep + os.sep.join(['b', 'b.5']),
os.sep + os.sep.join(['c', 'b.1']),
os.sep + os.sep.join(['c', 'b.2']),
os.sep + os.sep.join(['c', 'b.3'])])
tree_result = []
for chunk in inspector._get_all_files(tree_root):
buff = []
for pth in chunk:
buff.append(pth.replace(tree_root, ''))
tree_result.append(buff)
tree_result = tuple(tree_result)
self.assertEqual(expected_tree, tree_result)
def test_get_unmanaged_files(self):
'''
Test get_unmanaged_files.
:return:
'''
inspector = Inspector(cachedir='/test', piddir='/test', pidfilename='bar.pid')
managed = (
['a', 'b', 'c'],
['d', 'e', 'f'],
['g', 'h', 'i'],
)
system_all = (
['a', 'b', 'c'],
['d', 'E', 'f'],
['G', 'H', 'i'],
)
self.assertEqual(inspector._get_unmanaged_files(managed=managed, system_all=system_all),
([], ['E'], ['G', 'H']))
def test_pkg_get(self):
'''
Test if grains switching the pkg get method.
:return:
'''
debian_list = """
g++
g++-4.9
g++-5
gawk
gcc
gcc-4.9
gcc-4.9-base:amd64
gcc-4.9-base:i386
gcc-5
gcc-5-base:amd64
gcc-5-base:i386
gcc-6-base:amd64
gcc-6-base:i386
"""
inspector = Inspector(cachedir='/test', piddir='/test', pidfilename='bar.pid')
inspector.grains_core = MagicMock()
inspector.grains_core.os_data = MagicMock()
inspector.grains_core.os_data.get = MagicMock(return_value='Debian')
with patch.object(inspector, '_Inspector__get_cfg_pkgs_dpkg', MagicMock(return_value='dpkg')):
with patch.object(inspector, '_Inspector__get_cfg_pkgs_rpm', MagicMock(return_value='rpm')):
inspector.grains_core = MagicMock()
inspector.grains_core.os_data = MagicMock()
inspector.grains_core.os_data().get = MagicMock(return_value='Debian')
self.assertEqual(inspector._get_cfg_pkgs(), 'dpkg')
inspector.grains_core.os_data().get = MagicMock(return_value='Suse')
self.assertEqual(inspector._get_cfg_pkgs(), 'rpm')
inspector.grains_core.os_data().get = MagicMock(return_value='redhat')
self.assertEqual(inspector._get_cfg_pkgs(), 'rpm')
|
the-stack_0_14068 | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
fMRIPrep base processing workflows
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. autofunction:: init_fmriprep_wf
.. autofunction:: init_single_subject_wf
"""
import sys
import os
from copy import deepcopy
from nipype import __version__ as nipype_ver
from nipype.pipeline import engine as pe
from nipype.interfaces import utility as niu
from niworkflows.interfaces.nilearn import NILEARN_VERSION
from niworkflows.engine.workflows import LiterateWorkflow as Workflow
from niworkflows.interfaces.bids import (
BIDSInfo, BIDSDataGrabber, BIDSFreeSurferDir
)
from niworkflows.utils.bids import collect_data
from niworkflows.utils.misc import fix_multi_T1w_source_name
from smriprep.workflows.anatomical import init_anat_preproc_wf
from ..interfaces import SubjectSummary, AboutSummary, DerivativesDataSink
from ..__about__ import __version__
from .bold import init_func_preproc_wf
def init_fmriprep_wf(
anat_only,
aroma_melodic_dim,
bold2t1w_dof,
cifti_output,
debug,
dummy_scans,
echo_idx,
err_on_aroma_warn,
fmap_bspline,
fmap_demean,
force_syn,
freesurfer,
fs_subjects_dir,
hires,
ignore,
layout,
longitudinal,
low_mem,
medial_surface_nan,
omp_nthreads,
output_dir,
regressors_all_comps,
regressors_dvars_th,
regressors_fd_th,
run_uuid,
skull_strip_fixed_seed,
skull_strip_template,
spaces,
subject_list,
t2s_coreg,
task_id,
use_aroma,
use_bbr,
use_syn,
work_dir,
):
"""
Build *fMRIPrep*'s pipeline.
This workflow organizes the execution of FMRIPREP, with a sub-workflow for
each subject.
If FreeSurfer's ``recon-all`` is to be run, a corresponding folder is created
and populated with any needed template subjects under the derivatives folder.
Workflow Graph
.. workflow::
:graph2use: orig
:simple_form: yes
import os
from collections import namedtuple, OrderedDict
BIDSLayout = namedtuple('BIDSLayout', ['root'])
from fmriprep.workflows.base import init_fmriprep_wf
from niworkflows.utils.spaces import Reference, SpatialReferences
os.environ['FREESURFER_HOME'] = os.getcwd()
wf = init_fmriprep_wf(
anat_only=False,
aroma_melodic_dim=-200,
bold2t1w_dof=9,
cifti_output=False,
debug=False,
dummy_scans=None,
echo_idx=None,
err_on_aroma_warn=False,
fmap_bspline=False,
fmap_demean=True,
force_syn=True,
freesurfer=True,
fs_subjects_dir=None,
hires=True,
ignore=[],
layout=BIDSLayout('.'),
longitudinal=False,
low_mem=False,
medial_surface_nan=False,
omp_nthreads=1,
output_dir='.',
regressors_all_comps=False,
regressors_dvars_th=1.5,
regressors_fd_th=0.5,
run_uuid='X',
skull_strip_fixed_seed=False,
skull_strip_template=Reference('OASIS30ANTs'),
spaces=SpatialReferences(
spaces=['MNI152Lin',
('fsaverage', {'density': '10k'}),
'T1w',
'fsnative'],
checkpoint=True),
subject_list=['fmripreptest'],
t2s_coreg=False,
task_id='',
use_aroma=False,
use_bbr=True,
use_syn=True,
work_dir='.',
)
Parameters
----------
anat_only : bool
Disable functional workflows
bold2t1w_dof : 6, 9 or 12
Degrees-of-freedom for BOLD-T1w registration
cifti_output : bool
Generate bold CIFTI file in output spaces
debug : bool
Enable debugging outputs
dummy_scans : int or None
Number of volumes to consider as non steady state
echo_idx : int or None
Index of echo to preprocess in multiecho BOLD series,
or ``None`` to preprocess all
err_on_aroma_warn : bool
Do not fail on ICA-AROMA errors
fmap_bspline : bool
**Experimental**: Fit B-Spline field using least-squares
fmap_demean : bool
Demean voxel-shift map during unwarp
force_syn : bool
**Temporary**: Always run SyN-based SDC
freesurfer : bool
Enable FreeSurfer surface reconstruction (may increase runtime)
hires : bool
Enable sub-millimeter preprocessing in FreeSurfer
ignore : list
Preprocessing steps to skip (may include "slicetiming", "fieldmaps")
layout : BIDSLayout object
BIDS dataset layout
longitudinal : bool
Treat multiple sessions as longitudinal (may increase runtime)
See sub-workflows for specific differences
low_mem : bool
Write uncompressed .nii files in some cases to reduce memory usage
medial_surface_nan : bool
Replace medial wall values with NaNs on functional GIFTI files
omp_nthreads : int
Maximum number of threads an individual process may use
output_dir : str
Directory in which to save derivatives
regressors_all_comps
Return all CompCor component time series instead of the top fraction
regressors_dvars_th
Criterion for flagging DVARS outliers
regressors_fd_th
Criterion for flagging framewise displacement outliers
run_uuid : str
Unique identifier for execution instance
skull_strip_template : tuple
Name of target template for brain extraction with ANTs' ``antsBrainExtraction``,
and corresponding dictionary of output-space modifiers.
skull_strip_fixed_seed : bool
Do not use a random seed for skull-stripping - will ensure
run-to-run replicability when used with --omp-nthreads 1
spaces : :py:class:`~niworkflows.utils.spaces.SpatialReferences`
A container for storing, organizing, and parsing spatial normalizations. Composed of
:py:class:`~niworkflows.utils.spaces.Reference` objects representing spatial references.
Each ``Reference`` contains a space, which is a string of either TemplateFlow template IDs
(e.g., ``MNI152Lin``, ``MNI152NLin6Asym``, ``MNIPediatricAsym``), nonstandard references
(e.g., ``T1w`` or ``anat``, ``sbref``, ``run``, etc.), or a custom template located in
the TemplateFlow root directory. Each ``Reference`` may also contain a spec, which is a
dictionary with template specifications (e.g., a specification of ``{'resolution': 2}``
would lead to resampling on a 2mm resolution of the space).
subject_list : list
List of subject labels
t2s_coreg : bool
For multi-echo EPI, use the calculated T2*-map for T2*-driven coregistration
task_id : str or None
Task ID of BOLD series to preprocess, or ``None`` to preprocess all
use_aroma : bool
Perform ICA-AROMA on MNI-resampled functional series
use_bbr : bool or None
Enable/disable boundary-based registration refinement.
If ``None``, test BBR result for distortion before accepting.
use_syn : bool
**Experimental**: Enable ANTs SyN-based susceptibility distortion correction (SDC).
If fieldmaps are present and enabled, this is not run, by default.
work_dir : str
Directory in which to store workflow execution state and temporary files
"""
fmriprep_wf = Workflow(name='fmriprep_wf')
fmriprep_wf.base_dir = work_dir
if freesurfer:
fsdir = pe.Node(
BIDSFreeSurferDir(
derivatives=output_dir,
freesurfer_home=os.getenv('FREESURFER_HOME'),
spaces=spaces.get_fs_spaces()),
name='fsdir_run_' + run_uuid.replace('-', '_'), run_without_submitting=True)
if fs_subjects_dir is not None:
fsdir.inputs.subjects_dir = str(fs_subjects_dir.absolute())
reportlets_dir = os.path.join(work_dir, 'reportlets')
for subject_id in subject_list:
single_subject_wf = init_single_subject_wf(
anat_only=anat_only,
aroma_melodic_dim=aroma_melodic_dim,
bold2t1w_dof=bold2t1w_dof,
cifti_output=cifti_output,
debug=debug,
dummy_scans=dummy_scans,
echo_idx=echo_idx,
err_on_aroma_warn=err_on_aroma_warn,
fmap_bspline=fmap_bspline,
fmap_demean=fmap_demean,
force_syn=force_syn,
freesurfer=freesurfer,
hires=hires,
ignore=ignore,
layout=layout,
longitudinal=longitudinal,
low_mem=low_mem,
medial_surface_nan=medial_surface_nan,
name="single_subject_" + subject_id + "_wf",
omp_nthreads=omp_nthreads,
output_dir=output_dir,
regressors_all_comps=regressors_all_comps,
regressors_dvars_th=regressors_dvars_th,
regressors_fd_th=regressors_fd_th,
reportlets_dir=reportlets_dir,
skull_strip_fixed_seed=skull_strip_fixed_seed,
skull_strip_template=skull_strip_template,
spaces=spaces,
subject_id=subject_id,
t2s_coreg=t2s_coreg,
task_id=task_id,
use_aroma=use_aroma,
use_bbr=use_bbr,
use_syn=use_syn,
)
single_subject_wf.config['execution']['crashdump_dir'] = (
os.path.join(output_dir, "fmriprep", "sub-" + subject_id, 'log', run_uuid)
)
for node in single_subject_wf._get_all_nodes():
node.config = deepcopy(single_subject_wf.config)
if freesurfer:
fmriprep_wf.connect(fsdir, 'subjects_dir',
single_subject_wf, 'inputnode.subjects_dir')
else:
fmriprep_wf.add_nodes([single_subject_wf])
return fmriprep_wf
def init_single_subject_wf(
anat_only,
aroma_melodic_dim,
bold2t1w_dof,
cifti_output,
debug,
dummy_scans,
echo_idx,
err_on_aroma_warn,
fmap_bspline,
fmap_demean,
force_syn,
freesurfer,
hires,
ignore,
layout,
longitudinal,
low_mem,
medial_surface_nan,
name,
omp_nthreads,
output_dir,
reportlets_dir,
regressors_all_comps,
regressors_dvars_th,
regressors_fd_th,
skull_strip_fixed_seed,
skull_strip_template,
spaces,
subject_id,
t2s_coreg,
task_id,
use_aroma,
use_bbr,
use_syn,
):
"""
This workflow organizes the preprocessing pipeline for a single subject.
It collects and reports information about the subject, and prepares
sub-workflows to perform anatomical and functional preprocessing.
Anatomical preprocessing is performed in a single workflow, regardless of
the number of sessions.
Functional preprocessing is performed using a separate workflow for each
individual BOLD series.
Workflow Graph
.. workflow::
:graph2use: orig
:simple_form: yes
from collections import namedtuple
from niworkflows.utils.spaces import Reference, SpatialReferences
from fmriprep.workflows.base import init_single_subject_wf
BIDSLayout = namedtuple('BIDSLayout', ['root'])
wf = init_single_subject_wf(
anat_only=False,
aroma_melodic_dim=-200,
bold2t1w_dof=9,
cifti_output=False,
debug=False,
dummy_scans=None,
echo_idx=None,
err_on_aroma_warn=False,
fmap_bspline=False,
fmap_demean=True,
force_syn=True,
freesurfer=True,
hires=True,
ignore=[],
layout=BIDSLayout('.'),
longitudinal=False,
low_mem=False,
medial_surface_nan=False,
name='single_subject_wf',
omp_nthreads=1,
output_dir='.',
reportlets_dir='.',
regressors_all_comps=False,
regressors_dvars_th=1.5,
regressors_fd_th=0.5,
skull_strip_fixed_seed=False,
skull_strip_template=Reference('OASIS30ANTs'),
spaces=SpatialReferences(
spaces=['MNI152Lin',
('fsaverage', {'density': '10k'}),
'T1w',
'fsnative'],
checkpoint=True),
subject_id='test',
t2s_coreg=False,
task_id='',
use_aroma=False,
use_bbr=True,
use_syn=True,
)
Parameters
----------
anat_only : bool
Disable functional workflows
aroma_melodic_dim : int
Maximum number of components identified by MELODIC within ICA-AROMA
(default is -200, i.e., no limitation).
bold2t1w_dof : 6, 9 or 12
Degrees-of-freedom for BOLD-T1w registration
cifti_output : bool
Generate bold CIFTI file in output spaces
debug : bool
Enable debugging outputs
dummy_scans : int or None
        Number of volumes to consider as non-steady state
echo_idx : int or None
Index of echo to preprocess in multiecho BOLD series,
or ``None`` to preprocess all
err_on_aroma_warn : bool
Do not fail on ICA-AROMA errors
fmap_bspline : bool
**Experimental**: Fit B-Spline field using least-squares
fmap_demean : bool
Demean voxel-shift map during unwarp
force_syn : bool
**Temporary**: Always run SyN-based SDC
freesurfer : bool
Enable FreeSurfer surface reconstruction (may increase runtime)
hires : bool
Enable sub-millimeter preprocessing in FreeSurfer
ignore : list
Preprocessing steps to skip (may include "slicetiming", "fieldmaps")
layout : BIDSLayout object
BIDS dataset layout
longitudinal : bool
Treat multiple sessions as longitudinal (may increase runtime)
See sub-workflows for specific differences
low_mem : bool
Write uncompressed .nii files in some cases to reduce memory usage
medial_surface_nan : bool
Replace medial wall values with NaNs on functional GIFTI files
name : str
Name of workflow
omp_nthreads : int
Maximum number of threads an individual process may use
output_dir : str
Directory in which to save derivatives
reportlets_dir : str
Directory in which to save reportlets
    regressors_all_comps : bool
        Return all CompCor component time series instead of the top fraction
    regressors_fd_th : float
        Criterion for flagging framewise displacement outliers
    regressors_dvars_th : float
        Criterion for flagging DVARS outliers
skull_strip_fixed_seed : bool
Do not use a random seed for skull-stripping - will ensure
run-to-run replicability when used with --omp-nthreads 1
skull_strip_template : tuple
Name of target template for brain extraction with ANTs' ``antsBrainExtraction``,
and corresponding dictionary of output-space modifiers.
    subject_id : str
        Subject label
t2s_coreg : bool
For multi-echo EPI, use the calculated T2*-map for T2*-driven coregistration
spaces : :py:class:`~niworkflows.utils.spaces.SpatialReferences`
A container for storing, organizing, and parsing spatial normalizations. Composed of
:py:class:`~niworkflows.utils.spaces.Reference` objects representing spatial references.
Each ``Reference`` contains a space, which is a string of either TemplateFlow template IDs
(e.g., ``MNI152Lin``, ``MNI152NLin6Asym``, ``MNIPediatricAsym``), nonstandard references
(e.g., ``T1w`` or ``anat``, ``sbref``, ``run``, etc.), or a custom template located in
the TemplateFlow root directory. Each ``Reference`` may also contain a spec, which is a
dictionary with template specifications (e.g., a specification of ``{'resolution': 2}``
would lead to resampling on a 2mm resolution of the space).
task_id : str or None
Task ID of BOLD series to preprocess, or ``None`` to preprocess all
use_aroma : bool
Perform ICA-AROMA on MNI-resampled functional series
use_bbr : bool or None
Enable/disable boundary-based registration refinement.
If ``None``, test BBR result for distortion before accepting.
use_syn : bool
**Experimental**: Enable ANTs SyN-based susceptibility distortion correction (SDC).
If fieldmaps are present and enabled, this is not run, by default.
Inputs
------
subjects_dir : str
FreeSurfer's ``$SUBJECTS_DIR``.
"""
if name in ('single_subject_wf', 'single_subject_fmripreptest_wf'):
# for documentation purposes
subject_data = {
't1w': ['/completely/made/up/path/sub-01_T1w.nii.gz'],
'bold': ['/completely/made/up/path/sub-01_task-nback_bold.nii.gz']
}
else:
subject_data = collect_data(layout, subject_id, task_id, echo_idx)[0]
# Make sure we always go through these two checks
if not anat_only and subject_data['bold'] == []:
raise Exception("No BOLD images found for participant {} and task {}. "
"All workflows require BOLD images.".format(
subject_id, task_id if task_id else '<all>'))
if not subject_data['t1w']:
raise Exception("No T1w images found for participant {}. "
"All workflows require T1w images.".format(subject_id))
workflow = Workflow(name=name)
workflow.__desc__ = """
Results included in this manuscript come from preprocessing
performed using *fMRIPrep* {fmriprep_ver}
(@fmriprep1; @fmriprep2; RRID:SCR_016216),
which is based on *Nipype* {nipype_ver}
(@nipype1; @nipype2; RRID:SCR_002502).
""".format(fmriprep_ver=__version__, nipype_ver=nipype_ver)
workflow.__postdesc__ = """
Many internal operations of *fMRIPrep* use
*Nilearn* {nilearn_ver} [@nilearn, RRID:SCR_001362],
mostly within the functional processing workflow.
For more details of the pipeline, see [the section corresponding
to workflows in *fMRIPrep*'s documentation]\
(https://fmriprep.readthedocs.io/en/latest/workflows.html \
"FMRIPrep's documentation").
### Copyright Waiver
The above boilerplate text was automatically generated by fMRIPrep
with the express intention that users should copy and paste this
text into their manuscripts *unchanged*.
It is released under the [CC0]\
(https://creativecommons.org/publicdomain/zero/1.0/) license.
### References
""".format(nilearn_ver=NILEARN_VERSION)
inputnode = pe.Node(niu.IdentityInterface(fields=['subjects_dir']),
name='inputnode')
bidssrc = pe.Node(BIDSDataGrabber(subject_data=subject_data, anat_only=anat_only),
name='bidssrc')
bids_info = pe.Node(BIDSInfo(
bids_dir=layout.root, bids_validate=False), name='bids_info')
summary = pe.Node(SubjectSummary(std_spaces=spaces.get_spaces(nonstandard=False),
nstd_spaces=spaces.get_spaces(standard=False)),
name='summary', run_without_submitting=True)
about = pe.Node(AboutSummary(version=__version__,
command=' '.join(sys.argv)),
name='about', run_without_submitting=True)
ds_report_summary = pe.Node(
DerivativesDataSink(base_directory=reportlets_dir,
desc='summary', keep_dtype=True),
name='ds_report_summary', run_without_submitting=True)
ds_report_about = pe.Node(
DerivativesDataSink(base_directory=reportlets_dir,
desc='about', keep_dtype=True),
name='ds_report_about', run_without_submitting=True)
# Preprocessing of T1w (includes registration to MNI)
anat_preproc_wf = init_anat_preproc_wf(
bids_root=layout.root,
debug=debug,
freesurfer=freesurfer,
hires=hires,
longitudinal=longitudinal,
name="anat_preproc_wf",
num_t1w=len(subject_data['t1w']),
omp_nthreads=omp_nthreads,
output_dir=output_dir,
reportlets_dir=reportlets_dir,
spaces=spaces,
skull_strip_fixed_seed=skull_strip_fixed_seed,
skull_strip_template=skull_strip_template,
)
workflow.connect([
(inputnode, anat_preproc_wf, [('subjects_dir', 'inputnode.subjects_dir')]),
(bidssrc, bids_info, [(('t1w', fix_multi_T1w_source_name), 'in_file')]),
(inputnode, summary, [('subjects_dir', 'subjects_dir')]),
(bidssrc, summary, [('t1w', 't1w'),
('t2w', 't2w'),
('bold', 'bold')]),
(bids_info, summary, [('subject', 'subject_id')]),
(bids_info, anat_preproc_wf, [(('subject', _prefix), 'inputnode.subject_id')]),
(bidssrc, anat_preproc_wf, [('t1w', 'inputnode.t1w'),
('t2w', 'inputnode.t2w'),
('roi', 'inputnode.roi'),
('flair', 'inputnode.flair')]),
(bidssrc, ds_report_summary, [(('t1w', fix_multi_T1w_source_name), 'source_file')]),
(summary, ds_report_summary, [('out_report', 'in_file')]),
(bidssrc, ds_report_about, [(('t1w', fix_multi_T1w_source_name), 'source_file')]),
(about, ds_report_about, [('out_report', 'in_file')]),
])
# Overwrite ``out_path_base`` of smriprep's DataSinks
for node in workflow.list_node_names():
if node.split('.')[-1].startswith('ds_'):
workflow.get_node(node).interface.out_path_base = 'fmriprep'
if anat_only:
return workflow
for bold_file in subject_data['bold']:
func_preproc_wf = init_func_preproc_wf(
aroma_melodic_dim=aroma_melodic_dim,
bold2t1w_dof=bold2t1w_dof,
bold_file=bold_file,
cifti_output=cifti_output,
debug=debug,
dummy_scans=dummy_scans,
err_on_aroma_warn=err_on_aroma_warn,
fmap_bspline=fmap_bspline,
fmap_demean=fmap_demean,
force_syn=force_syn,
freesurfer=freesurfer,
ignore=ignore,
layout=layout,
low_mem=low_mem,
medial_surface_nan=medial_surface_nan,
num_bold=len(subject_data['bold']),
omp_nthreads=omp_nthreads,
output_dir=output_dir,
reportlets_dir=reportlets_dir,
regressors_all_comps=regressors_all_comps,
regressors_fd_th=regressors_fd_th,
regressors_dvars_th=regressors_dvars_th,
spaces=spaces,
t2s_coreg=t2s_coreg,
use_aroma=use_aroma,
use_bbr=use_bbr,
use_syn=use_syn,
)
workflow.connect([
(anat_preproc_wf, func_preproc_wf,
[(('outputnode.t1w_preproc', _pop), 'inputnode.t1w_preproc'),
('outputnode.t1w_brain', 'inputnode.t1w_brain'),
('outputnode.t1w_mask', 'inputnode.t1w_mask'),
('outputnode.t1w_dseg', 'inputnode.t1w_dseg'),
('outputnode.t1w_aseg', 'inputnode.t1w_aseg'),
('outputnode.t1w_aparc', 'inputnode.t1w_aparc'),
('outputnode.t1w_tpms', 'inputnode.t1w_tpms'),
('outputnode.template', 'inputnode.template'),
('outputnode.anat2std_xfm', 'inputnode.anat2std_xfm'),
('outputnode.std2anat_xfm', 'inputnode.std2anat_xfm'),
('outputnode.joint_template', 'inputnode.joint_template'),
('outputnode.joint_anat2std_xfm', 'inputnode.joint_anat2std_xfm'),
('outputnode.joint_std2anat_xfm', 'inputnode.joint_std2anat_xfm'),
# Undefined if --fs-no-reconall, but this is safe
('outputnode.subjects_dir', 'inputnode.subjects_dir'),
('outputnode.subject_id', 'inputnode.subject_id'),
('outputnode.t1w2fsnative_xfm', 'inputnode.t1w2fsnative_xfm'),
('outputnode.fsnative2t1w_xfm', 'inputnode.fsnative2t1w_xfm')]),
])
return workflow
def _prefix(subid):
if subid.startswith('sub-'):
return subid
return '-'.join(('sub', subid))
def _pop(inlist):
if isinstance(inlist, (list, tuple)):
return inlist[0]
return inlist
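

if __name__ == '__main__':
    # Minimal build-and-inspect sketch (not part of fMRIPrep's CLI). It reuses
    # the argument values from the init_single_subject_wf docstring above; the
    # base_dir and the idea of invoking this module directly are assumptions
    # made only for illustration, since real runs go through the command line.
    from collections import namedtuple
    from niworkflows.utils.spaces import Reference, SpatialReferences

    BIDSLayout = namedtuple('BIDSLayout', ['root'])
    demo_wf = init_single_subject_wf(
        anat_only=False, aroma_melodic_dim=-200, bold2t1w_dof=9,
        cifti_output=False, debug=False, dummy_scans=None, echo_idx=None,
        err_on_aroma_warn=False, fmap_bspline=False, fmap_demean=True,
        force_syn=True, freesurfer=True, hires=True, ignore=[],
        layout=BIDSLayout('.'), longitudinal=False, low_mem=False,
        medial_surface_nan=False, name='single_subject_wf', omp_nthreads=1,
        output_dir='.', reportlets_dir='.', regressors_all_comps=False,
        regressors_dvars_th=1.5, regressors_fd_th=0.5,
        skull_strip_fixed_seed=False,
        skull_strip_template=Reference('OASIS30ANTs'),
        spaces=SpatialReferences(
            spaces=['MNI152Lin', ('fsaverage', {'density': '10k'}),
                    'T1w', 'fsnative'],
            checkpoint=True),
        subject_id='test', t2s_coreg=False, task_id='',
        use_aroma=False, use_bbr=True, use_syn=True,
    )
    demo_wf.base_dir = '/tmp/fmriprep_demo_work'  # hypothetical scratch dir
    print(len(demo_wf.list_node_names()), 'nodes in the example workflow')
    # A real execution would then call, for example:
    #   demo_wf.run(plugin='MultiProc', plugin_args={'n_procs': 4})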
|
the-stack_0_14069 | from django.urls import path
from apps.superbonus import views
urlpatterns = [
path('', views.app, name='bonus-app-view'),
path('add-condo', views.add_condo, name='bonus-add-condo'),
path('add-villa', views.add_villa, name='bonus-add-villa'),
path('interventions/<int:id>', views.interventions, name='bonus-add-intervention'),
path('preview/<int:id>', views.preview, name='bonus-preview'),
path('catastal/<int:id>', views.catastal, name='bonus-catastal'),
path('beneficiary/<int:id>', views.beneficiary, name='bonus-beneficiary'),
path('interventions-costs/<int:id>/<str:type>', views.intervention_costs, name='bonus-costs'),
path('add-interventions-costs/<int:id>/<str:type>', views.add_intervention_costs, name='add-bonus-costs'),
path('edit-interventions-costs/<int:id>/<str:type>', views.edit_intervention_costs, name='edit-bonus-costs'),
path('add-professional/<int:id>', views.professionals, name='bonus-professional'),
path('add-professional/<int:id>/<str:type>/<str:prof>/', views.add_professionals, name='bonus-add-professional'),
path('delete/<str:type>/<int:id>', views.delete_prop, name='bonus-delete'),
]
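
# Illustrative sketch (not part of this URLconf): resolving the named routes
# above with Django's ``reverse``. It assumes this urlpatterns list is included
# at the project root; the id/type values are placeholders.
#
#   from django.urls import reverse
#   reverse('bonus-preview', kwargs={'id': 1})                  # '/preview/1'
#   reverse('bonus-costs', kwargs={'id': 1, 'type': 'villa'})   # '/interventions-costs/1/villa'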
|
the-stack_0_14070 | '''
Template tags for Stripe Non PCI Compliance
'''
from django import template
from django.template.loader import render_to_string
register = template.Library()
class StripeNode(template.Node):
def __init__(self, integration):
self.integration = template.Variable(integration)
def render(self, context):
int_obj = self.integration.resolve(context)
form_str = render_to_string("billing/stripe.html",
{"form": int_obj.generate_form(),
"integration": int_obj}, context)
return form_str
@register.tag
def stripe_payment(parser, token):
try:
tag, int_obj = token.split_contents()
except ValueError:
raise template.TemplateSyntaxError("%r was expecting a single argument" %token.split_contents()[0])
return StripeNode(int_obj)
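
# Illustrative template usage (not part of this module). The load name depends
# on this file's name, which is not shown here, so ``stripe_tags`` is an
# assumption; ``integration`` is a context variable holding the Stripe
# integration object whose generate_form() output StripeNode renders:
#
#   {% load stripe_tags %}
#   {% stripe_payment integration %}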
|
the-stack_0_14071 | import torch
import torchaudio
import pytorch_lightning as pl
from torch.utils.data import DataLoader
from utils.config import config
from typing import Optional
from transforms.audio import RandomSoxAugmentations, NoSoxAugmentations
from transforms.mfsc import ToMelSpec, SpecAug
from dataset.test_dataset import SimClrTestDataset
from pytorch_lightning.utilities import move_data_to_device
class UnsupervisedCommonVoiceDataModule(pl.LightningDataModule):
def __init__(self):
super().__init__()
self.sox_augmentations = RandomSoxAugmentations(
config.dataset.sample_rate)
self.no_augmentation = NoSoxAugmentations(config.dataset.sample_rate)
self.mel_then_specaug = torch.jit.script(
torch.nn.Sequential(ToMelSpec(), SpecAug()))
self.only_mel = torch.jit.script(torch.nn.Sequential(ToMelSpec()))
def setup(self, stage: Optional[str] = None):
if stage == 'test':
self.test_dataset = SimClrTestDataset(
root=config.dataset.test_root, tsv=config.dataset.test)
self.transform = self.only_mel
self.augmentation = self.no_augmentation
else:
self.unsupervised_dataset = torchaudio.datasets.COMMONVOICE(
root=config.dataset.root, tsv=config.dataset.unsupervised_train)
self.transform = self.mel_then_specaug
self.augmentation = self.sox_augmentations
def num_train_samples(self):
return len(self.unsupervised_dataset)
def num_test_samples(self):
return len(self.test_dataset)
def train_dataloader(self):
return DataLoader(
self.unsupervised_dataset,
batch_size=config.dataloader.batch_size,
num_workers=config.dataloader.num_workers,
pin_memory=True,
drop_last=True,
shuffle=True,
collate_fn=self._collate_fn
)
def test_dataloader(self):
return DataLoader(
self.test_dataset,
batch_size=len(self.test_dataset),
num_workers=0,
pin_memory=True,
drop_last=False,
collate_fn=self._collate_fn
)
def transfer_batch_to_device(self, batch, device):
device = device or self.device
self.transform = self.transform.to(device)
return move_data_to_device(batch, device)
def on_after_batch_transfer(self, batch, dataloader_idx):
input_a, input_b, input_a_lengths, input_b_lengths = batch
input_a = self.transform(input_a)
input_b = self.transform(input_b)
input_a_lengths = (input_a_lengths / (config.audio.model_sample_rate /
1000 * config.audio.stride_in_ms)).ceil_()
input_b_lengths = (input_b_lengths / (config.audio.model_sample_rate /
1000 * config.audio.stride_in_ms)).ceil_()
return (input_a, input_b, input_a_lengths, input_b_lengths)
# input: batch -> (waveform, sample_rate, dictionary)
# returns: (aug1, aug2, aug1_len, aug2_len) where aug1 == (batch, time)
def _collate_fn(self, batch):
raw_inputs = [b[0] for b in batch if b]
input_a = [self.augmentation(raw_input).transpose(1, 0)
for raw_input in raw_inputs]
input_b = [self.augmentation(raw_input).transpose(1, 0)
for raw_input in raw_inputs]
input_a_lengths = torch.tensor(
[t.size(0) for t in input_a],
dtype=torch.int32,
device=input_a[0].device,
)
input_b_lengths = torch.tensor(
[t.size(0) for t in input_b],
dtype=torch.int32,
device=input_b[0].device,
)
input_a = torch.nn.utils.rnn.pad_sequence(
input_a, batch_first=True).transpose(1, -1).squeeze(1)
input_b = torch.nn.utils.rnn.pad_sequence(
input_b, batch_first=True).transpose(1, -1).squeeze(1)
return (input_a, input_b, input_a_lengths, input_b_lengths)
if __name__ == "__main__":
loader = UnsupervisedCommonVoiceDataModule()
loader.setup()
for i, batch in enumerate(loader.train_dataloader()):
print(batch[0].shape, batch[1].shape, batch[2], batch[3])
if i > 0 and i % 20 == 0:
break
|
the-stack_0_14072 | # -*- coding: utf-8 -*-
import random
from django.conf import settings
from django.contrib.auth import get_user_model
from django.contrib.sites.models import Site
from django.core.management.base import BaseCommand
from django.db import transaction
from allauth.account.models import EmailAddress
from datetime import date
from faker import Faker
from wye.profiles.models import UserType, Profile
from wye.organisations.models import Organisation
from wye.regions.models import Location, State
from wye.workshops.models import WorkshopSections, Workshop
from wye.base.constants import WorkshopStatus, WorkshopLevel
NUMBER_OF_USERS = getattr(settings, "NUMBER_OF_USERS", 10)
NUMBER_OF_LOCATIONS = getattr(settings, "NUMBER_OF_LOCATIONS", 10)
NUMBER_OF_ORGANISATIONS = getattr(settings, "NUMBER_OF_ORGANISATIONS", 10)
NUMBER_OF_WORKSHOP_SECTIONS = getattr(
settings, "NUMBER_OF_WORKSHOP_SECTIONS", 5)
class Command(BaseCommand):
    help = "Create initial demo data for testing the application"
fake = Faker()
@transaction.atomic
def handle(self, *args, **options):
self.fake.seed(4321)
self.stdout.write(' Updating domain to localhost:8000') # Update site url
site = Site.objects.get_current()
site.domain, site.name = 'localhost:8000', 'Local'
site.save()
self.stdout.write(' Creating Superuser')
email = '[email protected]'
user = self.create_user(is_superuser=True, username='admin',
email=email, is_active=True, is_staff=True,
first_name='Admin')
# User
self.stdout.write(' Creating sample users')
for i in range(NUMBER_OF_USERS):
self.create_user()
# Location
self.stdout.write(' Creating sample locations')
self.create_locations(counter=NUMBER_OF_LOCATIONS)
# Organization
self.stdout.write(' Creating sample organisations')
self.create_organisations(counter=NUMBER_OF_ORGANISATIONS)
# Workshop
self.stdout.write(' Creating sample workshop sections')
self.create_workshop_sections()
# User Type
self.stdout.write(' Creating User Types')
self.create_user_type(counter=NUMBER_OF_WORKSHOP_SECTIONS)
# Profile
self.stdout.write(' Creating Profile')
self.create_profile(user)
# Sample Workshops
self.stdout.write(' Creating Sample Workshop')
self.create_sample_workshops(user)
user_email = EmailAddress.objects.create(
email=user.email, user=user, verified=True)
user_email.save()
def create_user(self, counter=None, **kwargs):
params = {
"first_name": kwargs.get('first_name', self.fake.first_name()),
"last_name": kwargs.get('last_name', self.fake.last_name()),
"username": kwargs.get('username', self.fake.user_name()),
"email": kwargs.get('email', self.fake.email()),
"is_active": kwargs.get('is_active', self.fake.boolean()),
"is_superuser": kwargs.get('is_superuser', False),
"is_staff": kwargs.get('is_staff', kwargs.get('is_superuser', self.fake.boolean())),
}
user, created = get_user_model().objects.get_or_create(**params)
if params['is_superuser']:
password = '123123'
user.set_password(password)
user.save()
self.stdout.write("SuperUser created with username: {username} and password: {password}".format(
username=params['username'], password=password)
)
return user
def create_locations(self, counter=None):
for i in range(counter):
state, updated = State.objects.update_or_create(
name=self.fake.state())
Location.objects.update_or_create(
name=self.fake.city(), state=state)
def create_user_type(self, counter=None):
user_type_tuple = [
('tutor', 'Tutor'),
('lead', 'Regional Lead'),
('poc', 'College POC'),
('admin', 'admin')]
for i in user_type_tuple:
obj, updated = UserType.objects.update_or_create(
slug=i[0])
obj.display_name = i[1]
obj.save()
def create_organisations(self, counter=None):
users = get_user_model().objects.all()
locations = Location.objects.all()
for i in range(counter):
number = self.fake.random_digit()
text = self.fake.text()
name = self.fake.company()
org, updated = Organisation.objects.update_or_create(
name=name,
location=locations[number],
organisation_type=number,
organisation_role=text,
description=text,
)
org.user.add(users[number])
def create_workshop_sections(self):
sections = ["Python2", "Python3", "Django", "Flask", "Gaming"]
for section in sections:
self.stdout.write(' Creating %s' % section)
WorkshopSections.objects.create(name=section)
def create_profile(self, user):
django = WorkshopSections.objects.get(name='Django')
python3 = WorkshopSections.objects.get(name='Python3')
location = Location.objects.all()[0]
user_type = UserType.objects.get(slug='admin')
profile = Profile(
user=user,
mobile='8758885872',
location=location)
profile.usertype.add(user_type)
profile.interested_locations.add(location)
profile.interested_sections.add(django, python3)
profile.save()
return profile
def create_sample_workshops(self, user):
organisations = Organisation.objects.all()
locations = Location.objects.all()
sections = WorkshopSections.objects.all()
for i in range(50):
w = Workshop.objects.create(
no_of_participants=random.randrange(10, 100),
expected_date=date(2015, random.randrange(1, 12), random.randrange(1, 29)),
description=self.fake.text(),
requester=random.choice(organisations),
location=random.choice(locations),
workshop_level=WorkshopLevel.BEGINNER,
workshop_section=random.choice(sections),
status=WorkshopStatus.COMPLETED
)
w.presenter.add(user)
w.save()
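
# Illustrative invocation (not part of this module). Django registers the
# command under this file's module name, which is not shown here, so the name
# below is an assumption:
#
#   python manage.py seed_demo_data
#
# It updates the Site to localhost:8000 and seeds a superuser (admin/123123),
# sample users, locations, organisations, workshop sections and 50 workshops.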
|
the-stack_0_14073 | # Tic Tac Toe
import random
def drawBoard(board):
# This function prints out the board that it was passed.
print(' | |')
print(' ' + board[7] + ' | ' + board[8] + ' | ' + board[9])
print(' | |')
print('-----------')
print(' | |')
print(' ' + board[4] + ' | ' + board[5] + ' | ' + board[6])
print(' | |')
print('-----------')
print(' | |')
print(' ' + board[1] + ' | ' + board[2] + ' | ' + board[3])
print(' | |')
def inputPlayerLetter():
# Lets the player type which letter they want to be.
# Returns a list with the player's letter as the first item, and the computer's letter as the second.
letter = ''
while not (letter == 'X' or letter == 'O'):
print('Do you want to be X or O?')
letter = input().upper()
    # the first element in the list is the player's letter, the second is the computer's letter.
if letter == 'X':
return ['X', 'O']
else:
return ['O', 'X']
def whoGoesFirst():
# Randomly choose the player who goes first.
if random.randint(0, 1) == 0:
return 'computer'
else:
return 'player'
def playAgain():
# This function returns True if the player wants to play again, otherwise it returns False.
print('Do you want to play again? (yes or no)')
return input().lower().startswith('y')
def makeMove(board, letter, move):
board[move] = letter
def isWinner(bo, le):
# Given a board and a player's letter, this function returns True if that player has won.
# We use bo instead of board and le instead of letter so we don't have to type as much.
return ((bo[7] == le and bo[8] == le and bo[9] == le) or # across the top
(bo[4] == le and bo[5] == le and bo[6] == le) or # across the middle
(bo[1] == le and bo[2] == le and bo[3] == le) or # across the bottom
(bo[7] == le and bo[4] == le and bo[1] == le) or # down the left side
(bo[8] == le and bo[5] == le and bo[2] == le) or # down the middle
(bo[9] == le and bo[6] == le and bo[3] == le) or # down the right side
(bo[7] == le and bo[5] == le and bo[3] == le) or # diagonal
(bo[9] == le and bo[5] == le and bo[1] == le)) # diagonal
def getBoardCopy(board):
    # Make a duplicate of the board list and return the duplicate.
dupeBoard = []
for i in board:
dupeBoard.append(i)
return dupeBoard
def isSpaceFree(board, move):
# Return true if the passed move is free on the passed board.
return board[move] == ' '
def getPlayerMove(board):
# Let the player type in his move.
move = ' '
while move not in '1 2 3 4 5 6 7 8 9'.split() or not isSpaceFree(board, int(move)):
print('What is your next move? (1-9)')
move = input()
return int(move)
def chooseRandomMoveFromList(board, movesList):
# Returns a valid move from the passed list on the passed board.
# Returns None if there is no valid move.
possibleMoves = []
for i in movesList:
if isSpaceFree(board, i):
possibleMoves.append(i)
if len(possibleMoves) != 0:
return random.choice(possibleMoves)
else:
return None
def getComputerMove(board, computerLetter):
# Given a board and the computer's letter, determine where to move and return that move.
if computerLetter == 'X':
playerLetter = 'O'
else:
playerLetter = 'X'
# Here is our algorithm for our Tic Tac Toe AI:
# First, check if we can win in the next move
for i in range(1, 10):
copy = getBoardCopy(board)
if isSpaceFree(copy, i):
makeMove(copy, computerLetter, i)
if isWinner(copy, computerLetter):
return i
# Check if the player could win on his next move, and block them.
for i in range(1, 10):
copy = getBoardCopy(board)
if isSpaceFree(copy, i):
makeMove(copy, playerLetter, i)
if isWinner(copy, playerLetter):
return i
# Try to take one of the corners, if they are free.
move = chooseRandomMoveFromList(board, [1, 3, 7, 9])
if move != None:
return move
# Try to take the center, if it is free.
if isSpaceFree(board, 5):
return 5
# Move on one of the sides.
return chooseRandomMoveFromList(board, [2, 4, 6, 8])
def isBoardFull(board):
# Return True if every space on the board has been taken. Otherwise return False.
for i in range(1, 10):
if isSpaceFree(board, i):
return False
return True
print('Welcome to Tic Tac Toe!')
while True:
# Reset the board
theBoard = [' '] * 10
playerLetter, computerLetter = inputPlayerLetter()
turn = whoGoesFirst()
print('The ' + turn + ' will go first.')
gameIsPlaying = True
while gameIsPlaying:
if turn == 'player':
# Player's turn.
drawBoard(theBoard)
move = getPlayerMove(theBoard)
makeMove(theBoard, playerLetter, move)
if isWinner(theBoard, playerLetter):
drawBoard(theBoard)
print('Hooray! You have won the game!')
gameIsPlaying = False
else:
if isBoardFull(theBoard):
drawBoard(theBoard)
print('The game is a tie!')
break
else:
turn = 'computer'
else:
# Computer's turn.
move = getComputerMove(theBoard, computerLetter)
makeMove(theBoard, computerLetter, move)
if isWinner(theBoard, computerLetter):
drawBoard(theBoard)
print('The computer has beaten you! You lose.')
gameIsPlaying = False
else:
if isBoardFull(theBoard):
drawBoard(theBoard)
print('The game is a tie!')
break
else:
turn = 'player'
if not playAgain():
break |
the-stack_0_14075 |
"""Tests for `inne` package."""
import time
from unittest.mock import Mock, patch
import numpy as np
import pytest
from inne import IsolationNNE
from scipy.sparse import csc_matrix, csr_matrix
from sklearn.datasets import (load_diabetes, load_digits, load_iris,
make_blobs, make_moons)
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import ParameterGrid, train_test_split
from sklearn.utils import check_random_state
from sklearn.utils._testing import (assert_allclose, assert_array_almost_equal,
assert_array_equal, ignore_warnings)
from sklearn.ensemble import IsolationForest
rng = check_random_state(0)
# load the iris dataset
# and randomly permute it
iris = load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# also load the diabetes dataset
# and randomly permute it
diabetes = load_diabetes()
perm = rng.permutation(diabetes.target.size)
diabetes.data = diabetes.data[perm]
diabetes.target = diabetes.target[perm]
# also load the digits dataset
# and randomly permute it
digit = load_digits()
perm = rng.permutation(digit.target.size)
digit.data = digit.data[perm]
digit.target = digit.target[perm]
def test_inne():
"""Check Isolation NNE for various parameter settings."""
X_train = np.array([[0, 1], [1, 2]])
X_test = np.array([[2, 1], [1, 1]])
grid = ParameterGrid(
{"n_estimators": [100, 200], "max_samples": [10, 20, 30]}
)
with ignore_warnings():
for params in grid:
IsolationNNE(random_state=0, **
params).fit(X_train).predict(X_test)
def test_inne_performance():
"""Test Isolation NNE performs well"""
# Generate train/test data
rng = check_random_state(2)
X = 0.3 * rng.randn(120, 2)
X_train = np.r_[X + 2, X - 2]
X_train = X[:100]
# Generate some abnormal novel observations
X_outliers = rng.uniform(low=-4, high=4, size=(20, 2))
X_test = np.r_[X[100:], X_outliers]
y_test = np.array([0] * 20 + [1] * 20)
# fit the model
clf = IsolationNNE(n_estimators=100, max_samples=16).fit(X_train)
# predict scores (the lower, the more normal)
y_pred = -clf.decision_function(X_test)
# check that there is at most 6 errors (false positive or false negative)
assert roc_auc_score(y_test, y_pred) > 0.98
@pytest.mark.parametrize("contamination", [0.25, "auto"])
def test_inne_works(contamination):
# toy sample (the last two samples are outliers)
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1], [6, 3], [-4, 7]]
# Test IsolationForest
clf = IsolationNNE(random_state=0, contamination=contamination)
clf.fit(X)
decision_func = -clf.decision_function(X)
pred = clf.predict(X)
# assert detect outliers:
assert np.min(decision_func[-2:]) > np.max(decision_func[:-2])
assert_array_equal(pred, 6 * [1] + 2 * [-1])
def test_score_samples():
X_train = [[1, 1], [1, 2], [2, 1]]
clf1 = IsolationNNE(contamination=0.1)
clf1.fit(X_train)
clf2 = IsolationNNE()
clf2.fit(X_train)
assert_array_equal(
clf1.score_samples([[2.0, 2.0]]),
clf1.decision_function([[2.0, 2.0]]) + clf1.offset_,
)
assert_array_equal(
clf2.score_samples([[2.0, 2.0]]),
clf2.decision_function([[2.0, 2.0]]) + clf2.offset_,
)
assert_array_equal(
clf1.score_samples([[2.0, 2.0]]), clf2.score_samples([[2.0, 2.0]])
)
def test_fit_time():
data = digit.data
print(data.shape)
clf = IsolationNNE(n_estimators=200, max_samples=256)
t1 = time.time()
clf.fit(data)
t2 = time.time()
anomaly_labels = clf.predict(data)
t3 = time.time()
print(t2-t1)
print(t3-t2)
clf2 = IsolationForest(n_estimators=200, max_samples=256)
t1 = time.time()
clf2.fit(data)
t2 = time.time()
anomaly_labels = clf2.predict(data)
t3 = time.time()
print(t2-t1)
print(t3-t2) |
the-stack_0_14078 |
import copy
import six
import sqlalchemy.pool
from .pool import DjangoQueuePool
class DjangoPoolParams(object):
_slow_and_safe = {
'django_pool_class': sqlalchemy.pool.QueuePool, # sqlalchemy's builtin queue pool class
'django_pre_ping': True, # pre ping by django if dialect is None
'django_reset_on_return': False, # use sqlalchemy's reset on conn return
'pool_size': 5, # daily traffic: reuse long connections
'max_overflow': 0, # burst traffic: do not overload the db
'timeout': 30, # burst traffic: > external api timeout
'recycle': 120, # should be smaller than mysql timeout
'dialect': None, # sqlalchemy's mysql dialect instance
'pre_ping': False, # sqlalchemy pre ping requires dialect
'use_threadlocal': True, # every thread always get its same conn
'reset_on_return': 'rollback', # reset on every conn return by rollback
}
_fast_and_sane = {
'django_pool_class': sqlalchemy.pool.QueuePool, # sqlalchemy's builtin queue pool class
'django_pre_ping': False, # no pre ping due to long mysql timeout
'django_reset_on_return': True, # reset by rollback only when necessary
'pool_size': 5, # daily traffic: reuse long connections
'max_overflow': 10, # burst traffic: do not overload the db
'timeout': 30, # burst traffic: > external api timeout
'recycle': 3600, # to be much smaller than mysql timeout
'dialect': None, # sqlalchemy's mysql dialect instance
'pre_ping': False, # sqlalchemy pre ping requires dialect
'use_threadlocal': False, # diff threads share the db connections
'reset_on_return': None, # do not use sqlalchemy reset on return
}
_fast_and_wild = {
'django_pool_class': DjangoQueuePool, # customized from sqlalchemy queue pool
'django_pre_ping': False, # no pre ping due to long mysql timeout
'django_reset_on_return': True, # reset by rollback only when necessary
'django_core_pool_size': 5, # retire no conn if achieving core size
'django_unload_timeout': 2, # wait some random time before overload
'django_retire_interval': 5, # retire few non-core conn per interval
'django_retire_quantity': 1, # retire few non-core conn per interval
'pool_size': 30, # daily traffic: recycle or retire conn
'max_overflow': 0, # burst traffic: put overflow into pool
'timeout': 30, # burst traffic: > external api timeout
'recycle': 3600, # to be much smaller than mysql timeout
'dialect': None, # sqlalchemy's mysql dialect instance
'pre_ping': False, # sqlalchemy pre ping requires dialect
'use_threadlocal': False, # diff threads share the db connections
'reset_on_return': None, # do not use sqlalchemy reset on return
}
_supported_params = set(six.iterkeys(_fast_and_wild))
_params_to_kwargs = {
'django_pool_class': None,
'django_pre_ping': None,
'django_reset_on_return': None,
'django_core_pool_size': 'core_pool_size',
'django_unload_timeout': 'unload_timeout',
'django_retire_interval': 'retire_interval',
'django_retire_quantity': 'retire_quantity',
}
    if not _supported_params.issuperset(six.viewkeys(_params_to_kwargs)):
raise Exception('invalid supported params: %s' % _supported_params)
def __init__(self, pool_params):
"""
:type pool_params: dict
"""
self.pool_params = pool_params
@classmethod
def unsupported(cls, params):
return six.viewkeys(params) - cls._supported_params
@classmethod
def new_slow_safe(cls, **updated):
return cls.new(cls._slow_and_safe, **updated)
@classmethod
def new_fast_sane(cls, **updated):
return cls.new(cls._fast_and_sane, **updated)
@classmethod
def new_fast_wild(cls, **updated):
return cls.new(cls._fast_and_wild, **updated)
@classmethod
def new(cls, default, **updated):
"""
:rtype: dict
"""
params = dict(default, **updated)
unsupported = cls.unsupported(params)
if unsupported:
raise Exception('unsupported pool params: %s' % unsupported)
return params
def get_pool_kwargs(self):
"""
:rtype: dict
"""
pool_class = self.django_pool_class
pool_kwargs = copy.deepcopy(self.pool_params)
for _k in self._params_to_kwargs:
pool_kwargs.pop(_k, None)
if pool_class == DjangoQueuePool:
for _k, _v in six.iteritems(self._params_to_kwargs):
if _k is not None and _v is not None:
pool_kwargs[_v] = self.pool_params.get(_k, None)
return pool_kwargs
@property
def django_pool_class(self):
return self.pool_params.get('django_pool_class', None)
@property
def django_pre_ping(self):
return self.pool_params.get('django_pre_ping', None)
@property
def django_reset_on_return(self):
return self.pool_params.get('django_reset_on_return', None)
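

if __name__ == '__main__':
    # Minimal usage sketch (illustration only; run via ``python -m`` so the
    # relative import above resolves). The overridden values are assumptions.
    fast_sane = DjangoPoolParams(
        DjangoPoolParams.new_fast_sane(pool_size=10, recycle=1800))
    print(fast_sane.django_pool_class)  # sqlalchemy.pool.QueuePool
    print(fast_sane.get_pool_kwargs())  # kwargs consumed by that pool class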
|
the-stack_0_14080 | """
This file is uses slightly modified code from pyDRMetrics [1]_, see:
- https://doi.org/10.1016/j.heliyon.2021.e06199 - the article.
- https://data.mendeley.com/datasets/jbjd5fmggh/1 - the supplementary files.
The following changes have been made:
- :mod:`numba` JIT for performance reasons
- use broadcasting instead of a 3rd loop in :func:`_ranking_matrix`
[1] Zhang, Yinsheng (2021),
“Source code, sample data, and case study report for pyDRMetrics”,
Mendeley Data, V1, doi: 10.17632/jbjd5fmggh.1
"""
from ....tools.decorators import metric
from ....tools.normalize import log_cpm_hvg
from anndata import AnnData
from numba import njit
from scipy.sparse import issparse
from sklearn.metrics import pairwise_distances
from typing import Tuple
import numpy as np
__original_author__ = "Yinsheng Zhang"
__original_author_email__ = "[email protected]"
__license__ = "CC BY 4.0"
__license_link__ = (
"https://data.mendeley.com/datasets/"
"jbjd5fmggh/1/files/da1bca42-c4da-4376-9177-bd2d9a308108"
)
_K = 30
@njit(cache=True, fastmath=True)
def _ranking_matrix(D: np.ndarray) -> np.ndarray: # pragma: no cover
assert D.shape[0] == D.shape[1]
R = np.zeros(D.shape)
m = len(R)
ks = np.arange(m)
for i in range(m):
for j in range(m):
R[i, j] = np.sum(
(D[i, :] < D[i, j]) | ((ks < j) & (np.abs(D[i, :] - D[i, j]) <= 1e-12))
)
return R
@njit(cache=True, fastmath=True)
def _coranking_matrix(R1: np.ndarray, R2: np.ndarray) -> np.ndarray: # pragma: no cover
assert R1.shape == R2.shape
Q = np.zeros(R1.shape, dtype=np.int32)
m = len(Q)
for i in range(m):
for j in range(m):
k = int(R1[i, j])
l = int(R2[i, j]) # noqa: E741
Q[k, l] += 1
return Q
@njit(cache=True, fastmath=True)
def _metrics(
Q: np.ndarray,
) -> Tuple[
np.ndarray, np.ndarray, np.ndarray, float, np.ndarray, int, float, float
]: # pragma: no cover
Q = Q[1:, 1:]
m = len(Q)
T = np.zeros(m - 1) # trustworthiness
C = np.zeros(m - 1) # continuity
QNN = np.zeros(m) # Co-k-nearest neighbor size
LCMC = np.zeros(m) # Local Continuity Meta Criterion
for k in range(m - 1):
Qs = Q[k:, :k]
# a column vector of weights. weight = rank error = actual_rank - k
W = np.arange(Qs.shape[0]).reshape(-1, 1)
# 1 - normalized hard-k-intrusions. lower-left region.
# weighted by rank error (rank - k)
T[k] = 1 - np.sum(Qs * W) / (k + 1) / m / (m - 1 - k)
Qs = Q[:k, k:]
# a row vector of weights. weight = rank error = actual_rank - k
W = np.arange(Qs.shape[1]).reshape(1, -1)
# 1 - normalized hard-k-extrusions. upper-right region
C[k] = 1 - np.sum(Qs * W) / (k + 1) / m / (m - 1 - k)
for k in range(m):
# Q[0,0] is always m. 0-th nearest neighbor is always the point itself.
# Exclude Q[0,0]
QNN[k] = np.sum(Q[: k + 1, : k + 1]) / ((k + 1) * m)
LCMC[k] = QNN[k] - (k + 1) / (m - 1)
kmax = np.argmax(LCMC)
Qlocal = np.sum(QNN[: kmax + 1]) / (kmax + 1)
# skip the last. The last is (m-1)-nearest neighbor, including all samples.
Qglobal = np.sum(QNN[kmax:-1]) / (m - kmax - 1)
AUC = np.mean(QNN)
return T, C, QNN, AUC, LCMC, kmax, Qlocal, Qglobal
def _high_dim(adata: AnnData) -> np.ndarray:
adata.X = adata.layers["counts"]
adata = log_cpm_hvg(adata)
high_dim = adata.X
return high_dim.A if issparse(high_dim) else high_dim
def _fit(
X: np.ndarray, E: np.ndarray
) -> Tuple[float, float, float, float, float, float, float]:
if np.any(np.isnan(E)):
return 0.0, 0.0, 0.0, 0.5, -np.inf, -np.inf, -np.inf
Dx = pairwise_distances(X)
De = pairwise_distances(E)
Rx, Re = _ranking_matrix(Dx), _ranking_matrix(De)
Q = _coranking_matrix(Rx, Re)
T, C, QNN, AUC, LCMC, _kmax, Qlocal, Qglobal = _metrics(Q)
return T[_K], C[_K], QNN[_K], AUC, LCMC[_K], Qlocal, Qglobal
@metric("continuity", maximize=True)
def continuity(adata: AnnData) -> float:
_, C, _, *_ = _fit(_high_dim(adata), adata.obsm["X_emb"])
return float(np.clip(C, 0.0, 1.0)) # in [0, 1]
@metric("co-KNN size", maximize=True)
def qnn(adata: AnnData) -> float:
_, _, QNN, *_ = _fit(_high_dim(adata), adata.obsm["X_emb"])
# normalized in the code to [0, 1]
return float(np.clip(QNN, 0.0, 1.0))
@metric("co-KNN AUC", maximize=True)
def qnn_auc(adata: AnnData) -> float:
_, _, _, AUC, *_ = _fit(_high_dim(adata), adata.obsm["X_emb"])
return float(np.clip(AUC, 0.5, 1.0)) # in [0.5, 1]
@metric("local continuity meta criterion", maximize=True)
def lcmc(adata: AnnData) -> float:
*_, LCMC, _, _ = _fit(_high_dim(adata), adata.obsm["X_emb"])
return LCMC
@metric("local property", maximize=True)
def qlocal(adata: AnnData) -> float:
# according to authors, this is usually preferred to
# qglobal, because human are more sensitive to nearer neighbors
*_, Qlocal, _ = _fit(_high_dim(adata), adata.obsm["X_emb"])
return Qlocal
@metric("global property", maximize=True)
def qglobal(adata: AnnData) -> float:
*_, Qglobal = _fit(_high_dim(adata), adata.obsm["X_emb"])
return Qglobal
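

if __name__ == "__main__":
    # Minimal self-check sketch (not part of the metrics module; run it from
    # within the package, e.g. via ``python -m``, so the relative imports above
    # resolve). The random data is an assumption made purely for demonstration.
    rng = np.random.default_rng(0)
    X = rng.normal(size=(60, 10))                   # "high-dimensional" data
    E = X[:, :2] + 0.01 * rng.normal(size=(60, 2))  # crude 2-D "embedding"
    T30, C30, QNN30, AUC, LCMC30, Qlocal, Qglobal = _fit(X, E)
    print("T@30=%.3f  C@30=%.3f  QNN@30=%.3f  AUC=%.3f" % (T30, C30, QNN30, AUC))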
|
the-stack_0_14082 | # -*- coding: utf-8 -*-
import json
import threading
from plexapi import log
class AlertListener(threading.Thread):
""" Creates a websocket connection to the PlexServer to optionally receive alert notifications.
These often include messages from Plex about media scans as well as updates to currently running
Transcode Sessions. This class implements threading.Thread, therefore to start monitoring
alerts you must call .start() on the object once it's created. When calling
`PlexServer.startAlertListener()`, the thread will be started for you.
Known `state`-values for timeline entries, with identifier=`com.plexapp.plugins.library`:
:0: The item was created
:1: Reporting progress on item processing
:2: Matching the item
:3: Downloading the metadata
:4: Processing downloaded metadata
:5: The item processed
:9: The item deleted
When metadata agent is not set for the library processing ends with state=1.
Parameters:
server (:class:`~plexapi.server.PlexServer`): PlexServer this listener is connected to.
callback (func): Callback function to call on received messages. The callback function
will be sent a single argument 'data' which will contain a dictionary of data
received from the server. :samp:`def my_callback(data): ...`
"""
key = '/:/websockets/notifications'
def __init__(self, server, callback=None):
super(AlertListener, self).__init__()
self.daemon = True
self._server = server
self._callback = callback
self._ws = None
def run(self):
try:
import websocket
except ImportError:
log.warning("Can't use the AlertListener without websocket")
return
# create the websocket connection
url = self._server.url(self.key, includeToken=True).replace('http', 'ws')
log.info('Starting AlertListener: %s', url)
self._ws = websocket.WebSocketApp(url, on_message=self._onMessage,
on_error=self._onError)
self._ws.run_forever()
def stop(self):
""" Stop the AlertListener thread. Once the notifier is stopped, it cannot be directly
started again. You must call :func:`plexapi.server.PlexServer.startAlertListener()`
from a PlexServer instance.
"""
log.info('Stopping AlertListener.')
self._ws.close()
def _onMessage(self, *args):
""" Called when websocket message is received.
In earlier releases, websocket-client returned a tuple of two parameters: a websocket.app.WebSocketApp
object and the message as a STR. Current releases appear to only return the message.
We are assuming the last argument in the tuple is the message.
This is to support compatibility with current and previous releases of websocket-client.
"""
message = args[-1]
try:
data = json.loads(message)['NotificationContainer']
log.debug('Alert: %s %s %s', *data)
if self._callback:
self._callback(data)
except Exception as err: # pragma: no cover
log.error('AlertListener Msg Error: %s', err)
def _onError(self, *args): # pragma: no cover
""" Called when websocket error is received.
In earlier releases, websocket-client returned a tuple of two parameters: a websocket.app.WebSocketApp
object and the error. Current releases appear to only return the error.
We are assuming the last argument in the tuple is the message.
This is to support compatibility with current and previous releases of websocket-client.
"""
err = args[-1]
log.error('AlertListener Error: %s' % err)
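

if __name__ == '__main__':
    # Illustrative usage sketch (not part of plexapi's shipped examples): print
    # incoming alerts for 30 seconds. The base URL and token are placeholders
    # that must be replaced with real values for an actual server.
    import time
    from plexapi.server import PlexServer

    def print_alert(data):
        # ``data`` is the NotificationContainer dictionary described above.
        print(data.get('type'), data)

    plex = PlexServer('http://127.0.0.1:32400', token='REPLACE_WITH_TOKEN')
    listener = plex.startAlertListener(callback=print_alert)
    time.sleep(30)
    listener.stop()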
|
the-stack_0_14085 | # coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
# pylint:disable=line-too-long
r"""Counts average audio length.
"""
# pylint:enable=line-too-long
import os
from typing import Any, Dict, Iterable, Optional, List, Tuple
from absl import app
from absl import flags
from absl import logging
import apache_beam as beam
import numpy as np
import tensorflow as tf
from non_semantic_speech_benchmark.data_prep import data_prep_utils as utils
flags.DEFINE_string('output_file', None, 'Output file.')
flags.DEFINE_string('debug', None, 'If present, only count this dataset.')
flags.DEFINE_list(
'audio_keys', ['audio', 'processed/audio_samples', 'audio_waveform',
'WAVEFORM/feature/floats'],
'Possible audio keys in tf.Examples.')
flags.DEFINE_list(
'sr_keys', ['audio_sample_rate'],
'Possible sample rate keys in tf.Examples.')
flags.DEFINE_integer(
'batch_size', None, 'Number of examples to process at once.')
FLAGS = flags.FLAGS
def duration_from_tfex(k_v):
"""Duration from a tf.Example."""
k, ex = k_v
audio_vals = []
for possible_audio_key in FLAGS.audio_keys:
if possible_audio_key in ex.features.feature:
logging.info('Found audio key: %s', possible_audio_key)
audio_feats = ex.features.feature[possible_audio_key]
cur_audio_vals = (audio_feats.int64_list.value or
audio_feats.float_list.value)
assert cur_audio_vals
audio_vals.append(cur_audio_vals)
assert len(audio_vals) == 1, ex
audio_vals = audio_vals[0]
logging.info('%s audio: %s', k, audio_vals)
sr_vals = []
for possible_sr_key in FLAGS.sr_keys:
if possible_sr_key in ex.features.feature:
logging.info('Found sample rate key: %s', possible_sr_key)
cur_audio = ex.features.feature[possible_sr_key].int64_list.value[0]
sr_vals.append(cur_audio)
assert len(sr_vals) in [0, 1], ex
if len(sr_vals) == 1:
sr = sr_vals[0]
else:
logging.info('Used default sr.')
sr = 16000
return len(audio_vals) / float(sr)
def durations_from_tfexs(k_vs):
for k_v in k_vs:
yield duration_from_tfex(k_v)
def durations(root, ds_file, ds_name,
reader_type, suffix):
"""Beam pipeline for durations from a particular file or glob."""
logging.info('Reading from %s: (%s, %s)', reader_type, ds_name, ds_file)
input_examples = utils.reader_functions[reader_type](
root, ds_file, f'Read-{suffix}')
if FLAGS.batch_size:
input_examples = input_examples | f'Batch-{suffix}' >> beam.BatchElements(
min_batch_size=FLAGS.batch_size, max_batch_size=FLAGS.batch_size)
return input_examples | f'Lens-{suffix}' >> beam.FlatMap(
durations_from_tfexs)
else:
return input_examples | f'Lens-{suffix}' >> beam.Map(duration_from_tfex)
def duration_and_num_examples(
root, ds_files, ds_name,
reader_type):
"""Beam pipeline for durations from a list of files or globs."""
durations_l = []
for i, ds_file in enumerate(ds_files):
cur_dur = durations(
root, ds_file, ds_name, reader_type, suffix=f'{ds_name}_{i}')
durations_l.append(cur_dur)
def _mean_and_count(durs):
return np.mean(durs), len(durs)
return (durations_l
| f'Flatten-{ds_name}' >> beam.Flatten()
| f'ToList-{ds_name}' >> beam.combiners.ToList()
| f'Stats-{ds_name}' >> beam.Map(_mean_and_count))
def get_dataset_info_dict(
debug):
"""Get dictionary of dataset info."""
def _tfds_fns(ds_name):
fns = [
x # pylint:disable=g-complex-comprehension
for s in ('train', 'validation', 'test')
for x in utils.tfds_filenames(ds_name, s)] # pylint:disable=protected-access
fns = [fns] # TFRecords require a list.
return (fns, 'tfrecord')
dss = {
'crema_d':
_tfds_fns('crema_d'),
'savee':
_tfds_fns('savee'),
'speech_commands':
_tfds_fns('speech_commands'),
'voxceleb':
_tfds_fns('voxceleb'),
}
if debug:
dss = {debug: dss[debug]}
return dss
def main(unused_argv):
dss = get_dataset_info_dict(FLAGS.debug)
out_file = FLAGS.output_file
assert not tf.io.gfile.exists(out_file)
if not tf.io.gfile.exists(os.path.dirname(out_file)):
tf.io.gfile.makedirs(os.path.dirname(out_file))
pipeline_option = None
with beam.Pipeline(pipeline_option) as root:
stats = [] # (ds name, avg duration, num examples)
for ds_name, (ds_files, reader_type) in dss.items():
cur_stat = duration_and_num_examples(root, ds_files, ds_name, reader_type)
cur_stat = cur_stat | f'AddName-{ds_name}' >> beam.Map(
lambda x, name: (name, x[0], x[1]), name=ds_name)
stats.append(cur_stat)
# Write output.
_ = (
stats
| 'CombineDSes' >> beam.Flatten()
| 'ToStr' >> beam.Map(lambda x: ','.join([str(r) for r in x]))
| 'WriteOutput' >> beam.io.WriteToText(out_file, num_shards=1))
if __name__ == '__main__':
flags.mark_flag_as_required('output_file')
app.run(main)
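
# Illustrative invocation (not part of the module; the script path and flag
# values are assumptions):
#
#   python count_duration.py \
#       --output_file=/tmp/dataset_durations.csv \
#       --debug=crema_d \
#       --batch_size=100
#
# The resulting file contains one ``dataset_name,mean_duration_seconds,num_examples``
# row per dataset.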
|
the-stack_0_14087 | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Copyright (c) 2017-2019 The Raven Core developers
# Copyright (c) 2020-2021 The Hive Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Helpful routines for regression testing."""
from base64 import b64encode
from binascii import hexlify, unhexlify
from datetime import datetime, timezone
from decimal import Decimal, ROUND_DOWN
import hashlib
import json
import logging
import os
import random
import re
import subprocess
from subprocess import CalledProcessError
import time
import socket
from contextlib import closing
from . import coverage
from .authproxy import AuthServiceProxy, JSONRPCException
logger = logging.getLogger("TestFramework.utils")
##########################################################################################
# Assert functions
##########################################################################################
def assert_approx(v, vexp, vspan=0.00001):
"""Assert that `v` is within `vspan` of `vexp`"""
if v < vexp - vspan:
raise AssertionError("%s < [%s..%s]" % (str(v), str(vexp - vspan), str(vexp + vspan)))
if v > vexp + vspan:
raise AssertionError("%s > [%s..%s]" % (str(v), str(vexp - vspan), str(vexp + vspan)))
def assert_contains(val, arr):
if not (val in arr):
raise AssertionError("val %s not in arr" % val)
def assert_does_not_contain(val, arr):
if val in arr:
raise AssertionError("val %s is in arr" % val)
def assert_contains_pair(key, val, dict_data):
if not (key in dict_data and val == dict_data[key]):
raise AssertionError("k/v pair (%s,%s) not in dict" % (key, val))
def assert_contains_key(key, dict_data):
if key not in dict_data:
raise AssertionError("key %s is not in dict" % key)
def assert_does_not_contain_key(key, dict_data):
if key in dict_data:
raise AssertionError("key %s is in dict" % key)
def assert_fee_amount(fee, tx_size, fee_per_kb):
"""Assert the fee was in range"""
target_fee = tx_size * fee_per_kb / 1000
if fee < target_fee:
raise AssertionError("Fee of %s HVN too low! (Should be %s HVN)" % (str(fee), str(target_fee)))
# allow the wallet's estimation to be at most 2 bytes off
if fee > (tx_size + 2) * fee_per_kb / 1000:
raise AssertionError("Fee of %s HVN too high! (Should be %s HVN)" % (str(fee), str(target_fee)))
def assert_equal(thing1, thing2, *args):
if thing1 != thing2 or any(thing1 != arg for arg in args):
raise AssertionError("not(%s)" % " == ".join(str(arg) for arg in (thing1, thing2) + args))
def assert_greater_than(thing1, thing2, err_msg="Greater"):
if thing1 <= thing2:
raise AssertionError("%s ~~ %s <= %s" % (err_msg, str(thing1), str(thing2)))
def assert_greater_than_or_equal(thing1, thing2):
if thing1 < thing2:
raise AssertionError("%s < %s" % (str(thing1), str(thing2)))
def assert_raises(exc, fun, *args, **kwds):
assert_raises_message(exc, None, fun, *args, **kwds)
def assert_raises_message(exc, message, fun, *args, **kwds):
try:
fun(*args, **kwds)
except JSONRPCException:
raise AssertionError("Use assert_raises_rpc_error() to test RPC failures")
except exc as e:
if message is not None and message not in e.error['message']:
raise AssertionError("Expected substring not found:" + e.error['message'])
except Exception as e:
raise AssertionError("Unexpected exception raised: " + type(e).__name__)
else:
raise AssertionError("No exception raised")
def assert_raises_process_error(returncode, output, fun, *args, **kwds):
"""Execute a process and asserts the process return code and output.
Calls function `fun` with arguments `args` and `kwds`. Catches a CalledProcessError
and verifies that the return code and output are as expected. Throws AssertionError if
no CalledProcessError was raised or if the return code and output are not as expected.
Args:
returncode (int): the process return code.
output (string): [a substring of] the process output.
fun (function): the function to call. This should execute a process.
args*: positional arguments for the function.
kwds**: named arguments for the function.
"""
try:
fun(*args, **kwds)
except CalledProcessError as e:
if returncode != e.returncode:
raise AssertionError("Unexpected returncode %i" % e.returncode)
if output not in e.output:
raise AssertionError("Expected substring not found:" + e.output)
else:
raise AssertionError("No exception raised")
def assert_raises_rpc_error(code, message, fun, *args, **kwds):
"""Run an RPC and verify that a specific JSONRPC exception code and message is raised.
Calls function `fun` with arguments `args` and `kwds`. Catches a JSONRPCException
and verifies that the error code and message are as expected. Throws AssertionError if
no JSONRPCException was raised or if the error code/message are not as expected.
Args:
code (int), optional: the error code returned by the RPC call (defined
in src/rpc/protocol.h). Set to None if checking the error code is not required.
message (string), optional: [a substring of] the error string returned by the
RPC call. Set to None if checking the error string is not required.
fun (function): the function to call. This should be the name of an RPC.
args*: positional arguments for the function.
kwds**: named arguments for the function.
"""
assert try_rpc(code, message, fun, *args, **kwds), "No exception raised"
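
# Example (illustrative only; the node handle, error code and message are
# assumptions about a typical usage):
#   assert_raises_rpc_error(-8, "Block height out of range",
#                           node.getblockhash, 1000000000)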
def try_rpc(code, message, fun, *args, **kwds):
"""Tries to run an rpc command.
Test against error code and message if the rpc fails.
Returns whether a JSONRPCException was raised."""
try:
fun(*args, **kwds)
except JSONRPCException as e:
# JSONRPCException was thrown as expected. Check the code and message values are correct.
if (code is not None) and (code != e.error["code"]):
raise AssertionError("Unexpected JSONRPC error code %i" % e.error["code"])
if (message is not None) and (message not in e.error['message']):
raise AssertionError("Expected substring not found:" + e.error['message'])
return True
except Exception as e:
raise AssertionError("Unexpected exception raised: " + type(e).__name__)
else:
return False
def assert_is_hex_string(string):
try:
int(string, 16)
except Exception as e:
raise AssertionError(
"Couldn't interpret %r as hexadecimal; raised: %s" % (string, e))
def assert_is_hash_string(string, length=64):
if not isinstance(string, str):
raise AssertionError("Expected a string, got type %r" % type(string))
elif length and len(string) != length:
raise AssertionError(
"String of length %d expected; got %d" % (length, len(string)))
elif not re.match('[abcdef0-9]+$', string):
raise AssertionError(
"String %r contains invalid characters for a hash." % string)
def assert_array_result(object_array, to_match, expected, should_not_find=False):
"""
Pass in array of JSON objects, a dictionary with key/value pairs
to match against, and another dictionary with expected key/value
pairs.
If the should_not_find flag is true, to_match should not be found
in object_array
"""
if should_not_find:
assert_equal(expected, {})
num_matched = 0
for item in object_array:
all_match = True
for key, value in to_match.items():
if item[key] != value:
all_match = False
if not all_match:
continue
elif should_not_find:
num_matched = num_matched + 1
for key, value in expected.items():
if item[key] != value:
raise AssertionError("%s : expected %s=%s" % (str(item), str(key), str(value)))
num_matched = num_matched + 1
if num_matched == 0 and not should_not_find:
raise AssertionError("No objects matched %s" % (str(to_match)))
if num_matched > 0 and should_not_find:
raise AssertionError("Objects were found %s" % (str(to_match)))
def assert_happening(date_str, within_secs=120):
""" Make sure date_str happened withing within_secs seconds of now.
Assumes date_str is in rpc results cust_format e.g. '2019-11-07 17:50:06' and assumed to represent UTC.
Using a big default to eliminate inaccurate wall clocks...
"""
cust_format = '%Y-%m-%d %H:%M:%S'
then = datetime.strptime(date_str, cust_format).replace(tzinfo=timezone.utc)
now = datetime.now(timezone.utc)
diff_secs = (now - then).total_seconds()
if abs(diff_secs) > within_secs:
raise AssertionError("More than expected %s second difference between %s and now(%s) (%ss)" % (within_secs, date_str, now, diff_secs))
##########################################################################################
# Utility functions
##########################################################################################
def check_json_precision():
"""Make sure json library being used does not lose precision converting HVN values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n))) * 1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def count_bytes(hex_string):
return len(bytearray.fromhex(hex_string))
def bytes_to_hex_str(byte_str):
return hexlify(byte_str).decode('ascii')
def hash256(byte_str):
sha256 = hashlib.sha256()
sha256.update(byte_str)
sha256d = hashlib.sha256()
sha256d.update(sha256.digest())
return sha256d.digest()[::-1]
x16r_hash_cmd = os.path.dirname(os.path.realpath(__file__)) + "/../../../src/test/test_hive_hash"
def x16_hash_block(hex_str, algorithm="2"):
"""
:param hex_str: Blockhash to convert
:param algorithm: Which algorithm ~~ "1" = x16r "2" = x16rv2
:return: Converted hash
"""
cmd = [x16r_hash_cmd, hex_str, algorithm]
blk_hash = subprocess.run(cmd, stdout=subprocess.PIPE, check=True).stdout.decode('ascii')
return blk_hash
def hex_str_to_bytes(hex_str):
return unhexlify(hex_str.encode('ascii'))
def str_to_b64str(string):
return b64encode(string.encode('utf-8')).decode('ascii')
def satoshi_round(amount):
return Decimal(amount).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
def wait_until(predicate, *, err_msg, attempts=float('inf'), timeout=float('inf'), lock=None):
if attempts == float('inf') and timeout == float('inf'):
timeout = 60
attempt = 0
timeout += time.time()
while attempt < attempts and time.time() < timeout:
if lock:
with lock:
if predicate():
return
else:
if predicate():
return
attempt += 1
time.sleep(0.05)
# Print the cause of the timeout
assert_greater_than(attempts, attempt, err_msg + " ~~ Exceeded Attempts")
    assert_greater_than(timeout, time.time(), err_msg + " ~~ Exceeded Timeout")
raise RuntimeError('Unreachable')
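# Illustrative usage sketch (the predicate, timeout and names are examples only):
#   wait_until(lambda: txid in node.getrawmempool(),
#              err_msg="transaction was not accepted to the mempool", timeout=30)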
##########################################################################################
# RPC/P2P connection constants and functions
##########################################################################################
# The maximum number of nodes a single test can spawn
MAX_NODES = 8
# Don't assign rpc or p2p ports lower than this
PORT_MIN = 11000
# The number of ports to "reserve" for p2p and rpc, each
PORT_RANGE = 5000
# List to store P2P ports
p2p_ports = [-1, -1, -1, -1, -1, -1, -1, -1]
# List to store RPC ports
rpc_ports = [-1, -1, -1, -1, -1, -1, -1, -1]
class PortSeed:
# Must be initialized with a unique integer for each process
n = None
def find_free_port():
"""
Ask the system for a free port.
In case of error return error message.
:return: {Tuple}
"""
port = None
error = {}
with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:
try:
s.bind(('', 0))
sock_name = s.getsockname()
if type(sock_name) is tuple and len(sock_name) == 2:
port = sock_name[1]
except socket.error as e:
error = {'errno': e.errno, 'msg': str(e)}
return port, error
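# Illustrative usage sketch: ask for a free port and fail loudly if the OS refused.
#   port, err = find_free_port()
#   assert port is not None, "could not bind a free port: %s" % err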
def get_rpc_proxy(url, node_number, timeout=None, coverage_dir=None):
"""
Args:
url (str): URL of the RPC server to call
node_number (int): the node number (or id) that this calls to
timeout: time to wait
coverage_dir: directory to watch
Returns:
AuthServiceProxy. convenience object for making RPC calls.
"""
proxy_kwargs = {}
if timeout is not None:
proxy_kwargs['timeout'] = timeout
proxy = AuthServiceProxy(url, **proxy_kwargs)
proxy.url = url # store URL on proxy for info
coverage_logfile = coverage.get_filename(
coverage_dir, node_number) if coverage_dir else None
return coverage.AuthServiceProxyWrapper(proxy, coverage_logfile)
def p2p_port(n):
    if p2p_ports[n] == -1:
# Port isn't in the list, find one that is available
p2p_ports[n] = find_free_port()[0]
return p2p_ports[n]
else:
return p2p_ports[n]
def rpc_port(n):
    if rpc_ports[n] == -1:
# Port isn't in the list, find one that is available
rpc_ports[n] = find_free_port()[0]
return rpc_ports[n]
else:
return rpc_ports[n]
def rpc_url(data_dir, i, rpchost=None):
rpc_u, rpc_p = get_auth_cookie(data_dir)
host = '127.0.0.1'
port = rpc_port(i)
if rpchost:
parts = rpchost.split(':')
if len(parts) == 2:
host, port = parts
else:
host = rpchost
return "http://%s:%s@%s:%d" % (rpc_u, rpc_p, host, int(port))
##########################################################################################
# Node functions
##########################################################################################
def initialize_data_dir(dirname, n):
datadir = os.path.join(dirname, "node" + str(n))
if not os.path.isdir(datadir):
os.makedirs(datadir)
with open(os.path.join(datadir, "hive.conf"), 'w', encoding='utf8') as f:
f.write("regtest=1\n")
f.write("port=" + str(p2p_port(n)) + "\n")
f.write("rpcport=" + str(rpc_port(n)) + "\n")
f.write("listenonion=0\n")
return datadir
def get_datadir_path(dirname, n):
return os.path.join(dirname, "node" + str(n))
def get_auth_cookie(datadir):
user = None
password = None
if os.path.isfile(os.path.join(datadir, "hive.conf")):
with open(os.path.join(datadir, "hive.conf"), 'r', encoding='utf8') as f:
for line in f:
if line.startswith("rpcuser="):
assert user is None # Ensure that there is only one rpcuser line
user = line.split("=")[1].strip("\n")
if line.startswith("rpcpassword="):
assert password is None # Ensure that there is only one rpcpassword line
password = line.split("=")[1].strip("\n")
if os.path.isfile(os.path.join(datadir, "regtest", ".cookie")):
with open(os.path.join(datadir, "regtest", ".cookie"), 'r', encoding="ascii") as f:
user_pass = f.read()
split_user_pass = user_pass.split(':')
user = split_user_pass[0]
password = split_user_pass[1]
if user is None or password is None:
raise ValueError("No RPC credentials")
return user, password
def log_filename(dirname, n_node, log_name):
return os.path.join(dirname, "node" + str(n_node), "regtest", log_name)
def get_bip9_status(node, key):
info = node.getblockchaininfo()
return info['bip9_softforks'][key]
def set_node_times(nodes, t):
for node in nodes:
node.setmocktime(t)
def disconnect_nodes(from_connection, node_num):
for peer_id in [peer['id'] for peer in from_connection.getpeerinfo() if "testnode%d" % node_num in peer['subver']]:
from_connection.disconnectnode(nodeid=peer_id)
for _ in range(50):
if not [peer['id'] for peer in from_connection.getpeerinfo() if "testnode%d" % node_num in peer['subver']]:
break
time.sleep(0.1)
else:
raise AssertionError("timed out waiting for disconnect")
def disconnect_all_nodes(nodes):
for i in range(0, len(nodes)):
for j in range(i + 1, len(nodes)):
disconnect_nodes(nodes[i], j)
def connect_nodes(from_connection, node_num):
ip_port = "127.0.0.1:" + str(p2p_port(node_num))
from_connection.addnode(ip_port, "onetry")
# poll until version handshake complete to avoid race conditions
# with transaction relaying
while any(peer['version'] == 0 for peer in from_connection.getpeerinfo()):
time.sleep(0.1)
def connect_nodes_bi(nodes, a, b, wait=False):
connect_nodes(nodes[a], b)
connect_nodes(nodes[b], a)
if wait:
wait_for_block_sync(nodes, a, b)
def connect_all_nodes_bi(nodes, wait=False):
for i in range(0, len(nodes)):
for j in range(i + 1, len(nodes)):
connect_nodes_bi(nodes, i, j, wait)
def wait_for_block_sync(nodes, a, b, timeout=60):
    # Wait for the two nodes' block counts to match; without this sync some tests failed randomly.
start_time = cur_time = time.time()
while cur_time <= start_time + timeout:
block_count_diff = abs(nodes[a].getblockcount() - nodes[b].getblockcount())
if block_count_diff == 0:
return
time.sleep(0.1)
cur_time = time.time()
def sync_blocks(rpc_connections, *, wait=1, timeout=60):
"""
Wait until everybody has the same tip.
    sync_blocks needs to be called with an rpc_connections set that has at least
one node already synced to the latest, stable tip, otherwise there's a
chance it might return before all nodes are stably synced.
"""
# Use getblockcount() instead of waitforblockheight() to determine the
# initial max height because the two RPCs look at different internal global
# variables (chainActive vs latestBlock) and the former gets updated
# earlier.
max_height = max(x.getblockcount() for x in rpc_connections)
start_time = cur_time = time.time()
tips = None
while cur_time <= start_time + timeout:
tips = [r.waitforblockheight(max_height, int(wait * 1000)) for r in rpc_connections]
if all(t["height"] == max_height for t in tips):
if all(t["hash"] == tips[0]["hash"] for t in tips):
return
raise AssertionError("Block sync failed, mismatched block hashes:{}".format("".join("\n {!r}".format(tip) for tip in tips)))
cur_time = time.time()
raise AssertionError("Block sync to height {} timed out:{}".format(max_height, "".join("\n {!r}".format(tip) for tip in tips)))
def sync_chain(rpc_connections, *, wait=1, timeout=60):
"""
Wait until everybody has the same best block
"""
while timeout > 0:
best_hash = [x.getbestblockhash() for x in rpc_connections]
if best_hash == [best_hash[0]] * len(best_hash):
return
time.sleep(wait)
timeout -= wait
raise AssertionError("Chain sync failed: Best block hashes don't match")
def sync_mempools(rpc_connections, *, wait=1, timeout=60):
"""
Wait until everybody has the same transactions in their memory
pools
"""
while timeout > 0:
pool = set(rpc_connections[0].getrawmempool())
num_match = 1
for i in range(1, len(rpc_connections)):
if set(rpc_connections[i].getrawmempool()) == pool:
num_match = num_match + 1
if num_match == len(rpc_connections):
return
time.sleep(wait)
timeout -= wait
raise AssertionError("Mempool sync failed")
##########################################################################################
# Transaction/Block functions
##########################################################################################
def find_output(node, txid, amount):
"""
Return index to output of txid with value amount
Raises exception if there is none.
"""
txdata = node.getrawtransaction(txid, 1)
for i in range(len(txdata["vout"])):
if txdata["vout"][i]["value"] == amount:
return i
raise RuntimeError("find_output txid %s : %s not found" % (txid, str(amount)))
def gather_inputs(from_node, amount_needed, confirmations_required=1):
"""
Return a random set of unspent txouts that are enough to pay amount_needed
"""
assert (confirmations_required >= 0)
utxo = from_node.listunspent(confirmations_required)
random.shuffle(utxo)
inputs = []
total_in = Decimal("0.00000000")
while total_in < amount_needed and len(utxo) > 0:
t = utxo.pop()
total_in += t["amount"]
inputs.append({"txid": t["txid"], "vout": t["vout"], "address": t["address"]})
if total_in < amount_needed:
raise RuntimeError("Insufficient funds: need %d, have %d" % (amount_needed, total_in))
return total_in, inputs
def make_change(from_node, amount_in, amount_out, fee):
"""
Create change output(s), return them
"""
outputs = {}
amount = amount_out + fee
change = amount_in - amount
if change > amount * 2:
# Create an extra change output to break up big inputs
change_address = from_node.getnewaddress()
# Split change in two, being careful of rounding:
outputs[change_address] = Decimal(change / 2).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
change = amount_in - amount - outputs[change_address]
if change > 0:
outputs[from_node.getnewaddress()] = change
return outputs
def random_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
"""
Create a random transaction.
Returns (txid, hex-encoded-transaction-data, fee)
"""
from_node = random.choice(nodes)
to_node = random.choice(nodes)
fee = min_fee + fee_increment * random.randint(0, fee_variants)
(total_in, inputs) = gather_inputs(from_node, amount + fee)
outputs = make_change(from_node, total_in, amount, fee)
outputs[to_node.getnewaddress()] = float(amount)
rawtx = from_node.createrawtransaction(inputs, outputs)
signresult = from_node.signrawtransaction(rawtx)
txid = from_node.sendrawtransaction(signresult["hex"], True)
return txid, signresult["hex"], fee
# Helper to create at least "count" utxos
# Pass in a fee that is sufficient for relay and mining new transactions.
def create_confirmed_utxos(fee, node, count):
to_generate = int(0.5 * count) + 101
while to_generate > 0:
node.generate(min(25, to_generate))
to_generate -= 25
utxos = node.listunspent()
iterations = count - len(utxos)
addr1 = node.getnewaddress()
addr2 = node.getnewaddress()
if iterations <= 0:
return utxos
for _ in range(iterations):
t = utxos.pop()
inputs = [{"txid": t["txid"], "vout": t["vout"]}]
outputs = {}
send_value = t['amount'] - fee
outputs[addr1] = satoshi_round(send_value / 2)
outputs[addr2] = satoshi_round(send_value / 2)
raw_tx = node.createrawtransaction(inputs, outputs)
signed_tx = node.signrawtransaction(raw_tx)["hex"]
node.sendrawtransaction(signed_tx)
while node.getmempoolinfo()['size'] > 0:
node.generate(1)
utxos = node.listunspent()
assert (len(utxos) >= count)
return utxos
# Create large OP_RETURN txouts that can be appended to a transaction
# to make it large (helper for constructing large transactions).
def gen_return_txouts():
# Some pre-processing to create a bunch of OP_RETURN txouts to insert into transactions we create
# So we have big transactions (and therefore can't fit very many into each block)
# create one script_pubkey
script_pubkey = "6a4d0200" # OP_RETURN OP_PUSH2 512 bytes
for _ in range(512):
script_pubkey = script_pubkey + "01"
# concatenate 128 txouts of above script_pubkey which we'll insert before the txout for change
txouts = "81"
for _ in range(128):
# add txout value
txouts = txouts + "0000000000000000"
# add length of script_pubkey
txouts = txouts + "fd0402"
# add script_pubkey
txouts = txouts + script_pubkey
return txouts
def create_tx(node, coinbase, to_address, amount):
inputs = [{"txid": coinbase, "vout": 0}]
outputs = {to_address: amount}
rawtx = node.createrawtransaction(inputs, outputs)
signresult = node.signrawtransaction(rawtx)
assert_equal(signresult["complete"], True)
return signresult["hex"]
# Create a spend of each passed-in utxo, splicing in "txouts" to each raw
# transaction to make it large. See gen_return_txouts() above.
def create_lots_of_big_transactions(node, txouts, utxos, num, fee):
addr = node.getnewaddress()
txids = []
for _ in range(num):
t = utxos.pop()
inputs = [{"txid": t["txid"], "vout": t["vout"]}]
outputs = {}
change = t['amount'] - fee
outputs[addr] = satoshi_round(change)
rawtx = node.createrawtransaction(inputs, outputs)
newtx = rawtx[0:92]
newtx = newtx + txouts
newtx = newtx + rawtx[94:]
signresult = node.signrawtransaction(newtx, None, None, "NONE")
txid = node.sendrawtransaction(signresult["hex"], True)
txids.append(txid)
return txids
def mine_large_block(node, utxos=None):
# generate a 66k transaction,
    # and 14 of them are close to the 1MB block limit
num = 14
txouts = gen_return_txouts()
utxos = utxos if utxos is not None else []
if len(utxos) < num:
utxos.clear()
utxos.extend(node.listunspent())
fee = 100 * node.getnetworkinfo()["relayfee"]
create_lots_of_big_transactions(node, txouts, utxos, num, fee=fee)
node.generate(1)
|
# Import the class
from collections import OrderedDict
# Create an empty ordered dictionary
glossary = OrderedDict()
# Add key-value pairs to the empty ordered dictionary
glossary['print'] = '打印'
glossary['title'] = '首字母大写'
glossary['lower'] = '全部小写'
glossary['upper'] = '全部大写'
glossary['str'] = '字符串'
glossary['key'] = '键'
glossary['value'] = '值'
glossary['items'] = '项目'
glossary['sorted'] = '排序'
glossary['set'] = '集合'
# Iterate over the dictionary and print each entry
for vocabulary, explanation in glossary.items():
print(f"{vocabulary.title()}'s explanation is {explanation.title()}") |
the-stack_0_14091 | class Solution:
def floodFill(self, grid, sr, sc, newColor):
m, n = len(grid), len(grid[0])
self.target = grid[sr][sc]
def dfs(x, y):
grid[x][y] = newColor
for i, j in [(1, 0), (-1, 0), (0, 1), (0, -1)]:
if (0 <= x + i < m and 0 <= y + j < n) and grid[x + i][y + j] == self.target:
dfs(x + i, y + j)
if self.target == newColor:
return grid
dfs(sr, sc)
return grid
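# Minimal self-check added as a sketch (not part of the original solution file);
# the grid below is the classic LeetCode 733 example.
if __name__ == "__main__":
    grid = [[1, 1, 1], [1, 1, 0], [1, 0, 1]]
    print(Solution().floodFill(grid, 1, 1, 2))  # expected: [[2, 2, 2], [2, 2, 0], [2, 0, 1]]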
|
the-stack_0_14092 | '''
snpPriority.py - score SNPs based on their LD score and SE weighted effect sizes
===============================================================================
:Author: Mike Morgan
:Release: $Id$
:Date: |today|
:Tags: Python
Purpose
-------
.. Score SNPs based on their LD score and SE weighted effect sizes from
association analysis.
Usage
-----
.. Example use case
Example::
python snpPriority.py
Type::
python snpPriority.py --help
for command line help.
Command line options
--------------------
'''
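# Illustrative invocation sketch (added for clarity; paths are placeholders and the
# option set is not exhaustive -- see the parser options defined in main() below):
#   python snpPriority.py --score-method=PICS --chromosome=1 \
#       --ld-directory=/path/to/ld_files --ld-threshold=0.5 results.assoc > snp_scores.tsv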
import sys
import CGAT.Experiment as E
import PipelineGWAS as gwas
import re
import pandas as pd
import CGAT.IOTools as IOTools
def main(argv=None):
"""script main.
parses command line options in sys.argv, unless *argv* is given.
"""
if argv is None:
argv = sys.argv
# setup command line parser
parser = E.OptionParser(version="%prog version: $Id$",
usage=globals()["__doc__"])
parser.add_option("--score-method", dest="method", type="choice",
choices=["PICS", "LDscore", "ABF", "R2_rank",
"get_eigen", "calc_prior", "credible_set",
"summarise"],
help="SNP scoring/prioritisation method to apply.")
parser.add_option("--database", dest="database", type="string",
help="SQL database containing LD information "
"in table format. Expects columns SNP_A, "
"SNP_B, R2, BP_A and BP_B (Plink --r2 output)")
parser.add_option("--ld-directory", dest="ld_dir", type="string",
help="directory containing tabix-index BGZIP "
"LD files. Assumes Plink used to calculate LD")
parser.add_option("--table-name", dest="table", type="string",
help="name of the SQL table containing the LD"
"values")
parser.add_option("--chromosome", dest="chromosome", type="string",
help="chromosome to subset the association results "
"file on")
parser.add_option("--ld-threshold", dest="ld_threshold", type="float",
help="the threshold of LD above which variants will "
"be taken forward.")
parser.add_option("--rank-threshold", dest="rank_threshold", type="float",
help="the threshold in terms of the top n% SNPs to "
"output based on the ranking metric. e.g. "
"--rank-threshold=0.01 is the top 1% SNPs")
parser.add_option("--credible-interval", dest="interval", type="float",
help="The credible set interval size to generate the "
"credible set of SNPs")
parser.add_option("--prior-variance", dest="prior_var", type="float",
help="the prior variance used to weight the SNP "
"variance")
parser.add_option("--fine-map-window", dest="map_window", type="int",
help="the region size to included around the index "
"SNP as the fine-mapping region.")
parser.add_option("--eigen-score-directory", dest="eigen_dir", type="string",
help="PATH to directory containing tabix indexed "
"eigen score files")
parser.add_option("--flat-prior", dest="flat_prior", action="store_true",
help="Ignore functional annotation information and "
"use an uninformative prior on each SNP")
parser.add_option("--snp-set", dest="snp_set", type="string",
help="Pre-defined SNP set as a list of SNP IDs."
"If used to calculate priors contains column of scores.")
parser.add_option("--distribution", dest="dist", type="choice",
choices=["normal", "t", "gamma", "lognormal",
"exponential"],
help="distribution from which to draw prior "
"probabilities")
parser.add_option("--distribution-parameters", dest="dist_params", type="string",
help="distribution parameters as a comma-separated list")
parser.add_option("--lead-snp-id", dest="lead_snp", type="int",
help="0-based item number in filename")
parser.add_option("--filename-separator", dest="separator", type="string",
help="filename separator to extract information")
parser.add_option("--snp-column", dest="snp_col", type="int",
help="0-based index of SNP ID column number")
parser.add_option("--probability-column", dest="prob_col", type="int",
help="0-based index of posterior probabilities column"
" number")
parser.set_defaults(ld_dir=None,
dist="normal",
dist_params=None,
snp_set=None,
prior_var=0.04,
interval=0.99,
eigen_dir=None,
map_window=100000,
ld_threshold=0.5,
database=None,
table=None,
flat_prior=False,
lead_snp=2,
separator="_",
snp_col=0,
prob_col=1,
)
# add common options (-h/--help, ...) and parse command line
(options, args) = E.Start(parser, argv=argv)
infile = argv[-1]
if len(infile.split(",")) > 1:
pass
else:
        peek = pd.read_table(infile, nrows=5, sep=r"\s*", header=0)
        try:
            if (peek["TEST"] != "ADD").any():
                clean = False
            else:
                clean = True
        except KeyError:
            clean = True
if options.method == "LDscore":
snpscores = gwas.snpPriorityScore(gwas_results=infile,
database=options.database,
table_name=options.table,
chromosome=options.chromosome,
ld_dir=options.ld_dir,
clean=clean)
# take top 1%, all SNPs doesn't achieve anything useful
ranks = int(len(snpscores.index) * 0.01)
snpscores = snpscores.iloc[:ranks]
elif options.method == "PICS":
snp_list = {}
if options.snp_set and not options.flat_prior:
with IOTools.openFile(options.snp_set, "r") as sfile:
for line in sfile.readlines():
snp = line.split("\t")[0]
try:
score = float(line.split("\t")[-1].rstrip("\n"))
except ValueError:
score = 0
snp_list[snp] = float(score)
# get the parameter estimates for the distribution
# if they have not been provided
if not options.dist_params:
dist_params = gwas.estimateDistributionParameters(data=snp_list.values(),
distribution=options.dist)
else:
dist_params = tuple([float(fx) for fx in options.dist_params.split(",")])
E.info("Calculating priors on SNPs")
priors = gwas.calcPriorsOnSnps(snp_list=snp_list,
distribution=options.dist,
params=dist_params)
elif options.snp_set and options.flat_prior:
with IOTools.openFile(options.snp_set, "r") as sfile:
for line in sfile.readlines():
snp = line.split("\t")[0]
snp_list[snp] = 1.0
priors = snp_list
else:
# allow for no priors or scores to be set,
# use of priors will be ignored,
# i.e. when prior and likelihood are not from
# conjugate distributions
priors = None
# PICS scores expects the gwas results file to
# only contain the region of interest, which
# represents an independent association signal
snpscores = gwas.PICSscore(gwas_results=infile,
database=options.database,
table_name=options.table,
chromosome=options.chromosome,
priors=priors,
clean=clean,
ld_dir=options.ld_dir,
ld_threshold=options.ld_threshold)
snpscores.columns = ["SNP", "PICS"]
posterior_sum = 0
snpscores.sort_values(ascending=False,
inplace=True)
post_snps = []
for snp in snpscores.index:
if posterior_sum < 99.0:
posterior_sum += snpscores.loc[snp]
post_snps.append(snp)
else:
break
snpscores = snpscores.loc[post_snps]
snpscores.drop_duplicates(inplace=True)
elif options.method == "R2_rank":
# rank SNPs based on their LD with the lead
# SNP, take the top n% SNPs
snpscores = gwas.LdRank(gwas_results=infile,
database=options.database,
table_name=options.table,
ld_dir=options.ld_dir,
chromosome=options.chromosome,
ld_threshold=options.ld_threshold,
top_snps=options.rank_threshold,
clean=clean)
elif options.method == "ABF":
snpscores = gwas.ABFScore(gwas_results=infile,
region_size=options.map_window,
chromosome=options.chromosome,
prior_variance=options.prior_var,
clean=clean)
elif options.method == "get_eigen":
E.info("Fetching Eigen scores")
snpscores = gwas.getEigenScores(eigen_dir=options.eigen_dir,
bim_file=infile,
snp_file=options.snp_set)
snpscores = pd.DataFrame(snpscores).T
elif options.method == "credible_set":
E.info("Creating credible set")
snpscores = gwas.makeCredibleSet(probs_file=infile,
credible_set=options.interval,
lead_snp_indx=options.lead_snp,
filename_sep=options.separator,
snp_column=options.snp_col,
probs_column=options.prob_col)
elif options.method == "summarise":
E.info("Collating SNP prioritisation resuslts")
file_list = infile.split(",")
snpscores = gwas.summariseResults(file_list=file_list)
snpscores.to_csv(options.stdout, index_label="SNP",
sep="\t")
# write footer and output benchmark information.
E.Stop()
if __name__ == "__main__":
sys.exit(main(sys.argv))
|
the-stack_0_14093 | """
track, part of glbase
"""
import pickle, sys, os, struct, math, sqlite3, zlib, time, csv, zlib
from operator import itemgetter
from .progress import progressbar
from .errors import AssertionError
from .location import location
from . import genelist as Genelist
from . import utils, config
from .data import positive_strand_labels, negative_strand_labels
from .draw import draw
import matplotlib.cm as cm
import matplotlib.pyplot as plot
import scipy.stats as stats
from scipy.stats import pearsonr
from .base_track import base_track
import numpy
from numpy import array, zeros, set_printoptions, int32, append, linspace, argmax, amax, delete
TRACK_CACHE_SIZE = 10 # number of track segments to cache.
class track(base_track):
"""
track definition, used for things like sequence reads across the genome
**Arguments**
name (string)
name for the track (defaults to filename)
filename (string)
directory location of the track file.
only respected if dir_name is set.
new (Optional, default=False)
Use seqToTrk() in preference of this. But if you know what you are
doing then this will generate a new (empty) db.
norm_factor (Optional, default = 1.0)
An optional normalization factor. Data is multiplied by this number before display.
Can only be specified at creation time, cannot be modified later.
mem_cache (Optional, default=False)
Instead of doing the whole thing from disk, first cache the DB in memory for
super fast access. Just make sure you have enough memory!
pre_build (Optional, default=[0, 100, 200])
prebuild genome arrays for various read_extend parameters for fast lookups.
By default glbase builds indexes for 100 and 200 bp read_extends and unextended
(i.e. the complete frag size, useful for e.g. paired-end data). Raw frag sizes are returned
when read_extend and pre_build are 0
"""
def __init__(self, name=None, new=False, filename=None, norm_factor=None, mem_cache=False, pre_build=[0, 100, 200], **kargs):
base_track.__init__(self, name, new, filename, norm_factor, mem_cache)
if new:
if norm_factor is None:
norm_factor = 1.0
self.meta_data['norm_factor'] = str(norm_factor)
self.pre_build = pre_build
self.meta_data['pre_build'] = pre_build
self.__setup_tables(filename)
config.log.info('Generating new track')
else:
assert not norm_factor, 'norm_factor can only be modified at creation time'
# if not new, get pre_build from the metadatum
try:
self.pre_build = self.meta_data['pre_build']
except KeyError:
raise AssertionError('meta data not found in trk file, this suggests the trk file is incomplete, please check your trk file and regenerate if required')
self.norm_factor = float(self.meta_data['norm_factor'])
if self.norm_factor != 1.0:
config.log.info('track: Using norm_factor=%.3f' % self.norm_factor)
#print norm_factor, self.norm_factor, str(norm_factor) != str(self.norm_factor)
#if str(norm_factor) != str(self.norm_factor):
# config.log.error('the norm_factor supplied here does not match the norm_factor used during the creation of the track!')
# raise AssertionError, 'norm_factor != norm_factor (%.2f != %.2f) stored in the trk file.' % (norm_factor, self.norm_factor)
self.__warned_about_zlib = False # To deprecate
def __repr__(self):
return("glbase.track")
# I use these ones here as tracks prefer numpy arrays
# LATER: port the flat_tracks to use numpy arrays
def _format_data(self, data):
"""
array('i', []) --> whatever it's stored as in db
"""
return(sqlite3.Binary(zlib.compress(data.dumps(), 1)))
def _unformat_data(self, data):
"""
whatever stored as in db --> array('i', [])
"""
#print "ret:",[d for d in data], ":"
try:
a = numpy.loads(zlib.decompress(data))
except UnicodeDecodeError:
            print(data.decode('utf-8'))
            raise  # 'a' would be undefined below, so surface the original error
'''
a = numpy.loads(zlib.decompress(data))
if not self.__warned_about_zlib:
            config.log.warning('Tracks are no longer Zlib compressed by default. Tracks will need to be regenerated')
config.log.warning('Benefit is that they are now faster!')
config.log.warning('In future versions of glbase this warning will be removed and an error will be produced')
self.__warned_about_zlib = True
'''
return(a)
def __setup_tables(self, filename):
"""
No pre-defined file - I want a new database track.
"""
# If i make it here, then a lot of grunt work already done in base_track
c = self._connection.cursor()
c.execute("CREATE TABLE main (chromosome TEXT PRIMARY KEY, seq_reads INT)")
c.execute("CREATE TABLE pre_build (chromosome TEXT, read_extend TEXT, array_blob BLOB)")
self._connection.commit()
c.close()
def finalise(self):
c = self._connection.cursor()
# Do the prebuilds:
list_of_all_chroms_in_db = self.get_chromosome_names()
for read_extend in self.pre_build:
for chrom in list_of_all_chroms_in_db:
a = self.get_array_chromosome(chrom, read_extend=read_extend, _silent=True) # Force a cache miss
c.execute('INSERT INTO pre_build VALUES (?, ?, ?)', (chrom, read_extend, self._format_data(a)))
config.log.info('Cached chrom=%s, read_extend=%s' % (chrom, str(read_extend)))
#print self.meta_data
base_track.finalise(self)
def __add_chromosome(self, chromosome):
"""
add a chromosome to the main table.
add a chromosome table.
returns True if succesfully created/already present.
"""
c = self._connection.cursor()
# check chromosome is not already present.
if self.__has_chromosome(chromosome):
return(True)
c.execute("INSERT INTO main VALUES (?, ?)", (chromosome, 0)) # add chr to master table.
# make the new chromsome table:
table_name = "chr_%s" % str(chromosome)
c.execute("CREATE TABLE %s (left INT, right INT, strand TEXT)" % (table_name, ))
#c.execute("CREATE INDEX %s_com_idx ON %s(left, right)" % (table_name, table_name))
#c.execute("CREATE INDEX %s_lef_idx ON %s(left)" % (table_name, table_name))
#c.execute("CREATE INDEX %s_rig_idx ON %s(right)" % (table_name, table_name))
c.close()
return(True)
def __has_chromosome(self, chromosome):
"""
do we have that chromosome?
"""
c = self._connection.cursor()
c.execute("SELECT chromosome FROM main WHERE chromosome=?", (chromosome, ))
result = c.fetchone()
c.close()
return bool(result)
def add_location(self, loc, strand="+", increment=1):
"""
**Purpose**
Add a location to the track.
Increments the score by 'increment' from loc["left"] to
loc["right"]
**Arguments**
loc
strand
increment
**Returns**
True, if completes succesfully, or exception.
"""
if not self.__has_chromosome(loc["chr"]):
self.__add_chromosome(loc["chr"])
# convert strand to +, -
if strand in positive_strand_labels:
strand = "+"
elif strand in negative_strand_labels:
strand = "-"
if not self._c:
self._c = self._connection.cursor()
# insert location into new array.
table_name = "chr_%s" % str(loc["chr"])
# get the old number of seq_reads
self._c.execute("SELECT seq_reads FROM main WHERE chromosome=?", (loc["chr"], ))
current_seq_reads = self._c.fetchone()[0] # always returns a tuple
self._c.execute("UPDATE main SET seq_reads=? WHERE chromosome=?", (current_seq_reads+1, loc["chr"]))
# add the location to the seq table:
self._c.execute("INSERT INTO %s VALUES (?, ?, ?)" % table_name, (loc["left"], loc["right"], strand))
#c.close()
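    # Illustrative usage sketch (coordinates are made up; 'location' is the glbase
    # location class imported above):
    #   t = track(filename="sample.trk", name="sample", new=True)
    #   t.add_location(location(chr="1", left=1000, right=1035), strand="+")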
def get(self, loc, resolution=1, read_extend=0, kde_smooth=False,
view_wid=0, strand=False, **kargs):
"""
**Purpose**
get the data between location 'loc' and return it formatted as
a nbp resolution array
**Arguments**
loc (Required)
a valid location or string location.
resolution (Optional, default = 1bp)
nbp resolution required (you should probably send a float for accurate rendering)
read_extend (Optional, default = 0)
extend the read length to 'fill in the peak'
if the original reads are 36bp, then add ~70bp to give an
estimated size of the peak.
If the reads are end-based, then set this to the estimated
size of the DNA shear.
kde_smooth (Experimental)
perform kde smoothng on the data, using the integer specified as an option.
In this case the read_extend acts as a tag shift instead of a read_extend
Hence set that to half of the expected shear size.
strand (Optional, default=False)
collect only reads on the specified strand. (track will use read strand
information intelligently, if present).
**Returns**
an 'numpy.array([0, 1, 2 ... n])' contiginous array
or a tuple containing two arrays, one for each strand.
"""
if not isinstance(loc, location):
loc = location(loc=loc)
extended_loc = loc.expand(read_extend)
result = self.get_reads(extended_loc, strand=strand)
if kde_smooth:
return(self.__kde_smooth(loc, result, resolution, 0, view_wid, read_extend))
loc_left = loc["left"]
loc_right = loc["right"]
# make a single array
a = [0] * int( (loc_right-loc_left+resolution)/resolution ) # Fast list allocation
# Python lists are much faster for this than numpy or array
len_a = len(a)
for r in result:
read_left, read_right, strand = r
if strand == "+":
read_right += (read_extend + 1) # coords are open
elif strand == "-" :
read_left -= read_extend
read_right += 1 # coords are open
rel_array_left = int((read_left - loc_left) // resolution)
rel_array_right = int((read_right - loc_left) // resolution)
rel_array_left = max(rel_array_left, 0)
rel_array_right = min(rel_array_right, len_a)
for array_relative_location in range(rel_array_left, rel_array_right, 1):
a[array_relative_location] += 1
#a[rel_array_left:rel_array_right] += 1 # Why is this slower than the for loop? # could be done with num_expr?
#[a[array_relative_location].__add__(1) for array_relative_location in xrange(rel_array_left, rel_array_right, 1)] # just returns the exact item, a is unaffected?
return(numpy.array(a)*self.norm_factor)
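    # Illustrative usage sketch: a 1 bp resolution pileup over a 5 kb window with
    # 200 bp read extension (the location string is made up):
    #   a = t.get("chr1:1000000-1005000", read_extend=200)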
def __kde_smooth(self, loc, reads, resolution, bandwidth, view_wid, read_shift=100):
"""
Internal abstraction for kde smoothing
Returns a new array
"""
# Shift the reads
newr = []
for r in reads:
if r[2] in positive_strand_labels:
newr.append(float((r[0] + read_shift) - loc["left"])) # Must be floats for kde to work
elif r[2] in negative_strand_labels:
newr.append(float((r[1] - read_shift) - loc["left"]))
a = linspace(0, loc["right"] - loc["left"], view_wid)
# Hack gaussian_kde()
def covariance_factor(self):
return 0.02
kde = stats.gaussian_kde(newr)
setattr(kde, 'covariance_factor', covariance_factor.__get__(kde, type(kde)))
kde._compute_covariance()
        kk = kde.evaluate(a) * 1000000 # rescale to get into integer range.
res = array(kk)
return(res)
def get_all_reads_on_chromosome(self, chrom, strand=None):
"""
**Purpose**
Get all of the reads on chromosomes.
**Arguments**
chromosome (Required)
the name of the chromosome to pull from.
strand (Optional)
selct + or - strands and only collect those.
**Returns**
An iterator to collect the data from.
You must process the data in some sort of for loop:
for read in trk.get_all_reads_on_chromosome("1")
"""
assert chrom, "You must provide a chromosome"
        assert chrom in self.get_chromosome_names(), "chromosome '%s' not found in this track" % chrom
if not self._c:
self._c = self._connection.cursor()
if len(chrom) < 30: # small security check
table_name = "chr_%s" % chrom
if strand:
result = self._c.execute("SELECT * FROM %s WHERE strand=?" % table_name, strand)
else:
result = self._c.execute("SELECT * FROM %s" % table_name)
#reads = self._c.fetchall()
return(result)
def get_array_chromosome(self, chrom, read_extend=0, strand=None, resolution=1, _silent=False, **kargs):
"""
**Purpose**
get the entire array data for the chromosome
**Arguments**
chromosome (Required)
a number '1', string "1", or X, Y
strand (Optional, default = None, ie. collect and merge both strands)
strand, but only valid for stranded tracks
if "+" return only that strand, if '-' return only the negative
strand (will recognise several forms of strand, e.g. F/R, +/-
NOT SUPPORTED AT THIS TIME
resolution (default = 1bp)
nbp resolution required (you should probably send a float for accurate rendering)
read_extend (Optional, default = 0)
extend the read length to 'fill in the peak'
if the original reads are 36bp, then add ~70bp to give an
estimated size of the peak.
If the reads are end-based, then set this to the estimated
size of the DNA shear.
Use a read_extend of 0 to return the actual frags.
**Returns**
an 'numpy.array([0, 1, 2 ... n], dtype=integer)' contiginous array of integers
or a tuple containing two arrays, one for each strand.
"""
if strand:
raise NotImplementedError("Eh... strand not supported yet...")
c = self._connection.cursor()
# Find out if we already have this array:
c.execute("SELECT chromosome FROM pre_build WHERE (chromosome=? AND read_extend=?)", (chrom, read_extend))
result = c.fetchone()
if not result:
if not _silent: # It purposely misses the cache when building the track
config.log.warning('Cache miss on chromosome=%s, read_extend=%s' % (chrom, read_extend))
return(self.__cache_miss_get_array_chromosome(chromosome=chrom, read_extend=read_extend)) # Don't have... :(
# The above is already * self.norm_factor
# have a changed copy:
c.execute("SELECT array_blob FROM pre_build WHERE (chromosome=? AND read_extend=?)", (chrom, read_extend))
return(self._unformat_data(c.fetchone()[0])) # DO NOT multiply the below result by norm_factor
# The prebuilt causes a cache _miss and a return above. norm_factor is applied at the end of
# __cache_miss_get_array_chromosome() and the result is stored in the array_blob
def __cache_miss_get_array_chromosome(self, chromosome, strand=None, resolution=1, read_extend=0, **kargs):
# Generate the chromosome array for a cache miss
if not self._c:
self._c = self._connection.cursor()
table_name = "chr_%s" % chromosome
self._c.execute("SELECT * FROM %s" % table_name)
reads = sorted(self._c.fetchall(), key=itemgetter(0)) # shouldn't this be 1?
# I need to find the right most read to estimate the size of the track array.
right_most = reads[-1][1]+read_extend+1000 # Add a large enough pad onto the end, particularly for weird data with ultra long reads
# make an array.
a = [0] * int( (right_most+resolution)/resolution ) # Fast list allocation
# Python lists are much faster for this than numpy or array
len_a = len(a) # == right_most+resolution
for r in reads:
read_left, read_right, strand = r
if read_extend > 0: # if == 0 then use the total frag size
if strand == "+":
read_right += (read_extend + 1) # coords are open
elif strand == "-" :
read_left -= read_extend
read_right += 1 # coords are open
rel_array_left = read_left
rel_array_right = read_right
if resolution != 1:
rel_array_left = int(read_left // resolution)
rel_array_right = int(read_right // resolution)
if rel_array_left <= 0:
rel_array_left = 0
if rel_array_right > len_a: # This should never happen?
rel_array_right = len_a
# fold up to 1 liner
# This one liner does not work for some reason.
#[a[array_relative_location] + 1 for array_relative_location in xrange(rel_array_left, rel_array_right, 1)]
for array_relative_location in range(rel_array_left, rel_array_right, 1):
a[array_relative_location] += 1
#print "array_len", len(a)
return(numpy.array(a)*self.norm_factor) # NORMFACTOR (If any) is done HERE!
def get_reads(self, loc, strand=None):
"""
**Purpose**
get all of the sequence reads between location 'loc' and return
it formatted as a list of tuples: (left, right, strand), seq reads.
**Arguments**
loc (Required)
a valid location or string location.
**Returns**
a list containing all of the reads between loc.
"""
if not isinstance(loc, location):
loc = location(loc=loc)
if self.gl_mem_cache: # Use the mem cache if available
# work out which of the buckets is required:
left_buck = int((loc["left"]-1-delta)/config.bucket_size)*config.bucket_size
right_buck = int((loc["right"]+delta)/config.bucket_size)*config.bucket_size
buckets_reqd = list(range(left_buck, right_buck+config.bucket_size, config.bucket_size)) # make sure to get the right spanning and left spanning sites
# get the ids reqd.
loc_ids = set()
if buckets_reqd:
for buck in buckets_reqd:
if buck in self.gl_mem_cache.buckets[loc["chr"]]:
loc_ids.update(self.gl_mem_cache.buckets[loc["chr"]][buck]) # set = unique ids
# loc_ids is a set, and has no order.
#print loc_ids
for index in loc_ids:
#print loc.qcollide(self.linearData[index]["loc"]), loc, self.linearData[index]["loc"]
if loc.qcollide(self.linearData[index]["loc"]):
# result expected in form :read_left, read_right, strand
result.append((self.linearData[index]["loc"]['left'], self.linearData[index]["loc"]['right'], self.linearData[index]["strand"]))
#if len(loc["chr"]) < 30: # small security measure.
table_name = "chr_%s" % loc["chr"]
#result = self._connection.execute("SELECT * FROM %s WHERE (?>=left AND ?<=right) OR (?>=left AND ?<=right) OR (left<=? AND right>=?) OR (?<=left AND ?>=right)" % table_name,
# (loc["left"], loc["left"], loc["right"], loc["right"], loc["left"], loc["right"], loc["left"], loc["right"]))
# This is the code used in location.collide():
#self["right"] >= loc["left"] and self["left"] <= loc["right"]
result = self._connection.execute("SELECT left, right, strand FROM %s WHERE (right >= ? AND left <= ?)" % table_name,
(loc["left"], loc["right"]))
#result = None
result = result.fetchall() # safer for empty lists and reusing the cursor
if result and strand: # sort out only this strand
if strand in positive_strand_labels:
strand_to_get = positive_strand_labels
elif strand in negative_strand_labels:
strand_to_get = negative_strand_labels
newl = [r for r in result if r[2] in strand_to_get]
result = newl
return(result)
def get_read_count(self, loc):
"""
**Purpose**
get the number of reads within the location specified
**Arguments**
loc (Required)
a valid location or string location.
**Returns**
            a float (or 0.0) containing the number of reads falling within
the location string.
"""
if not self._c:
self._c = self._connection.cursor()
if not isinstance(loc, location):
loc = location(loc=loc)
table_name = "chr_%s" % loc["chr"]
self._c.execute("SELECT left, right, strand FROM %s WHERE (right >= ? AND left <= ?)" % table_name,
(loc["left"], loc["right"]))
return(len(self._c.fetchall())*self.norm_factor)
def get_chromosome_names(self):
"""
**Purpose**
Return a list of all the valid chromosome names in the database
**Arguments**
None
**Returns**
A list of strings containing all of the chromosome names in the track
"""
if not self._c:
self._c = self._connection.cursor()
self._c.execute("SELECT chromosome FROM main")
r = [i[0] for i in self._c.fetchall()]
return(set(r))
def get_numreads_on_chromosome(self, name):
"""
**Purpose**
Return the number of reads on chromosme name
**Arguments**
name (Required)
get the number of reads on chromsome 'name'
**Returns**
An integer containing the number of reads
"""
if not self._c:
self._c = self._connection.cursor()
self._c.execute("SELECT chromosome, seq_reads FROM main WHERE chromosome=?", (str(name), ))
r = self._c.fetchone()
return(r[1])
def get_total_num_reads(self):
"""
**Purpose**
Return the number total number of reads for this track.
**Arguments**
None
**Returns**
An integer containing the number of reads
"""
if not self._c:
self._c = self._connection.cursor()
self._c.execute("SELECT chromosome, seq_reads FROM main")
r = [int(i[1]) for i in self._c.fetchall()]
return(sum(r))
def _debug__print_all_tables(self):
c = self._connection.cursor()
c.execute("SELECT * FROM main")
result = c.fetchall()
print("Main:")
for item in result:
print(item)
print("Chr_Tables:")
for item in result:
table_name = "chr_%s" % str(item[0])[0] # stop injections.
print(" Table", table_name)
c.execute("SELECT * FROM %s" % table_name)
chr_table_res = c.fetchall()
for i in chr_table_res:
print(" ", i)
c.close()
def saveBedGraph(self, filename, bin_size=100, read_extend=0):
'''
**Purpose**
Save the track as a BedGraph
Will take into account the norm_factor if available
**Arguments**
filename (Required)
filename to save BG to
bin_size (Optional, default=100)
the size for each bin (resolution) of the BedGraph
read_extend (Optional, default=0)
extend the reads on the 3' end by this many base pairs.
set to 0 if your reads are the total fragments (e.g. paired-end data).
**Returns**
None
'''
assert filename, 'You must provide a filename'
with open(filename, 'w') as oh:
min_position = 0 # Could guess, but the below code will trim the padding zeros
for chrom in sorted(self.get_chromosome_names()):
this_chrom = self.get_array_chromosome(chrom, read_extend=read_extend)
config.log.info("Doing Chromosome '%s'" % chrom)
max_position = len(this_chrom)
for l in range(min_position, max_position, bin_size):
value = numpy.mean(this_chrom[l:l+bin_size])
if value > 0.0: # If zero then it is okay to skip the loc.
oh.write('chr%s\t%s\t%s\t%s\n' % (chrom, l, l+bin_size, numpy.mean(this_chrom[l:l+bin_size]))) # data is already norm_factor corrected
config.log.info("saveBedGraph: Saved '%s'" % filename)
return(None)
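    # Illustrative usage sketch (the filename is an example):
    #   t.saveBedGraph("sample.bedGraph", bin_size=100, read_extend=200)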
def pileup(self, genelist=None, key="loc", filename=None, heatmap_filename=None,
bin_size=500, window_size=5000, read_extend=200, use_kde=False, simple_cleanup=False,
only_do=False, stranded=False, respect_strand=True, raw_tag_filename=None,
norm_by_read_count=False, pointify=True,
**kargs):
"""
**Purpose**
draw cumulative 'pileups' of the tag density in the track based on a genelist
containing a "loc" tag
**Arguments**
genelist (Required)
A genelist-like object containing a "loc"-like key tag
key (Optional, default="loc")
the key to use for the locations. Must contain valid location data:
chr1:1000-1011 etc. draw_pileup() will use the centre of the location if it is a
span.
filename (Required)
the name of the image file to save the pileup graph to.
normalize (Optional, default=True)
IMPORTANT
If you are using the norm_factor system, you MUST set this to False!
bin_size (Optional, default=500)
bin_size to use for the heatmap
pointify (Optional, default=True)
                convert the genomic locations in 'genelist' to a single point
(Usually used in combination with window_size).
window_size (Optional, default=5000)
The window size +- around the centre of the peak to build the tag density map
from
read_extend (Optional, default=200)
extend the read x bp either 5' or 3' depending upon the strand of the read.
If use_kde is true then this will be the 'tag shift' value instead.
use_kde (Optional)
Use KDE versions of the tracks instead (Experimental)
simple_cleanup (False)
remove rows from the pileup that have < simple_cleanup tag counts
stranded (Optional, default=False)
build a stranded pileup, with + reads in blue and - reads in red
respect_strand (Optional, default=True)
If available, respect the orientation of the strand from the genelist.
This is useful if you are, say, using the TSS's and want to maintain the
orientation with respect to the transcription direction.
norm_by_read_count (Optional, default=False)
If you are not using a norm_factor for this library then you probably want to set this to True.
It will divide the resulting number of reads by the total number of reads,
i.e. it will account for differences in library sizes.
**Returns**
If succesful returns a list of lists containing the a single entry for each
entry in the original genelist (in the same order as the genelist).
"""
assert filename, "you must specify a filename"
assert genelist, "you must specify a genelist"
assert key in genelist.linearData[0], "the genelist appears to be lacking a loc key"
if stranded:
return(self.__draw_pileup_stranded(genelist, filename, window_size, **kargs))
else:
return(self.__draw_pileup_normal(genelist=genelist, key=key, filename=filename,
heatmap_filename=heatmap_filename,
bin_size=bin_size, window_size=window_size, read_extend=read_extend, use_kde=use_kde,
simple_cleanup=simple_cleanup, pointify=pointify,
norm_by_read_count=norm_by_read_count,
only_do=only_do, raw_tag_filename=raw_tag_filename, respect_strand=respect_strand,
**kargs))
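    # Illustrative usage sketch ('peaks' is assumed to be a genelist with a 'loc' key;
    # filenames are examples):
    #   res = t.pileup(genelist=peaks, filename="pileup.png", window_size=5000,
    #                  read_extend=200, norm_by_read_count=True)
    #   density = res["pileup"]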
'''
def __draw_pileup_normal(self, genelist=None, key="loc", filename=None, heatmap_filename=None,
bin_size=500, window_size=5000, read_extend=200, use_kde=False, simple_cleanup=False,
only_do=False, respect_strand=True, raw_tag_filename=None, mask_zero=False,
norm_by_read_count=False, normalize=False,
**kargs):
'''
def __draw_pileup_stranded(self, genelist=None, filename=None, bandwidth=300, **kargs):
"""
**Purpose**
Build a histogram density plot of the paired reads.
This will estimate the approximate observed shear size in your chip-seq data.
It pairs the data together for all pairs of all reads within a specified bandwidth
then outputs a plot of the resulting histogram.
**Arguments**
filename
the filename to save the image(s) to.
genelist (Required)
some sort of genelistlike object with a 'loc' key containing genomic coordinates
bandwidth (Optional, default=300)
area around the centre of the peak to build the cumulative distributions.
**Returns**
None
and some image files in <base_filename>.<draw_mode>
"""
# step along each chromosome. Quit if there are no reads in the window
if not self._c:
self._c = self._connection.cursor()
hist = {"+": zeros(bandwidth+1), "-": zeros(bandwidth+1)}
# get a sorted list of all the locs I am going to use.
all_locs = genelist['loc']
all_locs.sort()
p = progressbar(len(genelist))
for n, read in enumerate(all_locs):
loc = read.pointify().expand(bandwidth//2)
for s in self.get_reads(loc): # get reads returns all overlapping reads. I need to trim
# the edges to stop negative array positions
loc_left = s[0] - loc["left"]
loc_right = s[1] - loc["left"]
if s[2] == "+" and loc_left > 0:
loc_left = s[0] - loc["left"]
hist["+"][loc_left] += 1
elif s[2] == "-" and loc_right < bandwidth-1:
loc_right = s[1] - loc["left"]
hist["-"][loc_right] += 1
p.update(n)
if not self._draw:
self._draw = draw(self)
# now plot:
fig = self._draw.getfigure()
ax = fig.add_subplot(111)
ax.plot(hist["+"], color="blue")
ax.plot(hist["-"], color="red")
max_left = argmax(hist["+"])
max_right = argmax(hist["-"])
realfilename = self._draw._saveFigure(fig, filename)
config.log.info("Saved shear_size_pileup '%s'" % realfilename)
return(hist)
def __draw_pileup_normal(self, genelist=None, key="loc", filename=None, heatmap_filename=None,
bin_size=500, window_size=5000, read_extend=200, use_kde=False, simple_cleanup=False,
only_do=False, respect_strand=True, raw_tag_filename=None, mask_zero=False,
norm_by_read_count=False, pointify=True,
**kargs):
"""
The normal pileup views
"""
# See if there is a proper stransd key in there somewhere:
if respect_strand and (not "strand" in genelist.linearData[0]):
config.log.warning("I could not find the 'strand' key, setting respect_strand to False")
respect_strand = False
n = 0
h = 0
pileup = None
binned_data = None
setup_bins = False
number_of_tags_in_library = 1.0 # For code laziness
if norm_by_read_count:
            number_of_tags_in_library = self.get_total_num_reads()/float(1e6) # div 1e6 for number niceness.
# get a sorted list of all the locs I am going to use.
# I need to do this:
gl_sorted = genelist.deepcopy()
gl_sorted.sort(key)
all_locs = gl_sorted[key]
strands = ['+'] * len(all_locs)
if respect_strand:
strands = gl_sorted['strand']
curr_cached_chrom = None
cached_chrom = None
p = progressbar(len(genelist))
for i, read in enumerate(zip(all_locs, strands)):
l = read[0]
if pointify:
l = l.pointify()
if window_size:
l = l.expand(window_size)
# I can dispose and free memory as the locations are now sorted:
# See if the read_extend is already in the cache:
if l['chr'] != curr_cached_chrom:
curr_cached_chrom = l['chr']
                cached_chrom = self.get_array_chromosome(l['chr'], read_extend) # auto-deals with cache issues
# UseKDE is now unimplemented:
'''
if not use_kde:
a = self.get(l, read_extend=read_extend) # read_extend=shear size
else:
a = self.get(l, read_extend=read_extend, kde_smooth=True, view_wid=window_size) # read_extend=tag shift
'''
assert l['left'] >= 0, 'left genome coordinate is less than zero "%s"' % l
if l['right'] > cached_chrom.shape[0]:
# Trouble, need to fill in the part of the array with zeros
# Possible error here it l['left'] is also off the array?
expected_width = l['right'] - l['left']
actual_width = cached_chrom.shape[0] - l['left']
a = cached_chrom[l['left']:cached_chrom.shape[0]] # stop wrap around
a = numpy.pad(a, (0,expected_width-actual_width), mode='constant')
#print a, a.shape
                config.log.warning('Asked for part of the chromosome outside of the array "%s", skipping this loc' % str(l))
continue
else:
a = cached_chrom[l['left']:l['right']]
#print read
if respect_strand:
# positive strand is always correct, so I leave as is.
# For the reverse strand all I have to do is flip the array.
if read[1] in negative_strand_labels:
a = a[::-1]
# It is possible to ask for an array longer than the length of the array
# NOFIX?
#print l, a.shape
if pileup is None: # numpy __nonzero__ retardedness
pileup = a
binned_data = array([utils.bin_data(a, bin_size)])
setup_bins = True
else:
if sum(a) > simple_cleanup: # Only keep if tag count is > simple_cleanup
pileup = pileup + a
if heatmap_filename or raw_tag_filename:
if setup_bins:
#print binned_data, [utils.bin_data(a, bin_size)]
binned_data = append(binned_data, [utils.bin_data(a, bin_size)], axis=0)
if only_do and n > only_do:
#print only_do, n
break
p.update(i)
if not self._draw:
self._draw = draw()
if pileup is None: # numpy iszero testing:
raise AssertionError('no data found, either the bed is empty, has no regions or the trk file is empty')
if norm_by_read_count:
config.log.info('Normalized by read count')
pileup /= float(number_of_tags_in_library) # This one should not be used if norm_factor is also used
# This one SHOULD be used, even if norm_factor is True
pileup /= float(len(genelist)) # convert it back to a relative tag density.
# matplotlib pileup graph
fig = self._draw.getfigure(**kargs)
ax = fig.add_subplot(111)
ax.plot(pileup)
real_filename = self._draw.savefigure(fig, filename)
config.log.info("Saved pileup tag density to '%s'" % filename)
other_args = {}
if "vmax" in kargs:
other_args["vmax"] = kargs["vmax"]
if "vmin" in kargs:
other_args["vmin"] = kargs["vmin"]
# spin this off into a .heatmap() method?
if heatmap_filename or raw_tag_filename:
binned_data = numpy.delete(binned_data, numpy.s_[-1:], 1) # kill the rightmost empty col.
if raw_tag_filename:
binned_data = numpy.delete(binned_data, numpy.s_[-1:], 1)
oh = open(raw_tag_filename, "w")
writer = csv.writer(oh, dialect=csv.excel_tab)
writer.writerows(binned_data)
oh.close()
config.log.info("saved raw_tag_file to '%s'" % raw_tag_filename)
if heatmap_filename:
if other_args:
real_filename = self._draw._simple_heatmap(data=binned_data, filename=heatmap_filename,
dpi=150, figsize=(6, 24), aspect="long", **other_args)
else:
real_filename = self._draw._simple_heatmap(data=binned_data, filename=heatmap_filename,
dpi=150, figsize=(6, 24), aspect="long")
config.log.info("saved heatmap to '%s'" % real_filename)
ret = {"pileup": pileup}
if heatmap_filename or raw_tag_filename: #if binned_data:
# __nonzero__ not set in numpy arrays, so assume binned_data is valid
# if doing heatmap
ret["binned_data"] = binned_data
return(ret)
def measure_frip(self, genelist=None, sample=None, delta=None, pointify=False):
"""
**Purpose**
Measure the FRiP 'Fraction of Reads in Peaks' as defined by Landt et al., 2012;
Gen Res, 22:1813-1831.
Essentially the fraction of reads inside a list of peaks (from the genelist).
**Arguments**
genelist (Required)
A list of peaks, must have a "loc" key containing the location data.
Ideally this is the peak spans reported by your peak discovery tool,
but you can provide delta=xxx bp argument to expand the locations.
sample (Optional)
sample only the first n peaks. By default, all peaks are used.
delta (Optional, default=None)
a value to expand the locations by + and - delta.
pointify (Optional, default=False)
'pointify' (Convert a span of base pairs to the centre point).
Executed before 'delta'
**Returns**
The FRiP score, the total number of reads and the number of reads inside the peaks
"""
assert genelist, "You must provide a genelist"
assert "loc" in genelist.linearData[0], "The genelist does not appear to have a 'loc' key"
gl = genelist.deepcopy() # get a copy as I may mess with it.
if sample:
    gl = gl[sample]
if pointify:
gl = gl.pointify("loc")
if delta:
gl = gl.expand("loc", delta)
# work out the total number of reads in this library
chr_names = self.get_chromosome_names()
total_reads = 0
for chrom in chr_names:
total_reads += self.get_numreads_on_chromosome(chrom)
num_reads = 0
p = progressbar(len(gl))
for idx, item in enumerate(gl):
num_reads += self.get_read_count(item["loc"])
p.update(idx)
return({"FRiP": num_reads/float(total_reads), "Total number of reads": total_reads, "Number of reads in peaks": num_reads})
def qc_encode_idr(self, chrom_sizes=None, filename=None, max_shift=400, **kargs):
"""
**Purpose**
Perform QC for ChIP-seq libraries, as explained at
https://sites.google.com/site/anshulkundaje/projects/idr
and in Landt et al., 2012, Gen Res, 22:1813-1831.
**Arguments**
chrom_sizes (Required)
You must provide a dict containing the chromosome names and the
number of base pairs.
For mm9 this data is available as part of glbase and can be specified:
trk.qc_encode_idr(chrom_sizes=gldata.chromsizes["mm9"])
Support for hg18, hg19 and mm8 may be added in the future.
filename (Required)
filename to save the plot to
**Returns**
NSC and RSC values (see Landt et al., 2012, Gen Res, 22:1813-1831 for
details).
"""
assert chrom_sizes, "You must provide chromosome sizes"
assert filename, "You must provide a filename"
if not self._draw:
self._draw = draw()
# I only need to generate the + strand once.
plus_strands = {}
minus_strands = {}
# constructing a full numpy array would be excessively large; I only need to store pairs of reads
all_chroms = set(self.get_chromosome_names()) & set([i.replace("chr", "") for i in list(chrom_sizes.keys())]) # only iterate ones in both list
all_p = numpy.array([])
all_m = numpy.array([])
res = []
pears = numpy.zeros([max_shift, len(all_chroms)])
for idx, chrom in enumerate(all_chroms):
this_p = numpy.array([r[0] for r in self.get_all_reads_on_chromosome(chrom, "+")])
this_m = numpy.array([r[1] for r in self.get_all_reads_on_chromosome(chrom, "-")])
p = progressbar(max_shift)
for n in range(max_shift):
this_m = this_m - 1
union = numpy.union1d(this_p, this_m) # only ones I will have to look at
#union = numpy.intersect1d(this_p, this_m)
#xor_union = numpy.setxor1d(this_p, this_m)
#union = numpy.append(union, xor_union)
pair_p = numpy.bincount(this_p, minlength=max(this_p.max(), this_m.max())+1)[union]
pair_m = numpy.bincount(this_m, minlength=max(this_p.max(), this_m.max())+1)[union]
pears[n, idx] = pearsonr(pair_p, pair_m)[0]
p.update(n)
"""
fig = self._draw.getfigure()
ax = fig.add_subplot(111)
ax.plot(pair_p, pair_m, 'o', mec="none", alpha=0.2)
ax.set_title("Pearson: %.3f" % pears[n, idx])
fig.savefig("plots/test_%s_%s.png"% (chrom, n))
"""
print("Done chromosome '%s'" % chrom)
print(pears)
for row in pears:
res.append(numpy.average(row))
print(res)
fig = self._draw.getfigure(**kargs)
ax = fig.add_subplot(111)
ax.plot(numpy.arange(len(res)), res)
self._draw.savefigure(fig, filename)
ret = {"NSC": 0.0, "RSC": 0.0}
return(ret)
if __name__ == "__main__":
"""
Current 15 s
"""
import random, time
from .location import location
from .genelist import genelist
s = time.time()
print("Building...")
t = track(filename="testold.trk2", name="test", new=True)
for _ in range(10000):
l = random.randint(0, 100000)
t.add_location(location(chr="1", left=l, right=l+35), strand="+")
t.finalise()
e = time.time()
print(e-s, "s")
#t.finalise()
print(t.get_reads('chr1:100-200'))
s = time.time()
print("Fake bed...")
# fake a bed
newb = []
for _ in range(1000):
l = random.randint(1000, 100000) # 1000 is for the window size. -ve locs are real bad.
newb.append({"loc": location(chr="1", left=l, right=l+200), "strand": "+"})
bed = genelist()
bed.load_list(newb)
e = time.time()
print(e-s, "s")
t = track(filename="testold.trk2")
print("Pileup...")
import cProfile, pstats
cProfile.run("t.pileup(genelist=bed, filename='test.png', bin_size=10, window_size=1000)", "profile.pro")
p = pstats.Stats("profile.pro")
p.strip_dirs().sort_stats("time").print_stats()
print(t.pileup(genelist=bed, filename='/tmp/test2.png', respect_strand=True))
print(t.pileup(genelist=bed, filename='/tmp/test2.png', pointify=False, respect_strand=True))
print(bed.all())
print(t.pileup(genelist=bed, filename='/tmp/test2.png', pointify=False, window_size=0, respect_strand=True))
print(t.heatmap(genelist=bed, raw_heatmap_filename="/tmp/test.tsv", filename='/tmp/test.png', bin_size=10, window_size=1000))
print(t.heatmap(genelist=bed, raw_heatmap_filename="/tmp/test.tsv", filename='/tmp/test.png', bin_size=10, window_size=1000, log=None))
print(t.heatmap(genelist=bed, raw_heatmap_filename="/tmp/test.tsv", filename='/tmp/test.png', bin_size=10, window_size=1000, log=None, respect_strand=True))
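# A hedged, illustrative sketch (not part of the original test block): how
# measure_frip() could be exercised on a track and peak genelist like the
# synthetic ones built above. The pointify/delta values are assumptions for
# illustration, not recommendations.
def _example_measure_frip(trk, bed_genelist):
    frip = trk.measure_frip(genelist=bed_genelist, pointify=True, delta=300)
    print("FRiP score: %.4f (%s / %s reads in peaks)" % (
        frip["FRiP"], frip["Number of reads in peaks"], frip["Total number of reads"]))
    return frip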
|
the-stack_0_14095 | from __future__ import unicode_literals
import datetime
import decimal
import itertools
from wtforms import widgets
from wtforms.compat import text_type, izip
from wtforms.i18n import DummyTranslations
from wtforms.validators import StopValidation
from wtforms.utils import unset_value
__all__ = (
'BooleanField', 'DecimalField', 'DateField', 'DateTimeField', 'FieldList',
'FloatField', 'FormField', 'IntegerField', 'RadioField', 'SelectField',
'SelectMultipleField', 'StringField',
)
class Field(object):
"""
Field base class
"""
errors = tuple()
process_errors = tuple()
raw_data = None
validators = tuple()
widget = None
_formfield = True
_translations = DummyTranslations()
do_not_call_in_templates = True # Allow Django 1.4 traversal
def __new__(cls, *args, **kwargs):
if '_form' in kwargs and '_name' in kwargs:
return super(Field, cls).__new__(cls)
else:
return UnboundField(cls, *args, **kwargs)
def __init__(self, label=None, validators=None, filters=tuple(),
description='', id=None, default=None, widget=None,
_form=None, _name=None, _prefix='', _translations=None,
_meta=None):
"""
Construct a new field.
:param label:
The label of the field.
:param validators:
A sequence of validators to call when `validate` is called.
:param filters:
A sequence of filters which are run on input data by `process`.
:param description:
A description for the field, typically used for help text.
:param id:
An id to use for the field. A reasonable default is set by the form,
and you shouldn't need to set this manually.
:param default:
The default value to assign to the field, if no form or object
input is provided. May be a callable.
:param widget:
If provided, overrides the widget used to render the field.
:param _form:
The form holding this field. It is passed by the form itself during
construction. You should never pass this value yourself.
:param _name:
The name of this field, passed by the enclosing form during its
construction. You should never pass this value yourself.
:param _prefix:
The prefix to prepend to the form name of this field, passed by
the enclosing form during construction.
:param _translations:
A translations object providing message translations. Usually
passed by the enclosing form during construction. See
:doc:`I18n docs <i18n>` for information on message translations.
:param _meta:
If provided, this is the 'meta' instance from the form. You usually
don't pass this yourself.
If `_form` and `_name` aren't provided, an :class:`UnboundField` will be
returned instead. Call its :func:`bind` method with a form instance and
a name to construct the field.
"""
if _translations is not None:
self._translations = _translations
if _meta is not None:
self.meta = _meta
elif _form is not None:
self.meta = _form.meta
else:
raise TypeError("Must provide one of _form or _meta")
self.default = default
self.description = description
self.filters = filters
self.flags = Flags()
self.name = _prefix + _name
self.short_name = _name
self.type = type(self).__name__
self.validators = validators or list(self.validators)
self.id = id or self.name
self.label = Label(self.id, label if label is not None else self.gettext(_name.replace('_', ' ').title()))
if widget is not None:
self.widget = widget
for v in self.validators:
flags = getattr(v, 'field_flags', ())
for f in flags:
setattr(self.flags, f, True)
def __unicode__(self):
"""
Returns an HTML representation of the field. For more powerful rendering,
see the `__call__` method.
"""
return self()
def __str__(self):
"""
Returns an HTML representation of the field. For more powerful rendering,
see the `__call__` method.
"""
return self()
def __html__(self):
"""
Returns an HTML representation of the field. For more powerful rendering,
see the :meth:`__call__` method.
"""
return self()
def __call__(self, **kwargs):
"""
Render this field as HTML, using keyword args as additional attributes.
This delegates rendering to
:meth:`meta.render_field <wtforms.meta.DefaultMeta.render_field>`
whose default behavior is to call the field's widget, passing any
keyword arguments from this call along to the widget.
In all of the WTForms HTML widgets, keyword arguments are turned to
HTML attributes, though in theory a widget is free to do anything it
wants with the supplied keyword arguments, and widgets don't have to
even do anything related to HTML.
"""
return self.meta.render_field(self, kwargs)
def gettext(self, string):
"""
Get a translation for the given message.
This proxies for the internal translations object.
:param string: A unicode string to be translated.
:return: A unicode string which is the translated output.
"""
return self._translations.gettext(string)
def ngettext(self, singular, plural, n):
"""
Get a translation for a message which can be pluralized.
:param str singular: The singular form of the message.
:param str plural: The plural form of the message.
:param int n: The number of elements this message is referring to
"""
return self._translations.ngettext(singular, plural, n)
def validate(self, form, extra_validators=tuple()):
"""
Validates the field and returns True or False. `self.errors` will
contain any errors raised during validation. This is usually only
called by `Form.validate`.
Subfields shouldn't override this, but rather override either
`pre_validate`, `post_validate` or both, depending on needs.
:param form: The form the field belongs to.
:param extra_validators: A sequence of extra validators to run.
"""
self.errors = list(self.process_errors)
stop_validation = False
# Call pre_validate
try:
self.pre_validate(form)
except StopValidation as e:
if e.args and e.args[0]:
self.errors.append(e.args[0])
stop_validation = True
except ValueError as e:
self.errors.append(e.args[0])
# Run validators
if not stop_validation:
chain = itertools.chain(self.validators, extra_validators)
stop_validation = self._run_validation_chain(form, chain)
# Call post_validate
try:
self.post_validate(form, stop_validation)
except ValueError as e:
self.errors.append(e.args[0])
return len(self.errors) == 0
def _run_validation_chain(self, form, validators):
"""
Run a validation chain, stopping if any validator raises StopValidation.
:param form: The Form instance this field belongs to.
:param validators: a sequence or iterable of validator callables.
:return: True if validation was stopped, False otherwise.
"""
for validator in validators:
try:
validator(form, self)
except StopValidation as e:
if e.args and e.args[0]:
self.errors.append(e.args[0])
return True
except ValueError as e:
self.errors.append(e.args[0])
return False
def pre_validate(self, form):
"""
Override if you need field-level validation. Runs before any other
validators.
:param form: The form the field belongs to.
"""
pass
def post_validate(self, form, validation_stopped):
"""
Override if you need to run any field-level validation tasks after
normal validation. This shouldn't be needed in most cases.
:param form: The form the field belongs to.
:param validation_stopped:
`True` if any validator raised StopValidation.
"""
pass
def process(self, formdata, data=unset_value):
"""
Process incoming data, calling process_data, process_formdata as needed,
and run filters.
If `data` is not provided, process_data will be called on the field's
default.
Field subclasses usually won't override this, instead overriding the
process_formdata and process_data methods. Only override this for
special advanced processing, such as when a field encapsulates many
inputs.
"""
self.process_errors = []
if data is unset_value:
try:
data = self.default()
except TypeError:
data = self.default
self.object_data = data
try:
self.process_data(data)
except ValueError as e:
self.process_errors.append(e.args[0])
if formdata:
try:
if self.name in formdata:
self.raw_data = formdata.getlist(self.name)
else:
self.raw_data = []
self.process_formdata(self.raw_data)
except ValueError as e:
self.process_errors.append(e.args[0])
try:
for filter in self.filters:
self.data = filter(self.data)
except ValueError as e:
self.process_errors.append(e.args[0])
def process_data(self, value):
"""
Process the Python data applied to this field and store the result.
This will be called during form construction by the form's `kwargs` or
`obj` argument.
:param value: The python object containing the value to process.
"""
self.data = value
def process_formdata(self, valuelist):
"""
Process data received over the wire from a form.
This will be called during form construction with data supplied
through the `formdata` argument.
:param valuelist: A list of strings to process.
"""
if valuelist:
self.data = valuelist[0]
def populate_obj(self, obj, name):
"""
Populates `obj.<name>` with the field's data.
:note: This is a destructive operation. If `obj.<name>` already exists,
it will be overridden. Use with caution.
"""
setattr(obj, name, self.data)
class UnboundField(object):
_formfield = True
creation_counter = 0
def __init__(self, field_class, *args, **kwargs):
UnboundField.creation_counter += 1
self.field_class = field_class
self.args = args
self.kwargs = kwargs
self.creation_counter = UnboundField.creation_counter
def bind(self, form, name, prefix='', translations=None, **kwargs):
kw = dict(
self.kwargs,
_form=form,
_prefix=prefix,
_name=name,
_translations=translations,
**kwargs
)
return self.field_class(*self.args, **kw)
def __repr__(self):
return '<UnboundField(%s, %r, %r)>' % (self.field_class.__name__, self.args, self.kwargs)
class Flags(object):
"""
Holds a set of boolean flags as attributes.
Accessing a non-existing attribute returns False for its value.
"""
def __getattr__(self, name):
if name.startswith('_'):
return super(Flags, self).__getattr__(name)
return False
def __contains__(self, name):
return getattr(self, name)
def __repr__(self):
flags = (name for name in dir(self) if not name.startswith('_'))
return '<wtforms.fields.Flags: {%s}>' % ', '.join(flags)
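# Illustrative sketch (not part of the original module): Flags answers False
# for any flag that has not been set, and supports `in` membership tests.
def _example_flags_usage():
    flags = Flags()
    flags.required = True
    assert flags.required is True
    assert flags.optional is False  # unset flags read as False
    assert 'required' in flags and 'optional' not in flags
    return flags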
class Label(object):
"""
An HTML form label.
"""
def __init__(self, field_id, text):
self.field_id = field_id
self.text = text
def __str__(self):
return self()
def __unicode__(self):
return self()
def __html__(self):
return self()
def __call__(self, text=None, **kwargs):
if 'for_' in kwargs:
kwargs['for'] = kwargs.pop('for_')
else:
kwargs.setdefault('for', self.field_id)
attributes = widgets.html_params(**kwargs)
return widgets.HTMLString('<label %s>%s</label>' % (attributes, text or self.text))
def __repr__(self):
return 'Label(%r, %r)' % (self.field_id, self.text)
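# Illustrative sketch (not part of the original module): rendering a Label and
# how the reserved `for_` keyword is mapped onto the HTML `for` attribute.
def _example_label_rendering():
    label = Label('email', 'Email address')
    default_html = label()  # <label for="email">Email address</label>
    custom_html = label('Work email', for_='work-email')
    return default_html, custom_html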
class SelectFieldBase(Field):
option_widget = widgets.Option()
"""
Base class for fields which can be iterated to produce options.
This isn't a field, but an abstract base class for fields which want to
provide this functionality.
"""
def __init__(self, label=None, validators=None, option_widget=None, **kwargs):
super(SelectFieldBase, self).__init__(label, validators, **kwargs)
if option_widget is not None:
self.option_widget = option_widget
def iter_choices(self):
"""
Provides data for choice widget rendering. Must return a sequence or
iterable of (value, label, selected) tuples.
"""
raise NotImplementedError()
def __iter__(self):
opts = dict(widget=self.option_widget, _name=self.name, _form=None, _meta=self.meta)
for i, (value, label, checked) in enumerate(self.iter_choices()):
opt = self._Option(label=label, id='%s-%d' % (self.id, i), **opts)
opt.process(None, value)
opt.checked = checked
yield opt
class _Option(Field):
checked = False
def _value(self):
return text_type(self.data)
class SelectField(SelectFieldBase):
widget = widgets.Select()
def __init__(self, label=None, validators=None, coerce=text_type, choices=None, **kwargs):
super(SelectField, self).__init__(label, validators, **kwargs)
self.coerce = coerce
self.choices = choices
def iter_choices(self):
for value, label in self.choices:
yield (value, label, self.coerce(value) == self.data)
def process_data(self, value):
try:
self.data = self.coerce(value)
except (ValueError, TypeError):
self.data = None
def process_formdata(self, valuelist):
if valuelist:
try:
self.data = self.coerce(valuelist[0])
except ValueError:
raise ValueError(self.gettext('Invalid Choice: could not coerce'))
def pre_validate(self, form):
for v, _ in self.choices:
if self.data == v:
break
else:
raise ValueError(self.gettext('Not a valid choice'))
class SelectMultipleField(SelectField):
"""
No different from a normal select field, except this one can take (and
validate) multiple choices. You'll need to specify the HTML `size`
attribute to the select field when rendering.
"""
widget = widgets.Select(multiple=True)
def iter_choices(self):
for value, label in self.choices:
selected = self.data is not None and self.coerce(value) in self.data
yield (value, label, selected)
def process_data(self, value):
try:
self.data = list(self.coerce(v) for v in value)
except (ValueError, TypeError):
self.data = None
def process_formdata(self, valuelist):
try:
self.data = list(self.coerce(x) for x in valuelist)
except ValueError:
raise ValueError(self.gettext('Invalid choice(s): one or more data inputs could not be coerced'))
def pre_validate(self, form):
if self.data:
values = list(c[0] for c in self.choices)
for d in self.data:
if d not in values:
raise ValueError(self.gettext("'%(value)s' is not a valid choice for this field") % dict(value=d))
class RadioField(SelectField):
"""
Like a SelectField, except displays a list of radio buttons.
Iterating the field will produce subfields (each containing a label as
well) in order to allow custom rendering of the individual radio fields.
"""
widget = widgets.ListWidget(prefix_label=False)
option_widget = widgets.RadioInput()
class StringField(Field):
"""
This field is the base for most of the more complicated fields, and
represents an ``<input type="text">``.
"""
widget = widgets.TextInput()
def process_formdata(self, valuelist):
if valuelist:
self.data = valuelist[0]
else:
self.data = ''
def _value(self):
return text_type(self.data) if self.data is not None else ''
class LocaleAwareNumberField(Field):
"""
Base class for implementing locale-aware number parsing.
Locale-aware numbers require the 'babel' package to be present.
"""
def __init__(self, label=None, validators=None, use_locale=False, number_format=None, **kwargs):
super(LocaleAwareNumberField, self).__init__(label, validators, **kwargs)
self.use_locale = use_locale
if use_locale:
self.number_format = number_format
self.locale = kwargs['_form'].meta.locales[0]
self._init_babel()
def _init_babel(self):
try:
from babel import numbers
self.babel_numbers = numbers
except ImportError:
raise ImportError('Using locale-aware decimals requires the babel library.')
def _parse_decimal(self, value):
return self.babel_numbers.parse_decimal(value, self.locale)
def _format_decimal(self, value):
return self.babel_numbers.format_decimal(value, self.number_format, self.locale)
class IntegerField(Field):
"""
A text field, except all input is coerced to an integer. Erroneous input
is ignored and will not be accepted as a value.
"""
widget = widgets.TextInput()
def __init__(self, label=None, validators=None, **kwargs):
super(IntegerField, self).__init__(label, validators, **kwargs)
def _value(self):
if self.raw_data:
return self.raw_data[0]
elif self.data is not None:
return text_type(self.data)
else:
return ''
def process_formdata(self, valuelist):
if valuelist:
try:
self.data = int(valuelist[0])
except ValueError:
self.data = None
raise ValueError(self.gettext('Not a valid integer value'))
class DecimalField(LocaleAwareNumberField):
"""
A text field which displays and coerces data of the `decimal.Decimal` type.
:param places:
How many decimal places to quantize the value to for display on form.
If None, does not quantize value.
:param rounding:
How to round the value during quantize, for example
`decimal.ROUND_UP`. If unset, uses the rounding value from the
current thread's context.
:param use_locale:
If True, use locale-based number formatting. Locale-based number
formatting requires the 'babel' package.
:param number_format:
Optional number format for locale. If omitted, use the default decimal
format for the locale.
"""
widget = widgets.TextInput()
def __init__(self, label=None, validators=None, places=unset_value, rounding=None, **kwargs):
super(DecimalField, self).__init__(label, validators, **kwargs)
if self.use_locale and (places is not unset_value or rounding is not None):
raise TypeError("When using locale-aware numbers, 'places' and 'rounding' are ignored.")
if places is unset_value:
places = 2
self.places = places
self.rounding = rounding
def _value(self):
if self.raw_data:
return self.raw_data[0]
elif self.data is not None:
if self.use_locale:
return text_type(self._format_decimal(self.data))
elif self.places is not None:
if hasattr(self.data, 'quantize'):
exp = decimal.Decimal('.1') ** self.places
if self.rounding is None:
quantized = self.data.quantize(exp)
else:
quantized = self.data.quantize(exp, rounding=self.rounding)
return text_type(quantized)
else:
# If for some reason, data is a float or int, then format
# as we would for floats using string formatting.
format = '%%0.%df' % self.places
return format % self.data
else:
return text_type(self.data)
else:
return ''
def process_formdata(self, valuelist):
if valuelist:
try:
if self.use_locale:
self.data = self._parse_decimal(valuelist[0])
else:
self.data = decimal.Decimal(valuelist[0])
except (decimal.InvalidOperation, ValueError):
self.data = None
raise ValueError(self.gettext('Not a valid decimal value'))
class FloatField(Field):
"""
A text field, except all input is coerced to a float. Erroneous input
is ignored and will not be accepted as a value.
"""
widget = widgets.TextInput()
def __init__(self, label=None, validators=None, **kwargs):
super(FloatField, self).__init__(label, validators, **kwargs)
def _value(self):
if self.raw_data:
return self.raw_data[0]
elif self.data is not None:
return text_type(self.data)
else:
return ''
def process_formdata(self, valuelist):
if valuelist:
try:
self.data = float(valuelist[0])
except ValueError:
self.data = None
raise ValueError(self.gettext('Not a valid float value'))
class BooleanField(Field):
"""
Represents an ``<input type="checkbox">``. Set the checked status with the
``default`` option: any truthy value for ``default``, e.g. ``default="checked"``,
renders the ``checked`` attribute on the HTML element and sets ``data`` to ``True``.
:param false_values:
If provided, a sequence of strings each of which is an exact match
string of what is considered a "false" value. Defaults to the tuple
``('false', '')``
"""
widget = widgets.CheckboxInput()
false_values = ('false', '')
def __init__(self, label=None, validators=None, false_values=None, **kwargs):
super(BooleanField, self).__init__(label, validators, **kwargs)
if false_values is not None:
self.false_values = false_values
def process_data(self, value):
self.data = bool(value)
def process_formdata(self, valuelist):
if not valuelist or valuelist[0] in self.false_values:
self.data = False
else:
self.data = True
def _value(self):
if self.raw_data:
return text_type(self.raw_data[0])
else:
return 'y'
class DateTimeField(Field):
"""
A text field which stores a `datetime.datetime` matching a format.
"""
widget = widgets.TextInput()
def __init__(self, label=None, validators=None, format='%Y-%m-%d %H:%M:%S', **kwargs):
super(DateTimeField, self).__init__(label, validators, **kwargs)
self.format = format
def _value(self):
if self.raw_data:
return ' '.join(self.raw_data)
else:
return self.data and self.data.strftime(self.format) or ''
def process_formdata(self, valuelist):
if valuelist:
date_str = ' '.join(valuelist)
try:
self.data = datetime.datetime.strptime(date_str, self.format)
except ValueError:
self.data = None
raise ValueError(self.gettext('Not a valid datetime value'))
class DateField(DateTimeField):
"""
Same as DateTimeField, except stores a `datetime.date`.
"""
def __init__(self, label=None, validators=None, format='%Y-%m-%d', **kwargs):
super(DateField, self).__init__(label, validators, format, **kwargs)
def process_formdata(self, valuelist):
if valuelist:
date_str = ' '.join(valuelist)
try:
self.data = datetime.datetime.strptime(date_str, self.format).date()
except ValueError:
self.data = None
raise ValueError(self.gettext('Not a valid date value'))
class FormField(Field):
"""
Encapsulate a form as a field in another form.
:param form_class:
A subclass of Form that will be encapsulated.
:param separator:
A string which will be suffixed to this field's name to create the
prefix to enclosed fields. The default is fine for most uses.
"""
widget = widgets.TableWidget()
def __init__(self, form_class, label=None, validators=None, separator='-', **kwargs):
super(FormField, self).__init__(label, validators, **kwargs)
self.form_class = form_class
self.separator = separator
self._obj = None
if self.filters:
raise TypeError('FormField cannot take filters, as the encapsulated data is not mutable.')
if validators:
raise TypeError('FormField does not accept any validators. Instead, define them on the enclosed form.')
def process(self, formdata, data=unset_value):
if data is unset_value:
try:
data = self.default()
except TypeError:
data = self.default
self._obj = data
self.object_data = data
prefix = self.name + self.separator
if isinstance(data, dict):
self.form = self.form_class(formdata=formdata, prefix=prefix, **data)
else:
self.form = self.form_class(formdata=formdata, obj=data, prefix=prefix)
def validate(self, form, extra_validators=tuple()):
if extra_validators:
raise TypeError('FormField does not accept in-line validators, as it gets errors from the enclosed form.')
return self.form.validate()
def populate_obj(self, obj, name):
candidate = getattr(obj, name, None)
if candidate is None:
if self._obj is None:
raise TypeError('populate_obj: cannot find a value to populate from the provided obj or input data/defaults')
candidate = self._obj
setattr(obj, name, candidate)
self.form.populate_obj(candidate)
def __iter__(self):
return iter(self.form)
def __getitem__(self, name):
return self.form[name]
def __getattr__(self, name):
return getattr(self.form, name)
@property
def data(self):
return self.form.data
@property
def errors(self):
return self.form.errors
class FieldList(Field):
"""
Encapsulate an ordered list of multiple instances of the same field type,
keeping data as a list.
>>> authors = FieldList(StringField('Name', [validators.required()]))
:param unbound_field:
A partially-instantiated field definition, just like that would be
defined on a form directly.
:param min_entries:
if provided, always have at least this many entries on the field,
creating blank ones if the provided input does not specify a sufficient
amount.
:param max_entries:
accept no more than this many entries as input, even if more exist in
formdata.
"""
widget = widgets.ListWidget()
def __init__(self, unbound_field, label=None, validators=None, min_entries=0,
max_entries=None, default=tuple(), **kwargs):
super(FieldList, self).__init__(label, validators, default=default, **kwargs)
if self.filters:
raise TypeError('FieldList does not accept any filters. Instead, define them on the enclosed field.')
assert isinstance(unbound_field, UnboundField), 'Field must be unbound, not a field class'
self.unbound_field = unbound_field
self.min_entries = min_entries
self.max_entries = max_entries
self.last_index = -1
self._prefix = kwargs.get('_prefix', '')
def process(self, formdata, data=unset_value):
self.entries = []
if data is unset_value or not data:
try:
data = self.default()
except TypeError:
data = self.default
self.object_data = data
if formdata:
indices = sorted(set(self._extract_indices(self.name, formdata)))
if self.max_entries:
indices = indices[:self.max_entries]
idata = iter(data)
for index in indices:
try:
obj_data = next(idata)
except StopIteration:
obj_data = unset_value
self._add_entry(formdata, obj_data, index=index)
else:
for obj_data in data:
self._add_entry(formdata, obj_data)
while len(self.entries) < self.min_entries:
self._add_entry(formdata)
def _extract_indices(self, prefix, formdata):
"""
Yield indices of any keys with given prefix.
formdata must be an object which will produce keys when iterated. For
example, if field 'foo' contains keys 'foo-0-bar', 'foo-1-baz', then
the numbers 0 and 1 will be yielded, but not necessarily in order.
"""
offset = len(prefix) + 1
for k in formdata:
if k.startswith(prefix):
k = k[offset:].split('-', 1)[0]
if k.isdigit():
yield int(k)
def validate(self, form, extra_validators=tuple()):
"""
Validate this FieldList.
Note that FieldList validation differs from normal field validation in
that FieldList validates all its enclosed fields first before running any
of its own validators.
"""
self.errors = []
# Run validators on all entries within
for subfield in self.entries:
if not subfield.validate(form):
self.errors.append(subfield.errors)
chain = itertools.chain(self.validators, extra_validators)
self._run_validation_chain(form, chain)
return len(self.errors) == 0
def populate_obj(self, obj, name):
values = getattr(obj, name, None)
try:
ivalues = iter(values)
except TypeError:
ivalues = iter([])
candidates = itertools.chain(ivalues, itertools.repeat(None))
_fake = type(str('_fake'), (object, ), {})
output = []
for field, data in izip(self.entries, candidates):
fake_obj = _fake()
fake_obj.data = data
field.populate_obj(fake_obj, 'data')
output.append(fake_obj.data)
setattr(obj, name, output)
def _add_entry(self, formdata=None, data=unset_value, index=None):
assert not self.max_entries or len(self.entries) < self.max_entries, \
'You cannot have more than max_entries entries in this FieldList'
if index is None:
index = self.last_index + 1
self.last_index = index
name = '%s-%d' % (self.short_name, index)
id = '%s-%d' % (self.id, index)
field = self.unbound_field.bind(form=None, name=name, prefix=self._prefix, id=id, _meta=self.meta,
translations=self._translations)
field.process(formdata, data)
self.entries.append(field)
return field
def append_entry(self, data=unset_value):
"""
Create a new entry with optional default data.
Entries added in this way will *not* receive formdata however, and can
only receive object data.
"""
return self._add_entry(data=data)
def pop_entry(self):
""" Removes the last entry from the list and returns it. """
entry = self.entries.pop()
self.last_index -= 1
return entry
def __iter__(self):
return iter(self.entries)
def __len__(self):
return len(self.entries)
def __getitem__(self, index):
return self.entries[index]
@property
def data(self):
return [f.data for f in self.entries]
|
the-stack_0_14097 | import os
import sys
from datetime import datetime
import logging
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import DataLoader, random_split
from torch.utils.tensorboard import SummaryWriter
import torchvision.transforms as transforms
from models.Conv3D import r2plus1d_18
from dataset_sign_clip import Sign_Isolated
from train import train_epoch
from validation_clip import val_epoch
from collections import OrderedDict
class LabelSmoothingCrossEntropy(nn.Module):
def __init__(self):
super(LabelSmoothingCrossEntropy, self).__init__()
def forward(self, x, target, smoothing=0.1):
confidence = 1. - smoothing
logprobs = F.log_softmax(x, dim=-1)
nll_loss = -logprobs.gather(dim=-1, index=target.unsqueeze(1))
nll_loss = nll_loss.squeeze(1)
smooth_loss = -logprobs.mean(dim=-1)
loss = confidence * nll_loss + smoothing * smooth_loss
return loss.mean()
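# A minimal, hedged sanity check (assumption: not part of the original training
# script) showing how the label-smoothing loss is called; tensor shapes mirror
# a batch of logits over `n_classes` classes.
def _smoke_test_label_smoothing(n_classes=226, batch=4):
    criterion = LabelSmoothingCrossEntropy()
    logits = torch.randn(batch, n_classes)
    targets = torch.randint(0, n_classes, (batch,))
    loss = criterion(logits, targets, smoothing=0.1)
    return loss.item()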
# Path setting
exp_name = 'depth_hha_final_finetune'
data_path = "../data/train_val_hha_2_mask"
data_path2 = "../data/test_hha_2_mask"
label_train_path = "data/train_val_labels.csv"
label_val_path = "data/test_labels_pseudo.csv"
model_path = "checkpoint/{}".format(exp_name)
if not os.path.exists(model_path):
os.mkdir(model_path)
if not os.path.exists(os.path.join('results', exp_name)):
os.mkdir(os.path.join('results', exp_name))
log_path = "log/sign_resnet2d+1_{}_{:%Y-%m-%d_%H-%M-%S}.log".format(exp_name, datetime.now())
sum_path = "runs/sign_resnet2d+1_{}_{:%Y-%m-%d_%H-%M-%S}".format(exp_name, datetime.now())
phase = 'Train'
# Log to file & tensorboard writer
logging.basicConfig(level=logging.INFO, format='%(message)s', handlers=[logging.FileHandler(log_path), logging.StreamHandler()])
logger = logging.getLogger('SLR')
logger.info('Logging to file...')
writer = SummaryWriter(sum_path)
# Use specific gpus
os.environ["CUDA_VISIBLE_DEVICES"]="0,1,2,3"
# Device setting
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Hyperparams
num_classes = 226 #100
epochs = 100
# batch_size = 16
batch_size = 24
learning_rate = 1e-4  # 1e-3 for training from scratch, 1e-4 for fine-tuning
weight_decay = 1e-4
log_interval = 80
sample_size = 128
sample_duration = 32
attention = False
drop_p = 0.0
hidden1, hidden2 = 512, 256
num_workers = 24
def get_lr(optimizer):
for param_group in optimizer.param_groups:
return param_group['lr']
# Train with 3DCNN
if __name__ == '__main__':
# Load data
transform = transforms.Compose([transforms.Resize([sample_size, sample_size]),
transforms.ToTensor(),
transforms.Normalize(mean=[0.5], std=[0.5])])
train_set = Sign_Isolated(data_path=data_path, label_path=label_train_path, frames=sample_duration,
num_classes=num_classes, train=True, transform=transform)
val_set = Sign_Isolated(data_path=data_path2, label_path=label_val_path, frames=sample_duration,
num_classes=num_classes, train=False, transform=transform)
logger.info("Dataset samples: {}".format(len(train_set)+len(val_set)))
train_loader = DataLoader(train_set, batch_size=batch_size, shuffle=True, num_workers=num_workers, pin_memory=True)
val_loader = DataLoader(val_set, batch_size=batch_size, shuffle=False, num_workers=num_workers, pin_memory=True)
# Create model
model = r2plus1d_18(pretrained=False, num_classes=226)
# load pretrained
checkpoint = torch.load('final_models/val_depth_hha_final.pth')
new_state_dict = OrderedDict()
for k, v in checkpoint.items():
name = k[7:] # remove 'module.'
new_state_dict[name]=v
model.load_state_dict(new_state_dict)
# if phase == 'Train':
# model.fc1 = nn.Linear(model.fc1.in_features, num_classes)
print(model)
model = model.to(device)
# Run the model parallelly
if torch.cuda.device_count() > 1:
logger.info("Using {} GPUs".format(torch.cuda.device_count()))
model = nn.DataParallel(model)
# Create loss criterion & optimizer
# criterion = nn.CrossEntropyLoss()
criterion = LabelSmoothingCrossEntropy()
optimizer = optim.Adam(model.parameters(), lr=learning_rate, weight_decay=weight_decay)
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.1, patience=10, threshold=0.0001)
# Start training
if phase == 'Train':
logger.info("Training Started".center(60, '#'))
for epoch in range(epochs):
print('lr: ', get_lr(optimizer))
# Train the model
train_epoch(model, criterion, optimizer, train_loader, device, epoch, logger, log_interval, writer)
# Validate the model
val_loss = val_epoch(model, criterion, val_loader, device, epoch, logger, writer)
scheduler.step(val_loss)
# Save model
torch.save(model.state_dict(), os.path.join(model_path, "sign_resnet2d+1_epoch{:03d}.pth".format(epoch+1)))
logger.info("Epoch {} Model Saved".format(epoch+1).center(60, '#'))
elif phase == 'Test':
logger.info("Testing Started".center(60, '#'))
val_loss = val_epoch(model, criterion, val_loader, device, 0, logger, writer, phase=phase, exp_name=exp_name)
logger.info("Finished".center(60, '#'))
|
the-stack_0_14098 | import torch
from gaussed.distribution.base import Distribution
from gaussed.utils.lin_alg_solvers import DefaultSolver
class GP(Distribution):
def __init__(self, mean, kernel, solver=DefaultSolver()):
self.mean = mean
self.kernel = kernel
self.solver = solver
self.dim = self.kernel.dim
self.t_dim = self.kernel.dim
self.func_dist = True # distribution over functions
self.func_dim = self.kernel.dim
self.kernel = kernel
self.dim = self.kernel.dim
def condition_x(self, x, X, Y, solver=None):
"""Computes the posterior mean and covariance at locations x, conditioned upon output observations Y, observed at input locations X
Args:
x ([torch.Tensor]): [Locations to evaluate posterior mean and covariance matrix]
X ([torch.Tensor]): [Tensor of input locations]
Y ([type]): [Tensor of output observations]
solver ([Solver], optional): [Solver]. Defaults to None and so to self.solver.
Returns:
[torch.Tensor, torch.Tensor]: [Posterior mean and covariance matrix]
"""
if solver is None:
solver = self.solver
K_XX = self.kernel.eval(X, X)
K_Xx = self.kernel.eval(X, x)
K_xx = self.kernel.eval(x, x)
inverse = solver.inverse(K_XX)
solved_y = solver.solve(inverse, Y.unsqueeze(1))
solved_gram = solver.solve(inverse, K_Xx)
mean = torch.matmul(K_Xx.T, solved_y)
covariance_matrix = K_xx - torch.matmul(K_Xx.T, solved_gram)
return mean, covariance_matrix
def get_prior(self, x):
"""Returns prior mean and covariance at locations x.
Args:
x ([torch.Tensor]): [Locations to evaluate prior mean and prior covariance]
Returns:
[torch.Tensor, torch.Tensor]: [Prior mean and covariance matrix]
"""
K_xx = self.kernel.eval(x, x)
prior_mean = self.mean.eval(x)
return prior_mean, K_xx
def sample(self, mean, covariance, n):
"""Sample from the GP with given mean and covariance, n number of times.
Args:
mean ([torch.Tensor]): [Mean Tensor]
covariance ([torch.Tensor]): [Covariance matrix Tensor]
n ([int]): [Number of samples]
Returns:
[torch.Tensor]: [GP samples]
"""
m = mean.size(0)
U, S, V = torch.svd(covariance)
s_cov = torch.matmul(U, torch.diag(torch.sqrt(S)))
sn = torch.distributions.MultivariateNormal(torch.zeros(m), torch.eye(m))
sn_samples = sn.sample(torch.Size([n])).T
mn_samples = torch.add(torch.matmul(s_cov, sn_samples).T, mean).T
return mn_samples
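# Hedged, self-contained sketch (not part of the original module) of the exact
# posterior algebra that condition_x implements:
#   mean = K_xX K_XX^{-1} y,   cov = K_xx - K_xX K_XX^{-1} K_Xx
# The RBF kernel and jitter value below are assumptions chosen for illustration.
def _example_exact_gp_posterior():
    def rbf(a, b, lengthscale=1.0):
        sq_dist = (a.unsqueeze(1) - b.unsqueeze(0)) ** 2
        return torch.exp(-0.5 * sq_dist / lengthscale ** 2)

    X = torch.linspace(-2.0, 2.0, 8)   # training inputs
    Y = torch.sin(X)                   # training observations
    x = torch.linspace(-2.0, 2.0, 50)  # test locations

    K_XX = rbf(X, X) + 1e-6 * torch.eye(len(X))  # jitter for numerical stability
    K_Xx = rbf(X, x)
    K_xx = rbf(x, x)

    solved_y = torch.linalg.solve(K_XX, Y.unsqueeze(1))
    solved_gram = torch.linalg.solve(K_XX, K_Xx)
    mean = K_Xx.T @ solved_y
    covariance = K_xx - K_Xx.T @ solved_gram
    return mean, covariance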
|
the-stack_0_14099 | import numpy as np
import pytest
import pandas as pd
from pandas import Series
import pandas._testing as tm
from pandas.core.api import Float64Index
def test_get():
# GH 6383
s = Series(
np.array(
[
43,
48,
60,
48,
50,
51,
50,
45,
57,
48,
56,
45,
51,
39,
55,
43,
54,
52,
51,
54,
]
)
)
result = s.get(25, 0)
expected = 0
assert result == expected
s = Series(
np.array(
[
43,
48,
60,
48,
50,
51,
50,
45,
57,
48,
56,
45,
51,
39,
55,
43,
54,
52,
51,
54,
]
),
index=Float64Index(
[
25.0,
36.0,
49.0,
64.0,
81.0,
100.0,
121.0,
144.0,
169.0,
196.0,
1225.0,
1296.0,
1369.0,
1444.0,
1521.0,
1600.0,
1681.0,
1764.0,
1849.0,
1936.0,
]
),
)
result = s.get(25, 0)
expected = 43
assert result == expected
# GH 7407
# with a boolean accessor
df = pd.DataFrame({"i": [0] * 3, "b": [False] * 3})
vc = df.i.value_counts()
result = vc.get(99, default="Missing")
assert result == "Missing"
vc = df.b.value_counts()
result = vc.get(False, default="Missing")
assert result == 3
result = vc.get(True, default="Missing")
assert result == "Missing"
def test_get_nan():
# GH 8569
s = Float64Index(range(10)).to_series()
assert s.get(np.nan) is None
assert s.get(np.nan, default="Missing") == "Missing"
def test_get_nan_multiple():
# GH 8569
# ensure that fixing "test_get_nan" above hasn't broken get
# with multiple elements
s = Float64Index(range(10)).to_series()
idx = [2, 30]
assert s.get(idx) is None
idx = [2, np.nan]
assert s.get(idx) is None
# GH 17295 - all missing keys
idx = [20, 30]
assert s.get(idx) is None
idx = [np.nan, np.nan]
assert s.get(idx) is None
def test_get_with_default():
# GH#7725
d0 = ["a", "b", "c", "d"]
d1 = np.arange(4, dtype="int64")
others = ["e", 10]
for data, index in ((d0, d1), (d1, d0)):
s = Series(data, index=index)
for i, d in zip(index, data):
assert s.get(i) == d
assert s.get(i, d) == d
assert s.get(i, "z") == d
for other in others:
assert s.get(other, "z") == "z"
assert s.get(other, other) == other
@pytest.mark.parametrize(
"arr",
[np.random.randn(10), tm.makeDateIndex(10, name="a").tz_localize(tz="US/Eastern")],
)
def test_get_with_ea(arr):
# GH#21260
ser = Series(arr, index=[2 * i for i in range(len(arr))])
assert ser.get(4) == ser.iloc[2]
result = ser.get([4, 6])
expected = ser.iloc[[2, 3]]
tm.assert_series_equal(result, expected)
with tm.assert_produces_warning(FutureWarning, match="label-based"):
result = ser.get(slice(2))
expected = ser.iloc[[0, 1]]
tm.assert_series_equal(result, expected)
assert ser.get(-1) is None
assert ser.get(ser.index.max() + 1) is None
ser = Series(arr[:6], index=list("abcdef"))
assert ser.get("c") == ser.iloc[2]
result = ser.get(slice("b", "d"))
expected = ser.iloc[[1, 2, 3]]
tm.assert_series_equal(result, expected)
result = ser.get("Z")
assert result is None
assert ser.get(4) == ser.iloc[4]
assert ser.get(-1) == ser.iloc[-1]
assert ser.get(len(ser)) is None
# GH#21257
ser = Series(arr)
ser2 = ser[::2]
assert ser2.get(1) is None
def test_getitem_get(string_series, object_series):
for obj in [string_series, object_series]:
idx = obj.index[5]
assert obj[idx] == obj.get(idx)
assert obj[idx] == obj[5]
assert string_series.get(-1) == string_series.get(string_series.index[-1])
assert string_series[5] == string_series.get(string_series.index[5])
def test_get_none():
# GH#5652
s1 = Series(dtype=object)
s2 = Series(dtype=object, index=list("abc"))
for s in [s1, s2]:
result = s.get(None)
assert result is None
|
the-stack_0_14100 | frase = str(input('Type a sentence: ')).strip().upper()
palavras = frase.split()
junto = ''.join(palavras)
inverso = ''
# For each letter index in the range we start at the total length minus 1 and step by -1 down to 0, walking the string backwards.
for letra in range(len(junto) - 1, -1, -1):
inverso += junto[letra]
print(f'The reverse of {junto} is {inverso}!')
if inverso == junto:
    print('We have a palindrome!')
else:
    print('The sentence you typed is not a palindrome.')
the-stack_0_14101 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""## Functions for working with arbitrarily nested sequences of elements.
This module can perform operations on nested structures. A nested structure is a
Python collection that can contain further collections as well as other objects
called atoms. Note that numpy arrays are considered atoms.
nest recognizes the following types of collections:
1. tuple
2. namedtuple
3. dict
4. OrderedDict
5. MutableMapping
6. attr.s
attr.s decorated classes (http://www.attrs.org) are also supported, in the
same way as `namedtuple`.
The utilities here assume (and do not check) that the nested structures form a
'tree', i.e., no references in the structure of the input of these functions
should be recursive.
Example structures: `((3, 4), 5, (6, 7, (9, 10), 8))`, `(np.array(0),
(np.array([3, 4]), tf.constant([3, 4])))`
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections as _collections
import six as _six
import wrapt as _wrapt
from tensorflow.python import _pywrap_utils
from tensorflow.python.util.compat import collections_abc as _collections_abc
from tensorflow.python.util.tf_export import tf_export
from tensorflow.python.platform import tf_logging
_SHALLOW_TREE_HAS_INVALID_KEYS = (
"The shallow_tree's keys are not a subset of the input_tree's keys. The "
"shallow_tree has the following keys that are not in the input_tree: {}.")
_STRUCTURES_HAVE_MISMATCHING_TYPES = (
"The two structures don't have the same sequence type. Input structure has "
"type {input_type}, while shallow structure has type {shallow_type}.")
_STRUCTURES_HAVE_MISMATCHING_LENGTHS = (
"The two structures don't have the same sequence length. Input "
"structure has length {input_length}, while shallow structure has length "
"{shallow_length}."
)
_INPUT_TREE_SMALLER_THAN_SHALLOW_TREE = (
"The input_tree has fewer elements than the shallow_tree. Input structure "
"has length {input_size}, while shallow structure has length "
"{shallow_size}.")
_IF_SHALLOW_IS_SEQ_INPUT_MUST_BE_SEQ = (
"If shallow structure is a sequence, input must also be a sequence. "
"Input has type: {}.")
def _get_attrs_items(obj):
"""Returns a list of (name, value) pairs from an attrs instance.
The list will be sorted by name.
Args:
obj: an object.
Returns:
A list of (attr_name, attr_value) pairs, sorted by attr_name.
"""
attrs = getattr(obj.__class__, "__attrs_attrs__")
attr_names = (a.name for a in attrs)
return [(attr_name, getattr(obj, attr_name)) for attr_name in attr_names]
def _sorted(dict_):
"""Returns a sorted list of the dict keys, with error if keys not sortable."""
try:
return sorted(dict_.keys())
except TypeError:
raise TypeError("nest only supports dicts with sortable keys.")
def _is_namedtuple(instance, strict=False):
"""Returns True iff `instance` is a `namedtuple`.
Args:
instance: An instance of a Python object.
strict: If True, `instance` is considered to be a `namedtuple` only if
it is a "plain" namedtuple. For instance, a class inheriting
from a `namedtuple` will be considered to be a `namedtuple`
iff `strict=False`.
Returns:
True if `instance` is a `namedtuple`.
"""
return _pywrap_utils.IsNamedtuple(instance, strict)
# See the swig file (util.i) for documentation.
_is_mapping_view = _pywrap_utils.IsMappingView
_is_attrs = _pywrap_utils.IsAttrs
_is_composite_tensor = _pywrap_utils.IsCompositeTensor
_is_type_spec = _pywrap_utils.IsTypeSpec
_is_mutable_mapping = _pywrap_utils.IsMutableMapping
_is_mapping = _pywrap_utils.IsMapping
def _sequence_like(instance, args):
"""Converts the sequence `args` to the same type as `instance`.
Args:
instance: an instance of `tuple`, `list`, `namedtuple`, `dict`,
`collections.OrderedDict`, or `composite_tensor.Composite_Tensor`
or `type_spec.TypeSpec`.
args: elements to be converted to the `instance` type.
Returns:
`args` with the type of `instance`.
"""
if _is_mutable_mapping(instance):
# Pack dictionaries in a deterministic order by sorting the keys.
# Notice this means that we ignore the original order of `OrderedDict`
# instances. This is intentional, to avoid potential bugs caused by mixing
# ordered and plain dicts (e.g., flattening a dict but using a
# corresponding `OrderedDict` to pack it back).
result = dict(zip(_sorted(instance), args))
instance_type = type(instance)
if instance_type == _collections.defaultdict:
d = _collections.defaultdict(instance.default_factory)
else:
d = instance_type()
for key in instance:
d[key] = result[key]
return d
elif _is_mapping(instance):
result = dict(zip(_sorted(instance), args))
instance_type = type(instance)
tf_logging.log_first_n(
tf_logging.WARN, "Mapping types may not work well with tf.nest. Prefer"
" using MutableMapping for {}".format(instance_type), 1)
try:
return instance_type((key, result[key]) for key in instance)
except TypeError as err:
raise TypeError("Error creating an object of type {} like {}. Note that "
"it must accept a single positional argument "
"representing an iterable of key-value pairs, in "
"addition to self. Cause: {}".format(
type(instance), instance, err))
elif _is_mapping_view(instance):
# We can't directly construct mapping views, so we create a list instead
return list(args)
elif _is_namedtuple(instance) or _is_attrs(instance):
if isinstance(instance, _wrapt.ObjectProxy):
instance_type = type(instance.__wrapped__)
else:
instance_type = type(instance)
return instance_type(*args)
elif _is_composite_tensor(instance):
assert len(args) == 1
spec = instance._type_spec # pylint: disable=protected-access
return spec._from_components(args[0]) # pylint: disable=protected-access
elif _is_type_spec(instance):
# Pack a CompositeTensor's components according to a TypeSpec.
assert len(args) == 1
return instance._from_components(args[0]) # pylint: disable=protected-access
elif isinstance(instance, _six.moves.range):
return _sequence_like(list(instance), args)
elif isinstance(instance, _wrapt.ObjectProxy):
# For object proxies, first create the underlying type and then re-wrap it
# in the proxy type.
return type(instance)(_sequence_like(instance.__wrapped__, args))
else:
# Not a namedtuple
return type(instance)(args)
def _yield_value(iterable):
for _, v in _yield_sorted_items(iterable):
yield v
def _yield_sorted_items(iterable):
"""Yield (key, value) pairs for `iterable` in a deterministic order.
For Sequences, the key will be an int, the array index of a value.
For Mappings, the key will be the dictionary key.
For objects (e.g. namedtuples), the key will be the attribute name.
In all cases, the keys will be iterated in sorted order.
Args:
iterable: an iterable.
Yields:
The iterable's (key, value) pairs, in order of sorted keys.
"""
# Ordered to check common structure types (list, tuple, dict) first.
if isinstance(iterable, list):
for item in enumerate(iterable):
yield item
# namedtuples handled separately to avoid expensive namedtuple check.
elif type(iterable) == tuple: # pylint: disable=unidiomatic-typecheck
for item in enumerate(iterable):
yield item
elif isinstance(iterable, (dict, _collections_abc.Mapping)):
# Iterate through dictionaries in a deterministic order by sorting the
# keys. Notice this means that we ignore the original order of `OrderedDict`
# instances. This is intentional, to avoid potential bugs caused by mixing
# ordered and plain dicts (e.g., flattening a dict but using a
# corresponding `OrderedDict` to pack it back).
for key in _sorted(iterable):
yield key, iterable[key]
elif _is_attrs(iterable):
for item in _get_attrs_items(iterable):
yield item
elif _is_namedtuple(iterable):
for field in iterable._fields:
yield field, getattr(iterable, field)
elif _is_composite_tensor(iterable):
type_spec = iterable._type_spec # pylint: disable=protected-access
yield type_spec.value_type.__name__, type_spec._to_components(iterable) # pylint: disable=protected-access
elif _is_type_spec(iterable):
# Note: to allow CompositeTensors and their TypeSpecs to have matching
# structures, we need to use the same key string here.
yield iterable.value_type.__name__, iterable._component_specs # pylint: disable=protected-access
else:
for item in enumerate(iterable):
yield item
# See the swig file (util.i) for documentation.
is_sequence = _pywrap_utils.IsSequence
# See the swig file (util.i) for documentation.
is_sequence_or_composite = _pywrap_utils.IsSequenceOrComposite
@tf_export("nest.is_nested")
def is_nested(seq):
"""Returns true if its input is a collections.abc.Sequence (except strings).
Args:
seq: an input sequence.
Returns:
True if the sequence is a not a string and is a collections.abc.Sequence
or a dict.
"""
return is_sequence(seq)
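# A small illustrative sketch (not part of the original module): strings and
# scalars are treated as atoms, while tuples, lists and dicts are nested.
def _example_is_nested():
  assert is_nested((1, 2)) and is_nested([1, 2]) and is_nested({"a": 1})
  assert not is_nested("a string") and not is_nested(42)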
@tf_export("nest.flatten")
def flatten(structure, expand_composites=False):
"""Returns a flat list from a given nested structure.
If nest is not a structure, tuple (or a namedtuple), dict, or an attrs class,
then returns a single-element list:
[nest].
In the case of dict instances, the sequence consists of the values, sorted by
key to ensure deterministic behavior. This is true also for OrderedDict
instances: their sequence order is ignored, the sorting order of keys is used
instead. The same convention is followed in pack_sequence_as. This correctly
repacks dicts and OrderedDicts after they have been flattened, and also allows
flattening an OrderedDict and then repacking it back using a corresponding
plain dict, or vice-versa. Dictionaries with non-sortable keys cannot be
flattened.
Users must not modify any collections used in nest while this function is
running.
Examples:
1. Python dict (ordered by key):
>>> dict = { "key3": "value3", "key1": "value1", "key2": "value2" }
>>> tf.nest.flatten(dict)
['value1', 'value2', 'value3']
2. For a nested python tuple:
>>> tuple = ((1.0, 2.0), (3.0, 4.0, 5.0), (6.0))
>>> tf.nest.flatten(tuple)
[1.0, 2.0, 3.0, 4.0, 5.0, 6.0]
3. Numpy array (will not flatten):
>>> array = np.array([[1, 2], [3, 4]])
>>> tf.nest.flatten(array)
[array([[1, 2],
[3, 4]])]
4. `tf.Tensor` (will not flatten):
>>> tensor = tf.constant([[1., 2., 3.], [4., 5., 6.], [7., 8., 9.]])
>>> tf.nest.flatten(tensor)
[<tf.Tensor: shape=(3, 3), dtype=float32, numpy=
array([[1., 2., 3.],
[4., 5., 6.],
[7., 8., 9.]], dtype=float32)>]
Args:
structure: an arbitrarily nested structure. Note, numpy arrays are
considered atoms and are not flattened.
expand_composites: If true, then composite tensors such as
`tf.sparse.SparseTensor` and `tf.RaggedTensor` are expanded into their
component tensors.
Returns:
A Python list, the flattened version of the input.
Raises:
TypeError: The nest is or contains a dict with non-sortable keys.
"""
if structure is None:
return [None]
expand_composites = bool(expand_composites)
return _pywrap_utils.Flatten(structure, expand_composites)
# See the swig file (util.i) for documentation.
_same_namedtuples = _pywrap_utils.SameNamedtuples
class _DotString(object):
__slots__ = []
def __str__(self):
return "."
def __repr__(self):
return "."
_DOT = _DotString()
@tf_export("nest.assert_same_structure")
def assert_same_structure(nest1, nest2, check_types=True,
expand_composites=False):
"""Asserts that two structures are nested in the same way.
Note that namedtuples with identical name and fields are always considered
to have the same shallow structure (even with `check_types=True`).
For instance, this code will print `True`:
```python
def nt(a, b):
return collections.namedtuple('foo', 'a b')(a, b)
print(assert_same_structure(nt(0, 1), nt(2, 3)))
```
Args:
nest1: an arbitrarily nested structure.
nest2: an arbitrarily nested structure.
check_types: if `True` (default) types of sequences are checked as well,
including the keys of dictionaries. If set to `False`, for example a
list and a tuple of objects will look the same if they have the same
size. Note that namedtuples with identical name and fields are always
considered to have the same shallow structure. Two types will also be
considered the same if they are both list subtypes (which allows "list"
and "_ListWrapper" from trackable dependency tracking to compare
equal).
expand_composites: If true, then composite tensors such as
`tf.sparse.SparseTensor` and `tf.RaggedTensor` are expanded into their
component tensors.
Raises:
ValueError: If the two structures do not have the same number of elements or
if the two structures are not nested in the same way.
TypeError: If the two structures differ in the type of sequence in any of
their substructures. Only possible if `check_types` is `True`.
"""
# Convert to bool explicitly as otherwise pybind will not be able to handle
# type mismatch message correctly. See GitHub issue 42329 for details.
check_types = bool(check_types)
expand_composites = bool(expand_composites)
try:
_pywrap_utils.AssertSameStructure(nest1, nest2, check_types,
expand_composites)
except (ValueError, TypeError) as e:
str1 = str(map_structure(lambda _: _DOT, nest1))
str2 = str(map_structure(lambda _: _DOT, nest2))
raise type(e)("%s\n"
"Entire first structure:\n%s\n"
"Entire second structure:\n%s"
% (str(e), str1, str2))
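# Illustrative sketch (not part of the original module): structures with the
# same nesting but different leaf values pass; mismatched nesting raises.
def _example_assert_same_structure():
  assert_same_structure({"a": 1, "b": (2, 3)}, {"a": "x", "b": ("y", "z")})
  try:
    assert_same_structure([1, 2], [[1], 2])
  except (ValueError, TypeError):
    pass  # expected: the second structure nests its first element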
def flatten_dict_items(dictionary):
"""Returns a dictionary with flattened keys and values.
This function flattens the keys and values of a dictionary, which can be
arbitrarily nested structures, and returns the flattened version of such
structures:
```python
example_dictionary = {(4, 5, (6, 8)): ("a", "b", ("c", "d"))}
result = {4: "a", 5: "b", 6: "c", 8: "d"}
flatten_dict_items(example_dictionary) == result
```
The input dictionary must satisfy two properties:
1. Its keys and values should have the same exact nested structure.
2. The set of all flattened keys of the dictionary must not contain repeated
keys.
Args:
dictionary: the dictionary to zip
Returns:
The zipped dictionary.
Raises:
TypeError: If the input is not a dictionary.
ValueError: If any key and value do not have the same structure layout, or
if keys are not unique.
"""
if not isinstance(dictionary, (dict, _collections_abc.Mapping)):
raise TypeError("input must be a dictionary")
flat_dictionary = {}
for i, v in _six.iteritems(dictionary):
if not is_sequence(i):
if i in flat_dictionary:
raise ValueError(
"Could not flatten dictionary: key %s is not unique." % i)
flat_dictionary[i] = v
else:
flat_i = flatten(i)
flat_v = flatten(v)
if len(flat_i) != len(flat_v):
raise ValueError(
"Could not flatten dictionary. Key had %d elements, but value had "
"%d elements. Key: %s, value: %s."
% (len(flat_i), len(flat_v), flat_i, flat_v))
for new_i, new_v in zip(flat_i, flat_v):
if new_i in flat_dictionary:
raise ValueError(
"Could not flatten dictionary: key %s is not unique."
% (new_i))
flat_dictionary[new_i] = new_v
return flat_dictionary
def _packed_nest_with_indices(structure, flat, index, is_seq, sequence_fn=None):
"""Helper function for pack_sequence_as.
Args:
structure: Substructure (list / tuple / dict) to mimic.
flat: Flattened values to output substructure for.
index: Index at which to start reading from flat.
is_seq: Function used to test if a value should be treated as a sequence.
sequence_fn: Function used to generate a new sequence instance.
Returns:
The tuple (new_index, child), where:
* new_index - the updated index into `flat` having processed `structure`.
* packed - the subset of `flat` corresponding to `structure`,
having started at `index`, and packed into the same nested
format.
Raises:
ValueError: if `structure` contains more elements than `flat`
(assuming indexing starts from `index`).
"""
packed = []
sequence_fn = sequence_fn or _sequence_like
for s in _yield_value(structure):
if is_seq(s):
new_index, child = _packed_nest_with_indices(s, flat, index, is_seq,
sequence_fn)
packed.append(sequence_fn(s, child))
index = new_index
else:
packed.append(flat[index])
index += 1
return index, packed
def _pack_sequence_as(structure, flat_sequence, expand_composites,
sequence_fn=None):
"""Implements sequence packing, with the option to alter the structure."""
is_seq = is_sequence_or_composite if expand_composites else is_sequence
sequence_fn = sequence_fn or _sequence_like
def truncate(value, length):
value_str = str(value)
return value_str[:length] + (value_str[length:] and "...")
if not is_seq(flat_sequence):
raise TypeError(
"Attempted to pack value:\n {}\ninto a sequence, but found "
"incompatible type `{}` instead."
.format(truncate(flat_sequence, 100), type(flat_sequence)))
if not is_seq(structure):
if len(flat_sequence) != 1:
raise ValueError(
"The target structure is of type `{}`\n {}\nHowever the input "
"structure is a sequence ({}) of length {}.\n {}\nnest cannot "
"guarantee that it is safe to map one to the other.".format(
type(structure), truncate(structure, 100), type(flat_sequence),
len(flat_sequence), truncate(flat_sequence, 100)))
return flat_sequence[0]
try:
final_index, packed = _packed_nest_with_indices(structure, flat_sequence,
0, is_seq, sequence_fn)
if final_index < len(flat_sequence):
raise IndexError
except IndexError:
flat_structure = flatten(structure)
if len(flat_structure) != len(flat_sequence):
raise ValueError(
"Could not pack sequence. Structure had %d elements, but "
"flat_sequence had %d elements. Structure: %s, flat_sequence: %s." %
(len(flat_structure), len(flat_sequence), structure, flat_sequence))
return sequence_fn(structure, packed)
@tf_export("nest.pack_sequence_as")
def pack_sequence_as(structure, flat_sequence, expand_composites=False):
"""Returns a given flattened sequence packed into a given structure.
If `structure` is a scalar, `flat_sequence` must be a single-element list;
in this case the return value is `flat_sequence[0]`.
If `structure` is or contains a dict instance, the keys will be sorted to
pack the flat sequence in deterministic order. This is true also for
`OrderedDict` instances: their sequence order is ignored, the sorting order of
keys is used instead. The same convention is followed in `flatten`.
This correctly repacks dicts and `OrderedDict`s after they have been
flattened, and also allows flattening an `OrderedDict` and then repacking it
back using a corresponding plain dict, or vice-versa.
Dictionaries with non-sortable keys cannot be flattened.
Args:
structure: Nested structure, whose structure is given by nested lists,
tuples, and dicts. Note: numpy arrays and strings are considered
scalars.
flat_sequence: flat sequence to pack.
expand_composites: If true, then composite tensors such as
`tf.sparse.SparseTensor` and `tf.RaggedTensor` are expanded into their
component tensors.
Returns:
packed: `flat_sequence` converted to have the same recursive structure as
`structure`.
Raises:
ValueError: If `flat_sequence` and `structure` have different
element counts.
TypeError: `structure` is or contains a dict with non-sortable keys.
"""
return _pack_sequence_as(structure, flat_sequence, expand_composites)
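# A minimal illustrative sketch (not part of the original module): the helper
# name below is added here as an example of the flatten/pack round trip.
def _pack_sequence_as_example():
  """Returns {'a': 1, 'b': [2, 3]}; flat values follow sorted-key order."""
  structure = {"b": [0, 0], "a": 0}
  flat = [1, 2, 3]  # "a" is packed first, then the two entries of "b"
  return pack_sequence_as(structure, flat)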
@tf_export("nest.map_structure")
def map_structure(func, *structure, **kwargs):
"""Applies `func` to each entry in `structure` and returns a new structure.
Applies `func(x[0], x[1], ...)` where x[i] is an entry in
`structure[i]`. All structures in `structure` must have the same arity,
and the return value will contain results with the same structure layout.
Examples:
1. A single Python dict:
>>> a = {"hello": 24, "world": 76}
>>> tf.nest.map_structure(lambda p: p * 2, a)
{'hello': 48, 'world': 152}
2. Multiple Python dictionaries:
>>> d1 = {"hello": 24, "world": 76}
>>> d2 = {"hello": 36, "world": 14}
>>> tf.nest.map_structure(lambda p1, p2: p1 + p2, d1, d2)
{'hello': 60, 'world': 90}
Args:
func: A callable that accepts as many arguments as there are structures.
    *structure: scalar, or arbitrarily nested tuples, dicts, or lists of
      scalars and/or other nested structures. Note: numpy arrays are
      considered scalars.
**kwargs: Valid keyword args are:
* `check_types`: If set to `True` (default) the types of
iterables within the structures have to be same (e.g.
`map_structure(func, [1], (1,))` raises a `TypeError`
exception). To allow this set this argument to `False`.
Note that namedtuples with identical name and fields are always
considered to have the same shallow structure.
* `expand_composites`: If set to `True`, then composite tensors such
as `tf.sparse.SparseTensor` and `tf.RaggedTensor` are expanded into
their component tensors. If `False` (the default), then composite
tensors are not expanded.
Returns:
A new structure with the same arity as `structure`, whose values correspond
to `func(x[0], x[1], ...)` where `x[i]` is a value in the corresponding
location in `structure[i]`. If there are different sequence types and
`check_types` is `False` the sequence types of the first structure will be
used.
Raises:
TypeError: If `func` is not callable or if the structures do not match
each other by depth tree.
ValueError: If no structure is provided or if the structures do not match
each other by type.
ValueError: If wrong keyword arguments are provided.
"""
if not callable(func):
raise TypeError("func must be callable, got: %s" % func)
if not structure:
raise ValueError("Must provide at least one structure")
check_types = kwargs.pop("check_types", True)
expand_composites = kwargs.pop("expand_composites", False)
if kwargs:
raise ValueError(
"Only valid keyword arguments are `check_types` and "
"`expand_composites`, not: `%s`" % ("`, `".join(kwargs.keys())))
for other in structure[1:]:
assert_same_structure(structure[0], other, check_types=check_types,
expand_composites=expand_composites)
flat_structure = (flatten(s, expand_composites) for s in structure)
entries = zip(*flat_structure)
return pack_sequence_as(
structure[0], [func(*x) for x in entries],
expand_composites=expand_composites)
def map_structure_with_paths(func, *structure, **kwargs):
"""Applies `func` to each entry in `structure` and returns a new structure.
Applies `func(path, x[0], x[1], ..., **kwargs)` where x[i] is an entry in
`structure[i]` and `path` is the common path to x[i] in the structures. All
structures in `structure` must have the same arity, and the return value will
contain the results with the same structure layout. Special kwarg
`check_types` determines whether the types of iterables within the structure
must be the same-- see **kwargs definition below.
Args:
func: A callable with the signature func(path, *values, **kwargs) that is
evaluated on the leaves of the structure.
*structure: A variable number of compatible structures to process.
**kwargs: Optional kwargs to be passed through to func. Special kwarg
`check_types` is not passed to func, but instead determines whether the
types of iterables within the structures have to be same (e.g.,
`map_structure(func, [1], (1,))` raises a `TypeError` exception). By
default, the types must match. To allow iteration over structures of
different types (but common arity), set this kwarg to `False`.
Returns:
A structure of the same form as the input structures whose leaves are the
result of evaluating func on corresponding leaves of the input structures.
Raises:
TypeError: If `func` is not callable or if the structures do not match
each other by depth tree.
TypeError: If `check_types` is not `False` and the two structures differ in
the type of sequence in any of their substructures.
ValueError: If no structures are provided.
"""
def wrapper_func(tuple_path, *inputs, **kwargs):
string_path = "/".join(str(s) for s in tuple_path)
return func(string_path, *inputs, **kwargs)
return map_structure_with_tuple_paths_up_to(structure[0],
wrapper_func,
*structure,
**kwargs)
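# A minimal illustrative sketch (not part of the original module): the helper
# name below is added here to show the string paths passed to `func`.
def _map_structure_with_paths_example():
  """Returns {'a': ('a', 10), 'b': {'c': ('b/c', 20)}}."""
  structure = {"a": 1, "b": {"c": 2}}
  return map_structure_with_paths(lambda path, v: (path, v * 10), structure)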
def map_structure_with_tuple_paths(func, *structure, **kwargs):
"""Applies `func` to each entry in `structure` and returns a new structure.
Applies `func(tuple_path, x[0], x[1], ..., **kwargs)` where `x[i]` is an entry
in `structure[i]` and `tuple_path` is a tuple of indices and/or dictionary
keys (as returned by `nest.yield_flat_paths`), which uniquely specifies the
common path to x[i] in the structures. All structures in `structure` must have
the same arity, and the return value will contain the results in the same
structure. Special kwarg `check_types` determines whether the types of
iterables within the structure must be the same-- see **kwargs definition
below.
Args:
func: A callable with the signature `func(tuple_path, *values, **kwargs)`
that is evaluated on the leaves of the structure.
*structure: A variable number of compatible structures to process.
**kwargs: Optional kwargs to be passed through to func. Special kwarg
`check_types` is not passed to func, but instead determines whether the
types of iterables within the structures have to be same (e.g.
`map_structure(func, [1], (1,))` raises a `TypeError` exception). To allow
this set this argument to `False`.
Returns:
A structure of the same form as the input structures whose leaves are the
result of evaluating func on corresponding leaves of the input structures.
Raises:
TypeError: If `func` is not callable or if the structures do not match
each other by depth tree.
TypeError: If `check_types` is not `False` and the two structures differ in
the type of sequence in any of their substructures.
ValueError: If no structures are provided.
"""
return map_structure_with_tuple_paths_up_to(structure[0],
func,
*structure,
**kwargs)
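# A minimal illustrative sketch (not part of the original module): the helper
# name below is added here to show the tuple paths passed to `func`.
def _map_structure_with_tuple_paths_example():
  """Returns {'a': [2, 2], 'b': 1}; each leaf becomes the length of its path."""
  structure = {"a": [1, 2], "b": 3}
  return map_structure_with_tuple_paths(lambda path, v: len(path), structure)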
def _yield_flat_up_to(shallow_tree, input_tree, is_seq, path=()):
"""Yields (path, value) pairs of input_tree flattened up to shallow_tree.
Args:
shallow_tree: Nested structure. Traverse no further than its leaf nodes.
input_tree: Nested structure. Return the paths and values from this tree.
Must have the same upper structure as shallow_tree.
is_seq: Function used to test if a value should be treated as a sequence.
path: Tuple. Optional argument, only used when recursing. The path from the
root of the original shallow_tree, down to the root of the shallow_tree
arg of this recursive call.
Yields:
    Pairs of (path, value), where path is the tuple path of a leaf node in
shallow_tree, and value is the value of the corresponding node in
input_tree.
"""
if not is_seq(shallow_tree):
yield (path, input_tree)
else:
input_tree = dict(_yield_sorted_items(input_tree))
for shallow_key, shallow_subtree in _yield_sorted_items(shallow_tree):
subpath = path + (shallow_key,)
input_subtree = input_tree[shallow_key]
for leaf_path, leaf_value in _yield_flat_up_to(shallow_subtree,
input_subtree, is_seq,
path=subpath):
yield (leaf_path, leaf_value)
def assert_shallow_structure(shallow_tree,
input_tree,
check_types=True,
expand_composites=False):
"""Asserts that `shallow_tree` is a shallow structure of `input_tree`.
That is, this function tests if the `input_tree` structure can be created from
the `shallow_tree` structure by replacing its leaf nodes with deeper
tree structures.
Examples:
The following code will raise an exception:
```python
shallow_tree = {"a": "A", "b": "B"}
input_tree = {"a": 1, "c": 2}
assert_shallow_structure(shallow_tree, input_tree)
```
The following code will raise an exception:
```python
shallow_tree = ["a", "b"]
input_tree = ["c", ["d", "e"], "f"]
assert_shallow_structure(shallow_tree, input_tree)
```
Args:
shallow_tree: an arbitrarily nested structure.
input_tree: an arbitrarily nested structure.
check_types: if `True` (default) the sequence types of `shallow_tree` and
`input_tree` have to be the same. Note that even with check_types==True,
this function will consider two different namedtuple classes with the same
name and _fields attribute to be the same class.
expand_composites: If true, then composite tensors such as
`tf.sparse.SparseTensor` and `tf.RaggedTensor` are expanded into their
component tensors.
Raises:
TypeError: If `shallow_tree` is a sequence but `input_tree` is not.
TypeError: If the sequence types of `shallow_tree` are different from
`input_tree`. Only raised if `check_types` is `True`.
ValueError: If the sequence lengths of `shallow_tree` are different from
`input_tree`.
"""
is_seq = is_sequence_or_composite if expand_composites else is_sequence
if is_seq(shallow_tree):
if not is_seq(input_tree):
raise TypeError(
"If shallow structure is a sequence, input must also be a sequence. "
"Input has type: %s." % type(input_tree))
if isinstance(shallow_tree, _wrapt.ObjectProxy):
shallow_type = type(shallow_tree.__wrapped__)
else:
shallow_type = type(shallow_tree)
if check_types and not isinstance(input_tree, shallow_type):
# Duck-typing means that nest should be fine with two different
# namedtuples with identical name and fields.
shallow_is_namedtuple = _is_namedtuple(shallow_tree, False)
input_is_namedtuple = _is_namedtuple(input_tree, False)
if shallow_is_namedtuple and input_is_namedtuple:
if not _same_namedtuples(shallow_tree, input_tree):
raise TypeError(_STRUCTURES_HAVE_MISMATCHING_TYPES.format(
input_type=type(input_tree),
shallow_type=type(shallow_tree)))
elif ((_is_composite_tensor(shallow_tree) or
_is_composite_tensor(input_tree)) and
(_is_type_spec(shallow_tree) or _is_type_spec(input_tree))):
pass # Compatibility will be checked below.
elif not (isinstance(shallow_tree, _collections_abc.Mapping) and
isinstance(input_tree, _collections_abc.Mapping)):
raise TypeError(_STRUCTURES_HAVE_MISMATCHING_TYPES.format(
input_type=type(input_tree),
shallow_type=type(shallow_tree)))
if _is_composite_tensor(shallow_tree) or _is_composite_tensor(input_tree):
if not (
(_is_composite_tensor(input_tree) or _is_type_spec(input_tree)) and
(_is_composite_tensor(shallow_tree) or _is_type_spec(shallow_tree))):
raise TypeError(_STRUCTURES_HAVE_MISMATCHING_TYPES.format(
input_type=type(input_tree),
shallow_type=type(shallow_tree)))
type_spec_1 = (shallow_tree if _is_type_spec(shallow_tree) else
shallow_tree._type_spec) # pylint: disable=protected-access
type_spec_2 = (input_tree if _is_type_spec(input_tree) else
input_tree._type_spec) # pylint: disable=protected-access
try:
_ = type_spec_1.most_specific_compatible_type(type_spec_2)
except (TypeError, ValueError) as e:
raise ValueError(
"Incompatible CompositeTensor TypeSpecs: %s vs. %s -- %s" %
(type_spec_1, type_spec_2, e))
elif _is_type_spec(shallow_tree):
if not _is_type_spec(input_tree):
raise TypeError("If shallow structure is a TypeSpec, input must also "
"be a TypeSpec. Input has type: %s."
% type(input_tree))
else:
if len(input_tree) != len(shallow_tree):
raise ValueError(
_STRUCTURES_HAVE_MISMATCHING_LENGTHS.format(
input_length=len(input_tree), shallow_length=len(shallow_tree)))
elif len(input_tree) < len(shallow_tree):
raise ValueError(
_INPUT_TREE_SMALLER_THAN_SHALLOW_TREE.format(
input_size=len(input_tree), shallow_size=len(shallow_tree)))
if isinstance(shallow_tree, _collections_abc.Mapping):
absent_keys = set(shallow_tree) - set(input_tree)
if absent_keys:
raise ValueError(_SHALLOW_TREE_HAS_INVALID_KEYS
.format(sorted(absent_keys)))
for shallow_branch, input_branch in zip(_yield_value(shallow_tree),
_yield_value(input_tree)):
assert_shallow_structure(shallow_branch, input_branch,
check_types=check_types,
expand_composites=expand_composites)
def flatten_up_to(shallow_tree, input_tree, check_types=True,
expand_composites=False):
"""Flattens `input_tree` up to `shallow_tree`.
Any further depth in structure in `input_tree` is retained as elements in the
  partially flattened output.
If `shallow_tree` and `input_tree` are not sequences, this returns a
single-element list: `[input_tree]`.
Use Case:
Sometimes we may wish to partially flatten a nested sequence, retaining some
of the nested structure. We achieve this by specifying a shallow structure,
`shallow_tree`, we wish to flatten up to.
The input, `input_tree`, can be thought of as having the same structure layout
as `shallow_tree`, but with leaf nodes that are themselves tree structures.
Examples:
```python
input_tree = [[[2, 2], [3, 3]], [[4, 9], [5, 5]]]
shallow_tree = [[True, True], [False, True]]
flattened_input_tree = flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = flatten_up_to(shallow_tree, shallow_tree)
# Output is:
# [[2, 2], [3, 3], [4, 9], [5, 5]]
# [True, True, False, True]
```
```python
input_tree = [[('a', 1), [('b', 2), [('c', 3), [('d', 4)]]]]]
shallow_tree = [['level_1', ['level_2', ['level_3', ['level_4']]]]]
input_tree_flattened_as_shallow_tree = flatten_up_to(shallow_tree, input_tree)
input_tree_flattened = flatten(input_tree)
# Output is:
# [('a', 1), ('b', 2), ('c', 3), ('d', 4)]
# ['a', 1, 'b', 2, 'c', 3, 'd', 4]
```
Non-Sequence Edge Cases:
```python
flatten_up_to(0, 0) # Output: [0]
flatten_up_to(0, [0, 1, 2]) # Output: [[0, 1, 2]]
flatten_up_to([0, 1, 2], 0) # Output: TypeError
flatten_up_to([0, 1, 2], [0, 1, 2]) # Output: [0, 1, 2]
```
Args:
shallow_tree: a possibly pruned structure of input_tree.
input_tree: an arbitrarily nested structure or a scalar object.
Note, numpy arrays are considered scalars.
check_types: bool. If True, check that each node in shallow_tree has the
same type as the corresponding node in input_tree.
expand_composites: If true, then composite tensors such as
`tf.sparse.SparseTensor` and `tf.RaggedTensor` are expanded into their
component tensors.
Returns:
A Python list, the partially flattened version of `input_tree` according to
the structure of `shallow_tree`.
Raises:
TypeError: If `shallow_tree` is a sequence but `input_tree` is not.
TypeError: If the sequence types of `shallow_tree` are different from
`input_tree`.
ValueError: If the sequence lengths of `shallow_tree` are different from
`input_tree`.
"""
is_seq = is_sequence_or_composite if expand_composites else is_sequence
assert_shallow_structure(shallow_tree,
input_tree,
check_types=check_types,
expand_composites=expand_composites)
# Discard paths returned by _yield_flat_up_to.
return [v for _, v in _yield_flat_up_to(shallow_tree, input_tree, is_seq)]
def flatten_with_tuple_paths_up_to(shallow_tree,
input_tree,
check_types=True,
expand_composites=False):
"""Flattens `input_tree` up to `shallow_tree`.
Any further depth in structure in `input_tree` is retained as elements in the
partially flattened output.
  Returns a list of (path, value) pairs, where value is a leaf node in the
flattened tree, and path is the tuple path of that leaf in input_tree.
If `shallow_tree` and `input_tree` are not sequences, this returns a
single-element list: `[((), input_tree)]`.
Use Case:
Sometimes we may wish to partially flatten a nested sequence, retaining some
of the nested structure. We achieve this by specifying a shallow structure,
`shallow_tree`, we wish to flatten up to.
The input, `input_tree`, can be thought of as having the same structure layout
as `shallow_tree`, but with leaf nodes that are themselves tree structures.
Examples:
```python
input_tree = [[[2, 2], [3, 3]], [[4, 9], [5, 5]]]
shallow_tree = [[True, True], [False, True]]
flattened_input_tree = flatten_with_tuple_paths_up_to(shallow_tree,
input_tree)
flattened_shallow_tree = flatten_with_tuple_paths_up_to(shallow_tree,
shallow_tree)
# Output is:
# [((0, 0), [2, 2]),
# ((0, 1), [3, 3]),
# ((1, 0), [4, 9]),
# ((1, 1), [5, 5])]
#
# [((0, 0), True),
# ((0, 1), True),
# ((1, 0), False),
# ((1, 1), True)]
```
```python
input_tree = [[('a', 1), [('b', 2), [('c', 3), [('d', 4)]]]]]
shallow_tree = [['level_1', ['level_2', ['level_3', ['level_4']]]]]
input_tree_flattened_as_shallow_tree = flatten_up_to(shallow_tree, input_tree)
input_tree_flattened = flatten(input_tree)
# Output is:
# [((0, 0), ('a', 1)),
# ((0, 1, 0), ('b', 2)),
# ((0, 1, 1, 0), ('c', 3)),
# ((0, 1, 1, 1), ('d', 4))]
# ['a', 1, 'b', 2, 'c', 3, 'd', 4]
```
Non-Sequence Edge Cases:
```python
  flatten_with_tuple_paths_up_to(0, 0)  # Output: [((), 0)]
  flatten_with_tuple_paths_up_to(0, [0, 1, 2])  # Output: [((), [0, 1, 2])]
  flatten_with_tuple_paths_up_to([0, 1, 2], 0)  # Output: TypeError
  flatten_with_tuple_paths_up_to([0, 1, 2], [0, 1, 2])
  # Output: [((0,), 0), ((1,), 1), ((2,), 2)]
```
Args:
shallow_tree: a possibly pruned structure of input_tree.
input_tree: an arbitrarily nested structure or a scalar object.
Note, numpy arrays are considered scalars.
check_types: bool. If True, check that each node in shallow_tree has the
same type as the corresponding node in input_tree.
expand_composites: If true, then composite tensors such as
`tf.sparse.SparseTensor` and `tf.RaggedTensor` are expanded into their
component tensors.
Returns:
A Python list, the partially flattened version of `input_tree` according to
the structure of `shallow_tree`.
Raises:
TypeError: If `shallow_tree` is a sequence but `input_tree` is not.
TypeError: If the sequence types of `shallow_tree` are different from
`input_tree`.
ValueError: If the sequence lengths of `shallow_tree` are different from
`input_tree`.
"""
is_seq = is_sequence_or_composite if expand_composites else is_sequence
assert_shallow_structure(shallow_tree,
input_tree,
check_types=check_types,
expand_composites=expand_composites)
return list(_yield_flat_up_to(shallow_tree, input_tree, is_seq))
def map_structure_up_to(shallow_tree, func, *inputs, **kwargs):
"""Applies a function or op to a number of partially flattened inputs.
The `inputs` are flattened up to `shallow_tree` before being mapped.
Use Case:
Sometimes we wish to apply a function to a partially flattened
sequence (for example when the function itself takes sequence inputs). We
achieve this by specifying a shallow structure, `shallow_tree` we wish to
flatten up to.
The `inputs`, can be thought of as having the same structure layout as
`shallow_tree`, but with leaf nodes that are themselves tree structures.
This function therefore will return something with the same base structure as
`shallow_tree`.
Examples:
```python
shallow_tree = [None, None]
inp_val = [1, 2, 3]
out = map_structure_up_to(shallow_tree, lambda x: 2 * x, inp_val)
# Output is: [2, 4]
```
```python
ab_tuple = collections.namedtuple("ab_tuple", "a, b")
op_tuple = collections.namedtuple("op_tuple", "add, mul")
inp_val = ab_tuple(a=2, b=3)
inp_ops = ab_tuple(a=op_tuple(add=1, mul=2), b=op_tuple(add=2, mul=3))
out = map_structure_up_to(inp_val, lambda val, ops: (val + ops.add) * ops.mul,
inp_val, inp_ops)
# Output is: ab_tuple(a=6, b=15)
```
```python
data_list = [[2, 4, 6, 8], [[1, 3, 5, 7, 9], [3, 5, 7]]]
name_list = ['evens', ['odds', 'primes']]
out = map_structure_up_to(
name_list,
lambda name, sec: "first_{}_{}".format(len(sec), name),
name_list, data_list)
# Output is: ['first_4_evens', ['first_5_odds', 'first_3_primes']]
```
Args:
shallow_tree: a shallow tree, common to all the inputs.
func: callable which will be applied to each input individually.
*inputs: arbitrarily nested combination of objects that are compatible with
shallow_tree. The function `func` is applied to corresponding
partially flattened elements of each input, so the function must support
arity of `len(inputs)`.
**kwargs: kwargs to feed to func(). Special kwarg
`check_types` is not passed to func, but instead determines whether the
types of iterables within the structures have to be same (e.g.
`map_structure(func, [1], (1,))` raises a `TypeError` exception). To allow
this set this argument to `False`.
Raises:
TypeError: If `shallow_tree` is a sequence but `input_tree` is not.
TypeError: If the sequence types of `shallow_tree` are different from
`input_tree`.
ValueError: If the sequence lengths of `shallow_tree` are different from
`input_tree`.
Returns:
result of repeatedly applying `func`, with the same structure layout as
`shallow_tree`.
"""
return map_structure_with_tuple_paths_up_to(
shallow_tree,
lambda _, *values: func(*values), # Discards the path arg.
*inputs,
**kwargs)
def map_structure_with_tuple_paths_up_to(shallow_tree, func, *inputs, **kwargs):
"""Applies a function or op to a number of partially flattened inputs.
Like map_structure_up_to(), except that the 'func' argument takes a path
tuple as its first argument, followed by the corresponding values from
*inputs.
Example:
```python
lowercase = {'a': 'a', 'b': ('b0', 'b1')}
uppercase = {'a': 'A', 'b': ('B0', 'B1')}
def print_path_and_values(path, *values):
print("path: {}, values: {}".format(path, values))
shallow_tree = {'a': None}
map_structure_with_tuple_paths_up_to(shallow_tree,
print_path_and_values,
lowercase,
uppercase)
path: ('a',), values: ('a', 'A')
path: ('b', 0), values: ('b0', 'B0')
path: ('b', 1), values: ('b1', 'B1')
shallow_tree = {'b': None}
map_structure_with_tuple_paths_up_to(shallow_tree,
print_path_and_values,
lowercase,
uppercase,
check_types=False)
  path: ('b', 1), values: (('b0', 'b1'), ('B0', 'B1'))
shallow_tree = {'a': None, 'b': {1: None}}
map_structure_with_tuple_paths_up_to(shallow_tree,
print_path_and_values,
lowercase,
uppercase,
check_types=False)
path: ('a',), values: ('a', 'A')
  path: ('b', 1), values: ('b1', 'B1')
```
Args:
shallow_tree: a shallow tree, common to all the inputs.
func: callable that takes args (path, inputs_0_value, ... , inputs_N_value),
where path is a tuple path to a leaf node in shallow_tree, and
inputs_i_value is the corresponding value from inputs[i].
*inputs: nested structures that are all structurally compatible with
shallow_tree.
**kwargs: kwargs to feed to func(). Special kwarg
`check_types` is not passed to func, but instead determines whether the
types of iterables within the structures have to be same (e.g.
`map_structure(func, [1], (1,))` raises a `TypeError` exception). To allow
this set this argument to `False`.
Raises:
TypeError: If `shallow_tree` is a sequence but one of `*inputs` is not.
TypeError: If the sequence types of `shallow_tree` are different from
`input_tree`.
ValueError: If the sequence lengths of `shallow_tree` are different from
`input_tree`.
Returns:
Result of repeatedly applying `func`. Has the same structure layout as
`shallow_tree`.
"""
if not inputs:
raise ValueError("Cannot map over no sequences")
check_types = kwargs.pop("check_types", True)
expand_composites = kwargs.pop("expand_composites", False)
is_seq = is_sequence_or_composite if expand_composites else is_sequence
for input_tree in inputs:
assert_shallow_structure(
shallow_tree,
input_tree,
check_types=check_types,
expand_composites=expand_composites)
# Flatten each input separately, apply the function to corresponding elements,
# then repack based on the structure of the first input.
flat_value_gen = (
flatten_up_to( # pylint: disable=g-complex-comprehension
shallow_tree,
input_tree,
check_types,
expand_composites=expand_composites) for input_tree in inputs)
flat_path_gen = (
path for path, _ in _yield_flat_up_to(shallow_tree, inputs[0], is_seq))
results = [
func(*args, **kwargs) for args in zip(flat_path_gen, *flat_value_gen)
]
return pack_sequence_as(structure=shallow_tree, flat_sequence=results,
expand_composites=expand_composites)
def get_traverse_shallow_structure(traverse_fn, structure,
expand_composites=False):
"""Generates a shallow structure from a `traverse_fn` and `structure`.
`traverse_fn` must accept any possible subtree of `structure` and return
a depth=1 structure containing `True` or `False` values, describing which
  of the top-level subtrees may be traversed. It may also return a scalar
  `True` or `False`, meaning "traversal is OK / not OK for all subtrees."
Examples are available in the unit tests (nest_test.py).
Args:
traverse_fn: Function taking a substructure and returning either a scalar
`bool` (whether to traverse that substructure or not) or a depth=1
shallow structure of the same type, describing which parts of the
substructure to traverse.
structure: The structure to traverse.
expand_composites: If true, then composite tensors such as
`tf.sparse.SparseTensor` and `tf.RaggedTensor` are expanded into their
component tensors.
Returns:
A shallow structure containing python bools, which can be passed to
`map_structure_up_to` and `flatten_up_to`.
Raises:
TypeError: if `traverse_fn` returns a sequence for a non-sequence input,
or a structure with depth higher than 1 for a sequence input,
or if any leaf values in the returned structure or scalar are not type
`bool`.
"""
is_seq = is_sequence_or_composite if expand_composites else is_sequence
to_traverse = traverse_fn(structure)
if not is_seq(structure):
if not isinstance(to_traverse, bool):
raise TypeError("traverse_fn returned structure: %s for non-structure: %s"
% (to_traverse, structure))
return to_traverse
level_traverse = []
if isinstance(to_traverse, bool):
if not to_traverse:
# Do not traverse this substructure at all. Exit early.
return False
else:
# Traverse the entire substructure.
for branch in _yield_value(structure):
level_traverse.append(
get_traverse_shallow_structure(traverse_fn, branch,
expand_composites=expand_composites))
elif not is_seq(to_traverse):
raise TypeError("traverse_fn returned a non-bool scalar: %s for input: %s"
% (to_traverse, structure))
else:
# Traverse some subset of this substructure.
assert_shallow_structure(to_traverse, structure,
expand_composites=expand_composites)
for t, branch in zip(_yield_value(to_traverse),
_yield_value(structure)):
if not isinstance(t, bool):
raise TypeError(
"traverse_fn didn't return a depth=1 structure of bools. saw: %s "
" for structure: %s" % (to_traverse, structure))
if t:
level_traverse.append(
get_traverse_shallow_structure(traverse_fn, branch))
else:
level_traverse.append(False)
return _sequence_like(structure, level_traverse)
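# A minimal illustrative sketch (not part of the original module): the helper
# name below is added here. It stops traversal at length-3 lists and feeds the
# resulting shallow structure of bools to `flatten_up_to`.
def _get_traverse_shallow_structure_example():
  """Returns [1, 2, [3, 4, 5]]."""
  structure = [[1, 2], [3, 4, 5]]
  shallow = get_traverse_shallow_structure(
      lambda s: not (is_sequence(s) and len(s) == 3), structure)
  # shallow == [[True, True], False]
  return flatten_up_to(shallow, structure)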
def yield_flat_paths(nest, expand_composites=False):
"""Yields paths for some nested structure.
Paths are lists of objects which can be str-converted, which may include
integers or other types which are used as indices in a dict.
  The flat list will be in the same order as if you called `nest.flatten` on
  the structure. This is handy for naming Tensors such that the TF scope
  structure matches the tuple structure.
E.g. if we have a tuple `value = Foo(a=3, b=Bar(c=23, d=42))`
```shell
nest.flatten(value)
[3, 23, 42]
list(nest.yield_flat_paths(value))
[('a',), ('b', 'c'), ('b', 'd')]
```
```shell
list(nest.yield_flat_paths({'a': [3]}))
[('a', 0)]
list(nest.yield_flat_paths({'a': 3}))
[('a',)]
```
Args:
nest: the value to produce a flattened paths list for.
expand_composites: If true, then composite tensors such as
`tf.sparse.SparseTensor` and `tf.RaggedTensor` are expanded into their
component tensors.
Yields:
Tuples containing index or key values which form the path to a specific
leaf value in the nested structure.
"""
is_seq = is_sequence_or_composite if expand_composites else is_sequence
for k, _ in _yield_flat_up_to(nest, nest, is_seq):
yield k
def flatten_with_joined_string_paths(structure, separator="/",
expand_composites=False):
"""Returns a list of (string path, data element) tuples.
The order of tuples produced matches that of `nest.flatten`. This allows you
to flatten a nested structure while keeping information about where in the
structure each data element was located. See `nest.yield_flat_paths`
for more information.
Args:
structure: the nested structure to flatten.
separator: string to separate levels of hierarchy in the results, defaults
to '/'.
expand_composites: If true, then composite tensors such as
`tf.sparse.SparseTensor` and `tf.RaggedTensor` are expanded into their
component tensors.
Returns:
A list of (string, data element) tuples.
"""
flat_paths = yield_flat_paths(structure, expand_composites=expand_composites)
def stringify_and_join(path_elements):
return separator.join(str(path_element) for path_element in path_elements)
flat_string_paths = (stringify_and_join(path) for path in flat_paths)
return list(zip(flat_string_paths,
flatten(structure, expand_composites=expand_composites)))
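# A minimal illustrative sketch (not part of the original module): the helper
# name below is added here.
def _flatten_with_joined_string_paths_example():
  """Returns [('a', 1), ('b/c', 2)] using the default '/' separator."""
  return flatten_with_joined_string_paths({"a": 1, "b": {"c": 2}})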
def flatten_with_tuple_paths(structure, expand_composites=False):
"""Returns a list of `(tuple_path, leaf_element)` tuples.
The order of pairs produced matches that of `nest.flatten`. This allows you
to flatten a nested structure while keeping information about where in the
structure each data element was located. See `nest.yield_flat_paths`
for more information about tuple paths.
Args:
structure: the nested structure to flatten.
expand_composites: If true, then composite tensors such as
`tf.sparse.SparseTensor` and `tf.RaggedTensor` are expanded into their
component tensors.
Returns:
A list of `(tuple_path, leaf_element)` tuples. Each `tuple_path` is a tuple
of indices and/or dictionary keys that uniquely specify the path to
`leaf_element` within `structure`.
"""
return list(zip(yield_flat_paths(structure,
expand_composites=expand_composites),
flatten(structure, expand_composites=expand_composites)))
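# A minimal illustrative sketch (not part of the original module): the helper
# name below is added here.
def _flatten_with_tuple_paths_example():
  """Returns [(('a', 0), 10), (('a', 1), 20), (('b',), 3)]."""
  return flatten_with_tuple_paths({"a": [10, 20], "b": 3})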
def list_to_tuple(structure):
"""Replace all lists with tuples.
The fork of nest that tf.data uses treats lists as single elements, while
tf.nest treats them as structures to recurse into. Keras has chosen to adopt
the latter convention, and must therefore deeply replace all lists with tuples
before passing structures to Dataset.from_generator.
Args:
structure: A nested structure to be remapped.
Returns:
structure mapped to replace all lists with tuples.
"""
def sequence_fn(instance, args):
if isinstance(instance, list):
return tuple(args)
return _sequence_like(instance, args)
return _pack_sequence_as(structure, flatten(structure), False,
sequence_fn=sequence_fn)
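# A minimal illustrative sketch (not part of the original module): the helper
# name below is added here.
def _list_to_tuple_example():
  """Returns (1, (2, 3), {'a': (4,)}); every list becomes a tuple, deeply."""
  return list_to_tuple([1, [2, 3], {"a": [4]}])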
_pywrap_utils.RegisterType("Mapping", _collections_abc.Mapping)
_pywrap_utils.RegisterType("MutableMapping", _collections_abc.MutableMapping)
_pywrap_utils.RegisterType("Sequence", _collections_abc.Sequence)
_pywrap_utils.RegisterType("MappingView", _collections_abc.MappingView)
_pywrap_utils.RegisterType("ObjectProxy", _wrapt.ObjectProxy)
|
the-stack_0_14104 | import os
print("Starting Capsian Setup Tool...")
print("This script will install all the dependencies you need")
input("Press enter to continue or close to terminate ")
_pip_type = "pip"
if os.name == "posix":
_pip_type = "pip3"
os.system(_pip_type + " install pyglet==1.5.6")
os.system(_pip_type + " install PyOpenGL")
os.system(_pip_type + " install pyinstaller")
input("Installation complete!\nPress enter to exit ")
|
the-stack_0_14105 | # Copyright (c) MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import datetime
import functools
import importlib
import json
import operator
import os
import queue
import sys
import tempfile
import time
import traceback
import unittest
import warnings
from contextlib import contextmanager
from functools import partial, reduce
from subprocess import PIPE, Popen
from typing import Callable, Optional, Tuple
from urllib.error import ContentTooShortError, HTTPError
import numpy as np
import torch
import torch.distributed as dist
from monai.apps.utils import download_url
from monai.config import NdarrayTensor
from monai.config.deviceconfig import USE_COMPILED
from monai.config.type_definitions import NdarrayOrTensor
from monai.data import create_test_image_2d, create_test_image_3d
from monai.networks import convert_to_torchscript
from monai.utils import optional_import
from monai.utils.module import pytorch_after, version_leq
from monai.utils.type_conversion import convert_data_type
nib, _ = optional_import("nibabel")
quick_test_var = "QUICKTEST"
_tf32_enabled = None
_test_data_config: dict = {}
def testing_data_config(*keys):
"""get _test_data_config[keys0][keys1]...[keysN]"""
if not _test_data_config:
with open(os.path.join(os.path.dirname(__file__), "testing_data", "data_config.json")) as c:
_config = json.load(c)
for k, v in _config.items():
_test_data_config[k] = v
return reduce(operator.getitem, keys, _test_data_config)
def clone(data: NdarrayTensor) -> NdarrayTensor:
"""
Clone data independent of type.
Args:
data (NdarrayTensor): This can be a Pytorch Tensor or numpy array.
Returns:
Any: Cloned data object
"""
return copy.deepcopy(data)
def assert_allclose(
actual: NdarrayOrTensor,
desired: NdarrayOrTensor,
type_test: bool = True,
device_test: bool = False,
*args,
**kwargs,
):
"""
Assert that types and all values of two data objects are close.
Args:
actual: Pytorch Tensor or numpy array for comparison.
desired: Pytorch Tensor or numpy array to compare against.
type_test: whether to test that `actual` and `desired` are both numpy arrays or torch tensors.
device_test: whether to test the device property.
args: extra arguments to pass on to `np.testing.assert_allclose`.
kwargs: extra arguments to pass on to `np.testing.assert_allclose`.
"""
if type_test:
# check both actual and desired are of the same type
np.testing.assert_equal(isinstance(actual, np.ndarray), isinstance(desired, np.ndarray), "numpy type")
np.testing.assert_equal(isinstance(actual, torch.Tensor), isinstance(desired, torch.Tensor), "torch type")
if isinstance(desired, torch.Tensor) or isinstance(actual, torch.Tensor):
if device_test:
np.testing.assert_equal(str(actual.device), str(desired.device), "torch device check") # type: ignore
actual = actual.detach().cpu().numpy() if isinstance(actual, torch.Tensor) else actual
desired = desired.detach().cpu().numpy() if isinstance(desired, torch.Tensor) else desired
np.testing.assert_allclose(actual, desired, *args, **kwargs)
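# A minimal illustrative sketch (not part of the original test utilities): the
# helper name below is added here.
def _assert_allclose_example():
    """Both calls pass; the second one disables the type check on purpose."""
    assert_allclose(torch.tensor([1.0, 2.0]), torch.tensor([1.0, 2.0]))
    assert_allclose(np.array([1.0, 2.0]), torch.tensor([1.0, 2.0]), type_test=False)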
@contextmanager
def skip_if_downloading_fails():
try:
yield
except (ContentTooShortError, HTTPError, ConnectionError) as e:
raise unittest.SkipTest(f"error while downloading: {e}") from e
except RuntimeError as rt_e:
if "unexpected EOF" in str(rt_e):
raise unittest.SkipTest(f"error while downloading: {rt_e}") from rt_e # incomplete download
if "network issue" in str(rt_e):
raise unittest.SkipTest(f"error while downloading: {rt_e}") from rt_e
if "gdown dependency" in str(rt_e): # no gdown installed
raise unittest.SkipTest(f"error while downloading: {rt_e}") from rt_e
if "md5 check" in str(rt_e):
raise unittest.SkipTest(f"error while downloading: {rt_e}") from rt_e
raise rt_e
def test_pretrained_networks(network, input_param, device):
with skip_if_downloading_fails():
return network(**input_param).to(device)
def test_is_quick():
return os.environ.get(quick_test_var, "").lower() == "true"
def is_tf32_env():
"""
The environment variable NVIDIA_TF32_OVERRIDE=0 will override any defaults
or programmatic configuration of NVIDIA libraries, and consequently,
cuBLAS will not accelerate FP32 computations with TF32 tensor cores.
"""
global _tf32_enabled
if _tf32_enabled is None:
_tf32_enabled = False
if (
torch.cuda.is_available()
and not version_leq(f"{torch.version.cuda}", "10.100")
and os.environ.get("NVIDIA_TF32_OVERRIDE", "1") != "0"
and torch.cuda.device_count() > 0 # at least 11.0
):
try:
# with TF32 enabled, the speed is ~8x faster, but the precision has ~2 digits less in the result
g_gpu = torch.Generator(device="cuda")
g_gpu.manual_seed(2147483647)
a_full = torch.randn(1024, 1024, dtype=torch.double, device="cuda", generator=g_gpu)
b_full = torch.randn(1024, 1024, dtype=torch.double, device="cuda", generator=g_gpu)
_tf32_enabled = (a_full.float() @ b_full.float() - a_full @ b_full).abs().max().item() > 0.001 # 0.1713
except BaseException:
pass
print(f"tf32 enabled: {_tf32_enabled}")
return _tf32_enabled
def skip_if_quick(obj):
"""
Skip the unit tests if environment variable `quick_test_var=true`.
For example, the user can skip the relevant tests by setting ``export QUICKTEST=true``.
"""
is_quick = test_is_quick()
return unittest.skipIf(is_quick, "Skipping slow tests")(obj)
class SkipIfNoModule:
"""Decorator to be used if test should be skipped
when optional module is not present."""
def __init__(self, module_name):
self.module_name = module_name
self.module_missing = not optional_import(self.module_name)[1]
def __call__(self, obj):
return unittest.skipIf(self.module_missing, f"optional module not present: {self.module_name}")(obj)
class SkipIfModule:
"""Decorator to be used if test should be skipped
when optional module is present."""
def __init__(self, module_name):
self.module_name = module_name
self.module_avail = optional_import(self.module_name)[1]
def __call__(self, obj):
return unittest.skipIf(self.module_avail, f"Skipping because optional module present: {self.module_name}")(obj)
def skip_if_no_cpp_extension(obj):
"""
Skip the unit tests if the cpp extension is not available
"""
return unittest.skipUnless(USE_COMPILED, "Skipping cpp extension tests")(obj)
def skip_if_no_cuda(obj):
"""
Skip the unit tests if torch.cuda.is_available is False
"""
return unittest.skipUnless(torch.cuda.is_available(), "Skipping CUDA-based tests")(obj)
def skip_if_windows(obj):
"""
Skip the unit tests if platform is win32
"""
return unittest.skipIf(sys.platform == "win32", "Skipping tests on Windows")(obj)
class SkipIfBeforePyTorchVersion:
"""Decorator to be used if test should be skipped
with PyTorch versions older than that given."""
def __init__(self, pytorch_version_tuple):
self.min_version = pytorch_version_tuple
self.version_too_old = not pytorch_after(*pytorch_version_tuple)
def __call__(self, obj):
return unittest.skipIf(
self.version_too_old, f"Skipping tests that fail on PyTorch versions before: {self.min_version}"
)(obj)
class SkipIfAtLeastPyTorchVersion:
"""Decorator to be used if test should be skipped
with PyTorch versions newer than or equal to that given."""
def __init__(self, pytorch_version_tuple):
self.max_version = pytorch_version_tuple
self.version_too_new = pytorch_after(*pytorch_version_tuple)
def __call__(self, obj):
return unittest.skipIf(
self.version_too_new, f"Skipping tests that fail on PyTorch versions at least: {self.max_version}"
)(obj)
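# Illustrative usage sketch (not part of the original test utilities): both
# decorators wrap a test case class or test method, for example:
#
#   @SkipIfBeforePyTorchVersion((1, 10))
#   class TestNewOp(unittest.TestCase):
#       ...
#
#   @SkipIfAtLeastPyTorchVersion((2, 0))
#   def test_legacy_behaviour(self):
#       ...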
def is_main_test_process():
ps = torch.multiprocessing.current_process()
if not ps or not hasattr(ps, "name"):
return False
return ps.name.startswith("Main")
def has_cupy():
"""
Returns True if the user has installed a version of cupy.
"""
cp, has_cp = optional_import("cupy")
if not is_main_test_process():
return has_cp # skip the check if we are running in subprocess
if not has_cp:
return False
try: # test cupy installation with a basic example
x = cp.arange(6, dtype="f").reshape(2, 3)
y = cp.arange(3, dtype="f")
kernel = cp.ElementwiseKernel(
"float32 x, float32 y", "float32 z", """ if (x - 2 > y) { z = x * y; } else { z = x + y; } """, "my_kernel"
)
flag = kernel(x, y)[0, 0] == 0
del x, y, kernel
cp.get_default_memory_pool().free_all_blocks()
return flag
except Exception:
return False
HAS_CUPY = has_cupy()
def make_nifti_image(array: NdarrayOrTensor, affine=None, dir=None, fname=None, suffix=".nii.gz", verbose=False):
"""
Create a temporary nifti image on the disk and return the image name.
User is responsible for deleting the temporary file when done with it.
"""
if isinstance(array, torch.Tensor):
array, *_ = convert_data_type(array, np.ndarray)
if isinstance(affine, torch.Tensor):
affine, *_ = convert_data_type(affine, np.ndarray)
if affine is None:
affine = np.eye(4)
test_image = nib.Nifti1Image(array, affine)
# if dir not given, create random. Else, make sure it exists.
if dir is None:
dir = tempfile.mkdtemp()
else:
os.makedirs(dir, exist_ok=True)
# If fname not given, get random one. Else, concat dir, fname and suffix.
if fname is None:
temp_f, fname = tempfile.mkstemp(suffix=suffix, dir=dir)
os.close(temp_f)
else:
fname = os.path.join(dir, fname + suffix)
nib.save(test_image, fname)
if verbose:
print(f"File written: {fname}.")
return fname
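# A minimal illustrative sketch (not part of the original test utilities): the
# helper name below is added here and assumes nibabel is installed.
def _make_nifti_image_example():
    """Writes a small temporary NIfTI file and removes it again."""
    fname = make_nifti_image(np.zeros((8, 8, 8), dtype=np.float32))
    os.remove(fname)  # the caller owns the temporary file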
def make_rand_affine(ndim: int = 3, random_state: Optional[np.random.RandomState] = None):
"""Create random affine transformation (with values == -1, 0 or 1)."""
rs = np.random.random.__self__ if random_state is None else random_state # type: ignore
vals = rs.choice([-1, 1], size=ndim)
positions = rs.choice(range(ndim), size=ndim, replace=False)
af = np.zeros([ndim + 1, ndim + 1])
af[ndim, ndim] = 1
for i, (v, p) in enumerate(zip(vals, positions)):
af[i, p] = v
return af
class DistTestCase(unittest.TestCase):
"""
testcase without _outcome, so that it's picklable.
"""
def __getstate__(self):
self_dict = self.__dict__.copy()
del self_dict["_outcome"]
return self_dict
def __setstate__(self, data_dict):
self.__dict__.update(data_dict)
class DistCall:
"""
Wrap a test case so that it will run in multiple processes on a single machine using `torch.distributed`.
It is designed to be used with `tests.utils.DistTestCase`.
Usage:
decorate a unittest testcase method with a `DistCall` instance::
class MyTests(unittest.TestCase):
@DistCall(nnodes=1, nproc_per_node=3, master_addr="localhost")
def test_compute(self):
...
the `test_compute` method should trigger different worker logic according to `dist.get_rank()`.
Multi-node tests require a fixed master_addr:master_port, with node_rank set manually in multiple scripts
or from environment variable "NODE_RANK".
"""
def __init__(
self,
nnodes: int = 1,
nproc_per_node: int = 1,
master_addr: str = "localhost",
master_port: Optional[int] = None,
node_rank: Optional[int] = None,
timeout=60,
init_method=None,
backend: Optional[str] = None,
daemon: Optional[bool] = None,
method: Optional[str] = "spawn",
verbose: bool = False,
):
"""
Args:
nnodes: The number of nodes to use for distributed call.
nproc_per_node: The number of processes to call on each node.
master_addr: Master node (rank 0)'s address, should be either the IP address or the hostname of node 0.
master_port: Master node (rank 0)'s free port.
node_rank: The rank of the node, this could be set via environment variable "NODE_RANK".
timeout: Timeout for operations executed against the process group.
init_method: URL specifying how to initialize the process group.
Default is "env://" or "file:///d:/a_temp" (windows) if unspecified.
backend: The backend to use. Depending on build-time configurations,
valid values include ``mpi``, ``gloo``, and ``nccl``.
daemon: the process’s daemon flag.
When daemon=None, the initial value is inherited from the creating process.
method: set the method which should be used to start a child process.
method can be 'fork', 'spawn' or 'forkserver'.
verbose: whether to print NCCL debug info.
"""
self.nnodes = int(nnodes)
self.nproc_per_node = int(nproc_per_node)
if self.nnodes < 1 or self.nproc_per_node < 1:
raise ValueError(
f"number of nodes and processes per node must be >= 1, got {self.nnodes} and {self.nproc_per_node}"
)
self.node_rank = int(os.environ.get("NODE_RANK", "0")) if node_rank is None else int(node_rank)
self.master_addr = master_addr
self.master_port = np.random.randint(10000, 20000) if master_port is None else master_port
if backend is None:
self.backend = "nccl" if torch.distributed.is_nccl_available() and torch.cuda.is_available() else "gloo"
else:
self.backend = backend
self.init_method = init_method
if self.init_method is None and sys.platform == "win32":
self.init_method = "file:///d:/a_temp"
self.timeout = datetime.timedelta(0, timeout)
self.daemon = daemon
self.method = method
self.verbose = verbose
def run_process(self, func, local_rank, args, kwargs, results):
_env = os.environ.copy() # keep the original system env
try:
os.environ["MASTER_ADDR"] = self.master_addr
os.environ["MASTER_PORT"] = str(self.master_port)
os.environ["LOCAL_RANK"] = str(local_rank)
if self.verbose:
os.environ["NCCL_DEBUG"] = "INFO"
os.environ["NCCL_DEBUG_SUBSYS"] = "ALL"
os.environ["NCCL_BLOCKING_WAIT"] = str(1)
os.environ["OMP_NUM_THREADS"] = str(1)
os.environ["WORLD_SIZE"] = str(self.nproc_per_node * self.nnodes)
os.environ["RANK"] = str(self.nproc_per_node * self.node_rank + local_rank)
if torch.cuda.is_available():
torch.cuda.set_device(int(local_rank)) # using device ids from CUDA_VISIBILE_DEVICES
dist.init_process_group(
backend=self.backend,
init_method=self.init_method,
timeout=self.timeout,
world_size=int(os.environ["WORLD_SIZE"]),
rank=int(os.environ["RANK"]),
)
func(*args, **kwargs)
# the primary node lives longer to
# avoid _store_based_barrier, RuntimeError: Broken pipe
# as the TCP store daemon is on the rank 0
if int(os.environ["RANK"]) == 0:
time.sleep(0.1)
results.put(True)
except Exception as e:
results.put(False)
raise e
finally:
os.environ.clear()
os.environ.update(_env)
try:
dist.destroy_process_group()
except RuntimeError as e:
warnings.warn(f"While closing process group: {e}.")
def __call__(self, obj):
if not torch.distributed.is_available():
return unittest.skipIf(True, "Skipping distributed tests because not torch.distributed.is_available()")(obj)
if torch.cuda.is_available() and torch.cuda.device_count() < self.nproc_per_node:
return unittest.skipIf(
True,
f"Skipping distributed tests because it requires {self.nnodes} devices "
f"but got {torch.cuda.device_count()}",
)(obj)
_cache_original_func(obj)
@functools.wraps(obj)
def _wrapper(*args, **kwargs):
tmp = torch.multiprocessing.get_context(self.method)
processes = []
results = tmp.Queue()
func = _call_original_func
args = [obj.__name__, obj.__module__] + list(args)
for proc_rank in range(self.nproc_per_node):
p = tmp.Process(
target=self.run_process, args=(func, proc_rank, args, kwargs, results), daemon=self.daemon
)
p.start()
processes.append(p)
for p in processes:
p.join()
assert results.get(), "Distributed call failed."
_del_original_func(obj)
return _wrapper
class TimedCall:
"""
Wrap a test case so that it will run in a new process, raises a TimeoutError if the decorated method takes
more than `seconds` to finish. It is designed to be used with `tests.utils.DistTestCase`.
"""
def __init__(
self,
seconds: float = 60.0,
daemon: Optional[bool] = None,
method: Optional[str] = "spawn",
force_quit: bool = True,
skip_timing=False,
):
"""
Args:
seconds: timeout seconds.
daemon: the process’s daemon flag.
When daemon=None, the initial value is inherited from the creating process.
method: set the method which should be used to start a child process.
method can be 'fork', 'spawn' or 'forkserver'.
force_quit: whether to terminate the child process when `seconds` elapsed.
skip_timing: whether to skip the timing constraint.
this is useful to include some system conditions such as
`torch.cuda.is_available()`.
"""
self.timeout_seconds = seconds
self.daemon = daemon
self.force_quit = force_quit
self.skip_timing = skip_timing
self.method = method
@staticmethod
def run_process(func, args, kwargs, results):
try:
output = func(*args, **kwargs)
results.put(output)
except Exception as e:
e.traceback = traceback.format_exc()
results.put(e)
def __call__(self, obj):
if self.skip_timing:
return obj
_cache_original_func(obj)
@functools.wraps(obj)
def _wrapper(*args, **kwargs):
tmp = torch.multiprocessing.get_context(self.method)
func = _call_original_func
args = [obj.__name__, obj.__module__] + list(args)
results = tmp.Queue()
p = tmp.Process(target=TimedCall.run_process, args=(func, args, kwargs, results), daemon=self.daemon)
p.start()
p.join(timeout=self.timeout_seconds)
timeout_error = None
try:
if p.is_alive():
# create an Exception
timeout_error = torch.multiprocessing.TimeoutError(
f"'{obj.__name__}' in '{obj.__module__}' did not finish in {self.timeout_seconds}s."
)
if self.force_quit:
p.terminate()
else:
warnings.warn(
f"TimedCall: deadline ({self.timeout_seconds}s) "
f"reached but waiting for {obj.__name__} to finish."
)
finally:
p.join()
_del_original_func(obj)
res = None
try:
res = results.get(block=False)
except queue.Empty: # no result returned, took too long
pass
if isinstance(res, Exception): # other errors from obj
if hasattr(res, "traceback"):
raise RuntimeError(res.traceback) from res
raise res
if timeout_error: # no force_quit finished
raise timeout_error
return res
return _wrapper
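# Illustrative usage sketch (not part of the original test utilities),
# mirroring the DistCall example above:
#
#   class MyTests(DistTestCase):
#       @TimedCall(seconds=30, force_quit=True)
#       def test_compute(self):
#           ...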
_original_funcs = {}
def _cache_original_func(obj) -> None:
"""cache the original function by name, so that the decorator doesn't shadow it."""
_original_funcs[obj.__name__] = obj
def _del_original_func(obj):
"""pop the original function from cache."""
_original_funcs.pop(obj.__name__, None)
if torch.cuda.is_available(): # clean up the cached function
torch.cuda.synchronize()
torch.cuda.empty_cache()
def _call_original_func(name, module, *args, **kwargs):
if name not in _original_funcs:
_original_module = importlib.import_module(module) # reimport, refresh _original_funcs
if not hasattr(_original_module, name):
# refresh module doesn't work
raise RuntimeError(f"Could not recover the original {name} from {module}: {_original_funcs}.")
f = _original_funcs[name]
return f(*args, **kwargs)
class NumpyImageTestCase2D(unittest.TestCase):
im_shape = (128, 64)
input_channels = 1
output_channels = 4
num_classes = 3
def setUp(self):
im, msk = create_test_image_2d(
self.im_shape[0], self.im_shape[1], num_objs=4, rad_max=20, noise_max=0.0, num_seg_classes=self.num_classes
)
self.imt = im[None, None]
self.seg1 = (msk[None, None] > 0).astype(np.float32)
self.segn = msk[None, None]
class TorchImageTestCase2D(NumpyImageTestCase2D):
def setUp(self):
NumpyImageTestCase2D.setUp(self)
self.imt = torch.tensor(self.imt)
self.seg1 = torch.tensor(self.seg1)
self.segn = torch.tensor(self.segn)
class NumpyImageTestCase3D(unittest.TestCase):
im_shape = (64, 48, 80)
input_channels = 1
output_channels = 4
num_classes = 3
def setUp(self):
im, msk = create_test_image_3d(
self.im_shape[0],
self.im_shape[1],
self.im_shape[2],
num_objs=4,
rad_max=20,
noise_max=0.0,
num_seg_classes=self.num_classes,
)
self.imt = im[None, None]
self.seg1 = (msk[None, None] > 0).astype(np.float32)
self.segn = msk[None, None]
class TorchImageTestCase3D(NumpyImageTestCase3D):
def setUp(self):
NumpyImageTestCase3D.setUp(self)
self.imt = torch.tensor(self.imt)
self.seg1 = torch.tensor(self.seg1)
self.segn = torch.tensor(self.segn)
def test_script_save(net, *inputs, device=None, rtol=1e-4, atol=0.0):
"""
Test the ability to save `net` as a Torchscript object, reload it, and apply inference. The value `inputs` is
forward-passed through the original and loaded copy of the network and their results returned.
The forward pass for both is done without gradient accumulation.
The test will be performed with CUDA if available, else CPU.
"""
# TODO: would be nice to use GPU if available, but it currently causes CI failures.
device = "cpu"
with tempfile.TemporaryDirectory() as tempdir:
convert_to_torchscript(
model=net,
filename_or_obj=os.path.join(tempdir, "model.ts"),
verify=True,
inputs=inputs,
device=device,
rtol=rtol,
atol=atol,
)
def download_url_or_skip_test(*args, **kwargs):
"""``download_url`` and skip the tests if any downloading error occurs."""
with skip_if_downloading_fails():
download_url(*args, **kwargs)
def query_memory(n=2):
"""
Find best n idle devices and return a string of device ids using the `nvidia-smi` command.
"""
bash_string = "nvidia-smi --query-gpu=power.draw,temperature.gpu,memory.used --format=csv,noheader,nounits"
try:
p1 = Popen(bash_string.split(), stdout=PIPE)
output, error = p1.communicate()
free_memory = [x.split(",") for x in output.decode("utf-8").split("\n")[:-1]]
free_memory = np.asarray(free_memory, dtype=float).T
free_memory[1] += free_memory[0] # combine 0/1 column measures
ids = np.lexsort(free_memory)[:n]
except (TypeError, IndexError, OSError):
ids = range(n) if isinstance(n, int) else []
return ",".join(f"{int(x)}" for x in ids)
TEST_NDARRAYS: Tuple[Callable] = (np.array, torch.as_tensor) # type: ignore
if torch.cuda.is_available():
gpu_tensor: Callable = partial(torch.as_tensor, device="cuda")
TEST_NDARRAYS = TEST_NDARRAYS + (gpu_tensor,) # type: ignore
TEST_DEVICES = [[torch.device("cpu")]]
if torch.cuda.is_available():
TEST_DEVICES.append([torch.device("cuda")])
if __name__ == "__main__":
print(query_memory())
|
the-stack_0_14106 | '''
test_dnssec_nsecx - Tests NSECx support routines.
.. Copyright (c) 2015 Neustar, Inc. All rights reserved.
.. See COPYRIGHT.txt for full notice. See LICENSE.txt for terms and conditions.
'''
# pylint: skip-file
import dns.rdatatype
import dns_sprockets_lib.dnssec_nsecx as nsecx
def test_encode_salt():
tests = [
(None, None),
(1, None),
(b'', ''),
(b'1', '31'),
(b'a', '61'),
(b'Testing', '54657374696e67')]
for test in tests:
print(test)
assert nsecx.encode_salt(test[0]) == test[1]
def test_decode_salt():
tests = [
(None, None),
(1, None),
('', b''),
('1', None),
('31', b'1'),
('54657374696e67', b'Testing'),
('54657374696E67', b'Testing')]
for test in tests:
print(test)
assert nsecx.decode_salt(test[0]) == test[1]
def test_hash_nsec3_name():
tests = [
(None, '7f1962f2', 1, 15, None),
(1, '7f1962f2', 1, 15, None),
('', '7f1962f2', 1, 15, 'lsa969sfkmlb6c92ea510pohd54douqu'),
('.', '7f1962f2', 1, 15, 'lsa969sfkmlb6c92ea510pohd54douqu'),
('001.cst.net.', '7f1962f2', 1, 15, 'uqml1am96tftfmlkagtbs82isr050sh0'),
('001.cst.net.', '7F1962F2', 1, 15, 'uqml1am96tftfmlkagtbs82isr050sh0'),
('001.001.cst.net.', '7F1962F2', 1, 15, '06es9cggdrorfdd4ns9ahocaikldrrp8'),
('test.001.cst.net.', '7F1962F2', 1, 15, 'kqgpu8i0ai43nem212bd0079j5si5r3k'),
('test2.001.cst.net.', '7F1962F2', 1, 15, 'al016abkh6lvdig6503fs92kdmotqh4v'),
('example', 'aabbccdd', 1, 12, '0p9mhaveqvm6t7vbl5lop2u3t2rp3tom'),
('a.example', 'aabbccdd', 1, 12, '35mthgpgcu1qg68fab165klnsnk3dpvl'),
('ai.example', 'aabbccdd', 1, 12, 'gjeqe526plbf1g8mklp59enfd789njgi'),
('ns1.example', 'aabbccdd', 1, 12, '2t7b4g4vsa5smi47k61mv5bv1a22bojr'),
('ns2.example', 'aabbccdd', 1, 12, 'q04jkcevqvmu85r014c7dkba38o0ji5r'),
('w.example', 'aabbccdd', 1, 12, 'k8udemvp1j2f7eg6jebps17vp3n8i58h'),
('*.w.example', 'aabbccdd', 1, 12, 'r53bq7cc2uvmubfu5ocmm6pers9tk9en'),
('x.w.example', 'aabbccdd', 1, 12, 'b4um86eghhds6nea196smvmlo4ors995'),
('y.w.example', 'aabbccdd', 1, 12, 'ji6neoaepv8b5o6k4ev33abha8ht9fgc'),
('x.y.w.example', 'aabbccdd', 1, 12, '2vptu5timamqttgl4luu9kg21e0aor3s'),
('xx.example', 'aabbccdd', 1, 12, 't644ebqk9bibcna874givr6joj62mlhv'),
('2t7b4g4vsa5smi47k61mv5bv1a22bojr.example', 'aabbccdd', 1, 12,
'kohar7mbb8dc2ce8a9qvl8hon4k53uhi')]
for test in tests:
print(test)
assert nsecx.hash_nsec3_name(test[0], test[1], test[2], test[3], False) == test[4]
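# The expected digests above follow the RFC 5155 hashing scheme. The sketch below is a
# minimal reimplementation for illustration only (it assumes hash algorithm 1 = SHA-1 and
# ignores the extra boolean flag passed to ``nsecx.hash_nsec3_name``); the module's own
# implementation is what the test actually exercises.
def _example_nsec3_hash(name, salt_hex, iterations):
    """Hash a dotted owner name the NSEC3 way; returns a lowercase base32hex string."""
    import base64
    import hashlib
    # Canonical wire format: length-prefixed lowercase labels, terminated by a zero byte.
    labels = [label for label in name.lower().rstrip('.').split('.') if label]
    wire = b''.join(bytes([len(label)]) + label.encode('ascii') for label in labels) + b'\x00'
    salt = bytes.fromhex(salt_hex)
    digest = hashlib.sha1(wire + salt).digest()
    for _ in range(iterations):
        digest = hashlib.sha1(digest + salt).digest()
    # Translate standard base32 into the "extended hex" alphabet used by NSEC3 records.
    encoded = base64.b32encode(digest).decode('ascii')
    table = str.maketrans('ABCDEFGHIJKLMNOPQRSTUVWXYZ234567',
                          '0123456789ABCDEFGHIJKLMNOPQRSTUV')
    return encoded.translate(table).lower()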
def test__windows_covers():
tests = [
([(0, None)], dns.rdatatype.A, False),
([(0, bytearray(b'\x00'))], dns.rdatatype.A, False),
([(0, bytearray(b'\x40'))], dns.rdatatype.A, True),
([(0, bytearray(b'\x40'))], dns.rdatatype.NS, False),
([(1, bytearray(b'\x40'))], dns.rdatatype.A, False),
([(1, bytearray(b'\x40'))], dns.rdatatype.CAA, True),
([(0, bytearray(b'\x00\x08'))], dns.rdatatype.PTR, True),
([(0, bytearray(
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08'))],
dns.rdatatype.AXFR, True)]
for test in tests:
print(test)
assert nsecx._windows_covers(test[0], test[1]) == test[2]
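# For reference, a minimal sketch (ours, separate from ``nsecx._windows_covers``) of how an
# NSEC/NSEC3 type-bitmap window covers an RR type: each window spans 256 type codes, the
# window number is ``rdtype >> 8`` and the bit index is ``rdtype & 0xFF``, counted from the
# most significant bit of the bitmap bytes.
def _example_window_covers(windows, rdtype):
    window_number, bit = rdtype >> 8, rdtype & 0xFF
    for number, bitmap in windows:
        if number == window_number and bitmap is not None and len(bitmap) > bit // 8:
            return bool(bitmap[bit // 8] & (0x80 >> (bit % 8)))
    return False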
def test__windows_get_covered_types():
tests = [
([(0, None)], []),
([(0, bytearray(b'\x00'))], []),
([(0, bytearray(b'\x40'))], [dns.rdatatype.A]),
([(0, bytearray(b'\x60'))], [dns.rdatatype.A, dns.rdatatype.NS]),
([(0, bytearray(b'\x64'))], [
dns.rdatatype.A, dns.rdatatype.NS, dns.rdatatype.CNAME]),
([(1, bytearray(b'\x40'))], [dns.rdatatype.CAA]),
([(0, bytearray(b'\x40')),
(1, bytearray(b'\x40'))], [dns.rdatatype.A, dns.rdatatype.CAA]),
([(0, bytearray(b'\x40\x08')),
(1, bytearray(b'\x40'))], [
dns.rdatatype.A, dns.rdatatype.CAA, dns.rdatatype.PTR])]
for test in tests:
print(test)
assert sorted(nsecx._windows_get_covered_types(test[0])) == sorted(test[1])
# end of file
|
the-stack_0_14107 | from setuptools import setup
version = '0.0.0'
setup(
name = 'grid-plot',
version = version,
description = 'Plots data onto a grid.',
url = 'http://github.com/doggan/grid-plot',
license = 'MIT',
author='Shyam Guthikonda',
packages = ['grid_plot'],
install_requires = [
'Pillow == 2.7.0',
],
entry_points = """
[console_scripts]
grid-plot = grid_plot.command_line:main
"""
)
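# Usage after installation (illustrative; the actual CLI flags live in
# grid_plot/command_line.py):
#   pip install .
#   grid-plot ...   # dispatched to grid_plot.command_line:main via the console_scripts entry point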
|
the-stack_0_14108 | from datetime import datetime
import numpy as np
import csv
from utils import total_gini
import tensorflow.compat.v1 as tf
import json
from pgd_attack import LinfPGDAttack
from utils_MLP_model import init_MLP_vars
with open('config.json') as config_file:
config = json.load(config_file)
w_vars, b_vars, stable_var, sparse_vars = init_MLP_vars()
def print_metrics(sess, model, train_dict, nat_dict, val_dict, val_dict_distil, test_dict, ii, args, summary_writer, dict_exp, experiment, global_step):
print('Step {}: ({})'.format(ii, datetime.now()))
try:
nat_acc = sess.run(model.accuracy, feed_dict=nat_dict)
print(' batch training nat accuracy {:.4}'.format(nat_acc * 100))
nat_xent = sess.run(model.xent, feed_dict=nat_dict)
print(' Nat Xent {:.4}'.format(nat_xent))
stable_xent = sess.run(model.stable_xent, feed_dict=nat_dict)
robust_xent = sess.run(model.robust_xent, feed_dict=nat_dict)
robust_stable_xent = sess.run(model.robust_stable_xent, feed_dict=nat_dict)
train_l2 = sess.run(model.l2_loss, feed_dict=nat_dict)
print(' Batch Training L2 Loss {:.4}'.format(train_l2))
except:
train_distil_loss = sess.run(model.distil_loss, feed_dict=nat_dict)
print(' Batch Training Distillation L2 Teacher Student Loss {:.4}'.format(train_distil_loss))
train_normal_acc = sess.run(model.accuracy, feed_dict=train_dict)
print(' Training accuracy {:.4}'.format(train_normal_acc * 100))
train_l2 = sess.run(model.l2_loss, feed_dict=train_dict)
print(' Training L2 Loss Ground Truth {:.4}'.format(train_l2))
summary3 = tf.Summary(value=[tf.Summary.Value(tag='TrainL2', simple_value=train_l2), ])
val_l2 = sess.run(model.l2_loss, feed_dict=val_dict)
print(' Val L2 Loss Ground Truth {:.4}'.format(val_l2))
test_l2 = sess.run(model.l2_loss, feed_dict=test_dict)
print(' Test L2 Loss Ground Truth {:.4}'.format(test_l2))
val_acc = sess.run(model.accuracy, feed_dict=val_dict)
print(' validation nat accuracy {:.4}'.format(val_acc * 100))
if args.n_distillations > 1:
train_l2 = sess.run(model.distil_loss, feed_dict=nat_dict)
print(' Training L2 Loss vs Teacher {:.4}'.format(train_l2))
val_distil_loss = sess.run(model.distil_loss, feed_dict=val_dict_distil)
print(' Validation L2 Loss Ground Truth {:.4}'.format(val_distil_loss))
summary7 = tf.Summary(value=[tf.Summary.Value(tag='ValTeacherL2', simple_value=val_distil_loss), ])
summary_writer.add_summary(summary7, global_step.eval(sess))
# summary1 = tf.Summary(value=[tf.Summary.Value(tag='TrainAcc', simple_value=train_normal_acc),])
summary2 = tf.Summary(value=[tf.Summary.Value(tag='ValAcc', simple_value=val_acc),])
summary4 = tf.Summary(value=[tf.Summary.Value(tag='ValL2', simple_value=val_l2), ])
summary6 = tf.Summary(value=[tf.Summary.Value(tag='TrainTeacherL2', simple_value=train_l2), ])
# summary_writer.add_summary(summary1, global_step.eval(sess))
summary_writer.add_summary(summary2, global_step.eval(sess))
# summary_writer.add_summary(summary3, global_step.eval(sess))
summary_writer.add_summary(summary4, global_step.eval(sess))
summary_writer.add_summary(summary6, global_step.eval(sess))
#summary_writer.add_text('args', str(args), global_step.eval(sess))
# summary5 = sess.run(model.summary, feed_dict=test_dict)
# summary_writer.add_summary(summary5, global_step.eval(sess))
test_acc = sess.run(model.accuracy, feed_dict=test_dict)
print(' Test accuracy {:.4}'.format(test_acc * 100))
# summary_writer.add_summary(test_acc, global_step.eval(sess))
if args.is_stable:
stable_var = sess.run(getattr(model, config['stability_variable']), feed_dict=nat_dict)
        print(' Stability Variable {:.4}'.format(stable_var))
        print(' Stable Xent {:.4}'.format(stable_xent))
if args.rho > 0 :
print(' Robust Xent {:.4}'.format(robust_xent))
if args.is_stable:
print(' Robust Stable Xent {:.4}'.format(robust_stable_xent))
for i in range(len(w_vars)):
if args.l0 > 0:
print(' Killed neurons - ' + w_vars[i], dict_exp[w_vars[i] + '_killed_neurons'][experiment])
print(' Killed input neurons - ' + w_vars[i], dict_exp[w_vars[i] + '_killed_input_features'][experiment])
print(' Non zero features percentage - ' + w_vars[i] , dict_exp[w_vars[i] + '_nonzero'][experiment])
regularizer = sess.run(model.regularizer, feed_dict=nat_dict)
print(' Regularizer', regularizer)
# try:
# summary = tf.Summary(value=[
# tf.Summary.Value(tag='Train Xent', simple_value= nat_xent),
# # tf.Summary.Value(tag='Val Acc', simple_value= val_acc),
# tf.Summary.Value(tag='Train Acc', simple_value= nat_acc),
# tf.Summary.Value(tag='Train Stable Xent', simple_value= stable_xent),
# tf.Summary.Value(tag='Train Robust Stable Xent', simple_value= robust_stable_xent),
# tf.Summary.Value(tag='Test Acc', simple_value= test_acc)])
# except:
# pass
for i in range(len(w_vars)):
if args.l0 > 0:
summary_sparse = tf.Summary(value=[
tf.Summary.Value(tag=w_vars[i] + '_killed_neurons', simple_value=dict_exp[w_vars[i] + '_killed_neurons'][experiment]),
tf.Summary.Value(tag=w_vars[i] + '_killed_inputs', simple_value=dict_exp[w_vars[i] + '_killed_input_features'][experiment]),
tf.Summary.Value(tag=w_vars[i] + '_nonzero', simple_value=dict_exp[w_vars[i] + '_nonzero'][experiment])])
summary_writer.add_summary(summary_sparse, global_step.eval(sess))
def update_dict_output(dict_exp, experiment, sess, test_acc, model, test_dict, num_iters):
dict_exp['test_accs'][experiment] = test_acc*100
dict_exp['iterations'][experiment] = num_iters
return dict_exp
def update_adv_acc(args, best_model, x_test, y_test, experiment, dict_exp):
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
clip = True
if "uci" in args.data_set:
clip = False
for rho_test in args.robust_test:
attack = LinfPGDAttack(best_model, rho_test, config['k'], config['a'],
config['random_start'], config['loss_func'], clip)
x_test_adv = attack.perturb(x_test, y_test, sess)
adv_dict = {best_model.x_input: x_test_adv, best_model.y_input: y_test}
dict_exp['adv_test_accs'][rho_test][experiment] = sess.run(best_model.accuracy, feed_dict=adv_dict)
def print_stability_measures(dict_exp, args, num_experiments, batch_size, subset_ratio, tot_test_acc, tot_train_acc, max_train_steps, network_path):
avg_test_acc = tot_test_acc / num_experiments
avg_train_acc = tot_train_acc / num_experiments
std = np.array([float(k) for k in dict_exp['test_accs']]).std()
logit_stability = np.mean(np.std(dict_exp['logits_acc'], axis=0), axis=0)
gini_stability = total_gini(dict_exp['preds'].transpose())
print(' Average training accuracy {:.4}'.format(avg_train_acc * 100))
print(' Average testing accuracy {:.4}'.format(avg_test_acc * 100))
print(' Individual accuracies: \n', dict_exp['test_accs'])
print(' Adv testing accuracies', dict_exp['adv_test_accs'])
print(' Stability values', dict_exp[stable_var])
print(' Test Accuracy std {:.2}'.format(np.array([float(k) for k in dict_exp['test_accs']]).std()))
print(" Logits std", np.mean(np.mean(np.std(dict_exp['logits_acc'], axis=0), axis=0)))
print(" Gini stability", gini_stability)
weights_stability = print_layer_stability_ff(dict_exp, num_experiments)
weights_nonzero = [np.mean(dict_exp[w_vars[i]]) for i in range(len(w_vars))]
for i in range(len(w_vars)):
print(w_vars[i] + ' non zero percentage', weights_nonzero[i])
file = open(str('results_' + network_path + args.data_set + '.csv'), 'a+', newline='')
file_read = open(str('results_' + network_path + args.data_set + '.csv'), "r")
one_char = file_read.read(1)
writer = csv.writer(file)
if not len(one_char):
headers = []
headers += ['num_experiments', 'batch_size', 'subset_ratio', 'max_train_steps']
headers += ['test accuracy '+ str(i) for i in range(num_experiments)]
for key in dict_exp:
if key not in w_vars+ b_vars+ [stable_var]+ sparse_vars + ['adv_test_accs', 'preds']:
headers += ['Avg '+str(key)]
headers += ['Avg test adversarial acc for rho = '+ str(rho) for rho in args.robust_test]
headers += ['is_stable', 'rho', 'train_size', 'l2', 'l0', 'network_size', 'learning rate']
headers += [w_vars[i] + ' Nonzero weights' for i in range(len(w_vars))]
headers += [w_vars[i] + ' Stability' for i in range(len(w_vars))]
headers += ['std', 'logit_stability', 'gini_stability' ]
writer.writerow(headers)
with file:
cols = []
cols += [num_experiments, batch_size, subset_ratio, max_train_steps]
cols += [dict_exp['test_accs'][i] for i in range(num_experiments)]
for key in dict_exp:
if key not in w_vars+ b_vars+ [stable_var]+ sparse_vars + ['adv_test_accs', 'preds']:
cols += [np.mean(dict_exp[key])]
cols += [np.mean(dict_exp['adv_test_accs'][rho]) for rho in args.robust_test]
cols += [args.is_stable, args.rho, args.train_size, args.l2, args.l0, args.network_size, args.lr]
cols += weights_nonzero
cols += weights_stability
cols += [std, logit_stability, gini_stability ]
print(cols)
writer.writerow(cols)
def print_layer_stability_ff(dict_exp, num_experiments):
stabilities = []
for i in range(len(w_vars)):
w_i = [dict_exp[w_vars[i]][experiment].reshape(-1) for experiment in range(num_experiments)]
w_stability = np.mean(np.std(w_i, axis=0), axis=0)
print(w_vars[i] + " std", w_stability)
stabilities = stabilities + [w_stability]
return stabilities
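# Reading the per-layer numbers above (a sketch; ``w_run_*`` are hypothetical arrays): for a
# weight matrix saved from N experiments, np.std over the experiment axis gives a per-weight
# spread, and its mean is the single stability scalar reported per layer, e.g.
#   w_runs = np.stack([w_run_0.reshape(-1), w_run_1.reshape(-1), w_run_2.reshape(-1)])
#   layer_stability = np.mean(np.std(w_runs, axis=0))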
|
the-stack_0_14113 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from collections import OrderedDict
import logging
import threading
from ceilometerclient import client as ceilometer_client
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon.utils.memoized import memoized # noqa
from openstack_dashboard.api import base
from openstack_dashboard.api import keystone
from openstack_dashboard.api import nova
LOG = logging.getLogger(__name__)
def get_flavor_names(request):
# TODO(lsmola) The flavors can be set per project,
# so it should show only valid ones.
try:
flavors = nova.flavor_list(request, None)
return [f.name for f in flavors]
except Exception:
return ['m1.tiny', 'm1.small', 'm1.medium',
'm1.large', 'm1.xlarge']
def is_iterable(var):
"""Return True if the given is list or tuple."""
return (isinstance(var, (list, tuple)) or
issubclass(var.__class__, (list, tuple)))
def make_query(user_id=None, tenant_id=None, resource_id=None,
user_ids=None, tenant_ids=None, resource_ids=None):
"""Returns query built from given parameters.
    This query can then be used for querying resources, meters and
statistics.
:Parameters:
- `user_id`: user_id, has a priority over list of ids
- `tenant_id`: tenant_id, has a priority over list of ids
- `resource_id`: resource_id, has a priority over list of ids
- `user_ids`: list of user_ids
- `tenant_ids`: list of tenant_ids
- `resource_ids`: list of resource_ids
"""
user_ids = user_ids or []
tenant_ids = tenant_ids or []
resource_ids = resource_ids or []
query = []
if user_id:
user_ids = [user_id]
for u_id in user_ids:
query.append({"field": "user_id", "op": "eq", "value": u_id})
if tenant_id:
tenant_ids = [tenant_id]
for t_id in tenant_ids:
query.append({"field": "project_id", "op": "eq", "value": t_id})
if resource_id:
resource_ids = [resource_id]
for r_id in resource_ids:
query.append({"field": "resource_id", "op": "eq", "value": r_id})
return query
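# Example (illustrative): make_query(tenant_id="t1", resource_ids=["r1", "r2"]) returns
#   [{"field": "project_id", "op": "eq", "value": "t1"},
#    {"field": "resource_id", "op": "eq", "value": "r1"},
#    {"field": "resource_id", "op": "eq", "value": "r2"}]
# which can be passed directly as the ``q`` argument of the Ceilometer client calls below.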
class Meter(base.APIResourceWrapper):
"""Represents one Ceilometer meter."""
_attrs = ['name', 'type', 'unit', 'resource_id', 'user_id', 'project_id']
def __init__(self, apiresource):
super(Meter, self).__init__(apiresource)
self._label = self.name
self._description = ""
def augment(self, label=None, description=None):
if label:
self._label = label
if description:
self._description = description
@property
def description(self):
return self._description
@property
def label(self):
return self._label
class Resource(base.APIResourceWrapper):
"""Represents one Ceilometer resource."""
_attrs = ['resource_id', 'source', 'user_id', 'project_id', 'metadata',
'links']
def __init__(self, apiresource, ceilometer_usage=None):
super(Resource, self).__init__(apiresource)
# Save empty strings to IDs rather than None, so it gets
# serialized correctly. We don't want 'None' strings.
self.project_id = self.project_id or ""
self.user_id = self.user_id or ""
self.resource_id = self.resource_id or ""
self._id = "%s__%s__%s" % (self.project_id,
self.user_id,
self.resource_id)
# Meters with statistics data
self._meters = {}
# TODO(lsmola) make parallel obtaining of tenant and user
# make the threading here, thread join into resource_list
if ceilometer_usage and self.project_id:
self._tenant = ceilometer_usage.get_tenant(self.project_id)
else:
self._tenant = None
if ceilometer_usage and self.user_id:
self._user = ceilometer_usage.get_user(self.user_id)
else:
self._user = None
self._query = make_query(tenant_id=self.project_id,
user_id=self.user_id,
resource_id=self.resource_id)
@property
def name(self):
name = self.metadata.get("name", None)
display_name = self.metadata.get("display_name", None)
return name or display_name or ""
@property
def id(self):
return self._id
@property
def tenant(self):
return self._tenant
@property
def user(self):
return self._user
@property
def resource(self):
return self.resource_id
@property
def query(self):
return self._query
@property
def meters(self):
return self._meters
def get_meter(self, meter_name):
return self._meters.get(meter_name, None)
def set_meter(self, meter_name, value):
self._meters[meter_name] = value
class ResourceAggregate(Resource):
"""Represents aggregate of more resources together.
Aggregate of resources can be obtained by specifying
multiple ids in one parameter or by not specifying
one parameter.
It can also be specified by query directly.
Example:
We can obtain an aggregate of resources by specifying
multiple resource_ids in resource_id parameter in init.
Or we can specify only tenant_id, which will return
all resources of that tenant.
"""
def __init__(self, tenant_id=None, user_id=None, resource_id=None,
tenant_ids=None, user_ids=None, resource_ids=None,
ceilometer_usage=None, query=None, identifier=None):
self._id = identifier
self.tenant_id = None
self.user_id = None
self.resource_id = None
# Meters with statistics data
self._meters = {}
if query:
self._query = query
else:
# TODO(lsmola) make parallel obtaining of tenant and user
# make the threading here, thread join into resource_list
if ceilometer_usage and tenant_id:
self.tenant_id = tenant_id
self._tenant = ceilometer_usage.get_tenant(tenant_id)
else:
self._tenant = None
if ceilometer_usage and user_id:
self.user_id = user_id
self._user = ceilometer_usage.get_user(user_id)
else:
self._user = None
if resource_id:
self.resource_id = resource_id
self._query = make_query(tenant_id=tenant_id, user_id=user_id,
resource_id=resource_id,
tenant_ids=tenant_ids,
user_ids=user_ids,
resource_ids=resource_ids)
@property
def id(self):
return self._id
class Sample(base.APIResourceWrapper):
"""Represents one Ceilometer sample."""
_attrs = ['counter_name', 'user_id', 'resource_id', 'timestamp',
'resource_metadata', 'source', 'counter_unit', 'counter_volume',
'project_id', 'counter_type', 'resource_metadata']
@property
def instance(self):
display_name = self.resource_metadata.get('display_name', None)
instance_id = self.resource_metadata.get('instance_id', None)
return display_name or instance_id
@property
def name(self):
name = self.resource_metadata.get("name", None)
display_name = self.resource_metadata.get("display_name", None)
return name or display_name or ""
class Statistic(base.APIResourceWrapper):
"""Represents one Ceilometer statistic."""
_attrs = ['period', 'period_start', 'period_end',
'count', 'min', 'max', 'sum', 'avg',
'duration', 'duration_start', 'duration_end']
@memoized
def ceilometerclient(request):
"""Initialization of Ceilometer client."""
endpoint = base.url_for(request, 'metering')
insecure = getattr(settings, 'OPENSTACK_SSL_NO_VERIFY', False)
cacert = getattr(settings, 'OPENSTACK_SSL_CACERT', None)
return ceilometer_client.Client('2', endpoint,
token=(lambda: request.user.token.id),
insecure=insecure,
cacert=cacert)
def resource_list(request, query=None, ceilometer_usage_object=None):
"""List the resources."""
resources = ceilometerclient(request).resources.list(q=query)
return [Resource(r, ceilometer_usage_object) for r in resources]
def sample_list(request, meter_name, query=None, limit=None):
"""List the samples for this meters."""
samples = ceilometerclient(request).samples.list(meter_name=meter_name,
q=query, limit=limit)
return [Sample(s) for s in samples]
def meter_list(request, query=None):
"""List the user's meters."""
meters = ceilometerclient(request).meters.list(query)
return [Meter(m) for m in meters]
def statistic_list(request, meter_name, query=None, period=None):
"""List of statistics."""
statistics = ceilometerclient(request).\
statistics.list(meter_name=meter_name, q=query, period=period)
return [Statistic(s) for s in statistics]
class ThreadedUpdateResourceWithStatistics(threading.Thread):
"""Multithread wrapper for update_with_statistics method of
resource_usage.
A join logic is placed in process_list class method. All resources
will have its statistics attribute filled in separate threads.
The resource_usage object is shared between threads. Each thread is
updating one Resource.
:Parameters:
- `resource`: Resource or ResourceAggregate object, that will
be filled by statistic data.
- `resources`: List of Resource or ResourceAggregate object,
that will be filled by statistic data.
- `resource_usage`: Wrapping resource usage object, that holds
all statistics data.
- `meter_names`: List of meter names of the statistics we want.
- `period`: In seconds. If no period is given, only one aggregate
statistic is returned. If given, a faceted result will be
returned, divided into given periods. Periods with no
data are ignored.
- `stats_attr`: String representing the attribute name of the stats.
E.g. (avg, max, min...) If None is given, whole
statistic object is returned,
- `additional_query`: Additional query for the statistics.
E.g. timespan, etc.
"""
# TODO(lsmola) Can be removed once Ceilometer supports sample-api
# and group-by, so all of this optimization will not be necessary.
# It is planned somewhere to I.
def __init__(self, resource_usage, resource, meter_names=None,
period=None, filter_func=None, stats_attr=None,
additional_query=None):
super(ThreadedUpdateResourceWithStatistics, self).__init__()
self.resource_usage = resource_usage
self.resource = resource
self.meter_names = meter_names
self.period = period
self.stats_attr = stats_attr
self.additional_query = additional_query
def run(self):
# Run the job
self.resource_usage.update_with_statistics(
self.resource,
meter_names=self.meter_names, period=self.period,
stats_attr=self.stats_attr, additional_query=self.additional_query)
@classmethod
def process_list(cls, resource_usage, resources, meter_names=None,
period=None, filter_func=None, stats_attr=None,
additional_query=None):
threads = []
for resource in resources:
# add statistics data into resource
thread = cls(resource_usage, resource, meter_names=meter_names,
period=period, stats_attr=stats_attr,
additional_query=additional_query)
thread.start()
threads.append(thread)
for thread in threads:
thread.join()
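    # Illustrative call (a sketch; ``ceilometer_usage`` and ``resources`` are assumed to
    # exist): fill every resource with the hourly average of two meters, one thread each:
    #   ThreadedUpdateResourceWithStatistics.process_list(
    #       ceilometer_usage, resources,
    #       meter_names=["cpu_util", "memory.usage"], period=3600, stats_attr="avg")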
class CeilometerUsage(object):
"""Represents wrapper of any Ceilometer queries.
One instance of this class should be shared between resources
as this class provides a place where users and tenants are
cached. So there are no duplicate queries to API.
This class also wraps Ceilometer API calls and provides parallel
HTTP calls to API.
    This class should also serve as a reasonable abstraction that will
    cover a large amount of optimization coming from optimization of the
    Ceilometer service, without changing the interface.
"""
def __init__(self, request):
self._request = request
# Cached users and tenants.
self._users = {}
self._tenants = {}
def get_user(self, user_id):
"""Returns user fetched from API.
Caching the result, so it doesn't contact API twice with the
same query.
"""
user = self._users.get(user_id, None)
if not user:
user = keystone.user_get(self._request, user_id)
# caching the user, for later use
self._users[user_id] = user
return user
def preload_all_users(self):
"""Preloads all users into dictionary.
It's more effective to preload all users, rather than fetching many
users by separate API get calls.
"""
users = keystone.user_list(self._request)
# Cache all users on right indexes, this is more effective than to
# obtain large number of users one by one by keystone.user_get
for u in users:
self._users[u.id] = u
def get_tenant(self, tenant_id):
"""Returns tenant fetched from API.
Caching the result, so it doesn't contact API twice with the
same query.
"""
tenant = self._tenants.get(tenant_id, None)
if not tenant:
tenant = keystone.tenant_get(self._request, tenant_id)
# caching the tenant for later use
self._tenants[tenant_id] = tenant
return tenant
def preload_all_tenants(self):
"""Preloads all tenants into dictionary.
It's more effective to preload all tenants, rather than fetching each
tenant by separate API get calls.
"""
tenants, more = keystone.tenant_list(self._request)
# Cache all tenants on right indexes, this is more effective than to
# obtain large number of tenants one by one by keystone.tenant_get
for t in tenants:
self._tenants[t.id] = t
def global_data_get(self, used_cls=None, query=None,
with_statistics=False, additional_query=None,
with_users_and_tenants=True):
"""Obtaining a resources for table view.
It obtains resources with statistics data according to declaration
in used_cls class.
:Parameters:
        - `used_cls`: Class wrapper for usage data. It acts as wrapper for
settings needed. See the call of this method for
details.
- `query`: Explicit query definition for fetching the resources. If
no query is provided, it takes a default_query from
used_cls. If no default query is provided, it fetches
all the resources and filters them by meters defined
in used_cls.
        - `with_statistics`: Define whether statistics data from the meters
defined in used_cls should be fetched.
Can be used to first obtain only the pure
resources, then with the statistics data by
AJAX.
- `additional_query`: Additional query for the statistics.
E.g. timespan, etc.
- `with_users_and_tenants`: If true a user and a tenant object will
be added to each resource object.
"""
default_query = used_cls.default_query
query = query or default_query
filter_func = None
def filter_resources(resource):
"""Method for filtering resources by their links.rel attr.
The links.rel attributes contain all meters the resource has.
"""
for link in resource.links:
if link['rel'] in used_cls.meters:
return True
return False
if not query:
# Not all resource types can be obtained by query, if there is not
# a query, we are filtering all resources by this function.
filter_func = filter_resources
if with_statistics:
# Will add statistic data into resources.
resources = self.resources_with_statistics(
query,
used_cls.meters,
filter_func=filter_func,
stats_attr=used_cls.stats_attr,
additional_query=additional_query,
with_users_and_tenants=with_users_and_tenants)
else:
# Will load only resources without statistical data.
resources = self.resources(
query, filter_func=filter_func,
with_users_and_tenants=with_users_and_tenants)
return [used_cls(resource) for resource in resources]
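    # Illustrative shape of a ``used_cls`` wrapper (hypothetical class, not defined in this
    # module): global_data_get only relies on these class attributes plus a one-argument
    # constructor taking the resource.
    #   class GlobalDiskUsage(object):
    #       meters = ["disk.read.bytes", "disk.write.bytes"]
    #       default_query = []          # no query: fall back to filtering by meters
    #       stats_attr = "avg"
    #       def __init__(self, resource):
    #           self.resource = resource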
def query_from_object_id(self, object_id):
"""Obtaining a query from resource id.
Query can be then used to identify a resource in resources or meters
API calls. ID is being built in the Resource initializer, or returned
by Datatable into UpdateRow functionality.
"""
try:
tenant_id, user_id, resource_id = object_id.split("__")
except ValueError:
return []
return make_query(tenant_id=tenant_id, user_id=user_id,
resource_id=resource_id)
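    # Example (illustrative): an object_id of "tenant1__user1__resource1" is split back into
    # its three parts and handed to make_query(); a malformed id simply yields [].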
def update_with_statistics(self, resource, meter_names=None, period=None,
stats_attr=None, additional_query=None):
"""Adding statistical data into one Resource or ResourceAggregate.
        It adds the statistics of each of the meter_names to the resource
        attributes. The attribute name is the meter name with '.' replaced by '_'.
:Parameters:
- `resource`: Resource or ResourceAggregate object, that will
be filled by statistic data.
- `meter_names`: List of meter names of which we want the
statistics.
- `period`: In seconds. If no period is given, only one aggregate
            statistic is returned. If given, a faceted result will be
            returned, divided into given periods. Periods with no
data are ignored.
- `stats_attr`: String representing the specific name of the stats.
E.g. (avg, max, min...) If defined, meter attribute
will contain just the one value. If None is given,
meter attribute will contain the whole Statistic
object.
- `additional_query`: Additional query for the statistics.
E.g. timespan, etc.
"""
if not meter_names:
raise ValueError("meter_names and resources must be defined to be "
"able to obtain the statistics.")
# query for identifying one resource in meters
query = resource.query
if additional_query:
if not is_iterable(additional_query):
raise ValueError("Additional query must be list of"
" conditions. See the docs for format.")
query = query + additional_query
# TODO(lsmola) thread for each meter will be probably overkill
# but I should test lets say thread pool with 100 of threads
# and apply it only to this code.
# Though I do expect Ceilometer will support bulk requests,
# so all of this optimization will not be necessary.
for meter in meter_names:
statistics = statistic_list(self._request, meter,
query=query, period=period)
meter = meter.replace(".", "_")
if statistics:
if stats_attr:
# I want to load only a specific attribute
resource.set_meter(
meter,
getattr(statistics[0], stats_attr, None))
else:
# I want a dictionary of all statistics
resource.set_meter(meter, statistics)
else:
resource.set_meter(meter, None)
return resource
def resources(self, query=None, filter_func=None,
with_users_and_tenants=False):
"""Obtaining resources with the query or filter_func.
Obtains resources and also fetch tenants and users associated
with those resources if with_users_and_tenants flag is true.
:Parameters:
- `query`: Query for fetching the Ceilometer Resources.
- `filter_func`: Callable for filtering of the obtained
resources.
- `with_users_and_tenants`: If true a user and a tenant object will
be added to each resource object.
"""
if with_users_and_tenants:
ceilometer_usage_object = self
else:
ceilometer_usage_object = None
resources = resource_list(
self._request,
query=query, ceilometer_usage_object=ceilometer_usage_object)
if filter_func:
resources = [resource for resource in resources if
filter_func(resource)]
return resources
def resources_with_statistics(self, query=None, meter_names=None,
period=None, filter_func=None,
stats_attr=None, additional_query=None,
with_users_and_tenants=False):
"""Obtaining resources with statistics data inside.
:Parameters:
- `query`: Query for fetching the Ceilometer Resources.
- `filter_func`: Callable for filtering of the obtained
resources.
- `meter_names`: List of meter names of which we want the
statistics.
- `period`: In seconds. If no period is given, only one aggregate
statistic is returned. If given, a faceted result will
be returned, divided into given periods. Periods with
no data are ignored.
- `stats_attr`: String representing the specific name of the stats.
E.g. (avg, max, min...) If defined, meter attribute
will contain just the one value. If None is given,
meter attribute will contain the whole Statistic
object.
- `additional_query`: Additional query for the statistics.
E.g. timespan, etc.
- `with_users_and_tenants`: If true a user and a tenant object will
be added to each resource object.
"""
resources = self.resources(
query, filter_func=filter_func,
with_users_and_tenants=with_users_and_tenants)
ThreadedUpdateResourceWithStatistics.process_list(
self, resources,
meter_names=meter_names, period=period, stats_attr=stats_attr,
additional_query=additional_query)
return resources
def resource_aggregates(self, queries=None):
"""Obtaining resource aggregates with queries.
        Representing a resource aggregate by a query is the most general way
        to obtain resource aggregates.
:Parameters:
- `queries`: Dictionary of named queries that defines a bulk of
resource aggregates.
"""
resource_aggregates = []
for identifier, query in queries.items():
resource_aggregates.append(ResourceAggregate(query=query,
ceilometer_usage=None,
identifier=identifier))
return resource_aggregates
def resource_aggregates_with_statistics(self, queries=None,
meter_names=None, period=None,
filter_func=None, stats_attr=None,
additional_query=None):
"""Obtaining resource aggregates with statistics data inside.
:Parameters:
- `queries`: Dictionary of named queries that defines a bulk of
resource aggregates.
- `meter_names`: List of meter names of which we want the
statistics.
- `period`: In seconds. If no period is given, only one aggregate
statistic is returned. If given, a faceted result will
be returned, divided into given periods. Periods with
no data are ignored.
- `stats_attr`: String representing the specific name of the stats.
E.g. (avg, max, min...) If defined, meter attribute
will contain just the one value. If None is given,
meter attribute will contain the whole Statistic
object.
- `additional_query`: Additional query for the statistics.
E.g. timespan, etc.
"""
resource_aggregates = self.resource_aggregates(queries)
ThreadedUpdateResourceWithStatistics.process_list(
self,
resource_aggregates, meter_names=meter_names, period=period,
stats_attr=stats_attr, additional_query=additional_query)
return resource_aggregates
def diff_lists(a, b):
if not a:
return []
elif not b:
return a
else:
return list(set(a) - set(b))
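# Example (illustrative): diff_lists(["cpu", "memory"], ["memory"]) -> ["cpu"]. Note the
# result comes from a set difference, so the ordering of the first list is not preserved.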
class Meters(object):
"""Class for listing of available meters.
    It lists the meters defined in this class that are available
    in the Ceilometer meter list.
    It also stores information that is not available in Ceilometer, i.e.
    label and description.
"""
def __init__(self, request=None, ceilometer_meter_list=None):
# Storing the request.
self._request = request
# Storing the Ceilometer meter list
if ceilometer_meter_list:
self._ceilometer_meter_list = ceilometer_meter_list
else:
try:
self._ceilometer_meter_list = meter_list(request)
except Exception:
self._ceilometer_meter_list = []
exceptions.handle(self._request,
_('Unable to retrieve Ceilometer meter '
'list.'))
# Storing the meters info categorized by their services.
self._nova_meters_info = self._get_nova_meters_info()
self._neutron_meters_info = self._get_neutron_meters_info()
self._glance_meters_info = self._get_glance_meters_info()
self._cinder_meters_info = self._get_cinder_meters_info()
self._swift_meters_info = self._get_swift_meters_info()
self._kwapi_meters_info = self._get_kwapi_meters_info()
self._ipmi_meters_info = self._get_ipmi_meters_info()
# Storing the meters info of all services together.
all_services_meters = (self._nova_meters_info,
self._neutron_meters_info,
self._glance_meters_info,
self._cinder_meters_info,
self._swift_meters_info,
self._kwapi_meters_info,
self._ipmi_meters_info)
self._all_meters_info = {}
for service_meters in all_services_meters:
self._all_meters_info.update(dict([(meter_name, meter_info)
for meter_name, meter_info
in service_meters.items()]))
# Here will be the cached Meter objects, that will be reused for
# repeated listing.
self._cached_meters = {}
def list_all(self, only_meters=None, except_meters=None):
"""Returns a list of meters based on the meters names.
:Parameters:
- `only_meters`: The list of meter names we want to show.
- `except_meters`: The list of meter names we don't want to show.
"""
return self._list(only_meters=only_meters,
except_meters=except_meters)
def list_nova(self, except_meters=None):
"""Returns a list of meters tied to nova.
:Parameters:
- `except_meters`: The list of meter names we don't want to show.
"""
return self._list(only_meters=self._nova_meters_info.keys(),
except_meters=except_meters)
def list_neutron(self, except_meters=None):
"""Returns a list of meters tied to neutron.
:Parameters:
- `except_meters`: The list of meter names we don't want to show.
"""
return self._list(only_meters=self._neutron_meters_info.keys(),
except_meters=except_meters)
def list_glance(self, except_meters=None):
"""Returns a list of meters tied to glance.
:Parameters:
- `except_meters`: The list of meter names we don't want to show.
"""
return self._list(only_meters=self._glance_meters_info.keys(),
except_meters=except_meters)
def list_cinder(self, except_meters=None):
"""Returns a list of meters tied to cinder.
:Parameters:
- `except_meters`: The list of meter names we don't want to show.
"""
return self._list(only_meters=self._cinder_meters_info.keys(),
except_meters=except_meters)
def list_swift(self, except_meters=None):
"""Returns a list of meters tied to swift.
:Parameters:
- `except_meters`: The list of meter names we don't want to show.
"""
return self._list(only_meters=self._swift_meters_info.keys(),
except_meters=except_meters)
def list_kwapi(self, except_meters=None):
"""Returns a list of meters tied to kwapi.
:Parameters:
- `except_meters`: The list of meter names we don't want to show.
"""
return self._list(only_meters=self._kwapi_meters_info.keys(),
except_meters=except_meters)
def list_ipmi(self, except_meters=None):
"""Returns a list of meters tied to ipmi
:Parameters:
- `except_meters`: The list of meter names we don't want to show
"""
return self._list(only_meters=self._ipmi_meters_info.keys(),
except_meters=except_meters)
def _list(self, only_meters=None, except_meters=None):
"""Returns a list of meters based on the meters names.
:Parameters:
- `only_meters`: The list of meter names we want to show.
- `except_meters`: The list of meter names we don't want to show.
"""
# Get all wanted meter names.
if only_meters:
meter_names = only_meters
else:
meter_names = [meter_name for meter_name
in self._all_meters_info.keys()]
meter_names = diff_lists(meter_names, except_meters)
# Collect meters for wanted meter names.
return self._get_meters(meter_names)
def _get_meters(self, meter_names):
"""Obtain meters based on meter_names.
The meters that do not exist in Ceilometer meter list are left out.
:Parameters:
- `meter_names`: A list of meter names we want to fetch.
"""
meters = []
for meter_name in meter_names:
meter = self._get_meter(meter_name)
if meter:
meters.append(meter)
return meters
def _get_meter(self, meter_name):
"""Obtains a meter.
Obtains meter either from cache or from Ceilometer meter list
joined with statically defined meter info like label and description.
:Parameters:
- `meter_name`: A meter name we want to fetch.
"""
meter = self._cached_meters.get(meter_name, None)
if not meter:
meter_candidates = [m for m in self._ceilometer_meter_list
if m.name == meter_name]
if meter_candidates:
meter_info = self._all_meters_info.get(meter_name, None)
if meter_info:
label = meter_info["label"]
description = meter_info["description"]
else:
label = ""
description = ""
meter = meter_candidates[0]
meter.augment(label=label, description=description)
self._cached_meters[meter_name] = meter
return meter
def _get_nova_meters_info(self):
"""Returns additional info for each meter.
That will be used for augmenting the Ceilometer meter.
"""
# TODO(lsmola) Unless the Ceilometer will provide the information
# below, I need to define it as a static here. I will be joining this
# to info that I am able to obtain from Ceilometer meters, hopefully
# some day it will be supported all.
meters_info = OrderedDict([
("instance", {
'label': '',
'description': _("Existence of instance"),
}),
("instance:<type>", {
'label': '',
'description': _("Existence of instance <type> "
"(openstack types)"),
}),
("memory", {
'label': '',
'description': _("Volume of RAM"),
}),
("memory.usage", {
'label': '',
'description': _("Volume of RAM used"),
}),
("cpu", {
'label': '',
'description': _("CPU time used"),
}),
("cpu_util", {
'label': '',
'description': _("Average CPU utilization"),
}),
("vcpus", {
'label': '',
'description': _("Number of VCPUs"),
}),
("disk.read.requests", {
'label': '',
'description': _("Number of read requests"),
}),
("disk.write.requests", {
'label': '',
'description': _("Number of write requests"),
}),
("disk.read.bytes", {
'label': '',
'description': _("Volume of reads"),
}),
("disk.write.bytes", {
'label': '',
'description': _("Volume of writes"),
}),
("disk.read.requests.rate", {
'label': '',
'description': _("Average rate of read requests"),
}),
("disk.write.requests.rate", {
'label': '',
'description': _("Average rate of write requests"),
}),
("disk.read.bytes.rate", {
'label': '',
'description': _("Average rate of reads"),
}),
("disk.write.bytes.rate", {
'label': '',
'description': _("Average volume of writes"),
}),
("disk.root.size", {
'label': '',
'description': _("Size of root disk"),
}),
("disk.ephemeral.size", {
'label': '',
'description': _("Size of ephemeral disk"),
}),
("network.incoming.bytes", {
'label': '',
'description': _("Number of incoming bytes "
"on the network for a VM interface"),
}),
("network.outgoing.bytes", {
'label': '',
'description': _("Number of outgoing bytes "
"on the network for a VM interface"),
}),
("network.incoming.packets", {
'label': '',
'description': _("Number of incoming "
"packets for a VM interface"),
}),
("network.outgoing.packets", {
'label': '',
'description': _("Number of outgoing "
"packets for a VM interface"),
}),
("network.incoming.bytes.rate", {
'label': '',
'description': _("Average rate per sec of incoming "
"bytes on a VM network interface"),
}),
("network.outgoing.bytes.rate", {
'label': '',
'description': _("Average rate per sec of outgoing "
"bytes on a VM network interface"),
}),
("network.incoming.packets.rate", {
'label': '',
'description': _("Average rate per sec of incoming "
"packets on a VM network interface"),
}),
("network.outgoing.packets.rate", {
'label': '',
'description': _("Average rate per sec of outgoing "
"packets on a VM network interface"),
}),
])
# Adding flavor based meters into meters_info dict
# TODO(lsmola) this kind of meter will be probably deprecated
# https://bugs.launchpad.net/ceilometer/+bug/1208365 . Delete it then.
for flavor in get_flavor_names(self._request):
name = 'instance:%s' % flavor
meters_info[name] = dict(meters_info["instance:<type>"])
meters_info[name]['description'] = (
_('Duration of instance type %s (openstack flavor)') %
flavor)
# TODO(lsmola) allow to set specific in local_settings. For all meters
# because users can have their own agents and meters.
return meters_info
def _get_neutron_meters_info(self):
"""Returns additional info for each meter.
That will be used for augmenting the Ceilometer meter.
"""
# TODO(lsmola) Unless the Ceilometer will provide the information
# below, I need to define it as a static here. I will be joining this
# to info that I am able to obtain from Ceilometer meters, hopefully
# some day it will be supported all.
return OrderedDict([
('network', {
'label': '',
'description': _("Existence of network"),
}),
('network.create', {
'label': '',
'description': _("Creation requests for this network"),
}),
('network.update', {
'label': '',
'description': _("Update requests for this network"),
}),
('subnet', {
'label': '',
'description': _("Existence of subnet"),
}),
('subnet.create', {
'label': '',
'description': _("Creation requests for this subnet"),
}),
('subnet.update', {
'label': '',
'description': _("Update requests for this subnet"),
}),
('port', {
'label': '',
'description': _("Existence of port"),
}),
('port.create', {
'label': '',
'description': _("Creation requests for this port"),
}),
('port.update', {
'label': '',
'description': _("Update requests for this port"),
}),
('router', {
'label': '',
'description': _("Existence of router"),
}),
('router.create', {
'label': '',
'description': _("Creation requests for this router"),
}),
('router.update', {
'label': '',
'description': _("Update requests for this router"),
}),
('ip.floating', {
'label': '',
'description': _("Existence of floating ip"),
}),
('ip.floating.create', {
'label': '',
'description': _("Creation requests for this floating ip"),
}),
('ip.floating.update', {
'label': '',
'description': _("Update requests for this floating ip"),
}),
])
def _get_glance_meters_info(self):
"""Returns additional info for each meter.
That will be used for augmenting the Ceilometer meter.
"""
# TODO(lsmola) Unless the Ceilometer will provide the information
# below, I need to define it as a static here. I will be joining this
# to info that I am able to obtain from Ceilometer meters, hopefully
# some day it will be supported all.
return OrderedDict([
('image', {
'label': '',
'description': _("Image existence check"),
}),
('image.size', {
'label': '',
'description': _("Uploaded image size"),
}),
('image.update', {
'label': '',
'description': _("Number of image updates"),
}),
('image.upload', {
'label': '',
'description': _("Number of image uploads"),
}),
('image.delete', {
'label': '',
'description': _("Number of image deletions"),
}),
('image.download', {
'label': '',
'description': _("Image is downloaded"),
}),
('image.serve', {
'label': '',
'description': _("Image is served out"),
}),
])
def _get_cinder_meters_info(self):
"""Returns additional info for each meter.
That will be used for augmenting the Ceilometer meter.
"""
# TODO(lsmola) Unless the Ceilometer will provide the information
# below, I need to define it as a static here. I will be joining this
# to info that I am able to obtain from Ceilometer meters, hopefully
# some day it will be supported all.
return OrderedDict([
('volume', {
'label': '',
'description': _("Existence of volume"),
}),
('volume.size', {
'label': '',
'description': _("Size of volume"),
}),
])
def _get_swift_meters_info(self):
"""Returns additional info for each meter.
That will be used for augmenting the Ceilometer meter.
"""
# TODO(lsmola) Unless the Ceilometer will provide the information
# below, I need to define it as a static here. I will be joining this
# to info that I am able to obtain from Ceilometer meters, hopefully
# some day it will be supported all.
return OrderedDict([
('storage.objects', {
'label': '',
'description': _("Number of objects"),
}),
('storage.objects.size', {
'label': '',
'description': _("Total size of stored objects"),
}),
('storage.objects.containers', {
'label': '',
'description': _("Number of containers"),
}),
('storage.objects.incoming.bytes', {
'label': '',
'description': _("Number of incoming bytes"),
}),
('storage.objects.outgoing.bytes', {
'label': '',
'description': _("Number of outgoing bytes"),
}),
('storage.api.request', {
'label': '',
'description': _("Number of API requests against swift"),
}),
])
def _get_kwapi_meters_info(self):
"""Returns additional info for each meter.
That will be used for augmenting the Ceilometer meter.
"""
# TODO(lsmola) Unless the Ceilometer will provide the information
# below, I need to define it as a static here. I will be joining this
# to info that I am able to obtain from Ceilometer meters, hopefully
# some day it will be supported all.
return OrderedDict([
('energy', {
'label': '',
'description': _("Amount of energy"),
}),
('power', {
'label': '',
'description': _("Power consumption"),
}),
])
def _get_ipmi_meters_info(self):
"""Returns additional info for each meter
That will be used for augmenting the Ceilometer meter
"""
# TODO(lsmola) Unless the Ceilometer will provide the information
# below, I need to define it as a static here. I will be joining this
# to info that I am able to obtain from Ceilometer meters, hopefully
# some day it will be supported all.
return OrderedDict([
('hardware.ipmi.node.power', {
'label': '',
'description': _("System Current Power"),
}),
('hardware.ipmi.fan', {
'label': '',
'description': _("Fan RPM"),
}),
('hardware.ipmi.temperature', {
'label': '',
'description': _("Sensor Temperature Reading"),
}),
('hardware.ipmi.current', {
'label': '',
'description': _("Sensor Current Reading"),
}),
('hardware.ipmi.voltage', {
'label': '',
'description': _("Sensor Voltage Reading"),
}),
('hardware.ipmi.node.temperature', {
'label': '',
'description': _("System Temperature Reading"),
}),
('hardware.ipmi.node.outlet_temperature', {
'label': '',
'description': _("System Outlet Temperature Reading"),
}),
('hardware.ipmi.node.airflow', {
'label': '',
'description': _("System Airflow Reading"),
}),
('hardware.ipmi.node.cups', {
'label': '',
'description': _("System CUPS Reading"),
}),
('hardware.ipmi.node.cpu_util', {
'label': '',
'description': _("System CPU Utility Reading"),
}),
('hardware.ipmi.node.mem_util', {
'label': '',
'description': _("System Memory Utility Reading"),
}),
('hardware.ipmi.node.io_util', {
'label': '',
'description': _("System IO Utility Reading"),
}),
])
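# Illustrative use of the class above (a sketch; ``request`` is assumed to be a Django
# request with a valid token): list the Nova meters while hiding a couple of them.
#   meters = Meters(request)
#   shown = meters.list_nova(except_meters=["instance", "vcpus"])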
|
the-stack_0_14114 | import datetime
import logging
import os
from itertools import groupby
from math import ceil
from django.db.models import Max
from django.db.models import Sum
from le_utils.constants import content_kinds
from sqlalchemy import and_
from sqlalchemy import cast
from sqlalchemy import exists
from sqlalchemy import false
from sqlalchemy import func
from sqlalchemy import Integer
from sqlalchemy import or_
from sqlalchemy import select
from .paths import get_content_file_name
from .paths import get_content_storage_file_path
from .sqlalchemybridge import Bridge
from .sqlalchemybridge import filter_by_uuids
from kolibri.core.content.apps import KolibriContentConfig
from kolibri.core.content.errors import InvalidStorageFilenameError
from kolibri.core.content.models import ChannelMetadata
from kolibri.core.content.models import ContentNode
from kolibri.core.content.models import File
from kolibri.core.content.models import LocalFile
from kolibri.core.content.utils.sqlalchemybridge import filter_by_checksums
from kolibri.core.device.models import ContentCacheKey
logger = logging.getLogger(__name__)
CONTENT_APP_NAME = KolibriContentConfig.label
CHUNKSIZE = 10000
def _generate_MPTT_descendants_statement(mptt_values, ContentNodeTable):
"""
This logic is modified from:
https://github.com/django-mptt/django-mptt/blob/38d46c26ca362c471b097ab96a3616b9b20fb883/mptt/managers.py#L137
in order to render the result as a SQL Alchemy expression that we can use
in other queries.
"""
queries = []
# Group the resultant mptt data by tree_id and parent_id,
# this will allow us to consolidate contiguous siblings to reduce
# the total number of constraints.
# This logic is verbatim from Django MPTT, only the query construction
# has been translated from Django Q statements to SQL Alchemy and_ statements.
for group in groupby(
mptt_values,
key=lambda n: (
# tree id
n[0],
# parent id
n[1],
),
):
next_lft = None
for node in list(group[1]):
tree = node[0]
lft = min_val = node[2]
rght = max_val = node[3]
if next_lft is None:
next_lft = rght + 1
min_max = {"min": min_val, "max": max_val}
elif lft == next_lft:
if min_val < min_max["min"]:
min_max["min"] = min_val
if max_val > min_max["max"]:
min_max["max"] = max_val
next_lft = rght + 1
elif lft != next_lft:
queries.append(
and_(
ContentNodeTable.c.tree_id == tree,
ContentNodeTable.c.lft >= min_max["min"],
ContentNodeTable.c.rght <= min_max["max"],
)
)
min_max = {"min": min_val, "max": max_val}
next_lft = rght + 1
queries.append(
and_(
ContentNodeTable.c.tree_id == tree,
ContentNodeTable.c.lft >= min_max["min"],
ContentNodeTable.c.rght <= min_max["max"],
)
)
return queries
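# Illustrative aside (a sketch, not used by this module): in an MPTT tree a node B is a
# descendant of A exactly when they share a tree_id and A.lft <= B.lft and B.rght <= A.rght,
# which is why the constraints built above only need (tree_id, lft >= min, rght <= max).
def _example_is_descendant(ancestor, node):
    """``ancestor``/``node`` are dicts with ``tree_id``, ``lft`` and ``rght`` keys (hypothetical shape)."""
    return (
        node["tree_id"] == ancestor["tree_id"]
        and node["lft"] >= ancestor["lft"]
        and node["rght"] <= ancestor["rght"]
    )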
def _MPTT_descendant_ids_statement(
bridge, channel_id, node_ids, min_boundary, max_boundary
):
ContentNodeTable = bridge.get_table(ContentNode)
connection = bridge.get_connection()
# Setup list to collect queries
or_queries = []
# First we fetch a list of non-topic ids from the specified node ids
# that match the specified tree boundary ranges
non_topic_results = connection.execute(
select([ContentNodeTable.c.id]).where(
and_(
ContentNodeTable.c.channel_id == channel_id,
filter_by_uuids(ContentNodeTable.c.id, node_ids),
# Also filter by the boundary conditions
# We are only interested in non-topic nodes that
# are inside the range
ContentNodeTable.c.rght >= min_boundary,
ContentNodeTable.c.rght <= max_boundary,
# Produce an id list for non topics
ContentNodeTable.c.kind != content_kinds.TOPIC,
)
)
).fetchall()
non_topic_node_ids = [result[0] for result in non_topic_results]
# If we have any node ids that are for non-topics, then we add an explicit query
# to match against those node ids
if non_topic_node_ids:
or_queries.append(filter_by_uuids(ContentNodeTable.c.id, non_topic_node_ids))
# Now get the relevant MPTT values from the database for the specified node_ids
# for topic nodes in the specified lft/rght range.
# Query modified from:
# https://github.com/django-mptt/django-mptt/blob/38d46c26ca362c471b097ab96a3616b9b20fb883/mptt/managers.py#L123
mptt_values = connection.execute(
select(
[
ContentNodeTable.c.tree_id,
ContentNodeTable.c.parent_id,
ContentNodeTable.c.lft,
ContentNodeTable.c.rght,
]
)
.order_by(
ContentNodeTable.c.tree_id,
ContentNodeTable.c.parent_id,
ContentNodeTable.c.lft,
)
.where(
and_(
ContentNodeTable.c.channel_id == channel_id,
filter_by_uuids(ContentNodeTable.c.id, node_ids),
# Add constraints specific to our requirements, in terms of batching:
# Also filter by the boundary conditions
# We are only interested in nodes that are ancestors of
# the nodes in the range, but they could be ancestors of any node
# in this range, so we filter the lft value by being less than
# or equal to the max_boundary, and the rght value by being
# greater than or equal to the min_boundary.
ContentNodeTable.c.lft <= max_boundary,
ContentNodeTable.c.rght >= min_boundary,
# And topics:
# Only select values for descendant constraints from topics
ContentNodeTable.c.kind == content_kinds.TOPIC,
)
)
).fetchall()
# Extend the constraints we are filtering by with ones generated from the relevant
# MPTT values we have queried above.
or_queries.extend(
_generate_MPTT_descendants_statement(mptt_values, ContentNodeTable)
)
if not or_queries:
# No constraints that apply in this range, so therefore this query should always
# evaluate to False, because nothing can match it.
return select([ContentNodeTable.c.id]).where(false())
# Return a query that ors each of the constraints
return select([ContentNodeTable.c.id]).where(or_(*or_queries))
def _create_batch_update_statement(
bridge, channel_id, min_boundary, max_boundary, node_ids, exclude_node_ids
):
ContentNodeTable = bridge.get_table(ContentNode)
# Restrict the update statement to nodes falling within the boundaries
batch_statement = ContentNodeTable.update().where(
and_(
# Only update leaf nodes (non topics)
ContentNodeTable.c.kind != content_kinds.TOPIC,
# Only update nodes in the channel we specified
ContentNodeTable.c.channel_id == channel_id,
# Only select nodes inside the boundary conditions
ContentNodeTable.c.rght >= min_boundary,
ContentNodeTable.c.rght <= max_boundary,
)
)
if node_ids is not None:
# Construct a statement that restricts which nodes we update
# in this batch by the specified inclusion constraints
node_ids_statement = _MPTT_descendant_ids_statement(
bridge, channel_id, node_ids, min_boundary, max_boundary
)
# Add this statement to the query
batch_statement = batch_statement.where(
ContentNodeTable.c.id.in_(node_ids_statement)
)
if exclude_node_ids is not None:
# Construct a statement that restricts nodes we update
# in this batch by the specified exclusion constraints
exclude_node_ids_statement = _MPTT_descendant_ids_statement(
bridge, channel_id, exclude_node_ids, min_boundary, max_boundary
)
# Add this statement to the query
batch_statement = batch_statement.where(
~ContentNodeTable.c.id.in_(exclude_node_ids_statement)
)
return batch_statement
def _calculate_batch_params(bridge, channel_id, node_ids, exclude_node_ids):
ContentNodeTable = bridge.get_table(ContentNode)
connection = bridge.get_connection()
# To chunk the tree, we first find the full extent of the tree - this gives the
# highest rght value for this channel.
max_rght = connection.execute(
select([func.max(ContentNodeTable.c.rght)]).where(
ContentNodeTable.c.channel_id == channel_id
)
).scalar()
# Count the total number of constraints
constraint_count = len(node_ids or []) + len(exclude_node_ids or [])
# Aim for a constraint per batch count of about 250 on average
# This means that there will be at most 750 parameters from the constraints
# and should therefore also limit the overall SQL expression size.
dynamic_chunksize = int(
min(CHUNKSIZE, ceil(250 * max_rght / (constraint_count or 1)))
)
return max_rght, dynamic_chunksize
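# Rough worked example of the dynamic chunk sizing above (illustrative,
# hypothetical numbers only): with max_rght == 100000, 2000 constraints and a
# chunksize of 10000, ceil(250 * 100000 / 2000) == 12500 is capped at the
# chunksize, so batches of 10000 rght values are used (10 batches in total).
def _example_dynamic_chunksize(max_rght=100000, constraint_count=2000, chunksize=10000):
    dynamic = int(min(chunksize, ceil(250 * max_rght / (constraint_count or 1))))
    return dynamic, int(ceil(max_rght / dynamic))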
def set_leaf_nodes_invisible(channel_id, node_ids=None, exclude_node_ids=None):
"""
Set nodes in a channel as unavailable.
With no additional arguments, this will hide an entire channel.
With the additional nodes arguments, it will selectively flag nodes
as unavailable, based on the passed in ids, setting them as unavailable if
they are in node_ids, or descendants of those nodes, but not in
exclude_node_ids or descendants of those nodes.
"""
bridge = Bridge(app_name=CONTENT_APP_NAME)
connection = bridge.get_connection()
# Start a counter for the while loop
min_boundary = 1
# Calculate batch parameters
max_rght, dynamic_chunksize = _calculate_batch_params(
bridge, channel_id, node_ids, exclude_node_ids
)
logger.info(
"Removing availability of non-topic ContentNode objects in {} batches of {}".format(
int(ceil(max_rght / dynamic_chunksize)), dynamic_chunksize
)
)
while min_boundary < max_rght:
batch_statement = _create_batch_update_statement(
bridge,
channel_id,
min_boundary,
min_boundary + dynamic_chunksize,
node_ids,
exclude_node_ids,
)
# Execute the update for this batch
connection.execute(
batch_statement.values(available=False).execution_options(autocommit=True)
)
min_boundary += dynamic_chunksize
bridge.end()
def set_leaf_node_availability_from_local_file_availability(
channel_id, node_ids=None, exclude_node_ids=None
):
"""
Set nodes in a channel as available, based on their required files.
With no additional arguments, this will make every node in the channel
available or unavailable based on whether the files needed to render
those nodes are present on disk.
With the additional nodes arguments, it will selectively flag nodes
based on the passed in ids, marking their availability if
they are in node_ids, or descendants of those nodes, but not in
exclude_node_ids or descendants of those nodes.
Nodes in the channel not captured by the constraints will not have
their availability changed either way.
"""
bridge = Bridge(app_name=CONTENT_APP_NAME)
# SQL Alchemy reference to the content node table
ContentNodeTable = bridge.get_table(ContentNode)
# SQL Alchemy reference to the file table - a mapping from
# contentnodes to the files that they use
FileTable = bridge.get_table(File)
# SQL Alchemy reference to the localfile table which tracks
# information about the files on disk, such as availability
LocalFileTable = bridge.get_table(LocalFile)
connection = bridge.get_connection()
# This statement defines the update condition for the contentnode
    # running exists() on this (as it is used below) will produce either
    # True, in the case where the contentnode has the required files
    # available for rendering, or False otherwise.
contentnode_statement = (
        # We could select any property here, as it's the existence that matters.
select([1]).select_from(
# This does the first step in the many to many lookup for File
# and LocalFile.
FileTable.join(
LocalFileTable,
and_(
# This does the actual correlation between file and local file
FileTable.c.local_file_id == LocalFileTable.c.id,
# This only joins on LocalFile objects that we know
# have associated files on disk.
LocalFileTable.c.available == True, # noqa
),
)
)
# Only look at files that are required (not supplementary)
.where(FileTable.c.supplementary == False)
# Correlate between the contentnode id and the foreign key
# to the content node on the file table to complete the
# many to many lookup
.where(ContentNodeTable.c.id == FileTable.c.contentnode_id)
)
# Start a counter for the while loop
min_boundary = 1
# Calculate batch parameters
max_rght, dynamic_chunksize = _calculate_batch_params(
bridge, channel_id, node_ids, exclude_node_ids
)
logger.info(
"Setting availability of non-topic ContentNode objects based on LocalFile availability in {} batches of {}".format(
int(ceil(max_rght / dynamic_chunksize)), dynamic_chunksize
)
)
while min_boundary < max_rght:
batch_statement = _create_batch_update_statement(
bridge,
channel_id,
min_boundary,
min_boundary + dynamic_chunksize,
node_ids,
exclude_node_ids,
)
# Execute the update for this batch
connection.execute(
batch_statement.values(
available=exists(contentnode_statement)
).execution_options(autocommit=True)
)
min_boundary += dynamic_chunksize
bridge.end()
def mark_local_files_as_unavailable(checksums, destination=None):
mark_local_files_availability(checksums, False, destination=destination)
def mark_local_files_as_available(checksums, destination=None):
"""
Shortcut method to update database if we are sure that the files are available.
Can be used after successful downloads to flag availability without having to do expensive disk reads.
"""
mark_local_files_availability(checksums, True, destination=destination)
def mark_local_files_availability(checksums, availability, destination=None):
if checksums:
bridge = Bridge(app_name=CONTENT_APP_NAME, sqlite_file_path=destination)
LocalFileTable = bridge.get_table(LocalFile)
logger.info(
"Setting availability to {availability} of {number} LocalFile objects based on passed in checksums".format(
number=len(checksums), availability=availability
)
)
connection = bridge.get_connection()
trans = connection.begin()
for i in range(0, len(checksums), CHUNKSIZE):
connection.execute(
LocalFileTable.update()
.where(
filter_by_checksums(
LocalFileTable.c.id, checksums[i : i + CHUNKSIZE]
)
)
.values(available=availability)
)
trans.commit()
bridge.end()
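# Illustrative sketch only (not used by the functions above or below): the
# chunked-update pattern applied in mark_local_files_availability, factored
# out as a generic helper for clarity.
def _example_chunks(seq, size=CHUNKSIZE):
    """Yield successive slices of ``seq`` at most ``size`` elements long."""
    for i in range(0, len(seq), size):
        yield seq[i : i + size]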
def _check_file_availability(files):
checksums_to_set_available = []
checksums_to_set_unavailable = []
for file in files:
try:
# Update if the file exists, *and* the localfile is set as unavailable.
if os.path.exists(
get_content_storage_file_path(
get_content_file_name({"id": file[0], "extension": file[2]})
)
):
if not file[1]:
checksums_to_set_available.append(file[0])
# Update if the file does not exist, *and* the localfile is set as available.
else:
if file[1]:
checksums_to_set_unavailable.append(file[0])
except InvalidStorageFilenameError:
continue
return checksums_to_set_available, checksums_to_set_unavailable
def set_local_file_availability_from_disk(checksums=None, destination=None):
if type(checksums) == list and len(checksums) > CHUNKSIZE:
for i in range(0, len(checksums), CHUNKSIZE):
set_local_file_availability_from_disk(
checksums=checksums[i : i + CHUNKSIZE], destination=destination
)
return
bridge = Bridge(app_name=CONTENT_APP_NAME, sqlite_file_path=destination)
LocalFileTable = bridge.get_table(LocalFile)
query = select(
[LocalFileTable.c.id, LocalFileTable.c.available, LocalFileTable.c.extension]
)
if checksums is None:
logger.info(
"Setting availability of LocalFile objects based on disk availability"
)
elif type(checksums) == list:
logger.info(
"Setting availability of {number} LocalFile objects based on disk availability".format(
number=len(checksums)
)
)
query = query.where(filter_by_checksums(LocalFileTable.c.id, checksums))
else:
logger.info(
"Setting availability of LocalFile object with checksum {checksum} based on disk availability".format(
checksum=checksums
)
)
query = query.where(LocalFileTable.c.id == checksums)
connection = bridge.get_connection()
files = connection.execute(query).fetchall()
checksums_to_set_available, checksums_to_set_unavailable = _check_file_availability(
files
)
bridge.end()
mark_local_files_as_available(checksums_to_set_available, destination=destination)
mark_local_files_as_unavailable(
checksums_to_set_unavailable, destination=destination
)
def recurse_annotation_up_tree(channel_id):
bridge = Bridge(app_name=CONTENT_APP_NAME)
ContentNodeClass = bridge.get_class(ContentNode)
ContentNodeTable = bridge.get_table(ContentNode)
connection = bridge.get_connection()
node_depth = (
bridge.session.query(func.max(ContentNodeClass.level))
.filter_by(channel_id=channel_id)
.scalar()
)
logger.info(
"Annotating ContentNode objects with children for {levels} levels".format(
levels=node_depth
)
)
child = ContentNodeTable.alias()
# start a transaction
trans = connection.begin()
start = datetime.datetime.now()
    # Set num_coach_contents on all leaf ContentNodes to 1 or 0
    # Set on_device_resources on all leaf ContentNodes to 1 or 0
connection.execute(
ContentNodeTable.update()
.where(
and_(
# In this channel
ContentNodeTable.c.channel_id == channel_id,
# That are not topics
ContentNodeTable.c.kind != content_kinds.TOPIC,
)
)
.values(
num_coach_contents=cast(ContentNodeTable.c.coach_content, Integer()),
on_device_resources=cast(ContentNodeTable.c.available, Integer()),
)
)
# Before starting set availability to False on all topics.
connection.execute(
ContentNodeTable.update()
.where(
and_(
# In this channel
ContentNodeTable.c.channel_id == channel_id,
# That are topics
ContentNodeTable.c.kind == content_kinds.TOPIC,
)
)
.values(available=False)
)
# Expression to capture all available child nodes of a contentnode
available_nodes = select([child.c.available]).where(
and_(
child.c.available == True, # noqa
ContentNodeTable.c.id == child.c.parent_id,
)
)
# Expressions for annotation of coach content
# Expression that will resolve a boolean value for all the available children
# of a content node, whereby if they all have coach_content flagged on them, it will be true,
# but otherwise false.
# Everything after the select statement should be identical to the available_nodes expression above.
if bridge.engine.name == "sqlite":
# Use a min function to simulate an AND.
coach_content_nodes = select([func.min(child.c.coach_content)]).where(
and_(
child.c.available == True, # noqa
ContentNodeTable.c.id == child.c.parent_id,
)
)
elif bridge.engine.name == "postgresql":
# Use the postgres boolean AND operator
coach_content_nodes = select([func.bool_and(child.c.coach_content)]).where(
and_(
child.c.available == True, # noqa
ContentNodeTable.c.id == child.c.parent_id,
)
)
# Expression that sums the total number of coach contents for each child node
# of a contentnode
coach_content_num = select([func.sum(child.c.num_coach_contents)]).where(
and_(
child.c.available == True, # noqa
ContentNodeTable.c.id == child.c.parent_id,
)
)
# Expression that sums the total number of on_device_resources for each child node
# of a contentnode
on_device_num = select([func.sum(child.c.on_device_resources)]).where(
and_(
child.c.available == True, # noqa
ContentNodeTable.c.id == child.c.parent_id,
)
)
# Go from the deepest level to the shallowest
for level in range(node_depth, 0, -1):
logger.info(
"Annotating ContentNode objects with children for level {level}".format(
level=level
)
)
# Only modify topic availability here
connection.execute(
ContentNodeTable.update()
.where(
and_(
ContentNodeTable.c.level == level - 1,
ContentNodeTable.c.channel_id == channel_id,
ContentNodeTable.c.kind == content_kinds.TOPIC,
)
)
# Because we have set availability to False on all topics as a starting point
# we only need to make updates to topics with available children.
.where(exists(available_nodes))
.values(
available=exists(available_nodes),
coach_content=coach_content_nodes,
num_coach_contents=coach_content_num,
on_device_resources=on_device_num,
)
)
# commit the transaction
trans.commit()
elapsed = datetime.datetime.now() - start
logger.debug(
"Recursive topic tree annotation took {} seconds".format(elapsed.seconds)
)
bridge.end()
def calculate_dummy_progress_for_annotation(node_ids, exclude_node_ids, total_progress):
num_annotation_constraints = len(node_ids or []) + len(exclude_node_ids or [])
    # Calculate a percentage of the total progress to devote to annotation,
    # between 1 and 10
annotation_proportion = min(10, max(1, int(num_annotation_constraints / 500)))
# Create some progress proportional to annotation task
return int(annotation_proportion * total_progress / (100 - annotation_proportion))
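# Rough worked example (illustrative, hypothetical constraint counts): with
# 2500 node id constraints the proportion is min(10, max(1, 2500 // 500)) == 5,
# so for total_progress == 90 the helper returns int(5 * 90 / 95) == 4.
def _example_dummy_progress():
    return calculate_dummy_progress_for_annotation(["node"] * 2500, None, 90)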
def propagate_forced_localfile_removal(localfiles_list):
files = File.objects.filter(supplementary=False, local_file__in=localfiles_list)
ContentNode.objects.filter(files__in=files).update(available=False)
for channel_id in ChannelMetadata.objects.all().values_list("id", flat=True):
recurse_annotation_up_tree(channel_id)
def update_content_metadata(
channel_id, node_ids=None, exclude_node_ids=None, public=None
):
set_leaf_node_availability_from_local_file_availability(
channel_id, node_ids=node_ids, exclude_node_ids=exclude_node_ids
)
recurse_annotation_up_tree(channel_id)
set_channel_metadata_fields(channel_id, public=public)
ContentCacheKey.update_cache_key()
def set_content_visibility(
channel_id, checksums, node_ids=None, exclude_node_ids=None, public=None
):
mark_local_files_as_available(checksums)
update_content_metadata(
channel_id, node_ids=node_ids, exclude_node_ids=exclude_node_ids, public=public
)
def set_content_visibility_from_disk(channel_id):
set_local_file_availability_from_disk()
update_content_metadata(channel_id)
def set_content_invisible(channel_id, node_ids, exclude_node_ids):
set_leaf_nodes_invisible(channel_id, node_ids, exclude_node_ids)
recurse_annotation_up_tree(channel_id)
set_channel_metadata_fields(channel_id)
ContentCacheKey.update_cache_key()
def set_channel_metadata_fields(channel_id, public=None):
channel = ChannelMetadata.objects.get(id=channel_id)
calculate_published_size(channel)
calculate_total_resource_count(channel)
calculate_included_languages(channel)
calculate_next_order(channel)
if public is not None:
channel.public = public
channel.save()
def files_for_nodes(nodes):
return LocalFile.objects.filter(files__contentnode__in=nodes)
def total_file_size(files_or_nodes):
if issubclass(files_or_nodes.model, LocalFile):
localfiles = files_or_nodes
elif issubclass(files_or_nodes.model, ContentNode):
localfiles = files_for_nodes(files_or_nodes)
else:
raise TypeError("Expected queryset for LocalFile or ContentNode")
return localfiles.distinct().aggregate(Sum("file_size"))["file_size__sum"] or 0
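# Hypothetical usage sketch (channel_id is a placeholder supplied by the
# caller): both LocalFile and ContentNode querysets are accepted above, so the
# on-disk footprint of a channel's available files can be computed like this.
def _example_channel_file_size(channel_id):
    nodes = ContentNode.objects.filter(channel_id=channel_id, available=True)
    return total_file_size(files_for_nodes(nodes))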
def calculate_published_size(channel):
content_nodes = ContentNode.objects.filter(channel_id=channel.id)
channel.published_size = total_file_size(
files_for_nodes(content_nodes).filter(available=True)
)
channel.save()
def calculate_total_resource_count(channel):
content_nodes = ContentNode.objects.filter(channel_id=channel.id)
channel.total_resource_count = (
content_nodes.filter(available=True)
.exclude(kind=content_kinds.TOPIC)
.dedupe_by_content_id()
.count()
)
channel.save()
def calculate_included_languages(channel):
content_nodes = ContentNode.objects.filter(
channel_id=channel.id, available=True
).exclude(lang=None)
languages = content_nodes.order_by("lang").values_list("lang", flat=True).distinct()
channel.included_languages.add(*list(languages))
def calculate_next_order(channel, model=ChannelMetadata):
if channel.order is None or channel.order == 0:
max_order = model.objects.aggregate(Max("order")).get("order__max", 0)
if max_order is None:
max_order = 0
channel.order = max_order + 1
channel.save()
|
the-stack_0_14115 | """ Evo-LeViT in PyTorch
A PyTorch implement of Evo-LeViT as described in
'Evo-ViT: Slow-Fast Token Evolution for Dynamic Vision Transformer'
The code is modified from LeViT as described in
'LeViT: a Vision Transformer in ConvNet's Clothing for Faster Inference' - https://arxiv.org/abs/2104.01136
The official code of LeViT is released and available at https://github.com/facebookresearch/LeViT
"""
import torch
import utils
import torch.nn as nn
from timm.models.vision_transformer import trunc_normal_
from timm.models.registry import register_model
specification = {
'EvoLeViT_128S': {
'C': '128_256_384', 'D': 16, 'N': '4_6_8', 'X': '2_3_4', 'drop_path': 0,
'weights': 'https://dl.fbaipublicfiles.com/LeViT/LeViT-128S-96703c44.pth'},
'EvoLeViT_128': {
'C': '128_256_384', 'D': 16, 'N': '4_8_12', 'X': '4_4_4', 'drop_path': 0,
'weights': 'https://dl.fbaipublicfiles.com/LeViT/LeViT-128-b88c2750.pth'},
'EvoLeViT_192': {
'C': '192_288_384', 'D': 32, 'N': '3_5_6', 'X': '4_4_4', 'drop_path': 0,
'weights': 'https://dl.fbaipublicfiles.com/LeViT/LeViT-192-92712e41.pth'},
'EvoLeViT_256': {
'C': '256_384_512', 'D': 32, 'N': '4_6_8', 'X': '4_4_4', 'drop_path': 0,
'weights': 'https://dl.fbaipublicfiles.com/LeViT/LeViT-256-13b5763e.pth'},
'EvoLeViT_384': {
'C': '384_512_768', 'D': 32, 'N': '6_9_12', 'X': '4_4_4', 'drop_path': 0.1,
'weights': 'https://dl.fbaipublicfiles.com/LeViT/LeViT-384-9bdaf2e2.pth'},
}
prune_ratio_list = {
'EvoLeViT_128S': [[1, 1, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5], [0.5, 0.5, 0.5]],
'EvoLeViT_128': [[1, 1, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5],
[0.5, 0.5, 0.5]],
'EvoLeViT_192': [[1, 1, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5],
[0.5, 0.5, 0.5]],
'EvoLeViT_256': [[1, 1, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5],
[0.5, 0.5, 0.5]],
'EvoLeViT_384': [[1, 1, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5],
[0.5, 0.5, 0.5]],
}
__all__ = list(specification.keys())
@register_model
def EvoLeViT_128S(num_classes=1000, distillation=True,
pretrained=False, fuse=False):
return model_factory(**specification['EvoLeViT_128S'], num_classes=num_classes,
distillation=distillation, pretrained=pretrained, fuse=fuse,
prune_ratio=prune_ratio_list['EvoLeViT_128S'])
@register_model
def EvoLeViT_128(num_classes=1000, distillation=True,
pretrained=False, fuse=False):
return model_factory(**specification['EvoLeViT_128'], num_classes=num_classes,
distillation=distillation, pretrained=pretrained, fuse=fuse,
prune_ratio=prune_ratio_list['EvoLeViT_128'])
@register_model
def EvoLeViT_192(num_classes=1000, distillation=True,
pretrained=False, fuse=False):
return model_factory(**specification['EvoLeViT_192'], num_classes=num_classes,
distillation=distillation, pretrained=pretrained, fuse=fuse,
prune_ratio=prune_ratio_list['EvoLeViT_192'])
@register_model
def EvoLeViT_256(num_classes=1000, distillation=True,
pretrained=False, fuse=False):
return model_factory(**specification['EvoLeViT_256'], num_classes=num_classes,
distillation=distillation, pretrained=pretrained, fuse=fuse,
prune_ratio=prune_ratio_list['EvoLeViT_256'])
@register_model
def EvoLeViT_384(num_classes=1000, distillation=True,
pretrained=False, fuse=False):
return model_factory(**specification['EvoLeViT_384'], num_classes=num_classes,
distillation=distillation, pretrained=pretrained, fuse=fuse,
prune_ratio=prune_ratio_list['EvoLeViT_384'])
global_attn = 0
ori_indices = None
learn_tradeoff_mode = True
def easy_gather(x, indices):
# x: B,N,C; indices: B,N
B, N, C = x.shape
N_new = indices.shape[1]
offset = torch.arange(B, dtype=torch.long, device=x.device).view(B, 1) * N
indices = indices + offset
out = x.reshape(B * N, C)[indices.view(-1)].reshape(B, N_new, C)
return out
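# Minimal sketch (not part of the original model code) showing what
# easy_gather does on a toy batch: each row of `indices` reorders the tokens
# of the corresponding sample independently.
def _example_easy_gather():
    x = torch.arange(2 * 3 * 1, dtype=torch.float32).view(2, 3, 1)  # B=2, N=3, C=1
    indices = torch.tensor([[2, 0, 1], [1, 2, 0]])
    # Token values become [2, 0, 1] for sample 0 and [4, 5, 3] for sample 1.
    return easy_gather(x, indices)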
def merge_tokens(x_drop, score):
# score B,N
# scale
weight = score / torch.sum(score, dim=1, keepdim=True)
x_drop = weight.unsqueeze(-1) * x_drop
return torch.sum(x_drop, dim=1, keepdim=True)
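# Minimal sketch (illustrative only): merge_tokens collapses the pruned
# tokens into a single token, weighted by their normalised scores.
def _example_merge_tokens():
    x_drop = torch.tensor([[[1.0, 0.0], [0.0, 1.0]]])  # B=1, N=2, C=2
    score = torch.tensor([[3.0, 1.0]])                 # token 0 weighted 3x token 1
    # Weights become [0.75, 0.25], so the merged token is [[[0.75, 0.25]]].
    return merge_tokens(x_drop, score)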
class CatModule(torch.nn.Module):
def __init__(self, m1, m2, prune_ratio, N):
super().__init__()
self.m1 = m1
self.m2 = m2
self.prune_ratio = prune_ratio
# self.i = i
if prune_ratio < 1.0:
N_ = N - int(N * prune_ratio)
self.drop_fc = nn.AdaptiveAvgPool1d(1)
# self.recover_fc=nn.Linear(1,N_)
def set_prune_ratio(self, prune_ratio):
self.prune_ratio = prune_ratio
def forward(self, x_):
global global_attn # ga
global ori_indices # oi
if self.prune_ratio < 1:
x = x_[:, 1:] # split out cls token
N = x.shape[1]
N_ = int(N * self.prune_ratio)
indices = torch.argsort(global_attn, dim=1, descending=True)
x_ga_oi = torch.cat((x, global_attn.unsqueeze(-1), ori_indices.unsqueeze(-1)), dim=-1)
x_ga_oi = easy_gather(x_ga_oi, indices)
x_sorted, global_attn, ori_indices = x_ga_oi[:, :, :-2], x_ga_oi[:, :, -2], x_ga_oi[:, :, -1]
if self.training:
x_ = torch.cat((x_[:, :1], x_sorted), dim=1)
else:
x_[:, 1:] = x_sorted
x = x_[:, :N_ + 1]
x_drop = x_[:, N_ + 1:]
add_token = merge_tokens(x_drop, global_attn[:, N_:]) # B,1,C
x = torch.cat((x, add_token), dim=1) # B,N+1,C
x, raw_x1 = self.m1(x)
x, raw_x2 = self.m2(x)
x = x[:, :-1]
# fast update via skip connection
add_token1 = raw_x1[:, -1:]
add_token2 = raw_x2[:, -1:]
x_drop = x_drop + add_token1.expand(-1, x_drop.shape[1], -1) + add_token2.expand(-1, x_drop.shape[1], -1)
x_ = torch.cat((x, x_drop), dim=1)
# x_[:, N_ + 1:] = x_drop
# x_[:, :N_ + 1] = x
else:
x_, _ = self.m1(x_)
x_, _ = self.m2(x_)
return x_
class StageModule(torch.nn.Module):
def __init__(self, m, prune_ratio):
super().__init__()
self.m = m
self.prune_ratio = prune_ratio
def forward(self, x_):
global global_attn # ga
global ori_indices # oi
if isinstance(x_, tuple):
x_ = x_[0]
if self.prune_ratio < 1:
x = x_[:, 1:] # split out cls token
N = x.shape[1]
N_ = int(N * self.prune_ratio)
indices = torch.argsort(global_attn, dim=1, descending=True)
x_ga_oi = torch.cat((x, global_attn.unsqueeze(-1), ori_indices.unsqueeze(-1)), dim=-1)
x_ga_oi = easy_gather(x_ga_oi, indices)
x_sorted, global_attn, ori_indices = x_ga_oi[:, :, :-2], x_ga_oi[:, :, -2], x_ga_oi[:, :, -1]
if self.training:
x_ = torch.cat((x_[:, :1], x_sorted), dim=1)
else:
x_[:, 1:] = x_sorted
x = x_[:, :N_ + 1]
x_drop = x_[:, N_ + 1:]
merge_weight = global_attn[:, N_:]
add_token = merge_tokens(x_drop, merge_weight) # B,1,C
x = torch.cat((x, add_token), dim=1) # B,N+1,C
raw_total = 0
for blk in self.m:
x, raw = blk(x)
raw_total = raw_total + raw[:, -1:]
x_drop = x_drop + raw_total.expand(-1, x_drop.shape[1], -1)
x = x[:, :-1]
if self.training:
x_ = torch.cat((x, x_drop), dim=1)
else:
x_[:, N_ + 1:] = x_drop
x_[:, :N_ + 1] = x
else:
x_ = self.m(x_)
return x_
class Conv2d_BN(torch.nn.Sequential):
def __init__(self, a, b, ks=1, stride=1, pad=0, dilation=1,
groups=1, bn_weight_init=1, resolution=-10000):
super().__init__()
self.add_module('c', torch.nn.Conv2d(
a, b, ks, stride, pad, dilation, groups, bias=False))
bn = torch.nn.BatchNorm2d(b)
torch.nn.init.constant_(bn.weight, bn_weight_init)
torch.nn.init.constant_(bn.bias, 0)
self.add_module('bn', bn)
@torch.no_grad()
def fuse(self):
c, bn = self._modules.values()
w = bn.weight / (bn.running_var + bn.eps) ** 0.5
w = c.weight * w[:, None, None, None]
b = bn.bias - bn.running_mean * bn.weight / \
(bn.running_var + bn.eps) ** 0.5
m = torch.nn.Conv2d(w.size(1), w.size(
0), w.shape[2:], stride=self.c.stride, padding=self.c.padding, dilation=self.c.dilation,
groups=self.c.groups)
m.weight.data.copy_(w)
m.bias.data.copy_(b)
return m
class Linear_BN(torch.nn.Sequential):
def __init__(self, a, b, bn_weight_init=1, resolution=-100000):
super().__init__()
self.add_module('c', torch.nn.Linear(a, b, bias=False))
bn = torch.nn.BatchNorm1d(b)
torch.nn.init.constant_(bn.weight, bn_weight_init)
torch.nn.init.constant_(bn.bias, 0)
self.add_module('bn', bn)
@torch.no_grad()
def fuse(self):
l, bn = self._modules.values()
w = bn.weight / (bn.running_var + bn.eps) ** 0.5
w = l.weight * w[:, None]
b = bn.bias - bn.running_mean * bn.weight / \
(bn.running_var + bn.eps) ** 0.5
m = torch.nn.Linear(w.size(1), w.size(0))
m.weight.data.copy_(w)
m.bias.data.copy_(b)
return m
def forward(self, x):
l, bn = self._modules.values()
x = l(x)
return bn(x.flatten(0, 1)).reshape_as(x)
class BN_Linear(torch.nn.Sequential):
def __init__(self, a, b, bias=True, std=0.02):
super().__init__()
self.add_module('bn', torch.nn.BatchNorm1d(a))
l = torch.nn.Linear(a, b, bias=bias)
trunc_normal_(l.weight, std=std)
if bias:
torch.nn.init.constant_(l.bias, 0)
self.add_module('l', l)
@torch.no_grad()
def fuse(self):
bn, l = self._modules.values()
w = bn.weight / (bn.running_var + bn.eps) ** 0.5
b = bn.bias - self.bn.running_mean * \
self.bn.weight / (bn.running_var + bn.eps) ** 0.5
w = l.weight * w[None, :]
if l.bias is None:
b = b @ self.l.weight.T
else:
b = (l.weight @ b[:, None]).view(-1) + self.l.bias
m = torch.nn.Linear(w.size(1), w.size(0))
m.weight.data.copy_(w)
m.bias.data.copy_(b)
return m
def b16(n, activation, resolution=224):
return torch.nn.Sequential(
Conv2d_BN(3, n // 8, 3, 2, 1, resolution=resolution),
activation(),
Conv2d_BN(n // 8, n // 4, 3, 2, 1, resolution=resolution // 2),
activation(),
Conv2d_BN(n // 4, n // 2, 3, 2, 1, resolution=resolution // 4),
activation(),
Conv2d_BN(n // 2, n, 3, 2, 1, resolution=resolution // 8))
class Residual(torch.nn.Module):
def __init__(self, m, drop, out_raw=False):
super().__init__()
self.m = m
self.drop = drop
self.out_raw = out_raw
def set_prune_ratio(self, prune_ratio):
pass
def forward(self, x):
if isinstance(x, tuple):
x = x[0]
if self.training and self.drop > 0:
raw = self.m(x) * torch.rand(x.size(0), 1, 1,
device=x.device).ge_(self.drop).div(1 - self.drop).detach()
else:
raw = self.m(x)
if self.out_raw:
return x + raw, raw
else:
return x + raw
class Attention(torch.nn.Module):
def __init__(self, dim, key_dim, num_heads=8,
attn_ratio=4,
activation=None,
resolution=14, posembed=False, global_attn_tradeoff=0.5):
super().__init__()
self.tradeoff = global_attn_tradeoff
self.learn_tradeoff = torch.nn.Parameter(torch.Tensor([0]))
self.sigmoid = torch.nn.Sigmoid()
self.num_heads = num_heads
self.scale = key_dim ** -0.5
self.key_dim = key_dim
self.nh_kd = nh_kd = key_dim * num_heads
self.d = int(attn_ratio * key_dim)
self.dh = int(attn_ratio * key_dim) * num_heads
self.attn_ratio = attn_ratio
h = self.dh + nh_kd * 2
self.qkv = Linear_BN(dim, h, resolution=resolution)
self.proj = torch.nn.Sequential(activation(), Linear_BN(
self.dh, dim, bn_weight_init=0, resolution=resolution))
self.pos_embed = posembed
@torch.no_grad()
def train(self, mode=True):
super().train(mode)
if mode and hasattr(self, 'ab'):
del self.ab
def forward(self, x): # x (B,N,C)
global global_attn
global learn_tradeoff_mode
B, N, C = x.shape
qkv = self.qkv(x)
q, k, v = qkv.view(B, N, self.num_heads, -
1).split([self.key_dim, self.key_dim, self.d], dim=3)
q = q.permute(0, 2, 1, 3)
k = k.permute(0, 2, 1, 3)
v = v.permute(0, 2, 1, 3)
attn_raw = (q @ k.transpose(-2, -1)) * self.scale
attn = attn_raw.softmax(dim=-1)
# update global attn
if learn_tradeoff_mode:
tradeoff = self.sigmoid(self.learn_tradeoff)
else:
tradeoff = self.tradeoff
if isinstance(global_attn, int):
cls_attn = torch.mean(attn[:, :, 0, 1:], dim=1) # B,N
global_attn = cls_attn
else:
if global_attn.shape[1] - N + 2 == 1:
# no additional token and no pruning
cls_attn = torch.mean(attn[:, :, 0, 1:], dim=1)
global_attn = (1 - tradeoff) * global_attn + tradeoff * cls_attn
else:
cls_attn = torch.mean(attn[:, :, 0, 1:-1], dim=1)
if self.training:
temp_attn = (1 - tradeoff) * global_attn[:, :N - 2] + tradeoff * cls_attn
global_attn = torch.cat((temp_attn, global_attn[:, N - 2:]), dim=1)
else:
global_attn[:, :N - 2] = (1 - tradeoff) * global_attn[:, :N - 2] + tradeoff * cls_attn
x = (attn @ v).transpose(1, 2).reshape(B, N, self.dh)
x = self.proj(x)
return x
class Subsample(torch.nn.Module):
def __init__(self, stride, resolution):
super().__init__()
self.stride = stride
self.resolution = resolution
def forward(self, x, with_cls=True):
if with_cls:
B, N, C = x.shape
x1 = x[:, 1:, :]
x1 = x1.view(B, self.resolution, self.resolution, C)[
:, ::self.stride, ::self.stride].reshape(B, -1, C)
x = torch.cat((x[:, :1, :], x1), dim=1)
else:
B, N, C = x.shape
x = x.view(B, self.resolution, self.resolution, C)[
:, ::self.stride, ::self.stride].reshape(B, -1, C)
return x
class AttentionSubsample(torch.nn.Module):
def __init__(self, in_dim, out_dim, key_dim, num_heads=8,
attn_ratio=2,
activation=None,
stride=2,
resolution=14, resolution_=7, posembed=False, global_attn_tradeoff=0.5):
super().__init__()
self.tradeoff = global_attn_tradeoff
self.learn_tradeoff = torch.nn.Parameter(torch.Tensor([0]))
self.sigmoid = torch.nn.Sigmoid()
self.num_heads = num_heads
self.scale = key_dim ** -0.5
self.key_dim = key_dim
self.nh_kd = nh_kd = key_dim * num_heads
self.d = int(attn_ratio * key_dim)
self.dh = int(attn_ratio * key_dim) * self.num_heads
self.attn_ratio = attn_ratio
self.resolution_ = resolution_
self.resolution_2 = resolution_ ** 2
h = self.dh + nh_kd
self.kv = Linear_BN(in_dim, h, resolution=resolution)
self.q = torch.nn.Sequential(
Subsample(stride, resolution),
Linear_BN(in_dim, nh_kd, resolution=resolution_))
self.proj = torch.nn.Sequential(activation(), Linear_BN(
self.dh, out_dim, resolution=resolution_))
self.pos_embed = posembed
if posembed:
self.poss = nn.Parameter(torch.zeros(1, resolution ** 2 + 1, in_dim))
trunc_normal_(self.poss, std=.02)
self.stride = stride
self.resolution = resolution
@torch.no_grad()
def train(self, mode=True):
super().train(mode)
if mode and hasattr(self, 'ab'):
del self.ab
def set_prune_ratio(self, prune_ratio):
pass
def forward(self, x):
global global_attn # ga
global ori_indices # oi
global learn_tradeoff_mode
if isinstance(x, tuple):
x = x[0]
# recover sequence
old_global_scale = torch.sum(global_attn, dim=1, keepdim=True)
x_patch = x[:, 1:]
indices = torch.argsort(ori_indices, dim=1)
x_ga_oi = torch.cat((x_patch, global_attn.unsqueeze(-1), ori_indices.unsqueeze(-1)), dim=-1)
x_ga_oi = easy_gather(x_ga_oi, indices)
x_patch, ga_oi = x_ga_oi[:, :, :-2], x_ga_oi[:, :, -2:]
# subsample global attn and ori indices
ga_oi = self.q[0](ga_oi, False)
global_attn, ori_indices = ga_oi[:, :, 0], ga_oi[:, :, 1]
# global_attn, ori_indices = ga_oi[:, :, 0], ga_oi[:, :, 1]
if self.training:
x = torch.cat((x[:, :1], x_patch), dim=1)
else:
x[:, 1:] = x_patch
x = x + self.poss
B, N, C = x.shape
k, v = self.kv(x).view(B, N, self.num_heads, -
1).split([self.key_dim, self.d], dim=3)
k = k.permute(0, 2, 1, 3) # BHNC
v = v.permute(0, 2, 1, 3) # BHNC
q = self.q(x).view(B, self.resolution_2 + 1, self.num_heads,
self.key_dim).permute(0, 2, 1, 3)
attn_raw = (q @ k.transpose(-2, -1)) * self.scale
attn = attn_raw.softmax(dim=-1)
cls_attn = torch.mean(attn[:, :, 0, 1:], dim=1) # B,N
cls_attn = self.q[0](cls_attn.unsqueeze(-1), False).squeeze(-1)
if learn_tradeoff_mode:
tradeoff = self.sigmoid(self.learn_tradeoff)
else:
tradeoff = self.tradeoff
global_attn = (1 - tradeoff) * global_attn + tradeoff * cls_attn
# normalize global attention
new_global_scale = torch.sum(global_attn, dim=1, keepdim=True)
scale = old_global_scale / new_global_scale
global_attn = global_attn * scale
x = (attn @ v).transpose(1, 2).reshape(B, -1, self.dh)
x = self.proj(x)
return x
class LeViT(torch.nn.Module):
""" Vision Transformer with support for patch or hybrid CNN input stage
"""
def __init__(self, img_size=224,
patch_size=16,
in_chans=3,
num_classes=1000,
embed_dim=[192],
key_dim=[64],
depth=[12],
num_heads=[3],
attn_ratio=[2],
mlp_ratio=[2],
hybrid_backbone=None,
down_ops=[],
attention_activation=torch.nn.Hardswish,
mlp_activation=torch.nn.Hardswish,
distillation=True,
drop_path=0, prune_ratio=None):
super().__init__()
self.stage_wise_prune = True
self.num_classes = num_classes
self.num_features = embed_dim[-1]
self.embed_dim = embed_dim
self.distillation = distillation
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim[0]))
self.patch_embed = hybrid_backbone
self.pos_embed = True
self.blocks = []
self.stage_blocks = []
down_ops.append([''])
resolution = img_size // patch_size
if self.pos_embed:
self.poss = nn.Parameter(torch.zeros(1, resolution ** 2 + 1, embed_dim[0]))
trunc_normal_(self.poss, std=.02)
self.prune_ratio = prune_ratio[0]
self.stage_prune_ratio = prune_ratio[1]
layer_index = -1
n = 14
j = 0
for i, (ed, kd, dpth, nh, ar, mr, do) in enumerate(
zip(embed_dim, key_dim, depth, num_heads, attn_ratio, mlp_ratio, down_ops)):
stage_subblocks = []
for _ in range(dpth):
layer_index += 1
m1 = Residual(Attention(
ed, kd, nh,
attn_ratio=ar,
activation=attention_activation,
resolution=resolution,
posembed=self.pos_embed
), drop_path, out_raw=True)
if self.prune_ratio[layer_index] == 1:
self.stage_blocks.append(m1)
else:
stage_subblocks.append(m1)
if mr > 0:
h = int(ed * mr)
m2 = Residual(torch.nn.Sequential(
Linear_BN(ed, h, resolution=resolution),
mlp_activation(),
Linear_BN(h, ed, bn_weight_init=0,
resolution=resolution),
), drop_path, out_raw=True)
else:
m2 = torch.nn.Identity()
if self.prune_ratio[layer_index] == 1:
self.stage_blocks.append(m2)
else:
stage_subblocks.append(m2)
self.blocks.append(CatModule(m1, m2, prune_ratio=self.prune_ratio[layer_index], N=n ** 2))
if self.prune_ratio[layer_index] < 1:
j = j + 1
if len(stage_subblocks) != 0:
stage_subblocks = torch.nn.ModuleList(stage_subblocks)
self.stage_blocks.append(StageModule(stage_subblocks, prune_ratio=self.stage_prune_ratio[i]))
if do[0] == 'Subsample':
n = int((n + 1) / 2)
# ('Subsample',key_dim, num_heads, attn_ratio, mlp_ratio, stride)
resolution_ = (resolution - 1) // do[5] + 1
subsample = AttentionSubsample(
*embed_dim[i:i + 2], key_dim=do[1], num_heads=do[2],
attn_ratio=do[3],
activation=attention_activation,
stride=do[5],
resolution=resolution,
resolution_=resolution_,
posembed=self.pos_embed)
self.blocks.append(subsample)
self.stage_blocks.append(subsample)
resolution = resolution_
if do[4] > 0: # mlp_ratio
h = int(embed_dim[i + 1] * do[4])
ffn = Residual(torch.nn.Sequential(
Linear_BN(embed_dim[i + 1], h,
resolution=resolution),
mlp_activation(),
Linear_BN(
h, embed_dim[i + 1], bn_weight_init=0, resolution=resolution),
), drop_path)
self.blocks.append(ffn)
self.stage_blocks.append(ffn)
self.blocks = torch.nn.Sequential(*self.blocks)
self.stage_blocks = torch.nn.Sequential(*self.stage_blocks)
# Classifier head
self.head = BN_Linear(
embed_dim[-1], num_classes) if num_classes > 0 else torch.nn.Identity()
if distillation:
self.head_dist = BN_Linear(
embed_dim[-1], num_classes) if num_classes > 0 else torch.nn.Identity()
self.clsc = True
if self.clsc:
self.head_cls = BN_Linear(
embed_dim[-1], num_classes) if num_classes > 0 else torch.nn.Identity()
if distillation:
self.head_cls_dist = BN_Linear(
embed_dim[-1], num_classes) if num_classes > 0 else torch.nn.Identity()
@torch.jit.ignore
def no_weight_decay(self):
return {x for x in self.state_dict().keys() if 'poss' in x}
def set_learn_tradeoff(self, mode):
global learn_tradeoff_mode
learn_tradeoff_mode = mode
def set_prune_ratio(self, mode):
pass
def remove_cls(self):
if hasattr(self, 'head_cls'):
del self.head_cls
if hasattr(self, 'head_cls_dist'):
del self.head_cls_dist
def forward(self, x):
global global_attn
global ori_indices
global learn_tradeoff_mode
global_attn = 0
x = self.patch_embed(x)
x = x.flatten(2).transpose(1, 2)
ori_indices = torch.arange(x.shape[1], dtype=torch.long, device=x.device).unsqueeze(0)
ori_indices = ori_indices.expand(x.shape[0], -1)
cls_token = self.cls_token.expand(x.shape[0], -1, -1)
x = torch.cat((cls_token, x), 1)
if self.pos_embed:
x = x + self.poss
if self.stage_wise_prune:
x = self.stage_blocks(x)
else:
x = self.blocks(x)
cls = x[:, 0, :]
x = x[:, 1:, :]
x = x.mean(1)
if self.distillation:
x = self.head(x), self.head_dist(x)
if self.clsc:
if self.training:
xcls = self.head_cls(cls)
xcls_dist = self.head_cls_dist(cls)
return x[0], x[1], xcls, xcls_dist
else:
return (x[0] + x[1]) / 2
if not self.training:
x = (x[0] + x[1]) / 2
else:
x = self.head(x)
return x
def model_factory(C, D, X, N, drop_path, weights,
num_classes, distillation, pretrained, fuse, prune_ratio):
embed_dim = [int(x) for x in C.split('_')]
num_heads = [int(x) for x in N.split('_')]
depth = [int(x) for x in X.split('_')]
act = torch.nn.Hardswish
model = LeViT(
patch_size=16,
embed_dim=embed_dim,
num_heads=num_heads,
key_dim=[D] * 3,
depth=depth,
attn_ratio=[2, 2, 2],
mlp_ratio=[2, 2, 2],
down_ops=[
# ('Subsample',key_dim, num_heads, attn_ratio, mlp_ratio, stride)
['Subsample', D, embed_dim[0] // D, 4, 2, 2],
['Subsample', D, embed_dim[1] // D, 4, 2, 2],
],
attention_activation=act,
mlp_activation=act,
hybrid_backbone=b16(embed_dim[0], activation=act),
num_classes=num_classes,
drop_path=drop_path,
distillation=distillation,
prune_ratio=prune_ratio
)
if pretrained:
checkpoint = torch.hub.load_state_dict_from_url(
weights, map_location='cpu')
model.load_state_dict(checkpoint['model'])
if fuse:
utils.replace_batchnorm(model)
return model
if __name__ == '__main__':
for name in specification:
net = globals()[name](fuse=False, pretrained=False)
net.eval()
net.remove_cls()
net(torch.randn(4, 3, 224, 224))
print(name, 'Parameters:', sum(p.numel() for p in net.parameters() if p.requires_grad))
|
the-stack_0_14120 | # encoding:utf-8
import sys
sys.path.append("..")
from mf import MF
from utility.matrix import SimMatrix
from utility.similarity import cosine_sp
class ItemCF(MF):
"""
docstring for ItemCF
implement the ItemCF
Sarwar B, Karypis G, Konstan J, et al. Item-based collaborative filtering recommendation algorithms[C]//Proceedings of the 10th international conference on World Wide Web. ACM, 2001: 285-295.
"""
def __init__(self):
super(ItemCF, self).__init__()
self.config.n = 10
# self.init_model()
def init_model(self, k):
super(ItemCF, self).init_model(k)
# def init_model(self):
# self.item_sim = SimMatrix()
# for i_test in self.rg.testSet_i:
# for i_train in self.rg.item:
# if i_test != i_train:
# if self.item_sim.contains(i_test, i_train):
# continue
# sim = cosine_sp(self.rg.get_col(i_test), self.rg.get_col(i_train))
# self.item_sim.set(i_test, i_train, sim)
def predict(self, u, i):
item_sim = dict()
for i_train in self.rg.item:
if i != i_train:
if i_train in item_sim:
continue
sim = cosine_sp(self.rg.get_col(i), self.rg.get_col(i_train))
item_sim[i_train] = sim
matchItems = sorted(item_sim.items(), key=lambda x: x[1], reverse=True)
itemCount = self.config.n
if itemCount > len(matchItems):
itemCount = len(matchItems)
sum, denom = 0, 0
for n in range(itemCount):
similarItem = matchItems[n][0]
if self.rg.containsUserItem(u, similarItem):
similarity = matchItems[n][1]
rating = self.rg.trainSet_u[u][similarItem]
sum += similarity * (rating - self.rg.itemMeans[similarItem])
denom += similarity
if sum == 0:
if not self.rg.containsItem(i):
return self.rg.globalMean
return self.rg.itemMeans[i]
pred = self.rg.itemMeans[i] + sum / float(denom)
# print('finished user:'+str(u)+" item:"+str(i))
return pred
pass
if __name__ == '__main__':
ic = ItemCF()
ic.init_model(0)
print(ic.predict_model())
print(ic.predict_model_cold_users())
ic.init_model(1)
print(ic.predict_model())
print(ic.predict_model_cold_users())
|
the-stack_0_14121 | #!/usr/bin/env python
import json
import os
import requests
from typing import List, Dict
from typing_extensions import Final
# 1 page fetches 100 proposals. Remember to increment the number below periodically
# to match the number of currently open proposals on
# https://github.com/godotengine/godot-proposals/issues.
NUM_PAGES: Final = 15
def main() -> None:
# Change to the directory where the script is located,
# so that the script can be run from any location.
os.chdir(os.path.dirname(os.path.realpath(__file__)))
print("[*] Fetching proposal JSON pages...")
all_proposals: List[Dict] = []
for page in range(1, NUM_PAGES + 1):
print(f" Requesting batch of proposals {page}/{NUM_PAGES}...")
request = requests.get(
f"https://api.github.com/repos/godotengine/godot-proposals/issues?state=open&per_page=100&page={page}",
headers={"Accept": "application/vnd.github.squirrel-girl-preview"},
)
request_dict = json.loads(request.text)
for proposal in request_dict:
# Only include fields we use on the frontend.
all_proposals.append(
{
"id": proposal["id"],
"number": proposal["number"],
"title": proposal["title"],
"created_at": proposal["created_at"],
"html_url": proposal["html_url"],
"user": {"login": proposal["user"]["login"]},
"comments": proposal["comments"],
"reactions": {
"+1": proposal["reactions"]["+1"],
"-1": proposal["reactions"]["-1"],
},
}
)
print("[*] Saving proposals.json...")
with open("proposals.json", "w") as file:
json.dump(all_proposals, file)
print("[*] Success!")
if __name__ == "__main__":
main()
|
the-stack_0_14122 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class AzureWorkloadJobExtendedInfo(Model):
"""Azure VM workload-specific additional information for job.
:param tasks_list: List of tasks for this job
:type tasks_list:
list[~azure.mgmt.recoveryservicesbackup.models.AzureWorkloadJobTaskDetails]
:param property_bag: Job properties.
:type property_bag: dict[str, str]
:param dynamic_error_message: Non localized error message on job
execution.
:type dynamic_error_message: str
"""
_attribute_map = {
'tasks_list': {'key': 'tasksList', 'type': '[AzureWorkloadJobTaskDetails]'},
'property_bag': {'key': 'propertyBag', 'type': '{str}'},
'dynamic_error_message': {'key': 'dynamicErrorMessage', 'type': 'str'},
}
def __init__(self, **kwargs):
super(AzureWorkloadJobExtendedInfo, self).__init__(**kwargs)
self.tasks_list = kwargs.get('tasks_list', None)
self.property_bag = kwargs.get('property_bag', None)
self.dynamic_error_message = kwargs.get('dynamic_error_message', None)
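# Hypothetical construction example (the field values below are placeholder
# assumptions, not values returned by the service):
def _example_extended_info():
    return AzureWorkloadJobExtendedInfo(
        tasks_list=None,
        property_bag={'Job Type': 'Backup'},
        dynamic_error_message=None,
    )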
|
the-stack_0_14127 | __title__ = 'splitwise'
__description__ = 'Splitwise Python SDK'
__version__ = '2.2.0'
__url__ = 'https://github.com/namaggarwal/splitwise'
__download_url__ = 'https://github.com/namaggarwal/splitwise/tarball/v'+__version__
__build__ = 0x022400
__author__ = 'Naman Aggarwal'
__author_email__ = '[email protected]'
__license__ = 'MIT'
__copyright__ = 'Copyright 2020 Naman Aggarwal'
|
the-stack_0_14131 | # Copyright 2020 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing NVIDIA Driver installation.
"""
import re
from absl import flags
from absl import logging
from perfkitbenchmarker import flag_util
from perfkitbenchmarker import os_types
from perfkitbenchmarker import regex_util
NVIDIA_DRIVER_LOCATION_BASE = 'https://us.download.nvidia.com/tesla'
NVIDIA_TESLA_K80 = 'k80'
NVIDIA_TESLA_P4 = 'p4'
NVIDIA_TESLA_P100 = 'p100'
NVIDIA_TESLA_V100 = 'v100'
NVIDIA_TESLA_T4 = 't4'
NVIDIA_TESLA_A100 = 'a100'
"""Default GPU clocks and autoboost configurations.
Base_clock holds the default clock speeds used when setting the GPU clocks.
Max_clock is currently unused. The clock speeds are in the format of
[memory_clock in MHz, graphics_clock in MHz].
"""
GPU_DEFAULTS = {
NVIDIA_TESLA_K80: {
'base_clock': [2505, 562],
'max_clock': [2505, 875],
'autoboost_enabled': True,
},
NVIDIA_TESLA_P4: {
'base_clock': [3003, 885],
'max_clock': [3003, 1531],
'autoboost_enabled': None,
},
NVIDIA_TESLA_P100: {
'base_clock': [715, 1189],
'max_clock': [715, 1328],
'autoboost_enabled': None,
},
NVIDIA_TESLA_V100: {
'base_clock': [877, 1312],
'max_clock': [877, 1530],
'autoboost_enabled': None,
},
NVIDIA_TESLA_T4: {
'base_clock': [5001, 585],
'max_clock': [5001, 1590],
'autoboost_enabled': None,
},
NVIDIA_TESLA_A100: {
'base_clock': [1215, 1410],
'max_clock': [1215, 1410],
'autoboost_enabled': None,
},
}
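# Illustrative sketch (not part of the module): how the per-GPU defaults above
# translate into the `nvidia-smi -ac` command issued by SetGpuClockSpeed below.
# The device id is a placeholder assumption.
def _example_clock_command(gpu_type=NVIDIA_TESLA_V100, device_id=0):
  memory_clock, graphics_clock = GPU_DEFAULTS[gpu_type]['base_clock']
  # For the v100 defaults this yields 'sudo nvidia-smi -ac 877,1312 --id=0'.
  return 'sudo nvidia-smi -ac {},{} --id={}'.format(
      memory_clock, graphics_clock, device_id)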
EXTRACT_CLOCK_SPEEDS_REGEX = r'(\d*).*,\s*(\d*)'
flag_util.DEFINE_integerlist('gpu_clock_speeds',
None,
'desired gpu clock speeds in the form '
'[memory clock, graphics clock]')
flags.DEFINE_boolean('gpu_autoboost_enabled', None,
'whether gpu autoboost is enabled')
flags.DEFINE_string(
'nvidia_driver_version', '495.29.05',
'The version of nvidia driver to install. '
'For example, "418.67" or "418.87.01."')
flags.DEFINE_boolean('nvidia_driver_force_install', False,
'Whether to install NVIDIA driver, even if it is already '
'installed.')
flags.DEFINE_string('nvidia_driver_x_library_path', '/usr/lib',
'X library path for nvidia driver installation')
flags.DEFINE_string('nvidia_driver_x_module_path', '/usr/lib/xorg/modules',
'X module path for nvidia driver installation')
flags.DEFINE_boolean('nvidia_driver_persistence_mode', None,
'whether to enable persistence mode on the NVIDIA GPU')
FLAGS = flags.FLAGS
class UnsupportedClockSpeedError(Exception):
pass
class NvidiaSmiParseOutputError(Exception):
pass
class HeterogeneousGpuTypesError(Exception):
pass
class UnsupportedGpuTypeError(Exception):
pass
def CheckNvidiaGpuExists(vm):
"""Returns whether NVIDIA GPU exists or not on the vm.
Args:
vm: The virtual machine to check.
Returns:
True or False depending on whether NVIDIA GPU exists.
"""
# PKB only supports NVIDIA driver on DEBIAN for now.
if vm.BASE_OS_TYPE != os_types.DEBIAN:
return False
vm.Install('pciutils')
output, _ = vm.RemoteCommand('sudo lspci', should_log=True)
regex = re.compile(r'3D controller: NVIDIA Corporation')
return regex.search(output) is not None
def CheckNvidiaSmiExists(vm):
"""Returns whether nvidia-smi is installed or not on a VM.
Args:
vm: The virtual to check.
Returns:
True or False depending on whether nvidia-smi command exists.
"""
# PKB only supports NVIDIA driver on DEBIAN for now.
if vm.BASE_OS_TYPE != os_types.DEBIAN:
return False
resp, _ = vm.RemoteHostCommand('command -v nvidia-smi',
ignore_failure=True,
suppress_warning=True)
return bool(resp.rstrip())
def GetDriverVersion(vm):
"""Returns the NVIDIA driver version as a string.
Args:
vm: Virtual machine to query.
Returns:
String containing NVIDIA driver version installed.
Raises:
NvidiaSmiParseOutputError: If nvidia-smi output cannot be parsed.
"""
stdout, _ = vm.RemoteCommand('nvidia-smi', should_log=True)
regex = r'Driver Version\:\s+(\S+)'
match = re.search(regex, stdout)
if match:
return str(match.group(1))
raise NvidiaSmiParseOutputError('Unable to parse driver version from {}'
.format(stdout))
def GetGpuType(vm):
"""Return the type of NVIDIA gpu(s) installed on the vm.
Args:
vm: Virtual machine to query.
Returns:
Type of gpus installed on the vm as a string.
Raises:
NvidiaSmiParseOutputError: If nvidia-smi output cannot be parsed.
HeterogeneousGpuTypesError: If more than one gpu type is detected.
UnsupportedClockSpeedError: If gpu type is not supported.
Example:
If 'nvidia-smi -L' returns:
GPU 0: Tesla V100-SXM2-16GB (UUID: GPU-1a046bb9-e456-45d3-5a35-52da392d09a5)
GPU 1: Tesla V100-SXM2-16GB (UUID: GPU-56cf4732-054c-4e40-9680-0ec27e97d21c)
GPU 2: Tesla V100-SXM2-16GB (UUID: GPU-4c7685ad-4b3a-8adc-ce20-f3a945127a8a)
GPU 3: Tesla V100-SXM2-16GB (UUID: GPU-0b034e63-22be-454b-b395-382e2d324728)
GPU 4: Tesla V100-SXM2-16GB (UUID: GPU-b0861159-4727-ef2f-ff66-73a765f4ecb6)
GPU 5: Tesla V100-SXM2-16GB (UUID: GPU-16ccaf51-1d1f-babe-9f3d-377e900bf37e)
GPU 6: Tesla V100-SXM2-16GB (UUID: GPU-6eba1fa6-de10-80e9-ec5f-4b8beeff7e12)
GPU 7: Tesla V100-SXM2-16GB (UUID: GPU-cba5a243-219c-df12-013e-1dbc98a8b0de)
  GetGpuType() will parse the gpu type as 'V100-SXM2-16GB' on each of the 8
  lines and, because they all match, return the constant
  NVIDIA_TESLA_V100 ('v100').
"""
stdout, _ = vm.RemoteCommand('nvidia-smi -L', should_log=True)
try:
gpu_types = []
for line in stdout.splitlines():
if not line:
continue
splitted = line.split()
if splitted[2] in ('Tesla', 'NVIDIA'):
gpu_types.append(splitted[3])
else:
gpu_types.append(splitted[2])
  except (IndexError, ValueError):
raise NvidiaSmiParseOutputError('Unable to parse gpu type from {}'
.format(stdout))
if any(gpu_type != gpu_types[0] for gpu_type in gpu_types):
raise HeterogeneousGpuTypesError(
'PKB only supports one type of gpu per VM')
if 'K80' in gpu_types[0]:
return NVIDIA_TESLA_K80
if 'P4' in gpu_types[0]:
return NVIDIA_TESLA_P4
if 'P100' in gpu_types[0]:
return NVIDIA_TESLA_P100
if 'V100' in gpu_types[0]:
return NVIDIA_TESLA_V100
if 'T4' in gpu_types[0]:
return NVIDIA_TESLA_T4
if 'A100' in gpu_types[0]:
return NVIDIA_TESLA_A100
raise UnsupportedClockSpeedError(
'Gpu type {0} is not supported by PKB'.format(gpu_types[0]))
def QueryNumberOfGpus(vm):
"""Returns the number of NVIDIA GPUs on the system.
Args:
vm: Virtual machine to query.
Returns:
Integer indicating the number of NVIDIA GPUs present on the vm.
"""
stdout, _ = vm.RemoteCommand('sudo nvidia-smi --query-gpu=count --id=0 '
'--format=csv', should_log=True)
return int(stdout.split()[1])
def GetPeerToPeerTopology(vm):
"""Returns a string specifying which GPUs can access each other via p2p.
Args:
vm: Virtual machine to operate on.
Example:
If p2p topology from nvidia-smi topo -p2p r looks like this:
0 1 2 3
0 X OK NS NS
1 OK X NS NS
2 NS NS X OK
3 NS NS OK X
GetTopology will return 'Y Y N N;Y Y N N;N N Y Y;N N Y Y'
"""
stdout, _ = vm.RemoteCommand('nvidia-smi topo -p2p r', should_log=True)
lines = [line.split() for line in stdout.splitlines()]
num_gpus = len(lines[0])
results = []
for idx, line in enumerate(lines[1:]):
if idx >= num_gpus:
break
results.append(' '.join(line[1:]))
# Delimit each GPU result with semicolons,
# and simplify the result character set to 'Y' and 'N'.
return (';'.join(results)
.replace('X', 'Y') # replace X (self) with Y
.replace('OK', 'Y') # replace OK with Y
.replace('NS', 'N')) # replace NS (not supported) with N
def SetAndConfirmGpuClocks(vm):
"""Sets and confirms the GPU clock speed and autoboost policy.
The clock values are provided either by the gpu_pcie_bandwidth_clock_speeds
flags, or from gpu-specific defaults. If a device is queried and its
clock speed does not align with what it was just set to, an exception will
be raised.
Args:
vm: The virtual machine to operate on.
Raises:
UnsupportedClockSpeedError: If a GPU did not accept the
provided clock speeds.
"""
gpu_type = GetGpuType(vm)
gpu_clock_speeds = GPU_DEFAULTS[gpu_type]['base_clock']
autoboost_enabled = GPU_DEFAULTS[gpu_type]['autoboost_enabled']
if FLAGS.gpu_clock_speeds is not None:
gpu_clock_speeds = FLAGS.gpu_clock_speeds
if FLAGS.gpu_autoboost_enabled is not None:
autoboost_enabled = FLAGS.gpu_autoboost_enabled
desired_memory_clock = gpu_clock_speeds[0]
desired_graphics_clock = gpu_clock_speeds[1]
EnablePersistenceMode(vm)
SetGpuClockSpeed(vm, desired_memory_clock, desired_graphics_clock)
SetAutoboostDefaultPolicy(vm, autoboost_enabled)
num_gpus = QueryNumberOfGpus(vm)
for i in range(num_gpus):
if QueryGpuClockSpeed(vm, i) != (desired_memory_clock,
desired_graphics_clock):
raise UnsupportedClockSpeedError(
'Unrecoverable error setting GPU #{} clock speed to {},{}'.format(
i, desired_memory_clock, desired_graphics_clock))
def SetGpuClockSpeed(vm, memory_clock_speed, graphics_clock_speed):
"""Sets autoboost and memory and graphics clocks to the specified frequency.
Args:
vm: Virtual machine to operate on.
memory_clock_speed: Desired speed of the memory clock, in MHz.
graphics_clock_speed: Desired speed of the graphics clock, in MHz.
"""
num_gpus = QueryNumberOfGpus(vm)
for device_id in range(num_gpus):
current_clock_speeds = QueryGpuClockSpeed(vm, device_id)
if current_clock_speeds != (memory_clock_speed, graphics_clock_speed):
vm.RemoteCommand('sudo nvidia-smi -ac {},{} --id={}'.format(
memory_clock_speed,
graphics_clock_speed,
device_id
))
def QueryGpuClockSpeed(vm, device_id):
"""Returns the value of the memory and graphics clock.
All clock values are in MHz.
Args:
vm: Virtual machine to operate on.
device_id: Id of GPU device to query.
Returns:
Tuple of clock speeds in MHz in the form (memory clock, graphics clock).
"""
query = ('sudo nvidia-smi --query-gpu=clocks.applications.memory,'
'clocks.applications.graphics --format=csv --id={0}'
.format(device_id))
stdout, _ = vm.RemoteCommand(query, should_log=True)
clock_speeds = stdout.splitlines()[1]
matches = regex_util.ExtractAllMatches(EXTRACT_CLOCK_SPEEDS_REGEX,
clock_speeds)[0]
return (int(matches[0]), int(matches[1]))
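# Rough sketch (the sample line is an assumption) of how
# EXTRACT_CLOCK_SPEEDS_REGEX is applied to one CSV data line from the query
# above.
def _example_parse_clock_line(clock_line='2505 MHz, 875 MHz'):
  matches = regex_util.ExtractAllMatches(EXTRACT_CLOCK_SPEEDS_REGEX, clock_line)[0]
  # Yields (2505, 875) for the sample line above.
  return (int(matches[0]), int(matches[1]))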
def EnablePersistenceMode(vm):
"""Enables persistence mode on the NVIDIA driver.
Args:
vm: Virtual machine to operate on.
"""
vm.RemoteCommand('sudo nvidia-smi -pm 1')
def SetAutoboostDefaultPolicy(vm, autoboost_enabled):
"""Sets the autoboost policy to the specified value.
For each GPU on the VM, this function will set the autoboost policy
to the value specified by autoboost_enabled.
Args:
vm: Virtual machine to operate on.
autoboost_enabled: Bool or None. Value (if any) to set autoboost policy to
"""
if autoboost_enabled is None:
return
num_gpus = QueryNumberOfGpus(vm)
for device_id in range(num_gpus):
current_state = QueryAutoboostPolicy(vm, device_id)
if current_state['autoboost_default'] != autoboost_enabled:
vm.RemoteCommand('sudo nvidia-smi --auto-boost-default={0} --id={1}'
.format(1 if autoboost_enabled else 0, device_id))
def QueryAutoboostPolicy(vm, device_id):
"""Returns the state of autoboost and autoboost_default.
Args:
vm: Virtual machine to operate on.
device_id: Id of GPU device to query.
Returns:
Dict containing values for autoboost and autoboost_default.
Values can be True (autoboost on), False (autoboost off),
and None (autoboost not supported).
Raises:
NvidiaSmiParseOutputError: If output from nvidia-smi can not be parsed.
"""
autoboost_regex = r'Auto Boost\s*:\s*(\S+)'
autoboost_default_regex = r'Auto Boost Default\s*:\s*(\S+)'
query = 'sudo nvidia-smi -q -d CLOCK --id={0}'.format(device_id)
stdout, _ = vm.RemoteCommand(query, should_log=True)
autoboost_match = re.search(autoboost_regex, stdout)
autoboost_default_match = re.search(autoboost_default_regex, stdout)
nvidia_smi_output_string_to_value = {
'On': True,
'Off': False,
'N/A': None,
}
if (autoboost_match is None) or (autoboost_default_match is None):
raise NvidiaSmiParseOutputError('Unable to parse Auto Boost policy from {}'
.format(stdout))
return {
'autoboost': nvidia_smi_output_string_to_value[
autoboost_match.group(1)],
'autoboost_default': nvidia_smi_output_string_to_value[
autoboost_default_match.group(1)]
}
def GetMetadata(vm):
"""Returns gpu-specific metadata as a dict.
Args:
vm: Virtual machine to operate on.
Returns:
A dict of gpu-specific metadata.
"""
clock_speeds = QueryGpuClockSpeed(vm, 0)
autoboost_policy = QueryAutoboostPolicy(vm, 0)
return {
'gpu_memory_clock': clock_speeds[0],
'gpu_graphics_clock': clock_speeds[1],
'gpu_autoboost': autoboost_policy['autoboost'],
'gpu_autoboost_default': autoboost_policy['autoboost_default'],
'nvidia_driver_version': GetDriverVersion(vm),
'gpu_type': GetGpuType(vm),
'num_gpus': QueryNumberOfGpus(vm),
'peer_to_peer_gpu_topology': GetPeerToPeerTopology(vm),
}
def DoPostInstallActions(vm):
"""Perform post NVIDIA driver install action on the vm.
Args:
vm: The virtual machine to operate on.
"""
SetAndConfirmGpuClocks(vm)
def Install(vm):
"""Install NVIDIA GPU driver on the vm.
Args:
vm: The virtual machine to install NVIDIA driver on.
"""
version_to_install = FLAGS.nvidia_driver_version
if not version_to_install:
logging.info('--nvidia_driver_version unset. Not installing.')
return
elif not FLAGS.nvidia_driver_force_install and CheckNvidiaSmiExists(vm):
    logging.warning('NVIDIA drivers already detected. Not installing.')
return
location = ('{base}/{version}/NVIDIA-Linux-x86_64-{version}.run'
.format(base=NVIDIA_DRIVER_LOCATION_BASE,
version=version_to_install))
vm.Install('wget')
tokens = re.split('/', location)
filename = tokens[-1]
vm.RemoteCommand('wget {location} && chmod 755 {filename} '
.format(location=location, filename=filename),
should_log=True)
vm.RemoteCommand('sudo ./{filename} -q -x-module-path={x_module_path} '
'--ui=none -x-library-path={x_library_path} '
'--no-install-compat32-libs'
.format(filename=filename,
x_module_path=FLAGS.nvidia_driver_x_module_path,
x_library_path=FLAGS.nvidia_driver_x_library_path),
should_log=True)
if FLAGS.nvidia_driver_persistence_mode:
EnablePersistenceMode(vm)
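# Hypothetical end-to-end sketch (not part of the module) showing how the
# helpers above are typically combined for a GPU VM object.
def _example_install_and_tune(vm):
  Install(vm)  # installs the driver version given by --nvidia_driver_version
  if CheckNvidiaSmiExists(vm):
    DoPostInstallActions(vm)  # pins clocks/autoboost to the GPU defaults
    return GetMetadata(vm)    # gpu metadata suitable for benchmark samples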
|
the-stack_0_14133 | #!/usr/bin/env python3
import rich.markup
from pwncat.db import Fact
from pwncat.modules import ModuleFailed
from pwncat.platform.windows import Windows, PowershellError
from pwncat.modules.enumerate import EnumerateModule
class InstalledProgramData(Fact):
def __init__(self, source, path: bool):
super().__init__(source=source, types=["system.programs"])
self.path: bool = path
def title(self, session):
return f"{rich.markup.escape(repr(self.path))}"
class Module(EnumerateModule):
"""Enumerate the current Windows Defender settings on the target"""
PROVIDES = ["system.programs"]
PLATFORM = [Windows]
def enumerate(self, session):
try:
program_files = session.platform.powershell(
'Get-ChildItem "C:\\Program Files","C:\\Program Files (x86)" -ErrorAction SilentlyContinue | Select Fullname'
)[0]
if not isinstance(program_files, list):
program_files = [program_files]
for path in program_files:
yield InstalledProgramData(self.name, path["FullName"])
except (PowershellError, IndexError) as exc:
raise ModuleFailed(
f"failed to list program file directories: {exc}"
) from exc
|
the-stack_0_14134 | from keras.models import Sequential
from keras.layers.core import Dense, Activation, Flatten, Dropout
from keras.layers.embeddings import Embedding
# from keras import optimizers
from preprocess.unsw import generate_dataset
from netlearner.utils import quantile_transform
import numpy as np
generate_dataset(one_hot_encode=False)
raw_train_dataset = np.load('UNSW/train_dataset.npy')
train_labels = np.load('UNSW/train_labels.npy')
raw_valid_dataset = np.load('UNSW/valid_dataset.npy')
valid_labels = np.load('UNSW/valid_labels.npy')
raw_test_dataset = np.load('UNSW/test_dataset.npy')
test_labels = np.load('UNSW/test_labels.npy')
embedded_features = raw_train_dataset[:, -3:]
print(embedded_features.shape)
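# The last three columns hold integer-encoded categorical features.
# vocabulary_dim is the assumed vocabulary size (max code + 1) and the
# embedding width follows the common log2(vocabulary) rule of thumb.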
vocabulary_dim = int(np.amax(embedded_features)) + 1
embedding_dim = int(np.log2(vocabulary_dim)) + 1
num_features = embedded_features.shape[1]
print("|V| =", vocabulary_dim)
print("|E| =", embedding_dim)
print("|F| =", num_features)
model1 = Sequential()
model1.add(Embedding(vocabulary_dim, embedding_dim, input_length=num_features))
model1.add(Flatten())
model1.compile('rmsprop', 'mse')
train_embeddings = model1.predict(embedded_features)
valid_embeddings = model1.predict(raw_valid_dataset[:, -3:])
test_embeddings = model1.predict(raw_test_dataset[:, -3:])
print(train_embeddings.shape)
print(test_embeddings.shape)
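# Column indices handed to quantile_transform; presumably these pick out the
# continuous features of the UNSW records (the three categorical columns were
# already dropped via [:, :-3] and handled by the embedding above).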
columns = np.array(list(range(1, 6)) + list(range(8, 16)) + list(range(17, 19)) +
                   list(range(23, 25)) + [26])
[train_dataset, valid_dataset, test_dataset] = quantile_transform(
raw_train_dataset[:, :-3],
raw_valid_dataset[:, :-3],
raw_test_dataset[:, :-3], columns)
X_train = np.concatenate((train_dataset, train_embeddings), axis=1)
X_valid = np.concatenate((valid_dataset, valid_embeddings), axis=1)
X_test = np.concatenate((test_dataset, test_embeddings), axis=1)
print(X_train.shape, X_test.shape)
num_features = X_train.shape[1]
num_classes = train_labels.shape[1]
model2 = Sequential()
model2.add(Dense(400, input_dim=num_features))
model2.add(Activation('relu'))
model2.add(Dropout(0.8))
model2.add(Dense(512))
model2.add(Activation('relu'))
model2.add(Dropout(0.8))
model2.add(Dense(640))
model2.add(Activation('relu'))
model2.add(Dense(num_classes))
model2.add(Activation('softmax'))
# adam = optimizers.Adam(lr=0.001, decay=0.002)
model2.compile(loss='categorical_crossentropy',
optimizer='adam',
metrics=['accuracy'])
history = model2.fit(X_train, train_labels,
batch_size=100,
epochs=160,
verbose=1,
validation_data=(X_valid, valid_labels))
score = model2.evaluate(X_test, test_labels, batch_size=100, verbose=1)
print('Test score:', score[0])
print('Test accuracy:', score[1])
|
the-stack_0_14136 | #encoding:utf-8
import datetime
import csv
import logging
from multiprocessing import Process
import time
import yaml
from croniter import croniter
from supplier import supply
logger = logging.getLogger(__name__)
def read_own_cron(own_cron_filename, config):
with open(own_cron_filename) as tsv_file:
tsv_reader = csv.DictReader(tsv_file, delimiter='\t')
for row in tsv_reader:
now = datetime.datetime.now()
cron = croniter(row['MASK'])
# prev_run = cron.get_current(datetime.datetime)
prev_run = cron.get_prev(datetime.datetime)
prev_run = cron.get_next(datetime.datetime)
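            # Stepping back with get_prev() and then forward with get_next()
            # appears to leave prev_run on the schedule boundary closest to now;
            # the 0-59.9s window below then reads as "this mask fired within the
            # current minute", so each matching submodule is launched once.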
diff = now - prev_run
diff_seconds = diff.total_seconds()
if diff_seconds >= 0.0 and diff_seconds <= 59.9:
# print(row['submodule_name'], diff_seconds)
# supply(row['submodule_name'], config)
supplying_process = Process(target=supply, args=(row['submodule_name'], config))
supplying_process.start()
time.sleep(2)
def main(config_filename):
with open(config_filename) as config_file:
        config = yaml.safe_load(config_file)
read_own_cron(config['cron_file'], config)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--config', default='configs/prod.yml')
args = parser.parse_args()
main(args.config)
|
the-stack_0_14138 | __author__ = 'Lambert Justo'
import glob # for getting botcogs
import discord
from discord.ext import commands
#import schmoobot.src.credentials as credentials
import credentials
from botcogs.utils import check
#from schmoobot.src.botcogs.utils import check
bot_prefix = "!"
formatter = commands.HelpFormatter(show_check_failure=False)
bot = commands.Bot(command_prefix=bot_prefix, formatter=formatter,
description="Schmoo Bot!", pm_help=None)
prev = None
def list_cogs():
"""
Gets all modules from [cwd]/botcogs/ and puts them in a list to return in the
form of ["botcogs.module1", "botcogs.module2", "botcogs.module3", ...]
:return: list of strings where each module J.py is of form "botcogs.J"
"""
cogs = glob.glob("botcogs/*.py")
cog_list = []
for c in cogs:
cog_list.append("botcogs." +
c.replace("/","\\").split("\\")[1].replace(".py", ""))
return cog_list
# cogs_list = list_cogs()
cogs_list = ["botcogs.rng", "botcogs.searches", "botcogs.misc", "botcogs.mash", "botcogs.blackjack"] # FOR NOW, KEEP A HARDCODED LIST
def load_cog(cog_name : str):
cog_name = cog_name.strip()
if cog_name not in cogs_list:
print("Couldn't find module", cog_name)
return
try:
bot.load_extension(cog_name)
except (ImportError, discord.ClientException, AttributeError) as e:
print("Failed to load cog", cog_name, " due to", str(e))
return
@bot.event
async def on_ready():
print("Logged in as:")
print("name:", bot.user.name)
print("id:", bot.user.id)
print("--------------------------------------------")
@check.is_owner()
@bot.command()
async def load(extension_name : str):
print("got to load function")
"""
attempt to load a cog - a cog is a module that has commands
"""
# strip any whitespace
extension_name = extension_name.strip()
# check if the extension is in the list of loaded botcogs
if extension_name not in cogs_list:
output = "Couldn't find module " + extension_name
await bot.say(output)
return
# attempt to load the extension
try:
bot.load_extension(extension_name)
except (ImportError, discord.ClientException, AttributeError) as e:
output = "Failed to load cog " + extension_name + " due to ", str(e)
await bot.say(output)
return
output = "Loaded " + extension_name + " successfully!"
await bot.say(output)
"""
@bot.group(name="set", pass_context=True)
async def __set__(context):
if context.invoked_subcommand is None:
pass
"""
@bot.command()
@check.is_owner()
async def load_all():
for elem in cogs_list:
load_cog(elem)
await bot.say("Loaded " + str(cogs_list) + " successfully!")
await bot.say("Active commands (syntax: '![command] [extra_argument]'): "
+ str(list(bot.commands.keys())))
@bot.command()
@check.is_owner()
async def unload(extension_name : str):
"""attempt to load an extension (plugin"""
# extension_name = botcogs.<module_name>
extension_name = extension_name.strip()
try:
bot.unload_extension(extension_name)
    except Exception as e:
        await bot.say("Failed to unload cog " + extension_name + " due to " + str(e))
        return
    await bot.say("Unloaded " + extension_name + " successfully!")
@bot.command()
@check.is_owner()
async def reload(extension_name : str):
extension_name = "botcogs." + extension_name
if extension_name not in cogs_list:
await bot.say("Failed to find cog " + str(extension_name))
return
try:
bot.unload_extension(extension_name)
load_cog(extension_name)
except Exception as e:
await bot.say("Failed to reload cog " + extension_name + " due to " + str(e))
return
await bot.say("Reloaded " + extension_name + " successfully!")
@bot.event
async def on_message(message):
await bot.process_commands(message)
@bot.event
async def on_command(command, context):
# not even sure why this is here
pass
@bot.command()
@check.is_owner()
async def bye():
await bot.say("Bye-bye!")
await bot.logout()
bot.run(credentials.email, credentials.password)
|
the-stack_0_14141 | from SeismicReduction import *
set_seed(42) # set seed to standardise results
### Data loading:
dataholder = DataHolder("Glitne", [1300, 1502, 2], [1500, 2002, 2])
dataholder.add_near('./data/3d_nearstack.sgy');
dataholder.add_far('./data/3d_farstack.sgy');
dataholder.add_horizon('./data/Top_Heimdal_subset.txt')
### Processing:
processor = Processor(dataholder)
processed_data = processor(flatten=[True, 12, 52], crop=[False, 120, 200], normalise=True)
### Model analysis:
## PCA
pca = PcaModel(processed_data)
pca.reduce(2)
pca.to_2d()
## UMAP
umap = UmapModel(processed_data)
umap.reduce(umap_neighbours=50, umap_dist=0.01)
## vae
vae = VaeModel(processed_data)
vae.reduce(epochs=50, hidden_size=2, lr=0.0005, plot_loss=False)
vae.to_2d()
## bvae
bvae = BVaeModel(processed_data)
bvae.reduce(epochs=50, hidden_size=2, lr=0.0005, beta=7, plot_loss=False)
bvae.to_2d()
## Visualisation
plot_agent(vae, attr='FF', cmap='magma', vmin=-3, save_path=False)
plot_agent(bvae, attr='FF', cmap='hot', save_path=False)
plot_agent(vae, attr='FF', cmap='magma', save_path=False)
plot_agent(bvae, attr='FF', cmap='winter', save_path=False) |
the-stack_0_14142 | import gdb.printing
class SmallStringPrinter:
"""Print an llvm::SmallString object."""
def __init__(self, val):
self.val = val
def to_string(self):
begin = self.val['BeginX']
end = self.val['EndX']
return begin.cast(gdb.lookup_type("char").pointer()).string(length = end - begin)
def display_hint (self):
return 'string'
class StringRefPrinter:
"""Print an llvm::StringRef object."""
def __init__(self, val):
self.val = val
def to_string(self):
return self.val['Data'].string(length = self.val['Length'])
def display_hint (self):
return 'string'
class SmallVectorPrinter:
"""Print an llvm::SmallVector object."""
class _iterator:
def __init__(self, begin, end):
self.cur = begin
self.end = end
self.count = 0
def __iter__(self):
return self
def next(self):
if self.cur == self.end:
raise StopIteration
count = self.count
self.count = self.count + 1
cur = self.cur
self.cur = self.cur + 1
return '[%d]' % count, cur.dereference()
__next__ = next
def __init__(self, val):
self.val = val
def children(self):
t = self.val.type.template_argument(0).pointer()
begin = self.val['BeginX'].cast(t)
end = self.val['EndX'].cast(t)
return self._iterator(begin, end)
def to_string(self):
t = self.val.type.template_argument(0).pointer()
begin = self.val['BeginX'].cast(t)
end = self.val['EndX'].cast(t)
capacity = self.val['CapacityX'].cast(t)
return 'llvm::SmallVector of length %d, capacity %d' % (end - begin, capacity - begin)
def display_hint (self):
return 'array'
class ArrayRefPrinter:
"""Print an llvm::ArrayRef object."""
class _iterator:
def __init__(self, begin, end):
self.cur = begin
self.end = end
self.count = 0
def __iter__(self):
return self
def next(self):
if self.cur == self.end:
raise StopIteration
count = self.count
self.count = self.count + 1
cur = self.cur
self.cur = self.cur + 1
return '[%d]' % count, cur.dereference()
__next__ = next
def __init__(self, val):
self.val = val
def children(self):
data = self.val['Data']
return self._iterator(data, data + self.val['Length'])
def to_string(self):
return 'llvm::ArrayRef of length %d' % (self.val['Length'])
def display_hint (self):
return 'array'
class OptionalPrinter:
"""Print an llvm::Optional object."""
def __init__(self, value):
self.value = value
class _iterator:
def __init__(self, member, empty):
self.member = member
self.done = empty
def __iter__(self):
return self
def next(self):
if self.done:
raise StopIteration
self.done = True
return ('value', self.member.dereference())
def children(self):
if not self.value['hasVal']:
return self._iterator('', True)
return self._iterator(self.value['storage']['buffer'].address.cast(self.value.type.template_argument(0).pointer()), False)
def to_string(self):
return 'llvm::Optional is %sinitialized' % ('' if self.value['hasVal'] else 'not ')
class DenseMapPrinter:
"Print a DenseMap"
class _iterator:
def __init__(self, key_info_t, begin, end):
self.key_info_t = key_info_t
self.cur = begin
self.end = end
self.advancePastEmptyBuckets()
self.first = True
def __iter__(self):
return self
def advancePastEmptyBuckets(self):
# disabled until the comments below can be addressed
# keeping as notes/posterity/hints for future contributors
return
n = self.key_info_t.name
is_equal = gdb.parse_and_eval(n + '::isEqual')
empty = gdb.parse_and_eval(n + '::getEmptyKey()')
tombstone = gdb.parse_and_eval(n + '::getTombstoneKey()')
# the following is invalid, GDB fails with:
# Python Exception <class 'gdb.error'> Attempt to take address of value
# not located in memory.
# because isEqual took parameter (for the unsigned long key I was testing)
# by const ref, and GDB
# It's also not entirely general - we should be accessing the "getFirst()"
# member function, not the 'first' member variable, but I've yet to figure
# out how to find/call member functions (especially (const) overloaded
# ones) on a gdb.Value.
while self.cur != self.end and (is_equal(self.cur.dereference()['first'], empty) or is_equal(self.cur.dereference()['first'], tombstone)):
self.cur = self.cur + 1
def next(self):
if self.cur == self.end:
raise StopIteration
cur = self.cur
v = cur.dereference()['first' if self.first else 'second']
if not self.first:
self.cur = self.cur + 1
self.advancePastEmptyBuckets()
self.first = True
else:
self.first = False
return 'x', v
def __init__(self, val):
self.val = val
def children(self):
t = self.val.type.template_argument(3).pointer()
begin = self.val['Buckets'].cast(t)
end = (begin + self.val['NumBuckets']).cast(t)
return self._iterator(self.val.type.template_argument(2), begin, end)
def to_string(self):
return 'llvm::DenseMap with %d elements' % (self.val['NumEntries'])
def display_hint(self):
return 'map'
class TwinePrinter:
"Print a Twine"
def __init__(self, val):
self._val = val
def display_hint(self):
return 'string'
def string_from_pretty_printer_lookup(self, val):
'''Lookup the default pretty-printer for val and use it.
If no pretty-printer is defined for the type of val, print an error and
return a placeholder string.'''
pp = gdb.default_visualizer(val)
if pp:
s = pp.to_string()
# The pretty-printer may return a LazyString instead of an actual Python
# string. Convert it to a Python string. However, GDB doesn't seem to
# register the LazyString type, so we can't check
# "type(s) == gdb.LazyString".
if 'LazyString' in type(s).__name__:
s = s.value().address.string()
else:
print(('No pretty printer for {} found. The resulting Twine ' +
'representation will be incomplete.').format(val.type.name))
s = '(missing {})'.format(val.type.name)
return s
def is_twine_kind(self, kind, expected):
if not kind.endswith(expected):
return False
# apparently some GDB versions add the NodeKind:: namespace
# (happens for me on GDB 7.11)
return kind in ('llvm::Twine::' + expected,
'llvm::Twine::NodeKind::' + expected)
def string_from_child(self, child, kind):
'''Return the string representation of the Twine::Child child.'''
if self.is_twine_kind(kind, 'EmptyKind') or self.is_twine_kind(kind, 'NullKind'):
return ''
if self.is_twine_kind(kind, 'TwineKind'):
return self.string_from_twine_object(child['twine'].dereference())
if self.is_twine_kind(kind, 'CStringKind'):
return child['cString'].string()
if self.is_twine_kind(kind, 'StdStringKind'):
val = child['stdString'].dereference()
return self.string_from_pretty_printer_lookup(val)
if self.is_twine_kind(kind, 'StringRefKind'):
val = child['stringRef'].dereference()
pp = StringRefPrinter(val)
return pp.to_string()
if self.is_twine_kind(kind, 'SmallStringKind'):
val = child['smallString'].dereference()
pp = SmallStringPrinter(val)
return pp.to_string()
if self.is_twine_kind(kind, 'CharKind'):
return chr(child['character'])
if self.is_twine_kind(kind, 'DecUIKind'):
return str(child['decUI'])
if self.is_twine_kind(kind, 'DecIKind'):
return str(child['decI'])
if self.is_twine_kind(kind, 'DecULKind'):
return str(child['decUL'].dereference())
if self.is_twine_kind(kind, 'DecLKind'):
return str(child['decL'].dereference())
if self.is_twine_kind(kind, 'DecULLKind'):
return str(child['decULL'].dereference())
if self.is_twine_kind(kind, 'DecLLKind'):
return str(child['decLL'].dereference())
if self.is_twine_kind(kind, 'UHexKind'):
val = child['uHex'].dereference()
return hex(int(val))
print(('Unhandled NodeKind {} in Twine pretty-printer. The result will be '
'incomplete.').format(kind))
return '(unhandled {})'.format(kind)
def string_from_twine_object(self, twine):
'''Return the string representation of the Twine object twine.'''
lhs_str = ''
rhs_str = ''
lhs = twine['LHS']
rhs = twine['RHS']
lhs_kind = str(twine['LHSKind'])
rhs_kind = str(twine['RHSKind'])
lhs_str = self.string_from_child(lhs, lhs_kind)
rhs_str = self.string_from_child(rhs, rhs_kind)
return lhs_str + rhs_str
def to_string(self):
return self.string_from_twine_object(self._val)
pp = gdb.printing.RegexpCollectionPrettyPrinter("LLVMSupport")
pp.add_printer('llvm::SmallString', '^llvm::SmallString<.*>$', SmallStringPrinter)
pp.add_printer('llvm::StringRef', '^llvm::StringRef$', StringRefPrinter)
pp.add_printer('llvm::SmallVectorImpl', '^llvm::SmallVector(Impl)?<.*>$', SmallVectorPrinter)
pp.add_printer('llvm::ArrayRef', '^llvm::(Const)?ArrayRef<.*>$', ArrayRefPrinter)
pp.add_printer('llvm::Optional', '^llvm::Optional<.*>$', OptionalPrinter)
pp.add_printer('llvm::DenseMap', '^llvm::DenseMap<.*>$', DenseMapPrinter)
pp.add_printer('llvm::Twine', '^llvm::Twine$', TwinePrinter)
gdb.printing.register_pretty_printer(gdb.current_objfile(), pp)
|
the-stack_0_14144 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import tqdm
tqdm.monitor_interval = 0 # workaround for https://github.com/tqdm/tqdm/issues/481
class SimpleTqdm():
def __init__(self, iterable=None, total=None, **kwargs):
self.iterable = list(iterable) if iterable is not None else None
self.total = len(self.iterable) if self.iterable is not None else total
assert self.iterable is not None or self.total is not None
self.current_step = 0
self.print_frequency = max(self.total // 50, 1)
self.desc = ""
def set_description_str(self, desc):
self.desc = desc
def set_description(self, desc):
self.desc = desc
def update(self, steps):
last_print_step = (self.current_step // self.print_frequency) * self.print_frequency
i = 1
while last_print_step + i * self.print_frequency <= self.current_step + steps:
print("*", end='')
i += 1
self.current_step += steps
def close(self):
print("\n" + self.desc)
def __iter__(self):
assert self.iterable is not None
self.index = 0
return self
def __next__(self):
if self.index < self.total:
element = self.iterable[self.index]
self.update(1)
self.index += 1
return element
else:
self.close()
raise StopIteration
def tqdm_notebook_failsafe(*args, **kwargs):
try:
return tqdm.tqdm_notebook(*args, **kwargs)
    except Exception:
# tqdm is broken on Google Colab
return SimpleTqdm(*args, **kwargs)
|
the-stack_0_14151 | # -*- coding: UTF-8 -*-
# @Time : 04/02/2020 10:58
# @Author : BubblyYi
# @FileName: seeds_net_data_provider_aug.py
# @Software: PyCharm
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
import torch
import pandas as pd
import os
import numpy as np
import SimpleITK as sitk
import random
class DataGenerater(Dataset):
def __init__(self,data_path, pre_fix_path, transform = None, flag = '', target_transform = None):
self.flag = flag
data = []
print("csv path:",data_path)
csv_data = pd.read_csv(data_path)
x_data = csv_data['patch_name']
if self.flag == 'train' or self.flag == 'val':
proximity = csv_data["proximity"]
for i in range(len(x_data)):
if pre_fix_path is None:
                    data.append((x_data[i], proximity[i]))
else:
temp = os.path.join(pre_fix_path,x_data[i])
data.append((temp, proximity[i]))
else:
for i in range(len(x_data)):
if pre_fix_path is None:
data.append(x_data[i])
else:
temp = os.path.join(pre_fix_path, x_data[i])
data.append(temp)
self.data = data
self.transform = transform
self.target_transform = target_transform
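        # Fraction of training patches that receive additive Gaussian noise;
        # the augmentation is only applied when flag == 'train' (see __getitem__).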
self.p_gaussian_noise = 0.2
def __getitem__(self, index):
if self.flag == 'train' or self.flag == 'val':
data_path, p = self.data[index]
img = sitk.GetArrayFromImage(sitk.ReadImage(data_path, sitk.sitkFloat32))
proximity = p
upper_bound = np.percentile(img, 99.5)
lower_bound = np.percentile(img, 00.5)
img = np.clip(img, lower_bound, upper_bound)
if self.flag=='train':
if np.random.uniform() <= self.p_gaussian_noise:
img = self.augment_gaussian_noise(img)
mean_intensity = np.mean(img)
std_intensity = np.std(img)
img = (img - mean_intensity) / (std_intensity+1e-9)
img = img.astype(np.float32)
img = torch.from_numpy(img)
return img.unsqueeze(0), proximity
elif self.flag == 'test':
data_path = self.data[index]
img = sitk.GetArrayFromImage(sitk.ReadImage(data_path, sitk.sitkFloat32))
upper_bound = np.percentile(img, 99.5)
lower_bound = np.percentile(img, 00.5)
img = np.clip(img, lower_bound, upper_bound)
mean_intensity = np.mean(img)
std_intensity = np.std(img)
            # avoid division by zero
img = (img - mean_intensity) / (std_intensity+1e-9)
img = torch.from_numpy(img)
return img.unsqueeze(0)
def augment_gaussian_noise(self,data_sample, noise_variance=(0, 0.1)):
if noise_variance[0] == noise_variance[1]:
variance = noise_variance[0]
else:
variance = random.uniform(noise_variance[0], noise_variance[1])
data_sample = data_sample + np.random.normal(0.0, variance, size=data_sample.shape)
return data_sample
def __len__(self):
return len(self.data) |
the-stack_0_14153 | import torch
import os
from skimage import io, transform
from torch import nn, optim
from torch.nn import functional as F
from torchvision import datasets, transforms
from torch.autograd import Variable
from torchvision.utils import save_image
import matplotlib.pyplot as plt
import seaborn as sns
from torch.nn.modules.module import _addindent
import numpy as np
import re
from tqdm import tqdm
from scipy.interpolate import interp1d
from sklearn.metrics import confusion_matrix, accuracy_score, mean_squared_error, mean_absolute_error, classification_report, roc_curve, roc_auc_score
from torch.utils.data import DataLoader
from torchsummary import summary
batch_size = 128
epochs = 2000
no_cuda = False
seed = 1
log_interval = 50
cuda = not no_cuda and torch.cuda.is_available()
torch.manual_seed(seed)
device = torch.device("cuda" if cuda else "cpu")
print(device)
kwargs = {'num_workers': 4, 'pin_memory': True} if cuda else {}
# HIGH res
train_root = 'data/deepfake_bgr/train/'
TRANSFORM_IMG = transforms.Compose([
transforms.Resize(100),
# transforms.CenterCrop(100),
transforms.RandomHorizontalFlip(),
transforms.RandomVerticalFlip(),
transforms.ToTensor(),
transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
])
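# The evaluation transform below omits the random flips so test inputs stay
# deterministic; only resizing, tensor conversion and normalisation remain.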
TRANSFORM_IMG_TEST = transforms.Compose([
transforms.Resize(100),
# transforms.CenterCrop(100),
transforms.ToTensor(),
transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
])
train_loader = torch.utils.data.DataLoader(
datasets.ImageFolder(train_root, transform=TRANSFORM_IMG),
batch_size=batch_size, shuffle=True)
# for evaluation/testing
def mse_loss_cal(input, target, avg_batch=True):
ret = torch.mean((input - target) ** 2)
return ret.item()
class VAE_CNN(nn.Module):
def __init__(self):
super(VAE_CNN, self).__init__()
# Encoder
self.conv1 = nn.Conv2d(3, 16, kernel_size=3,
stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(16)
self.conv2 = nn.Conv2d(16, 32, kernel_size=3,
stride=2, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(32)
self.conv3 = nn.Conv2d(32, 64, kernel_size=3,
stride=1, padding=1, bias=False)
self.bn3 = nn.BatchNorm2d(64)
self.conv4 = nn.Conv2d(64, 16, kernel_size=3,
stride=2, padding=1, bias=False)
self.bn4 = nn.BatchNorm2d(16)
#self.drop = nn.Dropout(0.2)
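        # The two stride-2 convolutions halve the 100x100 input twice
        # (100 -> 50 -> 25), so the flattened encoder output is 25 * 25 * 16.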
# Latent vectors mu and sigma
self.fc1 = nn.Linear(25 * 25 * 16, 1024)
self.fc_bn1 = nn.BatchNorm1d(1024)
self.fc21 = nn.Linear(1024, 1024)
self.fc22 = nn.Linear(1024, 1024)
# Sampling vector
self.fc3 = nn.Linear(1024, 1024)
self.fc_bn3 = nn.BatchNorm1d(1024)
self.fc4 = nn.Linear(1024, 25 * 25 * 16)
self.fc_bn4 = nn.BatchNorm1d(25 * 25 * 16)
self.relu = nn.ReLU()
# Decoder
self.conv5 = nn.ConvTranspose2d(
16, 64, kernel_size=3, stride=1, padding=1, bias=False)
self.bn5 = nn.BatchNorm2d(64)
self.conv6 = nn.ConvTranspose2d(
64, 32, kernel_size=3, stride=2, padding=1, output_padding=1, bias=False)
self.bn6 = nn.BatchNorm2d(32)
self.conv7 = nn.ConvTranspose2d(
32, 16, kernel_size=3, stride=1, padding=1, bias=False)
self.bn7 = nn.BatchNorm2d(16)
self.conv8 = nn.ConvTranspose2d(
16, 3, kernel_size=3, stride=2, padding=1, output_padding=1, bias=False)
def encode(self, x):
conv1 = self.relu(self.bn1(self.conv1(x)))
conv2 = self.relu(self.bn2(self.conv2(conv1)))
conv3 = self.relu(self.bn3(self.conv3(conv2)))
conv4 = self.relu(self.bn4(self.conv4(conv3)))
conv4 = conv4.view(-1, 25 * 25 * 16)
fc1 = self.relu(self.fc_bn1(self.fc1(conv4)))
r1 = self.fc21(fc1)
r2 = self.fc22(fc1)
return r1, r2
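    # Reparameterisation trick: z = mu + sigma * eps with sigma = exp(0.5 * logvar)
    # and eps ~ N(0, I), which keeps the sampling step differentiable w.r.t.
    # mu and logvar.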
def reparameterize(self, mu, logvar):
std = logvar.mul(0.50).exp_()
eps = Variable(std.data.new(std.size()).normal_())
return eps.mul(std).add_(mu)
def decode(self, z):
fc3 = self.relu(self.fc_bn3(self.fc3(z)))
fc4 = self.relu(self.fc_bn4(self.fc4(fc3)))
fc4 = fc4.view(-1, 16, 25, 25)
conv5 = self.relu(self.bn5(self.conv5(fc4)))
conv6 = self.relu(self.bn6(self.conv6(conv5)))
conv7 = self.relu(self.bn7(self.conv7(conv6)))
conv8 = self.conv8(conv7)
return conv8.view(-1, 3, 100, 100)
def forward(self, x):
mu, logvar = self.encode(x)
z = self.reparameterize(mu, logvar)
return self.decode(z), mu, logvar
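# VAE objective: summed pixel-wise reconstruction error plus the analytic KL
# divergence between N(mu, sigma^2) and the standard normal,
# KLD = -0.5 * sum(1 + logvar - mu^2 - exp(logvar)).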
class customLoss(nn.Module):
def __init__(self):
super(customLoss, self).__init__()
self.mse_loss = nn.MSELoss(reduction="sum")
def forward(self, x_recon, x, mu, logvar):
loss_MSE = self.mse_loss(x_recon, x)
loss_KLD = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())
return loss_MSE + loss_KLD
model = VAE_CNN().to(device)
optimizer = optim.Adam(model.parameters(), lr=1e-3)
loss_mse = customLoss()
train_losses = []
print(summary(model, (3, 100, 100)))
#ckpt = torch.load("dfdc/vae_pytorch_dfdc_FT_.pt")
# model.load_state_dict(ckpt)
#model = model.to(device)
for epoch in range(1, epochs + 1):
# train(epoch)
model.train()
train_loss = 0
for batch_idx, (data, _) in enumerate(train_loader):
data = data.to(device)
optimizer.zero_grad()
#permute = [2, 1, 0]
#data = data[:, permute, :, :]
recon_batch, mu, logvar = model(data)
loss = loss_mse(recon_batch, data, mu, logvar)
loss.backward()
train_loss += loss.item()
optimizer.step()
if batch_idx % log_interval == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader),
loss.item() / len(data)))
print('====> Epoch: {} Average loss: {:.4f}'.format(
epoch, train_loss / len(train_loader.dataset)))
train_losses.append(train_loss / len(train_loader.dataset))
# EVALUATE YOUR MODEL HERE
# model.eval()
# with torch.no_grad():
plt.figure(figsize=(15, 10))
plt.plot(range(len(train_losses[1:])), train_losses[1:], c="dodgerblue")
plt.title("Loss per epoch", fontsize=18)
plt.xlabel("epoch", fontsize=18)
plt.ylabel("loss", fontsize=18)
plt.legend(['Train. Loss'], fontsize=18)
plt.show()
|