filename | text
---|---|
the-stack_0_4361 | import os
from alipay import AliPay
from django.conf import settings
from django.shortcuts import render
from rest_framework import status
from rest_framework.response import Response
from rest_framework.permissions import IsAuthenticated
from rest_framework.views import APIView
from orders.models import OrderInfo
# Create your views here.
# PUT /payment/status/?<Alipay payment result data>
from payment.models import Payment
class PaymentStatusView(APIView):
permission_classes = [IsAuthenticated]
def put(self,request):
"""
Save the payment result
1. Get the payment result data and verify the Alipay signature
2. Check that the order is valid
3. Save the payment result and update the order's payment status
4. Return the payment transaction number
"""
data = request.query_params.dict()
signature = data.pop('sign')
alipay = AliPay(
appid=settings.ALIPAY_APPID, # app id of the developer application
app_notify_url=None, # default callback url
app_private_key_path=os.path.join(settings.BASE_DIR, 'apps/payment/keys/app_private_key.pem'),
# path to the site's private key file
alipay_public_key_path=os.path.join(settings.BASE_DIR, 'apps/payment/keys/alipay_public_key.pem'),
# path to the Alipay public key file
sign_type="RSA2", # RSA or RSA2
debug=settings.ALIPAY_DEBUG # defaults to False
)
success = alipay.verify(data, signature)
if not success:
return Response({'message':'非法操作'},status=status.HTTP_403_FORBIDDEN)
try:
order = OrderInfo.objects.get(order_id=data.get('out_trade_no'),
user=request.user,
pay_method=OrderInfo.PAY_METHODS_ENUM['ALIPAY'],
status=OrderInfo.ORDER_STATUS_ENUM['UNPAID']
)
except OrderInfo.DoesNotExist:
return Response({'message': '无效的订单id'}, status=status.HTTP_400_BAD_REQUEST)
trade_id = data.get('trade_no')
Payment.objects.create(
order = order,
trade_id=trade_id
)
order.status = OrderInfo.ORDER_STATUS_ENUM['UNSEND']
order.save()
return Response({'trade_id':trade_id})
# GET /orders/(?P<order_id>\d+)/payment/
class PaymentView(APIView):
permission_classes = [IsAuthenticated]
def get(self,request,order_id):
"""
Return the Alipay payment URL
1. Get order_id and check that the order is valid
2. Build the Alipay payment URL and parameters
3. Return the Alipay payment URL
"""
user = request.user
try:
order = OrderInfo.objects.get(order_id=order_id,
user=user,
pay_method=OrderInfo.PAY_METHODS_ENUM['ALIPAY'],
status=OrderInfo.ORDER_STATUS_ENUM['UNPAID']
)
except OrderInfo.DoesNotExist:
return Response({'message': '无效的订单id'}, status=status.HTTP_400_BAD_REQUEST)
# initialize the Alipay SDK client
alipay = AliPay(
appid=settings.ALIPAY_APPID, # app id of the developer application
app_notify_url=None, # default callback url
app_private_key_path=os.path.join(settings.BASE_DIR, 'apps/payment/keys/app_private_key.pem'),
# path to the site's private key file
alipay_public_key_path=os.path.join(settings.BASE_DIR, 'apps/payment/keys/alipay_public_key.pem'),
# path to the Alipay public key file
sign_type="RSA2", # RSA or RSA2
debug=settings.ALIPAY_DEBUG # defaults to False
)
# build the payment parameters
# for desktop web payment, redirect to https://openapi.alipaydev.com/gateway.do? + order_string
total_pay = order.total_amount # Decimal
order_string = alipay.api_alipay_trade_page_pay(
out_trade_no=order_id, # order id
total_amount=str(total_pay),
subject='闫氏商城%s' % order_id, # order title
return_url="http://www.meiduo.site:8080/pay_success.html", # callback address
)
alipay_url = settings.ALIPAY_URL + '?' + order_string
return Response({'alipay_url': alipay_url})
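# The URL wiring for the two views above lives elsewhere in the project; a minimal
# urls.py sketch consistent with the route comments above might look like this
# (the module layout and import path are assumptions, not taken from the project):
#
#   from django.urls import path, re_path
#   from . import views
#
#   urlpatterns = [
#       path('payment/status/', views.PaymentStatusView.as_view()),
#       re_path(r'^orders/(?P<order_id>\d+)/payment/$', views.PaymentView.as_view()),
#   ]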
|
the-stack_0_4366 | from itertools import chain
import glob
import torch
from PIL import Image
from os import path
from torch.utils.data import Dataset
class SegmentationDataset(Dataset):
_EXTENSIONS = ["*.jpg", "*.jpeg", "*.png"]
def __init__(self, in_dir, transform):
super(SegmentationDataset, self).__init__()
self.in_dir = in_dir
self.transform = transform
# Find all images
self.images = []
for img_path in chain(*(glob.iglob(path.join(self.in_dir, ext)) for ext in SegmentationDataset._EXTENSIONS)):
_, name_with_ext = path.split(img_path)
idx, _ = path.splitext(name_with_ext)
self.images.append({
"idx": idx,
"path": img_path
})
def __len__(self):
return len(self.images)
def __getitem__(self, item):
# Load image
with Image.open(self.images[item]["path"]) as img_raw:
size = img_raw.size
img = self.transform(img_raw.convert(mode="RGB"))
return {"img": img, "meta": {"idx": self.images[item]["idx"], "size": size}}
def segmentation_collate(items):
imgs = torch.stack([item["img"] for item in items])
metas = [item["meta"] for item in items]
return {"img": imgs, "meta": metas}
|
the-stack_0_4369 | #!/usr/bin/env python
###############################################################################
# Copyright (C) 1994 - 2009, Performance Dynamics Company #
# #
# This software is licensed as described in the file COPYING, which #
# you should have received as part of this distribution. The terms #
# are also available at http://www.perfdynamics.com/Tools/copyright.html. #
# #
# You may opt to use, copy, modify, merge, publish, distribute and/or sell #
# copies of the Software, and permit persons to whom the Software is #
# furnished to do so, under the terms of the COPYING file. #
# #
# This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY #
# KIND, either express or implied. #
###############################################################################
#
# dbc.c - Teradata DBC-10/12 performance model
#
# PDQ calculation of optimal parallel configuration.
#
# $Id: dbc.py,v 4.3 2009/03/26 02:55:32 pfeller Exp $
#
#---------------------------------------------------------------------
import pdq
#---------------------------------------------------------------------
def itoa(n, s):
    sign = n
    if sign < 0:
        n = -n
    i = 0
    while n > 0:
        # generate digits in reverse order
        s[i] = chr(ord('0') + (n % 10))
        i += 1
        n //= 10
    if sign < 0:
        s[i] = '-'
        i += 1
    s[i] = '\0'
    # reverse the characters written so far (excluding the terminator)
    j = i - 1
    k = 0
    while k < j:
        s[k], s[j] = s[j], s[k]
        k += 1
        j -= 1
#---------------------------------------------------------------------
think = 10.0
importrs = 300
Sifp = 0.10
Samp = 0.60
Sdsu = 1.20
Nifp = 15
Namp = 50
Ndsu = 100
pdq.Init("Teradata DBC-10/12")
# Create parallel centers
for k in range(Nifp):
name = "IFP%d" % k
nodes = pdq.CreateNode(name, pdq.CEN, pdq.FCFS)
for k in range(Namp):
name = "AMP%d" % k
nodes = pdq.CreateNode(name, pdq.CEN, pdq.FCFS)
for k in range(Ndsu):
name = "DSU%d" % k
nodes = pdq.CreateNode(name, pdq.CEN, pdq.FCFS)
streams = pdq.CreateClosed("query", pdq.TERM, importrs, think)
# pdq.SetGraph("query", 100) - unsupported call
for k in range(Nifp):
name = "IFP%d" % k
pdq.SetDemand(name, "query", Sifp / Nifp)
for k in range(Namp):
name = "AMP%d" % k
pdq.SetDemand(name, "query", Samp / Namp)
for k in range(Ndsu):
name = "DSU%d" % k
pdq.SetDemand(name, "query", Sdsu / Ndsu)
# 300 nodes takes about a minute to solve on a PowerMac
print("Solving ... ")
pdq.Solve(pdq.EXACT)
print("Done.\n")
# pdq.PrintXLS()
pdq.Report()
|
the-stack_0_4372 | __author__ = 'Sergey Matyunin'
import numpy as np
def interp2linear(z, xi, yi, extrapval=np.nan):
"""
Linear interpolation equivalent to interp2(z, xi, yi,'linear') in MATLAB
@param z: function defined on square lattice [0..width(z))X[0..height(z))
@param xi: matrix of x coordinates where interpolation is required
@param yi: matrix of y coordinates where interpolation is required
@param extrapval: value for out of range positions. default is numpy.nan
@return: interpolated values in [xi,yi] points
@raise Exception:
"""
x = xi.copy()
y = yi.copy()
nrows, ncols = z.shape
if nrows < 2 or ncols < 2:
raise Exception("z shape is too small")
if not x.shape == y.shape:
raise Exception("sizes of X indexes and Y-indexes must match")
# find x values out of range
x_bad = ( (x < 0) | (x > ncols - 1))
if x_bad.any():
x[x_bad] = 0
# find y values out of range
y_bad = ((y < 0) | (y > nrows - 1))
if y_bad.any():
y[y_bad] = 0
# linear indexing. z must be in 'C' order
ndx = np.floor(y) * ncols + np.floor(x)
ndx = ndx.astype('int32')
# fix parameters on x border
d = (x == ncols - 1)
x = (x - np.floor(x))
if d.any():
x[d] += 1
ndx[d] -= 1
# fix parameters on y border
d = (y == nrows - 1)
y = (y - np.floor(y))
if d.any():
y[d] += 1
ndx[d] -= ncols
# interpolate
one_minus_t = 1 - y
z = z.ravel()
f = (z[ndx] * one_minus_t + z[ndx + ncols] * y ) * (1 - x) + (
z[ndx + 1] * one_minus_t + z[ndx + ncols + 1] * y) * x
# Set out of range positions to extrapval
if x_bad.any():
f[x_bad] = extrapval
if y_bad.any():
f[y_bad] = extrapval
return f
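# Worked example (illustrative): bilinear interpolation on a 3x3 grid whose values
# are 0..8. The point (x=0.5, y=0.5) averages the four surrounding values 0, 1, 3, 4.
if __name__ == "__main__":
    z = np.arange(9, dtype=float).reshape(3, 3)
    xi = np.array([[0.5, 1.5]])
    yi = np.array([[0.5, 1.0]])
    print(interp2linear(z, xi, yi))  # -> [[2.  4.5]]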
|
the-stack_0_4379 | import yaml
d = {'subcommand': 'lottery', 'platform': 'local', 'display_output_location': False, 'num_workers': 0, 'gpu': '6',
'replicate': 2, 'default_hparams': 'mnist_lenet_300_100', 'quiet': False, 'evaluate_only_at_end': False,
'levels': 0, 'rewinding_steps': None, 'pretrain': False, 'dataset_name': 'fashionmnist', 'batch_size': 128,
'do_not_augment': False, 'transformation_seed': None, 'subsample_fraction': None, 'random_labels_fraction': None,
'unsupervised_labels': None, 'blur_factor': None, 'model_name': 'mnist_lenet_300_100',
'model_init': 'kaiming_normal', 'batchnorm_init': 'uniform', 'batchnorm_frozen': False, 'output_frozen': False,
'others_frozen': False, 'others_frozen_exceptions': None, 'optimizer_name': 'sgd', 'lr': 0.1,
'training_steps': '40ep', 'data_order_seed': None, 'momentum': 0.0, 'nesterov_momentum': 0.0,
'milestone_steps': None, 'gamma': None, 'warmup_steps': None, 'weight_decay': None, 'apex_fp16': False,
'pruning_strategy': 'sparse_global', 'pruning_fraction': 0.2, 'pruning_layers_to_ignore': 'fc.weight'}
with open(r'./myyaml.yaml', 'w') as file:
yaml.dump(d, file)
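# Round-trip check (illustrative): the dumped file should load back to the same dict.
with open(r'./myyaml.yaml') as file:
    assert yaml.safe_load(file) == d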
|
the-stack_0_4380 | # -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import
import numpy as np
from alpharotate.utils.pretrain_zoo import PretrainModelZoo
from configs._base_.models.retinanet_r50_fpn import *
from configs._base_.datasets.dota_detection import *
from configs._base_.schedules.schedule_1x import *
# schedule
BATCH_SIZE = 1
GPU_GROUP = "0"
NUM_GPU = len(GPU_GROUP.strip().split(','))
LR = 1e-3
SAVE_WEIGHTS_INTE = 5000
DECAY_STEP = np.array(DECAY_EPOCH, np.int32) * SAVE_WEIGHTS_INTE
MAX_ITERATION = SAVE_WEIGHTS_INTE * MAX_EPOCH
WARM_SETP = int(WARM_EPOCH * SAVE_WEIGHTS_INTE)
# dataset
DATASET_NAME = 'Total_Text'
IMG_SHORT_SIDE_LEN = 512
IMG_MAX_LENGTH = 512
CLASS_NUM = 1
# model
pretrain_zoo = PretrainModelZoo()
PRETRAINED_CKPT = pretrain_zoo.pretrain_weight_path(NET_NAME, ROOT_PATH)
TRAINED_CKPT = os.path.join(ROOT_PATH, 'output/trained_weights')
# loss
CLS_WEIGHT = 1.0
REG_WEIGHT = 0.01
POINT_SAMPLING_NUM = 12
# post-processing
NMS = True
NMS_IOU_THRESHOLD = 0.1
MAXIMUM_DETECTIONS = 20
FILTERED_SCORE = 0.05
VIS_SCORE = 0.75
VERSION = 'RetinaNet_Total-Text_RIDet_1x_20210519'
"""
FLOPs: 489267589; Trainable params: 33244941
"""
|
the-stack_0_4382 | #!/usr/bin/env python
# encoding: utf-8
import argparse
from zstacklib import *
start_time = datetime.now()
# set default value
file_root = "files/appliancevm"
pip_url = "https=//pypi.python.org/simple/"
proxy = ""
sproxy = ""
chroot_env = 'false'
zstack_repo = 'false'
post_url = ""
chrony_servers = None
pkg_appliancevm = ""
virtualenv_version = "12.1.1"
remote_user = "root"
remote_pass = None
remote_port = None
# get parameter from shell
parser = argparse.ArgumentParser(description='Deploy appliancevm to management node')
parser.add_argument('-i', type=str, help="""specify inventory host file
default=/etc/ansible/hosts""")
parser.add_argument('--private-key', type=str, help='use this file to authenticate the connection')
parser.add_argument('-e', type=str, help='set additional variables as key=value or YAML/JSON')
args = parser.parse_args()
argument_dict = eval(args.e)
locals().update(argument_dict)
# update the variable from shell arguments
virtenv_path = "%s/virtualenv/appliancevm/" % zstack_root
appliancevm_root = "%s/appliancevm/package" % zstack_root
# create log
logger_dir = "/var/log/zstack/"
create_log(logger_dir)
host_post_info = HostPostInfo()
host_post_info.host_inventory = args.i
host_post_info.host = host
host_post_info.post_url = post_url
host_post_info.chrony_servers = chrony_servers
host_post_info.private_key = args.private_key
host_post_info.remote_user = remote_user
host_post_info.remote_pass = remote_pass
host_post_info.remote_port = remote_port
if remote_pass is not None and remote_user != 'root':
host_post_info.become = True
# include zstacklib.py
(distro, distro_version, distro_release) = get_remote_host_info(host_post_info)
zstacklib_args = ZstackLibArgs()
zstacklib_args.distro = distro
zstacklib_args.distro_release = distro_release
zstacklib_args.distro_version = distro_version
zstacklib_args.zstack_repo = zstack_repo
zstacklib_args.yum_server = yum_server
zstacklib_args.zstack_root = zstack_root
zstacklib_args.host_post_info = host_post_info
zstacklib_args.pip_url = pip_url
zstacklib_args.trusted_host = trusted_host
zstacklib = ZstackLib(zstacklib_args)
# name: judge this process is init install or upgrade
if file_dir_exist("path=" + appliancevm_root, host_post_info):
init_install = False
else:
init_install = True
# name: create root directories
command = 'mkdir -p %s %s' % (appliancevm_root, virtenv_path)
run_remote_command(command, host_post_info)
run_remote_command("rm -rf %s/*" % appliancevm_root, host_post_info)
# name: copy zstacklib and install
copy_arg = CopyArg()
copy_arg.src = "files/zstacklib/%s" % pkg_zstacklib
copy_arg.dest = "%s/%s" % (appliancevm_root, pkg_zstacklib)
copy_zstacklib = copy(copy_arg, host_post_info)
# name: copy appliancevm and install
copy_arg = CopyArg()
copy_arg.src = "%s/%s" % (file_root, pkg_appliancevm)
copy_arg.dest = "%s/%s" % (appliancevm_root, pkg_appliancevm)
copy_appliancevm = copy(copy_arg, host_post_info)
# name: copy bootstrap script
copy_arg = CopyArg()
copy_arg.src = "%s/zstack-appliancevm-bootstrap.py" % file_root
copy_arg.dest = '/sbin/zstack-appliancevm-bootstrap.py'
copy_arg.args = "mode=0777"
copy(copy_arg, host_post_info)
# name: copy appliancevm service file
copy_arg = CopyArg()
copy_arg.src = "%s/zstack-appliancevm" % file_root
copy_arg.dest = "/etc/init.d/"
copy_arg.args = "mode=755"
copy(copy_arg, host_post_info)
# name: install virtualenv
virtual_env_status = check_and_install_virtual_env(virtualenv_version, trusted_host, pip_url, host_post_info)
if virtual_env_status is False:
command = "rm -rf %s && rm -rf %s" % (virtenv_path, appliancevm_root)
run_remote_command(command, host_post_info)
sys.exit(1)
# name: make sure virtualenv has been setup
command = "[ -f %s/bin/python ] || virtualenv %s " % (virtenv_path, virtenv_path)
run_remote_command(command, host_post_info)
if distro in RPM_BASED_OS:
if zstack_repo != 'false':
# name: install appliance vm related packages on RedHat based OS from user defined repo
command = ("pkg_list=`rpm -q iputils tcpdump ethtool | grep \"not installed\" | awk '{ print $2 }'` && for pkg"
" in $pkg_list; do yum --disablerepo=* --enablerepo=%s install -y $pkg; done;") % zstack_repo
run_remote_command(command, host_post_info)
else:
# name: install appliance vm related packages on RedHat based OS
for pkg in ['iputils', 'tcpdump', 'ethtool']:
yum_install_package("openssh-clients", host_post_info)
if distro_version >= 7:
# name: workaround RHEL7 iptables service issue
command = 'mkdir -p /var/lock/subsys/'
run_remote_command(command, host_post_info)
# name: remove RHEL7 firewalld
yum_remove_package("firewalld", host_post_info)
# name: copy iptables initial rules in RedHat
copy_arg = CopyArg()
copy_arg.src = "%s/iptables" % file_root
copy_arg.dest = "/etc/sysconfig/iptables"
iptables_copy_result = copy(copy_arg, host_post_info)
if chroot_env == 'false':
if iptables_copy_result != "changed:False":
service_status("iptables", "state=restarted enabled=yes", host_post_info)
else:
# name: enable appliancevm service for RedHat on chroot
service_status("zstack-appliancevm", "enabled=yes state=stopped", host_post_info)
elif distro in DEB_BASED_OS:
install_pkg_list = ['iputils-arping', 'tcpdump', 'ethtool']
apt_install_packages(install_pkg_list, host_post_info)
# name: copy iptables initial rules in Debian
copy_arg = CopyArg()
copy_arg.src = "%s/iptables" % file_root
copy_arg.dest = "/etc/iptables"
copy(copy_arg, host_post_info)
# name: copy iptables initial start script in Debian
copy_arg = CopyArg()
copy_arg.src = "%s/iptables.up" % file_root
copy_arg.dest = "/etc/network/if-pre-up.d/iptables.up"
copy_arg.args = "mode=0777"
iptables_script_result = copy(copy_arg, host_post_info)
if iptables_script_result == "status:changed":
command = "/etc/network/if-pre-up.d/iptables.up"
run_remote_command(command, host_post_info)
# name: enable appliancevm service for Debian -1
command = "sed -i '/zstack-appliancevm start/d' /etc/rc.local"
run_remote_command(command, host_post_info)
# name: enable appliancevm service for Debian -2
update_arg = "insertbefore='^exit 0' line='/etc/init.d/zstack-appliancevm start\n'"
update_file("/etc/rc.local", update_arg, host_post_info)
# name: restore iptables
command = '/etc/network/if-pre-up.d/iptables.up'
run_remote_command(command, host_post_info)
else:
error("unsupported OS!")
# name: install zstacklib
if copy_zstacklib != "changed:False":
agent_install_arg = AgentInstallArg(trusted_host, pip_url, virtenv_path, init_install)
agent_install_arg.agent_name = "appliancevm"
agent_install_arg.agent_root = appliancevm_root
agent_install_arg.pkg_name = pkg_zstacklib
agent_install(agent_install_arg, host_post_info)
# name: install appliancevm
if copy_appliancevm != "changed:False":
agent_install_arg = AgentInstallArg(trusted_host, pip_url, virtenv_path, init_install)
agent_install_arg.agent_name = "appliancevm"
agent_install_arg.agent_root = appliancevm_root
agent_install_arg.pkg_name = pkg_appliancevm
agent_install(agent_install_arg, host_post_info)
if chroot_env == 'false':
# name: restart appliancevm
if distro in RPM_BASED_OS:
command = "service zstack-appliancevm stop && service zstack-appliancevm start && chkconfig zstack-appliancevm on"
elif distro in DEB_BASED_OS:
command = "update-rc.d zstack-appliancevm start 97 3 4 5 . stop 3 0 1 2 6 . && service zstack-appliancevm stop && service zstack-appliancevm start"
run_remote_command(command, host_post_info)
else:
if distro in RPM_BASED_OS:
# name: restart iptables
service_status("iptables", "state=restarted enabled=yes", host_post_info)
host_post_info.start_time = start_time
handle_ansible_info("SUCC: Deploy appliancevm successful", host_post_info, "INFO")
sys.exit(0)
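# Illustrative invocation (all values below are placeholders). The -e dictionary is
# eval'd into local variables, so it must define at least the names this script reads
# but never assigns (host, zstack_root, trusted_host, yum_server, pkg_zstacklib) plus
# a real pkg_appliancevm, e.g.
#
#   python appliancevm.py -i /etc/ansible/hosts \
#       -e "{'host': '10.0.0.5', 'zstack_root': '/var/lib/zstack',
#            'trusted_host': '10.0.0.1', 'yum_server': '10.0.0.1',
#            'pkg_zstacklib': 'zstacklib-1.0.tar.gz', 'pkg_appliancevm': 'appliancevm-1.0.tar.gz'}"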
|
the-stack_0_4383 | from typing import Union
from pydantic.types import UUID5
from account.models import JWTModel
import uuid
from time import time
from datetime import datetime, timedelta
from pathlib import Path
from config.conf import JWT_KEY_PATH, JWT_CERT_PATH
from cryptography.x509 import load_pem_x509_certificate
from fastapi import HTTPException
import jwt
class JWT:
rsa_crt_path: Path = JWT_CERT_PATH
rsa_JWT_KEY_PATH: Path = JWT_KEY_PATH
JWT_NAMESPACE: uuid.UUID = uuid.UUID("69d3e8f4-0872-4f7f-9f35-d2ee437e0887")
@classmethod
def jti(cls, uid: str) -> str:
now = round(time() * 1000)
return str(uuid.uuid5(cls.JWT_NAMESPACE, str(uid) + str(now)))
@classmethod
def base_payload(cls, duration: int) -> dict:
now = datetime.utcnow()
nbf = {"nbf": now}
iat = {"iat": now}
exp = {"exp": now + timedelta(days=duration)}
payload = {**nbf, **iat, **exp}
return payload
@classmethod
def create(cls, user: dict, duration=30) -> str:
try:
jti = {"jti": cls.jti(user["uid"])}
key = cls.rsa_JWT_KEY_PATH.read_text()
payload = cls.base_payload(duration)
payload = {**payload, **user, **jti}
token = jwt.encode(payload, key, algorithm="RS256")
return token
except Exception as e:
raise HTTPException(500, "JWT error DAG: " + str(e))
@classmethod
def verify(cls, token: str) -> JWTModel:
try:
crt = cls.rsa_crt_path.read_text()
cert_obj = load_pem_x509_certificate(crt.encode())
public_key = cert_obj.public_key()
# private_key = cert_obj.private_key()
decoded = jwt.decode(token, public_key, algorithms=["RS256"])
return JWTModel(**decoded)
except Exception as e:
raise HTTPException(500, "JWT verify error DAG: " + str(e))
|
the-stack_0_4385 | import mdtraj as md
import networkx as nx
import numpy as np
import matplotlib.pyplot as plt
from collections import defaultdict
import scipy.optimize
import unyt as u
class BondCalculator:
def __init__(self, traj, T):
self.traj = traj
graph = traj.top.to_bondgraph()
bonds = self.identify_bonds(graph)
angles = self.identify_angles(graph)
bond_params = dict()
angle_params = dict()
for bond_type, pairs in bonds.items():
bond_lengths, bond_prob = self.calc_lengths(pairs, range=[0, 1.0])
params = self.calc_parameters(bond_lengths, bond_prob)
k = 2 * u.kb * (T*u.K) / (params[0] * u.nm)**2 * u.Na
l0 = params[1] * u.nm
bond_params[bond_type]= {"k": k, "x0": l0}
for angle_type, triplets in angles.items():
bond_angles, angle_prob = self.calc_angles(triplets, range=[0, 2*np.pi])
params = self.calc_parameters(bond_angles, angle_prob)
k = 2 * u.kb * (T*u.K) / (params[0] * u.rad)**2 * u.Na
t0 = params[1] * u.rad
angle_params[angle_type]= {"k": k, "x0": t0}
self.bond_params = bond_params
self.angle_params = angle_params
def identify_bonds(self, graph):
all_bonds = [edge for edge in graph.edges]
bonds = defaultdict(list)
for bond in all_bonds:
index = tuple(sorted([bond[0].name, bond[1].name]))
pair = tuple([particle.index for particle in bond])
bonds[index].append(pair)
return bonds
def identify_angles(self, graph):
angle_subgraph = nx.Graph()
angle_subgraph.add_edge(0, 1)
angle_subgraph.add_edge(1, 2)
matcher = nx.algorithms.isomorphism.GraphMatcher(graph, angle_subgraph)
all_angles = []
for m in matcher.subgraph_isomorphisms_iter():
all_angles.append(tuple(k for k in m.keys()))
angles = defaultdict(list)
for angle in all_angles:
index = tuple(particle.name for particle in angle)
if angle[0].name < angle[2].name:
index = tuple(reversed(index))
triplet = tuple(particle.index for particle in angle)
angles[index].append(triplet)
return angles
def calc_lengths(self, pairs, range=None):
quantity = md.compute_distances(self.traj, pairs)
hist, edges = np.histogram(quantity, density=True, range=range, bins=200)
bins = (edges[1:]+edges[:-1]) * 0.5
return bins, hist
def calc_angles(self, triplets, range=None):
quantity = md.compute_angles(self.traj, triplets)
hist, edges = np.histogram(quantity, density=True, range=range, bins=200)
bins = (edges[1:]+edges[:-1]) * 0.5
hist /= np.sin(bins)
hist /= np.sum(hist)*(bins[1]-bins[0])
return bins, hist
def cost_function(self, args, x, y):
w, x0 = args
return np.sum((self.gaussian(w, x0, x) - y)**2)
def gaussian(self, w, x0, x):
return ((w * np.sqrt(np.pi / 2))**-1)*(np.exp(-2 * (x - x0)**2 / (w**2)))
def calc_parameters(self, x, y):
res = scipy.optimize.minimize(lambda args: self.cost_function(args, x, y), [np.ptp(x)/10, x[np.argmax(y)]])
return res.x
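# Usage sketch (illustrative): the trajectory/topology file names and temperature are
# placeholders. The fitted harmonic parameters are keyed by bond and angle type.
if __name__ == "__main__":
    traj = md.load("traj.dcd", top="topology.pdb")
    calc = BondCalculator(traj, T=300)
    print(calc.bond_params)
    print(calc.angle_params)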
|
the-stack_0_4388 | # (C) Datadog, Inc. 2018
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
from __future__ import unicode_literals
import re
from ..._env import E2E_FIXTURE_NAME, deserialize_data
CONFIG_MESSAGE_PATTERN = 'DDEV_E2E_START_MESSAGE (.+) DDEV_E2E_END_MESSAGE'
def parse_config_from_result(env, result):
if 'NO E2E FIXTURE AVAILABLE' in result.stdout:
return None, None, 'The environment fixture `{}` does not exist.'.format(E2E_FIXTURE_NAME)
if '{}: platform mismatch'.format(env) in result.stdout:
return None, None, 'The environment `{}` does not support this platform.'.format(env)
decoded = parse_encoded_config_data(result.stdout)
if decoded is None:
return (
None,
None,
(
'{}\n{}\nUnable to parse configuration. Try recreating your env to get the '
'latest version of the dev package.'.format(result.stdout, result.stderr)
),
)
config = decoded['config']
metadata = decoded['metadata']
if config is None:
return None, None, 'The environment fixture `{}` did not yield any configuration.'.format(E2E_FIXTURE_NAME)
return config, metadata, None
def parse_encoded_config_data(output):
match = re.search(CONFIG_MESSAGE_PATTERN, output)
if match:
return deserialize_data(match.group(1))
|
the-stack_0_4390 | # -*- coding: utf-8 -*-
import numpy as np
"""
This script is for outputting PC1/PC2/PC3 data from preprocd_dataset.npz
of MD 1000K-LCx3 samples
"""
def makePC123(dtsetfile, outfile, grpname):
dtset= np.load(dtsetfile, allow_pickle=True)
#allow_pickle op is for adapting spec change of numpy 1.16.3 and later
dts= dtset['dataset']
dataset0=[]
for dt in dts:
dt0=dt['inputs/0']
dataset0.append(dt0)
dim0=len(dataset0)
dim1=len(dataset0[0])
dim2=len(dataset0[0][0])
with open(outfile, 'w') as f1:
for dt64 in dataset0:
for dt in dt64:
wdt=str(dt[0])+" "+str(dt[1])+" "+str(dt[2])+"\n"
f1.write(wdt)
print(f'Saved PC1/PC2/PC3 data of {grpname}: Shape= {dim0} x {dim1} x {dim2}')
if __name__ == '__main__':
mdfolder="/home/okugawa/HDNNP/Si-190808-md"
outfolder=mdfolder+"/result-LC/PC123/"
grps=['1000K0.99', '1000K1.0', '1000K1.01']
for grp in grps:
for j in range(1,11):
grpname=grp+"-"+str(j)
dtsetdir=mdfolder+"/"+grp+"/"+str(j)
dtsetfile=dtsetdir+"/data/CrystalSi64/preprocd_dataset.npz"
outfile=outfolder+grpname+"-PC123.txt"
makePC123(dtsetfile, outfile, grpname)
|
the-stack_0_4391 | # Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Setup for pip package."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from setuptools import find_packages
from setuptools import setup
from setuptools.dist import Distribution
__version__ = '1.2b1'
class BinaryDistribution(Distribution):
"""This class is needed in order to create OS specific wheels."""
def has_ext_modules(self):
return True
setup(
name='tensorflow-compression',
version=__version__,
description=('Data compression in TensorFlow'),
url='https://tensorflow.github.io/compression/',
author='Google LLC',
# Contained modules and scripts.
packages=find_packages(),
install_requires=[
'scipy >= 1.0.0',
'tensorflow >= 1.13.0',
],
# Add in any packaged data.
include_package_data=True,
zip_safe=False,
distclass=BinaryDistribution,
# PyPI package information.
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Software Development :: Libraries',
],
project_urls={
'Documentation': 'https://tensorflow.github.io/compression/docs/api_docs/python/tfc.html',
'Discussion': 'https://groups.google.com/forum/#!forum/tensorflow-compression',
'Source': 'https://github.com/tensorflow/compression',
'Tracker': 'https://github.com/tensorflow/compression/issues',
},
license='Apache 2.0',
keywords='compression data-compression tensorflow machine-learning python deep-learning deep-neural-networks neural-network ml',
)
|
the-stack_0_4392 | import time
import dash
import dash_html_components as html
import dash_core_components as dcc
import dash_bootstrap_components as dbc
from dash.dependencies import Input, Output, State
from transformers import BartTokenizer, BartForConditionalGeneration
import torch
device = "cuda" if torch.cuda.is_available() else "cpu"
print(f"Device: {device}")
# Load Model
pretrained = "sshleifer/distilbart-xsum-12-6"
model = BartForConditionalGeneration.from_pretrained(pretrained)
tokenizer = BartTokenizer.from_pretrained(pretrained)
# Switch to cuda, eval mode, and FP16 for faster inference
if device == "cuda":
model = model.half()
model.to(device)
model.eval()
# Define app
app = dash.Dash(__name__, external_stylesheets=[dbc.themes.BOOTSTRAP])
server = app.server
controls = dbc.Card(
[
dbc.FormGroup(
[
dbc.Label("Output Length (# Tokens)"),
dcc.Slider(
id="max-length",
min=10,
max=50,
value=30,
marks={i: str(i) for i in range(10, 51, 10)},
),
]
),
dbc.FormGroup(
[
dbc.Label("Beam Size"),
dcc.Slider(
id="num-beams",
min=2,
max=6,
value=4,
marks={i: str(i) for i in [2, 4, 6]},
),
]
),
dbc.FormGroup(
[
dbc.Spinner(
[
dbc.Button("Summarize", id="button-run"),
html.Div(id="time-taken"),
]
)
]
),
],
body=True,
style={"height": "275px"},
)
# Define Layout
app.layout = dbc.Container(
fluid=True,
children=[
html.H1("Dash Automatic Summarization (with DistilBART)"),
html.Hr(),
dbc.Row(
[
dbc.Col(
width=5,
children=[
controls,
dbc.Card(
body=True,
children=[
dbc.FormGroup(
[
dbc.Label("Summarized Content"),
dcc.Textarea(
id="summarized-content",
style={
"width": "100%",
"height": "calc(75vh - 275px)",
},
),
]
)
],
),
],
),
dbc.Col(
width=7,
children=[
dbc.Card(
body=True,
children=[
dbc.FormGroup(
[
dbc.Label("Original Text (Paste here)"),
dcc.Textarea(
id="original-text",
style={"width": "100%", "height": "75vh"},
),
]
)
],
)
],
),
]
),
],
)
@app.callback(
[Output("summarized-content", "value"), Output("time-taken", "children")],
[
Input("button-run", "n_clicks"),
Input("max-length", "value"),
Input("num-beams", "value"),
],
[State("original-text", "value")],
)
def summarize(n_clicks, max_len, num_beams, original_text):
if original_text is None or original_text == "":
return "", "Did not run"
t0 = time.time()
inputs = tokenizer.batch_encode_plus(
[original_text], max_length=1024, return_tensors="pt"
)
inputs = inputs.to(device)
# Generate Summary
summary_ids = model.generate(
inputs["input_ids"],
num_beams=num_beams,
max_length=max_len,
early_stopping=True,
)
out = [
tokenizer.decode(
g, skip_special_tokens=True, clean_up_tokenization_spaces=False
)
for g in summary_ids
]
t1 = time.time()
time_taken = f"Summarized on {device} in {t1-t0:.2f}s"
return out[0], time_taken
if __name__ == "__main__":
app.run_server(debug=True)
|
the-stack_0_4393 | import requests
import json
import configparser as cfg
class telegram_chatbot():
def __init__(self, config):
self.token = self.read_token_from_config_file(config)
self.base = "https://api.telegram.org/bot{}/".format(self.token)
def get_updates(self, offset=None):
url = self.base + "getUpdates?timeout=100"
if offset:
url = url + "&offset={}".format(offset + 1)
r = requests.get(url)
return json.loads(r.content)
def send_message(self, msg, chat_id):
url = self.base + "sendMessage?chat_id={}&text={}".format(chat_id, msg)
if msg is not None:
requests.get(url)
def read_token_from_config_file(self, config):
parser = cfg.ConfigParser()
parser.read(config)
return parser.get('creds', 'token')
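# Minimal polling loop (illustrative): 'config.cfg' is a placeholder path to a file
# with a [creds] section containing the bot token, as read above.
if __name__ == "__main__":
    bot = telegram_chatbot("config.cfg")
    last_update_id = None
    while True:
        updates = bot.get_updates(offset=last_update_id)
        for item in updates.get("result", []):
            last_update_id = item["update_id"]
            message = item.get("message", {})
            text = message.get("text")
            chat_id = message.get("chat", {}).get("id")
            if text and chat_id:
                bot.send_message("You said: " + text, chat_id)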
|
the-stack_0_4394 | from discord.ext.commands import Cog
class Cancer(Cog):
def __init__(self, bot):
self.bot = bot
self.ok_list = [198101180180594688, 246291440106340352]
@Cog.listener()
async def on_member_join(self, member):
if member.guild.id not in self.ok_list:
return
await member.guild.system_channel.send("yes " + member.mention)
@Cog.listener()
async def on_member_remove(self, member):
if member.guild.id not in self.ok_list:
return
await member.guild.system_channel.send("no " + member.mention)
@Cog.listener()
async def on_guild_emojis_update(self, guild, before, after):
if guild.id not in self.ok_list:
return
await guild.system_channel.send("the emojis were updated")
def setup(bot):
bot.add_cog(Cancer(bot))
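# Illustrative wiring (discord.py 1.x style, matching the non-async setup() above);
# the extension module name and token are placeholders.
if __name__ == "__main__":
    from discord.ext import commands
    bot = commands.Bot(command_prefix="!")
    bot.load_extension("cancer")  # assumes this file is importable as `cancer`
    bot.run("YOUR_BOT_TOKEN")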
|
the-stack_0_4395 | import pandas as pd
train = pd.read_csv('../data/train_mapped.tsv', sep='\t', header=0)
data = pd.DataFrame(columns=['SentenceId','Phrase', 'Sentiment'])
temp = list(train['SentenceId'])
count = 1
for index, row in train.iterrows():
if row['SentenceId'] == count:
data = data.append(row[['SentenceId', 'Phrase', 'Sentiment']])
count += 1
# if count == 2628 or count == 2746 or count == 4044 or count == 4365:
# count += 1
if count not in temp:
print(count)
count += 1
data = data.reset_index()
data = data.drop('index', axis=1)
print(len(data))
data.to_csv('../data/train_extract.tsv', sep='\t', index=False)
|
the-stack_0_4396 | """
"""
from __future__ import division
from datetime import date
import logging
from date_helper import *
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
def check_date_objects(date1, date2):
if not (isinstance(date1, date) and isinstance(date2, date)):
    raise InputError("Dates must be instances of the datetime.date class")
class Error(Exception):
"""Base class for exceptions in this module.
"""
pass
class InputError(Error):
"""Exception raised for errors in parameters.
"""
pass
def _days_30_360_main(i_year, i_month, i_day, f_year, f_month, f_day):
"""Base formula calculation for the numerator and denominator of day count 30/360.
"""
num = 360 * (f_year - i_year) + 30 * (f_month - i_month) + (f_day - i_day)
den = 360
log = "[%(num)r/%(den)r]" % {'num':num, 'den':den}
logger.debug(log)
return num / den
def _daycount_act_act_ISDA(i_date, f_date):
"""Return factor to apply for interests between i_date and f_date.
:i_date: initial date.
:f_date: final date.
*i_date* and *f_date* must be instances of datetime.date class from datetime module
act/act, ISDA
Days in a month: actual
Days in a year: actual
Flavor: ISDA
This method splits up the actual number of days falling in leap years and in non-leap years.
The year fraction is the sum of the actual number of days falling in leap years divided by 366 and the actual number of days falling in non-leap years divided by 365.
"""
days_in_commons, days_in_leaps = _days_in_leap_and_common_years(i_date, f_date)
if days_in_commons == 0:
num = days_in_leaps
den = 366
elif days_in_leaps == 0:
num = days_in_commons
den = 365
else:
num = (366 * days_in_commons) + (365 * days_in_leaps)
den = 133590 #least common multiple between 366 and 365
log = "%(f_name)r(%(i_date)r, %(f_date)r)" % {'f_name':'daycount_act_act_ISDA', 'i_date':i_date, 'f_date':f_date}
logger.debug(log)
log = "[%(num)r/%(den)r]" % {'num':num, 'den':den}
logger.debug(log)
return num / den
def _daycount_act_act_Euro(i_date, f_date):
"""Return factor to apply for interests between i_date and f_date.
:i_date: initial date.
:f_date: final date.
*i_date* and *f_date* must be instances of datetime.date class from the datetime module
act/act, Euro, AFB
Days in a month: actual
Days in a year: actual
This method first calculates the number of full years counting backwards from the second date.
For any resulting stub periods, the numerator is the actual number of days in the period, the denominator being 365 or 366 depending on whether February 29th falls in the stub period.
"""
# delta = f_date - i_date
# days1 = delta.days
#
# log = "%(f_name)r(%(i_date)r, %(f_date)r)" % {'f_name':'daycount_act_act_Euro', 'i_date':i_date, 'f_date':f_date}
# logger.debug(log)
# log = "[%(num)r/%(den)r]" % {'num':num, 'den':den}
# logger.debug(log)
# return num / den
def _daycount_act_365_Fixed(i_date, f_date):
"""Return factor to apply for interests between i_date and f_date.
:i_date: initial date.
:f_date: final date.
*i_date* and *f_date* must be instances of datetime.date class from the datetime module
act/365, act/365 fixed
Days in a month: actual
Days in a year: 365 Always
Flavor: Fixed
This method first calculates the number of full years counting backwards from the second date.
For any resulting stub periods, the numerator is the actual number of days in the period, the denominator being 365 or 366 depending on whether February 29th falls in the stub period.
"""
delta = f_date - i_date
num = delta.days
den = 365
log = "%(f_name)r(%(i_date)r, %(f_date)r)" % {'f_name':'daycount_act_365_Fixed', 'i_date':i_date, 'f_date':f_date}
logger.debug(log)
log = "[%(num)r/%(den)r]" % {'num':num, 'den':den}
logger.debug(log)
return num / den
def _daycount_30_360(i_date, f_date):
"""Return factor to apply for interests between i_date and f_date.
:i_date: initial date.
:f_date: final date.
*i_date* and *f_date* must be instances of datetime.date class from the datetime module
Days in a month: 30
Days in a year: 360
Flavor: None
"""
i_year = i_date.year
i_month = i_date.month
i_day = i_date.day
f_year = f_date.year
f_month = f_date.month
f_day = f_date.day
log = "%(f_name)r(%(i_date)r, %(f_date)r)" % {'f_name':'daycount_30_360', 'i_date':i_date, 'f_date':f_date}
logger.debug(log)
factor = _days_30_360_main(i_year, i_month, i_day, f_year, f_month, f_day)
return factor
def _daycount_30_360_US(i_date, f_date):
"""Return factor to apply for interests between i_date and f_date.
:i_date: initial date.
:f_date: final date.
*i_date* and *f_date* must be instances of datetime.date class from the datetime module
Days in a month: 30
Days in a year: 360
Flavor: US
"""
i_year = i_date.year
i_month = i_date.month
i_day = i_date.day
f_year = f_date.year
f_month = f_date.month
f_day = f_date.day
if (i_date.month == 2 and _is_end_of_month(i_date)) and (f_date.month == 2 and _is_end_of_month(f_date)):
f_day = 30
if (i_date.month == 2 and _is_end_of_month(i_date)):
i_day = 30
if (f_day == 31) and (i_day in [30, 31]):
f_day = 30
if (i_day == 31):
i_day = 30
log = "%(f_name)r(%(i_date)r, %(f_date)r)" % {'f_name':'daycount_30_360_US', 'i_date':i_date, 'f_date':f_date}
logger.debug(log)
factor = _days_30_360_main(i_year, i_month, i_day, f_year, f_month, f_day)
return factor
class InterestFactor(object):
""".
Usage::
>>> date1 = date(2012, 2, 5)
>>> date2 = date(2012, 4, 6)
>>> days360 = InterestFactor(30, 360)
>>> days360.factor(date1, date2)
>>>
"""
def __init__(self, dim=30, diy=360, flavor=None):
"""Constructor.
"""
self.dim = dim
self.diy = diy
self.flavor = flavor
method = '_'.join([str(self.dim), str(self.diy), str(self.flavor)])
#try:
self.factor = self._methods[method]
#except KeyError as e:
#pass #TODO: catch this key error
def __repr__(self):
"""Representation.
"""
return "interestFactor(dim=%(dim)r, diy=%(diy)r, flavor=%(flavor)r)" % {'dim':self.dim, 'diy':self.diy, 'flavor':self.flavor}
_methods = {
'30_360_None': _daycount_30_360,
'30_360_US': _daycount_30_360_US,
'act_act_Fixed': _daycount_act_365_Fixed,
'act_act_ISDA': _daycount_act_act_ISDA,
'act_act_Euro': _daycount_act_act_Euro,
}
if __name__ == '__main__':
date1 = date(2012, 2, 5)
date2 = date(2012, 4, 6)
days360 = InterestFactor(30, 360)
print(days360)
print(days360.factor(date1, date2))
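# 30/360 worked out: 360*(2012-2012) + 30*(4-2) + (6-5) = 61, so the factor is 61/360 ~= 0.1694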
|
the-stack_0_4398 | # %% [markdown]
# ##
import os
import warnings
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.transforms as transforms
import numpy as np
import pandas as pd
import seaborn as sns
from joblib import Parallel, delayed
from sklearn.exceptions import ConvergenceWarning
from sklearn.manifold import MDS, TSNE, Isomap
from sklearn.metrics import pairwise_distances
from sklearn.neighbors import NearestNeighbors
from sklearn.utils.testing import ignore_warnings
from tqdm.autonotebook import tqdm
from umap import UMAP
from graspy.embed import (
AdjacencySpectralEmbed,
ClassicalMDS,
LaplacianSpectralEmbed,
OmnibusEmbed,
select_dimension,
selectSVD,
)
from graspy.plot import pairplot
from graspy.simulations import sbm
from graspy.utils import (
augment_diagonal,
binarize,
pass_to_ranks,
symmetrize,
to_laplace,
)
from src.align import Procrustes
from src.cluster import MaggotCluster, get_paired_inds
from src.data import load_metagraph
from src.graph import preprocess
from src.hierarchy import signal_flow
from src.io import savecsv, savefig
from src.visualization import (
CLASS_COLOR_DICT,
add_connections,
adjplot,
barplot_text,
draw_networkx_nice,
gridmap,
matrixplot,
palplot,
screeplot,
set_axes_equal,
stacked_barplot,
)
warnings.filterwarnings(action="ignore", category=ConvergenceWarning)
FNAME = os.path.basename(__file__)[:-3]
print(FNAME)
rc_dict = {
"axes.spines.right": False,
"axes.spines.top": False,
"axes.formatter.limits": (-3, 3),
"figure.figsize": (6, 3),
"figure.dpi": 100,
}
for key, val in rc_dict.items():
mpl.rcParams[key] = val
context = sns.plotting_context(context="talk", font_scale=1, rc=rc_dict)
sns.set_context(context)
np.random.seed(8888)
def stashfig(name, **kws):
savefig(name, foldername=FNAME, save_on=True, **kws)
def stashcsv(df, name, **kws):
savecsv(df, name, foldername=FNAME, **kws)
graph_type = "G"
def plot_pairs(
X, labels, model=None, left_pair_inds=None, right_pair_inds=None, equal=False
):
n_dims = X.shape[1]
fig, axs = plt.subplots(
n_dims, n_dims, sharex=False, sharey=False, figsize=(20, 20)
)
data = pd.DataFrame(data=X)
data["label"] = labels
for i in range(n_dims):
for j in range(n_dims):
ax = axs[i, j]
ax.axis("off")
if i < j:
sns.scatterplot(
data=data,
x=j,
y=i,
ax=ax,
alpha=0.7,
linewidth=0,
s=8,
legend=False,
hue="label",
palette=CLASS_COLOR_DICT,
)
if left_pair_inds is not None and right_pair_inds is not None:
add_connections(
data.iloc[left_pair_inds, j],
data.iloc[right_pair_inds, j],
data.iloc[left_pair_inds, i],
data.iloc[right_pair_inds, i],
ax=ax,
)
plt.tight_layout()
return fig, axs
def preprocess_adjs(adjs, method="ase"):
adjs = [pass_to_ranks(a) for a in adjs]
adjs = [a + 1 / a.size for a in adjs]
if method == "ase":
adjs = [augment_diagonal(a) for a in adjs]
elif method == "lse":
adjs = [to_laplace(a) for a in adjs]
return adjs
def omni(
adjs,
n_components=4,
remove_first=None,
concat_graphs=True,
concat_directed=True,
method="ase",
):
adjs = preprocess_adjs(adjs, method=method)
omni = OmnibusEmbed(n_components=n_components, check_lcc=False, n_iter=10)
embed = omni.fit_transform(adjs)
if concat_directed:
embed = np.concatenate(
embed, axis=-1
) # this is for left/right latent positions
if remove_first is not None:
embed = embed[remove_first:]
if concat_graphs:
embed = np.concatenate(embed, axis=0)
return embed
def ipsi_omni(adj, lp_inds, rp_inds, co_adj=None, n_components=4, method="ase"):
ll_adj = adj[np.ix_(lp_inds, lp_inds)]
rr_adj = adj[np.ix_(rp_inds, rp_inds)]
ipsi_adjs = [ll_adj, rr_adj]
if co_adj is not None:
co_ll_adj = co_adj[np.ix_(lp_inds, lp_inds)]
co_rr_adj = co_adj[np.ix_(rp_inds, rp_inds)]
ipsi_adjs += [co_ll_adj, co_rr_adj]
out_ipsi, in_ipsi = omni(
ipsi_adjs,
n_components=n_components,
concat_directed=False,
concat_graphs=False,
method=method,
)
left_embed = np.concatenate((out_ipsi[0], in_ipsi[0]), axis=1)
right_embed = np.concatenate((out_ipsi[1], in_ipsi[1]), axis=1)
ipsi_embed = np.concatenate((left_embed, right_embed), axis=0)
return ipsi_embed
def contra_omni(adj, lp_inds, rp_inds, co_adj=None, n_components=4, method="ase"):
lr_adj = adj[np.ix_(lp_inds, rp_inds)]
rl_adj = adj[np.ix_(rp_inds, lp_inds)]
contra_adjs = [lr_adj, rl_adj]
if co_adj is not None:
co_lr_adj = co_adj[np.ix_(lp_inds, rp_inds)]
co_rl_adj = co_adj[np.ix_(rp_inds, lp_inds)]
contra_adjs += [co_lr_adj, co_rl_adj]
out_contra, in_contra = omni(
contra_adjs,
n_components=n_components,
concat_directed=False,
concat_graphs=False,
method=method,
)
left_embed = np.concatenate((out_contra[0], in_contra[1]), axis=1)
right_embed = np.concatenate((out_contra[1], in_contra[0]), axis=1)
contra_embed = np.concatenate((left_embed, right_embed), axis=0)
return contra_embed
def lateral_omni(adj, lp_inds, rp_inds, n_components=4, method="ase"):
ipsi_embed = ipsi_omni(
adj, lp_inds, rp_inds, n_components=n_components, method=method
)
contra_embed = contra_omni(
adj, lp_inds, rp_inds, n_components=n_components, method=method
)
embed = np.concatenate((ipsi_embed, contra_embed), axis=1)
return embed
def multi_lateral_omni(adjs, lp_inds, rp_inds, n_components=4):
ipsi_adjs = []
for a in adjs:
ll_adj = a[np.ix_(lp_inds, lp_inds)]
rr_adj = a[np.ix_(rp_inds, rp_inds)]
ipsi_adjs.append(ll_adj)
ipsi_adjs.append(rr_adj)
ipsi_embed = omni(ipsi_adjs, concat_graphs=False, n_components=n_components)
left = []
right = []
for i, e in enumerate(ipsi_embed):
if i % 2 == 0:
left.append(e)
else:
right.append(e)
left = np.concatenate(left, axis=1)
right = np.concatenate(right, axis=1)
ipsi_embed = np.concatenate((left, right), axis=0)
contra_adjs = []
for a in adjs:
lr_adj = a[np.ix_(lp_inds, rp_inds)]
rl_adj = a[np.ix_(rp_inds, lp_inds)]
contra_adjs.append(lr_adj)
contra_adjs.append(rl_adj)
contra_embed = omni(contra_adjs, concat_graphs=False, n_components=n_components)
left = []
right = []
for i, e in enumerate(contra_embed):
if i % 2 == 0:
left.append(e)
else:
right.append(e)
left = np.concatenate(left, axis=1)
right = np.concatenate(right, axis=1)
contra_embed = np.concatenate((left, right), axis=0)
embed = np.concatenate((ipsi_embed, contra_embed), axis=1)
return embed
def reg_lateral_omni(adj, base_adj, lp_inds, rp_inds, n_components=4):
base_ll_adj = base_adj[np.ix_(lp_inds, lp_inds)]
base_rr_adj = base_adj[np.ix_(rp_inds, rp_inds)]
ll_adj = adj[np.ix_(lp_inds, lp_inds)]
rr_adj = adj[np.ix_(rp_inds, rp_inds)]
ipsi_adjs = [base_ll_adj, base_rr_adj, ll_adj, rr_adj]
ipsi_embed = omni(ipsi_adjs, remove_first=2, n_components=n_components)
base_lr_adj = base_adj[np.ix_(lp_inds, rp_inds)]
base_rl_adj = base_adj[np.ix_(rp_inds, lp_inds)]
lr_adj = adj[np.ix_(lp_inds, rp_inds)]
rl_adj = adj[np.ix_(rp_inds, lp_inds)]
contra_adjs = [base_lr_adj, base_rl_adj, lr_adj, rl_adj]
contra_embed = omni(contra_adjs, remove_first=2, n_components=n_components)
embed = np.concatenate((ipsi_embed, contra_embed), axis=1)
return embed
def quick_embed_viewer(
embed, labels=None, lp_inds=None, rp_inds=None, left_right_indexing=False
):
if left_right_indexing:
lp_inds = np.arange(len(embed) // 2)
rp_inds = np.arange(len(embed) // 2) + len(embed) // 2
fig, axs = plt.subplots(3, 2, figsize=(20, 30))
cmds = ClassicalMDS(n_components=2)
cmds_euc = cmds.fit_transform(embed)
plot_df = pd.DataFrame(data=cmds_euc)
plot_df["labels"] = labels
plot_kws = dict(
x=0,
y=1,
hue="labels",
palette=CLASS_COLOR_DICT,
legend=False,
s=20,
linewidth=0.5,
alpha=0.7,
)
ax = axs[0, 0]
sns.scatterplot(data=plot_df, ax=ax, **plot_kws)
ax.axis("off")
add_connections(
plot_df.iloc[lp_inds, 0],
plot_df.iloc[rp_inds, 0],
plot_df.iloc[lp_inds, 1],
plot_df.iloc[rp_inds, 1],
ax=ax,
)
ax.set_title("CMDS o euclidean")
cmds = ClassicalMDS(n_components=2, dissimilarity="precomputed")
pdist = symmetrize(pairwise_distances(embed, metric="cosine"))
cmds_cos = cmds.fit_transform(pdist)
plot_df[0] = cmds_cos[:, 0]
plot_df[1] = cmds_cos[:, 1]
ax = axs[0, 1]
sns.scatterplot(data=plot_df, ax=ax, **plot_kws)
ax.axis("off")
add_connections(
plot_df.iloc[lp_inds, 0],
plot_df.iloc[rp_inds, 0],
plot_df.iloc[lp_inds, 1],
plot_df.iloc[rp_inds, 1],
ax=ax,
)
ax.set_title("CMDS o cosine")
tsne = TSNE(metric="euclidean")
tsne_euc = tsne.fit_transform(embed)
plot_df[0] = tsne_euc[:, 0]
plot_df[1] = tsne_euc[:, 1]
ax = axs[1, 0]
sns.scatterplot(data=plot_df, ax=ax, **plot_kws)
ax.axis("off")
add_connections(
plot_df.iloc[lp_inds, 0],
plot_df.iloc[rp_inds, 0],
plot_df.iloc[lp_inds, 1],
plot_df.iloc[rp_inds, 1],
ax=ax,
)
ax.set_title("TSNE o euclidean")
tsne = TSNE(metric="precomputed")
tsne_cos = tsne.fit_transform(pdist)
plot_df[0] = tsne_cos[:, 0]
plot_df[1] = tsne_cos[:, 1]
ax = axs[1, 1]
sns.scatterplot(data=plot_df, ax=ax, **plot_kws)
ax.axis("off")
add_connections(
plot_df.iloc[lp_inds, 0],
plot_df.iloc[rp_inds, 0],
plot_df.iloc[lp_inds, 1],
plot_df.iloc[rp_inds, 1],
ax=ax,
)
ax.set_title("TSNE o cosine")
umap = UMAP(metric="euclidean", n_neighbors=30, min_dist=1)
umap_euc = umap.fit_transform(embed)
plot_df[0] = umap_euc[:, 0]
plot_df[1] = umap_euc[:, 1]
ax = axs[2, 0]
sns.scatterplot(data=plot_df, ax=ax, **plot_kws)
ax.axis("off")
add_connections(
plot_df.iloc[lp_inds, 0],
plot_df.iloc[rp_inds, 0],
plot_df.iloc[lp_inds, 1],
plot_df.iloc[rp_inds, 1],
ax=ax,
)
ax.set_title("UMAP o euclidean")
umap = UMAP(metric="cosine", n_neighbors=30, min_dist=1)
umap_cos = umap.fit_transform(embed)
plot_df[0] = umap_cos[:, 0]
plot_df[1] = umap_cos[:, 1]
ax = axs[2, 1]
sns.scatterplot(data=plot_df, ax=ax, **plot_kws)
ax.axis("off")
add_connections(
plot_df.iloc[lp_inds, 0],
plot_df.iloc[rp_inds, 0],
plot_df.iloc[lp_inds, 1],
plot_df.iloc[rp_inds, 1],
ax=ax,
)
ax.set_title("UMAP o cosine")
def umapper(embed, metric="euclidean", n_neighbors=30, min_dist=1, **kws):
umap = UMAP(metric=metric, n_neighbors=n_neighbors, min_dist=min_dist)
umap_euc = umap.fit_transform(embed)
plot_df = pd.DataFrame(data=umap_euc)
plot_df["labels"] = labels
fig, ax = plt.subplots(1, 1, figsize=(10, 10))
plot_kws = dict(
x=0,
y=1,
hue="labels",
palette=CLASS_COLOR_DICT,
legend=False,
s=20,
linewidth=0.5,
alpha=0.7,
)
sns.scatterplot(data=plot_df, ax=ax, **plot_kws)
ax.axis("off")
left_right_indexing = True
if left_right_indexing:
tlp_inds = np.arange(len(embed) // 2)
trp_inds = np.arange(len(embed) // 2) + len(embed) // 2
add_connections(
plot_df.iloc[tlp_inds, 0],
plot_df.iloc[trp_inds, 0],
plot_df.iloc[tlp_inds, 1],
plot_df.iloc[trp_inds, 1],
ax=ax,
)
return fig, ax
# %% [markdown]
# ## Load and preprocess data
VERSION = "2020-04-23"
graph_type = "G"
master_mg = load_metagraph(graph_type, version="2020-04-23")
mg = preprocess(
master_mg,
threshold=0,
sym_threshold=False,
remove_pdiff=True,
binarize=False,
weight="weight",
)
meta = mg.meta
degrees = mg.calculate_degrees()
quant_val = np.quantile(degrees["Total edgesum"], 0.05)
# remove low degree neurons
idx = meta[degrees["Total edgesum"] > quant_val].index
print(quant_val)
mg = mg.reindex(idx, use_ids=True)
# remove center neurons # FIXME
idx = mg.meta[mg.meta["hemisphere"].isin(["L", "R"])].index
mg = mg.reindex(idx, use_ids=True)
idx = mg.meta[mg.meta["Pair"].isin(mg.meta.index)].index
mg = mg.reindex(idx, use_ids=True)
mg = mg.make_lcc()
mg.calculate_degrees(inplace=True)
meta = mg.meta
meta["pair_td"] = meta["Pair ID"].map(meta.groupby("Pair ID")["Total degree"].mean())
mg = mg.sort_values(["pair_td", "Pair ID"], ascending=False)
meta["inds"] = range(len(meta))
adj = mg.adj.copy()
lp_inds, rp_inds = get_paired_inds(meta)
left_inds = meta[meta["left"]]["inds"]
print(len(mg))
# %% [markdown]
# ## Plot the ipsilateral connectomes
if meta["pair_td"].max() > 0:
meta["pair_td"] = -meta["pair_td"]
ll_adj = adj[np.ix_(lp_inds, lp_inds)]
rr_adj = adj[np.ix_(rp_inds, rp_inds)]
left_meta = meta.iloc[lp_inds]
right_meta = meta.iloc[rp_inds]
plot_kws = dict(
plot_type="scattermap",
sort_class="merge_class",
item_order=["pair_td", "Pair ID"],
colors="merge_class",
palette=CLASS_COLOR_DICT,
ticks=False,
class_order="pair_td",
sizes=(1, 1),
gridline_kws=dict(linewidth=0.2, color="grey", linestyle="--"),
)
plot_adjs = False
if plot_adjs:
fig, axs = plt.subplots(1, 2, figsize=(20, 10))
_, _, top, _ = adjplot(ll_adj, ax=axs[0], meta=left_meta, **plot_kws)
top.set_title(r"L $\to$ L")
_, _, top, _ = adjplot(rr_adj, ax=axs[1], meta=right_meta, **plot_kws)
top.set_title(r"R $\to$ R")
plt.tight_layout()
stashfig("ipsilateral-adj")
lr_adj = adj[np.ix_(lp_inds, rp_inds)]
rl_adj = adj[np.ix_(rp_inds, lp_inds)]
fig, axs = plt.subplots(1, 2, figsize=(20, 10))
_, _, top, _ = adjplot(lr_adj, ax=axs[0], meta=left_meta, **plot_kws)
top.set_title(r"L $\to$ R")
_, _, top, _ = adjplot(rl_adj, ax=axs[1], meta=right_meta, **plot_kws)
top.set_title(r"R $\to$ L")
plt.tight_layout()
stashfig("contralateral-adj")
# %% [markdown]
# ## Load the 4-color graphs
graph_types = ["Gad", "Gaa", "Gdd", "Gda"]
adjs = []
for g in graph_types:
temp_mg = load_metagraph(g, version=VERSION)
temp_mg.reindex(mg.meta.index, use_ids=True)
temp_adj = temp_mg.adj
adjs.append(temp_adj)
# %% [markdown]
# ## simple demo of "in" vs "out" latent positions
# blocks 0, 1 differ only in their inputs, not their outputs
B = np.array(
[
[0.1, 0.1, 0.2, 0.05],
[0.1, 0.1, 0.2, 0.05],
[0.35, 0.15, 0.1, 0.1],
[0.1, 0.05, 0.3, 0.4],
]
)
sns.heatmap(B, square=True, annot=True)
sbm_sample, sbm_labels = sbm([100, 100, 100, 100], B, directed=True, return_labels=True)
ase = AdjacencySpectralEmbed()
out_embed, in_embed = ase.fit_transform(sbm_sample)
pairplot(out_embed, sbm_labels) # don't see separation between [0, 1]
pairplot(in_embed, sbm_labels) # do see separation between [0, 1]
# from this we can conclude that the "right" embedding or right singular vectors are the
# ones corresponding to input
# (out, in)
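# In RDPG/ASE terms the expected adjacency factors as P = X @ Y.T: row i of X (the "out"
# or left position) sets node i's outgoing edge probabilities, while row i of Y (the "in"
# or right position) sets its incoming ones. Blocks 0 and 1 share rows of B (identical
# outputs) but have different columns (different inputs), so they separate only in the
# "in" embedding.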
# %% [markdown]
# ## Options for the embedding
# - ASE and procrustes (not shown here)
# - Bilateral OMNI on G, SVD
# - Bilateral OMNI on each of the 4-colors, concatenated, SVD
# - Bilateral OMNI on each of the 4-colors, with regularization, concatenated, SVD
# - Bilateral OMNI jointly with all 4-colors
n_omni_components = 8 # this is used for all of the embedings initially
n_svd_components = 16 # this is for the last step
def svd(X, n_components=n_svd_components):
return selectSVD(X, n_components=n_components, algorithm="full")[0]
# %% [markdown]
# ## only contra
# just_contra_embed = omni(
# [full_adjs[0], full_adjs[2]],
# n_components=n_omni_components,
# remove_first=None,
# concat_graphs=True,
# concat_directed=True,
# method="ase",
# )
# svd_contra_embed = svd(just_contra_embed)
# %% [markdown]
# # Omni of contra/ipsi together
full_adjs = [
adj[np.ix_(lp_inds, lp_inds)],
adj[np.ix_(lp_inds, rp_inds)],
adj[np.ix_(rp_inds, rp_inds)],
adj[np.ix_(rp_inds, lp_inds)],
]
out_embed, in_embed = omni(
full_adjs,
n_components=n_omni_components,
remove_first=None,
concat_graphs=False,
concat_directed=False,
method="ase",
)
# ipsi out, contra out, ipsi in, contra in
left_embed = np.concatenate(
(out_embed[0], out_embed[1], in_embed[0], in_embed[3]), axis=1
)
right_embed = np.concatenate(
(out_embed[2], out_embed[3], in_embed[2], in_embed[1]), axis=1
)
omni_naive_embed = np.concatenate((left_embed, right_embed), axis=0)
ase_naive_embed = svd(omni_naive_embed)
# ##
# out_embed, in_embed = omni(
# full_adjs,
# n_components=n_omni_components,
# remove_first=None,
# concat_graphs=False,
# concat_directed=False,
# method="lse",
# )
# # ipsi out, contra out, ipsi in, contra in
# left_embed = np.concatenate(
# (out_embed[0], out_embed[1], in_embed[0], in_embed[3]), axis=1
# )
# right_embed = np.concatenate(
# (out_embed[2], out_embed[3], in_embed[2], in_embed[1]), axis=1
# )
# omni_naive_embed = np.concatenate((left_embed, right_embed), axis=0)
# lse_naive_embed = svd(omni_naive_embed)
# %% [markdown]
# ## Bilateral OMNI on G, SVD
omni_flat_embed = lateral_omni(
adj, lp_inds, rp_inds, n_components=n_omni_components, method="ase"
)
ase_flat_embed = svd(omni_flat_embed)
# %% [markdown]
# ## just compare
# %% [markdown]
# ## Bilateral OMNI on each of the 4-colors, concatenated, SVD
omni_multi_embed = []
for a in adjs:
omni_multi_embed.append(
lateral_omni(a, lp_inds, rp_inds, n_components=n_omni_components)
)
omni_multi_embed = np.concatenate(omni_multi_embed, axis=1)
ase_multi_embed = svd(omni_multi_embed)
# %% [markdown]
# ## Bilateral OMNI on each of the 4-colors, with regularization, concatenated, SVD
omni_reg_embed = []
for a in adjs:
omni_reg_embed.append(
reg_lateral_omni(a, adj, lp_inds, rp_inds, n_components=n_omni_components)
)
omni_reg_embed = np.concatenate(omni_reg_embed, axis=1)
ase_reg_embed = svd(omni_reg_embed)
# %% [markdown]
# ## Bilateral OMNI on all 4-colors
adjs_and_sum = adjs + [adj]
omni_joint_embed = multi_lateral_omni(
adjs_and_sum, lp_inds, rp_inds, n_components=n_omni_components
)
ase_joint_embed = svd(omni_joint_embed)
# %% [markdown]
# ## Compute neighbors at K
new_lp_inds = np.arange(len(mg) // 2)
new_rp_inds = np.arange(len(mg) // 2) + len(mg) // 2
def compute_neighbors_at_k(X, left_inds, right_inds, k_max=10, metric="euclidean"):
nn = NearestNeighbors(radius=0, n_neighbors=k_max + 1, metric=metric)
nn.fit(X)
neigh_dist, neigh_inds = nn.kneighbors(X)
is_neighbor_mat = np.zeros((X.shape[0], k_max), dtype=bool)
for left_ind, right_ind in zip(left_inds, right_inds):
left_neigh_inds = neigh_inds[left_ind]
right_neigh_inds = neigh_inds[right_ind]
for k in range(k_max):
if right_ind in left_neigh_inds[: k + 2]:
is_neighbor_mat[left_ind, k] = True
if left_ind in right_neigh_inds[: k + 2]:
is_neighbor_mat[right_ind, k] = True
neighbors_at_k = np.sum(is_neighbor_mat, axis=0) / is_neighbor_mat.shape[0]
neighbors_at_k = pd.Series(data=neighbors_at_k, index=np.arange(1, k_max + 1))
neighbors_at_k.name = "p_at_k"
return neighbors_at_k
# names = ["flat", "multi", "joint", "reg", "naive"]
# embeds = [
# ase_flat_embed,
# ase_multi_embed,
# ase_joint_embed,
# ase_reg_embed,
# ase_naive_embed,
# ]
names = ["iso", "aniso", "multi"]
embeds = [ase_naive_embed, ase_flat_embed, ase_multi_embed]
dims = np.arange(1, 16)
dfs = []
for d in dims:
for name, embed in zip(names, embeds):
p_at_k = compute_neighbors_at_k(embed[:, :d], new_lp_inds, new_rp_inds)
neighbor_df = p_at_k.to_frame()
neighbor_df.reset_index(inplace=True)
neighbor_df.rename(columns={"index": "K"}, inplace=True)
neighbor_df["method"] = name
neighbor_df["d"] = d
dfs.append(neighbor_df)
neighbor_df = pd.concat(dfs, ignore_index=True)
# %% [markdown]
# ## Plot nearest neighbor results
fig, ax = plt.subplots(1, 1, figsize=(8, 4))
k = 5
sns.lineplot(
data=neighbor_df[neighbor_df["K"] == k],
x="d",
y="p_at_k",
hue="method",
style="method",
# style_order=["reg", "joint", "multi", "flat"],
)
ax.set_ylabel(f"P @ K = {k}")
ax.set_xlabel("# dimensions")
stashfig(f"p_at_k={k}_embed-iso-aniso-multi")
# %% [markdown]
# ## Look at the best one! (ish)
new_meta = meta.iloc[np.concatenate((lp_inds, rp_inds), axis=0)].copy()
labels = new_meta["merge_class"].values
plot_pairs(
ase_flat_embed[:, :8],
labels,
left_pair_inds=new_lp_inds,
right_pair_inds=new_rp_inds,
)
stashfig("ase-flat-pairs")
quick_embed_viewer(
ase_flat_embed[:, :8], labels=labels, lp_inds=new_lp_inds, rp_inds=new_rp_inds
)
stashfig("ase-flat-manifold")
# %% [markdown]
# ## Now, try to do a similar quantification but for classes
# KC
# MBON
# MBIN
# ORN
# UPN
# some of the antennal lobe stuff
def class_neighbors_at_k(X, labels, target, k_max=10, metric="euclidean"):
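    # For each node of class `target`, compute the fraction of its K nearest
    # neighbors (self excluded) that also belong to `target`, averaged over the
    # class, for every K up to k_max.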
nn = NearestNeighbors(radius=0, n_neighbors=k_max + 1, metric=metric)
nn.fit(X)
neigh_dist, neigh_inds = nn.kneighbors(X)
neigh_inds = neigh_inds[:, 1:] # remove self as neighbor
mask = labels == target
target_inds = np.arange(len(X))[mask]
target_neigh_inds = neigh_inds[mask]
p_nearby = []
neighbors_in_target = np.isin(target_neigh_inds, target_inds)
for k in np.arange(1, k_max + 1):
p_nearby_at_k = neighbors_in_target[:, :k].sum() / (k * len(target_inds))
p_nearby.append(p_nearby_at_k)
p_nearby = np.array(p_nearby)
neighbor_df = pd.DataFrame(data=p_nearby, index=np.arange(1, k_max + 1))
neighbor_df.index.name = "K"
neighbor_df.rename(columns={0: target}, inplace=True)
return neighbor_df
new_meta = meta.iloc[np.concatenate((lp_inds, rp_inds), axis=0)].copy()
labels = new_meta["merge_class"].values
k_max = 10
embed_df = []
for name, embed in zip(names, embeds):
neighbor_df = []
for d in np.arange(1, 16):
X = embed[:, :d]
class1 = new_meta["class1"].values
neighbors = []
for target in ["uPN", "sens-ORN"]:
neighbors.append(class_neighbors_at_k(X, labels, target))
for target in ["KC", "mPN", "MBON", "MBIN"]:
neighbors.append(class_neighbors_at_k(X, class1, target))
neighbors = pd.concat(neighbors, ignore_index=False, axis=1)
neighbors = neighbors.reset_index()
neighbors = neighbors.melt(value_name="p_at_k", var_name="class", id_vars=["K"])
neighbors["d"] = d
neighbor_df.append(neighbors)
neighbor_df = pd.concat(neighbor_df, axis=0)
neighbor_df["method"] = name
embed_df.append(neighbor_df)
embed_df = pd.concat(embed_df, axis=0)
# k = 5
# temp_df = embed_df[embed_df["K"] == k]
# fig, axs = plt.subplots(2, 2, figsize=(20, 10), sharex=True, sharey=True)
# axs = axs.ravel()
# for i, name in enumerate(names):
# ax = axs[i]
# plot_df = temp_df[temp_df["method"] == name]
# sns.lineplot(data=plot_df, x="d", y="p_at_k", hue="class", ax=ax)
# ax.set_title(name)
# ax.get_legend().remove()
# plt.tight_layout()
# ax.legend(bbox_to_anchor=(1, 1), loc="upper left")
# hard to compare directly on the above
# %% [markdown]
# ##
# fix d
# one plot for each class
# line for each of the embeddings
k = 5
plot_df = embed_df[embed_df["K"] == k]
# plot_df = plot_df[plot_df["d"] == d]
classes = ["uPN", "sens-ORN", "KC", "mPN", "MBON", "MBIN"]
fig, axs = plt.subplots(2, 3, figsize=(20, 10), sharex=True, sharey=True)
axs = axs.ravel()
for i, cell_class in enumerate(classes):
ax = axs[i]
temp_df = plot_df[plot_df["class"] == cell_class]
sns.lineplot(
data=temp_df,
x="d",
y="p_at_k",
hue="method",
ax=ax,
style="method",
# style_order=["reg", "joint", "multi", "flat"],
)
ax.set_title(cell_class)
axs[0].set_ylabel(f"Prop. @ K = {k}")
axs[3].set_ylabel(f"Prop. @ K = {k}")
plt.tight_layout()
stashfig(f"embed-class-knn-k={k}")
# %%
# # Notes
# I like aniso better than iso
# not sure about reg or not
# for sides, we have {iso, aniso}
# for method, we have {lse, ase}
# for color, we have {flat, multi (separate), joint (omni), reg (multi but with G)}
# there seems to be no single embedding that is winning at everything.
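# A hypothetical enumeration of that design space (sketch only; `run_embedding`
# is not a function defined in this notebook):
# import itertools
# for side, method, color in itertools.product(
#     ["iso", "aniso"], ["lse", "ase"], ["flat", "multi", "joint", "reg"]
# ):
#     embedding = run_embedding(side=side, method=method, color=color)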
n_levels = 12
metric = "bic"
bic_ratio = 1
d = 8
basename = f"aniso-omni-bic_ratio={bic_ratio}-d={d}"
mc = MaggotCluster(
"0",
adj=adj,
n_init=25,
meta=new_meta,
stashfig=stashfig,
min_clusters=1,
max_clusters=3,
X=ase_flat_embed[:, :d],
bic_ratio=bic_ratio,
reembed=False,
min_split=4,
)
for i in range(n_levels):
for j, node in enumerate(mc.get_lowest_level()):
node.fit_candidates(show_plot=False)
for j, node in enumerate(mc.get_lowest_level()):
node.select_model(k=None, metric=metric)
mc.collect_labels()
n_levels = mc.height
fig, axs = plt.subplots(1, n_levels, figsize=(10 * n_levels, 40))
for i in range(n_levels):
ax = axs[i]
stacked_barplot(
mc.meta[f"lvl{i}_labels_side"],
mc.meta["merge_class"],
category_order=np.unique(mc.meta[f"lvl{i}_labels_side"].values),
color_dict=CLASS_COLOR_DICT,
norm_bar_width=False,
ax=ax,
)
ax.set_yticks([])
ax.get_legend().remove()
plt.tight_layout()
stashfig(f"count-barplot-lvl{i}" + basename)
plt.close()
fig, axs = plt.subplots(1, n_levels, figsize=(10 * n_levels, 40))
for i in range(n_levels):
ax = axs[i]
stacked_barplot(
mc.meta[f"lvl{i}_labels_side"],
mc.meta["merge_class"],
category_order=np.unique(mc.meta[f"lvl{i}_labels_side"].values),
color_dict=CLASS_COLOR_DICT,
norm_bar_width=True,
ax=ax,
)
ax.set_yticks([])
ax.get_legend().remove()
plt.tight_layout()
stashfig(f"prop-barplot-lvl{i}" + basename)
plt.close()
inds = np.concatenate((lp_inds, rp_inds))
new_adj = adj[np.ix_(inds, inds)]
new_meta = mc.meta
new_meta["sf"] = -signal_flow(new_adj)
for l in range(n_levels):
fig, ax = plt.subplots(1, 1, figsize=(20, 20))
sort_class = [f"lvl{i}_labels" for i in range(l)]
sort_class += [f"lvl{l}_labels_side"]
adjplot(
new_adj,
meta=new_meta,
sort_class=sort_class,
item_order="merge_class",
plot_type="scattermap",
class_order="sf",
sizes=(0.5, 1),
ticks=False,
colors="merge_class",
ax=ax,
palette=CLASS_COLOR_DICT,
gridline_kws=dict(linewidth=0.2, color="grey", linestyle="--"),
)
stashfig(f"adj-lvl{l}" + basename)
plt.close()
pairs = np.unique(new_meta["Pair ID"])
p_same_clusters = []
for l in range(n_levels):
n_same = 0
for p in pairs:
if new_meta[new_meta["Pair ID"] == p][f"lvl{l}_labels"].nunique() == 1:
n_same += 1
p_same = n_same / len(pairs)
print(p_same)
p_same_clusters.append(p_same)
fig, ax = plt.subplots(1, 1, figsize=(8, 4))
sns.lineplot(x=range(n_levels), y=p_same_clusters, ax=ax)
sns.scatterplot(x=range(n_levels), y=p_same_clusters, ax=ax)
ax.set_ylabel("P same cluster")
ax.set_xlabel("Level")
stashfig("p_in_same_cluster" + basename)
n_clusters = []
for l in range(n_levels):
n_clusters.append(new_meta[f"lvl{l}_labels"].nunique())
fig, ax = plt.subplots(1, 1, figsize=(8, 4))
sns.lineplot(x=range(n_levels), y=n_clusters, ax=ax)
sns.scatterplot(x=range(n_levels), y=n_clusters, ax=ax)
ax.set_ylabel("Clusters per side")
ax.set_xlabel("Level")
stashfig("n_cluster" + basename)
size_dfs = []
for l in range(n_levels):
sizes = new_meta.groupby(f"lvl{l}_labels_side").size().values
sizes = pd.DataFrame(data=sizes, columns=["Size"])
sizes["Level"] = l
size_dfs.append(sizes)
size_df = pd.concat(size_dfs)
fig, ax = plt.subplots(1, 1, figsize=(8, 4))
sns.stripplot(data=size_df, x="Level", y="Size", ax=ax, jitter=0.45, alpha=0.5)
ax.set_yscale("log")
stashfig("log-sizes" + basename)
# %% [markdown]
# ## some other kind of visualization
import networkx as nx
import colorcet as cc
def to_minigraph(
adj,
labels,
drop_neg=True,
remove_diag=True,
size_scaler=1,
use_counts=False,
use_weights=True,
color_map=None,
):
# convert the adjacency and a partition to a minigraph based on SBM probs
prob_df = get_blockmodel_df(
adj, labels, return_counts=use_counts, use_weights=use_weights
)
if drop_neg and ("-1" in prob_df.index):
prob_df.drop("-1", axis=0, inplace=True)
prob_df.drop("-1", axis=1, inplace=True)
if remove_diag:
adj = prob_df.values
adj -= np.diag(np.diag(adj))
prob_df = pd.DataFrame(data=adj, index=prob_df.index, columns=prob_df.columns)
g = nx.from_pandas_adjacency(prob_df, create_using=nx.DiGraph())
uni_labels, counts = np.unique(labels, return_counts=True)
    # add size attribute based on number of vertices
size_map = dict(zip(uni_labels, size_scaler * counts))
nx.set_node_attributes(g, size_map, name="Size")
# add signal flow attribute (for the minigraph itself)
mini_adj = nx.to_numpy_array(g, nodelist=uni_labels)
node_signal_flow = signal_flow(mini_adj)
sf_map = dict(zip(uni_labels, node_signal_flow))
nx.set_node_attributes(g, sf_map, name="Signal Flow")
# add spectral properties
# sym_adj = symmetrize(mini_adj)
# n_components = 10
# latent = AdjacencySpectralEmbed(n_components=n_components).fit_transform(sym_adj)
# for i in range(n_components):
# latent_dim = latent[:, i]
# lap_map = dict(zip(uni_labels, latent_dim))
# nx.set_node_attributes(g, lap_map, name=f"AdjEvec-{i}")
# add spring layout properties
pos = nx.spring_layout(g)
spring_x = {}
spring_y = {}
for key, val in pos.items():
spring_x[key] = val[0]
spring_y[key] = val[1]
nx.set_node_attributes(g, spring_x, name="Spring-x")
nx.set_node_attributes(g, spring_y, name="Spring-y")
# add colors
if color_map is None:
color_map = dict(zip(uni_labels, cc.glasbey_light))
nx.set_node_attributes(g, color_map, name="Color")
return g
from src.visualization import draw_networkx_nice
from src.utils import get_blockmodel_df
for l in range(n_levels):
labels = new_meta[f"lvl{l}_labels_side"].values
# block_df = get_blockmodel_df(new_adj, labels, return_counts=False, use_weights=True)
mini_g = to_minigraph(new_adj, labels, use_counts=True, use_weights=True)
draw_networkx_nice(
mini_g,
"Spring-x",
"Signal Flow",
colors="Color",
sizes="Size",
weight_scale=1 / 1000,
)
# %%
from src.visualization import plot_neurons
from src.pymaid import start_instance
lvl = 4
uni_labels = np.unique(new_meta[f"lvl{lvl}_labels"])
start_instance()
for label in uni_labels:
plot_neurons(new_meta, f"lvl{lvl}_labels", label=label, barplot=True)
stashfig(f"label{label}_lvl{lvl}" + basename)
# %% [markdown]
# ## Do the distance thing for Michael
d = 12
X = ase_flat_embed[:, :d]
n_pairs = len(X) // 2
new_lp_inds = np.arange(n_pairs)
new_rp_inds = np.arange(n_pairs).copy() + n_pairs
left_X = X[new_lp_inds]
right_X = X[new_rp_inds]
left_meta = meta.iloc[lp_inds]
right_meta = meta.iloc[rp_inds]
# get nearest right neighbor for everyone on the left
def rank_neighbors(source_X, target_X, metric="euclidean"):
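    # For every row of `source_X`, rank all points in `target_X` by distance
    # (1 = nearest); returns an (n_source, n_target) integer matrix of ranks.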
n_target = len(target_X)
n_source = len(source_X)
nn = NearestNeighbors(radius=0, n_neighbors=n_target, metric=metric)
nn.fit(target_X)
neigh_dist, neigh_inds = nn.kneighbors(source_X)
source_rank_neighbors = np.empty((n_source, n_target), dtype=int)
for i in range(n_source):
source_rank_neighbors[i, neigh_inds[i]] = np.arange(1, n_target + 1, dtype=int)
return source_rank_neighbors
left_neighbors = rank_neighbors(left_X, right_X)
right_neighbors = rank_neighbors(right_X, left_X)
left_df = pd.DataFrame(
index=left_meta.index, columns=right_meta.index, data=left_neighbors
)
stashcsv(left_df, f"left_rank_neighbors_on_right-aniso_omni-d={d}")
right_df = pd.DataFrame(
index=right_meta.index, columns=left_meta.index, data=right_neighbors
)
stashcsv(right_df, f"right_rank_neighbors_on_left-aniso_omni-d={d}")
# %% [markdown]
# ##
fig, ax = plt.subplots(1, 1, figsize=(8, 4))
sns.distplot(
np.diag(left_neighbors), bins=np.arange(0, n_pairs, 1), kde=False, norm_hist=True
)
sns.distplot(
np.diag(right_neighbors), bins=np.arange(0, n_pairs, 1), kde=False, norm_hist=True
)
ax.set_xlim((0, 20))
ax.set_xticks(np.arange(0, 20, 2))
# ax.xaxis.set_major_locator(plt.IndexLocator(1, 2))
# %%
|
the-stack_0_4399 | """
Mask R-CNN
Multi-GPU Support for Keras.
Copyright (c) 2017 Matterport, Inc.
Licensed under the MIT License (see LICENSE for details)
Written by Waleed Abdulla
Ideas and a small code snippets from these sources:
https://github.com/fchollet/keras/issues/2436
https://medium.com/@kuza55/transparent-multi-gpu-training-on-tensorflow-with-keras-8b0016fd9012
https://github.com/avolkov1/keras_experiments/blob/master/keras_exp/multigpu/
https://github.com/fchollet/keras/blob/master/keras/utils/training_utils.py
"""
#import tensorflow as tf
# Use the TF1 compatibility API because TensorFlow 2 does not support placeholders.
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
import keras.backend as K
import keras.layers as KL
import keras.models as KM
class ParallelModel(KM.Model):
"""Subclasses the standard Keras Model and adds multi-GPU support.
It works by creating a copy of the model on each GPU. Then it slices
the inputs and sends a slice to each copy of the model, and then
merges the outputs together and applies the loss on the combined
outputs.
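
    A minimal usage sketch (hypothetical `build_model`, 2 available GPUs):

        model = build_model(...)                  # any KM.Model
        model = ParallelModel(model, gpu_count=2)
        model.compile(...)                        # then compile/fit as usual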
"""
def __init__(self, keras_model, gpu_count):
"""Class constructor.
keras_model: The Keras model to parallelize
gpu_count: Number of GPUs. Must be > 1
"""
self.inner_model = keras_model
self.gpu_count = gpu_count
merged_outputs = self.make_parallel()
super(ParallelModel, self).__init__(inputs=self.inner_model.inputs,
outputs=merged_outputs)
def __getattribute__(self, attrname):
"""Redirect loading and saving methods to the inner model. That's where
the weights are stored."""
if 'load' in attrname or 'save' in attrname:
return getattr(self.inner_model, attrname)
return super(ParallelModel, self).__getattribute__(attrname)
def summary(self, *args, **kwargs):
"""Override summary() to display summaries of both, the wrapper
and inner models."""
super(ParallelModel, self).summary(*args, **kwargs)
self.inner_model.summary(*args, **kwargs)
def make_parallel(self):
"""Creates a new wrapper model that consists of multiple replicas of
the original model placed on different GPUs.
"""
# Slice inputs. Slice inputs on the CPU to avoid sending a copy
# of the full inputs to all GPUs. Saves on bandwidth and memory.
input_slices = {name: tf.split(x, self.gpu_count)
for name, x in zip(self.inner_model.input_names,
self.inner_model.inputs)}
output_names = self.inner_model.output_names
outputs_all = []
for i in range(len(self.inner_model.outputs)):
outputs_all.append([])
# Run the model call() on each GPU to place the ops there
for i in range(self.gpu_count):
with tf.device('/gpu:%d' % i):
with tf.name_scope('tower_%d' % i):
# Run a slice of inputs through this replica
zipped_inputs = zip(self.inner_model.input_names,
self.inner_model.inputs)
inputs = [
KL.Lambda(lambda s: input_slices[name][i],
output_shape=lambda s: (None,) + s[1:])(tensor)
for name, tensor in zipped_inputs]
# Create the model replica and get the outputs
outputs = self.inner_model(inputs)
if not isinstance(outputs, list):
outputs = [outputs]
# Save the outputs for merging back together later
for l, o in enumerate(outputs):
outputs_all[l].append(o)
# Merge outputs on CPU
with tf.device('/cpu:0'):
merged = []
for outputs, name in zip(outputs_all, output_names):
# Concatenate or average outputs?
# Outputs usually have a batch dimension and we concatenate
# across it. If they don't, then the output is likely a loss
# or a metric value that gets averaged across the batch.
# Keras expects losses and metrics to be scalars.
if K.int_shape(outputs[0]) == ():
# Average
m = KL.Lambda(lambda o: tf.add_n(o) / len(outputs), name=name)(outputs)
else:
# Concatenate
m = KL.Concatenate(axis=0, name=name)(outputs)
merged.append(m)
return merged
if __name__ == "__main__":
# Testing code below. It creates a simple model to train on MNIST and
# tries to run it on 2 GPUs. It saves the graph so it can be viewed
# in TensorBoard. Run it as:
#
# python3 parallel_model.py
import os
import numpy as np
import keras.optimizers
from keras.datasets import mnist
from keras.preprocessing.image import ImageDataGenerator
GPU_COUNT = 2
# Root directory of the project
ROOT_DIR = os.path.abspath("../")
# Directory to save logs and trained model
MODEL_DIR = os.path.join(ROOT_DIR, "logs")
def build_model(x_train, num_classes):
# Reset default graph. Keras leaves old ops in the graph,
# which are ignored for execution but clutter graph
# visualization in TensorBoard.
tf.reset_default_graph()
inputs = KL.Input(shape=x_train.shape[1:], name="input_image")
x = KL.Conv2D(32, (3, 3), activation='relu', padding="same",
name="conv1")(inputs)
x = KL.Conv2D(64, (3, 3), activation='relu', padding="same",
name="conv2")(x)
x = KL.MaxPooling2D(pool_size=(2, 2), name="pool1")(x)
x = KL.Flatten(name="flat1")(x)
x = KL.Dense(128, activation='relu', name="dense1")(x)
x = KL.Dense(num_classes, activation='softmax', name="dense2")(x)
return KM.Model(inputs, x, "digit_classifier_model")
# Load MNIST Data
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = np.expand_dims(x_train, -1).astype('float32') / 255
x_test = np.expand_dims(x_test, -1).astype('float32') / 255
print('x_train shape:', x_train.shape)
print('x_test shape:', x_test.shape)
# Build data generator and model
datagen = ImageDataGenerator()
model = build_model(x_train, 10)
# Add multi-GPU support.
model = ParallelModel(model, GPU_COUNT)
optimizer = keras.optimizers.SGD(lr=0.01, momentum=0.9, clipnorm=5.0)
model.compile(loss='sparse_categorical_crossentropy',
optimizer=optimizer, metrics=['accuracy'])
model.summary()
# Train
model.fit_generator(
datagen.flow(x_train, y_train, batch_size=64),
steps_per_epoch=50, epochs=10, verbose=1,
validation_data=(x_test, y_test),
callbacks=[keras.callbacks.TensorBoard(log_dir=MODEL_DIR,
write_graph=True)]
)
|
the-stack_0_4401 | import random
import numpy as np
import skimage.color as sc
import torch
def get_patch(*args, patch_size=96, scale=2, multi=False, input_large=False):
ih, iw = args[0].shape[:2]
if not input_large:
p = scale if multi else 1
tp = p * patch_size
ip = tp // scale
else:
tp = patch_size
ip = patch_size
ix = random.randrange(0, iw - ip + 1)
iy = random.randrange(0, ih - ip + 1)
if not input_large:
tx, ty = scale * ix, scale * iy
else:
tx, ty = ix, iy
ret = [
args[0][iy : iy + ip, ix : ix + ip, :],
*[a[ty : ty + tp, tx : tx + tp, :] for a in args[1:]],
]
return ret
def set_channel(*args, n_channels=3):
def _set_channel(img):
if img.ndim == 2:
img = np.expand_dims(img, axis=2)
c = img.shape[2]
if n_channels == 1 and c == 3:
img = np.expand_dims(sc.rgb2ycbcr(img)[:, :, 0], 2)
elif n_channels == 3 and c == 1:
img = np.concatenate([img] * n_channels, 2)
return img
return [_set_channel(a) for a in args]
def np2Tensor(*args, rgb_range=255):
def _np2Tensor(img):
np_transpose = np.ascontiguousarray(img.transpose((2, 0, 1)))
tensor = torch.from_numpy(np_transpose).float()
tensor.mul_(rgb_range / 255)
return tensor
return [_np2Tensor(a) for a in args]
def augment(*args, hflip=True, rot=True):
hflip = hflip and random.random() < 0.5
vflip = rot and random.random() < 0.5
rot90 = rot and random.random() < 0.5
def _augment(img):
if hflip:
img = img[:, ::-1, :]
if vflip:
img = img[::-1, :, :]
if rot90:
img = img.transpose(1, 0, 2)
return img
return [_augment(a) for a in args]
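
# Minimal usage sketch (assuming `lr` and `hr` are H x W x C numpy images):
# lr_p, hr_p = get_patch(lr, hr, patch_size=96, scale=2)
# lr_p, hr_p = set_channel(lr_p, hr_p, n_channels=3)
# lr_p, hr_p = augment(lr_p, hr_p)
# lr_t, hr_t = np2Tensor(lr_p, hr_p, rgb_range=255)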
|
the-stack_0_4402 | # Examples of mouse input
import simplegui
import math
# intialize globals
width = 450
height = 300
ball_list = []
ball_radius = 15
ball_color = "Red"
# helper function
def distance(p, q):
return math.sqrt((p[0] - q[0]) ** 2 + (p[1] - q[1]) ** 2)
# define event handler for mouse click, draw
def click(pos):
ball_list.append(pos)
# if distance(ball_pos, pos) < ball_radius:
# if ball_color == "Red":
# ball_color = "Green"
# else:
# ball_pos = [pos[0], pos[1]]
# ball_color = "Red"
def draw(canvas):
for ball_pos in ball_list:
canvas.draw_circle(ball_pos, ball_radius, 1, "Black", ball_color)
# create frame
frame = simplegui.create_frame("Mouse selection", width, height)
frame.set_canvas_background("White")
# register event handler
frame.set_mouseclick_handler(click)
frame.set_draw_handler(draw)
# start frame
frame.start()
|
the-stack_0_4403 | import time
import shelve
import datetime
import settings
from twython import Twython
from contextlib import contextmanager
@contextmanager
def closing(this):
try:
yield this
finally:
this.close()
class TwitterStats():
def __init__(self):
# connect to twitter api
self.twitter = Twython(
app_key=settings.consumer_key,
app_secret=settings.consumer_secret,
oauth_token=settings.oauth_token,
oauth_token_secret=settings.oauth_token_secret
)
def init_storage(self):
storage = shelve.open('twitter_stats', writeback=True)
if not storage:
storage['followers'] = set()
storage['unfollowers'] = []
storage['unfollowers_since_last_check'] = None
storage['last_update'] = None
return storage
def get_followers(self):
follower_ids = self.twitter.getFollowersIDs()['ids']
return set(follower_ids)
def show_screen_name(self, user_id):
user = self.twitter.showUser(user_id=user_id)
screen_name = user['screen_name']
return screen_name
def update_unfollower_stats(self):
with closing(self.init_storage()) as storage:
previous_followers = storage['followers']
current_followers = self.get_followers()
new_unfollower_ids = previous_followers - current_followers
unfollowers_since_last_check = []
for follower_id in new_unfollower_ids:
unfollower = {
'id': follower_id,
'screen_name': self.show_screen_name(follower_id),
'timestamp': datetime.datetime.now().strftime('%b %d %Y %H:%M:%S')
}
storage['unfollowers'].append(unfollower)
unfollowers_since_last_check.append(unfollower)
storage['followers'] = current_followers
storage['unfollowers_since_last_check'] = unfollowers_since_last_check
storage['last_update'] = datetime.datetime.now().strftime('%b %d %Y %H:%M:%S')
def main():
twitter_stats = TwitterStats()
while True:
twitter_stats.update_unfollower_stats()
time.sleep(settings.update_interval)
if __name__ == '__main__':
main()
|
the-stack_0_4404 | #!/usr/bin/env python
#
# Copyright (c) 2001 - 2016 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "test/D/Scanner.py rel_2.5.1:3735:9dc6cee5c168 2016/11/03 14:02:02 bdbaddog"
"""
Verify that the D scanner can return multiple modules imported by
a single statement.
"""
import TestSCons
import sys
from os.path import abspath, dirname, join
sys.path.append(join(dirname(abspath(__file__)), 'Support'))
from executablesSearch import isExecutableOfToolAvailable
test = TestSCons.TestSCons()
_obj = TestSCons._obj
if not isExecutableOfToolAvailable(test, 'dmd'):
test.skip_test("Could not find 'dmd'; skipping test.\n")
test.subdir(['p'])
test.write('SConstruct', """
env = Environment()
env.Program('test1.d')
env.Program('test2.d')
""")
test.write(['test1.d'], """\
import module1;
import module2;
import module3;
import p.submodule1;
import p.submodule2;
int main() {
return 0;
}
""")
test.write(['test2.d'], """\
import
module1,
module2,
module3;
import
p.submodule1,
p.submodule2;
int main() {
return 0;
}
""")
test.write(['ignored.d'], """\
module ignored;
int something;
""")
test.write(['module1.d'], """\
module module1;
int something;
""")
test.write(['module2.d'], """\
module module2;
int something;
""")
test.write(['module3.di'], """\
module module3;
int something;
""")
test.write(['p', 'ignored.d'], """\
module p.ignored;
int something;
""")
test.write(['p', 'submodule1.d'], """\
module p.submodule1;
int something;
""")
test.write(['p', 'submodule2.d'], """\
module p.submodule2;
int something;
""")
arguments = 'test1%(_obj)s test2%(_obj)s' % locals()
test.run(arguments = arguments)
test.up_to_date(arguments = arguments)
test.write(['module2.d'], """\
module module2;
int something_else;
""")
test.not_up_to_date(arguments = arguments)
test.up_to_date(arguments = arguments)
test.write(['p', 'submodule2.d'], """\
module p.submodule2;
int something_else;
""")
test.not_up_to_date(arguments = arguments)
test.up_to_date(arguments = arguments)
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
the-stack_0_4405 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Wishart distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
from tensorflow.contrib.distributions.python.ops import distribution
from tensorflow.contrib.distributions.python.ops import operator_pd_cholesky
from tensorflow.contrib.distributions.python.ops import operator_pd_full
from tensorflow.contrib.framework.python.framework import tensor_util as contrib_tensor_util
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
class _WishartOperatorPD(distribution.Distribution):
"""The matrix Wishart distribution on positive definite matrices.
This distribution is defined by a scalar number of degrees of freedom `df` and
an instance of `OperatorPDBase`, which provides matrix-free access to a
symmetric positive definite operator, which defines the scale matrix.
#### Mathematical details.
The PDF of this distribution is,
```
f(X) = det(X)^(0.5 (df-k-1)) exp(-0.5 tr[inv(scale) X]) / B(scale, df)
```
where `df >= k` denotes the degrees of freedom, `scale` is a symmetric, pd,
`k x k` matrix, and the normalizing constant `B(scale, df)` is given by:
```
B(scale, df) = 2^(0.5 df k) |det(scale)|^(0.5 df) Gamma_k(0.5 df)
```
where `Gamma_k` is the multivariate Gamma function.
#### Examples
See `WishartFull`, `WishartCholesky` for examples of initializing and using
this class.
"""
def __init__(self,
df,
scale_operator_pd,
cholesky_input_output_matrices=False,
validate_args=False,
allow_nan_stats=True,
name=None):
"""Construct Wishart distributions.
Args:
df: `float` or `double` tensor, the degrees of freedom of the
distribution(s). `df` must be greater than or equal to `k`.
scale_operator_pd: `float` or `double` instance of `OperatorPDBase`.
      cholesky_input_output_matrices: `Boolean`. Any function whose input or
        output is a matrix assumes the input is Cholesky factored and returns
        a Cholesky factored matrix. For example, `log_pdf` takes a Cholesky
        input and `sample_n` returns a Cholesky when
        `cholesky_input_output_matrices=True`.
validate_args: `Boolean`, default `False`. Whether to validate input with
asserts. If `validate_args` is `False`, and the inputs are invalid,
correct behavior is not guaranteed.
allow_nan_stats: `Boolean`, default `True`. If `False`, raise an
exception if a statistic (e.g., mean, mode) is undefined for any batch
member. If True, batch members with valid parameters leading to
undefined statistics will return `NaN` for this statistic.
name: The name to give Ops created by the initializer.
Raises:
TypeError: if scale is not floating-type
TypeError: if scale.dtype != df.dtype
ValueError: if df < k, where scale operator event shape is `(k, k)`
"""
self._cholesky_input_output_matrices = cholesky_input_output_matrices
with ops.name_scope(name) as ns:
with ops.name_scope("init", values=[df, scale_operator_pd]):
if not scale_operator_pd.dtype.is_floating:
raise TypeError(
"scale_operator_pd.dtype=%s is not a floating-point type" %
scale_operator_pd.dtype)
self._scale_operator_pd = scale_operator_pd
self._df = ops.convert_to_tensor(
df, dtype=scale_operator_pd.dtype, name="df")
contrib_tensor_util.assert_same_float_dtype(
(self._df, self._scale_operator_pd))
if (self._scale_operator_pd.get_shape().ndims is None or
self._scale_operator_pd.get_shape()[-1].value is None):
self._dimension = math_ops.cast(
self._scale_operator_pd.vector_space_dimension(),
dtype=self._scale_operator_pd.dtype, name="dimension")
else:
self._dimension = ops.convert_to_tensor(
self._scale_operator_pd.get_shape()[-1].value,
dtype=self._scale_operator_pd.dtype, name="dimension")
df_val = tensor_util.constant_value(self._df)
dim_val = tensor_util.constant_value(self._dimension)
if df_val is not None and dim_val is not None:
df_val = np.asarray(df_val)
if not df_val.shape: df_val = (df_val,)
if any(df_val < dim_val):
raise ValueError(
"Degrees of freedom (df = %s) cannot be less than dimension of "
"scale matrix (scale.dimension = %s)"
% (df_val, dim_val))
elif validate_args:
assertions = check_ops.assert_less_equal(
self._dimension, self._df,
message=("Degrees of freedom (df = %s) cannot be less than "
"dimension of scale matrix (scale.dimension = %s)" %
(self._dimension, self._df)))
self._df = control_flow_ops.with_dependencies([assertions], self._df)
super(_WishartOperatorPD, self).__init__(
dtype=self._scale_operator_pd.dtype,
parameters={"df": self._df,
"scale_operator_pd": self._scale_operator_pd,
"dimension": self._dimension},
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
is_continuous=True,
is_reparameterized=True,
name=ns)
@property
def df(self):
"""Wishart distribution degree(s) of freedom."""
return self._df
def scale(self):
"""Wishart distribution scale matrix."""
if self._cholesky_input_output_matrices:
return self.scale_operator_pd.sqrt_to_dense()
else:
return self.scale_operator_pd.to_dense()
@property
def scale_operator_pd(self):
"""Wishart distribution scale matrix as an OperatorPD."""
return self._scale_operator_pd
@property
def cholesky_input_output_matrices(self):
"""Boolean indicating if `Tensor` input/outputs are Cholesky factorized."""
return self._cholesky_input_output_matrices
@property
def dimension(self):
"""Dimension of underlying vector space. The `p` in `R^(p*p)`."""
return self._dimension
def _event_shape(self):
s = self.scale_operator_pd.shape()
return array_ops.slice(s, array_ops.shape(s) - 2, [2])
def _get_event_shape(self):
return self.scale_operator_pd.get_shape()[-2:]
def _batch_shape(self):
return self.scale_operator_pd.batch_shape()
def _get_batch_shape(self):
return self.scale_operator_pd.get_batch_shape()
def _sample_n(self, n, seed):
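    # Sampling uses the Bartlett decomposition: a lower-triangular matrix with
    # standard-normal entries below the diagonal and chi-distributed entries on
    # the diagonal, pre-multiplied by a square root of the scale matrix.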
batch_shape = self.batch_shape()
event_shape = self.event_shape()
batch_ndims = array_ops.shape(batch_shape)[0]
ndims = batch_ndims + 3 # sample_ndims=1, event_ndims=2
shape = array_ops.concat(0, ((n,), batch_shape, event_shape))
# Complexity: O(nbk^2)
x = random_ops.random_normal(shape=shape,
mean=0.,
stddev=1.,
dtype=self.dtype,
seed=seed)
# Complexity: O(nbk)
# This parametrization is equivalent to Chi2, i.e.,
# ChiSquared(k) == Gamma(alpha=k/2, beta=1/2)
g = random_ops.random_gamma(shape=(n,),
alpha=self._multi_gamma_sequence(
0.5 * self.df, self.dimension),
beta=0.5,
dtype=self.dtype,
seed=seed)
# Complexity: O(nbk^2)
x = array_ops.batch_matrix_band_part(x, -1, 0) # Tri-lower.
# Complexity: O(nbk)
x = array_ops.batch_matrix_set_diag(x, math_ops.sqrt(g))
# Make batch-op ready.
# Complexity: O(nbk^2)
perm = array_ops.concat(0, (math_ops.range(1, ndims), (0,)))
x = array_ops.transpose(x, perm)
shape = array_ops.concat(0, (batch_shape, (event_shape[0], -1)))
x = array_ops.reshape(x, shape)
# Complexity: O(nbM) where M is the complexity of the operator solving a
# vector system. E.g., for OperatorPDDiag, each matmul is O(k^2), so
# this complexity is O(nbk^2). For OperatorPDCholesky, each matmul is
# O(k^3) so this step has complexity O(nbk^3).
x = self.scale_operator_pd.sqrt_matmul(x)
# Undo make batch-op ready.
# Complexity: O(nbk^2)
shape = array_ops.concat(0, (batch_shape, event_shape, (n,)))
x = array_ops.reshape(x, shape)
perm = array_ops.concat(0, ((ndims-1,), math_ops.range(0, ndims-1)))
x = array_ops.transpose(x, perm)
if not self.cholesky_input_output_matrices:
# Complexity: O(nbk^3)
x = math_ops.batch_matmul(x, x, adj_y=True)
return x
def _log_prob(self, x):
if self.cholesky_input_output_matrices:
x_sqrt = x
else:
# Complexity: O(nbk^3)
x_sqrt = linalg_ops.cholesky(x)
batch_shape = self.batch_shape()
event_shape = self.event_shape()
ndims = array_ops.rank(x_sqrt)
# sample_ndims = ndims - batch_ndims - event_ndims
sample_ndims = ndims - array_ops.shape(batch_shape)[0] - 2
sample_shape = array_ops.slice(
array_ops.shape(x_sqrt), [0], [sample_ndims])
# We need to be able to pre-multiply each matrix by its corresponding
# batch scale matrix. Since a Distribution Tensor supports multiple
# samples per batch, this means we need to reshape the input matrix `x`
# so that the first b dimensions are batch dimensions and the last two
# are of shape [dimension, dimensions*number_of_samples]. Doing these
# gymnastics allows us to do a batch_solve.
#
# After we're done with sqrt_solve (the batch operation) we need to undo
# this reshaping so what we're left with is a Tensor partitionable by
# sample, batch, event dimensions.
# Complexity: O(nbk^2) since transpose must access every element.
scale_sqrt_inv_x_sqrt = x_sqrt
perm = array_ops.concat(0, (math_ops.range(sample_ndims, ndims),
math_ops.range(0, sample_ndims)))
scale_sqrt_inv_x_sqrt = array_ops.transpose(scale_sqrt_inv_x_sqrt, perm)
shape = array_ops.concat(
0, (batch_shape,
(math_ops.cast(self.dimension, dtype=dtypes.int32), -1)))
scale_sqrt_inv_x_sqrt = array_ops.reshape(scale_sqrt_inv_x_sqrt, shape)
# Complexity: O(nbM*k) where M is the complexity of the operator solving
# a vector system. E.g., for OperatorPDDiag, each solve is O(k), so
# this complexity is O(nbk^2). For OperatorPDCholesky, each solve is
# O(k^2) so this step has complexity O(nbk^3).
scale_sqrt_inv_x_sqrt = self.scale_operator_pd.sqrt_solve(
scale_sqrt_inv_x_sqrt)
# Undo make batch-op ready.
# Complexity: O(nbk^2)
shape = array_ops.concat(0, (batch_shape, event_shape, sample_shape))
scale_sqrt_inv_x_sqrt = array_ops.reshape(scale_sqrt_inv_x_sqrt, shape)
perm = array_ops.concat(0, (math_ops.range(ndims - sample_ndims, ndims),
math_ops.range(0, ndims - sample_ndims)))
scale_sqrt_inv_x_sqrt = array_ops.transpose(scale_sqrt_inv_x_sqrt, perm)
# Write V = SS', X = LL'. Then:
# tr[inv(V) X] = tr[inv(S)' inv(S) L L']
# = tr[inv(S) L L' inv(S)']
# = tr[(inv(S) L) (inv(S) L)']
# = sum_{ik} (inv(S) L)_{ik}^2
# The second equality follows from the cyclic permutation property.
# Complexity: O(nbk^2)
trace_scale_inv_x = math_ops.reduce_sum(
math_ops.square(scale_sqrt_inv_x_sqrt),
reduction_indices=[-2, -1])
# Complexity: O(nbk)
half_log_det_x = math_ops.reduce_sum(
math_ops.log(array_ops.batch_matrix_diag_part(x_sqrt)),
reduction_indices=[-1])
# Complexity: O(nbk^2)
log_prob = ((self.df - self.dimension - 1.) * half_log_det_x -
0.5 * trace_scale_inv_x -
self.log_normalizing_constant())
# Set shape hints.
# Try to merge what we know from the input then what we know from the
# parameters of this distribution.
if x.get_shape().ndims is not None:
log_prob.set_shape(x.get_shape()[:-2])
if (log_prob.get_shape().ndims is not None and
self.get_batch_shape().ndims is not None and
self.get_batch_shape().ndims > 0):
log_prob.get_shape()[-self.get_batch_shape().ndims:].merge_with(
self.get_batch_shape())
return log_prob
def _prob(self, x):
return math_ops.exp(self._log_prob(x))
def _entropy(self):
half_dp1 = 0.5 * self.dimension + 0.5
half_df = 0.5 * self.df
return (self.dimension * (half_df + half_dp1 * math.log(2.)) +
half_dp1 * self.scale_operator_pd.log_det() +
self._multi_lgamma(half_df, self.dimension) +
(half_dp1 - half_df) * self._multi_digamma(half_df, self.dimension))
def _mean(self):
if self.cholesky_input_output_matrices:
return math_ops.sqrt(self.df) * self.scale_operator_pd.sqrt_to_dense()
return self.df * self.scale_operator_pd.to_dense()
def _variance(self):
x = math_ops.sqrt(self.df) * self.scale_operator_pd.to_dense()
d = array_ops.expand_dims(array_ops.batch_matrix_diag_part(x), -1)
v = math_ops.square(x) + math_ops.batch_matmul(d, d, adj_y=True)
if self.cholesky_input_output_matrices:
return linalg_ops.cholesky(v)
return v
def _std(self):
if self.cholesky_input_output_matrices:
raise ValueError(
"Computing std. dev. when is cholesky_input_output_matrices=True "
"does not make sense.")
return linalg_ops.cholesky(self.variance())
def _mode(self):
s = self.df - self.dimension - 1.
s = math_ops.select(
math_ops.less(s, 0.),
constant_op.constant(float("NaN"), dtype=self.dtype, name="nan"),
s)
if self.cholesky_input_output_matrices:
return math_ops.sqrt(s) * self.scale_operator_pd.sqrt_to_dense()
return s * self.scale_operator_pd.to_dense()
def mean_log_det(self, name="mean_log_det"):
"""Computes E[log(det(X))] under this Wishart distribution."""
with self._name_scope(name):
return (self._multi_digamma(0.5 * self.df, self.dimension) +
self.dimension * math.log(2.) +
self.scale_operator_pd.log_det())
def log_normalizing_constant(self, name="log_normalizing_constant"):
"""Computes the log normalizing constant, log(Z)."""
with self._name_scope(name):
return (self.df * self.scale_operator_pd.sqrt_log_det() +
0.5 * self.df * self.dimension * math.log(2.) +
self._multi_lgamma(0.5 * self.df, self.dimension))
def _multi_gamma_sequence(self, a, p, name="multi_gamma_sequence"):
"""Creates sequence used in multivariate (di)gamma; shape = shape(a)+[p]."""
with self._name_scope(name, values=[a, p]):
# Linspace only takes scalars, so we'll add in the offset afterwards.
seq = math_ops.linspace(
constant_op.constant(0., dtype=self.dtype),
0.5 - 0.5 * p,
math_ops.cast(p, dtypes.int32))
return seq + array_ops.expand_dims(a, [-1])
def _multi_lgamma(self, a, p, name="multi_lgamma"):
"""Computes the log multivariate gamma function; log(Gamma_p(a))."""
with self._name_scope(name, values=[a, p]):
seq = self._multi_gamma_sequence(a, p)
return (0.25 * p * (p - 1.) * math.log(math.pi) +
math_ops.reduce_sum(math_ops.lgamma(seq),
reduction_indices=(-1,)))
def _multi_digamma(self, a, p, name="multi_digamma"):
"""Computes the multivariate digamma function; Psi_p(a)."""
with self._name_scope(name, values=[a, p]):
seq = self._multi_gamma_sequence(a, p)
return math_ops.reduce_sum(math_ops.digamma(seq),
reduction_indices=(-1,))
class WishartCholesky(_WishartOperatorPD):
"""The matrix Wishart distribution on positive definite matrices.
This distribution is defined by a scalar degrees of freedom `df` and a
lower, triangular Cholesky factor which characterizes the scale matrix.
Using WishartCholesky is a constant-time improvement over WishartFull. It
saves an O(nbk^3) operation, i.e., a matrix-product operation for sampling
and a Cholesky factorization in log_prob. For most use-cases it often saves
another O(nbk^3) operation since most uses of Wishart will also use the
Cholesky factorization.
#### Mathematical details.
The PDF of this distribution is,
```
f(X) = det(X)^(0.5 (df-k-1)) exp(-0.5 tr[inv(scale) X]) / B(scale, df)
```
where `df >= k` denotes the degrees of freedom, `scale` is a symmetric, pd,
`k x k` matrix, and the normalizing constant `B(scale, df)` is given by:
```
B(scale, df) = 2^(0.5 df k) |det(scale)|^(0.5 df) Gamma_k(0.5 df)
```
where `Gamma_k` is the multivariate Gamma function.
#### Examples
```python
# Initialize a single 3x3 Wishart with Cholesky factored scale matrix and 5
# degrees-of-freedom.(*)
df = 5
chol_scale = tf.cholesky(...) # Shape is [3, 3].
dist = tf.contrib.distributions.WishartCholesky(df=df, scale=chol_scale)
  # Evaluate this on an observation in R^{3x3}, returning a scalar.
x = ... # A 3x3 positive definite matrix.
dist.pdf(x) # Shape is [], a scalar.
  # Evaluate this on two observations, each in R^{3x3}, returning a length-two
# Tensor.
x = [x0, x1] # Shape is [2, 3, 3].
dist.pdf(x) # Shape is [2].
# Initialize two 3x3 Wisharts with Cholesky factored scale matrices.
df = [5, 4]
chol_scale = tf.cholesky(...) # Shape is [2, 3, 3].
dist = tf.contrib.distributions.WishartCholesky(df=df, scale=chol_scale)
# Evaluate this on four observations.
x = [[x0, x1], [x2, x3]] # Shape is [2, 2, 3, 3].
dist.pdf(x) # Shape is [2, 2].
# (*) - To efficiently create a trainable covariance matrix, see the example
# in tf.contrib.distributions.batch_matrix_diag_transform.
```
"""
def __init__(self,
df,
scale,
cholesky_input_output_matrices=False,
validate_args=False,
allow_nan_stats=True,
name="WishartCholesky"):
"""Construct Wishart distributions.
Args:
df: `float` or `double` `Tensor`. Degrees of freedom, must be greater than
or equal to dimension of the scale matrix.
scale: `float` or `double` `Tensor`. The Cholesky factorization of
the symmetric positive definite scale matrix of the distribution.
      cholesky_input_output_matrices: `Boolean`. Any function whose input or
        output is a matrix assumes the input is Cholesky factored and returns
        a Cholesky factored matrix. For example, `log_pdf` takes a Cholesky
        input and `sample_n` returns a Cholesky when
        `cholesky_input_output_matrices=True`.
validate_args: `Boolean`, default `False`. Whether to validate input
with asserts. If `validate_args` is `False`, and the inputs are invalid,
correct behavior is not guaranteed.
allow_nan_stats: `Boolean`, default `True`. If `False`, raise an
exception if a statistic (e.g., mean, mode) is undefined for any batch
member. If True, batch members with valid parameters leading to
undefined statistics will return `NaN` for this statistic.
name: The name scope to give class member ops.
"""
super(WishartCholesky, self).__init__(
df=df,
scale_operator_pd=operator_pd_cholesky.OperatorPDCholesky(
scale, verify_pd=validate_args),
cholesky_input_output_matrices=cholesky_input_output_matrices,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
name=name)
class WishartFull(_WishartOperatorPD):
"""The matrix Wishart distribution on positive definite matrices.
This distribution is defined by a scalar degrees of freedom `df` and a
symmetric, positive definite scale matrix.
Evaluation of the pdf, determinant, and sampling are all `O(k^3)` operations
where `(k, k)` is the event space shape.
#### Mathematical details.
The PDF of this distribution is,
```
f(X) = det(X)^(0.5 (df-k-1)) exp(-0.5 tr[inv(scale) X]) / B(scale, df)
```
where `df >= k` denotes the degrees of freedom, `scale` is a symmetric, pd,
`k x k` matrix, and the normalizing constant `B(scale, df)` is given by:
```
B(scale, df) = 2^(0.5 df k) |det(scale)|^(0.5 df) Gamma_k(0.5 df)
```
where `Gamma_k` is the multivariate Gamma function.
#### Examples
```python
# Initialize a single 3x3 Wishart with Full factored scale matrix and 5
# degrees-of-freedom.(*)
df = 5
scale = ... # Shape is [3, 3]; positive definite.
dist = tf.contrib.distributions.WishartFull(df=df, scale=scale)
  # Evaluate this on an observation in R^{3x3}, returning a scalar.
x = ... # A 3x3 positive definite matrix.
dist.pdf(x) # Shape is [], a scalar.
  # Evaluate this on two observations, each in R^{3x3}, returning a length-two
# Tensor.
x = [x0, x1] # Shape is [2, 3, 3].
dist.pdf(x) # Shape is [2].
# Initialize two 3x3 Wisharts with Full factored scale matrices.
df = [5, 4]
scale = ... # Shape is [2, 3, 3].
dist = tf.contrib.distributions.WishartFull(df=df, scale=scale)
# Evaluate this on four observations.
x = [[x0, x1], [x2, x3]] # Shape is [2, 2, 3, 3]; xi is positive definite.
dist.pdf(x) # Shape is [2, 2].
# (*) - To efficiently create a trainable covariance matrix, see the example
# in tf.contrib.distributions.batch_matrix_diag_transform.
```
"""
def __init__(self,
df,
scale,
cholesky_input_output_matrices=False,
validate_args=False,
allow_nan_stats=True,
name="WishartFull"):
"""Construct Wishart distributions.
Args:
df: `float` or `double` `Tensor`. Degrees of freedom, must be greater than
or equal to dimension of the scale matrix.
scale: `float` or `double` `Tensor`. The symmetric positive definite
scale matrix of the distribution.
      cholesky_input_output_matrices: `Boolean`. Any function whose input or
        output is a matrix assumes the input is Cholesky factored and returns
        a Cholesky factored matrix. For example, `log_pdf` takes a Cholesky
        input and `sample_n` returns a Cholesky when
        `cholesky_input_output_matrices=True`.
validate_args: `Boolean`, default `False`. Whether to validate input with
asserts. If `validate_args` is `False`, and the inputs are invalid,
correct behavior is not guaranteed.
allow_nan_stats: `Boolean`, default `True`. If `False`, raise an
exception if a statistic (e.g., mean, mode) is undefined for any batch
member. If True, batch members with valid parameters leading to
undefined statistics will return `NaN` for this statistic.
name: The name scope to give class member ops.
"""
super(WishartFull, self).__init__(
df=df,
scale_operator_pd=operator_pd_full.OperatorPDFull(
scale, verify_pd=validate_args),
cholesky_input_output_matrices=cholesky_input_output_matrices,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
name=name)
|
the-stack_0_4406 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 21 11:05:24 2017
The oil and sugar separation (pretreatment) section for the baseline lipid cane biorefinery is defined here as System objects. The systems include all streams and units starting from enzyme treatment to purification of the sugar solution and the oil stream.
@author: Yoel
"""
import numpy as np
from biosteam import System, Stream
from biosteam.units import Mixer, EnzymeTreatment, CrushingMill, \
HXutility, RVF, SplitFlash, VibratingScreen, \
MagneticSeparator, Clarifier, MixTank, \
Shredder, ConveyingBelt, Splitter, \
SplitCentrifuge_LLE, Pump, StorageTank
from biorefineries.lipidcane.species import pretreatment_species
from biorefineries.lipidcane.process_settings import price
__all__ = ('pretreatment_sys', 'lipid_cane', 'lipidcane', 'area_100', 'area_200')
# %% Species
Stream.species = pretreatment_species
psp = ('Ash', 'CaO', 'Cellulose', 'Ethanol', 'Flocculant',
'Glucose', 'Hemicellulose', 'Lignin', 'Lipid',
'Solids', 'H3PO4', 'Sucrose', 'Water')
psp1 = ('Ash', 'Cellulose', 'Glucose', 'Hemicellulose',
'Lignin', 'Lipid', 'Solids', 'Sucrose', 'Water')
psp2 = ('Ash', 'CaO', 'Cellulose', 'Flocculant', 'Glucose',
'Hemicellulose', 'Lignin', 'Lipid',
'H3PO4', 'Sucrose', 'Water')
# %% Streams
f1 = (2000.042, 26986.69 , 2007.067, 15922.734, 14459.241,
10035.334, 5017.667, 22746.761, 234157.798)
lipidcane = lipid_cane = Stream('lipid_cane', f1, psp1, units='kg/hr',
price=price['Lipid cane'])
enzyme = Stream('enzyme', Cellulose=100, Water=900, units='kg/hr',
price=price['Protease'])
imbibition_water = Stream('imbibition_water',
Water=87023.35,
T = 338.15, units='kg/hr')
H3PO4 = Stream('H3PO4', H3PO4=74.23, Water=13.10, units='kg/hr',
price=price['H3PO4']) # to T203
lime = Stream('lime', CaO=333.00, Water=2200.00, units='kg/hr',
price=price['Lime']) # to P5
polymer = Stream('polymer', Flocculant=0.83, units='kg/hr',
price=price['Polymer']) # to T205
rvf_wash_water = Stream('rvf_wash_water',
Water=16770, units='kg/hr',
T=363.15) # to C202
oil_wash_water = Stream('oil_wash_water',
Water=1350, units='kg/hr',
T=358.15) # to T207
# %% Units
Stream.default_ID = 'd'
Stream.default_ID_number = 0
# Stream.default_ID_number = 100
# Feed the shredder
U101 = ConveyingBelt('U101', ins=lipid_cane)
U101.cost_items['Conveying belt'].ub = 2500
# Separate metals
U102 = MagneticSeparator('U102', ins=U101.outs)
# Shredded cane
U103 = Shredder('U103', ins=U102.outs)
# Stream.default_ID_number = 200
# Hydrolyze starch
T201 = EnzymeTreatment('T201', T=323.15) # T=50
# Finely crush lipid cane
U201 = CrushingMill('U201',
split=(0.92, 0.92, 0.04, 0.92, 0.92, 0.04, 0.1, 1),
order=('Ash', 'Cellulose', 'Glucose', 'Hemicellulose',
'Lignin', 'Sucrose', 'Lipid', 'Solids'),
moisture_content=0.5)
# Convey out bagasse
U202 = ConveyingBelt('U202', ins=U201.outs[0], outs='Bagasse')
# Mix in water
M201 = Mixer('M201')
# Screen out fibers
S201 = VibratingScreen('S201',
split=(0.35, 0.35, 0.88, 0.35,
0.35, 0.88, 0, 0.88, 0.88),
order=psp1)
# Store juice before treatment
T202 = StorageTank('T202')
T202.tau = 12
# Heat up before adding acid
H201 = HXutility('H201', T=343.15)
# Mix in acid
T203 = MixTank('T203')
# Pump acid solution
P201 = Pump('P201')
# Mix lime solution
T204 = MixTank('T204')
T204.tau = 1
P202 = Pump('P202')
# Blend acid lipid solution with lime
T205 = MixTank('T205')
# Mix recycle
M202 = Mixer('M202')
# Heat before adding flocculant
H202 = HXutility('H202', T=372.15)
# Mix in flocculant
T206 = MixTank('T206')
T206.tau = 1/4
# Separate residual solids
C201 = Clarifier('C201',
split=(0, 0, 0, 0.522, 0.522, 0, 0,
0.98, 0.522, 0.522, 0.522),
order=psp2)
# Remove solids as filter cake
C202 = RVF('C202',
outs=('filte_cake', ''),
moisture_content=0.80,
split=(0.85, 0.85, 0.85, 0.01, 0.85, 0.85, 0.01),
order=('Ash', 'CaO', 'Cellulose', 'Glucose',
'Hemicellulose', 'Lignin', 'Sucrose'))
P203 = Pump('P203')
# Separate oil and sugar
T207 = MixTank('T207', outs=('', ''))
split = np.zeros(len(pretreatment_species), float)
index = pretreatment_species.indices(('Lipid', 'Water'))
split[index] = (1, 0.0001)
T207._split = split
T207._run = lambda : Splitter._run(T207)
del split, index
# Cool the oil
H203 = HXutility('H203', T=343.15)
# Screen out small fibers from sugar stream
S202 = VibratingScreen('S202', outs=('', 'fiber_fines'),
split=1-np.array((0, 0, 0, 1, 0.002, 0, 0,0, 0, 0.002, 0.002)),
order=psp2)
sugar = S202-0
S202.mesh_opening = 2
# Add distilled water to wash lipid
T208 = MixTank('T208')
T208.tau = 2
# Centrifuge out water
C203 = SplitCentrifuge_LLE('C203',
split=(0.99, 0.01),
order=('Lipid', 'Water'))
# Vacume out water
F201 = SplitFlash('F201', T=347.15, P=2026.5,
split=(0.0001, 0.999), order=('Lipid', 'Water'))
lipid = F201.outs[1]
# %% Process specifications
# Specifications dependent on lipid cane flow rate
_enzyme_mass = enzyme.mass[[9, 12]]
_CaO_Water_mass = lime.mass[[7, 12]]
_H3PO4_Water_mass = H3PO4.mass[[1, 12]]
last_lipidcane_massnet = int(lipid_cane.massnet)
def correct_flows():
global last_lipidcane_massnet
massnet = lipid_cane.massnet
if int(massnet) != last_lipidcane_massnet:
# correct enzyme, lime, phosphoric acid, and imbibition water
_enzyme_mass[:] = 0.003 * massnet * np.array([0.1, 0.9])
_CaO_Water_mass[:] = 0.001 * massnet * np.array([0.046, 0.954])
_H3PO4_Water_mass[:] = 0.00025 * massnet
imbibition_water_mass.value = 0.25* massnet
last_lipidcane_massnet = int(massnet)
# Specifications within a system
def correct_lipid_wash_water():
oil_wash_water.mol[12] = H202.outs[0].mol[-2]*100/11
solids_index = Stream.indices(['Ash', 'CaO', 'Cellulose', 'Hemicellulose', 'Lignin'])
def correct_wash_water():
solids = solidsmol[solids_index].sum()
rvf_wash_water.mol[12] = 0.0574*solids
imbibition_water_mass = imbibition_water.mass.item(12)
# %% Pretreatment system set-up
(U103-0, enzyme)-T201
(T201-0, M201-0)-U201-1-S201-0-T202
(S201-1, imbibition_water)-M201
crushing_mill_recycle_sys = System('crushing_mill_recycle_sys',
network=(U201, S201, M201),
recycle=M201-0)
T202-0-H201
(H201-0, H3PO4)-T203-P201
(P201-0, lime-T204-0)-T205-P202
(P202-0, P203-0)-M202-H202
(H202-0, polymer)-T206-C201
(C201-1, rvf_wash_water)-C202-1-P203
clarification_recycle_sys = System('clarification_recycle_sys',
network=(M202, H202, T206, C201, C202, P203),
recycle=C202-1)
C201-0-T207-0-H203
(H203-0, oil_wash_water)-T208-C203-0-F201
T207-1-S202
pretreatment_sys = System('pretreatment_sys',
network=(U101, U102, U103,
correct_flows, T201,
crushing_mill_recycle_sys,
U202, T202, H201, T203,
P201, T204, T205, P202,
correct_wash_water,
clarification_recycle_sys,
T207, H203, S202,
correct_lipid_wash_water,
T208, C203, F201,))
solidsmol = P202.outs[0].mol
area_100 = System('area_100', network=(U101, U102, U103))
units = pretreatment_sys.units.copy()
for i in area_100.network: units.discard(i)
area_200_network = sorted(units, key=lambda x: x.ID)
area_200 = System('area_200', network=area_200_network)
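
# Minimal usage sketch (assumes a working biosteam installation; `simulate`
# evaluates the flowsheet in network order):
# pretreatment_sys.simulate()
# pretreatment_sys.diagram()  # optional: render the flowsheet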
|
the-stack_0_4409 | #!/usr/bin/python3
class Rectangle():
number_of_instances = 0
print_symbol = "#"
def __init__(self, width=0, height=0):
self.height = height
self.width = width
Rectangle.number_of_instances += 1
def area(self):
return self.__height * self.__width
def perimeter(self):
if self.width == 0 or self.height == 0:
Perimeter = 0
else:
Perimeter = self.__height * 2 + self.__width * 2
return Perimeter
@property
def width(self):
return self.__width
@width.setter
def width(self, value):
if not isinstance(value, int):
raise TypeError('width must be an integer')
if value < 0:
raise TypeError('width must be >= 0')
self.__width = value
@property
def height(self):
return self.__height
@height.setter
def height(self, value):
if not isinstance(value, int):
raise TypeError('height must be an integer')
if value < 0:
raise TypeError('height must be >= 0')
self.__height = value
    def __str__(self):
        if self.__width == 0 or self.__height == 0:
            return ""
        row = str(self.print_symbol) * self.__width
        return "\n".join(row for _ in range(self.__height))
def __repr__(self):
return 'Rectangle({}, {})'.format(self.width, self.height)
def __del__(self):
print('Bye rectangle...')
Rectangle.number_of_instances -= 1
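
if __name__ == "__main__":
    # Minimal usage sketch (not part of the original exercise):
    my_rect = Rectangle(2, 3)
    print(my_rect.area())       # 6
    print(my_rect.perimeter())  # 10
    print(my_rect)              # three rows of "##"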
|
the-stack_0_4411 | # -*- coding: utf-8 -*-
"""
Interpolation
=============
Defines the classes and definitions for interpolating variables.
- :class:`colour.KernelInterpolator`: 1-D function generic interpolation with
arbitrary kernel.
- :class:`colour.NearestNeighbourInterpolator`: 1-D function
nearest-neighbour interpolation.
- :class:`colour.LinearInterpolator`: 1-D function linear interpolation.
- :class:`colour.SpragueInterpolator`: 1-D function fifth-order polynomial
interpolation using *Sprague (1880)* method.
- :class:`colour.CubicSplineInterpolator`: 1-D function cubic spline
interpolation.
- :class:`colour.PchipInterpolator`: 1-D function piecewise cube Hermite
interpolation.
- :class:`colour.NullInterpolator`: 1-D function null interpolation.
- :func:`colour.lagrange_coefficients`: Computation of
*Lagrange Coefficients*.
- :func:`colour.algebra.table_interpolation_trilinear`: Trilinear
interpolation with table.
- :func:`colour.algebra.table_interpolation_tetrahedral`: Tetrahedral
interpolation with table.
- :attr:`colour.TABLE_INTERPOLATION_METHODS`: Supported table interpolation
methods.
- :func:`colour.table_interpolation`: Interpolation with table using given
method.
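
A minimal usage sketch with the simplest interpolator defined here (values
are illustrative)::

    x = np.arange(0, 5)
    y = x ** 2
    LinearInterpolator(x, y)(2.5)  # -> 6.5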
References
----------
- :cite:`Bourkeb` : Bourke, P. (n.d.). Trilinear Interpolation. Retrieved
January 13, 2018, from http://paulbourke.net/miscellaneous/interpolation/
- :cite:`Burger2009b` : Burger, W., & Burge, M. J. (2009). Principles of
Digital Image Processing. Springer London. doi:10.1007/978-1-84800-195-4
- :cite:`CIETC1-382005f` : CIE TC 1-38. (2005). 9.2.4 Method of
interpolation for uniformly spaced independent variable. In CIE 167:2005
Recommended Practice for Tabulating Spectral Data for Use in Colour
Computations (pp. 1-27). ISBN:978-3-901906-41-1
- :cite:`CIETC1-382005h` : CIE TC 1-38. (2005). Table V. Values of the
c-coefficients of Equ.s 6 and 7. In CIE 167:2005 Recommended Practice for
Tabulating Spectral Data for Use in Colour Computations (p. 19).
ISBN:978-3-901906-41-1
- :cite:`Fairman1985b` : Fairman, H. S. (1985). The calculation of weight
factors for tristimulus integration. Color Research & Application, 10(4),
199-203. doi:10.1002/col.5080100407
- :cite:`Kirk2006` : Kirk, R. (2006). Truelight Software Library 2.0.
Retrieved July 8, 2017, from
https://www.filmlight.ltd.uk/pdf/whitepapers/FL-TL-TN-0057-SoftwareLib.pdf
- :cite:`Westland2012h` : Westland, S., Ripamonti, C., & Cheung, V. (2012).
Interpolation Methods. In Computational Colour Science Using MATLAB (2nd
ed., pp. 29-37). ISBN:978-0-470-66569-5
- :cite:`Wikipedia2003a` : Wikipedia. (2003). Lagrange polynomial -
Definition. Retrieved January 20, 2016, from
https://en.wikipedia.org/wiki/Lagrange_polynomial#Definition
- :cite:`Wikipedia2005b` : Wikipedia. (2005). Lanczos resampling. Retrieved
October 14, 2017, from https://en.wikipedia.org/wiki/Lanczos_resampling
"""
import itertools
import numpy as np
import scipy.interpolate
from collections import OrderedDict
from collections.abc import Mapping
from functools import reduce
from colour.constants import DEFAULT_FLOAT_DTYPE, DEFAULT_INT_DTYPE
from colour.utilities import (
CaseInsensitiveMapping, as_float_array, as_float, closest_indexes,
interval, is_integer, is_numeric, runtime_warning, tsplit, validate_method)
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013-2021 - Colour Developers'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = '[email protected]'
__status__ = 'Production'
__all__ = [
'kernel_nearest_neighbour', 'kernel_linear', 'kernel_sinc',
'kernel_lanczos', 'kernel_cardinal_spline', 'KernelInterpolator',
'NearestNeighbourInterpolator', 'LinearInterpolator',
'SpragueInterpolator', 'CubicSplineInterpolator', 'PchipInterpolator',
'NullInterpolator', 'lagrange_coefficients',
'vertices_and_relative_coordinates', 'table_interpolation_trilinear',
'table_interpolation_tetrahedral', 'TABLE_INTERPOLATION_METHODS',
'table_interpolation'
]
def kernel_nearest_neighbour(x):
"""
Returns the *nearest-neighbour* kernel evaluated at given samples.
Parameters
----------
x : array_like
Samples at which to evaluate the *nearest-neighbour* kernel.
Returns
-------
ndarray
The *nearest-neighbour* kernel evaluated at given samples.
References
----------
:cite:`Burger2009b`
Examples
--------
>>> kernel_nearest_neighbour(np.linspace(0, 1, 10))
array([1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
"""
return np.where(np.abs(x) < 0.5, 1, 0)
def kernel_linear(x):
"""
Returns the *linear* kernel evaluated at given samples.
Parameters
----------
x : array_like
Samples at which to evaluate the *linear* kernel.
Returns
-------
ndarray
The *linear* kernel evaluated at given samples.
References
----------
:cite:`Burger2009b`
Examples
--------
>>> kernel_linear(np.linspace(0, 1, 10)) # doctest: +ELLIPSIS
array([ 1. , 0.8888888..., 0.7777777..., \
0.6666666..., 0.5555555...,
0.4444444..., 0.3333333..., 0.2222222..., \
0.1111111..., 0. ])
"""
return np.where(np.abs(x) < 1, 1 - np.abs(x), 0)
def kernel_sinc(x, a=3):
"""
Returns the *sinc* kernel evaluated at given samples.
Parameters
----------
x : array_like
Samples at which to evaluate the *sinc* kernel.
a : int, optional
Size of the *sinc* kernel.
Returns
-------
ndarray
The *sinc* kernel evaluated at given samples.
References
----------
:cite:`Burger2009b`
Examples
--------
>>> kernel_sinc(np.linspace(0, 1, 10)) # doctest: +ELLIPSIS
array([ 1.0000000...e+00, 9.7981553...e-01, 9.2072542...e-01,
8.2699334...e-01, 7.0531659...e-01, 5.6425327...e-01,
4.1349667...e-01, 2.6306440...e-01, 1.2247694...e-01,
3.8981718...e-17])
"""
    assert a >= 1, '"a" must be equal to or greater than 1!'
return np.where(np.abs(x) < a, np.sinc(x), 0)
def kernel_lanczos(x, a=3):
"""
Returns the *lanczos* kernel evaluated at given samples.
Parameters
----------
x : array_like
Samples at which to evaluate the *lanczos* kernel.
a : int, optional
Size of the *lanczos* kernel.
Returns
-------
ndarray
The *lanczos* kernel evaluated at given samples.
References
----------
:cite:`Wikipedia2005b`
Examples
--------
>>> kernel_lanczos(np.linspace(0, 1, 10)) # doctest: +ELLIPSIS
array([ 1.0000000...e+00, 9.7760615...e-01, 9.1243770...e-01,
8.1030092...e-01, 6.8012706...e-01, 5.3295773...e-01,
3.8071690...e-01, 2.3492839...e-01, 1.0554054...e-01,
3.2237621...e-17])
"""
    assert a >= 1, '"a" must be equal to or greater than 1!'
return np.where(np.abs(x) < a, np.sinc(x) * np.sinc(x / a), 0)
def kernel_cardinal_spline(x, a=0.5, b=0.0):
"""
Returns the *cardinal spline* kernel evaluated at given samples.
Notable *cardinal spline* :math:`a` and :math:`b` parameterizations:
- *Catmull-Rom*: :math:`(a=0.5, b=0)`
- *Cubic B-Spline*: :math:`(a=0, b=1)`
    - *Mitchell-Netravali*: :math:`(a=\\cfrac{1}{3}, b=\\cfrac{1}{3})`
Parameters
----------
x : array_like
Samples at which to evaluate the *cardinal spline* kernel.
a : int, optional
:math:`a` control parameter.
b : int, optional
:math:`b` control parameter.
Returns
-------
ndarray
The *cardinal spline* kernel evaluated at given samples.
References
----------
:cite:`Burger2009b`
Examples
--------
>>> kernel_cardinal_spline(np.linspace(0, 1, 10)) # doctest: +ELLIPSIS
array([ 1. , 0.9711934..., 0.8930041..., \
0.7777777..., 0.6378600...,
0.4855967..., 0.3333333..., 0.1934156..., \
0.0781893..., 0. ])
"""
x_abs = np.abs(x)
y = np.where(
x_abs < 1,
(-6 * a - 9 * b + 12) * x_abs ** 3 + (6 * a + 12 * b - 18) * x_abs ** 2
- 2 * b + 6,
(-6 * a - b) * x_abs ** 3 + (30 * a + 6 * b) * x_abs ** 2 +
(-48 * a - 12 * b) * x_abs + 24 * a + 8 * b,
)
y[x_abs >= 2] = 0
return 1 / 6 * y
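# Illustrative sketch (not part of the original module): the notable
# parameterizations listed in the docstring differ only in their ``(a, b)``
# arguments, e.g.:
#
# >>> samples = np.linspace(0, 1, 5)
# >>> catmull_rom = kernel_cardinal_spline(samples, a=0.5, b=0)
# >>> cubic_b_spline = kernel_cardinal_spline(samples, a=0, b=1)
# >>> mitchell_netravali = kernel_cardinal_spline(samples, a=1 / 3, b=1 / 3)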
class KernelInterpolator:
"""
Kernel based interpolation of a 1-D function.
The reconstruction of a continuous signal can be described as a linear
convolution operation. Interpolation can be expressed as a convolution of
the given discrete function :math:`g(x)` with some continuous interpolation
kernel :math:`k(w)`:
:math:`\\hat{g}(w_0) = [k * g](w_0) = \
\\sum_{x=-\\infty}^{\\infty}k(w_0 - x)\\cdot g(x)`
Parameters
----------
x : array_like
Independent :math:`x` variable values corresponding with :math:`y`
variable.
y : array_like
Dependent and already known :math:`y` variable values to
interpolate.
window : int, optional
Width of the window in samples on each side.
kernel : callable, optional
Kernel to use for interpolation.
kernel_kwargs : dict, optional
Arguments to use when calling the kernel.
padding_kwargs : dict, optional
Arguments to use when padding :math:`y` variable values with the
:func:`np.pad` definition.
dtype : type
Data type used for internal conversions.
Attributes
----------
- :attr:`~colour.KernelInterpolator.x`
- :attr:`~colour.KernelInterpolator.y`
- :attr:`~colour.KernelInterpolator.window`
- :attr:`~colour.KernelInterpolator.kernel`
- :attr:`~colour.KernelInterpolator.kernel_kwargs`
- :attr:`~colour.KernelInterpolator.padding_kwargs`
Methods
-------
- :meth:`~colour.KernelInterpolator.__init__`
- :meth:`~colour.KernelInterpolator.__call__`
References
----------
:cite:`Burger2009b`, :cite:`Wikipedia2005b`
Examples
--------
Interpolating a single numeric variable:
>>> y = np.array([5.9200, 9.3700, 10.8135, 4.5100,
... 69.5900, 27.8007, 86.0500])
>>> x = np.arange(len(y))
>>> f = KernelInterpolator(x, y)
>>> f(0.5) # doctest: +ELLIPSIS
6.9411400...
Interpolating an *array_like* variable:
>>> f([0.25, 0.75]) # doctest: +ELLIPSIS
array([ 6.1806208..., 8.0823848...])
    Using a different *sinc* kernel:
>>> f = KernelInterpolator(x, y, kernel=kernel_sinc)
>>> f([0.25, 0.75]) # doctest: +ELLIPSIS
array([ 6.5147317..., 8.3965466...])
Using a different window size:
>>> f = KernelInterpolator(
... x,
... y,
... window=16,
... kernel=kernel_lanczos,
... kernel_kwargs={'a': 16})
>>> f([0.25, 0.75]) # doctest: +ELLIPSIS
array([ 5.3961792..., 5.6521093...])
"""
def __init__(self,
x,
y,
window=3,
kernel=kernel_lanczos,
kernel_kwargs=None,
padding_kwargs=None,
dtype=None):
if dtype is None:
dtype = DEFAULT_FLOAT_DTYPE
self._x_p = None
self._y_p = None
self._x = None
self._y = None
self._window = None
self._padding_kwargs = {
'pad_width': (window, window),
'mode': 'reflect'
}
self._dtype = dtype
self.x = x
self.y = y
self.window = window
self.padding_kwargs = padding_kwargs
self._kernel = None
self.kernel = kernel
self._kernel_kwargs = {}
self.kernel_kwargs = kernel_kwargs
self._validate_dimensions()
@property
def x(self):
"""
Getter and setter property for the independent :math:`x` variable.
Parameters
----------
value : array_like
Value to set the independent :math:`x` variable with.
Returns
-------
array_like
Independent :math:`x` variable.
"""
return self._x
@x.setter
def x(self, value):
"""
Setter for the **self.x** property.
"""
if value is not None:
value = np.atleast_1d(value).astype(self._dtype)
assert value.ndim == 1, (
'"x" independent variable must have exactly one dimension!')
value_interval = interval(value)
if value_interval.size != 1:
runtime_warning('"x" independent variable is not uniform, '
'unpredictable results may occur!')
self._x = value
if self._window is not None:
self._x_p = np.pad(
self._x, (self._window, self._window),
'linear_ramp',
end_values=(
np.min(self._x) - self._window * value_interval[0],
np.max(self._x) + self._window * value_interval[0]))
@property
def y(self):
"""
Getter and setter property for the dependent and already known
:math:`y` variable.
Parameters
----------
value : array_like
Value to set the dependent and already known :math:`y` variable
with.
Returns
-------
array_like
Dependent and already known :math:`y` variable.
"""
return self._y
@y.setter
def y(self, value):
"""
Setter for the **self.y** property.
"""
if value is not None:
value = np.atleast_1d(value).astype(self._dtype)
assert value.ndim == 1, (
'"y" dependent variable must have exactly one dimension!')
self._y = value
if self._window is not None:
self._y_p = np.pad(self._y, **self._padding_kwargs)
@property
def window(self):
"""
Getter and setter property for the window.
Parameters
----------
value : int
Value to set the window with.
Returns
-------
int
Window.
"""
return self._window
@window.setter
def window(self, value):
"""
Setter for the **self.window** property.
"""
if value is not None:
assert is_integer(value), '"window" must be an integer!'
            assert value >= 1, (
                '"window" must be equal to or greater than 1!')
self._window = value
# Triggering "self._x_p" update.
if self._x is not None:
self.x = self._x
# Triggering "self._y_p" update.
if self._y is not None:
self.y = self._y
@property
def kernel(self):
"""
Getter and setter property for the kernel callable.
Parameters
----------
value : callable
Value to set the kernel callable.
Returns
-------
callable
Kernel callable.
"""
return self._kernel
@kernel.setter
def kernel(self, value):
"""
Setter for the **self.kernel** property.
"""
if value is not None:
assert hasattr(
value,
'__call__'), ('"{0}" attribute: "{1}" is not callable!'.format(
'kernel', value))
self._kernel = value
@property
def kernel_kwargs(self):
"""
Getter and setter property for the kernel call time arguments.
Parameters
----------
value : dict
Value to call the interpolation kernel with.
Returns
-------
dict
Kernel call time arguments.
"""
return self._kernel_kwargs
@kernel_kwargs.setter
def kernel_kwargs(self, value):
"""
Setter for the **self.kernel_kwargs** property.
"""
if value is not None:
assert isinstance(value, (dict, OrderedDict)), (
'"{0}" attribute: "{1}" type is not "dict" or "OrderedDict"!'
).format('kernel_kwargs', value)
self._kernel_kwargs = value
@property
def padding_kwargs(self):
"""
        Getter and setter property for the padding call time arguments.
        Parameters
        ----------
        value : dict
            Value to set the padding arguments with.
        Returns
        -------
        dict
            Padding call time arguments.
"""
return self._padding_kwargs
@padding_kwargs.setter
def padding_kwargs(self, value):
"""
Setter for the **self.padding_kwargs** property.
"""
if value is not None:
assert isinstance(value, Mapping), (
'"{0}" attribute: "{1}" type is not a "Mapping" instance!'
).format('padding_kwargs', value)
self._padding_kwargs = value
# Triggering "self._y_p" update.
if self._y is not None:
self.y = self._y
def __call__(self, x):
"""
Evaluates the interpolator at given point(s).
Parameters
----------
x : numeric or array_like
Point(s) to evaluate the interpolant at.
Returns
-------
float or ndarray
Interpolated value(s).
"""
x = np.atleast_1d(x).astype(self._dtype)
xi = as_float(self._evaluate(x))
return xi
def _evaluate(self, x):
"""
Performs the interpolator evaluation at given points.
Parameters
----------
x : ndarray
Points to evaluate the interpolant at.
Returns
-------
ndarray
Interpolated points values.
"""
self._validate_dimensions()
self._validate_interpolation_range(x)
x_interval = interval(self._x)[0]
x_f = np.floor(x / x_interval)
windows = (x_f[:, np.newaxis] + np.arange(-self._window + 1,
self._window + 1))
clip_l = min(self._x_p) / x_interval
clip_h = max(self._x_p) / x_interval
windows = np.clip(windows, clip_l, clip_h) - clip_l
windows = np.around(windows).astype(DEFAULT_INT_DTYPE)
return np.sum(
self._y_p[windows] * self._kernel(
x[:, np.newaxis] / x_interval - windows -
min(self._x_p) / x_interval, **self._kernel_kwargs),
axis=-1)
def _validate_dimensions(self):
"""
Validates variables dimensions to be the same.
"""
if len(self._x) != len(self._y):
raise ValueError(
('"x" independent and "y" dependent variables have different '
'dimensions: "{0}", "{1}"').format(
len(self._x), len(self._y)))
def _validate_interpolation_range(self, x):
"""
Validates given point to be in interpolation range.
"""
below_interpolation_range = x < self._x[0]
above_interpolation_range = x > self._x[-1]
if below_interpolation_range.any():
raise ValueError('"{0}" is below interpolation range.'.format(x))
if above_interpolation_range.any():
raise ValueError('"{0}" is above interpolation range.'.format(x))
class NearestNeighbourInterpolator(KernelInterpolator):
"""
A nearest-neighbour interpolator.
Other Parameters
----------------
x : array_like
Independent :math:`x` variable values corresponding with :math:`y`
variable.
y : array_like
Dependent and already known :math:`y` variable values to
interpolate.
window : int, optional
Width of the window in samples on each side.
padding_kwargs : dict, optional
Arguments to use when padding :math:`y` variable values with the
:func:`np.pad` definition.
dtype : type
Data type used for internal conversions.
Methods
-------
- :meth:`~colour.NearestNeighbourInterpolator.__init__`
"""
def __init__(self, *args, **kwargs):
kwargs['kernel'] = kernel_nearest_neighbour
if 'kernel_kwargs' in kwargs:
del kwargs['kernel_kwargs']
super(NearestNeighbourInterpolator, self).__init__(*args, **kwargs)
class LinearInterpolator:
"""
Linearly interpolates a 1-D function.
Parameters
----------
x : array_like
Independent :math:`x` variable values corresponding with :math:`y`
variable.
y : array_like
Dependent and already known :math:`y` variable values to
interpolate.
dtype : type
Data type used for internal conversions.
Attributes
----------
- :attr:`~colour.LinearInterpolator.x`
- :attr:`~colour.LinearInterpolator.y`
Methods
-------
- :meth:`~colour.LinearInterpolator.__init__`
- :meth:`~colour.LinearInterpolator.__call__`
Notes
-----
- This class is a wrapper around *numpy.interp* definition.
Examples
--------
Interpolating a single numeric variable:
>>> y = np.array([5.9200, 9.3700, 10.8135, 4.5100,
... 69.5900, 27.8007, 86.0500])
>>> x = np.arange(len(y))
>>> f = LinearInterpolator(x, y)
>>> f(0.5) # doctest: +ELLIPSIS
7.64...
Interpolating an *array_like* variable:
>>> f([0.25, 0.75])
array([ 6.7825, 8.5075])
"""
def __init__(self, x, y, dtype=None):
if dtype is None:
dtype = DEFAULT_FLOAT_DTYPE
self._x = None
self._y = None
self._dtype = dtype
self.x = x
self.y = y
self._validate_dimensions()
@property
def x(self):
"""
Getter and setter property for the independent :math:`x` variable.
Parameters
----------
value : array_like
Value to set the independent :math:`x` variable with.
Returns
-------
array_like
Independent :math:`x` variable.
"""
return self._x
@x.setter
def x(self, value):
"""
Setter for the **self.x** property.
"""
if value is not None:
value = np.atleast_1d(value).astype(self._dtype)
assert value.ndim == 1, (
'"x" independent variable must have exactly one dimension!')
self._x = value
@property
def y(self):
"""
Getter and setter property for the dependent and already known
:math:`y` variable.
Parameters
----------
value : array_like
Value to set the dependent and already known :math:`y` variable
with.
Returns
-------
array_like
Dependent and already known :math:`y` variable.
"""
return self._y
@y.setter
def y(self, value):
"""
Setter for the **self.y** property.
"""
if value is not None:
value = np.atleast_1d(value).astype(self._dtype)
assert value.ndim == 1, (
'"y" dependent variable must have exactly one dimension!')
self._y = value
def __call__(self, x):
"""
Evaluates the interpolating polynomial at given point(s).
Parameters
----------
x : numeric or array_like
Point(s) to evaluate the interpolant at.
Returns
-------
float or ndarray
Interpolated value(s).
"""
x = np.atleast_1d(x).astype(self._dtype)
xi = as_float(self._evaluate(x))
return xi
def _evaluate(self, x):
"""
Performs the interpolating polynomial evaluation at given points.
Parameters
----------
x : ndarray
Points to evaluate the interpolant at.
Returns
-------
ndarray
Interpolated points values.
"""
self._validate_dimensions()
self._validate_interpolation_range(x)
return np.interp(x, self._x, self._y)
def _validate_dimensions(self):
"""
Validates variables dimensions to be the same.
"""
if len(self._x) != len(self._y):
raise ValueError(
('"x" independent and "y" dependent variables have different '
'dimensions: "{0}", "{1}"').format(
len(self._x), len(self._y)))
def _validate_interpolation_range(self, x):
"""
Validates given point to be in interpolation range.
"""
below_interpolation_range = x < self._x[0]
above_interpolation_range = x > self._x[-1]
if below_interpolation_range.any():
raise ValueError('"{0}" is below interpolation range.'.format(x))
if above_interpolation_range.any():
raise ValueError('"{0}" is above interpolation range.'.format(x))
class SpragueInterpolator:
"""
Constructs a fifth-order polynomial that passes through :math:`y` dependent
variable.
*Sprague (1880)* method is recommended by the *CIE* for interpolating
functions having a uniformly spaced independent variable.
Parameters
----------
x : array_like
Independent :math:`x` variable values corresponding with :math:`y`
variable.
y : array_like
Dependent and already known :math:`y` variable values to
interpolate.
dtype : type
Data type used for internal conversions.
Attributes
----------
- :attr:`~colour.SpragueInterpolator.x`
- :attr:`~colour.SpragueInterpolator.y`
Methods
-------
- :meth:`~colour.SpragueInterpolator.__init__`
- :meth:`~colour.SpragueInterpolator.__call__`
Notes
-----
- The minimum number :math:`k` of data points required along the
interpolation axis is :math:`k=6`.
References
----------
:cite:`CIETC1-382005f`, :cite:`Westland2012h`
Examples
--------
Interpolating a single numeric variable:
>>> y = np.array([5.9200, 9.3700, 10.8135, 4.5100,
... 69.5900, 27.8007, 86.0500])
>>> x = np.arange(len(y))
>>> f = SpragueInterpolator(x, y)
>>> f(0.5) # doctest: +ELLIPSIS
7.2185025...
Interpolating an *array_like* variable:
>>> f([0.25, 0.75]) # doctest: +ELLIPSIS
array([ 6.7295161..., 7.8140625...])
"""
SPRAGUE_C_COEFFICIENTS = np.array([
[884, -1960, 3033, -2648, 1080, -180],
[508, -540, 488, -367, 144, -24],
[-24, 144, -367, 488, -540, 508],
[-180, 1080, -2648, 3033, -1960, 884],
])
"""
Defines the coefficients used to generate extra points for boundaries
interpolation.
SPRAGUE_C_COEFFICIENTS : array_like, (4, 6)
References
----------
:cite:`CIETC1-382005h`
"""
def __init__(self, x, y, dtype=None):
if dtype is None:
dtype = DEFAULT_FLOAT_DTYPE
self._xp = None
self._yp = None
self._x = None
self._y = None
self._dtype = dtype
self.x = x
self.y = y
self._validate_dimensions()
@property
def x(self):
"""
Getter and setter property for the independent :math:`x` variable.
Parameters
----------
value : array_like
Value to set the independent :math:`x` variable with.
Returns
-------
array_like
Independent :math:`x` variable.
"""
return self._x
@x.setter
def x(self, value):
"""
Setter for the **self.x** property.
"""
if value is not None:
value = np.atleast_1d(value).astype(self._dtype)
assert value.ndim == 1, (
'"x" independent variable must have exactly one dimension!')
value_interval = interval(value)[0]
xp1 = value[0] - value_interval * 2
xp2 = value[0] - value_interval
xp3 = value[-1] + value_interval
xp4 = value[-1] + value_interval * 2
self._xp = np.concatenate(((xp1, xp2), value, (xp3, xp4)))
self._x = value
@property
def y(self):
"""
Getter and setter property for the dependent and already known
:math:`y` variable.
Parameters
----------
value : array_like
Value to set the dependent and already known :math:`y` variable
with.
Returns
-------
array_like
Dependent and already known :math:`y` variable.
"""
return self._y
@y.setter
def y(self, value):
"""
Setter for the **self.y** property.
"""
if value is not None:
value = np.atleast_1d(value).astype(self._dtype)
assert value.ndim == 1, (
'"y" dependent variable must have exactly one dimension!')
assert len(value) >= 6, (
'"y" dependent variable values count must be equal to or '
'greater than 6!')
yp1 = np.ravel(
(np.dot(self.SPRAGUE_C_COEFFICIENTS[0],
np.array(value[0:6]).reshape([6, 1]))) / 209)[0]
yp2 = np.ravel(
(np.dot(self.SPRAGUE_C_COEFFICIENTS[1],
np.array(value[0:6]).reshape([6, 1]))) / 209)[0]
yp3 = np.ravel(
(np.dot(self.SPRAGUE_C_COEFFICIENTS[2],
np.array(value[-6:]).reshape([6, 1]))) / 209)[0]
yp4 = np.ravel(
(np.dot(self.SPRAGUE_C_COEFFICIENTS[3],
np.array(value[-6:]).reshape([6, 1]))) / 209)[0]
self._yp = np.concatenate(((yp1, yp2), value, (yp3, yp4)))
self._y = value
def __call__(self, x):
"""
Evaluates the interpolating polynomial at given point(s).
Parameters
----------
x : numeric or array_like
Point(s) to evaluate the interpolant at.
Returns
-------
numeric or ndarray
Interpolated value(s).
"""
return self._evaluate(x)
def _evaluate(self, x):
"""
Performs the interpolating polynomial evaluation at given point.
Parameters
----------
x : numeric
Point to evaluate the interpolant at.
Returns
-------
float
Interpolated point values.
"""
x = as_float_array(x)
self._validate_dimensions()
self._validate_interpolation_range(x)
i = np.searchsorted(self._xp, x) - 1
X = (x - self._xp[i]) / (self._xp[i + 1] - self._xp[i])
r = self._yp
a0p = r[i]
a1p = ((2 * r[i - 2] - 16 * r[i - 1] + 16 * r[i + 1] -
2 * r[i + 2]) / 24) # yapf: disable
a2p = ((-r[i - 2] + 16 * r[i - 1] - 30 * r[i] + 16 * r[i + 1] -
r[i + 2]) / 24) # yapf: disable
a3p = ((-9 * r[i - 2] + 39 * r[i - 1] - 70 * r[i] + 66 * r[i + 1] -
33 * r[i + 2] + 7 * r[i + 3]) / 24)
a4p = ((13 * r[i - 2] - 64 * r[i - 1] + 126 * r[i] - 124 * r[i + 1] +
61 * r[i + 2] - 12 * r[i + 3]) / 24)
a5p = ((-5 * r[i - 2] + 25 * r[i - 1] - 50 * r[i] + 50 * r[i + 1] -
25 * r[i + 2] + 5 * r[i + 3]) / 24)
y = (a0p + a1p * X + a2p * X ** 2 + a3p * X ** 3 + a4p * X ** 4 +
a5p * X ** 5)
return y
def _validate_dimensions(self):
"""
Validates variables dimensions to be the same.
"""
if len(self._x) != len(self._y):
raise ValueError(
('"x" independent and "y" dependent variables have different '
'dimensions: "{0}", "{1}"').format(
len(self._x), len(self._y)))
def _validate_interpolation_range(self, x):
"""
Validates given point to be in interpolation range.
"""
below_interpolation_range = x < self._x[0]
above_interpolation_range = x > self._x[-1]
if below_interpolation_range.any():
raise ValueError('"{0}" is below interpolation range.'.format(x))
if above_interpolation_range.any():
raise ValueError('"{0}" is above interpolation range.'.format(x))
class CubicSplineInterpolator(scipy.interpolate.interp1d):
"""
Interpolates a 1-D function using cubic spline interpolation.
Methods
-------
- :meth:`~colour.CubicSplineInterpolator.__init__`
Notes
-----
- This class is a wrapper around *scipy.interpolate.interp1d* class.
"""
def __init__(self, *args, **kwargs):
super(CubicSplineInterpolator, self).__init__(
kind='cubic', *args, **kwargs)
class PchipInterpolator(scipy.interpolate.PchipInterpolator):
"""
Interpolates a 1-D function using Piecewise Cubic Hermite Interpolating
Polynomial interpolation.
Attributes
----------
- :attr:`~colour.PchipInterpolator.y`
Methods
-------
- :meth:`~colour.PchipInterpolator.__init__`
Notes
-----
- This class is a wrapper around *scipy.interpolate.PchipInterpolator*
class.
"""
def __init__(self, x, y, *args, **kwargs):
super(PchipInterpolator, self).__init__(x, y, *args, **kwargs)
self._y = y
@property
def y(self):
"""
Getter property for the dependent and already known :math:`y`
variable.
Returns
-------
array_like
Dependent and already known :math:`y` variable.
"""
return self._y
class NullInterpolator:
"""
Performs 1-D function null interpolation, i.e. a call within given
tolerances will return existing :math:`y` variable values and ``default``
if outside tolerances.
Parameters
----------
x : ndarray
Independent :math:`x` variable values corresponding with :math:`y`
variable.
y : ndarray
Dependent and already known :math:`y` variable values to
interpolate.
absolute_tolerance : numeric, optional
Absolute tolerance.
relative_tolerance : numeric, optional
Relative tolerance.
default : numeric, optional
Default value for interpolation outside tolerances.
dtype : type
Data type used for internal conversions.
Attributes
----------
- :attr:`~colour.NullInterpolator.x`
- :attr:`~colour.NullInterpolator.y`
- :attr:`~colour.NullInterpolator.relative_tolerance`
- :attr:`~colour.NullInterpolator.absolute_tolerance`
- :attr:`~colour.NullInterpolator.default`
Methods
-------
- :meth:`~colour.NullInterpolator.__init__`
- :meth:`~colour.NullInterpolator.__call__`
Examples
--------
>>> y = np.array([5.9200, 9.3700, 10.8135, 4.5100,
... 69.5900, 27.8007, 86.0500])
>>> x = np.arange(len(y))
>>> f = NullInterpolator(x, y)
>>> f(0.5)
nan
>>> f(1.0) # doctest: +ELLIPSIS
9.3699999...
>>> f = NullInterpolator(x, y, absolute_tolerance=0.01)
>>> f(1.01) # doctest: +ELLIPSIS
9.3699999...
"""
def __init__(self,
x,
y,
absolute_tolerance=10e-7,
relative_tolerance=10e-7,
default=np.nan,
dtype=None):
if dtype is None:
dtype = DEFAULT_FLOAT_DTYPE
self._x = None
self._y = None
self._absolute_tolerance = None
self._relative_tolerance = None
self._default = None
self._dtype = dtype
self.x = x
self.y = y
self.absolute_tolerance = absolute_tolerance
self.relative_tolerance = relative_tolerance
self.default = default
self._validate_dimensions()
@property
def x(self):
"""
Getter and setter property for the independent :math:`x` variable.
Parameters
----------
value : array_like
Value to set the independent :math:`x` variable with.
Returns
-------
array_like
Independent :math:`x` variable.
"""
return self._x
@x.setter
def x(self, value):
"""
Setter for the **self.x** property.
"""
if value is not None:
value = np.atleast_1d(value).astype(self._dtype)
assert value.ndim == 1, (
'"x" independent variable must have exactly one dimension!')
self._x = value
@property
def y(self):
"""
Getter and setter property for the dependent and already known
:math:`y` variable.
Parameters
----------
value : array_like
Value to set the dependent and already known :math:`y` variable
with.
Returns
-------
array_like
Dependent and already known :math:`y` variable.
"""
return self._y
@y.setter
def y(self, value):
"""
Setter for the **self.y** property.
"""
if value is not None:
value = np.atleast_1d(value).astype(self._dtype)
assert value.ndim == 1, (
'"y" dependent variable must have exactly one dimension!')
self._y = value
@property
def relative_tolerance(self):
"""
Getter and setter property for the relative tolerance.
Parameters
----------
value : numeric
Value to set the relative tolerance with.
Returns
-------
numeric
Relative tolerance.
"""
return self._relative_tolerance
@relative_tolerance.setter
def relative_tolerance(self, value):
"""
Setter for the **self.relative_tolerance** property.
"""
if value is not None:
assert is_numeric(value), (
'"relative_tolerance" variable must be a "numeric"!')
self._relative_tolerance = value
@property
def absolute_tolerance(self):
"""
Getter and setter property for the absolute tolerance.
Parameters
----------
value : numeric
Value to set the absolute tolerance with.
Returns
-------
numeric
Absolute tolerance.
"""
return self._absolute_tolerance
@absolute_tolerance.setter
def absolute_tolerance(self, value):
"""
Setter for the **self.absolute_tolerance** property.
"""
if value is not None:
assert is_numeric(value), (
'"absolute_tolerance" variable must be a "numeric"!')
self._absolute_tolerance = value
@property
def default(self):
"""
Getter and setter property for the default value for call outside
tolerances.
Parameters
----------
value : numeric
Value to set the default value with.
Returns
-------
numeric
Default value.
"""
return self._default
@default.setter
def default(self, value):
"""
Setter for the **self.default** property.
"""
if value is not None:
assert is_numeric(value), (
'"default" variable must be a "numeric"!')
self._default = value
def __call__(self, x):
"""
Evaluates the interpolator at given point(s).
Parameters
----------
x : numeric or array_like
Point(s) to evaluate the interpolant at.
Returns
-------
float or ndarray
Interpolated value(s).
"""
x = np.atleast_1d(x).astype(self._dtype)
xi = as_float(self._evaluate(x))
return xi
def _evaluate(self, x):
"""
Performs the interpolator evaluation at given points.
Parameters
----------
x : ndarray
Points to evaluate the interpolant at.
Returns
-------
ndarray
Interpolated points values.
"""
self._validate_dimensions()
self._validate_interpolation_range(x)
indexes = closest_indexes(self._x, x)
values = self._y[indexes]
values[~np.isclose(
self._x[indexes],
x,
            rtol=self._relative_tolerance,
            atol=self._absolute_tolerance)] = self._default
return values
def _validate_dimensions(self):
"""
Validates variables dimensions to be the same.
"""
if len(self._x) != len(self._y):
raise ValueError(
('"x" independent and "y" dependent variables have different '
'dimensions: "{0}", "{1}"').format(
len(self._x), len(self._y)))
def _validate_interpolation_range(self, x):
"""
Validates given point to be in interpolation range.
"""
below_interpolation_range = x < self._x[0]
above_interpolation_range = x > self._x[-1]
if below_interpolation_range.any():
raise ValueError('"{0}" is below interpolation range.'.format(x))
if above_interpolation_range.any():
raise ValueError('"{0}" is above interpolation range.'.format(x))
def lagrange_coefficients(r, n=4):
"""
Computes the *Lagrange Coefficients* at given point :math:`r` for degree
:math:`n`.
Parameters
----------
r : numeric
Point to get the *Lagrange Coefficients* at.
n : int, optional
Degree of the *Lagrange Coefficients* being calculated.
Returns
-------
ndarray
References
----------
:cite:`Fairman1985b`, :cite:`Wikipedia2003a`
Examples
--------
>>> lagrange_coefficients(0.1)
array([ 0.8265, 0.2755, -0.1305, 0.0285])
"""
r_i = np.arange(n)
L_n = []
for j in range(len(r_i)):
basis = [(r - r_i[i]) / (r_i[j] - r_i[i]) for i in range(len(r_i))
if i != j]
L_n.append(reduce(lambda x, y: x * y, basis)) # noqa
return np.array(L_n)
def vertices_and_relative_coordinates(V_xyz, table):
"""
Computes the vertices coordinates and indexes relative :math:`V_{xyzr}`
coordinates from given :math:`V_{xyzr}` values and interpolation table.
Parameters
----------
V_xyz : array_like
:math:`V_{xyz}` values to transform to indexes relative
:math:`V_{xyzr}` values.
table : array_like
4-Dimensional (NxNxNx3) interpolation table.
Returns
-------
tuple
Vertices coordinates and indexes relative :math:`V_{xyzr}` coordinates.
Examples
--------
>>> import os
>>> import colour
>>> path = os.path.join(
... os.path.dirname(__file__),'..', 'io', 'luts', 'tests', 'resources',
... 'iridas_cube', 'Colour_Correct.cube')
>>> LUT = colour.read_LUT(path)
>>> table = LUT.table
>>> prng = np.random.RandomState(4)
>>> V_xyz = colour.algebra.random_triplet_generator(3, random_state=prng)
>>> print(V_xyz) # doctest: +ELLIPSIS
[[ 0.9670298... 0.7148159... 0.9762744...]
[ 0.5472322... 0.6977288... 0.0062302...]
[ 0.9726843... 0.2160895... 0.2529823...]]
>>> vertices, V_xyzr = vertices_and_relative_coordinates(V_xyz, table)
>>> print(vertices)
[[[ 0.833311 0.833311 0.833311]
[ 0.349416 0.657749 0.041083]
[ 0.797894 -0.035412 -0.035412]]
<BLANKLINE>
[[ 0.833311 0.833311 1.249963]
[ 0.340435 0.743769 0.340435]
[ 0.752767 -0.028479 0.362144]]
<BLANKLINE>
[[ 0.707102 1.110435 0.707102]
[ 0.344991 1.050213 -0.007621]
[ 0.633333 0.316667 0. ]]
<BLANKLINE>
[[ 0.519714 0.744729 0.744729]
[ 0.314204 1.120871 0.314204]
[ 0.732278 0.315626 0.315626]]
<BLANKLINE>
[[ 1.06561 0.648957 0.648957]
[ 0.589195 0.589195 0.139164]
[ 1.196841 -0.053117 -0.053117]]
<BLANKLINE>
[[ 1. 0.666667 1. ]
[ 0.594601 0.594601 0.369586]
[ 1.162588 -0.050372 0.353948]]
<BLANKLINE>
[[ 0.894606 0.894606 0.66959 ]
[ 0.663432 0.930188 0.12992 ]
[ 1.038439 0.310899 -0.05287 ]]
<BLANKLINE>
[[ 1.249966 1.249966 1.249966]
[ 0.682749 0.991082 0.374416]
[ 1.131225 0.29792 0.29792 ]]]
>>> print(V_xyzr) # doctest: +ELLIPSIS
[[ 0.9010895... 0.1444479... 0.9288233...]
[ 0.6416967... 0.0931864... 0.0186907...]
[ 0.9180530... 0.6482684... 0.7589470...]]
"""
V_xyz = np.clip(V_xyz, 0, 1)
table = as_float_array(table)
V_xyz = np.reshape(V_xyz, (-1, 3))
# Indexes computations where ``i_m`` is the maximum index value on a given
# table axis, ``i_f`` and ``i_c`` respectively the floor and ceiling
# indexes encompassing a given V_xyz value.
i_m = np.array(table.shape[0:-1]) - 1
i_f = np.floor(V_xyz * i_m).astype(DEFAULT_INT_DTYPE)
i_c = np.clip(i_f + 1, 0, i_m)
# Relative to indexes ``V_xyz`` values.
V_xyzr = i_m * V_xyz - i_f
i_f_c = i_f, i_c
# Vertices computations by indexing ``table`` with the ``i_f`` and ``i_c``
# indexes. 8 encompassing vertices are computed for a given V_xyz value
# forming a cube around it:
vertices = np.array([
table[i_f_c[i[0]][..., 0], i_f_c[i[1]][..., 1], i_f_c[i[2]][..., 2]]
for i in itertools.product(*zip([0, 0, 0], [1, 1, 1]))
])
return vertices, V_xyzr
def table_interpolation_trilinear(V_xyz, table):
"""
Performs trilinear interpolation of given :math:`V_{xyz}` values using
given interpolation table.
Parameters
----------
V_xyz : array_like
:math:`V_{xyz}` values to interpolate.
table : array_like
4-Dimensional (NxNxNx3) interpolation table.
Returns
-------
ndarray
Interpolated :math:`V_{xyz}` values.
References
----------
:cite:`Bourkeb`
Examples
--------
>>> import os
>>> import colour
>>> path = os.path.join(
... os.path.dirname(__file__),'..', 'io', 'luts', 'tests', 'resources',
... 'iridas_cube', 'Colour_Correct.cube')
>>> LUT = colour.read_LUT(path)
>>> table = LUT.table
>>> prng = np.random.RandomState(4)
>>> V_xyz = colour.algebra.random_triplet_generator(3, random_state=prng)
>>> print(V_xyz) # doctest: +ELLIPSIS
[[ 0.9670298... 0.7148159... 0.9762744...]
[ 0.5472322... 0.6977288... 0.0062302...]
[ 0.9726843... 0.2160895... 0.2529823...]]
>>> table_interpolation_trilinear(V_xyz, table) # doctest: +ELLIPSIS
array([[ 1.0120664..., 0.7539146..., 1.0228540...],
[ 0.5075794..., 0.6479459..., 0.1066404...],
[ 1.0976519..., 0.1785998..., 0.2299897...]])
"""
V_xyz = as_float_array(V_xyz)
vertices, V_xyzr = vertices_and_relative_coordinates(V_xyz, table)
vertices = np.moveaxis(vertices, 0, 1)
x, y, z = [f[:, np.newaxis] for f in tsplit(V_xyzr)]
weights = np.moveaxis(
np.transpose(
[(1 - x) * (1 - y) * (1 - z), (1 - x) * (1 - y) * z,
(1 - x) * y * (1 - z), (1 - x) * y * z, x * (1 - y) * (1 - z),
x * (1 - y) * z, x * y * (1 - z), x * y * z]), 0, -1)
xyz_o = np.reshape(np.sum(vertices * weights, 1), V_xyz.shape)
return xyz_o
def table_interpolation_tetrahedral(V_xyz, table):
"""
Performs tetrahedral interpolation of given :math:`V_{xyz}` values using
given interpolation table.
Parameters
----------
V_xyz : array_like
:math:`V_{xyz}` values to interpolate.
table : array_like
4-Dimensional (NxNxNx3) interpolation table.
Returns
-------
ndarray
Interpolated :math:`V_{xyz}` values.
References
----------
:cite:`Kirk2006`
Examples
--------
>>> import os
>>> import colour
>>> path = os.path.join(
... os.path.dirname(__file__),'..', 'io', 'luts', 'tests', 'resources',
... 'iridas_cube', 'Colour_Correct.cube')
>>> LUT = colour.read_LUT(path)
>>> table = LUT.table
>>> prng = np.random.RandomState(4)
>>> V_xyz = colour.algebra.random_triplet_generator(3, random_state=prng)
>>> print(V_xyz) # doctest: +ELLIPSIS
[[ 0.9670298... 0.7148159... 0.9762744...]
[ 0.5472322... 0.6977288... 0.0062302...]
[ 0.9726843... 0.2160895... 0.2529823...]]
>>> table_interpolation_tetrahedral(V_xyz, table) # doctest: +ELLIPSIS
array([[ 1.0196197..., 0.7674062..., 1.0311751...],
[ 0.5105603..., 0.6466722..., 0.1077296...],
[ 1.1178206..., 0.1762039..., 0.2209534...]])
"""
V_xyz = as_float_array(V_xyz)
vertices, V_xyzr = vertices_and_relative_coordinates(V_xyz, table)
vertices = np.moveaxis(vertices, 0, -1)
V000, V001, V010, V011, V100, V101, V110, V111 = tsplit(vertices)
x, y, z = [r[:, np.newaxis] for r in tsplit(V_xyzr)]
xyz_o = np.select([
np.logical_and(x > y, y > z),
np.logical_and(x > y, x > z),
np.logical_and(x > y, np.logical_and(y <= z, x <= z)),
np.logical_and(x <= y, z > y),
np.logical_and(x <= y, z > x),
np.logical_and(x <= y, np.logical_and(z <= y, z <= x)),
], [
(1 - x) * V000 + (x - y) * V100 + (y - z) * V110 + z * V111,
(1 - x) * V000 + (x - z) * V100 + (z - y) * V101 + y * V111,
(1 - z) * V000 + (z - x) * V001 + (x - y) * V101 + y * V111,
(1 - z) * V000 + (z - y) * V001 + (y - x) * V011 + x * V111,
(1 - y) * V000 + (y - z) * V010 + (z - x) * V011 + x * V111,
(1 - y) * V000 + (y - x) * V010 + (x - z) * V110 + z * V111,
])
xyz_o = np.reshape(xyz_o, V_xyz.shape)
return xyz_o
TABLE_INTERPOLATION_METHODS = CaseInsensitiveMapping({
'Trilinear': table_interpolation_trilinear,
'Tetrahedral': table_interpolation_tetrahedral,
})
TABLE_INTERPOLATION_METHODS.__doc__ = """
Supported table interpolation methods.
References
----------
:cite:`Bourkeb`, :cite:`Kirk2006`
TABLE_INTERPOLATION_METHODS : CaseInsensitiveMapping
**{'Trilinear', 'Tetrahedral'}**
"""
def table_interpolation(V_xyz, table, method='Trilinear'):
"""
Performs interpolation of given :math:`V_{xyz}` values using given
interpolation table.
Parameters
----------
V_xyz : array_like
:math:`V_{xyz}` values to interpolate.
table : array_like
4-Dimensional (NxNxNx3) interpolation table.
method : unicode, optional
**{'Trilinear', 'Tetrahedral'}**,
Interpolation method.
Returns
-------
ndarray
Interpolated :math:`V_{xyz}` values.
References
----------
:cite:`Bourkeb`, :cite:`Kirk2006`
Examples
--------
>>> import os
>>> import colour
>>> path = os.path.join(
... os.path.dirname(__file__),'..', 'io', 'luts', 'tests', 'resources',
... 'iridas_cube', 'Colour_Correct.cube')
>>> LUT = colour.read_LUT(path)
>>> table = LUT.table
>>> prng = np.random.RandomState(4)
>>> V_xyz = colour.algebra.random_triplet_generator(3, random_state=prng)
>>> print(V_xyz) # doctest: +ELLIPSIS
[[ 0.9670298... 0.7148159... 0.9762744...]
[ 0.5472322... 0.6977288... 0.0062302...]
[ 0.9726843... 0.2160895... 0.2529823...]]
>>> table_interpolation(V_xyz, table) # doctest: +ELLIPSIS
array([[ 1.0120664..., 0.7539146..., 1.0228540...],
[ 0.5075794..., 0.6479459..., 0.1066404...],
[ 1.0976519..., 0.1785998..., 0.2299897...]])
>>> table_interpolation(V_xyz, table, method='Tetrahedral')
... # doctest: +ELLIPSIS
array([[ 1.0196197..., 0.7674062..., 1.0311751...],
[ 0.5105603..., 0.6466722..., 0.1077296...],
[ 1.1178206..., 0.1762039..., 0.2209534...]])
"""
method = validate_method(method, TABLE_INTERPOLATION_METHODS)
return TABLE_INTERPOLATION_METHODS[method](V_xyz, table)
|
the-stack_0_4413 | import os
import numpy as np
from shapely import geometry, affinity
from pyquaternion import Quaternion
from shapely.geometry import Point
from nuscenes.eval.detection.utils import category_to_detection_name
from nuscenes.eval.detection.constants import DETECTION_NAMES
from nuscenes.utils.data_classes import LidarPointCloud
import logging
from src.data.utils import transform_polygon, render_polygon, transform
import cv2
import time
CAMERA_NAMES = ['CAM_FRONT']
# CAMERA_NAMES = ['CAM_FRONT', 'CAM_FRONT_LEFT', 'CAM_FRONT_RIGHT',
# 'CAM_BACK_LEFT', 'CAM_BACK_RIGHT', 'CAM_BACK']
NUSCENES_CLASS_NAMES = [
'drivable_area', 'ped_crossing', 'walkway', 'carpark', 'car', 'truck',
'bus', 'trailer', 'construction_vehicle', 'pedestrian', 'motorcycle',
'bicycle', 'traffic_cone', 'barrier'
]
# NUSCENES_CLASS_NAMES = [
# 'drivable_area', 'ped_crossing', 'walkway', 'carpark']
STATIC_CLASSES = ['drivable_area', 'ped_crossing', 'walkway', 'carpark_area']
LOCATIONS = ['boston-seaport', 'singapore-onenorth', 'singapore-queenstown',
'singapore-hollandvillage']
def iterate_samples(nuscenes, start_token):
sample_token = start_token
while sample_token != '':
sample = nuscenes.get('sample', sample_token)
yield sample
sample_token = sample['next']
def get_map_masks(nuscenes, map_data, sample_data, extents, resolution):
# Render each layer sequentially
layers = [get_layer_mask(nuscenes, polys, sample_data, extents,
resolution) for layer, polys in map_data.items()]
return np.stack(layers, axis=0)
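# Illustrative sketch (assumption, not part of the original file): rendering
# the static map layers for every sample of a scene; ``map_data``, ``extents``
# and ``resolution`` are assumed to be prepared by the caller.
#
#     scene = nuscenes.scene[0]
#     for sample in iterate_samples(nuscenes, scene['first_sample_token']):
#         lidar_data = nuscenes.get('sample_data', sample['data']['LIDAR_TOP'])
#         layer_masks = get_map_masks(nuscenes, map_data, lidar_data, extents, resolution)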
def get_layer_mask(nuscenes, polygons, sample_data, extents, resolution):
# Get the 2D affine transform from bev coords to map coords
tfm = get_sensor_transform(nuscenes, sample_data)[[0, 1, 3]][:, [0, 2, 3]]
inv_tfm = np.linalg.inv(tfm)
# Create a patch representing the birds-eye-view region in map coordinates
map_patch = geometry.box(*extents)
map_patch = transform_polygon(map_patch, tfm)
# Initialise the map mask
x1, z1, x2, z2 = extents
mask = np.zeros((int((z2 - z1) / resolution), int((x2 - x1) / resolution)),
dtype=np.uint8)
# Find all polygons which intersect with the area of interest
for polygon in polygons.query(map_patch):
polygon = polygon.intersection(map_patch)
# Transform into map coordinates
polygon = transform_polygon(polygon, inv_tfm)
# Render the polygon to the mask
render_shapely_polygon(mask, polygon, extents, resolution)
return mask.astype(np.bool)
def get_object_masks(nuscenes, sample_data, extents, resolution):
# Initialize object masks
nclass = len(DETECTION_NAMES) + 1
grid_width = int((extents[2] - extents[0]) / resolution)
grid_height = int((extents[3] - extents[1]) / resolution)
masks = np.zeros((nclass, grid_height, grid_width), dtype=np.uint8)
# Get the 2D affine transform from bev coords to map coords
tfm = get_sensor_transform(nuscenes, sample_data)[[0, 1, 3]][:, [0, 2, 3]]
inv_tfm = np.linalg.inv(tfm)
obj_list=[]
for box in nuscenes.get_boxes(sample_data['token']):
# Get the index of the class
det_name = category_to_detection_name(box.name)
if det_name not in DETECTION_NAMES:
class_id = -1
else:
class_id = DETECTION_NAMES.index(det_name)
# Get bounding box coordinates in the grid coordinate frame
bbox = box.bottom_corners()[:2]
local_bbox = np.dot(inv_tfm[:2, :2], bbox).T + inv_tfm[:2, 2]
temp_ar = np.squeeze(np.zeros((9,1),np.float32))
temp_ar[:8] = np.float32(local_bbox).flatten()
temp_ar[-1] = class_id
obj_list.append(np.copy(temp_ar))
# Render the rotated bounding box to the mask
render_polygon(masks[class_id], local_bbox, extents, resolution)
return np.array(obj_list), masks
#
#def get_object_masks(nuscenes, sample_data, extents, resolution):
#
# # Initialize object masks
# nclass = len(DETECTION_NAMES) + 2
# grid_width = int((extents[2] - extents[0]) / resolution)
# grid_height = int((extents[3] - extents[1]) / resolution)
# masks = np.zeros((nclass, grid_height, grid_width), dtype=np.uint8)
#
# # Get the 2D affine transform from bev coords to map coords
# tfm = get_sensor_transform(nuscenes, sample_data)[[0, 1, 3]][:, [0, 2, 3]]
# inv_tfm = np.linalg.inv(tfm)
#
# for box in nuscenes.get_boxes(sample_data['token']):
#
# # Get the index of the class
# det_name = category_to_detection_name(box.name)
# if det_name not in DETECTION_NAMES:
# class_id = -1
# else:
# class_id = DETECTION_NAMES.index(det_name)
#
# # Get bounding box coordinates in the grid coordinate frame
# bbox = box.bottom_corners()[:2]
# local_bbox = np.dot(inv_tfm[:2, :2], bbox).T + inv_tfm[:2, 2]
#
# # Render the rotated bounding box to the mask
# render_polygon(masks[class_id], local_bbox, extents, resolution)
#
# return masks.astype(np.bool)
def get_sensor_transform(nuscenes, sample_data):
# Load sensor transform data
sensor = nuscenes.get(
'calibrated_sensor', sample_data['calibrated_sensor_token'])
sensor_tfm = make_transform_matrix(sensor)
# Load ego pose data
pose = nuscenes.get('ego_pose', sample_data['ego_pose_token'])
pose_tfm = make_transform_matrix(pose)
return np.dot(pose_tfm, sensor_tfm)
def load_point_cloud(nuscenes, sample_data):
# Load point cloud
lidar_path = os.path.join(nuscenes.dataroot, sample_data['filename'])
pcl = LidarPointCloud.from_file(lidar_path)
return pcl.points[:3, :].T
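# Illustrative sketch (assumption, not part of the original file), given a
# lidar ``sample_data`` record:
#
#     points = load_point_cloud(nuscenes, lidar_data)  # (N, 3) points in the sensor frame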
def make_transform_matrix(record):
"""
Create a 4x4 transform matrix from a calibrated_sensor or ego_pose record
"""
my_transform = np.eye(4)
my_transform[:3, :3] = Quaternion(record['rotation']).rotation_matrix
my_transform[:3, 3] = np.array(record['translation'])
return my_transform
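# Illustrative sketch (not part of the original file): the record only needs
# 'rotation' (a w, x, y, z quaternion) and 'translation' fields, matching the
# nuScenes ego_pose / calibrated_sensor schema.
#
#     identity_pose = {'rotation': [1.0, 0.0, 0.0, 0.0],
#                      'translation': [0.0, 0.0, 0.0]}
#     tfm = make_transform_matrix(identity_pose)  # 4x4 identity transform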
def render_shapely_polygon(mask, polygon, extents, resolution):
if polygon.geom_type == 'Polygon':
# Render exteriors
# logging.error('POLYGON ' + str(polygon.exterior.coords))
# time.sleep(1)
render_polygon(mask, polygon.exterior.coords, extents, resolution, 1)
# Render interiors
for hole in polygon.interiors:
render_polygon(mask, hole.coords, extents, resolution, 0)
# Handle the case of compound shapes
else:
for poly in polygon:
render_shapely_polygon(mask, poly, extents, resolution)
def render_point(mask, polygon, extents, resolution, value):
    # Convert the point into grid coordinates and render it into the mask.
    polygon = (polygon - np.array(extents[:2])) / resolution
    polygon = np.ascontiguousarray(polygon).round().astype(np.int32)
    cv2.fillConvexPoly(mask, polygon, value)
#def render_centerlines(map_api,resolution_meters=0.5,
# figsize: Union[None, float, Tuple[float, float]] = None,
# bitmap: Optional[BitMap] = None) -> Tuple[Figure, Axes]:
# """
# Render the centerlines of all lanes and lane connectors.
# :param resolution_meters: How finely to discretize the lane. Smaller values ensure curved
# lanes are properly represented.
# :param figsize: Size of the figure.
# :param bitmap: Optional BitMap object to render below the other map layers.
# """
# # Discretize all lanes and lane connectors.
# pose_lists = map_api.discretize_centerlines(resolution_meters)
#
#
#
# # Render connectivity lines.
# fig = plt.figure(figsize=self._get_figsize(figsize))
# ax = fig.add_axes([0, 0, 1, 1 / self.canvas_aspect_ratio])
#
# if bitmap is not None:
# bitmap.render(self.map_api.canvas_edge, ax)
#
# for pose_list in pose_lists:
# if len(pose_list) > 0:
# plt.plot(pose_list[:, 0], pose_list[:, 1])
#
# return fig, ax
def view_points(points, view, normalize=True):
"""
This is a helper class that maps 3d points to a 2d plane. It can be used to implement both perspective and
orthographic projections. It first applies the dot product between the points and the view. By convention,
the view should be such that the data is projected onto the first 2 axis. It then optionally applies a
normalization along the third dimension.
For a perspective projection the view should be a 3x3 camera matrix, and normalize=True
For an orthographic projection with translation the view is a 3x4 matrix and normalize=False
For an orthographic projection without translation the view is a 3x3 matrix (optionally 3x4 with last columns
all zeros) and normalize=False
:param points: <np.float32: 3, n> Matrix of points, where each point (x, y, z) is along each column.
:param view: <np.float32: n, n>. Defines an arbitrary projection (n <= 4).
The projection should be such that the corners are projected onto the first 2 axis.
:param normalize: Whether to normalize the remaining coordinate (along the third axis).
:return: <np.float32: 3, n>. Mapped point. If normalize=False, the third coordinate is the height.
"""
assert view.shape[0] <= 4
assert view.shape[1] <= 4
assert points.shape[0] == 3
viewpad = np.eye(4)
viewpad[:view.shape[0], :view.shape[1]] = view
nbr_points = points.shape[1]
# Do operation in homogenous coordinates.
points = np.concatenate((points, np.ones((1, nbr_points))))
points = np.dot(viewpad, points)
points = points[:3, :]
norm_const = points[2:3, :]
if normalize:
points = points / points[2:3, :].repeat(3, 0).reshape(3, nbr_points)
return points,norm_const
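# Illustrative sketch (not part of the original file): projecting camera-frame
# points with a hypothetical 3x3 pinhole intrinsic matrix.
#
#     K = np.array([[1266.0, 0.0, 816.0],
#                   [0.0, 1266.0, 491.0],
#                   [0.0, 0.0, 1.0]])
#     pts = np.array([[1.0, -1.0], [0.5, 0.2], [10.0, 12.0]])  # shape (3, n)
#     uv, depth = view_points(pts, K, normalize=True)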
# def check_visible(polygon, vis_mask):
def get_centerlines(nuscenes, new_ar, sample_data, extents, resolution, vis_mask, already_found=None):
tfm = get_sensor_transform(nuscenes, sample_data)[[0, 1, 3]][:, [0, 2, 3]]
my_thresh = 100
my_x = tfm[0,-1]
my_y = tfm[1,-1]
road_ind_ar = np.arange(len(new_ar))
selecteds = np.abs(new_ar[:,:,0] - my_x) + np.abs(new_ar[:,:,1] - my_y) < my_thresh
selected_lines = np.any(selecteds, axis=-1)
logging.error('FOUND ' + str(np.sum(selected_lines)) + ' LINES')
my_road_ar = road_ind_ar[selected_lines]
my_lines = new_ar[selected_lines]
my_sel_points = selecteds[selected_lines]
inv_tfm = np.linalg.inv(tfm)
# Create a patch representing the birds-eye-view region in map coordinates
map_patch = geometry.box(*extents)
map_patch = transform_polygon(map_patch, tfm)
# Initialise the map mask
x1, z1, x2, z2 = extents
mask = np.zeros((int((z2 - z1) / resolution), int((x2 - x1) / resolution)),
dtype=np.uint16)
# Find all polygons which intersect with the area of interest
loc_array = np.zeros((len(new_ar),2,2),np.uint8)
for road_id in range(len(my_lines)):
cons_points = my_lines[road_id][my_sel_points[road_id]]
cur_min = False
cur_last = (None,None)
for p in range(len(cons_points)):
cur = cons_points[p][:2]
cur_point = Point(cur)
cont = map_patch.contains(cur_point)
if cont:
# # Transform into map coordinates
polygon = transform_polygon(cur_point, inv_tfm)
if len(polygon.coords) > 0:
polygon = (polygon.coords[0]- np.array(extents[:2])) / resolution
polygon = np.ascontiguousarray(polygon).round().astype(np.int32)
if ((polygon[0] >= 0) & (polygon[1] >= 0)):
if ((polygon[0] < mask.shape[1]) & (polygon[1] < mask.shape[0])):
mask[polygon[1],polygon[0]] = my_road_ar[road_id] + 1
#
if vis_mask[polygon[1],polygon[0]] > 0.5:
if not cur_min:
#
#
loc_array[my_road_ar[road_id],0,0] = np.int32(polygon[1])
loc_array[my_road_ar[road_id],0,1] = np.int32(polygon[0])
cur_min = True
#
cur_last = (np.int32(polygon[1]),np.int32(polygon[0]))
#
if cur_last[0] != None:
#
loc_array[my_road_ar[road_id],1,0] = np.int32(cur_last[0])
loc_array[my_road_ar[road_id],1,1] = np.int32(cur_last[1])
else:
loc_array[my_road_ar[road_id],1,0] = 255
loc_array[my_road_ar[road_id],1,1] = 255
if not cur_min:
loc_array[my_road_ar[road_id],0,0] = 255
loc_array[my_road_ar[road_id],0,1] = 255
return mask, loc_array
#
#def get_centerlines(nuscenes, centers, sample_data, extents, resolution, vis_mask, already_found=None):
#
# # Get the 2D affine transform from bev coords to map coords
# tfm = get_sensor_transform(nuscenes, sample_data)[[0, 1, 3]][:, [0, 2, 3]]
#
# tfm[1,-1] = tfm[1,-1]
#
#
# inv_tfm = np.linalg.inv(tfm)
#
# # Create a patch representing the birds-eye-view region in map coordinates
# map_patch = geometry.box(*extents)
# map_patch = transform_polygon(map_patch, tfm)
#
# # Initialise the map mask
# x1, z1, x2, z2 = extents
#
#
# mask = np.zeros((int((z2 - z1) / resolution), int((x2 - x1) / resolution)),
# dtype=np.uint16)
#
# # Find all polygons which intersect with the area of interest
#
# loc_array = np.zeros((len(centers),2,2),np.uint8)
#
# for road_id in range(len(centers)):
#
# cur_min = False
# cur_last = (None,None)
#
# for p in range(len(centers[road_id])):
# cur = centers[road_id][p][:2]
# cur_point = Point(cur)
# cont = map_patch.contains(cur_point)
#
# if cont:
#
## # Transform into map coordinates
# polygon = transform_polygon(cur_point, inv_tfm)
# if len(polygon.coords) > 0:
# polygon = (polygon.coords[0]- np.array(extents[:2])) / resolution
# polygon = np.ascontiguousarray(polygon).round().astype(np.int32)
# if ((polygon[0] >= 0) & (polygon[1] >= 0)):
# if ((polygon[0] < mask.shape[1]) & (polygon[1] < mask.shape[0])):
# mask[polygon[1],polygon[0]] = road_id + 1
# #
# if vis_mask[polygon[1],polygon[0]] > 0.5:
#
# if not cur_min:
# #
# #
# loc_array[road_id,0,0] = np.uint8(polygon[1])
# loc_array[road_id,0,1] = np.uint8(polygon[0])
# cur_min = True
# #
# cur_last = (polygon[1],polygon[0])
##
# if cur_last[0] != None:
##
# loc_array[road_id,1,0] = np.uint8(cur_last[0])
# loc_array[road_id,1,1] = np.uint8(cur_last[1])
# else:
# loc_array[road_id,1,0] = 255
# loc_array[road_id,1,1] = 255
#
# if not cur_min:
# loc_array[road_id,0,0] = 255
# loc_array[road_id,0,1] = 255
#
# return mask, loc_array
def get_moved_centerlines(nuscenes, centers, sample_data, extents, resolution, vis_mask, beta, already_found):
start_point_base = 5000
end_point_base = 10000
# Get the 2D affine transform from bev coords to map coords
tfm = get_sensor_transform(nuscenes, sample_data)[[0, 1, 3]][:, [0, 2, 3]]
tfm[1,-1] = tfm[1,-1] - beta
inv_tfm = np.linalg.inv(tfm)
# Create a patch representing the birds-eye-view region in map coordinates
map_patch = geometry.box(*extents)
map_patch = transform_polygon(map_patch, tfm)
# Initialise the map mask
x1, z1, x2, z2 = extents
mask = np.zeros((int((z2 - z1) / resolution), int((x2 - x1) / resolution)),
dtype=np.uint16)
# Find all polygons which intersect with the area of interest
# selected_roads=[]
# found=False
loc_array = np.zeros((len(centers),2,2),np.uint8)
road_ids = list(np.int64(np.unique(already_found)[1:] - 1))
for road_id in road_ids:
# temp_mask = np.zeros((int((z2 - z1) / resolution), int((x2 - x1) / resolution)),
# dtype=np.uint16)
# per_road_check = False
cur_min = False
cur_last = (None,None)
for p in range(len(centers[road_id])):
cur = centers[road_id][p][:2]
cur_point = Point(cur)
cont = map_patch.contains(cur_point)
if cont:
# logging.error('road_id ' + str(road_id))
# logging.error('point ' + str(p))
# found=True
# break
# # Transform into map coordinates
polygon = transform_polygon(cur_point, inv_tfm)
if len(polygon.coords) > 0:
polygon = (polygon.coords[0]- np.array(extents[:2])) / resolution
polygon = np.ascontiguousarray(polygon).round().astype(np.int32)
if ((polygon[0] >= 0) & (polygon[1] >= 0)):
if ((polygon[0] < mask.shape[1]) & (polygon[1] < mask.shape[0])):
mask[polygon[1],polygon[0]] = road_id + 1
#
if vis_mask[polygon[1],polygon[0]] > 0.5:
if not cur_min:
#
# if mask[polygon[1],polygon[0]] > start_point_base:
#
# if mask[polygon[1],polygon[0]] > end_point_base:
#
# rel_id = mask[polygon[1],polygon[0]] - end_point_base - 1
# logging.error('START OF ROAD '+ str(road_id) + ' and END OF '+ str(rel_id))
#
# else:
# rel_id = mask[polygon[1],polygon[0]] - start_point_base - 1
#
# logging.error('START OF ROAD '+ str(road_id) + ' and START OF '+ str(rel_id))
#
loc_array[road_id,0,0] = np.uint8(polygon[1])
loc_array[road_id,0,1] = np.uint8(polygon[0])
cur_min = True
#
cur_last = (polygon[1],polygon[0])
#
# # Render the polygon to the mask
# logging.error('POLYGON ' + str(polygon.coords[1]))
# logging.error('EXTENTS ' + str(np.array(extents[:2])))
# polygon = (polygon - np.array(extents[:2])) / resolution
# polygon = np.ascontiguousarray(polygon).round().astype(np.int32)
# cv2.fillConvexPoly(mask, polygon, road_id)
# render_point(mask, polygon, extents, resolution,road_id)
# if found:
# break
if cur_last[0] != None:
# if mask[cur_last[0],cur_last[1]] > 25000:
# logging.error('ENDPOITNS COLLIDED IN ROAD '+ str(road_id) + ' and '+ str(np.float32(mask[cur_last[0],cur_last[1]])//10))
# mask[cur_last[0],cur_last[1]] = (road_id + 1)*10 + 1
loc_array[road_id,1,0] = np.uint8(cur_last[0])
loc_array[road_id,1,1] = np.uint8(cur_last[1])
else:
loc_array[road_id,1,0] = 255
loc_array[road_id,1,1] = 255
if not cur_min:
loc_array[road_id,0,0] = 255
loc_array[road_id,0,1] = 255
return mask, loc_array
def zoom_augment_grids(image_shape, intrinsics, cs, beta):
image = np.zeros(image_shape)
col_ar2 = np.arange(image.shape[1])
row_ar2 = np.arange(image.shape[0])
mesh2_col, mesh2_row = np.meshgrid(col_ar2, row_ar2)
write_col = np.copy(mesh2_col)
write_row = np.copy(mesh2_row)
col_ar1 = np.arange(image.shape[1])
row_ar1 = np.arange(image.shape[0])
mesh1_col, mesh1_row = np.meshgrid(col_ar1, row_ar1)
x_center = intrinsics[0,-1]
y_center = intrinsics[1,-1]
f = intrinsics[0,0]
Y = -cs[-1]
for m in range(mesh1_row.shape[0]):
for n in range(mesh1_row.shape[1]):
write_col[m,n] = int((mesh2_col[m,n] - x_center)*f*Y/(f*Y - beta*mesh2_row[m,n] + beta*y_center) + x_center)
write_row[m,n] = int(f*Y*(mesh2_row[m,n] - y_center)/(f*Y - beta*mesh2_row[m,n] + beta*y_center) + y_center)
total_mask = np.ones_like(write_col)
total_mask[write_col < 0] = 0
total_mask[write_col > (image.shape[1]-1)] = 0
total_mask[write_row < 0] = 0
total_mask[write_row > (image.shape[0]-1)] = 0
write_col[write_col < 0] = 0
write_col[write_col > (image.shape[1]-1)] = 0
write_row[write_row < 0] = 0
write_row[write_row > (image.shape[0]-1)] = 0
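    # The grids returned below are typically consumed with cv2.remap
    # (illustrative assumption, not part of the original file), e.g. for an
    # HxWx3 image:
    #
    #     rows, cols, valid = zoom_augment_grids(image.shape, intrinsics, cs, beta)
    #     warped = cv2.remap(image, np.float32(cols), np.float32(rows), cv2.INTER_LINEAR)
    #     warped = warped * valid[..., None]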
return write_row, write_col, total_mask |
the-stack_0_4414 | import ast
import contextlib
import json
import os
import re
import sys
import threading
from datetime import timedelta
import pytest
import retrying
from six.moves.BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
from dcos import constants
from dcoscli.test.common import (assert_command, assert_lines, exec_command,
popen_tty, update_config)
from dcoscli.test.marathon import (app, list_apps, list_deployments, show_app,
start_app, watch_all_deployments,
watch_deployment)
_ZERO_INSTANCE_APP_ID = 'zero-instance-app'
_ZERO_INSTANCE_APP_INSTANCES = 100
def test_help():
with open('tests/data/marathon/help.txt') as content:
assert_command(['dcos', 'marathon', '--help'],
stdout=content.read().encode('utf-8'))
def test_version():
assert_command(['dcos', 'marathon', '--version'],
stdout=b'dcos-marathon version SNAPSHOT\n')
def test_info():
assert_command(['dcos', 'marathon', '--info'],
stdout=b'Deploy and manage applications to DC/OS\n')
def test_about():
returncode, stdout, stderr = exec_command(['dcos', 'marathon', 'about'])
assert returncode == 0
assert stderr == b''
result = json.loads(stdout.decode('utf-8'))
assert result['name'] == "marathon"
@pytest.fixture
def env():
r = os.environ.copy()
r.update({constants.PATH_ENV: os.environ[constants.PATH_ENV]})
return r
def test_empty_list():
list_apps()
def test_add_app_through_http():
with _zero_instance_app_through_http():
list_apps('zero-instance-app')
def test_add_app_bad_resource():
stderr = (b'Error: can\'t read from resource: bad_resource. Please check that it exists\n')
assert_command(['dcos', 'marathon', 'app', 'add', 'bad_resource'],
returncode=1,
stderr=stderr)
def test_remove_app():
with _zero_instance_app():
pass
list_apps()
def test_add_bad_json_app():
with open('tests/data/marathon/apps/bad.json') as fd:
returncode, stdout, stderr = exec_command(
['dcos', 'marathon', 'app', 'add'],
stdin=fd)
assert returncode == 1
assert stdout == b''
assert stderr.decode('utf-8').startswith('Error: error loading JSON: ')
def test_add_existing_app():
with _zero_instance_app():
app_path = 'tests/data/marathon/apps/zero_instance_sleep_v2.json'
with open(app_path) as fd:
stderr = b"Error: Application '/zero-instance-app' already exists\n"
assert_command(['dcos', 'marathon', 'app', 'add'],
returncode=1,
stderr=stderr,
stdin=fd)
def test_show_absolute_app_version():
with _zero_instance_app():
_update_app(
'zero-instance-app',
'tests/data/marathon/apps/update_zero_instance_sleep.json')
result = show_app('zero-instance-app')
show_app('zero-instance-app', result['version'])
def test_show_relative_app_version():
with _zero_instance_app():
_update_app(
'zero-instance-app',
'tests/data/marathon/apps/update_zero_instance_sleep.json')
show_app('zero-instance-app', "-1")
def test_show_missing_relative_app_version():
app_id = _ZERO_INSTANCE_APP_ID
with _zero_instance_app():
_update_app(
app_id,
'tests/data/marathon/apps/update_zero_instance_sleep.json')
# Marathon persists app versions indefinitely by ID, so pick a large
# index here in case the history is long
cmd = ['dcos', 'marathon', 'app', 'show', '--app-version=-200', app_id]
returncode, stdout, stderr = exec_command(cmd)
assert returncode == 1
assert stdout == b''
pattern = ("Error: application 'zero-instance-app' only has "
"[1-9][0-9]* version\\(s\\)\n")
assert re.fullmatch(pattern, stderr.decode('utf-8'), flags=re.DOTALL)
def test_show_missing_absolute_app_version():
with _zero_instance_app():
_update_app(
'zero-instance-app',
'tests/data/marathon/apps/update_zero_instance_sleep.json')
returncode, stdout, stderr = exec_command(
['dcos', 'marathon', 'app', 'show',
'--app-version=2000-02-11T20:39:32.972Z', 'zero-instance-app'])
assert returncode == 1
assert stdout == b''
assert stderr.decode('utf-8').startswith(
"Error: app '/zero-instance-app' does not exist")
def test_show_bad_app_version():
with _zero_instance_app():
_update_app(
'zero-instance-app',
'tests/data/marathon/apps/update_zero_instance_sleep.json')
returncode, stdout, stderr = exec_command(
['dcos', 'marathon', 'app', 'show', '--app-version=20:39:32.972Z',
'zero-instance-app'])
assert returncode == 1
assert stdout == b''
assert stderr.startswith(b'Error: invalid timestamp provided')
def test_show_bad_relative_app_version():
with _zero_instance_app():
_update_app(
'zero-instance-app',
'tests/data/marathon/apps/update_zero_instance_sleep.json')
assert_command(
['dcos', 'marathon', 'app', 'show',
'--app-version=2', 'zero-instance-app'],
returncode=1,
stderr=b"Error: relative versions must be negative: 2\n")
def test_start_missing_app():
assert_command(
['dcos', 'marathon', 'app', 'start', 'missing-id'],
returncode=1,
stderr=b"Error: app '/missing-id' does not exist\n")
def test_start_already_started_app():
with _zero_instance_app():
start_app('zero-instance-app')
stderr = (b"Error: application 'zero-instance-app' already "
b"started: 1 instances\n")
assert_command(
['dcos', 'marathon', 'app', 'start', 'zero-instance-app'],
returncode=1,
stderr=stderr)
def test_stop_missing_app():
assert_command(['dcos', 'marathon', 'app', 'stop', 'missing-id'],
returncode=1,
stderr=b"Error: app '/missing-id' does not exist\n")
def test_stop_app():
with _zero_instance_app():
start_app('zero-instance-app', 3)
watch_all_deployments()
returncode, stdout, stderr = exec_command(
['dcos', 'marathon', 'app', 'stop', 'zero-instance-app'])
assert returncode == 0
assert stdout.decode().startswith('Created deployment ')
assert stderr == b''
def test_stop_already_stopped_app():
with _zero_instance_app():
stderr = (b"Error: app '/zero-instance-app' already "
b"stopped: 0 instances\n")
assert_command(
['dcos', 'marathon', 'app', 'stop', 'zero-instance-app'],
returncode=1,
stderr=stderr)
def test_update_missing_app():
assert_command(['dcos', 'marathon', 'app', 'update', 'missing-id'],
stderr=b"Error: App '/missing-id' does not exist\n",
returncode=1)
def test_update_bad_type():
with _zero_instance_app():
returncode, stdout, stderr = exec_command(
['dcos', 'marathon', 'app', 'update',
'zero-instance-app', 'cpus="a string"'])
stderr_end = b"""{"message":"Invalid JSON","details":[{"path":"/cpus","errors":["error.expected.jsnumber"]}]}""" # noqa: E501
assert returncode == 1
assert stderr_end in stderr
assert stdout == b''
def test_update_invalid_request():
returncode, stdout, stderr = exec_command(
['dcos', 'marathon', 'app', 'update', '{', 'instances'])
assert returncode == 1
assert stdout == b''
stderr = stderr.decode()
# TODO (tamar): this becomes 'Error: App '/{' does not exist\n"'
# in Marathon 0.11.0
assert stderr.startswith('Error on request')
assert stderr.endswith('HTTP 400: Bad Request\n')
def test_app_add_invalid_request():
path = os.path.join(
'tests', 'data', 'marathon', 'apps', 'app_add_400.json')
returncode, stdout, stderr = exec_command(
['dcos', 'marathon', 'app', 'add', path])
stderr_end = b"Invalid JSON (path: '/container/docker/network' errors: error.unknown.enum.literal)" # noqa: E501
assert returncode == 1
assert stderr_end in stderr
assert stdout == b''
def test_update_app():
with _zero_instance_app():
returncode, stdout, stderr = exec_command(
['dcos', 'marathon', 'app', 'update', 'zero-instance-app',
'cpus=1', 'mem=20', "cmd='sleep 100'"])
assert returncode == 0
assert stdout.decode().startswith('Created deployment ')
assert stderr == b''
def test_update_app_json():
with _zero_instance_app():
returncode, stdout, stderr = exec_command(
['dcos', 'marathon', 'app', 'update', 'zero-instance-app',
"env='{\"key\":\"/value\"}'"])
assert returncode == 0
assert stdout.decode().startswith('Created deployment ')
assert stderr == b''
def test_update_app_from_stdin():
with _zero_instance_app():
_update_app(
'zero-instance-app',
'tests/data/marathon/apps/update_zero_instance_sleep.json')
def test_restarting_stopped_app():
with _zero_instance_app():
stderr = (b"Error: unable to perform rolling restart of application '"
b"/zero-instance-app' because it has no running tasks\n")
assert_command(
['dcos', 'marathon', 'app', 'restart', 'zero-instance-app'],
returncode=1,
stderr=stderr)
def test_restarting_missing_app():
assert_command(['dcos', 'marathon', 'app', 'restart', 'missing-id'],
returncode=1,
stderr=b"Error: app '/missing-id' does not exist\n")
def test_killing_app():
with _zero_instance_app():
start_app('zero-instance-app', 3)
watch_all_deployments()
returncode, stdout, stderr = exec_command(
['dcos', 'marathon', 'app', 'kill', 'zero-instance-app'])
assert returncode == 0
assert stderr == b''
out = stdout.decode()
assert out.startswith('Killed tasks: ')
        out = out[len('Killed tasks: '):]
dictout = ast.literal_eval(out)
assert len(dictout) == 3
def test_killing_scaling_app():
with _zero_instance_app():
start_app('zero-instance-app', 3)
watch_all_deployments()
_list_tasks(3)
command = ['dcos', 'marathon', 'app', 'kill', '--scale',
'zero-instance-app']
returncode, stdout, stderr = exec_command(command)
assert returncode == 0
assert stdout.decode().startswith('Started deployment: ')
assert stdout.decode().find('version') > -1
assert stdout.decode().find('deploymentId') > -1
assert stderr == b''
watch_all_deployments()
_list_tasks(0)
def test_killing_with_host_app():
with _zero_instance_app():
start_app('zero-instance-app', 3)
watch_all_deployments()
existing_tasks = _list_tasks(3, 'zero-instance-app')
task_hosts = set([task['host'] for task in existing_tasks])
if len(task_hosts) <= 1:
pytest.skip('test needs 2 or more agents to succeed, '
'only {} agents available'.format(len(task_hosts)))
assert len(task_hosts) > 1
kill_host = list(task_hosts)[0]
expected_to_be_killed = set([task['id']
for task in existing_tasks
if task['host'] == kill_host])
not_to_be_killed = set([task['id']
for task in existing_tasks
if task['host'] != kill_host])
assert len(not_to_be_killed) > 0
assert len(expected_to_be_killed) > 0
command = ['dcos', 'marathon', 'app', 'kill', '--host', kill_host,
'zero-instance-app']
returncode, stdout, stderr = exec_command(command)
assert stdout.decode().startswith('Killed tasks: ')
assert stderr == b''
new_tasks = set([task['id'] for task in _list_tasks()])
assert not_to_be_killed.intersection(new_tasks) == not_to_be_killed
assert len(expected_to_be_killed.intersection(new_tasks)) == 0
def test_kill_stopped_app():
with _zero_instance_app():
returncode, stdout, stderr = exec_command(
['dcos', 'marathon', 'app', 'kill', 'zero-instance-app'])
assert returncode == 0
assert stdout.decode().startswith('Killed tasks: []')
def test_kill_missing_app():
returncode, stdout, stderr = exec_command(
['dcos', 'marathon', 'app', 'kill', 'app'])
assert returncode == 1
assert stdout.decode() == ''
stderr_expected = "Error: app '/app' does not exist"
assert stderr.decode().strip() == stderr_expected
def test_list_version_missing_app():
assert_command(
['dcos', 'marathon', 'app', 'version', 'list', 'missing-id'],
returncode=1,
stderr=b"Error: Marathon API error: App '/missing-id' does not exist\n")
def test_list_version_negative_max_count():
assert_command(['dcos', 'marathon', 'app', 'version', 'list',
'missing-id', '--max-count=-1'],
returncode=1,
stderr=b'Error: maximum count must be a positive number\n')
def test_list_version_app():
app_id = _ZERO_INSTANCE_APP_ID
with _zero_instance_app():
_list_versions(app_id, 1)
_update_app(
app_id,
'tests/data/marathon/apps/update_zero_instance_sleep.json')
_list_versions(app_id, 2)
def test_list_version_max_count():
app_id = _ZERO_INSTANCE_APP_ID
with _zero_instance_app():
_update_app(
app_id,
'tests/data/marathon/apps/update_zero_instance_sleep.json')
_list_versions(app_id, 1, 1)
_list_versions(app_id, 2, 2)
_list_versions(app_id, 2, 3)
def test_list_empty_deployment():
list_deployments(0)
def test_list_deployment():
with _zero_instance_app():
start_app('zero-instance-app', _ZERO_INSTANCE_APP_INSTANCES)
list_deployments(1)
def test_list_deployment_table():
"""Simple sanity check for listing deployments with a table output.
The more specific testing is done in unit tests.
"""
with _zero_instance_app():
start_app('zero-instance-app', _ZERO_INSTANCE_APP_INSTANCES)
assert_lines(['dcos', 'marathon', 'deployment', 'list'], 2)
def test_list_deployment_missing_app():
with _zero_instance_app():
start_app('zero-instance-app')
list_deployments(0, 'missing-id')
def test_list_deployment_app():
with _zero_instance_app():
start_app('zero-instance-app', _ZERO_INSTANCE_APP_INSTANCES)
list_deployments(1, 'zero-instance-app')
def test_rollback_missing_deployment():
assert_command(
['dcos', 'marathon', 'deployment', 'rollback', 'missing-deployment'],
returncode=1,
stderr=b'Error: DeploymentPlan missing-deployment does not exist\n')
def test_rollback_deployment():
with _zero_instance_app():
start_app('zero-instance-app', _ZERO_INSTANCE_APP_INSTANCES)
result = list_deployments(1, 'zero-instance-app')
returncode, stdout, stderr = exec_command(
['dcos', 'marathon', 'deployment', 'rollback', result[0]['id']])
result = json.loads(stdout.decode('utf-8'))
assert returncode == 0
assert 'deploymentId' in result
assert 'version' in result
assert stderr == b''
watch_all_deployments()
list_deployments(0)
def test_stop_deployment():
with _zero_instance_app():
start_app('zero-instance-app', _ZERO_INSTANCE_APP_INSTANCES)
result = list_deployments(1, 'zero-instance-app')
assert_command(
['dcos', 'marathon', 'deployment', 'stop', result[0]['id']])
list_deployments(0)
def test_watching_missing_deployment():
watch_deployment('missing-deployment', 1)
def test_watching_deployment():
with _zero_instance_app():
start_app('zero-instance-app', _ZERO_INSTANCE_APP_INSTANCES)
result = list_deployments(1, 'zero-instance-app')
watch_deployment(result[0]['id'], 60)
assert_command(
['dcos', 'marathon', 'deployment', 'stop', result[0]['id']])
list_deployments(0, 'zero-instance-app')
def test_list_empty_task():
_list_tasks(0)
def test_list_empty_task_not_running_app():
with _zero_instance_app():
_list_tasks(0)
def test_list_tasks():
with _zero_instance_app():
start_app('zero-instance-app', 3)
watch_all_deployments()
_list_tasks(3)
def test_list_tasks_table():
with _zero_instance_app():
start_app('zero-instance-app', 3)
watch_all_deployments()
assert_lines(['dcos', 'marathon', 'task', 'list'], 4)
def test_list_app_tasks():
with _zero_instance_app():
start_app('zero-instance-app', 3)
watch_all_deployments()
_list_tasks(3, 'zero-instance-app')
def test_list_missing_app_tasks():
with _zero_instance_app():
start_app('zero-instance-app', 3)
watch_all_deployments()
_list_tasks(0, 'missing-id')
def test_show_missing_task():
returncode, stdout, stderr = exec_command(
['dcos', 'marathon', 'task', 'show', 'missing-id'])
stderr = stderr.decode('utf-8')
assert returncode == 1
assert stdout == b''
assert stderr.startswith("Task '")
assert stderr.endswith("' does not exist\n")
def test_show_task():
with _zero_instance_app():
start_app('zero-instance-app', 3)
watch_all_deployments()
result = _list_tasks(3, 'zero-instance-app')
returncode, stdout, stderr = exec_command(
['dcos', 'marathon', 'task', 'show', result[0]['id']])
result = json.loads(stdout.decode('utf-8'))
assert returncode == 0
assert result['appId'] == '/zero-instance-app'
assert stderr == b''
def test_stop_task():
with _zero_instance_app():
start_app('zero-instance-app', 1)
watch_all_deployments()
task_list = _list_tasks(1, 'zero-instance-app')
task_id = task_list[0]['id']
_stop_task(task_id)
def test_stop_task_wipe():
with _zero_instance_app():
start_app('zero-instance-app', 1)
watch_all_deployments()
task_list = _list_tasks(1, 'zero-instance-app')
task_id = task_list[0]['id']
_stop_task(task_id, '--wipe')
def test_kill_one_task():
with _zero_instance_app():
start_app('zero-instance-app', 1)
watch_all_deployments()
task_list = _list_tasks(1, 'zero-instance-app')
task_id = [task_list[0]['id']]
_kill_task(task_id)
def test_kill_two_tasks():
with _zero_instance_app():
start_app('zero-instance-app', 2)
watch_all_deployments()
task_list = _list_tasks(2, 'zero-instance-app')
task_ids = [task['id'] for task in task_list]
_kill_task(task_ids)
def test_kill_and_scale_task():
with _zero_instance_app():
start_app('zero-instance-app', 2)
watch_all_deployments()
task_list = _list_tasks(2, 'zero-instance-app')
task_id = [task_list[0]['id']]
_kill_task(task_id, scale=True)
task_list = _list_tasks(1, 'zero-instance-app')
def test_kill_unknown_task():
with _zero_instance_app():
start_app('zero-instance-app')
watch_all_deployments()
task_id = ['unknown-task-id']
_kill_task(task_id, expect_success=False)
def test_kill_task_wipe():
with _zero_instance_app():
start_app('zero-instance-app', 1)
watch_all_deployments()
task_list = _list_tasks(1, 'zero-instance-app')
task_id = [task_list[0]['id']]
_kill_task(task_id, wipe=True)
def test_stop_unknown_task():
with _zero_instance_app():
start_app('zero-instance-app')
watch_all_deployments()
task_id = 'unknown-task-id'
_stop_task(task_id, expect_success=False)
def test_stop_unknown_task_wipe():
with _zero_instance_app():
start_app('zero-instance-app')
watch_all_deployments()
task_id = 'unknown-task-id'
_stop_task(task_id, '--wipe', expect_success=False)
def test_bad_configuration(env):
with update_config('marathon.url', 'http://localhost:88888', env):
returncode, stdout, stderr = exec_command(
['dcos', 'marathon', 'about'], env=env)
assert returncode == 1
def test_app_locked_error():
with app('tests/data/marathon/apps/sleep_many_instances.json',
'/sleep-many-instances',
wait=False):
stderr = (b'Error: changes blocked: deployment '
b'already in progress for app\n')
assert_command(
['dcos', 'marathon', 'app', 'stop', 'sleep-many-instances'],
returncode=1,
stderr=stderr)
def test_ping():
assert_command(['dcos', 'marathon', 'ping'],
stdout=b'Marathon ping response[1x]: "pong"\n')
def test_leader_show():
returncode, stdout, stderr = exec_command(
['dcos', 'marathon', 'leader', 'show', '--json'])
result = json.loads(stdout.decode('utf-8'))
assert returncode == 0
assert stderr == b''
assert result['host'] == "marathon.mesos."
assert 'ip' in result
def ignore_exception(exc):
return isinstance(exc, Exception)
@pytest.fixture
def marathon_up():
yield
check_marathon_up()
@retrying.retry(stop_max_delay=timedelta(minutes=5).total_seconds() * 1000,
retry_on_exception=ignore_exception, wait_fixed=1000)
def check_marathon_up():
# testing to see if marathon is up and can talk through the gateway
    # ignore the exception until we have a successful response.
returncode, _, _ = exec_command(['dcos', 'marathon', 'app', 'list'])
assert returncode == 0
@retrying.retry(stop_max_delay=timedelta(minutes=5).total_seconds() * 1000,
retry_on_exception=ignore_exception)
def wait_marathon_down():
returncode, _, _ = exec_command(['dcos', 'marathon', 'app', 'list'])
assert returncode != 0
def test_leader_delete(marathon_up):
assert_command(['dcos', 'marathon', 'leader', 'delete'],
stdout=b'Leadership abdicated\n')
# There might be a slight delay until marathon shows itself as down,
# so marathon_up() might succeed directly and the next tests would
# run with an unhealthy marathon. Explicitly wait for marathon to
# go down before waiting for it to become healthy again.
wait_marathon_down()
check_marathon_up()
def _update_app(app_id, file_path):
with open(file_path) as fd:
returncode, stdout, stderr = exec_command(
['dcos', 'marathon', 'app', 'update', app_id],
stdin=fd)
assert returncode == 0
assert stdout.decode().startswith('Created deployment ')
assert stderr == b''
def _list_versions(app_id, expected_min_count, max_count=None):
cmd = ['dcos', 'marathon', 'app', 'version', 'list', app_id]
if max_count is not None:
cmd.append('--max-count={}'.format(max_count))
returncode, stdout, stderr = exec_command(cmd)
result = json.loads(stdout.decode('utf-8'))
assert returncode == 0
assert isinstance(result, list)
assert stderr == b''
# Marathon persists app versions indefinitely by ID, so there may be extras
assert len(result) >= expected_min_count
if max_count is not None:
assert len(result) <= max_count
def _list_tasks(expected_count=None, app_id=None):
cmd = ['dcos', 'marathon', 'task', 'list', '--json']
if app_id is not None:
cmd.append(app_id)
returncode, stdout, stderr = exec_command(cmd)
result = json.loads(stdout.decode('utf-8'))
assert returncode == 0
if expected_count:
assert len(result) == expected_count
assert stderr == b''
return result
def _stop_task(task_id, wipe=None, expect_success=True):
cmd = ['dcos', 'marathon', 'task', 'stop', task_id]
if wipe is not None:
cmd.append('--wipe')
returncode, stdout, stderr = exec_command(cmd)
if expect_success:
assert returncode == 0
assert stderr == b''
result = json.loads(stdout.decode('utf-8'))
assert result['id'] == task_id
else:
assert returncode == 1
def _kill_task(task_ids, scale=None, wipe=None, expect_success=True):
cmd = ['dcos', 'marathon', 'task', 'kill', '--json'] + task_ids
if scale:
cmd.append('--scale')
if wipe:
cmd.append('--wipe')
returncode, stdout, stderr = exec_command(cmd)
if expect_success:
assert returncode == 0
assert stderr == b''
result = json.loads(stdout.decode('utf-8'))
if scale:
assert 'deploymentId' in result
else:
assert sorted(
[task['id'] for task in result['tasks']]) == sorted(task_ids)
else:
assert returncode == 1
@contextlib.contextmanager
def _zero_instance_app():
with app('tests/data/marathon/apps/zero_instance_sleep.json',
'zero-instance-app'):
yield
@contextlib.contextmanager
def _zero_instance_app_through_http():
class JSONRequestHandler (BaseHTTPRequestHandler):
def do_GET(self): # noqa: N802
self.send_response(200)
self.send_header("Content-type", "application/json")
self.end_headers()
self.wfile.write(open(
'tests/data/marathon/apps/zero_instance_sleep.json',
'rb').read())
host = 'localhost'
port = 12345
server = HTTPServer((host, port), JSONRequestHandler)
thread = threading.Thread(target=server.serve_forever)
    thread.daemon = True
thread.start()
with app('http://{}:{}'.format(host, port), 'zero-instance-app'):
try:
yield
finally:
server.shutdown()
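# To run this suite against a cluster (illustrative only -- the file path and
# the test selection below are assumptions, not taken from this repository):
#
#   pytest tests/integration/cli/test_marathon.py -k "test_add_app or test_ping"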
|
the-stack_0_4415 | import unittest, traceback
from subprocess import TimeoutExpired
class TestcaseError(BaseException):
pass
class TestResult(unittest.TextTestResult):
def __init__(self, stream=None, descriptions=None, verbosity=0):
super(TestResult, self).__init__(stream, descriptions, verbosity)
self.success_count = 0
self.failures_count = 0
self.errors_count = 0
self.result = []
def addError(self, test, error):
self.errors_count += 1
self.saveTestCaseResult(test, "errored", error)
return super(TestResult, self).addError(test, error)
def addFailure(self, test, error):
self.failures_count += 1
self.saveTestCaseResult(test, "failed", error)
return super(TestResult, self).addFailure(test, error)
def addSuccess(self, test):
self.success_count += 1
self.saveTestCaseResult(test, "passed")
return super(TestResult, self).addSuccess(test)
def saveTestCaseResult(self, test, status, error=None):
result = {
"uid": test.testcase["uid"],
"stdin": repr(test.testcase["stdin"]),
"stdout": repr(test.stdout),
"stderr": test.response.stderr,
"generated_stdout": repr(test.response.stdout),
"status": status,
}
if error:
error = "".join(traceback.format_exception_only(error[0], error[1])).strip()
result["error"] = error
self.result.append(result)
class TestCase(unittest.TestCase):
def __init__(self, module, testcase, timeout=None):
unittest.TestCase.__init__(self)
self.module = module
self.testcase = testcase
self.timeout = timeout
def runTest(self):
self.stdout = str(self.testcase["expected_stdout"]).strip()
self.response = self.module.runTest(
stdin=self.testcase["stdin"], timeout=self.timeout
)
if self.response.returncode:
raise TestcaseError(self.response.stderr)
self.generated_stdout = self.response.stdout.strip()
if self.generated_stdout != self.stdout:
raise AssertionError(
"{} != {}".format(repr(self.generated_stdout), repr(self.stdout))
)
class Judger:
def __init__(self):
self.testresult = TestResult()
self.testsuite = unittest.TestSuite()
def _create_testsuite(self, module, testcases, timeout):
for testcase in testcases:
obj = TestCase(module=module, testcase=testcase, timeout=timeout)
self.testsuite.addTest(obj)
def judge(self, module, sourcefile, testcases, timeout=10):
self.result = {"tests": [], "compiler": {}, "summary": {}}
if hasattr(module, "compile"):
compiler = module.compile(sourcefile, timeout=timeout)
self.result["compiler"] = {
"returncode": compiler.returncode,
"error": compiler.stderr,
}
if compiler.returncode:
self.result["summary"]["status"] = "Compiler Error"
return
self._create_testsuite(module=module, testcases=testcases, timeout=timeout)
self.testsuite.run(self.testresult)
status = (
"Failed"
if (self.testresult.failures_count or self.testresult.errors_count)
else "Passed"
)
self.result.update(
{
"tests": self.testresult.result,
"summary": {
"success": self.testresult.success_count,
"failures": self.testresult.failures_count,
"errors": self.testresult.errors_count,
"status": status,
},
}
)
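# Minimal usage sketch (illustrative only): ``python3_backend`` stands for a
# language module exposing ``runTest()`` and optionally ``compile()`` as assumed
# by ``Judger.judge`` above; the file name and testcase values are made up.
#
#   judger = Judger()
#   judger.judge(module=python3_backend, sourcefile="solution.py",
#                testcases=[{"uid": 1, "stdin": "2 3\n", "expected_stdout": "5"}],
#                timeout=5)
#   print(judger.result["summary"])   # success/failures/errors counts plus "Passed"/"Failed"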
|
the-stack_0_4416 | from __future__ import print_function, division
import scipy
import torch.nn as nn
import torch.nn.functional as F
import torch
import functools
import datetime
import matplotlib.pyplot as plt
import sys
from data_loader import InMemoryDataLoader
import numpy as np
import pandas as pd
import os
import random
import argparse
import time
import torchvision
import tqdm
import warnings
from sklearn.metrics import accuracy_score
from models_gan_pytorch_2_bottlenec5x5 import *
from utils import *
# reproducibility
torch.manual_seed(777)
np.random.seed(777)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
class C_CC_GAN():
def __init__(self, root_data_path, train_size=-1,
img_rows = 112,img_cols = 112,channels = 3,
AU_num=35,
lambda_cl=1,lambda_cyc=1,
loss_type='loss_nonsaturating',
adam_lr=0.0002,adam_beta_1=0.5,adam_beta_2=0.999):
# paths
self.root_data_path = root_data_path
# Input shape
self.img_rows = img_rows
self.img_cols = img_cols
self.channels = channels
self.img_shape = (self.channels,self.img_rows, self.img_cols)
self.AU_num = AU_num
# Loss weights
self.lambda_cl = lambda_cl
self.lambda_cyc = lambda_cyc
# loss type
self.loss_type = loss_type
# optmizer params
self.adam_lr = adam_lr
self.adam_beta_1 = adam_beta_1
self.adam_beta_2 = adam_beta_2
# Configure data loader
self.data_loader = InMemoryDataLoader(dataset_name='EmotioNet',
img_res=(self.img_rows, self.img_cols,self.channels),
root_data_path=self.root_data_path,
normalize=True,
csv_columns = ['frame', "AU01_c" , "AU02_c" , "AU04_c",
"AU05_c", "AU06_c", "AU07_c", "AU09_c",
"AU10_c", "AU12_c", "AU14_c", "AU15_c",
"AU17_c" , "AU20_c" , "AU23_c", "AU25_c",
"AU26_c" , "AU45_c"],
max_images=train_size)
#optimizer = Adam(self.adam_lr, self.adam_beta_1, self.adam_beta_2)
# Build and compile the discriminators
self.d = Discriminator(img_shape=self.img_shape,df=64,AU_num=self.AU_num).to(device)
#self.d.init_weights()
print("******** Discriminator/Classifier ********")
print(self.d)
# Build the generators
self.g = Generator(img_shape=(3,112,112),gf=64,AU_num=self.AU_num).to(device)
#xself.g.init_weights()
print("******** Generator ********")
print(self.g)
##
self.g_optimizer = torch.optim.Adam(self.g.parameters(), self.adam_lr, betas=(self.adam_beta_1, self.adam_beta_2))
self.d_optimizer = torch.optim.Adam(self.d.parameters(), self.adam_lr, betas=(self.adam_beta_1, self.adam_beta_2))
def train(self, epochs, batch_size=1, sample_interval=50 , d_g_ratio=5):
start_time = datetime.datetime.now()
# logs
        epoch_history, batch_i_history = [], []
        d_gan_loss_history, d_au_loss_history = [], []
        g_gan_loss_history, g_au_loss_history = [], []
reconstr_history = []
##
self.g.train()
self.d.train()
for epoch in range(epochs):
for batch_i, (labels0 , imgs) in enumerate(self.data_loader.load_batch(batch_size=batch_size)):
imgs = np.transpose(imgs,(0,3,1,2))
dtype = torch.cuda.FloatTensor if torch.cuda.is_available() else torch.FloatTensor
labels0, imgs = torch.tensor(labels0).to(device).type(dtype), torch.tensor(imgs).to(device).type(dtype)
if self.loss_type == 'loss_nonsaturating':
d_loss , d_loss_dict , g_loss, g_loss_dict = loss_nonsaturating(self.g, self.d,
imgs, labels0,
self.lambda_cl, self.lambda_cyc,
self.data_loader,
device,
train_generator=(batch_i % d_g_ratio == 0))
## opt. discr.
self.d_optimizer.zero_grad()
d_loss.backward(retain_graph=True)
self.d_optimizer.step()
## opt. gen.
if g_loss is not None:
self.g_optimizer.zero_grad()
g_loss.backward()
self.g_optimizer.step()
elif self.loss_type == 'loss_wasserstein_gp':
# train critic
d_loss_dict = train_D_wasserstein_gp(self.g, self.d, imgs, labels0,
self.lambda_cl, self.lambda_cyc,
self.data_loader,
device,self.d_optimizer)
# train generator
if batch_i % d_g_ratio == 0:
g_loss_dict = train_G_wasserstein_gp(self.g, self.d, imgs, labels0,
self.lambda_cl, self.lambda_cyc,
self.data_loader,
device,self.g_optimizer)
else:
raise Exception("Unknown loss type::"+str(self.loss_type))
torch.cuda.empty_cache()
elapsed_time = datetime.datetime.now() - start_time
try:
if batch_i % d_g_ratio == 0:
print ("[Epoch %d/%d] [Batch %d/%d] [D_gan loss: %f, D_AU_loss: %f] [G_gan loss: %05f, G_AU_loss: %05f, recon: %05f] time: %s " \
% ( epoch, epochs,
batch_i, self.data_loader.n_batches,
d_loss_dict['d_adv_loss'], d_loss_dict['d_cl_loss'],
g_loss_dict['g_adv_loss'],g_loss_dict['g_cl_loss'], g_loss_dict['rec_loss'],
elapsed_time))
else:
print ("[Epoch %d/%d] [Batch %d/%d] [D_gan loss: %f, D_AU_loss: %f] time: %s " \
% ( epoch, epochs,
batch_i, self.data_loader.n_batches,
d_loss_dict['d_adv_loss'], d_loss_dict['d_cl_loss'],
elapsed_time))
except:
print("*** problem to log ***")
# log
if batch_i % d_g_ratio == 0:
epoch_history.append(epoch)
batch_i_history.append(batch_i)
d_gan_loss_history.append(d_loss_dict['d_adv_loss'].cpu().detach().numpy())
d_au_loss_history.append(d_loss_dict['d_cl_loss'].cpu().detach().numpy())
g_gan_loss_history.append(g_loss_dict['g_adv_loss'].cpu().detach().numpy())
g_au_loss_history.append(g_loss_dict['g_cl_loss'].cpu().detach().numpy())
reconstr_history.append(g_loss_dict['rec_loss'].cpu().detach().numpy())
# If at save interval => save generated image samples
if batch_i % sample_interval == 0:
with torch.no_grad():
self.g.eval()
self.sample_images(epoch, batch_i)
#self.sample_images(epoch, batch_i,use_leo=True)
self.g.train()
train_history = pd.DataFrame({
'epoch': epoch_history,
'batch': batch_i_history,
'd_gan_loss': d_gan_loss_history,
'd_AU_loss': d_au_loss_history,
'g_gan_loss': g_gan_loss_history,
'g_AU_loss': g_au_loss_history,
'reconstr_loss': reconstr_history
})
train_history.to_csv(str(sys.argv[0]).split('.')[0]+'_train_log.csv',index=False)
def sample_images(self, epoch, batch_i):
for labels0 , imgs in self.data_loader.load_batch(batch_size=1):
## disc
imgs_d = np.transpose(imgs,(0,3,1,2))
dtype = torch.cuda.FloatTensor if torch.cuda.is_available() else torch.FloatTensor
labels0_d, imgs_d = torch.tensor(labels0).to(device).type(dtype), torch.tensor(imgs_d).to(device).type(dtype)
#gan_pred_prob,au_prob = self.d(imgs_d)
#des_au_1 = torch.tensor(self.data_loader.gen_rand_cond(batch_size=1)).to(device).type(dtype)
des_au_1 = torch.tensor(self.data_loader.gen_rand_cond_for_binary_au(labels0)).to(device).type(dtype)[0]
# Translate images
zs = self.g.encode(imgs_d)
# Reconstruct image
reconstr_ = self.g.translate_decode(zs,labels0_d)
# Transl. image
transl_ = self.g.translate_decode(zs,des_au_1)
## save reconstraction
if not os.path.exists('log_images'):
os.makedirs('log_images')
#plot reconstr_
reconstr_ = reconstr_.cpu()
reconstr_ = np.transpose(reconstr_.detach().numpy(),(0,2,3,1))
with warnings.catch_warnings():
warnings.simplefilter("ignore")
plot_grid(np.concatenate([imgs, reconstr_]),
row_titles=None,
col_titles=["Orig.[ep:%d]" % (epoch),'Reconstr.'],
nrow = 1,ncol = 2,
save_filename="log_images/reconstr_%d_%d.png" % (epoch, batch_i))
#plot transl_
transl_ = transl_.cpu()
transl_ = np.transpose(transl_.detach().numpy(),(0,2,3,1))
with warnings.catch_warnings():
warnings.simplefilter("ignore")
plot_grid(np.concatenate([imgs, transl_]),
row_titles=None,
col_titles=["Orig.[ep:%d]" % (epoch),'Transl.'],
nrow = 1,ncol = 2,
save_filename="log_images/translat_%d_%d.png" % (epoch, batch_i))
####
n_row = 4 # alpha
n_col = 9 # AUs
col_names = ['AU1_r','AU2_r','AU4_r','AU5_r','AU10_r',
'AU12_r','AU15_r','AU25_r','AU45_r']
col_idx = [0,1,2,3,7,8,10,14,16]
assert len(col_names) == len(col_idx)
alphas = [0,.33,.66,1]
au_grid = np.repeat(labels0,n_row*n_col,axis=0)
img_tens = np.repeat(imgs,n_row*n_col,axis=0)
n = 0
for r in range(n_row):
for c in range(n_col):
au_n = au_grid[[n],:]
au_n[0,col_idx[c]] = alphas[r]
au_n = torch.tensor(au_n).to(device).type(dtype)
#
act_au = self.g.translate_decode(zs,au_n)
act_au = act_au.cpu()
act_au = np.transpose(act_au.detach().numpy(),(0,2,3,1))
img_tens[n,:] = act_au
n += 1
#plot
col_names_plot = ['AU1','AU2','AU4','AU5','AU10','AU12','AU15','AU25','AU45']
with warnings.catch_warnings():
warnings.simplefilter("ignore")
plot_grid(img_tens,
row_titles=alphas,
col_titles=col_names_plot,
nrow = n_row,ncol = n_col,
save_filename="log_images/au_edition_%d_%d.png" % (epoch, batch_i))
break
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Train')
parser.add_argument('-lambda_cl', help='loss weight for cond. regress. loss', dest='lambda_cl', type=float, default=100)
parser.add_argument('-lambda_cyc', help='reconstr. loss weight', dest='lambda_cyc', type=float, default=10)
parser.add_argument('-loss_type', help='loss type [loss_nonsaturating] ', dest='loss_type', type=str, default='loss_wasserstein_gp')
parser.add_argument('-d_g_ratio', help='# train iterations of critic per each train iteration of generator', dest='d_g_ratio', type=int, default=1)
parser.add_argument('-adam_lr', help='Adam l.r.', dest='adam_lr', type=float, default=0.0002)
parser.add_argument('-adam_beta_1', help='Adam beta-1', dest='adam_beta_1', type=float, default=0.5)
parser.add_argument('-adam_beta_2', help='Adam beta-2', dest='adam_beta_2', type=float, default=0.999)
parser.add_argument('-epochs', help='N. epochs', dest='epochs', type=int, default=170)
parser.add_argument('-batch_size', help='batch size', dest='batch_size', type=int, default=32)
parser.add_argument('-sample_interval', help='sample interval', dest='sample_interval', type=int, default=1000)
parser.add_argument('-root_data_path', help='base file path', dest='root_data_path', type=str, default='datasets')
parser.add_argument('-train_size', help='train size [-1 for all train data]', dest='train_size', type=int, default=-1)
args = parser.parse_args()
# print parameters
print('-' * 30)
print('Parameters .')
print('-' * 30)
for key, value in vars(args).items():
print('{:<20} := {}'.format(key, value))
print('-' * 30)
# GAN
root_data_path = args.root_data_path
gan = C_CC_GAN(
root_data_path = root_data_path,
train_size = args.train_size,
AU_num=17,
lambda_cl=args.lambda_cl,lambda_cyc=args.lambda_cyc,
loss_type=args.loss_type,
adam_lr=args.adam_lr,adam_beta_1=args.adam_beta_1,adam_beta_2=args.adam_beta_2)
gan.train(epochs=args.epochs,
batch_size=args.batch_size,
sample_interval=args.sample_interval,
d_g_ratio=args.d_g_ratio)
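# Example invocation (a sketch only -- the script name, dataset location and
# flag values below are assumptions, not taken from the original project):
#
#   python train_gan.py -root_data_path datasets -loss_type loss_wasserstein_gp \
#       -batch_size 32 -epochs 170 -d_g_ratio 1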
|
the-stack_0_4418 | # ----------------------------------------------------------------------------
# Copyright (c) 2020, Franck Lejzerowicz.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import re, ast
from setuptools import find_packages, setup
classes = """
License :: OSI Approved :: BSD License
Topic :: Scientific/Engineering
Topic :: Scientific/Engineering :: Bio-Informatics
Programming Language :: Python :: 3.5
Programming Language :: Python :: 3 :: Only
Operating System :: Unix
Operating System :: POSIX
Operating System :: MacOS :: MacOS X
"""
classifiers = [s.strip() for s in classes.split('\n') if s]
description = (
""
)
with open("README.md") as f:
long_description = f.read()
_version_re = re.compile(r"__version__\s+=\s+(.*)")
with open("Xmmvec/__init__.py", "rb") as f:
hit = _version_re.search(f.read().decode("utf-8")).group(1)
version = str(ast.literal_eval(hit))
standalone = ['Xmmvec=Xmmvec.scripts._standalone_xmmvec:standalone_xmmvec']
setup(
name="Xmmvec",
version=version,
license="BSD",
description=description,
long_description=long_description,
long_description_content_type="text/markdown",
author="Franck Lejzerowicz",
author_email="[email protected]",
maintainer="Franck Lejzerowicz",
maintainer_email="[email protected]",
url="https://github.com/FranckLejzerowicz/Xmmvec",
packages=find_packages(),
install_requires=[
"click >= 6.7",
'pandas >= 0.19.0',
'numpy >= 1.12.1',
'altair >= 4.1.0',
],
classifiers=classifiers,
entry_points={'console_scripts': standalone},
package_data={},
python_requires='>=3.5',
)
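# Typical development workflow (illustrative only):
#
#   pip install -e .
#   Xmmvec --help   # console script registered via ``entry_points`` above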
|
the-stack_0_4419 | from typing import Dict
from dbnd import parameter
from dbnd._core.settings import EngineConfig
from dbnd_docker.docker_ctrl import DockerRunCtrl
class AwsBatchConfig(EngineConfig):
"""Amazon Web Services Batch"""
_conf__task_family = "aws_batch"
job_definition = parameter(description="the job definition name on AWS Batch").none[
str
]
overrides = parameter(
empty_default=True,
description="the same parameter that boto3 will receive on containerOverrides (templated)"
" http://boto3.readthedocs.io/en/latest/reference/services/batch.html#submit_job",
)[Dict[str, str]]
job_queue = parameter(description="the queue name on AWS Batch")[str]
max_retries = parameter(
description="exponential backoff retries while waiter is not merged, 4200 = 48 hours"
)[int]
def get_docker_ctrl(self, task_run):
return AWSBatchCtrl(task_run=task_run)
class AWSBatchCtrl(DockerRunCtrl):
"""
Execute a job on AWS Batch Service
"""
def __init__(self, **kwargs):
super(AWSBatchCtrl, self).__init__(**kwargs)
self.runner_op = None
@property
def aws_batch_config(self):
# type: (AWSBatchCtrl) -> AwsBatchConfig
return self.task.docker_engine
def docker_run(self):
dc = self.aws_batch_config
if dc.job_definition is None:
raise Exception("Please define aws batch definition first")
from airflow.contrib.operators.awsbatch_operator import AWSBatchOperator
cloud_config = self.task.task_env
self.runner_op = AWSBatchOperator(
task_id=self.task_id,
job_name=self.job.job_id,
# per task settings
job_definition=dc.job_definition,
overrides=dc.overrides,
# more global
job_queue=dc.job_queue,
max_retries=dc.max_retries,
aws_conn_id=cloud_config.conn_id,
region_name=cloud_config.region_name,
)
self.runner_op.execute(context=None)
def on_kill(self):
if self.runner_op is not None:
self.runner_op.on_kill()
|
the-stack_0_4420 | import os
import sys
import numpy as np
import importlib
from dataclasses import dataclass
from loguru import logger
from tqdm import tqdm
import psutil
__all__ = [
'sanitize_filename',
'get_tqdm',
'show_docstring',
'Results',
]
def _is_ipython_notebook(): # pragma: no cover
try:
shell = get_ipython().__class__.__name__
if shell == 'ZMQInteractiveShell':
return True # Jupyter notebook or qtconsole
if shell == 'TerminalInteractiveShell':
return False # Terminal running IPython
return False # Other type (?)
except NameError:
return False # Probably standard Python interpreter
def config_logger(fmt, loglevel): # pragma: no cover
r"""
Configures loguru logger with the given format and log level.
Parameters
----------
fmt : str
loguru-compatible format used to format logger messages.
loglevel : str
Determines what messages to get printed in console. Options are:
"TRACE", "DEBUG", "INFO", "SUCCESS", "WARNING", "ERROR", "CRITICAL"
Returns
-------
None.
"""
logger.remove()
logger.add(lambda msg: tqdm.write(msg, end=""),
level=loglevel,
format=fmt,
colorize=True)
@dataclass
class Settings: # pragma: no cover
r"""
A dataclass for use at the module level to store settings. This class
is defined as a Singleton so now matter how or where it gets
instantiated the same object is returned, containing all existing
settings.
Parameters
----------
notebook : boolean
Is automatically determined upon initialization of PoreSpy, and is
``True`` if running within a Jupyter notebook and ``False``
otherwise. This is used by the ``porespy.tools.get_tqdm`` function
to determine whether a standard or a notebook version of the
progress bar should be used.
tqdm : dict
        This dictionary is passed directly to the ``tqdm`` function
throughout PoreSpy (``for i in tqdm(range(N), **settings.tqdm)``).
To see a list of available options visit the tqdm website.
Probably the most important is ``'disable'`` which when set to
``True`` will silence the progress bars. It's also possible to
adjust the formatting such as ``'colour'`` and ``'ncols'``, which
controls width.
logger_fmt : str
luguru-compatible format used to format the logger messages.
loglevel : str, or int
Determines what messages to get printed in console. Options are:
"TRACE" (5), "DEBUG" (10), "INFO" (20), "SUCCESS" (25), "WARNING" (30),
"ERROR" (40), "CRITICAL" (50)
"""
__instance__ = None
# Might need to add 'file': sys.stdout to tqdm dict
tqdm = {'disable': False,
'colour': None,
'ncols': None,
'leave': False,
'file': sys.stdout}
_logger_fmt = '<green>{time:YYYY-MM-DD HH:mm:ss}</green> | ' \
'<level>{level: <8}</level> | ' \
'<cyan>{name}</cyan>:<cyan>{function}</cyan>:<cyan>{line}</cyan>' \
'\n--> <level>{message}</level>'
_loglevel = "ERROR" if _is_ipython_notebook() else "WARNING"
config_logger(_logger_fmt, _loglevel)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._notebook = None
self._ncores = psutil.cpu_count()
@property
def logger_fmt(self):
return self._logger_fmt
@property
def loglevel(self):
return self._loglevel
@logger_fmt.setter
def logger_fmt(self, value):
self._logger_fmt = value
config_logger(fmt=value, loglevel=self.loglevel)
@loglevel.setter
def loglevel(self, value):
if isinstance(value, int):
options = {5: "TRACE",
10: "DEBUG",
20: "INFO",
                       25: "SUCCESS",
30: "WARNING",
40: "ERROR",
50: "CRITICAL"}
value = options[value]
self._loglevel = value
os.environ["LOGURU_LEVEL"] = value
config_logger(fmt=self.logger_fmt, loglevel=value)
def __new__(cls):
if Settings.__instance__ is None:
Settings.__instance__ = super().__new__(cls)
return Settings.__instance__
def __repr__(self):
indent = 0
for item in self.__dir__():
if not item.startswith('_'):
indent = max(indent, len(item) + 1)
s = ''
for item in self.__dir__():
if not item.startswith('_'):
s += ''.join((item, ':', ' '*(indent-len(item))))
attr = getattr(self, item)
temp = ''.join((attr.__repr__(), '\n'))
if isinstance(attr, dict):
temp = temp.replace(',', '\n' + ' '*(indent + 1))
s += temp
return s
def _get_ncores(self):
if self._ncores is None:
self._ncores = psutil.cpu_count()
return self._ncores
def _set_ncores(self, val):
if val is None:
val = psutil.cpu_count()
elif val > psutil.cpu_count():
logger.error('Value is more than the available number of cores')
val = psutil.cpu_count()
self._ncores = val
ncores = property(fget=_get_ncores, fset=_set_ncores)
def _get_notebook(self):
if self._notebook is None:
self._notebook = _is_ipython_notebook()
return self._notebook
def _set_notebook(self, val):
logger.error('This value is determined automatically at runtime')
notebook = property(fget=_get_notebook, fset=_set_notebook)
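# Usage sketch for the module-level settings singleton (everything referenced
# here is defined in the class above; the values are just examples):
#
#   settings = Settings()
#   settings.loglevel = "INFO"        # or a numeric level such as 20
#   settings.tqdm['disable'] = True   # silence all progress bars
#   settings.ncores = 2               # cap parallel workers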
def get_tqdm(): # pragma: no cover
r"""
Fetches a version of ``tqdm`` function that depends on the environment.
Either text-based for the IPython console or gui-based for Jupyter
notebooks.
Returns
-------
tqdm : function handle
The function to use when wrapping an iterator (i.e. tqdm(range(n)))
"""
if Settings().notebook is True:
tqdm = importlib.import_module('tqdm.notebook')
else:
tqdm = importlib.import_module('tqdm')
return tqdm.tqdm
def show_docstring(func): # pragma: no cover
r"""
Fetches the docstring for a function and returns it in markdown format.
Useful for printing in a Jupyter notebook.
Parameters
----------
func : object
Function handle to function whose docstring is desired
Returns
-------
md : str
A string with the markdown syntax included, suitable for printing
in a Jupyter notebook using the ``IPython.display.Markdown``
function.
"""
title = f'---\n ## Documentation for ``{func.__name__}``\n ---\n'
try:
from npdoc_to_md import render_md_from_obj_docstring
txt = render_md_from_obj_docstring(obj=func, obj_namespace=func.__name__)
except ModuleNotFoundError:
txt = func.__doc__
return title + txt + '\n---'
def sanitize_filename(filename, ext, exclude_ext=False):
r"""
Returns a sanitized string in the form of name.extension
Parameters
----------
filename : str
Unsanitized filename, could be 'test.vtk' or just 'test'
ext : str
Extension of the file, could be 'vtk'
exclude_ext : bool
If True, the returned string doesn't have the extension
Returns
-------
sanitized : str
Sanitized filename in form of name.extension
"""
    ext = ext.strip(".")
if filename.endswith(f".{ext}"):
name = ".".join(filename.split(".")[:-1])
else:
name = filename
filename_formatted = f"{name}" if exclude_ext else f"{name}.{ext}"
return filename_formatted
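# For illustration, a few expected results of ``sanitize_filename``:
#
#   sanitize_filename("test.vtk", "vtk")                    -> "test.vtk"
#   sanitize_filename("test", "vtk")                        -> "test.vtk"
#   sanitize_filename("test.vtk", "vtk", exclude_ext=True)  -> "test"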
class Results:
r"""
A minimal class for use when returning multiple values from a function
This class supports dict-like assignment and retrieval
(``obj['im'] = im``), namedtuple-like attribute look-ups (``obj.im``),
and generic class-like object assignment (``obj.im = im``)
"""
_value = "Description"
_key = "Item"
def __iter__(self):
for item in self.__dict__.values():
yield item
def __getitem__(self, key):
return getattr(self, key)
def __setitem__(self, key, value):
self.__dict__[key] = value
def __str__(self):
header = "―" * 78
lines = [header, "{0:<25s} {1}".format(self._key, self._value), header]
for item in list(self.__dict__.keys()):
if item.startswith('_'):
continue
if (isinstance(self[item], np.ndarray)):
s = np.shape(self[item])
if (self[item].ndim > 1):
lines.append("{0:<25s} Image of size {1}".format(item, s))
else:
lines.append("{0:<25s} Array of size {1}".format(item, s))
else:
lines.append("{0:<25s} {1}".format(item, self[item]))
lines.append(header)
return "\n".join(lines)
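# A minimal usage sketch of ``Results`` (illustrative only; the attribute
# names ``im`` and ``porosity`` are made-up examples, not PoreSpy outputs):
if __name__ == "__main__":
    demo = Results()
    demo.im = np.ones((5, 5))          # generic class-like assignment
    demo["porosity"] = 0.5             # dict-like assignment
    print(demo)                        # formatted table via __str__
    print(demo.porosity, demo["im"].shape)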
|
the-stack_0_4421 | import ezdxf
import random # needed for random placing points
def get_random_point():
"""Creates random x, y coordinates."""
x = random.randint(-100, 100)
y = random.randint(-100, 100)
return x, y
# Create a new drawing in the DXF format of AutoCAD 2010
dwg = ezdxf.new('ac1024')
# Create a block with the name 'FLAG'
flag = dwg.blocks.new(name='FLAG')
# Add DXF entities to the block 'FLAG'.
# The default base point (= insertion point) of the block is (0, 0).
flag.add_polyline2d([(0, 0), (0, 5), (4, 3), (0, 3)]) # the flag as 2D polyline
flag.add_circle((0, 0), .4, dxfattribs={'color': 2}) # mark the base point with a circle
flag.add_linear_dim((1, 3), (0, 3), (4, 3), dxfattribs={'color': 2})
# Get the modelspace of the drawing.
modelspace = dwg.modelspace()
# Get 50 random placing points.
placing_points = [get_random_point() for _ in range(50)]
for point in placing_points:
# Every flag has a different scaling and a rotation of -15 deg.
random_scale = 0.5 + random.random() * 2.0
# Add a block reference to the block named 'FLAG' at the coordinates 'point'.
modelspace.add_blockref('FLAG', point, dxfattribs={
'xscale': random_scale,
'yscale': random_scale,
'rotation': -15
})
# Save the drawing.
dwg.saveas("/home/ebi/blockref_tutorial.dxf")
|
the-stack_0_4424 | from typing import Optional, Tuple
import torch
import torch.nn as nn
from torch.distributions import Categorical, Normal
class BasePolicy(nn.Module):
"""
Basic implementation of a general Policy
:param state_dim: State dimensions of the environment
:param action_dim: Action dimensions of the environment
:param hidden: Sizes of hidden layers
:param discrete: True if action space is discrete, else False
:type state_dim: int
:type action_dim: int
:type hidden: tuple or list
:type discrete: bool
"""
def __init__(
self, state_dim: int, action_dim: int, hidden: Tuple, discrete: bool, **kwargs
):
super(BasePolicy, self).__init__()
self.state_dim = state_dim
self.action_dim = action_dim
self.hidden = hidden
self.discrete = discrete
self.action_lim = kwargs["action_lim"] if "action_lim" in kwargs else 1.0
self.action_var = kwargs["action_var"] if "action_var" in kwargs else 0.1
self.sac = kwargs["sac"] if "sac" in kwargs else False
if self.sac:
self.fc_mean = nn.Linear(self.hidden[-1], self.action_dim)
self.fc_std = nn.Linear(self.hidden[-1], self.action_dim)
self.model = None
def forward(
self, state: torch.Tensor
) -> (Tuple[torch.Tensor, Optional[torch.Tensor]]):
"""
Defines the computation performed at every call.
:param state: The state being passed as input to the policy
:type state: Tensor
"""
state = self.model.forward(state)
if self.sac:
state = nn.ReLU()(state)
mean = self.fc_mean(state)
log_std = self.fc_std(state)
log_std = torch.clamp(log_std, min=-20.0, max=2.0)
return mean, log_std
return state
def get_action(
self, state: torch.Tensor, deterministic: bool = False
) -> torch.Tensor:
"""
Get action from policy based on input
:param state: The state being passed as input to the policy
:param deterministic: (True if the action space is deterministic,
else False)
:type state: Tensor
:type deterministic: boolean
:returns: action
"""
action_probs = self.forward(state)
if self.discrete:
action_probs = nn.Softmax(dim=-1)(action_probs)
if deterministic:
action = (torch.argmax(action_probs, dim=-1), None)
else:
distribution = Categorical(probs=action_probs)
action = (distribution.sample(), distribution)
else:
action_probs = nn.Tanh()(action_probs) * self.action_lim
if deterministic:
action = (action_probs, None)
else:
distribution = Normal(action_probs, self.action_var)
action = (distribution.sample(), distribution)
return action
class BaseValue(nn.Module):
"""
Basic implementation of a general Value function
"""
def __init__(self, state_dim: int, action_dim: int):
super(BaseValue, self).__init__()
self.state_dim = state_dim
self.action_dim = action_dim
self.model = None
def forward(self, state: torch.Tensor) -> torch.Tensor:
"""
Defines the computation performed at every call.
:param state: Input to value function
:type state: Tensor
"""
return self.model.forward(state)
def get_value(self, state: torch.Tensor) -> torch.Tensor:
"""
Get value from value function based on input
:param state: Input to value function
:type state: Tensor
:returns: Value
"""
return self.forward(state).squeeze(-1)
class BaseActorCritic(nn.Module):
"""
Basic implementation of a general Actor Critic
"""
def __init__(self):
super(BaseActorCritic, self).__init__()
self.actor = None
self.critic = None
def get_action(
self, state: torch.Tensor, deterministic: bool = False
) -> torch.Tensor:
"""
Get action from the Actor based on input
:param state: The state being passed as input to the Actor
:param deterministic: (True if the action space is deterministic,
else False)
:type state: Tensor
:type deterministic: boolean
:returns: action
"""
state = torch.as_tensor(state).float()
return self.actor.get_action(state, deterministic=deterministic)
def get_value(self, state: torch.Tensor) -> torch.Tensor:
"""
Get value from the Critic based on input
:param state: Input to the Critic
:type state: Tensor
:returns: value
"""
state = torch.as_tensor(state).float()
return self.critic.get_value(state)
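# A minimal concrete policy built on ``BasePolicy`` (illustrative only; the
# two-layer MLP and the hidden sizes are assumptions, not part of this file):
class _ExampleMLPPolicy(BasePolicy):
    def __init__(self, state_dim, action_dim, hidden=(32, 32), discrete=True, **kwargs):
        super(_ExampleMLPPolicy, self).__init__(
            state_dim, action_dim, hidden, discrete, **kwargs
        )
        layers = []
        prev_dim = state_dim
        for size in hidden:
            layers += [nn.Linear(prev_dim, size), nn.ReLU()]
            prev_dim = size
        layers.append(nn.Linear(prev_dim, action_dim))
        self.model = nn.Sequential(*layers)
if __name__ == "__main__":
    # Quick smoke test: sample a discrete action and its distribution
    policy = _ExampleMLPPolicy(state_dim=4, action_dim=2)
    action, dist = policy.get_action(torch.randn(4))
    print(action, dist)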
|
the-stack_0_4426 | from diff_prof.diffusion_profiles import DiffusionProfiles
from msi.msi import MSI
import os
import numpy as np
def test_diffusion_profiles(ref_dp_file_path, calculated_dp_file_path):
# Saved
# this is loading the reference directory
dp_saved = DiffusionProfiles(alpha=None, max_iter=None, tol=None,
weights=None, num_cores=None, save_load_file_path=ref_dp_file_path)
msi_saved = MSI()
msi_saved.load()
msi_saved.load_saved_node_idx_mapping_and_nodelist(
dp_saved.save_load_file_path)
dp_saved.load_diffusion_profiles(
msi_saved.drugs_in_graph + msi_saved.indications_in_graph)
# Calculated
dp_calculated = DiffusionProfiles(alpha=None, max_iter=None, tol=None,
weights=None, num_cores=None, save_load_file_path=calculated_dp_file_path)
msi_calculated = MSI()
msi_calculated.load()
msi_calculated.load_saved_node_idx_mapping_and_nodelist(
dp_calculated.save_load_file_path)
dp_calculated.load_diffusion_profiles(
msi_calculated.drugs_in_graph + msi_calculated.indications_in_graph)
# Compare
# Make sure have diffusion profiles for the same drugs and indications
assert(set(dp_saved.drug_or_indication2diffusion_profile.keys()) ==
set(dp_calculated.drug_or_indication2diffusion_profile.keys()))
# Reorder calculated diffusion profile for consistency with saved diffusion profile
calculated_reorder_idxs = [msi_calculated.node2idx[node]
for node in msi_saved.nodelist]
for drug_or_indication, saved_diffusion_profile in dp_saved.drug_or_indication2diffusion_profile.items():
calculated_diffusion_profile = dp_calculated.drug_or_indication2diffusion_profile[
drug_or_indication]
# Reorder calculated diffusion_profile according to saved
calculated_diffusion_profile = calculated_diffusion_profile[calculated_reorder_idxs]
# Ensure close enough
assert(np.allclose(saved_diffusion_profile, calculated_diffusion_profile))
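# Illustrative call (the directory names are assumptions; each path should
# point at a DiffusionProfiles save/load directory):
#
#   test_diffusion_profiles("results/reference_dp", "results/calculated_dp")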
|
the-stack_0_4427 | # -*- coding: utf-8 -*-
from __future__ import print_function
import sys
import os
import codecs
import numpy as np
import hashlib
import random
import preprocess
class Preparation(object):
    '''Convert datasets of different text matching tasks into a unified format as the input of deep matching modules. Users provide datasets that contain pairs of texts along with their labels, and the module produces the following files:
    * Word Dictionary: this file records the mapping from each word to a unique identifier.
    * Corpus File: this file records the mapping from each text to a unique identifier, along with a sequence of word identifiers contained in the text.
* Relation File: this file records the relationship between two texts, each line containing the label and a pair of ids.
'''
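    # For illustration (the values are made up), save_corpus() and save_relation()
    # below write plain-text lines of the form:
    #   corpus file:   T0 what is the fastest land animal
    #   relation file: 1 T0 T1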
def __init__(self):
pass
def get_text_id(self, hashid, text, idtag='T'):
hash_obj = hashlib.sha1(text.encode('utf8')) # if the text are the same, then the hash_code are also the same
hex_dig = hash_obj.hexdigest()
if hex_dig in hashid:
return hashid[hex_dig]
else:
tid = idtag + str(len(hashid)) # start from 0, 1, 2, ...
hashid[hex_dig] = tid
return tid
def parse_line(self, line, delimiter='\t'):
subs = line.split(delimiter)
# print('subs: ', len(subs))
if 3 != len(subs):
raise ValueError('format of data file wrong, should be \'label,text1,text2\'.')
else:
return subs[0], subs[1], subs[2]
def parse_line_for_quora(self, line, delimiter='","'):
subs = line.split(delimiter)
#print('subs: ', len(subs))
# if subs[1]=="qid1":
# return
if 6 != len(subs):
# print( "line__not satisfied",line)
# raise ValueError('format of data file wrong, should be \'label,text1,text2\'.')
return 0, 0, 0, 0, 0
else:
return subs[1], subs[2], subs[3], subs[4], subs[5][0]
def run_with_one_corpus_for_quora(self, file_path):
# hashid = {}
corpus = {}
rels = []
f = codecs.open(file_path, 'r', encoding='utf8')
next(f)
for line in f:
# print("", i)
# print("", i)
# line = line.decode('utf8')
line = line.strip()
qid1, qid2, q1, q2, label = self.parse_line_for_quora(line, "\t")
if q1 != 0:
corpus[qid1] = q1
corpus[qid2] = q2
rels.append((label, qid1, qid2))
f.close()
return corpus, rels
def run_with_one_corpus(self, file_path):
hashid = {}
corpus = {}
rels = []
f = codecs.open(file_path, 'r', encoding='utf8')
for line in f:
line = line.strip()
label, t1, t2 = self.parse_line(line)
id1 = self.get_text_id(hashid, t1, 'T')
id2 = self.get_text_id(hashid, t2, 'T')
corpus[id1] = t1
corpus[id2] = t2
rels.append((label, id1, id2))
f.close()
return corpus, rels
def run_with_two_corpus(self, file_path):
hashid_q = {}
hashid_d = {}
corpus_q = {}
corpus_d = {}
rels = []
f = codecs.open(file_path, 'r', encoding='utf8')
for line in f:
line = line.strip()
label, t1, t2 = self.parse_line(line)
id1 = self.get_text_id(hashid_q, t1, 'Q')
id2 = self.get_text_id(hashid_d, t2, 'D')
corpus_q[id1] = t1
corpus_d[id2] = t2
rels.append((label, id1, id2))
f.close()
return corpus_q, corpus_d, rels
def run_with_train_valid_test_corpus(self, train_file, valid_file, test_file):
'''
        Run with pre-split train_file, valid_file, test_file
The input format should be label \t text1 \t text2
The query ids can't be duplicated. For the same query
id, the document ids can't be duplicated.
Note that if we make queries with unique id (fixed 10 candidates for a single query), then it is
possible that multiple queries have different query ids, but with the same text (in rare cases)
:param train_file: train file
:param valid_file: valid file
:param test_file: test file
:return: corpus, rels_train, rels_valid, rels_test
'''
hashid = {}
corpus = {}
rels = []
rels_train = []
rels_valid = []
rels_test = []
# merge corpus files, but return rels for train/valid/test seperately
curQ = 'init'
curQid = 0
for file_path in list([train_file, valid_file, test_file]):
if file_path == train_file:
rels = rels_train
elif file_path == valid_file:
rels = rels_valid
            elif file_path == test_file:
rels = rels_test
f = codecs.open(file_path, 'r', encoding='utf8')
for line in f:
                line = line.strip()
label, t1, t2 = self.parse_line(line)
id2 = self.get_text_id(hashid, t2, 'D')
# generate unique query ids
if t1 == curQ:
# same query
id1 = 'Q' + str(curQid)
else:
# new query
curQid += 1
id1 = 'Q' + str(curQid)
curQ = t1
corpus[id1] = t1
corpus[id2] = t2
rels.append((label, id1, id2))
f.close()
return corpus, rels_train, rels_valid, rels_test
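    # Illustrative sketch (added; not part of the original module): given the
    # tab-delimited lines
    #     1<TAB>what is python<TAB>python is a language
    #     0<TAB>what is python<TAB>java is a language
    #     1<TAB>what is java<TAB>java is a popular language
    # consecutive lines sharing the same query text reuse the same query id, so
    # run_with_train_valid_test_corpus would produce relations roughly like
    #     ('1', 'Q1', 'D0'), ('0', 'Q1', 'D1'), ('1', 'Q2', 'D2')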
@staticmethod
def save_corpus(file_path, corpus):
f = codecs.open(file_path, 'w', encoding='utf8')
for qid, text in corpus.items():
f.write('%s %s\n' % (qid, text))
f.close()
@staticmethod
def merge_corpus(train_corpus, valid_corpus, test_corpus):
# cat train valid test > corpus.txt
# cat corpus_train.txt corpus_valid.txt corpus_test.txt > corpus.txt
os.system('cat ' + train_corpus + ' ' + valid_corpus + ' ' + test_corpus + ' > corpus.txt')
@staticmethod
def save_relation(file_path, relations):
f = open(file_path, 'w')
for rel in relations:
f.write('%s %s %s\n' % (rel))
f.close()
@staticmethod
def check_filter_query_with_dup_doc(input_file):
""" Filter queries with duplicated doc ids in the relation files
:param input_file: input file, which could be the relation file for train/valid/test data
The format is "label qid did"
:return:
"""
with open(input_file) as f_in, open(input_file + '.fd', 'w') as f_out:
cur_qid = 'init'
cache_did_set = set()
cache_q_lines = []
found_dup_doc = False
for l in f_in:
tokens = l.split()
if tokens[1] == cur_qid:
# same qid
cache_q_lines.append(l)
if tokens[2] in cache_did_set:
found_dup_doc = True
else:
cache_did_set.add(tokens[2])
else:
# new qid
if not found_dup_doc:
f_out.write(''.join(cache_q_lines))
else:
print('found qid with duplicated doc id/text: ', ''.join(cache_q_lines))
print('filtered... continue')
cache_q_lines = []
cache_q_lines.append(l)
found_dup_doc = False
cache_did_set.clear()
cur_qid = tokens[1]
cache_did_set.add(tokens[2])
# the last query
# print len(cache_q_lines), len(cache_did_set)
if len(cache_q_lines) != 0 and len(cache_q_lines) == len(cache_did_set):
f_out.write(''.join(cache_q_lines))
print('write the last query... done: ', ''.join(cache_q_lines))
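    # Illustrative sketch (added; not part of the original module): for a
    # relation file with lines "label qid did", a query block such as
    #     1 Q1 D1
    #     0 Q1 D1
    # is dropped by check_filter_query_with_dup_doc (duplicated doc id D1 for
    # query Q1), while blocks whose doc ids are all distinct are copied to
    # "<input_file>.fd".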
@staticmethod
def split_train_valid_test(relations, ratio=(0.8, 0.1, 0.1)):
random.shuffle(relations)
total_rel = len(relations)
num_train = int(total_rel * ratio[0])
num_valid = int(total_rel * ratio[1])
valid_end = num_train + num_valid
rel_train = relations[: num_train]
rel_valid = relations[num_train: valid_end]
rel_test = relations[valid_end:]
return rel_train, rel_valid, rel_test
@staticmethod
def split_train_valid_test_for_ranking(relations, ratio=(0.8, 0.1, 0.1)):
qid_group = set()
for r, q, d in relations:
qid_group.add(q)
qid_group = list(qid_group)
random.shuffle(qid_group)
total_rel = len(qid_group)
num_train = int(total_rel * ratio[0])
num_valid = int(total_rel * ratio[1])
valid_end = num_train + num_valid
qid_train = qid_group[: num_train]
qid_valid = qid_group[num_train: valid_end]
qid_test = qid_group[valid_end:]
def select_rel_by_qids(qids):
rels = []
qids = set(qids)
for r, q, d in relations:
if q in qids:
rels.append((r, q, d))
return rels
rel_train = select_rel_by_qids(qid_train)
rel_valid = select_rel_by_qids(qid_valid)
rel_test = select_rel_by_qids(qid_test)
return rel_train, rel_valid, rel_test
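    # Note (added): unlike split_train_valid_test, which shuffles individual
    # (label, qid, did) relations, split_train_valid_test_for_ranking shuffles
    # query ids, so every relation belonging to a given query lands in the same
    # train/valid/test partition, which is the grouping needed for
    # ranking-style evaluation.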
if __name__ == '__main__':
prepare = Preparation()
basedir = '/home/wtt/Code/MatchZoo/data/InsuranceQA/'#'../../data/example/ranking/'
corpus, rels = prepare.run_with_one_corpus(basedir + 'sample.txt')
print('total corpus : %d ...' % (len(corpus)))
print('total relations : %d ...' % (len(rels)))
prepare.save_corpus(basedir + 'corpus.txt', corpus)
# rel_train, rel_valid, rel_test = prepare.split_train_valid_test(rels, (0.8, 0.1, 0.1))
corpus, rel_train = prepare.run_with_one_corpus(basedir + 'trainsample.txt')
corpus, rel_valid = prepare.run_with_one_corpus(basedir + 'validsample.txt')
corpus, rel_test = prepare.run_with_one_corpus(basedir + 'testsample.txt')
prepare.save_relation(basedir + 'relation_train.txt', rel_train)
prepare.save_relation(basedir + 'relation_valid.txt', rel_valid)
prepare.save_relation(basedir + 'relation_test.txt', rel_test)
print('Done ...')
|
the-stack_0_4429 | import copy
import logging
import random
from typing import Any, Callable, Dict, List, Optional, Tuple
from generator import (
DefinitionDataset,
InteractiveGoal,
ObjectDefinition,
RetrievalGoal,
SceneException,
base_objects,
containers,
geometry,
materials,
specific_objects,
structures,
tags,
util,
)
from generator.separating_axis_theorem import sat_entry
from .hypercubes import (
Hypercube,
HypercubeFactory,
update_floor_and_walls,
update_scene_objects,
)
from .interactive_plans import (
InteractivePlan,
ObjectLocationPlan,
ObjectPlan,
create_container_hypercube_plan_list,
create_eval_4_container_hypercube_plan_list,
create_obstacle_hypercube_plan_list,
create_occluder_hypercube_plan_list,
)
from .object_data import (
ObjectData,
ReceptacleData,
TargetData,
identify_larger_definition,
)
ROOM_DIMENSIONS = geometry.DEFAULT_ROOM_DIMENSIONS
# Add or subtract the performer width to ensure it can move behind any object.
ROOM_X_MIN = -(ROOM_DIMENSIONS['x'] / 2.0) + util.PERFORMER_WIDTH
ROOM_Z_MIN = -(ROOM_DIMENSIONS['z'] / 2.0) + util.PERFORMER_WIDTH
ROOM_X_MAX = (ROOM_DIMENSIONS['x'] / 2.0) - util.PERFORMER_WIDTH
ROOM_Z_MAX = (ROOM_DIMENSIONS['z'] / 2.0) - util.PERFORMER_WIDTH
LAST_STEP = 2500
SMALL_CONTEXT_OBJECT_CHOICES = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
SMALL_CONTEXT_OBJECT_WEIGHTS = [5, 5, 10, 10, 12.5, 15, 12.5, 10, 10, 5, 5]
WALL_CHOICES = [0, 1, 2, 3]
WALL_WEIGHTS = [40, 30, 20, 10]
WALL_MAX_WIDTH = 4
WALL_MIN_WIDTH = 1
WALL_HEIGHT = 3
WALL_DEPTH = 0.1
WALL_SEPARATION = 1
def retrieve_template_list(object_data: ObjectData) -> List[Dict[str, Any]]:
return [object_data.trained_template, object_data.untrained_template]
class InteractiveHypercube(Hypercube):
"""A hypercube of interactive scenes that each have the same goals,
targets, distractors, walls, materials, and performer starts, except for
specific differences detailed in its plan."""
def __init__(
self,
body_template: Dict[str, Any],
goal: InteractiveGoal,
role_to_type: Dict[str, str],
plan_name: str,
plan_list: List[InteractivePlan],
training=False
) -> None:
self._goal = goal
self._plan_list = plan_list
self._role_to_type = role_to_type
self._initialize_object_data()
self._validate_object_plan()
super().__init__(
goal.get_name() + ((' ' + plan_name) if plan_name else ''),
body_template,
goal.get_goal_template(),
training=training
)
def _initialize_object_data(self) -> None:
# Save each possible object's plans across all scenes.
self._data = {
'target': [TargetData(self._plan_list[0].target_plan, 0)],
'confusor': [
ObjectData(tags.ROLES.CONFUSOR, object_plan) for object_plan
in self._plan_list[0].confusor_plan_list
],
'large_container': [
ReceptacleData(tags.ROLES.CONTAINER, object_plan)
for object_plan in self._plan_list[0].large_container_plan_list
],
'obstacle': [
ReceptacleData(tags.ROLES.OBSTACLE, object_plan)
for object_plan in self._plan_list[0].obstacle_plan_list
],
'occluder': [
ReceptacleData(tags.ROLES.OCCLUDER, object_plan)
for object_plan in self._plan_list[0].occluder_plan_list
],
'small_container': [
ReceptacleData(tags.ROLES.CONTAINER, object_plan)
for object_plan in self._plan_list[0].small_container_plan_list
]
}
# Assume that each object has a plan in each scene. An object that does
# not appear in a scene should be given a NONE location plan.
for scene_plan in self._plan_list[1:]:
for role, object_plan_list in scene_plan.object_plans().items():
for index, object_plan in enumerate(object_plan_list):
self._data[role][index].append_object_plan(object_plan)
# Assume only one target plan, and always use the index 0 target.
self._target_data = self._data['target'][0]
# Assume only zero or one confusor plan.
self._confusor_data = (
self._data['confusor'][0] if len(self._data['confusor']) > 0
else None
)
def _validate_object_plan(self) -> None:
if any([
scene_plan.target_plan.definition !=
self._target_data.original_definition
for scene_plan in self._plan_list
]):
raise SceneException(
'Interactive hypercubes cannot currently handle a target with '
'different definitions across scenes')
if any(self._target_data.untrained_plan_list):
raise SceneException(
'Interactive hypercubes cannot currently handle a target with '
'a randomly chosen (not pre-defined) untrained shape')
# Update _assign_each_object_location to handle new location plans.
for object_data in self._data['target']:
if (
object_data.is_between() or object_data.is_far()
):
raise SceneException(
'Interactive hypercubes cannot currently handle the '
'target location plans: BETWEEN, FAR')
for object_data in self._data['confusor']:
if (
object_data.is_between() or object_data.is_random()
):
raise SceneException(
'Interactive hypercubes cannot currently handle the '
'confusor location plans: BETWEEN, RANDOM')
for object_data in (
self._data['large_container'] + self._data['small_container']
):
if (
object_data.is_back() or object_data.is_between() or
object_data.is_close() or object_data.is_far() or
object_data.is_front() or object_data.is_inside()
):
raise SceneException(
'Interactive hypercubes cannot currently handle the '
'container location plans: BACK, BETWEEN, CLOSE, FAR, '
'FRONT, INSIDE')
for object_data in (self._data['obstacle'] + self._data['occluder']):
if (
object_data.is_back() or object_data.is_far() or
object_data.is_front() or object_data.is_inside()
):
raise SceneException(
'Interactive hypercubes cannot currently handle the '
'obstacle or occluder location plans: BACK, FAR, FRONT, '
'INSIDE')
# Override
def _create_scenes(
self,
body_template: Dict[str, Any],
goal_template: Dict[str, Any]
) -> List[Dict[str, Any]]:
tries = 0
while True:
tries += 1
try:
logging.debug(
f'\n\n{self.get_name()} initialize scenes try {tries}\n')
# Reset the half-finished scenes, all of their objects, and
# their other properties on each try.
scenes = [
copy.deepcopy(body_template) for _
in range(len(self._plan_list))
]
for object_data_list in self._data.values():
for object_data in object_data_list:
object_data.reset_all_properties()
# Save the bounds of each object in each of its possible
# locations across all the scenes to detect collisions with
# any subsequently positioned objects.
self._bounds_list = []
# Save the targets used in the hypercube that are not defined
# by the plan, if the goal has multiple targets.
self._common_target_list = []
# Save the interior walls used in the hypercube.
self._interior_wall_list = []
# Save the performer's start location in the hypercube.
self._performer_start = self._generate_performer_start()
# Save the small context objects used in the hypercube.
self._small_context_object_list = []
# Initialize all of the objects in all of the scenes.
self._initialize_each_hypercube_object()
# Update each scene's template with its corresponding objects,
# goal, tags, and other specific properties.
for index, scene in enumerate(scenes):
self._update_scene_at_index(scene, index, goal_template)
logging.debug(
f'\n\n{self.get_name()} initialize scenes is done\n ')
scenes = update_floor_and_walls(
body_template,
self._data,
retrieve_template_list,
scenes
)
break
except SceneException:
logging.exception(
f'{self.get_name()} _initialize_each_hypercube_object')
if tries >= util.MAX_TRIES:
raise SceneException(
f'{self.get_name()} cannot successfully initialize scenes '
f'-- please redo.')
return scenes
# Override
def _get_training_scenes(self) -> List[Dict[str, Any]]:
return [
scene for scene in self._scenes
if not scene['debug']['evaluationOnly']
]
def _assign_confusor_obstacle_occluder_location(
self,
target_data: TargetData,
target_or_receptacle_definition: ObjectDefinition,
confusor_data: Optional[ObjectData],
obstacle_occluder_data_list: List[ObjectData],
large_container_data_list: List[ReceptacleData],
goal: InteractiveGoal,
performer_start: Dict[str, Dict[str, float]],
bounds_list: List[List[Dict[str, float]]],
plans_to_locations: Dict[ObjectLocationPlan, List[Dict[str, Any]]]
) -> None:
"""Generate and assign locations to the given confusor, obstacle, and
occluder objects, if needed. Will update the given bounds_list."""
# Objects positioned relative to the target (confusors, obstacles, and
# occluders) must each choose new locations for each of the target's
# distinct locations (or its receptacle's locations) across scenes.
target_locations_with_indexes = (
target_data.locations_with_indexes(large_container_data_list)
)
# Next, choose a location for an obstacle/occluder either between the
# performer's start location and the target or behind the target (if
# needed). Assume only one obstacle or occluder is ever "in between"
# OR "close" in a single scene.
for target_location_plan, indexes in target_locations_with_indexes:
for object_data in obstacle_occluder_data_list:
is_obstacle = (object_data.role == tags.ROLES.OBSTACLE)
if object_data.is_between():
# Use the same location for the object across scenes in
# which the target is in this specific location.
self._assign_single_obstacle_occluder_location(
object_data,
target_or_receptacle_definition,
plans_to_locations[target_location_plan],
performer_start,
bounds_list,
'between',
object_data.assign_location_between,
indexes,
obstruct=(not is_obstacle),
unreachable=is_obstacle
)
if object_data.is_close():
# Use the same location for the object across scenes in
# which the target is in this specific location.
self._assign_single_obstacle_occluder_location(
object_data,
target_or_receptacle_definition,
plans_to_locations[target_location_plan],
performer_start,
bounds_list,
'behind',
object_data.assign_location_close,
indexes,
behind=True
)
if object_data.is_random():
# Use the same location for the object across scenes in
# which the target is in this specific location.
location = self._generate_random_location(
object_data.trained_definition,
goal,
performer_start,
bounds_list,
target_location=(
plans_to_locations[target_location_plan]
),
second_definition=object_data.untrained_definition
)
logging.debug(
f'{self.get_name()} obstacle/occluder location '
f'randomly chosen but not obstructing target: '
f'{location}')
bounds = object_data.assign_location_random(location)
bounds_list.extend(bounds)
# Next, choose a location for the confusor, close to or far from the
# target (if needed).
if confusor_data:
for target_location_plan, indexes in target_locations_with_indexes:
if confusor_data.is_close():
# Use the same location for the object across scenes in
# which the target is in this specific location.
location = self._generate_close_to(
confusor_data.larger_definition(),
target_or_receptacle_definition,
plans_to_locations[target_location_plan],
performer_start,
bounds_list,
adjacent=True
)
logging.debug(
f'{self.get_name()} confusor location close to: '
f'{location}')
bounds = confusor_data.assign_location_close(
location,
indexes
)
bounds_list.extend(bounds)
if confusor_data.is_far():
# Use the same location for the object across scenes in
# which the target is in this specific location.
location = self._generate_far_from(
confusor_data.larger_definition(),
plans_to_locations[target_location_plan],
performer_start,
bounds_list
)
logging.debug(
f'{self.get_name()} confusor location far from: '
f'{location}')
bounds = confusor_data.assign_location_far(
location,
indexes
)
bounds_list.extend(bounds)
def _assign_container_location(
self,
container_data_list: List[ReceptacleData],
goal: InteractiveGoal,
performer_start: Dict[str, Dict[str, float]],
bounds_list: List[List[Dict[str, float]]]
) -> None:
"""Generate and assign locations to the given container receptacle
objects, if needed. Will update the given bounds_list."""
# Next, choose the locations for the remaining containers (if needed).
for container_data in container_data_list:
if container_data.is_random():
# Use the same location for the object across scenes in which
# the object is randomly positioned.
location = self._generate_random_location(
container_data.larger_definition(),
goal,
performer_start,
bounds_list
)
logging.debug(
f'{self.get_name()} container location randomly chosen: '
f'{location}')
bounds = container_data.assign_location_random(location)
bounds_list.extend(bounds)
def _assign_front_and_back_location(
self,
target_data: TargetData,
target_or_receptacle_definition: ObjectDefinition,
confusor_data_list: List[ObjectData],
bounds_list: List[List[Dict[str, float]]]
) -> Dict[ObjectLocationPlan, List[Dict[str, Any]]]:
"""Generate and assign front and back locations to the given target and
confusor objects, if needed. Will update the given bounds_list. Return
the target's location corresponding to each unique location plan."""
# Save the target's location corresponding to each location plan.
plans_to_locations = {}
front_and_back_object_data_list = [target_data] + confusor_data_list
if any([
(object_data.is_front() or object_data.is_back()) for object_data
in front_and_back_object_data_list
]):
# Assume only one object is ever "in front" and only one object
# is ever "in back" in a single scene, so use the same front and
# back locations on each relevant object.
location_front, location_back = self._generate_front_and_back(
target_or_receptacle_definition,
target_data.choice
)
logging.debug(
f'{self.get_name()} location in front of performer start:'
f'{location_front}')
logging.debug(
f'{self.get_name()} location in back of performer start:'
f'{location_back}')
for object_data in front_and_back_object_data_list:
bounds = object_data.assign_location_front(location_front)
bounds_list.extend(bounds)
bounds = object_data.assign_location_back(location_back)
bounds_list.extend(bounds)
plans_to_locations[ObjectLocationPlan.FRONT] = location_front
plans_to_locations[ObjectLocationPlan.BACK] = location_back
# We assume the performer_start won't be modified past here.
logging.debug(
f'{self.get_name()} performer start: {self._performer_start}')
return plans_to_locations
def _assign_object_location_inside_container(
self,
target_data: TargetData,
confusor_data: Optional[ObjectData],
large_container_data_list: List[ReceptacleData]
) -> None:
"""Generate and assign locations to the given target and confusor
objects inside the given container objects, if needed. Will update the
given bounds_list."""
target_contained_indexes = target_data.contained_indexes(
large_container_data_list,
confusor_data
)
# Finally, position the target and confusor inside containers.
for index, container_data, confusor_data in target_contained_indexes:
# Create a new instance of each object to use in this scene.
target_instance = copy.deepcopy(target_data.trained_template)
containment = (
container_data.untrained_containment
if container_data.untrained_plan_list[index]
else container_data.trained_containment
)
# If confusor_data is None, put just the target in the container.
if not confusor_data:
containers.put_object_in_container(
target_instance,
container_data.instance_list[index],
containment.area_index,
containment.target_angle
)
# Else, put both the target and confusor together in the container.
else:
confusor_instance = copy.deepcopy(
confusor_data.untrained_template
if confusor_data.untrained_plan_list[index]
else confusor_data.trained_template
)
containers.put_objects_in_container(
target_instance,
confusor_instance,
container_data.instance_list[index],
containment.area_index,
containment.orientation,
containment.target_angle,
containment.confusor_angle
)
# Save the confusor instance in the hypercube data.
confusor_data.instance_list[index] = confusor_instance
# Save the target instance in the hypercube data.
target_data.instance_list[index] = target_instance
confusor_contained_indexes = confusor_data.contained_indexes(
large_container_data_list,
target_data
) if confusor_data else []
for index, container_data, target_data in confusor_contained_indexes:
# Create a new instance of each object to use in this scene.
confusor_instance = copy.deepcopy(
confusor_data.untrained_template
if confusor_data.untrained_plan_list[index]
else confusor_data.trained_template
)
# If target_data is None, put just the confusor in the container.
            if not target_data:
                containment = (
                    container_data.untrained_containment
                    if container_data.untrained_plan_list[index]
                    else container_data.trained_containment
                )
                containers.put_object_in_container(
                    confusor_instance,
                    container_data.instance_list[index],
                    containment.area_index,
                    containment.confusor_angle
                )
# Save the confusor instance in the hypercube data.
confusor_data.instance_list[index] = confusor_instance
# Else, we already put both objects together in a container, above.
def _assign_single_obstacle_occluder_location(
self,
obstacle_occluder_data: ObjectData,
target_or_receptacle_definition: ObjectDefinition,
target_location: Dict[str, Any],
performer_start: Dict[str, Dict[str, float]],
bounds_list: List[List[Dict[str, float]]],
debug_label: str,
location_function: Callable,
indexes: List[float],
behind: bool = False,
obstruct: bool = False,
unreachable: bool = False
) -> None:
"""Generate and assign new locations to a single given obstacle or
occluder using the given function either obstructing or behind the
target. Find separate locations for both the trained and the untrained
definitions because each must be able to obstruct the target."""
trained_location = self._generate_close_to(
obstacle_occluder_data.trained_definition,
target_or_receptacle_definition,
target_location,
performer_start,
bounds_list,
behind=behind,
obstruct=obstruct,
unreachable=unreachable
)
logging.debug(
f'{self.get_name()} trained obstacle/occluder location '
f'{debug_label} target and performer start: {trained_location}')
untrained_location = self._generate_close_to(
obstacle_occluder_data.untrained_definition,
target_or_receptacle_definition,
target_location,
performer_start,
bounds_list,
behind=behind,
obstruct=obstruct,
unreachable=unreachable
)
logging.debug(
f'{self.get_name()} untrained obstacle/occluder location '
f'{debug_label} target and performer start: {untrained_location}')
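        # Assign the trained location to the scenes that use this object's
        # trained shape, and the untrained location to the scenes that use its
        # untrained shape.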
bounds_trained = location_function(trained_location, [
index for index in indexes
if not obstacle_occluder_data.untrained_plan_list[index]
])
bounds_list.extend(bounds_trained)
bounds_untrained = location_function(untrained_location, [
index for index in indexes
if obstacle_occluder_data.untrained_plan_list[index]
])
bounds_list.extend(bounds_untrained)
def _assign_target_location(
self,
target_data: TargetData,
target_or_receptacle_definition: ObjectDefinition,
container_data: Optional[ReceptacleData],
confusor_data_list: List[ObjectData],
goal: InteractiveGoal,
performer_start: Dict[str, Dict[str, float]],
bounds_list: List[List[Dict[str, float]]]
) -> Dict[ObjectLocationPlan, List[Dict[str, Any]]]:
"""Generate and assign locations to the given target, as well as the
given target's receptacle and confusor objects if needed. Will update
the given bounds_list. Return the target's location corresponding to
each unique location plan."""
# First, choose the locations for the objects positioned relative to
# the performer's start location (if needed), both in front of it and
# in back of it. Do FIRST because it may change performer_start.
plans_to_locations = self._assign_front_and_back_location(
target_data,
target_or_receptacle_definition,
confusor_data_list,
bounds_list
)
# Next, choose the locations for the target's container (if needed).
target_container_location = None
if container_data and container_data.is_random():
# Use the same location for the object across scenes in which
# the object is randomly positioned.
target_container_location = self._generate_random_location(
container_data.larger_definition(),
goal,
performer_start,
bounds_list
)
logging.debug(
f'{self.get_name()} container location randomly chosen: '
f'{target_container_location}')
bounds = container_data.assign_location_random(
target_container_location
)
bounds_list.extend(bounds)
# Next, choose a location close to the target's container (if any).
# Assume a "close" target is always close to its container.
if target_data.is_close():
target_definition = target_data.larger_definition()
# If the target was turned sideways, revert it for the location
# close to the target's container.
if target_definition.notSideways:
target_definition = copy.deepcopy(target_definition)
target_definition.dimensions = (
target_definition.notSideways['dimensions']
)
target_definition.offset = (
target_definition.notSideways['offset']
)
target_definition.positionY = (
target_definition.notSideways['positionY']
)
target_definition.rotation = (
target_definition.notSideways['rotation']
)
location = self._generate_close_to(
target_definition,
container_data.larger_definition(),
target_container_location,
performer_start,
bounds_list
)
logging.debug(
f'{self.get_name()} target location close to the first '
f'large container: {location}')
bounds = target_data.assign_location_close(
location,
None
)
bounds_list.extend(bounds)
plans_to_locations[ObjectLocationPlan.CLOSE] = location
# Next, handle the remaining cases for choosing the target's location.
if target_data.is_random():
# Use the same location for the target across scenes in which the
# target is positioned randomly.
location = self._generate_random_location(
target_or_receptacle_definition,
goal,
performer_start,
bounds_list,
target_choice=target_data.choice
)
logging.debug(
f'{self.get_name()} target location randomly chosen: '
f'{location}')
bounds = target_data.assign_location_random(location)
bounds_list.extend(bounds)
plans_to_locations[ObjectLocationPlan.RANDOM] = location
return plans_to_locations
def _assign_each_object_location(self) -> None:
"""Assign each object's final location in all of the scenes by creating
separate instances of them to use in each individual scene."""
# Use the larger definition of the target or its receptacle in any
# scene to save a big enough area for all objects.
larger_target_definition = self._target_data.larger_definition_of(
self._data['large_container'],
self._confusor_data
)
logging.debug(
f'{self.get_name()} larger definition of trained/untrained '
f'target/confusor/container: {larger_target_definition}')
# Save the target's location corresponding to each location plan.
target_location_plans_to_locations = self._assign_target_location(
self._target_data,
larger_target_definition,
# Assume the 1st large container may have the target inside of it.
self._data['large_container'][0]
if len(self._data['large_container']) > 0 else None,
self._data['confusor'],
self._goal,
self._performer_start,
self._bounds_list
)
self._assign_confusor_obstacle_occluder_location(
self._target_data,
larger_target_definition,
self._confusor_data,
self._data['obstacle'] + self._data['occluder'],
self._data['large_container'],
self._goal,
self._performer_start,
self._bounds_list,
target_location_plans_to_locations
)
self._assign_container_location(
# Assume the 1st large container may have the target inside of it,
# and thus it will have been positioned previously, but the other
# containers will not have any objects inside of them.
self._data['large_container'][1:] + self._data['small_container'],
self._goal,
self._performer_start,
self._bounds_list
)
self._assign_object_location_inside_container(
self._target_data,
self._confusor_data,
self._data['large_container']
)
def _assign_confusor_definition(
self,
confusor_data: Optional[ObjectData],
target_definition: ObjectDefinition
) -> None:
"""Update the given confusor data with its object definition using the
given target data."""
if not confusor_data:
return
dataset = specific_objects.get_interactable_definition_dataset()
trained_dataset = dataset.filter_on_trained()
untrained_dataset = dataset.filter_on_untrained(
tags.SCENE.UNTRAINED_SHAPE
)
if not confusor_data.trained_definition:
confusor_data.trained_definition = util.get_similar_definition(
target_definition,
trained_dataset
)
if not confusor_data.trained_definition:
raise SceneException(
f'{self.get_name()} cannot find trained confusor '
f'size={trained_dataset.size()} '
f'target={target_definition}')
if not confusor_data.untrained_definition:
confusor_data.untrained_definition = util.get_similar_definition(
target_definition,
untrained_dataset
)
if not confusor_data.untrained_definition:
raise SceneException(
f'{self.get_name()} cannot find untrained confusor '
f'size={untrained_dataset.size()} '
f'target={target_definition}')
logging.debug(
f'{self.get_name()} confusor definition: '
f'trained={confusor_data.trained_definition}'
f'untrained={confusor_data.untrained_definition}')
def _choose_small_context_definition(
self,
target_confusor_data_list: List[ObjectData]
) -> Dict[str, Any]:
"""Choose and return a small context object definition for the given
target and confusor objects from the given definition list."""
return util.choose_distractor_definition([
object_data.trained_definition.shape for object_data
in target_confusor_data_list
] + [
object_data.untrained_definition.shape for object_data
in target_confusor_data_list if object_data.untrained_definition
])
def _assign_obstacle_or_occluder_definition(
self,
object_data: ObjectData,
target_definition: ObjectDefinition,
is_occluder: bool
) -> None:
"""Update the given obstacle or occluder data with its object
definition using the given target data."""
dataset = (
specific_objects.get_occluder_definition_dataset() if is_occluder
else specific_objects.get_obstacle_definition_dataset()
)
trained_dataset = dataset.filter_on_trained()
untrained_dataset = dataset.filter_on_untrained(
tags.SCENE.UNTRAINED_SHAPE
)
if not object_data.trained_definition:
object_data.trained_definition = (
self._choose_obstacle_or_occluder_definition(
target_definition,
trained_dataset,
is_occluder
)
)
if not object_data.untrained_definition:
object_data.untrained_definition = (
self._choose_obstacle_or_occluder_definition(
target_definition,
untrained_dataset,
is_occluder
)
)
logging.debug(
f'{self.get_name()} {"occluder" if is_occluder else "obstacle"} '
f'definition: trained={object_data.trained_definition} '
f'untrained={object_data.untrained_definition}')
def _choose_obstacle_or_occluder_definition(
self,
target_definition: ObjectDefinition,
definition_dataset: DefinitionDataset,
is_occluder: bool
) -> Dict[str, Any]:
"""Choose and return an obstacle or occluder definition for the given
target object from the given definition list."""
obstacle_occluder_definition_list = (
geometry.retrieve_obstacle_occluder_definition_list(
target_definition,
definition_dataset,
is_occluder
)
)
if not obstacle_occluder_definition_list:
raise SceneException(
f'{self.get_name()} cannot find '
f'{"occluder" if is_occluder else "obstacle"} '
f'size={definition_dataset.size()} '
f'target={target_definition}')
definition, angle = random.choice(obstacle_occluder_definition_list)
        # Note that this rotation must also be modified with the final
        # performer start Y.
definition.rotation.y += angle
return definition
def _assign_container_definition(
self,
container_data: ReceptacleData,
target_data: TargetData,
confusor_data: Optional[ObjectData],
find_invalid_container: bool = False
) -> None:
"""Update the given container data with its object definition using the
given target and confusor data and whether it should be a valid or an
invalid size to fit either or both of the objects inside of it."""
dataset = specific_objects.get_container_definition_dataset()
trained_dataset = dataset.filter_on_trained()
untrained_dataset = dataset.filter_on_untrained(
tags.SCENE.UNTRAINED_SHAPE
)
if not container_data.trained_definition:
(
definition,
area_index,
orientation,
target_angle,
confusor_angle
) = self._choose_container_definition(
target_data,
confusor_data,
confusor_data.trained_definition if confusor_data else None,
trained_dataset,
find_invalid_container
)
container_data.trained_definition = definition
container_data.trained_containment.area_index = area_index
container_data.trained_containment.orientation = orientation
container_data.trained_containment.target_angle = target_angle
container_data.trained_containment.confusor_angle = confusor_angle
if not container_data.untrained_definition:
(
definition,
area_index,
orientation,
target_angle,
confusor_angle
) = self._choose_container_definition(
target_data,
confusor_data,
confusor_data.untrained_definition if confusor_data else None,
untrained_dataset,
find_invalid_container
)
container_data.untrained_definition = definition
container_data.untrained_containment.area_index = area_index
container_data.untrained_containment.orientation = orientation
container_data.untrained_containment.target_angle = target_angle
container_data.untrained_containment.confusor_angle = (
confusor_angle
)
logging.debug(
f'{self.get_name()} container definition: '
f'trained={container_data.trained_definition} '
f'untrained={container_data.untrained_definition}')
def _choose_container_definition(
self,
target_data: TargetData,
confusor_data: Optional[ObjectData],
confusor_definition: Optional[ObjectDefinition],
definition_dataset: DefinitionDataset,
find_invalid_container: bool = False,
) -> Tuple[Dict[str, Any], int, containers.Orientation, float, float]:
"""Choose and return a valid or an invalid container definition for the
given target and confusor objects from the given definition list."""
container_definition = None
area_index = None
orientation = None
target_angle = None
confusor_angle = None
target_definition_list = [target_data.trained_definition]
# Also try the target definition's sideways option if it exists.
if target_data.trained_definition.sideways:
sideways_definition = copy.deepcopy(target_data.trained_definition)
# Save the original properties.
sideways_definition.notSideways = {
'dimensions': sideways_definition.dimensions,
'offset': sideways_definition.offset,
'positionY': sideways_definition.positionY,
'rotation': sideways_definition.rotation
}
# Override the original properties with the sideways properties.
sideways_definition.dimensions = (
sideways_definition.sideways['dimensions']
)
sideways_definition.offset = (
sideways_definition.sideways['offset']
)
sideways_definition.positionY = (
sideways_definition.sideways['positionY']
)
sideways_definition.rotation = (
sideways_definition.sideways['rotation']
)
sideways_definition.sideways = None
target_definition_list.append(sideways_definition)
# If needed, find an enclosable container that can hold both the
# target and the confusor together.
if target_data.containerize_with(confusor_data):
for definition in definition_dataset.definitions():
for target_definition in target_definition_list:
valid_containment = containers.can_contain_both(
definition,
target_definition,
confusor_definition
)
if valid_containment and not find_invalid_container:
target_data.trained_definition = target_definition
container_definition = definition
area_index, angles, orientation = valid_containment
target_angle = angles[0]
confusor_angle = angles[1]
break
elif not valid_containment and find_invalid_container:
target_data.trained_definition = target_definition
container_definition = definition
break
# Else, find an enclosable container that can hold either the target
# or confusor individually.
else:
confusor_definition_or_none = (
confusor_definition if confusor_data and
confusor_data.is_inside() else None
)
if not target_data.is_inside():
target_definition_list = [None]
for definition in definition_dataset.definitions():
for target_definition in target_definition_list:
valid_containment = containers.can_contain(
definition,
target_definition,
confusor_definition_or_none
)
if valid_containment and not find_invalid_container:
if target_definition:
target_data.trained_definition = (
target_definition
)
container_definition = definition
area_index, angles = valid_containment
target_angle = angles[0]
confusor_angle = angles[1]
break
elif not valid_containment and find_invalid_container:
if target_definition:
target_data.trained_definition = (
target_definition
)
container_definition = definition
break
if not container_definition:
raise SceneException(
f'{self.get_name()} cannot create '
f'{"small" if find_invalid_container else "large"} '
f'container size={definition_dataset.size()} '
f'target={target_data.trained_definition}\n'
f'confusor={confusor_definition}')
return (
container_definition, area_index, orientation, target_angle,
confusor_angle
)
def _assign_target_definition(
self,
target_data: TargetData,
goal: InteractiveGoal
) -> None:
"""Update the given target data with its object definition using the
given interactive goal."""
if not target_data.trained_definition:
target_data.trained_definition = goal.choose_target_definition(
target_data.choice
)
logging.debug(
f'{self.get_name()} target definition: '
f'{target_data.trained_definition}')
def _create_interior_wall(
self,
wall_material: str,
wall_colors: List[str],
performer_start: Dict[str, Dict[str, float]],
bounds_list: List[List[Dict[str, float]]],
keep_unobstructed_list: List[Dict[str, Any]] = None
) -> Optional[Dict[str, Any]]:
"""Create and return a randomly positioned interior wall with the
given material and colors. If keep_unobstructed_list is not None, the
wall won't obstruct the line between the performer_start and the
objects in keep_unobstructed_list."""
tries = 0
performer_rect = geometry.find_performer_rect(
performer_start['position']
)
performer_poly = geometry.rect_to_poly(performer_rect)
while tries < util.MAX_TRIES:
rotation = random.choice((0, 90, 180, 270))
x_position = geometry.random_position_x(ROOM_DIMENSIONS)
z_position = geometry.random_position_z(ROOM_DIMENSIONS)
x_width = round(
random.uniform(WALL_MIN_WIDTH, WALL_MAX_WIDTH),
geometry.POSITION_DIGITS
)
# Ensure the wall is not too close to the room's parallel walls.
if (
(rotation == 0 or rotation == 180) and
(
z_position < (ROOM_Z_MIN + WALL_SEPARATION) or
z_position > (ROOM_Z_MAX - WALL_SEPARATION)
)
) or (
(rotation == 90 or rotation == 270) and
(
x_position < (ROOM_X_MIN + WALL_SEPARATION) or
x_position > (ROOM_X_MAX - WALL_SEPARATION)
)
):
continue
wall_rect = geometry.calc_obj_coords(
x_position,
z_position,
x_width / 2.0,
WALL_DEPTH / 2.0,
0,
0,
rotation
)
wall_poly = geometry.rect_to_poly(wall_rect)
# Ensure parallel walls are not too close one another.
boundary_rect = geometry.calc_obj_coords(
x_position,
z_position,
(x_width + WALL_SEPARATION) / 2.0,
(WALL_DEPTH + WALL_SEPARATION) / 2.0,
0,
0,
rotation
)
is_too_close = any(
sat_entry(boundary_rect, bounds) for bounds in bounds_list
)
is_ok = (
not wall_poly.intersects(performer_poly) and
geometry.rect_within_room(wall_rect, ROOM_DIMENSIONS) and
not is_too_close
)
if is_ok and keep_unobstructed_list:
for instance in keep_unobstructed_list:
if (
'locationParent' not in instance and
geometry.does_fully_obstruct_target(
performer_start['position'],
instance,
wall_poly
)
):
is_ok = False
break
if is_ok:
break
tries += 1
if tries < util.MAX_TRIES:
interior_wall = structures.create_interior_wall(
x_position,
z_position,
rotation,
x_width,
WALL_HEIGHT,
materials.MaterialTuple(wall_material, wall_colors),
thickness=WALL_DEPTH,
bounding_rect=wall_rect
)
return interior_wall
return None
def _create_target_list(
self,
goal: InteractiveGoal,
performer_start: Dict[str, float],
existing_bounds_list: List[List[Dict[str, float]]],
target_validation_list: List[Dict[str, float]],
start_index: int = None,
end_index: int = None
) -> Tuple[List[Dict[str, Any]], List[List[Dict[str, float]]]]:
"""Create and return each of the goal's targets between the start_index
and the end_index. Used if the goal needs more targets than are defined
by the hypercube's plan. Changes the bounds_list."""
valid_start_index = 0 if start_index is None else start_index
# Only create targets up to the given index, or create each of the
# targets if no end_index was given. Keep each existing target.
valid_end_index = (
goal.get_target_count() if end_index is None else end_index
)
if valid_start_index >= valid_end_index:
return [], existing_bounds_list
target_list = []
bounds_list = existing_bounds_list
for i in range(valid_start_index, valid_end_index):
definition = goal.choose_target_definition(i)
for _ in range(util.MAX_TRIES):
location, possible_bounds_list = goal.choose_location(
definition,
performer_start,
existing_bounds_list,
is_target=True,
room_dimensions=ROOM_DIMENSIONS
)
if goal.validate_target_location(
i,
location,
target_validation_list,
performer_start
):
break
location = None
if not location:
raise SceneException(
f'{self.get_name()} cannot find suitable location '
f'target={definition}')
bounds_list = possible_bounds_list
instance = util.instantiate_object(definition, location)
target_list.append(instance)
return target_list, bounds_list
def _generate_front_and_back(
self,
definition: ObjectDefinition,
target_choice: int = None
) -> Tuple[Dict[str, Any], Dict[str, Any]]:
"""Generate a location in front of and (if needed) in back of the
performer's start location. May change the global performer_start if
it's needed to generate the two locations. Return the front and back
locations."""
location_front = None
location_back = None
for _ in range(util.MAX_TRIES):
location_front = self._identify_front(
self._goal,
self._performer_start,
definition,
target_choice
)
if location_front:
location_back = self._identify_back(
self._goal,
self._performer_start,
definition,
target_choice
)
if location_back:
break
location_front = None
location_back = None
self._performer_start = self._generate_performer_start()
if not location_front or not location_back:
raise SceneException(
f'{self.get_name()} cannot position performer start in '
f'front of and in back of object={definition}')
return location_front, location_back
def _generate_close_to(
self,
object_definition: ObjectDefinition,
existing_definition: ObjectDefinition,
existing_location: Dict[str, Any],
performer_start: Dict[str, Dict[str, float]],
bounds_list: List[List[Dict[str, float]]],
adjacent: bool = False,
behind: bool = False,
obstruct: bool = False,
unreachable: bool = False
) -> Dict[str, Any]:
"""Generate and return a new location for the given object very close
to the given previously-positioned object and its given location."""
location_close = geometry.generate_location_in_line_with_object(
object_definition,
existing_definition,
existing_location,
performer_start,
bounds_list,
adjacent=adjacent,
behind=behind,
obstruct=obstruct,
unreachable=unreachable,
room_dimensions=ROOM_DIMENSIONS
)
if not location_close:
if adjacent:
raise SceneException(
f'{self.get_name()} cannot position object adjacent to '
f'existing:\nperformer_start={performer_start}\n'
f'object={object_definition}\n'
f'existing={existing_definition}\n'
f'location={existing_location}\nbounds={bounds_list}')
elif behind:
raise SceneException(
f'{self.get_name()} cannot position object directly in '
f'back of existing:\nperformer_start={performer_start}\n'
f'object={object_definition}\n'
f'existing={existing_definition}\n'
f'location={existing_location}\nbounds={bounds_list}')
raise SceneException(
f'{self.get_name()} cannot position object directly in '
f'front of existing:\nperformer_start={performer_start}\n'
f'object={object_definition}\n'
f'existing={existing_definition}\n'
f'location={existing_location}\nbounds={bounds_list}')
return location_close
def _generate_far_from(
self,
object_definition: ObjectDefinition,
existing_location: Dict[str, Any],
performer_start: Dict[str, Dict[str, float]],
bounds_list: List[List[Dict[str, float]]]
) -> Dict[str, Any]:
"""Generate and return a new location for the given object far away
from the given location."""
for _ in range(util.MAX_TRIES):
bounds_list_copy = copy.deepcopy(bounds_list)
location_far = geometry.calc_obj_pos(
performer_start['position'],
bounds_list_copy,
object_definition,
room_dimensions=ROOM_DIMENSIONS
)
if not geometry.are_adjacent(
existing_location,
location_far,
distance=geometry.MIN_OBJECTS_SEPARATION_DISTANCE
):
break
location_far = None
if not location_far:
raise SceneException(
f'{self.get_name()} cannot position object far from existing: '
f'object={object_definition}\nexisting={existing_location}')
return location_far
def _generate_performer_start(self) -> Dict[str, Dict[str, float]]:
"""Generate and return the performer's start location dict."""
return {
'position': {
'x': round(
random.uniform(ROOM_X_MIN, ROOM_X_MAX),
geometry.POSITION_DIGITS
),
'y': 0,
'z': round(
random.uniform(ROOM_Z_MIN, ROOM_Z_MAX),
geometry.POSITION_DIGITS
)
},
'rotation': {
'x': 0,
'y': geometry.random_rotation(),
'z': 0
}
}
def _generate_random_location(
self,
definition: ObjectDefinition,
goal: InteractiveGoal,
performer_start: Dict[str, float],
bounds_list: List[List[Dict[str, float]]],
target_choice: int = None,
target_location: Dict[str, Any] = None,
second_definition: ObjectDefinition = None
) -> Dict[str, Any]:
"""Generate a random location and return it twice."""
for _ in range(util.MAX_TRIES):
location_random, _ = goal.choose_location(
identify_larger_definition(definition, second_definition)
if second_definition else definition,
performer_start,
bounds_list,
is_target=(target_choice is not None),
room_dimensions=ROOM_DIMENSIONS
)
if location_random:
# If generating a location for the target object...
                if target_choice is not None:
if goal.validate_target_location(
target_choice,
location_random,
bounds_list,
performer_start
):
# Successful
break
# If generating a location that must ensure the visibility of
# this object, the target object, and other critical objects to
# the performer's start location...
elif target_location:
# Assume that all of the bounds that have been set by now
# will only be for critical objects (specifically targets,
# confusors, containers, obstacles, occluders).
for bounds in bounds_list:
bounds_poly = geometry.get_bounding_polygon({
'boundingBox': bounds
})
# Also validate the second object definition, if given.
second_rect = geometry.generate_object_bounds(
vars(second_definition.dimensions),
vars(second_definition.offset),
location_random['position'],
location_random['rotation']
)
# This location should not completely obstruct or be
# obstructed by any critical object's location.
if geometry.does_fully_obstruct_target(
performer_start['position'],
location_random,
bounds_poly
) or geometry.does_fully_obstruct_target(
performer_start['position'],
{'boundingBox': bounds},
geometry.get_bounding_polygon(location_random)
) or geometry.does_fully_obstruct_target(
performer_start['position'],
{'boundingBox': second_rect},
bounds_poly
) or geometry.does_fully_obstruct_target(
performer_start['position'],
{'boundingBox': bounds},
geometry.get_bounding_polygon({
'boundingBox': second_rect
})
):
# Failed
location_random = None
break
if location_random:
# This location should not partly obstruct the target
# object's location.
if not geometry.does_partly_obstruct_target(
self._performer_start['position'],
target_location,
geometry.get_bounding_polygon(location_random)
):
# Successful
break
# Otherwise...
else:
# Successful
break
# Failed
location_random = None
if not location_random:
raise SceneException(
f'{self.get_name()} cannot randomly position '
f'target={definition}')
return location_random
def _identify_front(
self,
goal: InteractiveGoal,
performer_start: Dict[str, float],
definition: ObjectDefinition,
target_choice: int = None
) -> Dict[str, Any]:
"""Find and return a location in front of the given performer_start."""
def rotation_func():
return performer_start['rotation']['y']
for _ in range(util.MAX_TRIES):
location_front = geometry.get_location_in_front_of_performer(
performer_start,
definition,
rotation_func=rotation_func,
room_dimensions=ROOM_DIMENSIONS
)
# If we've found a valid location...
if location_front:
# If this is a target location, ensure it's valid for the goal.
if target_choice is None or goal.validate_target_location(
target_choice,
location_front,
[],
performer_start
):
break
# Else, find a new location.
location_front = None
return location_front
def _identify_back(
self,
goal: InteractiveGoal,
performer_start: Dict[str, float],
definition: ObjectDefinition,
target_choice: int = None
) -> Dict[str, Any]:
"""Find and return a location in back of the given performer_start."""
def rotation_func():
return performer_start['rotation']['y']
for _ in range(util.MAX_TRIES):
location_back = geometry.get_location_in_back_of_performer(
performer_start,
definition,
rotation_func,
room_dimensions=ROOM_DIMENSIONS
)
# If we've found a valid location...
if location_back:
# If this is a target location, ensure it's valid for the goal.
if target_choice is None or goal.validate_target_location(
target_choice,
location_back,
[],
performer_start
):
break
# Else, find a new location.
location_back = None
return location_back
def _initialize_context_objects(self) -> None:
"""Create this hypercube's small context objects."""
critical_object_data_list = (
self._data['target'] + self._data['confusor'] +
self._data['obstacle'] + self._data['occluder']
)
context_count = random.choices(
SMALL_CONTEXT_OBJECT_CHOICES,
weights=SMALL_CONTEXT_OBJECT_WEIGHTS,
k=1
)[0]
for _ in range(context_count):
definition = self._choose_small_context_definition(
critical_object_data_list
)
for _ in range(util.MAX_TRIES):
location, bounds_list = self._goal.choose_location(
definition,
self._performer_start,
self._bounds_list,
room_dimensions=ROOM_DIMENSIONS
)
successful = True
if successful:
for object_data in critical_object_data_list:
for instance in object_data.instance_list:
if not instance:
continue
if geometry.does_fully_obstruct_target(
self._performer_start['position'],
instance,
geometry.get_bounding_polygon(location)
):
successful = False
break
if not successful:
break
if successful:
break
location = False
if not location:
raise SceneException(
f'{self.get_name()} cannot find suitable location '
f'small context object {definition}')
self._bounds_list = bounds_list
instance = util.instantiate_object(definition, location)
self._small_context_object_list.append(instance)
def _initialize_interior_walls(self) -> None:
"""Create this hypercube's interior walls. Changes the
interior_wall_list and the bounds_list."""
# All scenes will have the same room wall material/colors.
room_wall_material_name = self._scene_1['wallMaterial']
room_wall_colors = self._scene_1['debug']['wallColors']
keep_unobstructed_list = [self._target_data.trained_definition]
if self._confusor_data:
keep_unobstructed_list.extend([
self._confusor_data.trained_definition,
self._confusor_data.untrained_definition
])
number = random.choices(WALL_CHOICES, weights=WALL_WEIGHTS, k=1)[0]
logging.debug(f'{self.get_name()} {number} interior walls')
for _ in range(number + 1):
wall = self._create_interior_wall(
room_wall_material_name,
room_wall_colors,
self._performer_start,
self._bounds_list,
keep_unobstructed_list
)
if wall:
self._interior_wall_list.append(wall)
self._bounds_list.append(wall['shows'][0]['boundingBox'])
def _choose_each_object_definition(self) -> None:
"""Choose each object's definition to use across scenes."""
# Create all targets in the hypercube that the goal must make before
# the target chosen by the plan, if the goal has multiple targets.
self._common_target_list, self._bounds_list = self._create_target_list(
self._goal,
self._performer_start,
self._bounds_list,
[],
end_index=self._target_data.choice
)
self._assign_target_definition(self._target_data, self._goal)
self._assign_confusor_definition(
self._confusor_data,
self._target_data.trained_definition
)
for container in self._data['large_container']:
self._assign_container_definition(
container,
self._target_data,
self._confusor_data
)
for container in self._data['small_container']:
self._assign_container_definition(
container,
self._target_data,
self._confusor_data,
find_invalid_container=True
)
larger_target_definition = self._target_data.larger_definition_of(
self._data['large_container'],
self._confusor_data
)
for obstacle in self._data['obstacle']:
self._assign_obstacle_or_occluder_definition(
obstacle,
larger_target_definition,
is_occluder=False
)
for occluder in self._data['occluder']:
self._assign_obstacle_or_occluder_definition(
occluder,
larger_target_definition,
is_occluder=True
)
def _create_each_object_template(self) -> None:
"""Create each object's template at a base location, since later we'll
move them to their final locations in all of the scenes."""
for object_data_list in self._data.values():
for object_data in object_data_list:
object_data.recreate_both_templates()
# Reset object's half-finished instances in all scenes.
object_data.reset_all_instances()
def _initialize_each_hypercube_object(self) -> None:
"""
Initialize this hypercube's objects:
- 1. Create objects that may change in each scene (like targets).
- 2. Containerize objects as needed by this hypercube's plan.
- 3. Move objects into locations specific to each scene.
- 4. Save objects specific to each scene.
- 5. Create all other objects shared by both scenes (like distractors).
"""
self._choose_each_object_definition()
tries = 0
while True:
tries += 1
# Reset the bounds_list on each new try.
self._bounds_list = []
self._create_each_object_template()
try:
self._assign_each_object_location()
for i, instance in enumerate(self._target_data.instance_list):
if not instance:
raise SceneException(
f'{self.get_name()} did not successfully create a '
f'target instance in scene {i} (uh-oh)! '
f'target_location_plan='
f'{self._target_data.location_plan_list[i]}')
break
except SceneException:
logging.exception(
f'{self.get_name()} _assign_each_object_location')
if tries >= util.MAX_TRIES:
raise SceneException(
f'{self.get_name()} cannot successfully assign each '
f'object to a location -- please redo.')
for object_data_list in self._data.values():
for object_data in object_data_list:
self._log_debug_object_data(object_data)
# Create other targets in the hypercube that the goal must make after
# the target chosen by the plan, if the goal has multiple targets.
common_target_list, self._bounds_list = self._create_target_list(
self._goal,
self._performer_start,
self._bounds_list,
self._common_target_list + [
instance for instance in self._target_data.instance_list
if instance
],
start_index=(len(self._common_target_list) + 1)
)
self._common_target_list.extend(common_target_list)
self._initialize_context_objects()
# Add the canContainTarget tag to each container in each scene.
for container_data in self._data['large_container']:
for instance in container_data.instance_list:
if instance:
instance['canContainTarget'] = True
for container_data in self._data['small_container']:
for instance in container_data.instance_list:
if instance:
instance['canContainTarget'] = False
def _log_debug_object_data(self, object_data: ObjectData) -> None:
"""Log debug info for the given object data."""
for scene_index, instance in enumerate(object_data.instance_list):
if instance:
logging.info(
f'{self.get_name()} '
f'{object_data.role}_{scene_index} '
f'{instance["type"]} {instance["id"]} '
f'parent={instance.get("locationParent", None)}')
else:
logging.info(
f'{self.get_name()} '
f'{object_data.role}_{scene_index} None')
def _move_distractor_into_receptacle(
self,
object_instance: Dict[str, Any],
performer_start: Dict[str, Dict[str, float]],
bounds_list: List[List[Dict[str, float]]]
) -> Dict[str, Any]:
"""Create and return a receptacle object, moving the given object into
the new receptacle. Changes the bounds_list."""
# Only a pickupable object can be positioned inside a receptacle.
if not object_instance.get('pickupable', False):
return None
# Please note that an enclosable receptacle (that can have objects
# positioned inside of it) may also be called a "container".
dataset = specific_objects.get_container_definition_dataset()
for receptacle_definition in dataset.definitions():
valid_containment = containers.can_contain(
receptacle_definition,
object_instance
)
if valid_containment:
location = geometry.calc_obj_pos(
performer_start['position'],
bounds_list,
receptacle_definition,
room_dimensions=ROOM_DIMENSIONS
)
if location:
receptacle_instance = util.instantiate_object(
receptacle_definition,
location
)
area, angles = valid_containment
containers.put_object_in_container(
object_instance,
receptacle_instance,
area,
angles[0]
)
return receptacle_instance
return None
def _move_distractor_onto_receptacle(
self,
object_instance: Dict[str, Any],
performer_start: Dict[str, Dict[str, float]],
bounds_list: List[List[Dict[str, float]]]
) -> Dict[str, Any]:
"""Create and return a receptacle object, moving the given object onto
the new receptacle. Changes the bounds_list."""
# TODO MCS-146 Position objects on top of receptacles.
return None
def _update_scene_at_index(
self,
scene: Dict[str, Any],
scene_index: int,
goal_template: Dict[str, Any]
) -> None:
"""Update the given scene with its metadata like all of its objects."""
scene_plan = self._plan_list[scene_index]
scene['performerStart'] = self._performer_start
scene['debug']['evaluationOnly'] = any([
object_plan.untrained
for object_plan_list in scene_plan.object_plans().values()
for object_plan in object_plan_list
])
scene['goal'] = copy.deepcopy(goal_template)
scene['goal'] = self._goal.update_goal_template(
scene['goal'],
[self._target_data.instance_list[scene_index]]
)
scene['goal']['last_step'] = LAST_STEP
role_to_object_list = {}
role_to_object_list[tags.ROLES.TARGET] = [
object_data.instance_list[scene_index] for object_data in
self._data['target'] if object_data.instance_list[scene_index]
] + self._common_target_list
role_to_object_list[tags.ROLES.CONFUSOR] = [
object_data.instance_list[scene_index] for object_data in
self._data['confusor'] if object_data.instance_list[scene_index]
]
role_to_object_list[tags.ROLES.CONTAINER] = [
object_data.instance_list[scene_index] for object_data in
(self._data['large_container'] + self._data['small_container'])
if object_data.instance_list[scene_index]
]
role_to_object_list[tags.ROLES.CONTEXT] = (
self._small_context_object_list
)
role_to_object_list[tags.ROLES.OBSTACLE] = [
object_data.instance_list[scene_index] for object_data in
self._data['obstacle'] if object_data.instance_list[scene_index]
]
role_to_object_list[tags.ROLES.OCCLUDER] = [
object_data.instance_list[scene_index] for object_data in
self._data['occluder'] if object_data.instance_list[scene_index]
]
role_to_object_list[tags.ROLES.WALL] = self._interior_wall_list
update_scene_objects(scene, role_to_object_list)
scene['goal']['sceneInfo'][tags.SCENE.ID] = [
scene_plan.scene_id.upper()
]
scene['goal']['sceneInfo'][tags.SCENE.SLICES] = []
for tag, value in scene_plan.slice_tags.items():
scene['goal']['sceneInfo'][tag] = value
scene['goal']['sceneInfo'][tags.SCENE.SLICES].append(
tags.tag_to_label(tag) + ' ' + str(value)
)
class InteractiveSingleSceneFactory(HypercubeFactory):
def __init__(self, goal: InteractiveGoal) -> None:
super().__init__(
goal.get_name().replace(' ', '').capitalize(),
training=True
)
self.goal = goal
def _build(
self,
body_template: Dict[str, Any],
role_to_type: Dict[str, str]
) -> Hypercube:
target_object_plan = ObjectPlan(
ObjectLocationPlan.RANDOM,
definition=base_objects.create_soccer_ball()
)
return InteractiveHypercube(
body_template,
self.goal,
role_to_type,
'',
[InteractivePlan('', {}, target_object_plan)],
training=self.training
)
class InteractiveContainerTrainingHypercubeFactory(HypercubeFactory):
def __init__(self, goal: InteractiveGoal) -> None:
super().__init__(
'Container' + goal.get_name().replace(' ', '').capitalize() +
'Training',
training=True
)
self.goal = goal
def _build(
self,
body_template: Dict[str, Any],
role_to_type: Dict[str, str]
) -> Hypercube:
return InteractiveHypercube(
body_template,
self.goal,
role_to_type,
'container',
create_container_hypercube_plan_list(),
training=self.training
)
class InteractiveObstacleTrainingHypercubeFactory(HypercubeFactory):
def __init__(self, goal: InteractiveGoal) -> None:
super().__init__(
'Obstacle' + goal.get_name().replace(' ', '').capitalize() +
'Training',
training=True
)
self.goal = goal
def _build(
self,
body_template: Dict[str, Any],
role_to_type: Dict[str, str]
) -> Hypercube:
return InteractiveHypercube(
body_template,
self.goal,
role_to_type,
'obstacle',
create_obstacle_hypercube_plan_list(),
training=self.training
)
class InteractiveOccluderTrainingHypercubeFactory(HypercubeFactory):
def __init__(self, goal: InteractiveGoal) -> None:
super().__init__(
'Occluder' + goal.get_name().replace(' ', '').capitalize() +
'Training',
training=True
)
self.goal = goal
def _build(
self,
body_template: Dict[str, Any],
role_to_type: Dict[str, str]
) -> Hypercube:
return InteractiveHypercube(
body_template,
self.goal,
role_to_type,
'occluder',
create_occluder_hypercube_plan_list(),
training=self.training
)
class InteractiveContainerEvaluationHypercubeFactory(HypercubeFactory):
def __init__(self, goal: InteractiveGoal) -> None:
super().__init__(
'Container' + goal.get_name().replace(' ', '').capitalize() +
'Evaluation',
training=False
)
self.goal = goal
def _build(
self,
body_template: Dict[str, Any],
role_to_type: Dict[str, str]
) -> Hypercube:
return InteractiveHypercube(
body_template,
self.goal,
role_to_type,
'container',
create_container_hypercube_plan_list(),
training=self.training
)
class InteractiveContainerEvaluation4HypercubeFactory(HypercubeFactory):
def __init__(self, goal: InteractiveGoal) -> None:
super().__init__(
'Container' + goal.get_name().replace(' ', '').capitalize() +
'Evaluation4',
training=False
)
self.goal = goal
def _build(
self,
body_template: Dict[str, Any],
role_to_type: Dict[str, str]
) -> Hypercube:
return InteractiveHypercube(
body_template,
self.goal,
role_to_type,
'container',
create_eval_4_container_hypercube_plan_list(),
training=self.training
)
class InteractiveObstacleEvaluationHypercubeFactory(HypercubeFactory):
def __init__(self, goal: InteractiveGoal) -> None:
super().__init__(
'Obstacle' + goal.get_name().replace(' ', '').capitalize() +
'Evaluation',
training=False
)
self.goal = goal
def _build(
self,
body_template: Dict[str, Any],
role_to_type: Dict[str, str]
) -> Hypercube:
return InteractiveHypercube(
body_template,
self.goal,
role_to_type,
'obstacle',
create_obstacle_hypercube_plan_list(),
training=self.training
)
class InteractiveOccluderEvaluationHypercubeFactory(HypercubeFactory):
def __init__(self, goal: InteractiveGoal) -> None:
super().__init__(
'Occluder' + goal.get_name().replace(' ', '').capitalize() +
'Evaluation',
training=False
)
self.goal = goal
def _build(
self,
body_template: Dict[str, Any],
role_to_type: Dict[str, str]
) -> Hypercube:
return InteractiveHypercube(
body_template,
self.goal,
role_to_type,
'occluder',
create_occluder_hypercube_plan_list(),
training=self.training
)
INTERACTIVE_TRAINING_HYPERCUBE_LIST = [
InteractiveSingleSceneFactory(RetrievalGoal('retrieval')),
InteractiveContainerTrainingHypercubeFactory(RetrievalGoal('container')),
InteractiveObstacleTrainingHypercubeFactory(RetrievalGoal('obstacle')),
InteractiveOccluderTrainingHypercubeFactory(RetrievalGoal('occluder'))
]
INTERACTIVE_EVALUATION_HYPERCUBE_LIST = [
InteractiveContainerEvaluationHypercubeFactory(RetrievalGoal('container')),
InteractiveObstacleEvaluationHypercubeFactory(RetrievalGoal('obstacle')),
InteractiveOccluderEvaluationHypercubeFactory(RetrievalGoal('occluder')),
InteractiveContainerEvaluation4HypercubeFactory(RetrievalGoal('container'))
]
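# Illustrative usage (a sketch, not part of the module; the empty body template
# and role-to-type mapping below are assumptions): building the single-scene
# retrieval hypercube through the factory's _build hook shown above.
#
#     factory = INTERACTIVE_TRAINING_HYPERCUBE_LIST[0]
#     hypercube = factory._build(body_template={}, role_to_type={})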
|
the-stack_0_4433 | import collections
from nltk import NaiveBayesClassifier, DecisionTreeClassifier
from nltk.metrics import precision, recall, f_measure
from nltk.classify import apply_features, accuracy
from nltk.classify.scikitlearn import SklearnClassifier
from prueba_paquete.utils import clean_html_tags, shuffled, tokenize_and_stem
from prueba_paquete.concept_extraction import ConceptExtractor
from sklearn.svm import LinearSVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_extraction import DictVectorizer
class DocumentClassifier():
'''
Train a classifier with labeled documents and classify new documents
    into one of the labeled classes.
    We call the set of documents provided for training the classifier the
    'dev docs'. These 'dev docs' are split into two subsets, 'train docs' and
    'test docs', which are used to train and test the machine learning model
    respectively.
Parameters
----------
train_p : float, 0.8 by default
The proportion of the 'dev docs' used as 'train docs'
Use values greater than 0 and lower than 1.
        The remaining docs will be used as 'test docs'
eq_label_num : boolean, True by default
If true, 'train docs' will have equal number of documents for each
class. This number will be the lowest label count.
complete_p : boolean, True by default
Used when eq_label_num is True, but the lowest label count is not
enough for getting the train_p proportion of 'train docs'. If this
attribute is True, more documents from 'test docs' will be moved
to 'train docs' until we get train_p
n_folds : integer, 10 by default
Number of folds to be used in k-fold cross validation technique for
choosing different sets as 'train docs'
vocab_size : integer, 500 by default
This is the size of the vocabulary set that will be used for extracting
features out of the docs
t_classifier : string, 'NB' by default
This is the type of classifier model used. Available types are 'NB'
(Naive Bayes), 'DT' (decision tree), 'RF' (Random Forest), and 'SVM'
(Support Vector Machine)
    language : string, 'english' by default
        Language in which the documents are written
'''
def __init__(self, train_p=0.8, eq_label_num=True,
complete_p=True, n_folds=10,
vocab_size=250,
t_classifier="NB", language="english",
stem=False):
self.train_p = train_p
self.eq_label_num = eq_label_num
self.complete_p = complete_p
self.n_folds = n_folds
self.vocab_size = vocab_size
self.t_classifier = t_classifier
self.language = language
self.stem = stem
self._vocab = []
self._classified_docs = []
self._classifier = None
self._accuracy = 0
self._precision = {}
self._recall = {}
self._f_measure = {}
self._train_docs = []
self._test_docs = []
def split_train_and_test(self, docs):
'''
Split the 'dev docs' set into the 'train docs' and 'test docs' subsets
Parameters
----------
docs: iterable
An iterable which yields a list of strings
'''
categories_count = self.count_categories(docs)
label_limit = min([c for (k,c) in categories_count.items()])
labeled_docs = {}
train_docs = []
test_docs = []
# Split docs by label
for (cat,count) in categories_count.items():
labeled_docs[cat] = shuffled([t for (t,k) in docs if k == cat])
if self.eq_label_num:
# Select the same number of doc for all labels
for cat, cat_docs in labeled_docs.items():
cat_limit = label_limit
cat_train_docs = cat_docs[:cat_limit]
cat_test_docs = cat_docs[cat_limit:]
train_docs += [(doc, cat) for doc in cat_train_docs]
test_docs += [(doc, cat) for doc in cat_test_docs]
l_train = len(train_docs)
l_docs = len(docs)
l_test = len(test_docs)
actual_p = l_train / l_docs
            # If the training proportion has not been reached, move extra
            # docs from the test set into the train set.
            if self.complete_p and actual_p < self.train_p:
shuffled_extra = shuffled(test_docs)
extra_i = 0
while(actual_p < self.train_p and extra_i < l_test):
aux_l_train = l_train + extra_i
actual_p = aux_l_train / l_docs
extra_i += 1
train_docs += shuffled_extra[:extra_i]
test_docs = shuffled_extra[extra_i:]
else:
label_limit = int(self.train_p * len(docs))
shuffled_docs = shuffled(docs)
train_docs = shuffled_docs[:label_limit]
test_docs = shuffled_docs[label_limit:]
self._train_docs = train_docs
self._test_docs = test_docs
def cross_validation_train(self, dev_docs):
'''
Applies k-fold cross validation technique to split the docs into different
        pairs of training and testing sets. For each pair, it trains and evaluates
        a classifier, choosing the one with the best accuracy.
Parameters
----------
dev_docs: iterable
An iterable which yields a list of strings
'''
dev_docs = shuffled(dev_docs)
accuracies = []
best_accuracy = 0
subset_size = int(len(dev_docs)/self.n_folds)
for i in range(self.n_folds):
classifier_list = []
train_docs = (dev_docs[(i + 1) * subset_size:] + \
dev_docs[:i * subset_size])
test_docs = dev_docs[i * subset_size:(i + 1) * subset_size]
train_set = apply_features(self.get_doc_features, train_docs)
if self.t_classifier == "NB":
classifier = NaiveBayesClassifier.train(train_set)
elif self.t_classifier == "DT":
classifier = DecisionTreeClassifier.train(train_set)
elif self.t_classifier == "RF":
classifier = SklearnClassifier(RandomForestClassifier())\
.train(train_set)
elif self.t_classifier == "SVM":
classifier = SklearnClassifier(LinearSVC(), sparse=False)\
.train(train_set)
classifier_list.append(classifier)
test_set = apply_features(self.get_doc_features, test_docs, True)
accuracies.append((accuracy(classifier, test_set)) * 100)
if accuracies[-1] > best_accuracy:
best_accuracy = accuracies[-1]
self._classifier = classifier
self._train_docs = train_docs
self._test_docs = test_docs
def equitative_class_train(self, dev_docs):
categories_count = self.count_categories(dev_docs)
labeled_docs = {}
for (cat,count) in categories_count.items():
labeled_docs[cat] = shuffled([t for (t,k) in dev_docs if k == cat])
train_docs = []
test_docs = []
for cat, l in labeled_docs.items():
cat_limit = int(self.train_p * len(l))
train_docs += [(t, cat) for t in l[:cat_limit]]
test_docs += [(t, cat) for t in l[cat_limit:]]
self._train_docs = train_docs
self._test_docs = test_docs
# print("len dev docs", len(dev_docs))
# print("categories count", categories_count)
# print("count train", self.count_categories(train_docs))
# print("count test", self.count_categories(test_docs))
        # split dev docs and create training and test set
# self.split_train_and_test(dev_docs)
train_set = apply_features(self.get_doc_features, self._train_docs)
# create and train the classification model according to t_classifier
if self.t_classifier == "NB":
self._classifier = NaiveBayesClassifier.train(train_set)
elif self.t_classifier == "DT":
self._classifier = DecisionTreeClassifier.train(train_set)
elif self.t_classifier == "RF":
self._classifier = SklearnClassifier(RandomForestClassifier())\
.train(train_set)
elif self.t_classifier == "SVM":
self._classifier = SklearnClassifier(LinearSVC(), sparse=False)\
.train(train_set)
def count_categories(self, docs):
'''
Count how many documents of each class are in the 'dev docs' set
Parameters
----------
docs: iterable
An iterable which yields a list of strings
Returns
-------
counters: dictionary
            A dictionary where each item is the number of docs for a class
'''
categories = set([c for (t,c) in docs])
counters = {}
for cat in categories:
counters[cat] = 0
for (text, cat) in docs:
counters[cat] += 1
self._categories = sorted(categories)
return counters
def get_doc_features(self, doc):
'''
Extract features of a document, checking the presence of the words
in the vocabulary
Parameters
----------
doc: string
The doc from which features will be extracted
Returns
-------
features: dictionary
A dictionary where each item indicates the presence of a
word from the vocabulary in the input doc
'''
features = {}
for word in self._vocab:
features['contains({})'.format(word)] = (word in doc)
return features
def train_classifier(self, dev_docs):
'''
Create the features vocabulary from 'dev docs',
Split 'dev docs', train the classifier with 'train docs',
Evaluate accuracy with 'test docs'
Parameters
----------
dev_docs: iterable
An iterable which yields a list of strings
'''
# create vocabulary for feature extraction
ce = ConceptExtractor(num_concepts=self.vocab_size,
language=self.language)
ce.extract_concepts([t for (t,c) in dev_docs])
self._vocab = sorted([c for (c,f) in ce.common_concepts], key=str.lower)
if (self.stem):
self._vocab = [tokenize_and_stem(w, language=self.language)[0] \
for w in self._vocab]
# self.cross_validation_train(dev_docs)
self.equitative_class_train(dev_docs)
def eval_classifier(self):
'''
Test the model and calculates the metrics of accuracy, precision,
recall and f-measure
'''
test_set = apply_features(self.get_doc_features, self._test_docs, True)
self._accuracy = accuracy(self._classifier, test_set)
refsets = collections.defaultdict(set)
testsets = collections.defaultdict(set)
for i, (feats, label) in enumerate(test_set):
refsets[label].add(i)
observed = self._classifier.classify(feats)
testsets[observed].add(i)
self.count_categories(self._train_docs)
for cat in self._categories:
self._precision[cat] = precision(refsets[cat], testsets[cat])
self._recall[cat] = recall(refsets[cat], testsets[cat])
self._f_measure[cat] = f_measure(refsets[cat], testsets[cat])
def classify_docs(self, docs):
'''
First train the classifier with the labeled data.
Then classifies the unlabeled data.
Parameters
----------
docs: iterable
An iterable which yields a list of strings
'''
dev_docs = [(t, c) for (t, c) in docs if c!=""]
unlabeled_docs = [t for (t, c) in docs if c==""]
self.train_classifier(dev_docs)
self.eval_classifier()
results = []
for doc in unlabeled_docs:
doc_feats = self.get_doc_features(doc)
result = self._classifier.classify(doc_feats)
results.append((doc, result))
self._classified_docs = results
self._final_cat_count = self.count_categories(dev_docs+results)
@property
def classified_docs(self):
return self._classified_docs
@property
def accuracy(self):
return self._accuracy
@property
def precision(self):
return self._precision
@property
def recall(self):
return self._recall
@property
def f_measure(self):
return self._f_measure
@property
def category_count(self):
return self._final_cat_count
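# Example usage (an illustrative sketch, not part of the original module: the
# documents, labels, and parameter values below are assumptions).
if __name__ == "__main__":
    docs = [
        ("the team won the match with a late goal", "sports"),
        ("the striker scored twice in the cup final", "sports"),
        ("parliament approved the new budget bill", "politics"),
        ("the election results were announced today", "politics"),
        ("a penalty shootout decided the derby", ""),  # unlabeled, to classify
    ]
    classifier = DocumentClassifier(t_classifier="NB", vocab_size=50)
    classifier.classify_docs(docs)
    print("accuracy:", classifier.accuracy)
    print("classified:", classifier.classified_docs)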
|
the-stack_0_4435 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import socket
import threading
from PyQt6 import QtCore
class Socket_server():
"""
    A class for creating and listening to a socket server that connects error messages from device modules
    to a dedicated text box in the main window of the program.
"""
def start_messenger_server(self, helper):
"""
A function to create a socket server.
This function should be run in another thread in order to not
        block the execution of the main program.
"""
sock = socket.socket()
sock.bind(('', 9091))
sock.listen(2)
while True:
client, addr = sock.accept()
            threading.Thread(target=self.message, args=(helper, client), daemon=True).start()
def message(self, helper, client):
"""
A function to read a message from clients and emit a special signal with the message
        to a helper class and finally to a dedicated text box in the main window of the program.
        This function should be run in another thread in order to not
        block the execution of the main program.
"""
data = client.recv(1024).decode()
helper.changedSignal.emit(data)
#print(data)
class Helper(QtCore.QObject):
"""
A helper class to connect an event in another thread to a function in the main thread.
"""
changedSignal = QtCore.pyqtSignal(str)
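# Illustrative client helper (a sketch, not part of the original module): shows
# how a device module could push an error message to the server above. The
# function name and host default are assumptions; the port matches the server's
# bind address.
def send_error_message(text, host='localhost', port=9091):
    client = socket.socket()
    client.connect((host, port))
    client.send(text.encode())
    client.close()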
if __name__ == "__main__":
    # Minimal standalone run (a sketch): start the server with a Helper whose
    # changedSignal should be connected to a slot inside a running Qt
    # application for the messages to reach the GUI.
    helper = Helper()
    Socket_server().start_messenger_server(helper)
|
the-stack_0_4436 | #!/usr/bin/env python3
"""
Functions for finding and killing processes
"""
import os
import psutil
import signal
import time
def process_exists(pid):
"""
Determine if process 'pid' exists.
"""
try:
os.kill(pid, 0)
except ProcessLookupError:
return False # Doesn't exist
except PermissionError:
pass # Exists but not ours
return True
def kill_processes(first_args,
orphaned_only=False,
send_signal=signal.SIGINT,
wait=2.0,
retry=0.25
):
"""
Find all processes whose arguments begin with the list 'first_args'
and kill them off, first with 'send_signal' for up to 'wait' seconds
and retrying every 'retry' seconds. If, after that time, any
processes are left, send them a SIGKILL. If 'orphaned_only' is True,
only kill processes with a PPID of 1.
"""
    if not isinstance(first_args, list):
raise ValueError("Program arguments must be in a list")
len_first_args = len(first_args)
if len_first_args < 1:
raise ValueError("Must have at least one argument to match.")
pids = []
for process_item in psutil.process_iter():
process = process_item.as_dict()
if (process["cmdline"][0:len_first_args] == first_args) \
and ((not orphaned_only) or (process["ppid"] == 1)):
pids.append(process["pid"])
times = (wait / retry)
while times:
existing = 0
# Do the kills in parallel to make things more predictable
# time-wise.
for pid in pids:
if process_exists(pid):
existing += 1
os.kill(pid, send_signal)
if existing == 0:
return # Awww. Ran out of PIDdies.
time.sleep(retry)
times -= 1
# Last resort
for pid in pids:
if process_exists(pid):
os.kill(pid, signal.SIGKILL)
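# Illustrative usage (a sketch, not part of the original module; the target
# command "sleep 1000" is an assumption): terminate any orphaned processes
# whose arguments start with ["sleep", "1000"], trying SIGINT before SIGKILL.
if __name__ == "__main__":
    kill_processes(["sleep", "1000"], orphaned_only=True, wait=1.0)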
|
the-stack_0_4437 | from __future__ import absolute_import, division, print_function
import pytest
import numpy as np
from glue.core import Data
from glue.tests.helpers import requires_h5py
from ..hdf5 import hdf5_writer
DTYPES = [np.int16, np.int32, np.int64, np.float32, np.float64]
@requires_h5py
@pytest.mark.parametrize('dtype', DTYPES)
def test_hdf5_writer_data(tmpdir, dtype):
filename = tmpdir.join('test1.hdf5').strpath
data = Data(x=np.arange(6).reshape(2, 3).astype(dtype),
y=(np.arange(6) * 2).reshape(2, 3).astype(dtype))
hdf5_writer(filename, data)
from h5py import File
f = File(filename)
assert len(f) == 2
np.testing.assert_equal(f['x'][()], data['x'])
np.testing.assert_equal(f['y'][()], data['y'])
assert f['x'][()].dtype == dtype
assert f['y'][()].dtype == dtype
f.close()
# Only write out some components
filename = tmpdir.join('test2.hdf5').strpath
hdf5_writer(filename, data, components=[data.id['x']])
f = File(filename)
assert len(f) == 1
np.testing.assert_equal(f['x'][()], data['x'])
f.close()
@requires_h5py
@pytest.mark.parametrize('dtype', DTYPES)
def test_hdf5_writer_subset(tmpdir, dtype):
filename = tmpdir.join('test').strpath
data = Data(x=np.arange(6).reshape(2, 3).astype(dtype),
y=(np.arange(6) * 2).reshape(2, 3).astype(dtype))
subset = data.new_subset()
subset.subset_state = data.id['x'] > 2
hdf5_writer(filename, subset)
from h5py import File
f = File(filename)
if np.dtype(dtype).kind == 'f':
assert np.all(np.isnan(f['x'][0]))
assert np.all(np.isnan(f['y'][0]))
else:
np.testing.assert_equal(f['x'][0], 0)
np.testing.assert_equal(f['y'][0], 0)
np.testing.assert_equal(f['x'][1], data['x'][1])
np.testing.assert_equal(f['y'][1], data['y'][1])
assert f['x'][()].dtype == dtype
assert f['y'][()].dtype == dtype
f.close()
|
the-stack_0_4438 | import torch
import torch.nn as nn
from torchvision import models
class BaseModel_scratch(nn.Module):
def __init__(self, model_name, eps=3, num_classes=200, init_weights=True):
super().__init__()
if model_name == 'vgg16bn':
backbone = nn.Sequential(*list(models.vgg16_bn(pretrained=False).features.children())[:-4])
last_conv = nn.Sequential(
nn.Conv2d(512, num_classes * eps, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(num_classes * eps),
nn.ReLU(True),
nn.AvgPool2d(kernel_size=1, stride=1, padding=0)
)
else:
backbone = None
last_conv = None
self.backbone = backbone
self.last_conv = last_conv
self.maxpool = nn.MaxPool2d(kernel_size=2, stride=2)
if init_weights:
self._initialize_weights()
def forward(self, x):
feat = self.backbone(x)
feat = self.last_conv(feat)
out = self.maxpool(feat)
out = out.view(out.size(0), -1)
return feat, out
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
nn.init.constant_(m.bias, 0)
if __name__ == '__main__':
model = BaseModel_scratch('vgg16bn')
print(model)
inp = torch.randn((3, 3, 224, 224))
a, b = model(inp)
print(a.size())
print(b.size())
|
the-stack_0_4439 | # -*- coding: utf-8 -*-
# Copyright 2013-2021 CERN
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# - Vincent Garonne <[email protected]>, 2013-2016
# - Cedric Serfon <[email protected]>, 2014-2019
# - Thomas Beermann <[email protected]>, 2014
# - Mario Lassnig <[email protected]>, 2017-2019
# - Hannes Hansen <[email protected]>, 2018-2019
# - Martin Barisits <[email protected]>, 2019-2021
# - Andrew Lister <[email protected]>, 2019
# - Ilija Vukotic <[email protected]>, 2020
# - Luc Goossens <[email protected]>, 2020
# - Patrick Austin <[email protected]>, 2020
# - Eric Vaandering <[email protected]>, 2020
# - Benedikt Ziemons <[email protected]>, 2020
# - James Perry <[email protected]>, 2020
# - Radu Carpa <[email protected]>, 2021
from rucio.api import permission
from rucio.db.sqla.constants import BadFilesStatus
from rucio.core import replica
from rucio.core.rse import get_rse_id, get_rse_name
from rucio.common import exception
from rucio.common.schema import validate_schema
from rucio.common.types import InternalAccount, InternalScope
from rucio.common.utils import api_update_return_dict
def get_bad_replicas_summary(rse_expression=None, from_date=None, to_date=None, vo='def'):
"""
List the bad file replicas summary. Method used by the rucio-ui.
:param rse_expression: The RSE expression.
:param from_date: The start date.
:param to_date: The end date.
:param vo: the VO to act on.
"""
replicas = replica.get_bad_replicas_summary(rse_expression=rse_expression, from_date=from_date, to_date=to_date, filter_={'vo': vo})
return [api_update_return_dict(r) for r in replicas]
def list_bad_replicas_status(state=BadFilesStatus.BAD, rse=None, younger_than=None, older_than=None, limit=None, list_pfns=False, vo='def'):
"""
List the bad file replicas history states. Method used by the rucio-ui.
:param state: The state of the file (SUSPICIOUS or BAD).
:param rse: The RSE name.
:param younger_than: datetime object to select bad replicas younger than this date.
:param older_than: datetime object to select bad replicas older than this date.
:param limit: The maximum number of replicas returned.
:param vo: The VO to act on.
"""
rse_id = None
if rse is not None:
rse_id = get_rse_id(rse=rse, vo=vo)
replicas = replica.list_bad_replicas_status(state=state, rse_id=rse_id, younger_than=younger_than,
older_than=older_than, limit=limit, list_pfns=list_pfns, vo=vo)
return [api_update_return_dict(r) for r in replicas]
def declare_bad_file_replicas(pfns, reason, issuer, vo='def'):
"""
Declare a list of bad replicas.
:param pfns: The list of PFNs.
:param reason: The reason of the loss.
:param issuer: The issuer account.
:param vo: The VO to act on.
"""
kwargs = {}
if not permission.has_permission(issuer=issuer, vo=vo, action='declare_bad_file_replicas', kwargs=kwargs):
raise exception.AccessDenied('Account %s can not declare bad replicas' % (issuer))
issuer = InternalAccount(issuer, vo=vo)
replicas = replica.declare_bad_file_replicas(pfns=pfns, reason=reason, issuer=issuer, status=BadFilesStatus.BAD)
for k in list(replicas):
try:
rse = get_rse_name(rse_id=k)
replicas[rse] = replicas.pop(k)
except exception.RSENotFound:
pass
return replicas
def declare_suspicious_file_replicas(pfns, reason, issuer, vo='def'):
"""
    Declare a list of suspicious replicas.
:param pfns: The list of PFNs.
:param reason: The reason of the loss.
:param issuer: The issuer account.
:param vo: The VO to act on.
"""
kwargs = {}
if not permission.has_permission(issuer=issuer, vo=vo, action='declare_suspicious_file_replicas', kwargs=kwargs):
raise exception.AccessDenied('Account %s can not declare suspicious replicas' % (issuer))
issuer = InternalAccount(issuer, vo=vo)
replicas = replica.declare_bad_file_replicas(pfns=pfns, reason=reason, issuer=issuer, status=BadFilesStatus.SUSPICIOUS)
for k in list(replicas):
try:
rse = get_rse_name(rse_id=k)
replicas[rse] = replicas.pop(k)
except exception.RSENotFound:
pass
return replicas
def get_did_from_pfns(pfns, rse, vo='def'):
"""
Get the DIDs associated to a PFN on one given RSE
:param pfns: The list of PFNs.
:param rse: The RSE name.
:param vo: The VO to act on.
:returns: A dictionary {pfn: {'scope': scope, 'name': name}}
"""
rse_id = get_rse_id(rse=rse, vo=vo)
replicas = replica.get_did_from_pfns(pfns=pfns, rse_id=rse_id, vo=vo)
for r in replicas:
for k in r.keys():
r[k]['scope'] = r[k]['scope'].external
yield r
def list_replicas(dids, schemes=None, unavailable=False, request_id=None,
ignore_availability=True, all_states=False, rse_expression=None,
client_location=None, domain=None, signature_lifetime=None,
resolve_archives=True, resolve_parents=False,
nrandom=None, updated_after=None,
issuer=None, vo='def'):
"""
List file replicas for a list of data identifiers.
:param dids: The list of data identifiers (DIDs).
:param schemes: A list of schemes to filter the replicas. (e.g. file, http, ...)
:param unavailable: (deprecated) Also include unavailable replicas in the list.
:param request_id: ID associated with the request for debugging.
:param all_states: Return all replicas whatever state they are in. Adds an extra 'states' entry in the result dictionary.
:param rse_expression: The RSE expression to restrict replicas on a set of RSEs.
:param client_location: Client location dictionary for PFN modification {'ip', 'fqdn', 'site', 'latitude', 'longitude'}
:param domain: The network domain for the call, either None, 'wan' or 'lan'. Compatibility fallback: None falls back to 'wan'.
:param signature_lifetime: If supported, in seconds, restrict the lifetime of the signed PFN.
:param resolve_archives: When set to True, find archives which contain the replicas.
:param resolve_parents: When set to True, find all parent datasets which contain the replicas.
:param updated_after: datetime object (UTC time), only return replicas updated after this time
:param issuer: The issuer account.
:param vo: The VO to act on.
"""
validate_schema(name='r_dids', obj=dids, vo=vo)
# Allow selected authenticated users to retrieve signed URLs.
# Unauthenticated users, or permission-less users will get the raw URL without the signature.
sign_urls = False
if permission.has_permission(issuer=issuer, vo=vo, action='get_signed_url', kwargs={}):
sign_urls = True
for d in dids:
d['scope'] = InternalScope(d['scope'], vo=vo)
replicas = replica.list_replicas(dids=dids, schemes=schemes, unavailable=unavailable,
request_id=request_id,
ignore_availability=ignore_availability,
all_states=all_states, rse_expression=rse_expression,
client_location=client_location, domain=domain,
sign_urls=sign_urls, signature_lifetime=signature_lifetime,
resolve_archives=resolve_archives, resolve_parents=resolve_parents,
nrandom=nrandom, updated_after=updated_after)
for rep in replicas:
# 'rses' and 'states' use rse_id as the key. This needs updating to be rse.
keys = ['rses', 'states']
for k in keys:
old_dict = rep.get(k, None)
if old_dict is not None:
new_dict = {}
for rse_id in old_dict:
rse = get_rse_name(rse_id=rse_id) if rse_id is not None else None
new_dict[rse] = old_dict[rse_id]
rep[k] = new_dict
rep['scope'] = rep['scope'].external
if 'parents' in rep:
new_parents = []
for p in rep['parents']:
scope, name = p.split(':')
scope = InternalScope(scope, fromExternal=False).external
new_parents.append('{}:{}'.format(scope, name))
rep['parents'] = new_parents
yield rep
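# Illustrative example (a sketch, not part of the Rucio API module; the DID and
# account below are assumptions): listing root-protocol replicas of one file.
#
#     for rep in list_replicas(dids=[{'scope': 'user.jdoe', 'name': 'file.root'}],
#                              schemes=['root'], issuer='jdoe'):
#         print(rep['pfns'])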
def add_replicas(rse, files, issuer, ignore_availability=False, vo='def'):
"""
Bulk add file replicas.
:param rse: The RSE name.
:param files: The list of files.
:param issuer: The issuer account.
:param ignore_availability: Ignore blocked RSEs.
:param vo: The VO to act on.
:returns: True is successful, False otherwise
"""
for v_file in files:
v_file.update({"type": "FILE"}) # Make sure DIDs are identified as files for checking
validate_schema(name='dids', obj=files, vo=vo)
rse_id = get_rse_id(rse=rse, vo=vo)
kwargs = {'rse': rse, 'rse_id': rse_id}
if not permission.has_permission(issuer=issuer, vo=vo, action='add_replicas', kwargs=kwargs):
raise exception.AccessDenied('Account %s can not add file replicas on %s' % (issuer, rse))
if not permission.has_permission(issuer=issuer, vo=vo, action='skip_availability_check', kwargs=kwargs):
ignore_availability = False
issuer = InternalAccount(issuer, vo=vo)
for f in files:
f['scope'] = InternalScope(f['scope'], vo=vo)
if 'account' in f:
f['account'] = InternalAccount(f['account'], vo=vo)
replica.add_replicas(rse_id=rse_id, files=files, account=issuer, ignore_availability=ignore_availability)
def delete_replicas(rse, files, issuer, ignore_availability=False, vo='def'):
"""
Bulk delete file replicas.
:param rse: The RSE name.
:param files: The list of files.
:param issuer: The issuer account.
:param ignore_availability: Ignore blocked RSEs.
:param vo: The VO to act on.
:returns: True is successful, False otherwise
"""
validate_schema(name='r_dids', obj=files, vo=vo)
rse_id = get_rse_id(rse=rse, vo=vo)
kwargs = {'rse': rse, 'rse_id': rse_id}
if not permission.has_permission(issuer=issuer, vo=vo, action='delete_replicas', kwargs=kwargs):
raise exception.AccessDenied('Account %s can not delete file replicas on %s' % (issuer, rse))
if not permission.has_permission(issuer=issuer, vo=vo, action='skip_availability_check', kwargs=kwargs):
ignore_availability = False
for f in files:
f['scope'] = InternalScope(f['scope'], vo=vo)
replica.delete_replicas(rse_id=rse_id, files=files, ignore_availability=ignore_availability)
def update_replicas_states(rse, files, issuer, vo='def'):
"""
Update File replica information and state.
:param rse: The RSE name.
:param files: The list of files.
:param issuer: The issuer account.
:param vo: The VO to act on.
"""
for v_file in files:
v_file.update({"type": "FILE"}) # Make sure DIDs are identified as files for checking
validate_schema(name='dids', obj=files, vo=vo)
rse_id = get_rse_id(rse=rse, vo=vo)
kwargs = {'rse': rse, 'rse_id': rse_id}
if not permission.has_permission(issuer=issuer, vo=vo, action='update_replicas_states', kwargs=kwargs):
raise exception.AccessDenied('Account %s can not update file replicas state on %s' % (issuer, rse))
replicas = []
for file in files:
rep = file
rep['rse_id'] = rse_id
rep['scope'] = InternalScope(rep['scope'], vo=vo)
replicas.append(rep)
replica.update_replicas_states(replicas=replicas)
def list_dataset_replicas(scope, name, deep=False, vo='def'):
"""
:param scope: The scope of the dataset.
:param name: The name of the dataset.
:param deep: Lookup at the file level.
:param vo: The VO to act on.
:returns: A list of dict dataset replicas
"""
scope = InternalScope(scope, vo=vo)
replicas = replica.list_dataset_replicas(scope=scope, name=name, deep=deep)
for r in replicas:
r['scope'] = r['scope'].external
yield r
def list_dataset_replicas_bulk(dids, vo='def'):
"""
:param dids: The list of did dictionaries with scope and name.
:param vo: The VO to act on.
:returns: A list of dict dataset replicas
"""
validate_schema(name='r_dids', obj=dids, vo=vo)
names_by_scope = dict()
for d in dids:
if d['scope'] in names_by_scope:
names_by_scope[d['scope']].append(d['name'])
else:
names_by_scope[d['scope']] = [d['name'], ]
names_by_intscope = dict()
for scope in names_by_scope:
internal_scope = InternalScope(scope, vo=vo)
names_by_intscope[internal_scope] = names_by_scope[scope]
replicas = replica.list_dataset_replicas_bulk(names_by_intscope)
for r in replicas:
yield api_update_return_dict(r)
def list_dataset_replicas_vp(scope, name, deep=False, vo='def'):
"""
:param scope: The scope of the dataset.
:param name: The name of the dataset.
:param deep: Lookup at the file level.
:param vo: The vo to act on.
:returns: If VP exists a list of dicts of sites, otherwise nothing
NOTICE: This is an RnD function and might change or go away at any time.
"""
scope = InternalScope(scope, vo=vo)
for r in replica.list_dataset_replicas_vp(scope=scope, name=name, deep=deep):
yield api_update_return_dict(r)
def list_datasets_per_rse(rse, filters={}, limit=None, vo='def'):
"""
    List datasets at a given RSE.
    :param rse: The RSE name.
    :param filters: dictionary of attributes by which the results should be filtered.
    :param limit: limit number.
:param vo: The VO to act on.
:returns: A list of dict dataset replicas
"""
rse_id = get_rse_id(rse=rse, vo=vo)
if 'scope' in filters:
filters['scope'] = InternalScope(filters['scope'], vo=vo)
for r in replica.list_datasets_per_rse(rse_id, filters=filters, limit=limit):
yield api_update_return_dict(r)
def add_bad_pfns(pfns, issuer, state, reason=None, expires_at=None, vo='def'):
"""
Add bad PFNs.
:param pfns: the list of new files.
:param issuer: The issuer account.
:param state: One of the possible states : BAD, SUSPICIOUS, TEMPORARY_UNAVAILABLE.
:param reason: A string describing the reason of the loss.
:param expires_at: Specify a timeout for the TEMPORARY_UNAVAILABLE replicas. None for BAD files.
:param vo: The VO to act on.
:param session: The database session in use.
:returns: True is successful.
"""
kwargs = {'state': state}
if not permission.has_permission(issuer=issuer, vo=vo, action='add_bad_pfns', kwargs=kwargs):
raise exception.AccessDenied('Account %s can not declare bad PFNs' % (issuer))
issuer = InternalAccount(issuer, vo=vo)
return replica.add_bad_pfns(pfns=pfns, account=issuer, state=state, reason=reason, expires_at=expires_at)
def add_bad_dids(dids, rse, issuer, state, reason=None, expires_at=None, vo='def'):
"""
Add bad replica entries for DIDs.
:param dids: the list of dids with bad replicas at rse.
:param rse: the rse with the bad replicas.
:param issuer: The issuer account.
:param state: One of the possible states : BAD
:param reason: A string describing the reason of the loss.
:param expires_at: None
:param vo: The VO to act on.
:returns: The list of replicas not declared bad
"""
kwargs = {'state': state}
if not permission.has_permission(issuer=issuer, vo=vo, action='add_bad_pfns', kwargs=kwargs):
raise exception.AccessDenied('Account %s can not declare bad PFN or DIDs' % issuer)
issuer = InternalAccount(issuer, vo=vo)
    rse_id = get_rse_id(rse=rse, vo=vo)
return replica.add_bad_dids(dids=dids, rse_id=rse_id, reason=reason, issuer=issuer, state=state)
def get_suspicious_files(rse_expression, younger_than=None, nattempts=None, vo='def'):
"""
List the list of suspicious files on a list of RSEs
:param rse_expression: The RSE expression where the suspicious files are located
:param younger_than: datetime object to select the suspicious replicas younger than this date.
:param nattempts: The number of time the replicas have been declared suspicious
:param vo: The VO to act on.
"""
replicas = replica.get_suspicious_files(rse_expression=rse_expression, younger_than=younger_than, nattempts=nattempts, filter_={'vo': vo})
return [api_update_return_dict(r) for r in replicas]
def set_tombstone(rse, scope, name, issuer, vo='def'):
"""
Sets a tombstone on one replica.
:param rse: name of the RSE.
:param scope: scope of the replica DID.
:param name: name of the replica DID.
:param issuer: The issuer account
:param vo: The VO to act on.
"""
rse_id = get_rse_id(rse, vo=vo)
if not permission.has_permission(issuer=issuer, vo=vo, action='set_tombstone', kwargs={}):
raise exception.AccessDenied('Account %s can not set tombstones' % (issuer))
scope = InternalScope(scope, vo=vo)
replica.set_tombstone(rse_id, scope, name)
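# Illustrative example (a sketch, not part of the Rucio API module; the PFN,
# reason, and account below are assumptions): declaring one replica as bad.
#
#     declare_bad_file_replicas(
#         pfns=['root://example-rse.cern.ch//store/data/file.root'],
#         reason='checksum mismatch',
#         issuer='root',
#         vo='def',
#     )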
|
the-stack_0_4440 | # Copyright (C) 2020 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Test Access Control roles propagation."""
from ggrc.models import all_models, get_model
from integration.ggrc import TestCase
from integration.ggrc.models import factories
from integration.ggrc_basic_permissions.models \
import factories as rbac_factories
class TestACLPropagation(TestCase):
"""TestACLPropagation base class."""
GLOBAL_ROLES = ["Creator", "Reader", "Editor", "Administrator"]
SUCCESS = 200
SUCCESS_CREATED = 201
FORBIDDEN = 403
ACCESS_ERROR = ("Response for current operation has wrong status. {} "
"expected, {} received.")
ACCESS_QUERY_API_ERROR = ("Current operation has wrong result. {} "
"expected, {} received. ({} count={})")
CAN_NOT_READ_ERROR = ("{} objects weren't read. Non-zero object count "
"expected.")
CAN_READ_ERROR = "Some {} objects were read. No objects expected."
READ_COLLECTION_OPERATIONS = ["read_revisions", "get_latest_version"]
QUERY_API_OPERATIONS = ["read_comments", "read_document_comments"]
def setup_people(self):
"""Setup people with global roles."""
# pylint: disable=attribute-defined-outside-init
roles_query = all_models.Role.query.filter(
all_models.Role.name.in_(self.GLOBAL_ROLES)
)
global_roles = {role.name: role for role in roles_query}
self.people = {}
with factories.single_commit():
for role_name in self.GLOBAL_ROLES:
user = factories.PersonFactory()
self.people[role_name] = user
rbac_factories.UserRoleFactory(
role=global_roles[role_name],
person=user
)
def assert_read_collection(self, response, expected_res, model):
"""Check collection read operation.
Args:
response(TestResponse): Received operation response.
expected_res: Boolean flag showing if objects should be read or not.
model: Model name.
"""
self.assertStatus(response, self.SUCCESS)
table_plural = get_model(model)._inflector.table_plural
response_data = response.json.get("{}_collection".format(table_plural), {})
assert_raises = False
if isinstance(expected_res, tuple):
expected_res, assert_raises = expected_res
if expected_res:
err = self.CAN_NOT_READ_ERROR.format(model)
else:
err = self.CAN_READ_ERROR.format(model)
if assert_raises == "unimplemented":
with self.assertRaises(AssertionError):
self.assertEqual(
bool(response_data.get(table_plural)),
expected_res,
err,
)
else:
self.assertEqual(
bool(response_data.get(table_plural)),
expected_res,
err,
)
def assert_status(self, response, expected_res):
"""Check correctness of response status.
Args:
response: Response instance.
expected_res: Boolean flag. If True 200/201 status expected, if False
403 status expected.
"""
assert_raises = False
if isinstance(expected_res, tuple):
expected_res, assert_raises = expected_res
success_statuses = [self.SUCCESS, self.SUCCESS_CREATED]
exp_statuses = success_statuses if expected_res else [self.FORBIDDEN]
if assert_raises:
with self.assertRaises(AssertionError):
self.assertIn(
response.status_code,
exp_statuses,
self.ACCESS_ERROR.format(exp_statuses[0], response.status_code)
)
else:
self.assertIn(
response.status_code,
exp_statuses,
self.ACCESS_ERROR.format(exp_statuses[0], response.status_code)
)
def assert_query_api_response(self, response, expected_res):
"""Check correctness of query API response.
Args:
response: query api result of action execution.
expected_res: Boolean flag.
"""
for resp_item in response.json:
for obj, resp in resp_item.iteritems():
res = bool(resp['count'])
self.assertEqual(res, expected_res,
self.ACCESS_QUERY_API_ERROR.format(expected_res,
res, obj,
resp['count']))
def assert_result(self, response, expected_res, operation, model):
"""Check correctness of response result.
Args:
response: Response instance.
expected_res: Boolean flag that show if response should be succeeded.
operation: Action name.
model: Model name.
"""
    # Snapshot is a special case. All operations with it
    # are done through Revisions.
model = "Revision" if model == "Snapshot" else model
# Some operations based on several requests and responses,
# need to verify all of these responses
responses = response if isinstance(response, list) else [response]
for res in responses:
if operation in self.READ_COLLECTION_OPERATIONS:
self.assert_read_collection(res, expected_res, model)
elif operation in self.QUERY_API_OPERATIONS:
self.assert_query_api_response(res, expected_res)
else:
self.assert_status(res, expected_res)
def runtest(self, role, model, action_name, expected_result, **kwargs):
"""Run integration RBAC test.
Args:
role: Global user role (Creator/Reader/Editor).
model: Model that should be tested (Audit/Assessment/...).
action_name: Action that should be tested (read/update/delete/...).
expected_result: Boolean expected result of action.
"""
model_name, parent = model, None
if " " in model:
model_name, parent = model.split(" ")
rbac_factory = self.init_factory(role, model_name, parent, **kwargs)
if not rbac_factory:
raise Exception("There is no factory for model '{}'".format(model_name))
action = getattr(rbac_factory, action_name, None)
if not action:
raise NotImplementedError(
"Action {} is not implemented for this test.".format(action_name)
)
response = action()
self.assert_result(response, expected_result, action_name, model_name)
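# Illustrative usage in a subclass (a sketch; init_factory is supplied by the
# concrete test classes, and the factory class named below is an assumption):
#
#     class TestAssessmentRBAC(TestACLPropagation):
#         def init_factory(self, role, model, parent, **kwargs):
#             self.setup_people()
#             return rbac_factories.AssessmentRBACFactory(
#                 self.people[role].id, parent=parent, **kwargs)
#
#         def test_reader_can_read_assessment(self):
#             self.runtest("Reader", "Assessment Audit", "read", True)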
|
the-stack_0_4441 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Constructs model, inputs, and training environment."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import functools
import os
import tensorflow as tf
from object_detection import eval_util
from object_detection import exporter as exporter_lib
from object_detection import inputs
from object_detection.builders import graph_rewriter_builder
from object_detection.builders import model_builder
from object_detection.builders import optimizer_builder
from object_detection.core import standard_fields as fields
from object_detection.utils import config_util
from object_detection.utils import label_map_util
from object_detection.utils import ops
from object_detection.utils import shape_utils
from object_detection.utils import variables_helper
from object_detection.utils import visualization_utils as vis_utils
# A map of names to methods that help build the model.
MODEL_BUILD_UTIL_MAP = {
'get_configs_from_pipeline_file':
config_util.get_configs_from_pipeline_file,
'create_pipeline_proto_from_configs':
config_util.create_pipeline_proto_from_configs,
'merge_external_params_with_configs':
config_util.merge_external_params_with_configs,
'create_train_input_fn':
inputs.create_train_input_fn,
'create_eval_input_fn':
inputs.create_eval_input_fn,
'create_predict_input_fn':
inputs.create_predict_input_fn,
'detection_model_fn_base': model_builder.build,
}
def _prepare_groundtruth_for_eval(detection_model, class_agnostic,
max_number_of_boxes):
"""Extracts groundtruth data from detection_model and prepares it for eval.
Args:
detection_model: A `DetectionModel` object.
class_agnostic: Whether the detections are class_agnostic.
max_number_of_boxes: Max number of groundtruth boxes.
Returns:
A tuple of:
groundtruth: Dictionary with the following fields:
'groundtruth_boxes': [batch_size, num_boxes, 4] float32 tensor of boxes,
in normalized coordinates.
'groundtruth_classes': [batch_size, num_boxes] int64 tensor of 1-indexed
classes.
'groundtruth_masks': 4D float32 tensor of instance masks (if provided in
groundtruth)
'groundtruth_is_crowd': [batch_size, num_boxes] bool tensor indicating
is_crowd annotations (if provided in groundtruth).
'num_groundtruth_boxes': [batch_size] tensor containing the maximum number
        of groundtruth boxes per image.
class_agnostic: Boolean indicating whether detections are class agnostic.
"""
input_data_fields = fields.InputDataFields()
groundtruth_boxes = tf.stack(
detection_model.groundtruth_lists(fields.BoxListFields.boxes))
groundtruth_boxes_shape = tf.shape(groundtruth_boxes)
# For class-agnostic models, groundtruth one-hot encodings collapse to all
# ones.
if class_agnostic:
groundtruth_classes_one_hot = tf.ones(
[groundtruth_boxes_shape[0], groundtruth_boxes_shape[1], 1])
else:
groundtruth_classes_one_hot = tf.stack(
detection_model.groundtruth_lists(fields.BoxListFields.classes))
label_id_offset = 1 # Applying label id offset (b/63711816)
groundtruth_classes = (
tf.argmax(groundtruth_classes_one_hot, axis=2) + label_id_offset)
groundtruth = {
input_data_fields.groundtruth_boxes: groundtruth_boxes,
input_data_fields.groundtruth_classes: groundtruth_classes
}
if detection_model.groundtruth_has_field(fields.BoxListFields.masks):
groundtruth[input_data_fields.groundtruth_instance_masks] = tf.stack(
detection_model.groundtruth_lists(fields.BoxListFields.masks))
if detection_model.groundtruth_has_field(fields.BoxListFields.is_crowd):
groundtruth[input_data_fields.groundtruth_is_crowd] = tf.stack(
detection_model.groundtruth_lists(fields.BoxListFields.is_crowd))
groundtruth[input_data_fields.num_groundtruth_boxes] = (
tf.tile([max_number_of_boxes], multiples=[groundtruth_boxes_shape[0]]))
return groundtruth
def unstack_batch(tensor_dict, unpad_groundtruth_tensors=True):
"""Unstacks all tensors in `tensor_dict` along 0th dimension.
Unstacks tensor from the tensor dict along 0th dimension and returns a
tensor_dict containing values that are lists of unstacked, unpadded tensors.
Tensors in the `tensor_dict` are expected to be of one of the three shapes:
1. [batch_size]
2. [batch_size, height, width, channels]
3. [batch_size, num_boxes, d1, d2, ... dn]
When unpad_groundtruth_tensors is set to true, unstacked tensors of form 3
  above are sliced along the `num_boxes` dimension using the value in the
  tensor fields.InputDataFields.num_groundtruth_boxes.
Note that this function has a static list of input data fields and has to be
kept in sync with the InputDataFields defined in core/standard_fields.py
Args:
tensor_dict: A dictionary of batched groundtruth tensors.
unpad_groundtruth_tensors: Whether to remove padding along `num_boxes`
dimension of the groundtruth tensors.
Returns:
A dictionary where the keys are from fields.InputDataFields and values are
a list of unstacked (optionally unpadded) tensors.
Raises:
ValueError: If unpad_tensors is True and `tensor_dict` does not contain
`num_groundtruth_boxes` tensor.
"""
unbatched_tensor_dict = {
key: tf.unstack(tensor) for key, tensor in tensor_dict.items()
}
if unpad_groundtruth_tensors:
if (fields.InputDataFields.num_groundtruth_boxes not in
unbatched_tensor_dict):
raise ValueError('`num_groundtruth_boxes` not found in tensor_dict. '
'Keys available: {}'.format(
unbatched_tensor_dict.keys()))
unbatched_unpadded_tensor_dict = {}
unpad_keys = set([
# List of input data fields that are padded along the num_boxes
# dimension. This list has to be kept in sync with InputDataFields in
# standard_fields.py.
fields.InputDataFields.groundtruth_instance_masks,
fields.InputDataFields.groundtruth_classes,
fields.InputDataFields.groundtruth_boxes,
fields.InputDataFields.groundtruth_keypoints,
fields.InputDataFields.groundtruth_group_of,
fields.InputDataFields.groundtruth_difficult,
fields.InputDataFields.groundtruth_is_crowd,
fields.InputDataFields.groundtruth_area,
fields.InputDataFields.groundtruth_weights
]).intersection(set(unbatched_tensor_dict.keys()))
for key in unpad_keys:
unpadded_tensor_list = []
for num_gt, padded_tensor in zip(
unbatched_tensor_dict[fields.InputDataFields.num_groundtruth_boxes],
unbatched_tensor_dict[key]):
tensor_shape = shape_utils.combined_static_and_dynamic_shape(
padded_tensor)
slice_begin = tf.zeros([len(tensor_shape)], dtype=tf.int32)
slice_size = tf.stack(
[num_gt] + [-1 if dim is None else dim for dim in tensor_shape[1:]])
unpadded_tensor = tf.slice(padded_tensor, slice_begin, slice_size)
unpadded_tensor_list.append(unpadded_tensor)
unbatched_unpadded_tensor_dict[key] = unpadded_tensor_list
unbatched_tensor_dict.update(unbatched_unpadded_tensor_dict)
return unbatched_tensor_dict
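# Illustrative example (a sketch, not part of this module): for a batch of two
# images padded to two boxes each, where only the first box of image 0 is real,
#
#     tensor_dict = {
#         fields.InputDataFields.num_groundtruth_boxes: tf.constant([1, 2]),
#         fields.InputDataFields.groundtruth_boxes: tf.zeros([2, 2, 4]),
#         fields.InputDataFields.groundtruth_classes: tf.zeros([2, 2, 3]),
#     }
#     unbatched = unstack_batch(tensor_dict)
#
# each value in `unbatched` is a list of two per-image tensors, with the
# groundtruth boxes sliced to shapes [1, 4] and [2, 4] respectively.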
def provide_groundtruth(model, labels):
"""Provides the labels to a model as groundtruth.
This helper function extracts the corresponding boxes, classes,
keypoints, weights, masks, etc. from the labels, and provides it
as groundtruth to the models.
Args:
model: The detection model to provide groundtruth to.
labels: The labels for the training or evaluation inputs.
"""
gt_boxes_list = labels[fields.InputDataFields.groundtruth_boxes]
gt_classes_list = labels[fields.InputDataFields.groundtruth_classes]
gt_masks_list = None
if fields.InputDataFields.groundtruth_instance_masks in labels:
gt_masks_list = labels[
fields.InputDataFields.groundtruth_instance_masks]
gt_keypoints_list = None
if fields.InputDataFields.groundtruth_keypoints in labels:
gt_keypoints_list = labels[fields.InputDataFields.groundtruth_keypoints]
gt_weights_list = None
if fields.InputDataFields.groundtruth_weights in labels:
gt_weights_list = labels[fields.InputDataFields.groundtruth_weights]
gt_confidences_list = None
if fields.InputDataFields.groundtruth_confidences in labels:
gt_confidences_list = labels[
fields.InputDataFields.groundtruth_confidences]
gt_is_crowd_list = None
if fields.InputDataFields.groundtruth_is_crowd in labels:
gt_is_crowd_list = labels[fields.InputDataFields.groundtruth_is_crowd]
model.provide_groundtruth(
groundtruth_boxes_list=gt_boxes_list,
groundtruth_classes_list=gt_classes_list,
groundtruth_confidences_list=gt_confidences_list,
groundtruth_masks_list=gt_masks_list,
groundtruth_keypoints_list=gt_keypoints_list,
groundtruth_weights_list=gt_weights_list,
groundtruth_is_crowd_list=gt_is_crowd_list)
def create_model_fn(detection_model_fn, configs, hparams, use_tpu=False,
postprocess_on_cpu=False):
"""Creates a model function for `Estimator`.
Args:
detection_model_fn: Function that returns a `DetectionModel` instance.
configs: Dictionary of pipeline config objects.
hparams: `HParams` object.
use_tpu: Boolean indicating whether model should be constructed for
use on TPU.
postprocess_on_cpu: When use_tpu and postprocess_on_cpu is true, postprocess
is scheduled on the host cpu.
Returns:
`model_fn` for `Estimator`.
"""
train_config = configs['train_config']
eval_input_config = configs['eval_input_config']
eval_config = configs['eval_config']
def model_fn(features, labels, mode, params=None):
"""Constructs the object detection model.
Args:
features: Dictionary of feature tensors, returned from `input_fn`.
labels: Dictionary of groundtruth tensors if mode is TRAIN or EVAL,
otherwise None.
mode: Mode key from tf.estimator.ModeKeys.
params: Parameter dictionary passed from the estimator.
Returns:
An `EstimatorSpec` that encapsulates the model and its serving
configurations.
"""
params = params or {}
total_loss, train_op, detections, export_outputs = None, None, None, None
is_training = mode == tf.estimator.ModeKeys.TRAIN
# Make sure to set the Keras learning phase. True during training,
# False for inference.
tf.keras.backend.set_learning_phase(is_training)
detection_model = detection_model_fn(
is_training=is_training, add_summaries=(not use_tpu))
scaffold_fn = None
if mode == tf.estimator.ModeKeys.TRAIN:
labels = unstack_batch(
labels,
unpad_groundtruth_tensors=train_config.unpad_groundtruth_tensors)
elif mode == tf.estimator.ModeKeys.EVAL:
# For evaling on train data, it is necessary to check whether groundtruth
# must be unpadded.
boxes_shape = (
labels[fields.InputDataFields.groundtruth_boxes].get_shape()
.as_list())
unpad_groundtruth_tensors = boxes_shape[1] is not None and not use_tpu
labels = unstack_batch(
labels, unpad_groundtruth_tensors=unpad_groundtruth_tensors)
if mode in (tf.estimator.ModeKeys.TRAIN, tf.estimator.ModeKeys.EVAL):
provide_groundtruth(detection_model, labels)
preprocessed_images = features[fields.InputDataFields.image]
if use_tpu and train_config.use_bfloat16:
with tf.contrib.tpu.bfloat16_scope():
prediction_dict = detection_model.predict(
preprocessed_images,
features[fields.InputDataFields.true_image_shape])
prediction_dict = ops.bfloat16_to_float32_nested(prediction_dict)
else:
prediction_dict = detection_model.predict(
preprocessed_images,
features[fields.InputDataFields.true_image_shape])
def postprocess_wrapper(args):
return detection_model.postprocess(args[0], args[1])
if mode in (tf.estimator.ModeKeys.EVAL, tf.estimator.ModeKeys.PREDICT):
if use_tpu and postprocess_on_cpu:
detections = tf.contrib.tpu.outside_compilation(
postprocess_wrapper,
(prediction_dict,
features[fields.InputDataFields.true_image_shape]))
else:
detections = postprocess_wrapper((
prediction_dict,
features[fields.InputDataFields.true_image_shape]))
if mode == tf.estimator.ModeKeys.TRAIN:
if train_config.fine_tune_checkpoint and hparams.load_pretrained:
if not train_config.fine_tune_checkpoint_type:
# train_config.from_detection_checkpoint field is deprecated. For
# backward compatibility, set train_config.fine_tune_checkpoint_type
# based on train_config.from_detection_checkpoint.
if train_config.from_detection_checkpoint:
train_config.fine_tune_checkpoint_type = 'detection'
else:
train_config.fine_tune_checkpoint_type = 'classification'
asg_map = detection_model.restore_map(
fine_tune_checkpoint_type=train_config.fine_tune_checkpoint_type,
load_all_detection_checkpoint_vars=(
train_config.load_all_detection_checkpoint_vars))
available_var_map = (
variables_helper.get_variables_available_in_checkpoint(
asg_map,
train_config.fine_tune_checkpoint,
include_global_step=False))
if use_tpu:
def tpu_scaffold():
tf.train.init_from_checkpoint(train_config.fine_tune_checkpoint,
available_var_map)
return tf.train.Scaffold()
scaffold_fn = tpu_scaffold
else:
tf.train.init_from_checkpoint(train_config.fine_tune_checkpoint,
available_var_map)
if mode in (tf.estimator.ModeKeys.TRAIN, tf.estimator.ModeKeys.EVAL):
losses_dict = detection_model.loss(
prediction_dict, features[fields.InputDataFields.true_image_shape])
losses = [loss_tensor for loss_tensor in losses_dict.values()]
if train_config.add_regularization_loss:
regularization_losses = detection_model.regularization_losses()
if use_tpu and train_config.use_bfloat16:
regularization_losses = ops.bfloat16_to_float32_nested(
regularization_losses)
if regularization_losses:
regularization_loss = tf.add_n(
regularization_losses, name='regularization_loss')
losses.append(regularization_loss)
losses_dict['Loss/regularization_loss'] = regularization_loss
total_loss = tf.add_n(losses, name='total_loss')
losses_dict['Loss/total_loss'] = total_loss
if 'graph_rewriter_config' in configs:
graph_rewriter_fn = graph_rewriter_builder.build(
configs['graph_rewriter_config'], is_training=is_training)
graph_rewriter_fn()
# TODO(rathodv): Stop creating optimizer summary vars in EVAL mode once we
# can write learning rate summaries on TPU without host calls.
global_step = tf.train.get_or_create_global_step()
training_optimizer, optimizer_summary_vars = optimizer_builder.build(
train_config.optimizer)
if mode == tf.estimator.ModeKeys.TRAIN:
if use_tpu:
training_optimizer = tf.contrib.tpu.CrossShardOptimizer(
training_optimizer)
# Optionally freeze some layers by setting their gradients to be zero.
trainable_variables = None
include_variables = (
train_config.update_trainable_variables
if train_config.update_trainable_variables else None)
exclude_variables = (
train_config.freeze_variables
if train_config.freeze_variables else None)
trainable_variables = tf.contrib.framework.filter_variables(
tf.trainable_variables(),
include_patterns=include_variables,
exclude_patterns=exclude_variables)
clip_gradients_value = None
if train_config.gradient_clipping_by_norm > 0:
clip_gradients_value = train_config.gradient_clipping_by_norm
if not use_tpu:
for var in optimizer_summary_vars:
tf.summary.scalar(var.op.name, var)
summaries = [] if use_tpu else None
if train_config.summarize_gradients:
summaries = ['gradients', 'gradient_norm', 'global_gradient_norm']
train_op = tf.contrib.layers.optimize_loss(
loss=total_loss,
global_step=global_step,
learning_rate=None,
clip_gradients=clip_gradients_value,
optimizer=training_optimizer,
update_ops=detection_model.updates(),
variables=trainable_variables,
summaries=summaries,
name='') # Preventing scope prefix on all variables.
if mode == tf.estimator.ModeKeys.PREDICT:
exported_output = exporter_lib.add_output_tensor_nodes(detections)
export_outputs = {
tf.saved_model.signature_constants.PREDICT_METHOD_NAME:
tf.estimator.export.PredictOutput(exported_output)
}
eval_metric_ops = None
scaffold = None
if mode == tf.estimator.ModeKeys.EVAL:
class_agnostic = (
fields.DetectionResultFields.detection_classes not in detections)
groundtruth = _prepare_groundtruth_for_eval(
detection_model, class_agnostic,
eval_input_config.max_number_of_boxes)
use_original_images = fields.InputDataFields.original_image in features
if use_original_images:
eval_images = features[fields.InputDataFields.original_image]
true_image_shapes = tf.slice(
features[fields.InputDataFields.true_image_shape], [0, 0], [-1, 3])
original_image_spatial_shapes = features[fields.InputDataFields
.original_image_spatial_shape]
else:
eval_images = features[fields.InputDataFields.image]
true_image_shapes = None
original_image_spatial_shapes = None
eval_dict = eval_util.result_dict_for_batched_example(
eval_images,
features[inputs.HASH_KEY],
detections,
groundtruth,
class_agnostic=class_agnostic,
scale_to_absolute=True,
original_image_spatial_shapes=original_image_spatial_shapes,
true_image_shapes=true_image_shapes)
if class_agnostic:
category_index = label_map_util.create_class_agnostic_category_index()
else:
category_index = label_map_util.create_category_index_from_labelmap(
eval_input_config.label_map_path)
vis_metric_ops = None
if not use_tpu and use_original_images:
eval_metric_op_vis = vis_utils.VisualizeSingleFrameDetections(
category_index,
max_examples_to_draw=eval_config.num_visualizations,
max_boxes_to_draw=eval_config.max_num_boxes_to_visualize,
min_score_thresh=eval_config.min_score_threshold,
use_normalized_coordinates=False)
vis_metric_ops = eval_metric_op_vis.get_estimator_eval_metric_ops(
eval_dict)
# Eval metrics on a single example.
eval_metric_ops = eval_util.get_eval_metric_ops_for_evaluators(
eval_config, list(category_index.values()), eval_dict)
for loss_key, loss_tensor in iter(losses_dict.items()):
eval_metric_ops[loss_key] = tf.metrics.mean(loss_tensor)
for var in optimizer_summary_vars:
eval_metric_ops[var.op.name] = (var, tf.no_op())
if vis_metric_ops is not None:
eval_metric_ops.update(vis_metric_ops)
eval_metric_ops = {str(k): v for k, v in eval_metric_ops.items()}
if eval_config.use_moving_averages:
variable_averages = tf.train.ExponentialMovingAverage(0.0)
variables_to_restore = variable_averages.variables_to_restore()
keep_checkpoint_every_n_hours = (
train_config.keep_checkpoint_every_n_hours)
saver = tf.train.Saver(
variables_to_restore,
keep_checkpoint_every_n_hours=keep_checkpoint_every_n_hours)
scaffold = tf.train.Scaffold(saver=saver)
# EVAL executes on CPU, so use regular non-TPU EstimatorSpec.
if use_tpu and mode != tf.estimator.ModeKeys.EVAL:
return tf.contrib.tpu.TPUEstimatorSpec(
mode=mode,
scaffold_fn=scaffold_fn,
predictions=detections,
loss=total_loss,
train_op=train_op,
eval_metrics=eval_metric_ops,
export_outputs=export_outputs)
else:
if scaffold is None:
keep_checkpoint_every_n_hours = (
train_config.keep_checkpoint_every_n_hours)
saver = tf.train.Saver(
sharded=True,
keep_checkpoint_every_n_hours=keep_checkpoint_every_n_hours,
save_relative_paths=True)
tf.add_to_collection(tf.GraphKeys.SAVERS, saver)
scaffold = tf.train.Scaffold(saver=saver)
return tf.estimator.EstimatorSpec(
mode=mode,
predictions=detections,
loss=total_loss,
train_op=train_op,
eval_metric_ops=eval_metric_ops,
export_outputs=export_outputs,
scaffold=scaffold)
return model_fn
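# Illustrative usage sketch (not part of the original module): the `model_fn`
# built above is intended to be handed to an Estimator. Assuming `configs`,
# `hparams` and a `detection_model_fn` partial are already available (as they
# are in `create_estimator_and_inputs` below), wiring it up could look like:
#
#   model_fn = create_model_fn(detection_model_fn, configs, hparams)
#   estimator = tf.estimator.Estimator(model_fn=model_fn, config=run_config)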
def create_estimator_and_inputs(run_config,
hparams,
pipeline_config_path,
config_override=None,
train_steps=None,
sample_1_of_n_eval_examples=None,
sample_1_of_n_eval_on_train_examples=1,
model_fn_creator=create_model_fn,
use_tpu_estimator=False,
use_tpu=False,
num_shards=1,
params=None,
override_eval_num_epochs=True,
save_final_config=False,
postprocess_on_cpu=False,
export_to_tpu=None,
**kwargs):
"""Creates `Estimator`, input functions, and steps.
Args:
run_config: A `RunConfig`.
hparams: A `HParams`.
pipeline_config_path: A path to a pipeline config file.
config_override: A pipeline_pb2.TrainEvalPipelineConfig text proto to
override the config from `pipeline_config_path`.
train_steps: Number of training steps. If None, the number of training steps
is set from the `TrainConfig` proto.
sample_1_of_n_eval_examples: Integer representing how often an eval example
should be sampled. If 1, will sample all examples.
sample_1_of_n_eval_on_train_examples: Similar to
`sample_1_of_n_eval_examples`, except controls the sampling of training
data for evaluation.
model_fn_creator: A function that creates a `model_fn` for `Estimator`.
Follows the signature:
* Args:
* `detection_model_fn`: Function that returns `DetectionModel` instance.
* `configs`: Dictionary of pipeline config objects.
* `hparams`: `HParams` object.
* Returns:
`model_fn` for `Estimator`.
use_tpu_estimator: Whether a `TPUEstimator` should be returned. If False,
an `Estimator` will be returned.
use_tpu: Boolean, whether training and evaluation should run on TPU. Only
used if `use_tpu_estimator` is True.
num_shards: Number of shards (TPU cores). Only used if `use_tpu_estimator`
is True.
params: Parameter dictionary passed from the estimator. Only used if
`use_tpu_estimator` is True.
override_eval_num_epochs: Whether to overwrite the number of epochs to 1 for
eval_input.
save_final_config: Whether to save final config (obtained after applying
overrides) to `estimator.model_dir`.
postprocess_on_cpu: When use_tpu and postprocess_on_cpu are true,
postprocess is scheduled on the host cpu.
export_to_tpu: When use_tpu and export_to_tpu are true,
`export_savedmodel()` exports a metagraph for serving on TPU besides the
one on CPU.
**kwargs: Additional keyword arguments for configuration override.
Returns:
A dictionary with the following fields:
'estimator': An `Estimator` or `TPUEstimator`.
'train_input_fn': A training input function.
'eval_input_fns': A list of all evaluation input functions.
'eval_input_names': A list of names for each evaluation input.
'eval_on_train_input_fn': An evaluation-on-train input function.
'predict_input_fn': A prediction input function.
'train_steps': Number of training steps. Either directly from input or from
configuration.
"""
get_configs_from_pipeline_file = MODEL_BUILD_UTIL_MAP[
'get_configs_from_pipeline_file']
merge_external_params_with_configs = MODEL_BUILD_UTIL_MAP[
'merge_external_params_with_configs']
create_pipeline_proto_from_configs = MODEL_BUILD_UTIL_MAP[
'create_pipeline_proto_from_configs']
create_train_input_fn = MODEL_BUILD_UTIL_MAP['create_train_input_fn']
create_eval_input_fn = MODEL_BUILD_UTIL_MAP['create_eval_input_fn']
create_predict_input_fn = MODEL_BUILD_UTIL_MAP['create_predict_input_fn']
detection_model_fn_base = MODEL_BUILD_UTIL_MAP['detection_model_fn_base']
configs = get_configs_from_pipeline_file(
pipeline_config_path, config_override=config_override)
kwargs.update({
'train_steps': train_steps,
'use_bfloat16': configs['train_config'].use_bfloat16 and use_tpu
})
  if sample_1_of_n_eval_examples is not None and sample_1_of_n_eval_examples >= 1:
kwargs.update({
'sample_1_of_n_eval_examples': sample_1_of_n_eval_examples
})
if override_eval_num_epochs:
kwargs.update({'eval_num_epochs': 1})
tf.logging.warning(
'Forced number of epochs for all eval validations to be 1.')
configs = merge_external_params_with_configs(
configs, hparams, kwargs_dict=kwargs)
model_config = configs['model']
train_config = configs['train_config']
train_input_config = configs['train_input_config']
eval_config = configs['eval_config']
eval_input_configs = configs['eval_input_configs']
eval_on_train_input_config = copy.deepcopy(train_input_config)
eval_on_train_input_config.sample_1_of_n_examples = (
sample_1_of_n_eval_on_train_examples)
if override_eval_num_epochs and eval_on_train_input_config.num_epochs != 1:
tf.logging.warning('Expected number of evaluation epochs is 1, but '
'instead encountered `eval_on_train_input_config'
'.num_epochs` = '
'{}. Overwriting `num_epochs` to 1.'.format(
eval_on_train_input_config.num_epochs))
eval_on_train_input_config.num_epochs = 1
  # Update train_steps from the config, but only when it was not passed in
  # explicitly and the config provides a non-zero value.
if train_steps is None and train_config.num_steps != 0:
train_steps = train_config.num_steps
detection_model_fn = functools.partial(
detection_model_fn_base, model_config=model_config)
# Create the input functions for TRAIN/EVAL/PREDICT.
train_input_fn = create_train_input_fn(
train_config=train_config,
train_input_config=train_input_config,
model_config=model_config)
eval_input_fns = [
create_eval_input_fn(
eval_config=eval_config,
eval_input_config=eval_input_config,
model_config=model_config) for eval_input_config in eval_input_configs
]
eval_input_names = [
eval_input_config.name for eval_input_config in eval_input_configs
]
eval_on_train_input_fn = create_eval_input_fn(
eval_config=eval_config,
eval_input_config=eval_on_train_input_config,
model_config=model_config)
predict_input_fn = create_predict_input_fn(
model_config=model_config, predict_input_config=eval_input_configs[0])
# Read export_to_tpu from hparams if not passed.
if export_to_tpu is None:
export_to_tpu = hparams.get('export_to_tpu', False)
tf.logging.info('create_estimator_and_inputs: use_tpu %s, export_to_tpu %s',
use_tpu, export_to_tpu)
model_fn = model_fn_creator(detection_model_fn, configs, hparams, use_tpu,
postprocess_on_cpu)
if use_tpu_estimator:
estimator = tf.contrib.tpu.TPUEstimator(
model_fn=model_fn,
train_batch_size=train_config.batch_size,
# For each core, only batch size 1 is supported for eval.
eval_batch_size=num_shards * 1 if use_tpu else 1,
use_tpu=use_tpu,
config=run_config,
export_to_tpu=export_to_tpu,
eval_on_tpu=False, # Eval runs on CPU, so disable eval on TPU
params=params if params else {})
else:
estimator = tf.estimator.Estimator(model_fn=model_fn, config=run_config)
# Write the as-run pipeline config to disk.
if run_config.is_chief and save_final_config:
pipeline_config_final = create_pipeline_proto_from_configs(configs)
config_util.save_pipeline_config(pipeline_config_final, estimator.model_dir)
eval_interval_secs = eval_config.eval_interval_secs
return dict(
estimator=estimator,
train_input_fn=train_input_fn,
eval_input_fns=eval_input_fns,
eval_input_names=eval_input_names,
eval_on_train_input_fn=eval_on_train_input_fn,
predict_input_fn=predict_input_fn,
train_steps=train_steps,
      eval_interval_secs=eval_interval_secs)
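# Illustrative usage sketch (not part of the original module): callers such as
# model_main.py typically unpack the returned dictionary before building the
# train and eval specs, e.g.
#
#   train_and_eval_dict = create_estimator_and_inputs(
#       run_config, hparams, pipeline_config_path,
#       sample_1_of_n_eval_examples=1)
#   estimator = train_and_eval_dict['estimator']
#   train_input_fn = train_and_eval_dict['train_input_fn']
#   eval_input_fns = train_and_eval_dict['eval_input_fns']
#   train_steps = train_and_eval_dict['train_steps']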
def create_train_and_eval_specs(train_input_fn,
eval_input_fns,
eval_on_train_input_fn,
predict_input_fn,
train_steps,
eval_on_train_data=False,
eval_interval_secs=300,
final_exporter_name='Servo',
eval_spec_names=None):
"""Creates a `TrainSpec` and `EvalSpec`s.
Args:
train_input_fn: Function that produces features and labels on train data.
eval_input_fns: A list of functions that produce features and labels on eval
data.
eval_on_train_input_fn: Function that produces features and labels for
evaluation on train data.
predict_input_fn: Function that produces features for inference.
train_steps: Number of training steps.
eval_on_train_data: Whether to evaluate model on training data. Default is
      False.
    eval_interval_secs: Minimum number of seconds to wait between evaluations;
      used as `throttle_secs` for each `EvalSpec` (default: 300).
final_exporter_name: String name given to `FinalExporter`.
eval_spec_names: A list of string names for each `EvalSpec`.
Returns:
Tuple of `TrainSpec` and list of `EvalSpecs`. If `eval_on_train_data` is
True, the last `EvalSpec` in the list will correspond to training data. The
    remaining `EvalSpec`s in the list correspond to the evaluation datasets.
"""
train_spec = tf.estimator.TrainSpec(
input_fn=train_input_fn, max_steps=train_steps)
if eval_spec_names is None:
eval_spec_names = [str(i) for i in range(len(eval_input_fns))]
eval_specs = []
for index, (eval_spec_name, eval_input_fn) in enumerate(
zip(eval_spec_names, eval_input_fns)):
# Uses final_exporter_name as exporter_name for the first eval spec for
# backward compatibility.
if index == 0:
exporter_name = final_exporter_name
else:
exporter_name = '{}_{}'.format(final_exporter_name, eval_spec_name)
exporter = tf.estimator.FinalExporter(
name=exporter_name, serving_input_receiver_fn=predict_input_fn)
eval_specs.append(
tf.estimator.EvalSpec(
name=eval_spec_name,
input_fn=eval_input_fn,
steps=None,
exporters=exporter,
throttle_secs=eval_interval_secs))
if eval_on_train_data:
eval_specs.append(
tf.estimator.EvalSpec(
name='eval_on_train', input_fn=eval_on_train_input_fn, steps=None))
return train_spec, eval_specs
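# Illustrative usage sketch (not part of the original module): the specs built
# above are normally passed straight to tf.estimator.train_and_evaluate, using
# the first EvalSpec for validation, e.g.
#
#   train_spec, eval_specs = create_train_and_eval_specs(
#       train_input_fn, eval_input_fns, eval_on_train_input_fn,
#       predict_input_fn, train_steps, eval_on_train_data=False)
#   tf.estimator.train_and_evaluate(estimator, train_spec, eval_specs[0])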
def continuous_eval(estimator, model_dir, input_fn, train_steps, name):
"""Perform continuous evaluation on checkpoints written to a model directory.
Args:
estimator: Estimator object to use for evaluation.
model_dir: Model directory to read checkpoints for continuous evaluation.
input_fn: Input function to use for evaluation.
train_steps: Number of training steps. This is used to infer the last
checkpoint and stop evaluation loop.
name: Namescope for eval summary.
"""
def terminate_eval():
tf.logging.info('Terminating eval after 180 seconds of no checkpoints')
return True
for ckpt in tf.contrib.training.checkpoints_iterator(
model_dir, min_interval_secs=180, timeout=None,
timeout_fn=terminate_eval):
tf.logging.info('Starting Evaluation.')
try:
eval_results = estimator.evaluate(
input_fn=input_fn, steps=None, checkpoint_path=ckpt, name=name)
tf.logging.info('Eval results: %s' % eval_results)
# Terminate eval job when final checkpoint is reached
current_step = int(os.path.basename(ckpt).split('-')[1])
if current_step >= train_steps:
tf.logging.info(
'Evaluation finished after training step %d' % current_step)
break
except tf.errors.NotFoundError:
tf.logging.info(
'Checkpoint %s no longer exists, skipping checkpoint' % ckpt)
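# Illustrative usage sketch (not part of the original module): an eval-only job
# can poll a training job's model_dir with the helper above; the name argument
# is an arbitrary namescope for the eval summaries, e.g.
#
#   continuous_eval(estimator, run_config.model_dir, eval_input_fns[0],
#                   train_steps, name='validation_data')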
def populate_experiment(run_config,
hparams,
pipeline_config_path,
train_steps=None,
eval_steps=None,
model_fn_creator=create_model_fn,
**kwargs):
"""Populates an `Experiment` object.
EXPERIMENT CLASS IS DEPRECATED. Please switch to
tf.estimator.train_and_evaluate. As an example, see model_main.py.
Args:
run_config: A `RunConfig`.
hparams: A `HParams`.
pipeline_config_path: A path to a pipeline config file.
train_steps: Number of training steps. If None, the number of training steps
is set from the `TrainConfig` proto.
eval_steps: Number of evaluation steps per evaluation cycle. If None, the
number of evaluation steps is set from the `EvalConfig` proto.
model_fn_creator: A function that creates a `model_fn` for `Estimator`.
Follows the signature:
* Args:
* `detection_model_fn`: Function that returns `DetectionModel` instance.
* `configs`: Dictionary of pipeline config objects.
* `hparams`: `HParams` object.
* Returns:
`model_fn` for `Estimator`.
**kwargs: Additional keyword arguments for configuration override.
Returns:
An `Experiment` that defines all aspects of training, evaluation, and
export.
"""
tf.logging.warning('Experiment is being deprecated. Please use '
'tf.estimator.train_and_evaluate(). See model_main.py for '
'an example.')
train_and_eval_dict = create_estimator_and_inputs(
run_config,
hparams,
pipeline_config_path,
train_steps=train_steps,
eval_steps=eval_steps,
model_fn_creator=model_fn_creator,
save_final_config=True,
**kwargs)
estimator = train_and_eval_dict['estimator']
train_input_fn = train_and_eval_dict['train_input_fn']
eval_input_fns = train_and_eval_dict['eval_input_fns']
predict_input_fn = train_and_eval_dict['predict_input_fn']
train_steps = train_and_eval_dict['train_steps']
export_strategies = [
tf.contrib.learn.utils.saved_model_export_utils.make_export_strategy(
serving_input_fn=predict_input_fn)
]
return tf.contrib.learn.Experiment(
estimator=estimator,
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fns[0],
train_steps=train_steps,
eval_steps=None,
export_strategies=export_strategies,
eval_delay_secs=120,
)
|
the-stack_0_4443 | import datetime
from casexml.apps.case.models import CommCareCaseAction
from corehq.apps.reports.standard.cases.basic import CaseListReport
from corehq.apps.api.es import ReportCaseES
from corehq.apps.reports.generic import GenericTabularReport
from corehq.apps.reports.basic import BasicTabularReport, Column
from corehq.apps.reports.standard import (DatespanMixin,
ProjectReportParametersMixin, CustomProjectReport)
from corehq.apps.reports.standard.cases.data_sources import CaseDisplay
from corehq.apps.reports.datatables import DataTablesHeader, DataTablesColumn
from corehq.pillows.base import restore_property_dict
from hsph.reports import HSPHSiteDataMixin
from hsph.fields import NameOfCATIField, AllocatedToFilter
from corehq.apps.reports.filters.users import UserTypeFilter
from corehq.apps.reports.filters.dates import DatespanFilter
from couchdbkit_aggregate.fn import mean, unique_count
from casexml.apps.case import const
from dimagi.utils.decorators.memoized import memoized
from dimagi.utils.couch.database import get_db
def short_date_format(date):
return date.strftime('%d-%b')
def username(key, report):
return report.usernames[key[0]]
def datestring_minus_days(datestring, days):
date = datetime.datetime.strptime(datestring[:10], '%Y-%m-%d')
return (date - datetime.timedelta(days=days)).isoformat()
def date_minus_11_days(couchkey):
return couchkey + [datestring_minus_days(couchkey[0], 11)]
def date_minus_14_days(couchkey):
return couchkey + [datestring_minus_days(couchkey[0], 14)]
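# Worked example (not part of the original module): these helpers extend a
# couch view key with a shifted timestamp so that "no follow up for N days"
# style columns only count cases that are old enough to qualify, e.g.
# date_minus_11_days(['2013-05-20']) == ['2013-05-20', '2013-05-09T00:00:00'].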
class CATIPerformanceReport(CustomProjectReport, ProjectReportParametersMixin,
DatespanMixin, BasicTabularReport):
name = "CATI Performance Report"
slug = "cati_performance"
field_classes = (UserTypeFilter, DatespanFilter, NameOfCATIField)
filter_group_name = "CATI"
couch_view = "hsph/cati_performance_report_old"
default_column_order = (
'catiName',
'followedUp',
'noFollowUpAfter4Days',
'transferredToManager',
'transferredToField',
'notClosedOrTransferredAfter13Days',
'workingDaysUniqueCount',
'followUpTime',
'followUpTimeMean'
)
catiName = Column(
"Name of CATI", calculate_fn=username)
followedUp = Column(
"No. of Births Followed Up", key='followedUp')
noFollowUpAfter4Days = Column(
"No. of Cases with No Follow Up for 4 Days",
key='noFollowUpAfter4Days',
endkey_fn=date_minus_11_days)
transferredToManager = Column(
"Transferred to Call Center Manager", key='transferredToManager')
transferredToField = Column(
"Transferred to Field", key='transferredToField')
notClosedOrTransferredAfter13Days = Column(
"CATI Timed Out", key='notClosedOrTransferredAfter13Days',
endkey_fn=date_minus_14_days)
workingDaysUniqueCount = Column(
"No. of Working Days", key='workingDays', reduce_fn=unique_count)
followUpTime = Column(
"Total Follow Up Time", key='followUpTime')
followUpTimeMean = Column(
"Average Follow Up Time", key='followUpTime', reduce_fn=mean)
@property
def start_and_end_keys(self):
return ([self.datespan.startdate_param_utc],
[self.datespan.enddate_param_utc])
@property
def keys(self):
for user in self.users:
yield [user['user_id']]
class HSPHCaseDisplay(CaseDisplay):
@property
def region(self):
try:
return self.report.get_region_name(self.case['region_id'])
except AttributeError:
return ""
@property
def district(self):
try:
return self.report.get_district_name(
self.case['region_id'], self.case['district_id'])
except AttributeError:
return ""
@property
def site(self):
try:
return self.report.get_site_name(
self.case['region_id'], self.case['district_id'],
self.case['site_number'])
except AttributeError:
return ""
@property
def patient_id(self):
try:
return self.case.patient_id
except AttributeError:
return ""
@property
def status(self):
return "Closed" if self.case['closed'] else "Open"
@property
def mother_name(self):
return self.case.get('name_mother', '')
@property
def filter_date(self):
return self.case.get('filter_date', '')
@property
def address(self):
return self.case.get('house_address', '')
@property
@memoized
def allocated_to(self):
if self.status == "Closed":
close_action = [CommCareCaseAction.wrap(a) for a in self.case['actions'] if a['action_type'] ==
const.CASE_ACTION_CLOSE][0]
CATI_FOLLOW_UP_FORMS = (
"http://openrosa.org/formdesigner/A5B08D8F-139D-46C6-9FDF-B1AD176EAE1F",
)
if close_action.xform.xmlns in CATI_FOLLOW_UP_FORMS:
return 'CATI'
else:
return 'Field'
else:
follow_up_type = self.case.get('follow_up_type', '')
house_number = self.case.get('phone_house_number', '')
husband_number = self.case.get('phone_husband_number', '')
mother_number = self.case.get('phone_mother_number', '')
asha_number = self.case.get('phone_asha_number', '')
if follow_up_type != 'field_follow_up' and (house_number or
husband_number or mother_number or asha_number):
return 'CATI'
else:
return 'Field'
@property
def allocated_start(self):
try:
delta = datetime.timedelta(days=8 if self.allocated_to == 'CATI' else 13)
return short_date_format(self.parse_date(self.case['filter_date']) + delta)
except AttributeError:
return ""
@property
def allocated_end(self):
try:
delta = datetime.timedelta(days=13 if self.allocated_to == 'CATI' else 23)
return short_date_format(self.parse_date(self.case['filter_date']) + delta)
except AttributeError:
return ""
@property
def outside_allocated_period(self):
if not ('filter_date' in self.case and
isinstance(self.parse_date(self.case['filter_date']), datetime.datetime)):
return ""
if self.case['closed_on']:
compare_date = self.parse_date(self.case['closed_on']).date()
else:
compare_date = datetime.date.today()
return 'Yes' if (compare_date - self.parse_date(self.case['filter_date']).date()).days > 23 else 'No'
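# Worked example (not part of the original module): for a case whose
# filter_date is 2013-05-01 and which is allocated to CATI, allocated_start is
# '09-May' (filter_date + 8 days) and allocated_end is '14-May' (filter_date +
# 13 days); a field-allocated case would instead get '14-May' and '24-May', and
# any case still unresolved more than 23 days after filter_date is reported as
# outside its allocated period.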
class CaseReport(CaseListReport, CustomProjectReport, HSPHSiteDataMixin,
DatespanMixin):
name = 'Case Report'
slug = 'case_report'
fields = (
'corehq.apps.reports.filters.users.UserTypeFilter',
'corehq.apps.reports.filters.dates.DatespanFilter',
'hsph.fields.SiteField',
#'hsph.fields.AllocatedToFilter',
'corehq.apps.reports.filters.select.SelectOpenCloseFilter',
)
default_case_type = 'birth'
@property
@memoized
def case_es(self):
return ReportCaseES(self.domain)
@property
def headers(self):
headers = DataTablesHeader(
DataTablesColumn("Region"),
DataTablesColumn("District"),
DataTablesColumn("Site"),
DataTablesColumn("Patient ID"),
DataTablesColumn("Status"),
DataTablesColumn("Mother Name"),
DataTablesColumn("Date of Delivery or Admission"),
DataTablesColumn("Address of Patient"),
DataTablesColumn("Allocated To"),
DataTablesColumn("Allocated Start"),
DataTablesColumn("Allocated End"),
DataTablesColumn("Outside Allocated Period")
)
headers.no_sort = True
return headers
@property
def case_filter(self):
#allocated_to = self.request_params.get(AllocatedToFilter.slug, '')
region_id = self.request_params.get('hsph_region', '')
district_id = self.request_params.get('hsph_district', '')
site_num = str(self.request_params.get('hsph_site', ''))
filters = [{
'range': {
'opened_on': {
"from": self.datespan.startdate_param_utc,
"to": self.datespan.enddate_param_utc
}
}
}]
#if allocated_to:
#filters.append({'term': {'allocated_to': allocated_to}})
if site_num:
filters.append({'term': {'site_number.#value': site_num.lower()}})
if district_id:
filters.append({'term': {'district_id.#value': district_id.lower()}})
if region_id:
filters.append({'term': {'region_id.#value': region_id.lower()}})
return {'and': filters} if filters else {}
#def allocated_to(self):
#if self.status == "Closed":
#close_action = [a for a in self.case.actions if a.action_type ==
#const.CASE_ACTION_CLOSE][0]
#CATI_FOLLOW_UP_FORMS = (
#"http://openrosa.org/formdesigner/A5B08D8F-139D-46C6-9FDF-B1AD176EAE1F",
#)
#if close_action.xform.xmlns in CATI_FOLLOW_UP_FORMS:
#return 'CATI'
#else:
#return 'Field'
#else:
#follow_up_type = getattr(self.case, 'follow_up_type', '')
#house_number = getattr(self.case, 'phone_house_number', '')
#husband_number = getattr(self.case, 'phone_husband_number', '')
#mother_number = getattr(self.case, 'phone_mother_number', '')
#asha_number = getattr(self.case, 'phone_asha_number', '')
#if follow_up_type != 'field_follow_up' and (house_number or
#husband_number or mother_number or asha_number):
#return 'CATI'
#else:
#return 'Field'
@property
def shared_pagination_GET_params(self):
params = super(CaseReport, self).shared_pagination_GET_params
slugs = [
AllocatedToFilter.slug,
'hsph_region',
'hsph_district',
'hsph_site',
'startdate',
'enddate'
]
for slug in slugs:
params.append({
'name': slug,
'value': self.request_params.get(slug, '')
})
return params
@property
def rows(self):
case_displays = (HSPHCaseDisplay(self, restore_property_dict(self.get_case(case)))
for case in self.es_results['hits'].get('hits', []))
for disp in case_displays:
yield [
disp.region,
disp.district,
disp.site,
disp.patient_id,
disp.status,
disp.case_link,
disp.filter_date,
disp.address,
disp.allocated_to,
disp.allocated_start,
disp.allocated_end,
disp.outside_allocated_period
]
class CallCenterFollowUpSummaryReport(GenericTabularReport,
CustomProjectReport, ProjectReportParametersMixin, DatespanMixin,
HSPHSiteDataMixin):
name = "Call Center Follow Up Summary"
slug = "hsph_dcc_followup_summary"
fields = ['corehq.apps.reports.filters.dates.DatespanFilter',
'hsph.fields.SiteField']
@property
def headers(self):
return DataTablesHeader(DataTablesColumn("Region"),
DataTablesColumn("District"),
DataTablesColumn("Site"),
DataTablesColumn("Total Number of Birth events with contact details"),
DataTablesColumn("Total number of births followed up"),
DataTablesColumn("Number of cases followed up at day 8th"),
DataTablesColumn("Number of cases followed up between day 9th to 13th"),
DataTablesColumn("Number of cases with contact details open at day 14th"),
DataTablesColumn("Number of cases with contact details transferred to Field management for home Visits"),
DataTablesColumn("Number of cases where no out comes could be recorded"))
@property
def rows(self):
db = get_db()
rows = []
if not self.selected_site_map:
self._selected_site_map = self.site_map
keys = self.generate_keys()
for key in keys:
data = db.view("hsph/dcc_followup_summary_old",
reduce=True,
startkey=key+[self.datespan.startdate_param_utc],
endkey=key+[self.datespan.enddate_param_utc]
).all()
for item in data:
item = item.get('value')
if item:
region, district, site = self.get_site_table_values(key)
now = self.datespan.enddate
day14 = now-datetime.timedelta(days=14)
day14 = day14.strftime("%Y-%m-%d")
day14_data = db.view("hsph/cases_by_birth_date_old",
reduce=True,
startkey=key,
endkey=key+[day14]
).first()
still_open_at_day14 = day14_data.get('value', 0) if day14_data else 0
rows.append([
region,
district,
site,
item.get('totalBirthsWithContact', 0),
item.get('totalBirths', 0),
item.get('numCasesFollowedUpByDay8', 0),
item.get('numCasesFollowedUpBetweenDays9and13', 0),
still_open_at_day14,
item.get('numCasesWithContactTransferredToField', 0),
item.get('numCasesWithNoOutcomes', 0)
])
return rows
|
the-stack_0_4444 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .sub_resource import SubResource
class IPConfiguration(SubResource):
"""IPConfiguration.
:param id: Resource ID.
:type id: str
:param private_ip_address: The private IP address of the IP configuration.
:type private_ip_address: str
:param private_ip_allocation_method: The private IP allocation method.
Possible values are 'Static' and 'Dynamic'. Possible values include:
'Static', 'Dynamic'
:type private_ip_allocation_method: str or
~azure.mgmt.network.v2016_09_01.models.IPAllocationMethod
:param subnet: The reference of the subnet resource.
:type subnet: ~azure.mgmt.network.v2016_09_01.models.Subnet
:param public_ip_address: The reference of the public IP resource.
:type public_ip_address:
~azure.mgmt.network.v2016_09_01.models.PublicIPAddress
:param provisioning_state: Gets the provisioning state of the public IP
resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
:type provisioning_state: str
:param name: The name of the resource that is unique within a resource
group. This name can be used to access the resource.
:type name: str
:param etag: A unique read-only string that changes whenever the resource
is updated.
:type etag: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'private_ip_address': {'key': 'properties.privateIPAddress', 'type': 'str'},
'private_ip_allocation_method': {'key': 'properties.privateIPAllocationMethod', 'type': 'str'},
'subnet': {'key': 'properties.subnet', 'type': 'Subnet'},
'public_ip_address': {'key': 'properties.publicIPAddress', 'type': 'PublicIPAddress'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
}
def __init__(self, id=None, private_ip_address=None, private_ip_allocation_method=None, subnet=None, public_ip_address=None, provisioning_state=None, name=None, etag=None):
super(IPConfiguration, self).__init__(id=id)
self.private_ip_address = private_ip_address
self.private_ip_allocation_method = private_ip_allocation_method
self.subnet = subnet
self.public_ip_address = public_ip_address
self.provisioning_state = provisioning_state
self.name = name
self.etag = etag
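# Illustrative usage sketch (not part of the generated code): building an IP
# configuration for a NIC payload might look like the following; the resource
# IDs are placeholders and `Subnet` is the sibling model from the same
# `azure.mgmt.network.v2016_09_01.models` package, assumed to accept an `id`
# keyword like any SubResource.
#
#   from azure.mgmt.network.v2016_09_01.models import IPConfiguration, Subnet
#
#   ip_config = IPConfiguration(
#       name='ipconfig1',
#       private_ip_allocation_method='Dynamic',
#       subnet=Subnet(id='/subscriptions/<sub-id>/resourceGroups/<rg>/providers/'
#                        'Microsoft.Network/virtualNetworks/<vnet>/subnets/<subnet>'))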
|
the-stack_0_4445 | """
ViP training and evaluating script
This script is modified from pytorch-image-models by Ross Wightman (https://github.com/rwightman/pytorch-image-models/)
It was started from an early version of the PyTorch ImageNet example
(https://github.com/pytorch/examples/tree/master/imagenet)
"""
import argparse
import time
import yaml
import os
import logging
from collections import OrderedDict
from contextlib import suppress
from datetime import datetime
import models
import torch
import torch.nn as nn
import torchvision.utils
from torch.nn.parallel import DistributedDataParallel as NativeDDP
from timm.data import create_dataset, create_loader, resolve_data_config, Mixup, FastCollateMixup, AugMixDataset
from timm.models import load_checkpoint, create_model, resume_checkpoint, convert_splitbn_model
from timm.utils import *
from timm.loss import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy, JsdCrossEntropy
from timm.optim import create_optimizer
from timm.scheduler import create_scheduler
from timm.utils import ApexScaler, NativeScaler
torch.backends.cudnn.benchmark = True
_logger = logging.getLogger('train')
# The first arg parser parses out only the --config argument; it is used to
# load a YAML file containing key-values that override the defaults for the main parser below
config_parser = parser = argparse.ArgumentParser(description='Training Config', add_help=False)
parser.add_argument('-c', '--config', default='', type=str, metavar='FILE',
help='YAML config file specifying default arguments')
parser = argparse.ArgumentParser(description='ViP Training and Evaluating')
# Dataset / Model parameters
parser.add_argument('data', metavar='DIR',
help='path to dataset')
parser.add_argument('--dataset', '-d', metavar='NAME', default='',
help='dataset type (default: ImageFolder/ImageTar if empty)')
parser.add_argument('--train-split', metavar='NAME', default='train',
help='dataset train split (default: train)')
parser.add_argument('--val-split', metavar='NAME', default='validation',
help='dataset validation split (default: validation)')
parser.add_argument('--model', default='vip_s7', type=str, metavar='MODEL',
                    help='Name of model to train (default: "vip_s7")')
parser.add_argument('--pretrained', action='store_true', default=False,
help='Start with pretrained version of specified network (if avail)')
parser.add_argument('--initial-checkpoint', default='', type=str, metavar='PATH',
help='Initialize model from this checkpoint (default: none)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='Resume full model and optimizer state from checkpoint (default: none)')
parser.add_argument('--eval_checkpoint', default='', type=str, metavar='PATH',
help='path to eval checkpoint (default: none)')
parser.add_argument('--no-resume-opt', action='store_true', default=False,
help='prevent resume of optimizer state when resuming model')
parser.add_argument('--num-classes', type=int, default=1000, metavar='N',
help='number of label classes (default: 1000)')
parser.add_argument('--gp', default=None, type=str, metavar='POOL',
help='Global pool type, one of (fast, avg, max, avgmax, avgmaxc). Model default if None.')
parser.add_argument('--img-size', type=int, default=224, metavar='N',
                    help='Input image size (default: 224)')
parser.add_argument('--crop-pct', default=None, type=float,
metavar='N', help='Input image center crop percent (for validation only)')
parser.add_argument('--mean', type=float, nargs='+', default=None, metavar='MEAN',
help='Override mean pixel value of dataset')
parser.add_argument('--std', type=float, nargs='+', default=None, metavar='STD',
                    help='Override std deviation of dataset')
parser.add_argument('--interpolation', default='', type=str, metavar='NAME',
help='Image resize interpolation type (overrides model)')
parser.add_argument('-b', '--batch-size', type=int, default=64, metavar='N',
help='input batch size for training (default: 64)')
parser.add_argument('-vb', '--validation-batch-size-multiplier', type=int, default=1, metavar='N',
help='ratio of validation batch size to training batch size (default: 1)')
# Optimizer parameters
parser.add_argument('--opt', default='adamw', type=str, metavar='OPTIMIZER',
                    help='Optimizer (default: "adamw")')
parser.add_argument('--opt-eps', default=None, type=float, metavar='EPSILON',
help='Optimizer Epsilon (default: None, use opt default)')
parser.add_argument('--opt-betas', default=None, type=float, nargs='+', metavar='BETA',
help='Optimizer Betas (default: None, use opt default)')
parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
help='Optimizer momentum (default: 0.9)')
parser.add_argument('--weight-decay', type=float, default=0.05,
                    help='weight decay (default: 0.05)')
parser.add_argument('--clip-grad', type=float, default=None, metavar='NORM',
help='Clip gradient norm (default: None, no clipping)')
# Learning rate schedule parameters
parser.add_argument('--sched', default='cosine', type=str, metavar='SCHEDULER',
                    help='LR scheduler (default: "cosine")')
parser.add_argument('--lr', type=float, default=5e-4, metavar='LR',
                    help='learning rate (default: 5e-4)')
parser.add_argument('--lr-noise', type=float, nargs='+', default=None, metavar='pct, pct',
help='learning rate noise on/off epoch percentages')
parser.add_argument('--lr-noise-pct', type=float, default=0.67, metavar='PERCENT',
help='learning rate noise limit percent (default: 0.67)')
parser.add_argument('--lr-noise-std', type=float, default=1.0, metavar='STDDEV',
help='learning rate noise std-dev (default: 1.0)')
parser.add_argument('--lr-cycle-mul', type=float, default=1.0, metavar='MULT',
help='learning rate cycle len multiplier (default: 1.0)')
parser.add_argument('--lr-cycle-limit', type=int, default=1, metavar='N',
help='learning rate cycle limit')
parser.add_argument('--warmup-lr', type=float, default=1e-6, metavar='LR',
                    help='warmup learning rate (default: 1e-6)')
parser.add_argument('--min-lr', type=float, default=1e-5, metavar='LR',
help='lower lr bound for cyclic schedulers that hit 0 (1e-5)')
parser.add_argument('--epochs', type=int, default=300, metavar='N',
                    help='number of epochs to train (default: 300)')
parser.add_argument('--start-epoch', default=None, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('--decay-epochs', type=float, default=30, metavar='N',
help='epoch interval to decay LR')
parser.add_argument('--warmup-epochs', type=int, default=10, metavar='N',
help='epochs to warmup LR, if scheduler supports')
parser.add_argument('--cooldown-epochs', type=int, default=10, metavar='N',
help='epochs to cooldown LR at min_lr, after cyclic schedule ends')
parser.add_argument('--patience-epochs', type=int, default=10, metavar='N',
                    help='patience epochs for Plateau LR scheduler (default: 10)')
parser.add_argument('--decay-rate', '--dr', type=float, default=0.1, metavar='RATE',
help='LR decay rate (default: 0.1)')
# Augmentation & regularization parameters
parser.add_argument('--no-aug', action='store_true', default=False,
help='Disable all training augmentation, override other train aug args')
parser.add_argument('--scale', type=float, nargs='+', default=[0.08, 1.0], metavar='PCT',
help='Random resize scale (default: 0.08 1.0)')
parser.add_argument('--ratio', type=float, nargs='+', default=[3./4., 4./3.], metavar='RATIO',
help='Random resize aspect ratio (default: 0.75 1.33)')
parser.add_argument('--hflip', type=float, default=0.5,
help='Horizontal flip training aug probability')
parser.add_argument('--vflip', type=float, default=0.,
help='Vertical flip training aug probability')
parser.add_argument('--color-jitter', type=float, default=0.4, metavar='PCT',
help='Color jitter factor (default: 0.4)')
parser.add_argument('--aa', type=str, default='rand-m9-mstd0.5-inc1', metavar='NAME',
                    help='Use AutoAugment policy. "v0" or "original". (default: "rand-m9-mstd0.5-inc1")')
parser.add_argument('--aug-splits', type=int, default=0,
help='Number of augmentation splits (default: 0, valid: 0 or >=2)')
parser.add_argument('--jsd', action='store_true', default=False,
help='Enable Jensen-Shannon Divergence + CE loss. Use with `--aug-splits`.')
parser.add_argument('--reprob', type=float, default=0.25, metavar='PCT',
help='Random erase prob (default: 0.25)')
parser.add_argument('--remode', type=str, default='pixel',
                    help='Random erase mode (default: "pixel")')
parser.add_argument('--recount', type=int, default=1,
help='Random erase count (default: 1)')
parser.add_argument('--resplit', action='store_true', default=False,
help='Do not random erase first (clean) augmentation split')
parser.add_argument('--mixup', type=float, default=0.8,
                    help='mixup alpha, mixup enabled if > 0. (default: 0.8)')
parser.add_argument('--cutmix', type=float, default=1.0,
                    help='cutmix alpha, cutmix enabled if > 0. (default: 1.0)')
parser.add_argument('--cutmix-minmax', type=float, nargs='+', default=None,
help='cutmix min/max ratio, overrides alpha and enables cutmix if set (default: None)')
parser.add_argument('--mixup-prob', type=float, default=1.0,
help='Probability of performing mixup or cutmix when either/both is enabled')
parser.add_argument('--mixup-switch-prob', type=float, default=0.5,
help='Probability of switching to cutmix when both mixup and cutmix enabled')
parser.add_argument('--mixup-mode', type=str, default='batch',
help='How to apply mixup/cutmix params. Per "batch", "pair", or "elem"')
parser.add_argument('--mixup-off-epoch', default=0, type=int, metavar='N',
help='Turn off mixup after this epoch, disabled if 0 (default: 0)')
parser.add_argument('--smoothing', type=float, default=0.1,
help='Label smoothing (default: 0.1)')
parser.add_argument('--train-interpolation', type=str, default='random',
help='Training interpolation (random, bilinear, bicubic default: "random")')
parser.add_argument('--drop', type=float, default=0.0, metavar='PCT',
help='Dropout rate (default: 0.0)')
parser.add_argument('--drop-connect', type=float, default=None, metavar='PCT',
help='Drop connect rate, DEPRECATED, use drop-path (default: None)')
parser.add_argument('--drop-path', type=float, default=0.1, metavar='PCT',
                    help='Drop path rate (default: 0.1)')
parser.add_argument('--drop-block', type=float, default=None, metavar='PCT',
help='Drop block rate (default: None)')
# Batch norm parameters (only works with gen_efficientnet based models currently)
parser.add_argument('--bn-tf', action='store_true', default=False,
help='Use Tensorflow BatchNorm defaults for models that support it (default: False)')
parser.add_argument('--bn-momentum', type=float, default=None,
help='BatchNorm momentum override (if not None)')
parser.add_argument('--bn-eps', type=float, default=None,
help='BatchNorm epsilon override (if not None)')
parser.add_argument('--sync-bn', action='store_true',
help='Enable NVIDIA Apex or Torch synchronized BatchNorm.')
parser.add_argument('--dist-bn', type=str, default='',
help='Distribute BatchNorm stats between nodes after each epoch ("broadcast", "reduce", or "")')
parser.add_argument('--split-bn', action='store_true',
help='Enable separate BN layers per augmentation split.')
# Model Exponential Moving Average
parser.add_argument('--model-ema', action='store_true', default=False,
help='Enable tracking moving average of model weights')
parser.add_argument('--model-ema-force-cpu', action='store_true', default=False,
help='Force ema to be tracked on CPU, rank=0 node only. Disables EMA validation.')
parser.add_argument('--model-ema-decay', type=float, default=0.99996,
                    help='decay factor for model weights moving average (default: 0.99996)')
# Misc
parser.add_argument('--seed', type=int, default=42, metavar='S',
help='random seed (default: 42)')
parser.add_argument('--log-interval', type=int, default=50, metavar='N',
help='how many batches to wait before logging training status')
parser.add_argument('--recovery-interval', type=int, default=0, metavar='N',
help='how many batches to wait before writing recovery checkpoint')
parser.add_argument('-j', '--workers', type=int, default=8, metavar='N',
                    help='how many training processes to use (default: 8)')
parser.add_argument('--num-gpu', type=int, default=1,
                    help='Number of GPUs to use')
parser.add_argument('--save-images', action='store_true', default=False,
                    help='save images of input batches every log interval for debugging')
parser.add_argument('--amp', action='store_true', default=False,
help='use NVIDIA Apex AMP or Native AMP for mixed precision training')
parser.add_argument('--apex-amp', action='store_true', default=False,
help='Use NVIDIA Apex AMP mixed precision')
parser.add_argument('--native-amp', action='store_true', default=False,
help='Use Native Torch AMP mixed precision')
parser.add_argument('--channels-last', action='store_true', default=False,
help='Use channels_last memory layout')
parser.add_argument('--pin-mem', action='store_true', default=False,
help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.')
parser.add_argument('--no-prefetcher', action='store_true', default=False,
help='disable fast prefetcher')
parser.add_argument('--output', default='', type=str, metavar='PATH',
help='path to output folder (default: none, current dir)')
parser.add_argument('--eval-metric', default='top1', type=str, metavar='EVAL_METRIC',
                    help='Best metric (default: "top1")')
parser.add_argument('--tta', type=int, default=0, metavar='N',
help='Test/inference time augmentation (oversampling) factor. 0=None (default: 0)')
parser.add_argument("--local_rank", default=0, type=int)
parser.add_argument('--use-multi-epochs-loader', action='store_true', default=False,
help='use the multi-epochs-loader to save time at the beginning of every epoch')
try:
from apex import amp
from apex.parallel import DistributedDataParallel as ApexDDP
from apex.parallel import convert_syncbn_model
has_apex = True
except ImportError:
has_apex = False
has_native_amp = False
try:
if getattr(torch.cuda.amp, 'autocast') is not None:
has_native_amp = True
except AttributeError:
pass
def _parse_args():
# Do we have a config file to parse?
args_config, remaining = config_parser.parse_known_args()
if args_config.config:
with open(args_config.config, 'r') as f:
cfg = yaml.safe_load(f)
parser.set_defaults(**cfg)
# The main arg parser parses the rest of the args, the usual
# defaults will have been overridden if config file specified.
args = parser.parse_args(remaining)
# Cache the args as a text string to save them in the output dir later
args_text = yaml.safe_dump(args.__dict__, default_flow_style=False)
return args, args_text
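# Illustrative sketch (not part of the original script): because the config
# parser runs first, every key in the YAML file becomes a new default of the
# main parser and can still be overridden on the command line. Keys must match
# the argparse dest names (underscores, not dashes). A minimal config and
# invocation might look like this (file and script names are hypothetical):
#
#   # vip_s7.yaml
#   model: vip_s7
#   batch_size: 128
#   drop_path: 0.1
#   aa: rand-m9-mstd0.5-inc1
#
#   python train.py /path/to/imagenet -c vip_s7.yaml --epochs 300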
def main():
setup_default_logging()
args, args_text = _parse_args()
args.prefetcher = not args.no_prefetcher
args.distributed = False
if 'WORLD_SIZE' in os.environ:
args.distributed = int(os.environ['WORLD_SIZE']) > 1
if args.distributed and args.num_gpu > 1:
_logger.warning(
            'Using more than one GPU per process in distributed mode is not allowed. Setting num_gpu to 1.')
args.num_gpu = 1
args.device = 'cuda:0'
args.world_size = 1
args.rank = 0 # global rank
if args.distributed:
args.num_gpu = 1
args.device = 'cuda:%d' % args.local_rank
torch.cuda.set_device(args.local_rank)
torch.distributed.init_process_group(backend='nccl', init_method='env://')
args.world_size = torch.distributed.get_world_size()
args.rank = torch.distributed.get_rank()
assert args.rank >= 0
if args.distributed:
_logger.info('Training in distributed mode with multiple processes, 1 GPU per process. Process %d, total %d.'
% (args.rank, args.world_size))
else:
_logger.info('Training with a single process on %d GPUs.' % args.num_gpu)
torch.manual_seed(args.seed + args.rank)
model = create_model(
args.model,
pretrained=args.pretrained,
num_classes=args.num_classes,
drop_rate=args.drop,
drop_connect_rate=args.drop_connect, # DEPRECATED, use drop_path
drop_path_rate=args.drop_path,
drop_block_rate=args.drop_block,
global_pool=args.gp,
bn_tf=args.bn_tf,
bn_momentum=args.bn_momentum,
bn_eps=args.bn_eps,
checkpoint_path=args.initial_checkpoint,
img_size=args.img_size)
if args.local_rank == 0:
_logger.info('Model %s created, param count: %d' %
(args.model, sum([m.numel() for m in model.parameters()])))
data_config = resolve_data_config(vars(args), model=model, verbose=args.local_rank == 0)
num_aug_splits = 0
if args.aug_splits > 0:
assert args.aug_splits > 1, 'A split of 1 makes no sense'
num_aug_splits = args.aug_splits
if args.split_bn:
assert num_aug_splits > 1 or args.resplit
model = convert_splitbn_model(model, max(num_aug_splits, 2))
use_amp = None
if args.amp:
# for backwards compat, `--amp` arg tries apex before native amp
if has_apex:
args.apex_amp = True
elif has_native_amp:
args.native_amp = True
if args.apex_amp and has_apex:
use_amp = 'apex'
elif args.native_amp and has_native_amp:
use_amp = 'native'
elif args.apex_amp or args.native_amp:
_logger.warning("Neither APEX or native Torch AMP is available, using float32. "
"Install NVIDA apex or upgrade to PyTorch 1.6")
if args.num_gpu > 1:
if use_amp == 'apex':
_logger.warning(
'Apex AMP does not work well with nn.DataParallel, disabling. Use DDP or Torch AMP.')
use_amp = None
model = nn.DataParallel(model, device_ids=list(range(args.num_gpu))).cuda()
assert not args.channels_last, "Channels last not supported with DP, use DDP."
else:
model.cuda()
if args.channels_last:
model = model.to(memory_format=torch.channels_last)
optimizer = create_optimizer(args, model)
amp_autocast = suppress # do nothing
loss_scaler = None
if use_amp == 'apex':
model, optimizer = amp.initialize(model, optimizer, opt_level='O1')
loss_scaler = ApexScaler()
if args.local_rank == 0:
_logger.info('Using NVIDIA APEX AMP. Training in mixed precision.')
elif use_amp == 'native':
amp_autocast = torch.cuda.amp.autocast
loss_scaler = NativeScaler()
if args.local_rank == 0:
_logger.info('Using native Torch AMP. Training in mixed precision.')
else:
if args.local_rank == 0:
_logger.info('AMP not enabled. Training in float32.')
# optionally resume from a checkpoint
resume_epoch = None
if args.resume:
resume_epoch = resume_checkpoint(
model, args.resume,
optimizer=None if args.no_resume_opt else optimizer,
loss_scaler=None if args.no_resume_opt else loss_scaler,
log_info=args.local_rank == 0)
model_ema = None
if args.model_ema:
# Important to create EMA model after cuda(), DP wrapper, and AMP but before SyncBN and DDP wrapper
model_ema = ModelEma(
model,
decay=args.model_ema_decay,
device='cpu' if args.model_ema_force_cpu else '',
resume=args.resume)
if args.distributed:
if args.sync_bn:
assert not args.split_bn
try:
if has_apex and use_amp != 'native':
# Apex SyncBN preferred unless native amp is activated
model = convert_syncbn_model(model)
else:
model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
if args.local_rank == 0:
_logger.info(
'Converted model to use Synchronized BatchNorm. WARNING: You may have issues if using '
'zero initialized BN layers (enabled by default for ResNets) while sync-bn enabled.')
except Exception as e:
_logger.error('Failed to enable Synchronized BatchNorm. Install Apex or Torch >= 1.1')
if has_apex and use_amp != 'native':
# Apex DDP preferred unless native amp is activated
if args.local_rank == 0:
_logger.info("Using NVIDIA APEX DistributedDataParallel.")
model = ApexDDP(model, delay_allreduce=True)
else:
if args.local_rank == 0:
_logger.info("Using native Torch DistributedDataParallel.")
model = NativeDDP(model, device_ids=[args.local_rank]) # can use device str in Torch >= 1.1
# NOTE: EMA model does not need to be wrapped by DDP
lr_scheduler, num_epochs = create_scheduler(args, optimizer)
start_epoch = 0
if args.start_epoch is not None:
# a specified start_epoch will always override the resume epoch
start_epoch = args.start_epoch
elif resume_epoch is not None:
start_epoch = resume_epoch
if lr_scheduler is not None and start_epoch > 0:
lr_scheduler.step(start_epoch)
if args.local_rank == 0:
_logger.info('Scheduled epochs: {}'.format(num_epochs))
dataset_train = create_dataset(
args.dataset, root=args.data, split=args.train_split, is_training=True, batch_size=args.batch_size)
dataset_eval = create_dataset(
args.dataset, root=args.data, split=args.val_split, is_training=False, batch_size=args.batch_size)
collate_fn = None
mixup_fn = None
mixup_active = args.mixup > 0 or args.cutmix > 0. or args.cutmix_minmax is not None
if mixup_active:
mixup_args = dict(
mixup_alpha=args.mixup, cutmix_alpha=args.cutmix, cutmix_minmax=args.cutmix_minmax,
prob=args.mixup_prob, switch_prob=args.mixup_switch_prob, mode=args.mixup_mode,
label_smoothing=args.smoothing, num_classes=args.num_classes)
if args.prefetcher:
assert not num_aug_splits # collate conflict (need to support deinterleaving in collate mixup)
collate_fn = FastCollateMixup(**mixup_args)
else:
mixup_fn = Mixup(**mixup_args)
if num_aug_splits > 1:
dataset_train = AugMixDataset(dataset_train, num_splits=num_aug_splits)
train_interpolation = args.train_interpolation
if args.no_aug or not train_interpolation:
train_interpolation = data_config['interpolation']
loader_train = create_loader(
dataset_train,
input_size=data_config['input_size'],
batch_size=args.batch_size,
is_training=True,
use_prefetcher=args.prefetcher,
no_aug=args.no_aug,
re_prob=args.reprob,
re_mode=args.remode,
re_count=args.recount,
re_split=args.resplit,
scale=args.scale,
ratio=args.ratio,
hflip=args.hflip,
vflip=args.vflip,
color_jitter=args.color_jitter,
auto_augment=args.aa,
num_aug_splits=num_aug_splits,
interpolation=train_interpolation,
mean=data_config['mean'],
std=data_config['std'],
num_workers=args.workers,
distributed=args.distributed,
collate_fn=collate_fn,
pin_memory=args.pin_mem,
use_multi_epochs_loader=args.use_multi_epochs_loader
)
loader_eval = create_loader(
dataset_eval,
input_size=data_config['input_size'],
batch_size=args.validation_batch_size_multiplier * args.batch_size,
is_training=False,
use_prefetcher=args.prefetcher,
interpolation=data_config['interpolation'],
mean=data_config['mean'],
std=data_config['std'],
num_workers=args.workers,
distributed=args.distributed,
crop_pct=data_config['crop_pct'],
pin_memory=args.pin_mem,
)
if args.jsd:
assert num_aug_splits > 1 # JSD only valid with aug splits set
train_loss_fn = JsdCrossEntropy(num_splits=num_aug_splits, smoothing=args.smoothing).cuda()
elif mixup_active:
# smoothing is handled with mixup target transform
train_loss_fn = SoftTargetCrossEntropy().cuda()
elif args.smoothing:
train_loss_fn = LabelSmoothingCrossEntropy(smoothing=args.smoothing).cuda()
else:
train_loss_fn = nn.CrossEntropyLoss().cuda()
validate_loss_fn = nn.CrossEntropyLoss().cuda()
eval_metric = args.eval_metric
best_metric = None
best_epoch = None
if args.eval_checkpoint: # evaluate the model
load_checkpoint(model, args.eval_checkpoint, args.model_ema)
val_metrics = validate(model, loader_eval, validate_loss_fn, args)
print(f"Top-1 accuracy of the model is: {val_metrics['top1']:.1f}%")
return
saver = None
output_dir = ''
if args.local_rank == 0:
output_base = args.output if args.output else './output'
exp_name = '-'.join([
datetime.now().strftime("%Y%m%d-%H%M%S"),
args.model,
str(data_config['input_size'][-1])
])
output_dir = get_outdir(output_base, 'train', exp_name)
decreasing = True if eval_metric == 'loss' else False
saver = CheckpointSaver(
model=model, optimizer=optimizer, args=args, model_ema=model_ema, amp_scaler=loss_scaler,
checkpoint_dir=output_dir, recovery_dir=output_dir, decreasing=decreasing)
with open(os.path.join(output_dir, 'args.yaml'), 'w') as f:
f.write(args_text)
try: # train the model
for epoch in range(start_epoch, num_epochs):
if args.distributed:
loader_train.sampler.set_epoch(epoch)
train_metrics = train_epoch(
epoch, model, loader_train, optimizer, train_loss_fn, args,
lr_scheduler=lr_scheduler, saver=saver, output_dir=output_dir,
amp_autocast=amp_autocast, loss_scaler=loss_scaler, model_ema=model_ema, mixup_fn=mixup_fn)
if args.distributed and args.dist_bn in ('broadcast', 'reduce'):
if args.local_rank == 0:
_logger.info("Distributing BatchNorm running means and vars")
distribute_bn(model, args.world_size, args.dist_bn == 'reduce')
eval_metrics = validate(model, loader_eval, validate_loss_fn, args, amp_autocast=amp_autocast)
if model_ema is not None and not args.model_ema_force_cpu:
if args.distributed and args.dist_bn in ('broadcast', 'reduce'):
distribute_bn(model_ema, args.world_size, args.dist_bn == 'reduce')
ema_eval_metrics = validate(
model_ema.ema, loader_eval, validate_loss_fn, args, amp_autocast=amp_autocast, log_suffix=' (EMA)')
eval_metrics = ema_eval_metrics
if lr_scheduler is not None:
# step LR for next epoch
lr_scheduler.step(epoch + 1, eval_metrics[eval_metric])
update_summary(
epoch, train_metrics, eval_metrics, os.path.join(output_dir, 'summary.csv'),
write_header=best_metric is None)
if saver is not None:
# save proper checkpoint with eval metric
save_metric = eval_metrics[eval_metric]
best_metric, best_epoch = saver.save_checkpoint(epoch, metric=save_metric)
except KeyboardInterrupt:
pass
if best_metric is not None:
_logger.info('*** Best metric: {0} (epoch {1})'.format(best_metric, best_epoch))
def train_epoch(
epoch, model, loader, optimizer, loss_fn, args,
lr_scheduler=None, saver=None, output_dir='', amp_autocast=suppress,
loss_scaler=None, model_ema=None, mixup_fn=None):
if args.mixup_off_epoch and epoch >= args.mixup_off_epoch:
if args.prefetcher and loader.mixup_enabled:
loader.mixup_enabled = False
elif mixup_fn is not None:
mixup_fn.mixup_enabled = False
second_order = hasattr(optimizer, 'is_second_order') and optimizer.is_second_order
batch_time_m = AverageMeter()
data_time_m = AverageMeter()
losses_m = AverageMeter()
top1_m = AverageMeter()
top5_m = AverageMeter()
model.train()
end = time.time()
last_idx = len(loader) - 1
num_updates = epoch * len(loader)
for batch_idx, (input, target) in enumerate(loader):
last_batch = batch_idx == last_idx
data_time_m.update(time.time() - end)
if not args.prefetcher:
input, target = input.cuda(), target.cuda()
if mixup_fn is not None:
input, target = mixup_fn(input, target)
if args.channels_last:
input = input.contiguous(memory_format=torch.channels_last)
with amp_autocast():
output = model(input)
loss = loss_fn(output, target)
if not args.distributed:
losses_m.update(loss.item(), input.size(0))
optimizer.zero_grad()
if loss_scaler is not None:
loss_scaler(
loss, optimizer, clip_grad=args.clip_grad, parameters=model.parameters(), create_graph=second_order)
else:
loss.backward(create_graph=second_order)
if args.clip_grad is not None:
torch.nn.utils.clip_grad_norm_(model.parameters(), args.clip_grad)
optimizer.step()
torch.cuda.synchronize()
if model_ema is not None:
model_ema.update(model)
num_updates += 1
batch_time_m.update(time.time() - end)
if last_batch or batch_idx % args.log_interval == 0:
lrl = [param_group['lr'] for param_group in optimizer.param_groups]
lr = sum(lrl) / len(lrl)
if args.distributed:
reduced_loss = reduce_tensor(loss.data, args.world_size)
losses_m.update(reduced_loss.item(), input.size(0))
if args.local_rank == 0:
_logger.info(
'Train: {} [{:>4d}/{} ({:>3.0f}%)] '
'Loss: {loss.val:>9.6f} ({loss.avg:>6.4f}) '
'Time: {batch_time.val:.3f}s, {rate:>7.2f}/s '
'({batch_time.avg:.3f}s, {rate_avg:>7.2f}/s) '
'LR: {lr:.3e} '
'Data: {data_time.val:.3f} ({data_time.avg:.3f})'.format(
epoch,
batch_idx, len(loader),
100. * batch_idx / last_idx,
loss=losses_m,
batch_time=batch_time_m,
rate=input.size(0) * args.world_size / batch_time_m.val,
rate_avg=input.size(0) * args.world_size / batch_time_m.avg,
lr=lr,
data_time=data_time_m))
if args.save_images and output_dir:
torchvision.utils.save_image(
input,
os.path.join(output_dir, 'train-batch-%d.jpg' % batch_idx),
padding=0,
normalize=True)
if saver is not None and args.recovery_interval and (
last_batch or (batch_idx + 1) % args.recovery_interval == 0):
saver.save_recovery(epoch, batch_idx=batch_idx)
if lr_scheduler is not None:
lr_scheduler.step_update(num_updates=num_updates, metric=losses_m.avg)
end = time.time()
# end for
if hasattr(optimizer, 'sync_lookahead'):
optimizer.sync_lookahead()
return OrderedDict([('loss', losses_m.avg)])
def validate(model, loader, loss_fn, args, amp_autocast=suppress, log_suffix=''):
batch_time_m = AverageMeter()
losses_m = AverageMeter()
top1_m = AverageMeter()
top5_m = AverageMeter()
model.eval()
end = time.time()
last_idx = len(loader) - 1
with torch.no_grad():
for batch_idx, (input, target) in enumerate(loader):
last_batch = batch_idx == last_idx
if not args.prefetcher:
input = input.cuda()
target = target.cuda()
if args.channels_last:
input = input.contiguous(memory_format=torch.channels_last)
with amp_autocast():
output = model(input)
if isinstance(output, (tuple, list)):
output = output[0]
# augmentation reduction
reduce_factor = args.tta
if reduce_factor > 1:
output = output.unfold(0, reduce_factor, reduce_factor).mean(dim=2)
target = target[0:target.size(0):reduce_factor]
loss = loss_fn(output, target)
acc1, acc5 = accuracy(output, target, topk=(1, 5))
if args.distributed:
reduced_loss = reduce_tensor(loss.data, args.world_size)
acc1 = reduce_tensor(acc1, args.world_size)
acc5 = reduce_tensor(acc5, args.world_size)
else:
reduced_loss = loss.data
torch.cuda.synchronize()
losses_m.update(reduced_loss.item(), input.size(0))
top1_m.update(acc1.item(), output.size(0))
top5_m.update(acc5.item(), output.size(0))
batch_time_m.update(time.time() - end)
end = time.time()
if args.local_rank == 0 and (last_batch or batch_idx % args.log_interval == 0):
log_name = 'Test' + log_suffix
_logger.info(
'{0}: [{1:>4d}/{2}] '
'Time: {batch_time.val:.3f} ({batch_time.avg:.3f}) '
'Loss: {loss.val:>7.4f} ({loss.avg:>6.4f}) '
'Acc@1: {top1.val:>7.4f} ({top1.avg:>7.4f}) '
'Acc@5: {top5.val:>7.4f} ({top5.avg:>7.4f})'.format(
log_name, batch_idx, last_idx, batch_time=batch_time_m,
loss=losses_m, top1=top1_m, top5=top5_m))
metrics = OrderedDict([('loss', losses_m.avg), ('top1', top1_m.avg), ('top5', top5_m.avg)])
return metrics
if __name__ == '__main__':
main()
|
the-stack_0_4446 | #!/usr/bin/python
"""
(C) Copyright 2020-2021 Intel Corporation.
SPDX-License-Identifier: BSD-2-Clause-Patent
"""
import re
from apricot import TestWithServers
from daos_perf_utils import DaosPerfCommand
from command_utils_base import CommandFailure
class DaosPerfBase(TestWithServers):
"""Base test cases for the daos_perf command.
Test Class Description:
Tests daos_perf with different configurations.
:avocado: recursive
"""
def run_daos_perf(self):
"""Run the daos_perf command."""
# Create pool
self.add_pool()
# Create container
self.add_container(self.pool)
# Obtain the number of processes listed with the daos_perf options
processes = self.params.get("processes", "/run/daos_perf/*")
# Use the dmg_control yaml
dmg_config_file = self.get_dmg_command().yaml.filename
# Create the daos_perf command from the test yaml file
daos_perf = DaosPerfCommand(self.bin)
daos_perf.get_params(self)
daos_perf.pool_uuid.update(self.pool.uuid)
daos_perf.cont_uuid.update(self.container.uuid)
daos_perf.dmg_config_file.update(dmg_config_file)
self.log.info("daos_perf command: %s", str(daos_perf))
daos_perf_env = daos_perf.get_environment(self.server_managers[0])
# Create the orterun command
self.job_manager.assign_hosts(self.hostlist_clients, self.workdir, None)
self.job_manager.assign_processes(processes)
self.job_manager.assign_environment(daos_perf_env)
self.job_manager.job = daos_perf
self.log.info("orterun command: %s", str(self.job_manager))
# Run the daos_perf command and check for errors
try:
return self.job_manager.run()
except CommandFailure as error:
self.log.error("DAOS PERF Failed: %s", str(error))
self.fail("Test was expected to pass but it failed.\n")
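# --- Hedged usage sketch (added for illustration; not part of the original
# file). A concrete avocado test case would typically subclass DaosPerfBase and
# call run_daos_perf(); the class name, tags, and test name below are
# assumptions rather than real DAOS test definitions.
class DaosPerfExample(DaosPerfBase):
    """Example daos_perf test case built on DaosPerfBase.
    :avocado: recursive
    """
    def test_daos_perf_example(self):
        """Run daos_perf with the parameters from the test yaml.
        :avocado: tags=daos_perf
        """
        self.run_daos_perf()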
|
the-stack_0_4447 | import numpy as np
from random import shuffle
from scipy.sparse import csr_matrix
class SVM:
def __init__(self, learning_rate=1, regularization_loss_tradeoff=1):
self.learning_rate = learning_rate
self.regularization_loss_tradeoff = regularization_loss_tradeoff
def train(self, train, labels, epochs):
w = csr_matrix((1, train[0].shape[1]), dtype=np.float128)
for _ in range(epochs):
[w] = self.train_one_epoch(train, labels, w)
return w
    def train_one_epoch(self, train, labels, w):
        lr = self.learning_rate
        tradeoff = self.regularization_loss_tradeoff
        # visit the training examples in a random order each epoch
        order = [i for i in range(train.shape[0])]
        shuffle(order)
        w_transpose = w.transpose()
        for i in order:
            x = train[i]
            y = labels.toarray()[0][i]
            # hinge-loss margin check: update with the example when it lies
            # inside the margin or is misclassified
            if (x.dot(w_transpose) * y)[0, 0] <= 1:
                w = w * (1 - lr) + x * (lr * tradeoff * y)
            else:
                # otherwise apply only the regularization shrinkage
                w = w * (1 - lr)
            w_transpose = w.transpose()
        return [w]
class SVMPredictor:
def __init__(self, w):
self.w = w.transpose()
def predict(self, x):
if x.dot(self.w)[0, 0] < 0:
return -1
else:
return 1
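# --- Minimal usage sketch (added for illustration; not part of the original
# module). The toy data below is made up: rows are sparse feature vectors and
# the labels are +/-1 stored in a 1 x n sparse matrix, matching train()'s inputs.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    features = csr_matrix(rng.rand(20, 5))
    labels = csr_matrix(rng.choice([-1, 1], size=(1, 20)))
    svm = SVM(learning_rate=0.01, regularization_loss_tradeoff=0.1)
    weights = svm.train(features, labels, epochs=5)
    predictor = SVMPredictor(weights)
    print(predictor.predict(features[0]))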
|
the-stack_0_4449 | from dataclasses import dataclass, field
from typing import List
__NAMESPACE__ = "NISTSchema-SV-IV-list-negativeInteger-maxLength-2-NS"
@dataclass
class NistschemaSvIvListNegativeIntegerMaxLength2:
class Meta:
name = "NISTSchema-SV-IV-list-negativeInteger-maxLength-2"
namespace = "NISTSchema-SV-IV-list-negativeInteger-maxLength-2-NS"
value: List[int] = field(
default_factory=list,
metadata={
"max_length": 6,
"tokens": True,
}
)
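# --- Illustrative-only sketch (added; not part of the generated module). The
# dataclass just stores the token list; enforcing the max_length facet is left
# to an XML binding library such as xsdata, so the values below are arbitrary.
if __name__ == "__main__":
    example = NistschemaSvIvListNegativeIntegerMaxLength2(value=[-1, -22, -333])
    print(example.value)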
|
the-stack_0_4453 | import pygame
import random
import os
pygame.init()
win = pygame.display.set_mode((700, 700))
pygame.display.set_caption("Falling Blocks")
script_dir = os.path.dirname('Obstacles')
rel_path = r"/Users/alecdewulf/Desktop/Falling-Blocks/Images/Obstacles"
abs_file_path = os.path.join(script_dir, rel_path)
# loading objects
current_file = r"/finalboulder.png"
boulderimg = pygame.image.load(abs_file_path + current_file)
current_file = r"/betterball.png"
snowballimg = pygame.image.load(abs_file_path + current_file)
current_file = r"/bestrock.png"
rockimg = pygame.image.load(abs_file_path + current_file)
rel_path = r"/Users/alecdewulf/Desktop/Falling-Blocks/Images/Powerups"
script_dir = os.path.dirname('Powerups')
abs_file_path = os.path.join(script_dir, rel_path)
# loading powerups
current_file = r"/resizedheart.png"
heartimg = pygame.image.load(abs_file_path + current_file)
current_file = r"/bestgun.png"
gunimg = pygame.image.load(abs_file_path + current_file)
current_file = r'/better_small.png'
side_gun = pygame.image.load(abs_file_path + current_file)
#loading background
abs_file_path = os.path.join('Backgrounds',r"/Users/alecdewulf/Desktop/Falling-Blocks/Images/Backgrounds")
bg = pygame.image.load(abs_file_path + r"/background.jpg")
rel_path = r"/Users/alecdewulf/Desktop/Falling-Blocks/Images/Player"
script_dir = os.path.dirname('Player')
abs_file_path = os.path.join(script_dir, rel_path)
# character
char = pygame.image.load(abs_file_path + r'/standing.png')
walkRight = [pygame.image.load(abs_file_path + r'/R1.png'), pygame.image.load(abs_file_path + r'/R2.png'), pygame.image.load(abs_file_path + r'/R3.png'), pygame.image.load(abs_file_path + r'/R4.png'),\
pygame.image.load(abs_file_path + r'/R5.png'), pygame.image.load(abs_file_path + r'/R6.png'), pygame.image.load(abs_file_path + r'/R7.png'), pygame.image.load(abs_file_path + r'/R8.png'), pygame.image.load(abs_file_path + r'/R9.png')]
walkLeft = [pygame.image.load(abs_file_path + r'/L1.png'), pygame.image.load(abs_file_path + r'/L2.png'), pygame.image.load(abs_file_path + r'/L3.png'), pygame.image.load(abs_file_path + r'/L4.png'),\
pygame.image.load(abs_file_path + r'/L5.png'), pygame.image.load(abs_file_path + r'/L6.png'), pygame.image.load(abs_file_path + r'/L7.png'), pygame.image.load(abs_file_path + r'/L8.png'), pygame.image.load(abs_file_path + r'/L9.png')]
clock = pygame.time.Clock()
class Character(object):
def __init__(self, x, y, width, height):
self.x = x
self.y = y
self.width = width
self.height = height
self.v = 5
self.left, self.right = False, False
self.standing = True
self.walkCount = 0
self.health = 10
self.hitbox = (self.x + 17, self.y + 11, 29, 52)
self.alive = True
self.shooting = False
def draw(self, win):
if self.alive:
# reset image cycle
if self.walkCount + 1 >= 9:
self.walkCount = 0
# moving
if not(self.standing):
# drawing left walking images
if self.left:
win.blit(walkLeft[self.walkCount], (self.x, self.y))
self.walkCount += 1
elif self.right:
win.blit(walkRight[self.walkCount], (self.x, self.y))
self.walkCount += 1
# not moving
else:
if self.right:
win.blit(walkRight[0], (self.x, self.y))
else:
win.blit(walkLeft[0], (self.x, self.y))
#hitbox
# hitbox[0], hitbox[1] are the coords of the top left of the hitbox
self.hitbox = (self.x + 17, self.y + 11, 29, 52)
#pygame.draw.rect(win, (255, 0, 0), self.hitbox, 2)
#health bar
pygame.draw.rect(win, (255,0,0), (self.hitbox[0], self.hitbox[1] - 20, 50, 10))
pygame.draw.rect(win, (0,128,0), (self.hitbox[0], self.hitbox[1] - 20, 50 - ( 5* (10-self.health)), 10))
# gun
if self.shooting:
win.blit(side_gun, (self.x + 20, self.y + 40))
# circle at start of rect
# pygame.draw.circle(win, (255,0,0), (self.hitbox[0], self.hitbox[1]), 20)
else:
#game over
font = pygame.font.SysFont('comicsans', 30, True)
over = font.render('GAME OVER', 1, (0,0,0))
win.blit(over, (290, 350))
# abstract block class
class Block(object):
def __init__(self, x, y):
self.x = x
self.y = y
self.v = 10
self.falling = True
def draw(self, win):
# not off the screen
if self.y < 700:
win.blit(self.image, (self.x, self.y))
self.y += self.v
else:
self.falling = False
class Rock(Block):
def __init__(self, x, y):
Block.__init__(self, x, y)
self.v = 10
self.image = rockimg
self.hitbox = (self.x, self.y + 10, 90, 60)
self.id = "rock"
def draw(self, win):
Block.draw(self,win)
# hitbox
self.hitbox = (self.x, self.y + 10, 90, 60)
#pygame.draw.rect(win, (255,0,0), self.hitbox, 2)
class Snowball(Block):
def __init__(self, x, y):
Block.__init__(self, x, y)
self.v = 20
self.image = snowballimg
self.hitbox = (self.x, self.y - 10, 15, 15)
self.id = "snowball"
def draw(self,win):
Block.draw(self, win)
self.hitbox = (self.x, self.y - 10, 30, 30)
# pygame.draw.rect(win, (255,0,0), self.hitbox, 2)
class Boulder(Block):
def __init__(self, x, y):
Block.__init__(self, x, y)
self.v = 5
self.image = boulderimg
self.hitbox = (self.x, self.y - 20, 200 ,200)
self.id = "boulder"
def draw(self, win):
Block.draw(self, win)
self.hitbox = (self.x, self.y - 5, 135, 135)
#pygame.draw.rect(win, (255,0,0), self.hitbox, 2)
class Powerup(object):
def __init__(self, x):
self.x = x
self.y = 635
self.image = None
self.time = 270
self.appear = True
def draw(self, win):
if self.appear:
if self.time > 0:
win.blit(self.image, (self.x, self.y))
else:
self.appear = False
self.time -= 1
class Heart(Powerup):
def __init__(self, x):
Powerup.__init__(self, x)
self.increase = 1
self.image = heartimg
self.id = "heart"
self.hitbox = (self.x, self.y, 30, 30)
def draw(self, win):
Powerup.draw(self, win)
self.hitbox = (self.x, self.y, 30, 30)
#pygame.draw.rect(win, (255,0,0), self.hitbox, 2)
class Gun(Powerup):
def __init__(self, x):
Powerup.__init__(self, x)
self.image = gunimg
self.id = "gun"
self.hitbox = (self.x, self.y, 30, 30)
def draw(self, win):
Powerup.draw(self, win)
self.hitbox = (self.x, self.y, 30, 30)
#pygame.draw.rect(win, (255,0,0), self.hitbox, 2)
class Bullet(object):
def __init__(self, x, y):
self.x = x
self.y = y
self.hitbox = (self.x, self.y, 30, 30)
self.appear = True
self.v = 8
def draw(self, win):
if self.appear:
pygame.draw.circle(win, (0,0,0), (self.x, self.y), 7)
self.hitbox = (self.x - 10, self.y - 10, 20, 20)
#pygame.draw.rect(win, (255,0,0), self.hitbox, 2)
def drawWindow():
win.blit(bg, (0,0))
man.draw(win)
# drawing falling blocks
if man.alive:
for o in objects:
o.draw(win)
for p in powerups:
p.draw(win)
for b in bullets:
b.draw(win)
# displaying score
font = pygame.font.SysFont('comicsans', 30, True)
text = font.render('Score: ' + str(score), 1, (0,0,0))
win.blit(text, (550, 10))
else:
# draw the score
font = pygame.font.SysFont('comicsans', 30, True)
text = font.render('Score: ' + str(score), 1, (0,0,0))
win.blit(text, (290, 400))
# update the display
pygame.display.update()
man = Character(300, 600, 64, 64)
gun = Gun(400)
print(type(man))
objects, powerups, bullets = [], [gun] , []
run, hit = True, False
max_length, rounds, score, cooldown, interval = 0, 0, 0, 0, 27
shoot_cooldown, shoot_time = 0, 0
while run and man.alive:
# set fps
clock.tick(27)
# find the center of the man
center_x = (man.hitbox[0] + (man.hitbox[0] + man.hitbox[2]))//2
center_y = (man.hitbox[1] + (man.hitbox[1] + man.hitbox[3]))//2
# screen being closed
for e in pygame.event.get():
if e.type == pygame.QUIT:
run = False
# keys list
keys = pygame.key.get_pressed()
# moving left
if keys[pygame.K_LEFT] and man.x > 0:
man.x -= man.v
man.left, man.right = True, False
man.standing = False
# moving right
elif keys[pygame.K_RIGHT] and man.x < 700 - man.width:
man.x += man.v
man.right, man.left = True, False
man.standing = False
# standing
else:
man.standing = True
man.walkCount = 0
#shooting controls
if man.shooting and keys[pygame.K_SPACE] and len(bullets) <= 5 and shoot_cooldown >= 10:
shoot_cooldown = 0
new_bullet = Bullet(man.x + 30, man.y)
bullets.append(new_bullet)
# change bullet position or delete them
for bullet in bullets:
if bullet.y > 0:
bullet.y -= bullet.v
else:
bullets.pop(bullets.index(bullet))
# check for bullet collisions
for bullet in bullets:
for o in objects:
if bullet.x >= o.hitbox[0] and bullet.x <= o.hitbox[0] + o.hitbox[2]:
# check the y
if bullet.y >= o.hitbox[1] and bullet.y <= o.hitbox[1] + o.hitbox[3]:
objects.pop(objects.index(o))
bullets.pop(bullets.index(bullet))
#check rocks
for o in objects:
if o.falling == False:
objects.pop(objects.index(o))
score += 1
# check powerups
for p in powerups:
if p.appear == False:
powerups.pop(powerups.index(p))
#check for a collision
for r in objects:
# check the x
if center_x >= r.hitbox[0] and center_x <= r.hitbox[0] + r.hitbox[2]:
# check the y
if center_y >= r.hitbox[1] and center_y <= r.hitbox[1] + r.hitbox[3]:
if r.id == "boulder":
if man.health - 2 <= 0:
man.alive = False
else:
print("HIT")
r.falling = False
man.health -= 2
# not a boulder
elif man.health - 1 == 0:
man.alive = False
else:
print('hit')
r.falling = False
man.health -= 1
# generate new objects
x = random.randint(1,10)
if x >= 5 and len(objects) < 5 + max_length and cooldown >= 20:
x = random.randint(1, 21)
xpos = random.randint(0, 700)
if x == 10 or x == 15:
new_snowball = Snowball(xpos, 0)
objects.append(new_snowball)
elif x == 20:
new_boulder = Boulder(xpos, 0)
objects.append(new_boulder)
else:
newrock = Rock(xpos, 0)
objects.append(newrock)
cooldown = 0
# generate new powerups
x = random.randint(1, 100)
if score > 50 and x == 25 and len(powerups) == 0:
choice = random.randint(1, 100)
xpos = random.randint(0,700)
if choice >= 50:
newheart = Heart(xpos)
powerups.append(newheart)
else:
newgun = Gun(xpos)
powerups.append(newgun)
# check for picking up powerup
for p in powerups:
if center_x >= p.hitbox[0] and center_x <= p.hitbox[0] + p.hitbox[2]:
# check the y
if center_y >= p.hitbox[1] and center_y <= p.hitbox[1] + p.hitbox[3]:
if p.id == "heart":
if man.health < 10:
man.health += 1
elif p.id == "gun":
man.shooting = True
# reset the shoot time
shoot_time = 135
# picked up an powerup
p.appear = False
# check for the gun being use up
if shoot_time == 0:
man.shooting = False
# draw the scene
drawWindow()
# increment the cooldown by a tenth of the score after 10 objects
# so that difficulty increases over time
if score < 10:
cooldown += 1
else:
cooldown += int(score * 0.1)
# add to the amount of allowed objects as time goes on
if rounds == 100 and max_length <= 10:
max_length += 1
rounds = 0
    # increment variables
interval += 1
shoot_cooldown += 1
shoot_time -= 1
rounds += 1
rel_path = r"C:\Users\Owner\Desktop\Falling-Blocks\Textfiles"
script_dir = os.path.dirname("Textfiles")
abs_file_path = os.path.join(script_dir, rel_path)
# print highscores
highscores = open(abs_file_path + r'\highscores.txt', 'r')
top = int(highscores.read())
print("Current highscore is ", top)
highscores.close()
hs = open(abs_file_path + r'\highscores.txt', 'w')
if score > top:
    print("Congratulations! You have the new highscore")
    hs.write(str(score))
else:
    # opening with 'w' truncates the file, so rewrite the old highscore
    hs.write(str(top))
hs.close()
run = True
# game over screen
while run and not(man.alive):
# screen being closed
for e in pygame.event.get():
if e.type == pygame.QUIT:
run = False
drawWindow()
pygame.quit()
|
the-stack_0_4454 | """Handle presentation exchange information interface with non-secrets storage."""
from marshmallow import fields
from ....messaging.models.base_record import BaseRecord, BaseRecordSchema
class PresentationExchange(BaseRecord):
"""Represents a presentation exchange."""
class Meta:
"""PresentationExchange metadata."""
schema_class = "PresentationExchangeSchema"
RECORD_TYPE = "presentation_exchange"
RECORD_ID_NAME = "presentation_exchange_id"
WEBHOOK_TOPIC = "presentations"
LOG_STATE_FLAG = "debug.presentations"
TAG_NAMES = {"thread_id"}
INITIATOR_SELF = "self"
INITIATOR_EXTERNAL = "external"
STATE_REQUEST_SENT = "request_sent"
STATE_REQUEST_RECEIVED = "request_received"
STATE_PRESENTATION_SENT = "presentation_sent"
STATE_PRESENTATION_RECEIVED = "presentation_received"
STATE_VERIFIED = "verified"
def __init__(
self,
*,
presentation_exchange_id: str = None,
connection_id: str = None,
thread_id: str = None,
initiator: str = None,
state: str = None,
presentation_request: dict = None,
presentation: dict = None,
verified: str = None,
error_msg: str = None,
**kwargs
):
"""Initialize a new PresentationExchange."""
super().__init__(presentation_exchange_id, state, **kwargs)
self.connection_id = connection_id
self.thread_id = thread_id
self.initiator = initiator
self.state = state
self.presentation_request = presentation_request
self.presentation = presentation
self.verified = verified
self.error_msg = error_msg
@property
def presentation_exchange_id(self) -> str:
"""Accessor for the ID associated with this exchange."""
return self._id
@property
def record_value(self) -> dict:
"""Accessor for JSON record value generated for this presentation exchange."""
return {
prop: getattr(self, prop)
for prop in (
"connection_id",
"initiator",
"presentation_request",
"presentation",
"error_msg",
"verified",
"state",
)
}
class PresentationExchangeSchema(BaseRecordSchema):
"""Schema for serialization/deserialization of presentation exchange records."""
class Meta:
"""PresentationExchangeSchema metadata."""
model_class = PresentationExchange
presentation_exchange_id = fields.Str(required=False)
connection_id = fields.Str(required=False)
thread_id = fields.Str(required=False)
initiator = fields.Str(required=False)
state = fields.Str(required=False)
presentation_request = fields.Dict(required=False)
presentation = fields.Dict(required=False)
verified = fields.Str(required=False)
error_msg = fields.Str(required=False)
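# --- Hedged usage sketch (added; not part of the original module). The field
# values are placeholders, and this assumes the surrounding BaseRecord
# machinery is importable; it only shows the intended call pattern.
if __name__ == "__main__":
    exchange = PresentationExchange(
        connection_id="placeholder-connection-id",
        thread_id="placeholder-thread-id",
        initiator=PresentationExchange.INITIATOR_SELF,
        state=PresentationExchange.STATE_REQUEST_SENT,
    )
    print(exchange.record_value)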
|
the-stack_0_4455 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
from paddle.fluid import core, unique_name
from functools import reduce
from paddle.distributed.fleet.meta_optimizers.common import is_loss_grad_op, is_backward_op, is_optimizer_op
from paddle.distributed.fleet.meta_optimizers.common import OpRole, OP_ROLE_KEY, OP_ROLE_VAR_KEY
import re
import os
def check_broadcast(block):
"""
if a var is broadcasted, it should have a sync_comm before
this var is used, if not, raise error.
if the broadcasted var has a fill_constant op, the fill_constant
op should stay forward before the broadcast op, and before a
sync_calc op. Otherwise, raise error.
should ignore and skip broadcast_op of inner_parallelism (e.g. Megatron)
"""
broadcast_vars = {}
for idx, op in enumerate(block.ops):
if op.type == "c_broadcast":
if op.all_attrs()["use_calc_stream"] == False:
var_name = op.desc.input_arg_names()[0]
if "@BroadCast" in var_name:
if var_name in broadcast_vars:
raise ValueError("var_name areadly exist: {}"
"the old pos is {}, the new pos is {}".
format(var_name, broadcast_vars[
var_name]["broadcast_pos"], idx))
broadcast_vars[var_name] = {
"fill_constant_pos": -1,
"broadcast_pos": idx,
}
for idx, op in enumerate(block.ops):
if op.type == "fill_constant":
var_name = op.desc.output_arg_names()[0]
if var_name in broadcast_vars:
broadcast_vars[var_name]["fill_constant_pos"] = idx
continue
last_sync_comm_op_idx = -1
last_sync_calc_op_idx = -1
for idx, op in enumerate(block.ops):
if op.type == "c_sync_comm_stream":
last_sync_comm_op_idx = idx
continue
if op.type == "c_sync_calc_stream":
last_sync_calc_op_idx = idx
continue
if op.type == "c_broadcast":
if op.all_attrs()["use_calc_stream"] == False:
var_name = op.desc.input_arg_names()[0]
if "@BroadCast" in var_name:
if broadcast_vars[var_name]["fill_constant_pos"] != -1:
assert (last_sync_calc_op_idx != -1)
assert (broadcast_vars[var_name]["fill_constant_pos"] <
last_sync_calc_op_idx)
assert (last_sync_calc_op_idx < idx)
continue
for input_name in op.desc.input_arg_names():
if input_name in broadcast_vars:
assert (broadcast_vars[input_name]["broadcast_pos"] != -1)
assert (broadcast_vars[input_name]["broadcast_pos"] <
last_sync_comm_op_idx)
assert (last_sync_comm_op_idx < idx)
return
def check_allreduce_sum(block, shard, sharding_ring_id, dp_ring_id=-1):
"""
the op order should be:
grad:
- 0: op that generate Var
- 1: sync_calc
- 2: reduce_sum_sharding (allreduce --> reduce)
- 3: sync_comm
- 4: allreuce_sum_dp (dp_grads)
- 5: sync_comm (dp_grads)
- 6: op that use Var (dp_grads & sum)
should ignore and skip allreduce_op of inner_parallelism (e.g. Megatron)
"""
vars_status = {}
dp_grads_status = {}
idx_last_grad_allreduce = -1
idx_amp_allreduce = -1
idx_gradient_clip_allreduce = -1
for idx, op in enumerate(block.ops):
# sharding use both allreduce and reduce to sync grad
if op.type == "c_allreduce_sum" or op.type == "c_reduce_sum":
if op.all_attrs()["use_calc_stream"] == False:
ring_id = op.desc.attr("ring_id")
var_name = op.desc.input_arg_names()[0]
param = var_name.split("@")[0]
assert 'sum' in var_name or ("@GRAD" in var_name)
if 'sum' in var_name or (not shard.has_param(param)):
vars_status[var_name] = -1
else:
dp_grads_status[var_name] = -1
if ring_id != sharding_ring_id:
assert shard.has_param(param)
assert ring_id == dp_ring_id
if "sum" in var_name:
idx_amp_allreduce = idx
elif "@GRAD":
idx_last_grad_allreduce = idx
if op.type == "c_allreduce_max":
idx_gradient_clip_allreduce = idx
for op in block.ops:
if op.type == "c_sync_calc_stream":
for var_name in vars_status:
if var_name in vars_status and vars_status[var_name] == 0:
vars_status[var_name] = 1
for var_name in dp_grads_status:
if var_name in dp_grads_status and dp_grads_status[
var_name] == 0:
dp_grads_status[var_name] = 1
# check sharding allreduce and reduce but skip megatron allreduce
elif op.type == "c_allreduce_sum" or op.type == "c_reduce_sum":
if op.all_attrs()["use_calc_stream"] == False:
var_name = op.desc.input_arg_names()[0]
ring_id = op.desc.attr("ring_id")
if ring_id == sharding_ring_id:
assert op.type == "c_reduce_sum", "Grad in Sharding group should be reduce rather than allreduce"
if var_name in vars_status:
_status = vars_status[var_name]
else:
_status = dp_grads_status[var_name]
if _status == -1:
raise ValueError("{} is not generated, but you are"
"trying to all-reduce it".format(
var_name))
if _status == 0:
raise ValueError("There should be a sync_calc op "
"after generate Var: {} and before the"
"c_allreduce_sum op".format(var_name))
assert (_status == 1)
if var_name in vars_status:
vars_status[var_name] = 2
else:
dp_grads_status[var_name] = 2
else:
assert ring_id == dp_ring_id
param = var_name.split("@")[0]
assert shard.has_param(param)
assert dp_grads_status[var_name] == 3
dp_grads_status[var_name] = 4
elif op.type == "c_sync_comm_stream":
var_name = op.desc.input_arg_names()[0]
ring_id = op.desc.attr("ring_id")
if ring_id == sharding_ring_id:
for var_name in op.desc.input_arg_names():
if var_name in vars_status:
assert vars_status[var_name] == 2
vars_status[var_name] = 3
elif var_name in dp_grads_status:
assert dp_grads_status[var_name] == 2
dp_grads_status[var_name] = 3
else:
for var_name in op.desc.input_arg_names():
param = var_name.split("@")[0]
assert ring_id == dp_ring_id
assert shard.has_param(param)
assert dp_grads_status[var_name] == 4
dp_grads_status[var_name] = 5
else:
for input_name in op.desc.input_arg_names():
if input_name in vars_status:
if vars_status[input_name] != 3:
raise ValueError("There should be a sync_comm op "
"after allreduce the Var: {}".format(
input_name))
raise ValueError(
"The reduce output grad [{}] should NOT be be used in Non-root rank.".
format(input_name))
if input_name in dp_grads_status:
if dp_ring_id == -1:
if dp_grads_status[input_name] != 3:
raise ValueError("There should be a sync_comm op "
"after allreduce the Var: {}".
format(input_name))
else:
if dp_grads_status[input_name] != 5:
raise ValueError(
"The grad in shard should be allreduce and sync"
"twice before usage {}".format(input_name))
for output_name in op.desc.output_arg_names():
if output_name in vars_status and \
vars_status[output_name] == -1:
vars_status[output_name] = 0
if output_name in dp_grads_status and \
dp_grads_status[output_name] == -1:
dp_grads_status[output_name] = 0
# check sharding with amp
if idx_amp_allreduce != -1:
assert idx_amp_allreduce > idx_last_grad_allreduce
# check sharding with gradient_clip_by_global_norm
if idx_gradient_clip_allreduce != -1:
assert idx_gradient_clip_allreduce > idx_last_grad_allreduce
return
def get_valid_op_role(block, insert_idx):
"""
return OpRole.Forward or OpRole.Backward
"""
op_role = block.ops[insert_idx].attr('op_role')
if (insert_idx >= len(block.ops)) or (
op_role in [int(OpRole.Backward), int(OpRole.Optimize)]):
return OpRole.Backward
if op_role in [int(OpRole.Forward), int(OpRole.Loss)]:
return OpRole.Forward
return get_valid_op_role(block, insert_idx + 1)
def insert_sync_calc_op(block, insert_idx, calc_dep_vars):
"""
_insert_sync_calc_op
"""
op_role = get_valid_op_role(block, insert_idx)
block._insert_op_without_sync(
insert_idx,
type='c_sync_calc_stream',
inputs={'X': calc_dep_vars},
outputs={'Out': calc_dep_vars},
attrs={OP_ROLE_KEY: op_role})
return
def insert_sync_comm_op(block, insert_idx, ring_id, comm_dep_vars):
"""
insert sync_comm_op for single var
"""
op_role = get_valid_op_role(block, insert_idx)
block._insert_op_without_sync(
insert_idx,
type='c_sync_comm_stream',
inputs={'X': comm_dep_vars},
outputs={'Out': comm_dep_vars},
attrs={'ring_id': ring_id,
OP_ROLE_KEY: op_role})
return 1
def insert_sync_comm_ops(block, insert_idx, ring_id, comm_dep_vars):
"""
insert sync_comm_op for vars
"""
    # NOTE (JZ-LIANG) to be checked; may result in an undefined case
if len(comm_dep_vars) == 0:
return 0
op_role = get_valid_op_role(block, insert_idx)
block._insert_op_without_sync(
insert_idx,
type='c_sync_comm_stream',
inputs={'X': comm_dep_vars},
outputs={'Out': comm_dep_vars},
attrs={'ring_id': int(ring_id),
OP_ROLE_KEY: op_role})
return 1
def insert_fill_constant_ops(block, insert_idx, fill_constant_vars):
"""
_add_fill_constant_ops
"""
op_role = get_valid_op_role(block, insert_idx)
for broadcast_name in fill_constant_vars:
broadcast_var = block.var(broadcast_name)
block._insert_op_without_sync(
insert_idx,
type="fill_constant",
outputs={"Out": broadcast_var.name},
attrs={
"shape": broadcast_var.shape,
"dtype": broadcast_var.dtype,
"value": 0.0,
OP_ROLE_KEY: op_role
})
return
def insert_cast_ops(block, insert_idx, cast_ops):
"""
_add_cast_ops
"""
op_role = get_valid_op_role(block, insert_idx)
for fp16_name, fp32_name in cast_ops.items():
block._insert_op_without_sync(
insert_idx,
type="cast",
inputs={"X": fp32_name},
outputs={"Out": fp16_name},
attrs={
"in_dtype": core.VarDesc.VarType.FP32,
"out_dtype": core.VarDesc.VarType.FP16,
OP_ROLE_KEY: op_role
})
return
def insert_allreduce_ops(block,
insert_idx,
ring_id,
allreduce_vars,
op_role=OpRole.Backward,
use_calc_stream=False,
user_defined_strategy=None):
"""
_add_allreduce_ops
"""
if len(allreduce_vars) == 0:
return
if user_defined_strategy and \
user_defined_strategy.fuse_all_reduce_ops and \
not user_defined_strategy.fuse_grad_merge:
        # If fuse_grad_merge is enabled, the grad vars have already been fused during
        # the gradient merge pass; therefore, those vars do not need to be fused here
insert_fused_allreduce_ops(block, insert_idx, ring_id, allreduce_vars,
op_role, use_calc_stream,
user_defined_strategy.fuse_grad_size_in_MB)
else:
for var in allreduce_vars:
block._insert_op_without_sync(
insert_idx,
type='c_allreduce_sum',
inputs={'X': var},
outputs={'Out': var},
attrs={
'ring_id': ring_id,
'use_calc_stream': use_calc_stream,
OP_ROLE_KEY: op_role
})
return
class FuseHelper(object):
@staticmethod
def sort_vars_by_dtype(block, vars_name):
fp32_vars = []
fp16_vars = []
other_vars = []
for var in vars_name:
dtype = block.var(var).dtype
if dtype == paddle.float32:
fp32_vars.append(var)
elif dtype == paddle.float16:
fp16_vars.append(var)
else:
other_vars.append(var)
assert len(other_vars) == 0, "only support fp32/fp16 vars for fuse"
fp32_vars.extend(fp16_vars)
return fp32_vars
@staticmethod
def get_fused_groups(block, vars_name, fuse_size=32.):
""" coalesce tensor, get fused group """
groups = []
cur_size = 0.
last_dtype = None
for var_name in vars_name:
real_var = block.var(var_name)
var_size = get_var_size(real_var)
if cur_size + var_size > fuse_size \
or len(groups) == 0 \
or real_var.dtype != last_dtype:
groups.append([real_var])
cur_size = var_size
last_dtype = real_var.dtype
else:
groups[-1].append(real_var)
cur_size += var_size
return groups
@staticmethod
def insert_coalesce_tensor(block,
index,
groups,
op_role=OpRole.Backward,
prefix="Output"):
fused_vars = []
insert_num = 0
for group in groups:
assert len(group) >= 1
if len(group) == 1:
# no need fuse
fused_vars.append(group[0])
continue
fused_var = block.create_var(
name=unique_name.generate('Fused{}_{}'.format(prefix, group[0]
.name)),
dtype=group[0].dtype,
persistable=False,
stop_gradient=True)
fused_vars.append(fused_var)
block._insert_op_without_sync(
index,
type="coalesce_tensor",
inputs={"Input": group},
outputs={"Output": group,
"FusedOutput": fused_var},
attrs={
"copy_data": True,
"use_align": True,
"dtype": group[0].dtype,
OP_ROLE_KEY: op_role
})
insert_num += 1
return fused_vars, insert_num
def insert_fused_allreduce_ops(block,
insert_idx,
ring_id,
allreduce_vars,
op_role=OpRole.Backward,
use_calc_stream=False,
fuse_grad_size_in_MB=32):
groups = FuseHelper.get_fused_groups(block, allreduce_vars,
fuse_grad_size_in_MB)
fused_vars, insert_num = FuseHelper.insert_coalesce_tensor(
block, insert_idx, groups, op_role, prefix="Grad")
for fused_var in fused_vars:
block._insert_op_without_sync(
insert_idx + insert_num,
type='c_allreduce_sum',
inputs={'X': fused_var},
outputs={'Out': fused_var},
attrs={
'ring_id': ring_id,
'use_calc_stream': use_calc_stream,
OP_ROLE_KEY: op_role
})
if not use_calc_stream:
block._insert_op_without_sync(
insert_idx + insert_num,
type='c_sync_calc_stream',
inputs={'X': fused_var},
outputs={'Out': fused_var},
attrs={OP_ROLE_KEY: op_role})
def insert_fused_reduce_ops(block,
insert_idx,
ring_id,
reduce_vars,
shard,
op_role=OpRole.Backward,
use_calc_stream=False,
rank=None,
fuse_grad_size=32):
nranks = shard.worker_num
device_to_vars = [[] for _ in range(nranks)]
for var in reduce_vars:
root_id = get_grad_device(var, shard)
assert 0 <= root_id < nranks, "root_id should >=0 and < nranks, " \
"but now nranks={}, the root_id of var={} is {}"\
.format(nranks, var, root_id)
device_to_vars[root_id].append(var)
for root_id, vars_name in enumerate(device_to_vars):
groups = FuseHelper.get_fused_groups(block, vars_name, fuse_grad_size)
fused_vars, insert_num = FuseHelper.insert_coalesce_tensor(
block, insert_idx, groups, op_role, prefix="Grad")
for fused_var in fused_vars:
block._insert_op_without_sync(
insert_idx + insert_num,
type='c_reduce_sum',
inputs={'X': fused_var},
outputs={'Out': fused_var},
attrs={
'ring_id': ring_id,
'root_id': root_id,
'use_calc_stream': use_calc_stream,
OP_ROLE_KEY: op_role
})
if not use_calc_stream:
block._insert_op_without_sync(
insert_idx + insert_num,
type='c_sync_calc_stream',
inputs={'X': fused_var},
outputs={'Out': fused_var},
attrs={OP_ROLE_KEY: op_role})
return [] if rank is None else device_to_vars[rank]
def insert_reduce_ops(block,
insert_idx,
ring_id,
reduce_vars,
shard,
op_role=OpRole.Backward,
use_calc_stream=False,
rank=None,
strategy=None):
"""
_add_reduce_ops
"""
if strategy and strategy.fuse_all_reduce_ops and \
not strategy.fuse_grad_merge:
return insert_fused_reduce_ops(block, insert_idx, ring_id, reduce_vars,
shard, op_role, use_calc_stream, rank,
strategy.fuse_grad_size_in_MB)
grad_in_this_device = []
for var in reduce_vars:
grad_var = var
if strategy and strategy.fuse_all_reduce_ops and \
strategy.fuse_grad_merge:
            # TODO(wangxi): if fp16_allreduce is supported, this needs to be
            # 'FusedMergedGrad.cast_fp16._'
grad_var = var.replace('FusedMergedGrad_', '')
root_id = get_grad_device(grad_var, shard)
assert root_id >= 0, "root id should be a positive int, but now root id is {}".format(
root_id)
if rank is not None and rank == root_id:
grad_in_this_device.append(var)
block._insert_op_without_sync(
insert_idx,
type='c_reduce_sum',
inputs={'X': var},
outputs={'Out': var},
attrs={
'ring_id': ring_id,
'root_id': root_id,
'use_calc_stream': use_calc_stream,
OP_ROLE_KEY: op_role
})
return grad_in_this_device
def insert_fused_broadcast_param_ops(block,
insert_idx,
ring_id,
params,
shard,
op_role=OpRole.Optimize,
use_calc_stream=False,
rank=None,
fuse_size=32):
nranks = shard.worker_num
device_to_vars = [[] for _ in range(nranks)]
for var in params:
root_id = shard.device(var)
assert 0 <= root_id < nranks, "root_id should >=0 and < nranks, " \
"but now nranks={}, the root_id of var={} is {}"\
.format(nranks, var, root_id)
device_to_vars[root_id].append(var)
for root_id, vars_name in enumerate(device_to_vars):
groups = FuseHelper.get_fused_groups(block, vars_name, fuse_size)
fused_vars, insert_num = FuseHelper.insert_coalesce_tensor(
block, insert_idx, groups, op_role, prefix="Param")
for fused_var in fused_vars:
block._insert_op_without_sync(
insert_idx + insert_num,
type='c_broadcast',
inputs={'X': fused_var},
outputs={'Out': fused_var},
attrs={
'ring_id': ring_id,
'root': root_id,
'use_calc_stream': use_calc_stream,
OP_ROLE_KEY: op_role
})
if not use_calc_stream:
block._insert_op_without_sync(
insert_idx + insert_num,
type='c_sync_calc_stream',
inputs={'X': fused_var},
outputs={'Out': fused_var},
attrs={OP_ROLE_KEY: op_role})
return [] if rank is None else device_to_vars[rank]
def insert_broadcast_param_ops(block,
insert_idx,
ring_id,
params,
shard,
op_role=OpRole.Optimize,
use_calc_stream=False,
rank=None,
strategy=None):
"""
add broadcast param ops
"""
if strategy and strategy.fuse_all_reduce_ops:
# TODO(wangxi): put fused var in startup_program, only need exec once
return insert_fused_broadcast_param_ops(
block, insert_idx, ring_id, params, shard, op_role, use_calc_stream,
rank, strategy.fuse_grad_size_in_MB)
param_in_this_device = []
for param in params:
root_id = shard.device(param)
assert root_id >= 0, "root id should be a positive int, but now root id is {}".format(
root_id)
if rank is not None and rank == root_id:
param_in_this_device.append(param)
block._insert_op_without_sync(
insert_idx,
type='c_broadcast',
inputs={'X': param},
outputs={'Out': param},
attrs={
'ring_id': ring_id,
'root': root_id,
'use_calc_stream': use_calc_stream,
OP_ROLE_KEY: op_role
})
return param_in_this_device
def fuse_opt_broadcast_param_ops(block,
ring_id,
shard,
op_role=OpRole.Optimize,
strategy=None):
"""
fuse optimizer sharding broadcast param ops
"""
if strategy is None or not strategy.fuse_all_reduce_ops:
return
fuse_size = strategy.fuse_grad_size_in_MB
nranks = shard.worker_num
device_to_vars = [[] for _ in range(nranks)]
for idx, op in reversed(list(enumerate(block.ops))):
if not is_optimizer_op(op) or op.type != 'c_broadcast':
break
var = op.input_arg_names[0]
root_id = op.attr('root')
device_to_vars[root_id].insert(0, var)
block._remove_op(idx, sync=False)
insert_idx = idx + 1
for root_id, vars_name in enumerate(device_to_vars):
vars_name = FuseHelper.sort_vars_by_dtype(block, vars_name)
groups = FuseHelper.get_fused_groups(block, vars_name, fuse_size)
fused_vars, insert_num = FuseHelper.insert_coalesce_tensor(
block, insert_idx, groups, op_role, prefix="Param")
for fused_var in fused_vars:
block._insert_op_without_sync(
insert_idx + insert_num,
type='c_broadcast',
inputs={'X': fused_var},
outputs={'Out': fused_var},
attrs={
'ring_id': ring_id,
'root': root_id,
'use_calc_stream': True,
OP_ROLE_KEY: op_role
})
block._sync_with_cpp()
def get_grad_device(grad_name, shard):
assert "@GRAD" in grad_name, "[{}] should be a grad variable.".format(
grad_name)
base_name = None
# NOTE: mind the traversal order
possible_suffixes = [
# sharding gm
'.cast_fp16@GRAD@MERGED',
'.cast_fp16@GRAD',
# pipeline
'@GRAD@MERGED@FP16',
'@GRAD@MERGED',
'@GRAD',
]
for suffix in possible_suffixes:
if suffix in grad_name:
base_name = re.sub(suffix, '', grad_name)
break
assert base_name in shard.global_param2device, "[{}] should be a param variable.".format(
base_name)
return shard.global_param2device[base_name]
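# Worked example (illustrative): for grad_name "fc_0.w_0@GRAD", the matched
# suffix is "@GRAD", so base_name becomes "fc_0.w_0" and the function returns
# shard.global_param2device["fc_0.w_0"].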
def get_first_check_finite_and_unscale_op_idx(block, raise_error=True):
for idx, op in enumerate(block.ops):
if op.type == "check_finite_and_unscale":
return idx
if raise_error:
raise ValueError(
"amp is turned on but check_finite_and_unscale op does not exist in main block"
)
return -1
def get_first_optimize_op_idx(block):
first_opt_op_idx = None
for index, op in reversed(tuple(enumerate(block.ops))):
if is_backward_op(op) and first_opt_op_idx is None:
first_opt_op_idx = index + 1
break
return first_opt_op_idx
def insert_broadcast_ops(block, insert_idx, ring_id, broadcast2root):
"""
_add_broadcast_ops
"""
op_role = get_valid_op_role(block, insert_idx)
for broadcast_name, root_device in broadcast2root:
block._insert_op_without_sync(
insert_idx,
type='c_broadcast',
inputs={'X': broadcast_name},
outputs={'Out': broadcast_name},
attrs={
'ring_id': ring_id,
'root': root_device,
OP_ROLE_KEY: op_role
})
return
DtypeToSize = {
core.VarDesc.VarType.FP16: 2,
core.VarDesc.VarType.FP32: 4,
core.VarDesc.VarType.FP64: 8,
core.VarDesc.VarType.INT16: 2,
core.VarDesc.VarType.INT32: 4,
core.VarDesc.VarType.INT64: 8,
core.VarDesc.VarType.BOOL: 1,
core.VarDesc.VarType.UINT8: 1,
}
def get_var_size(param):
"""
input:
- param: var
return:
var size in MB
"""
assert -1 not in param.shape
return reduce(lambda x, y: x * y,
param.shape) * DtypeToSize[param.dtype] / 1024.0 / 1024.0
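# Worked example (illustrative): a float32 var of shape (1024, 1024) takes
# 1024 * 1024 * 4 bytes = 4,194,304 bytes, so get_var_size returns
# 4194304 / 1024.0 / 1024.0 = 4.0 (MB).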
def insert_scale_loss_grad_ops(block, scale=1.0):
'''
    In order to keep the learning rate consistent across different numbers of
    training workers, we scale the loss grad by the number of workers.
'''
for idx, op in reversed(list(enumerate(block.ops))):
if is_loss_grad_op(op):
assert op.type == 'fill_constant', \
"loss_grad_op must be fill_constant op, " \
"but this op is {}".format(op.type)
assert op.has_attr('value')
loss_scale = float(op.attr('value'))
loss_scale = loss_scale / scale
op._set_attr('value', loss_scale)
break
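# Worked example (illustrative): with scale=8 (e.g. 8 data-parallel workers)
# and an original fill_constant loss-grad value of 1.0, the op's value
# attribute is rewritten to 1.0 / 8 = 0.125.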
def comm_analyse(main_program):
"""
    Analyse the parameter sizes that need to be broadcast/allreduced during sharding training
"""
reduce_vars = {}
broadcast_vars = {}
block = main_program.global_block()
for op in block.ops:
if op.type == "c_broadcast":
var_name = op.desc.input_arg_names()[0]
# convert MB to KB
broadcast_vars[var_name] = get_var_size(block.var(
var_name)) * 1024.0
elif op.type == "c_allreduce_sum":
var_name = op.desc.input_arg_names()[0]
reduce_vars[var_name] = get_var_size(block.var(var_name)) * 1024.0
varsize_count = {}
gap = 1
for k, v in broadcast_vars.items():
print("broadcast: {}: {} KB".format(k, v))
if (int(v / gap) in varsize_count):
varsize_count[int(v / gap)] += 1
else:
varsize_count[int(v / gap)] = 1
for k, v in reduce_vars.items():
print("allreduce: {}: {} KB".format(k, v))
if (int(v / gap) in varsize_count):
varsize_count[int(v / gap)] += 1
else:
varsize_count[int(v / gap)] = 1
with open("nccl_size.txt", 'w') as f:
sorted_varsize = sorted(varsize_count.items(), key=lambda x: x[0])
for varsize, count in sorted_varsize:
print("NCCL size {}~{} KB: {}".format(varsize, varsize + 1, count))
f.write("NCCL size {}~{} KB: {}\n".format(varsize, varsize + 1,
count))
def add_sync_comm(program, sharding_ring_id):
"""
    When a test program is cloned from the sharding main program,
    some of the sync_comm ops may be pruned by mistake; this function
    adds the missing sync_comm ops back for the test program.
"""
    # NOTE (liangjianzhong): only one comm stream is supported for now; using
    # more than one comm stream will cause an error. Should be revised in future.
assert sharding_ring_id >= 0, "sharding_ring_id should larger than zero"
block = program.global_block()
not_sync_vars = set([])
for op in block.ops:
if op.type in ["c_broadcast", "c_allreduce"]:
for input_name in op.desc.input_arg_names():
not_sync_vars.add(input_name)
if op.type == "c_sync_comm_stream":
for input_name in op.desc.input_arg_names():
not_sync_vars.remove(input_name)
if not_sync_vars:
block.append_op(
type='c_sync_comm_stream',
inputs={'X': list(not_sync_vars)},
outputs={'Out': list(not_sync_vars)},
attrs={
'ring_id': sharding_ring_id,
'op_role': core.op_proto_and_checker_maker.OpRole.Forward
})
return
def save_persistables(exe, dirname, main_program, filename=None):
"""
    When sharding is used, some persistable vars are unique and partitioned across ranks,
    while others are duplicated and exist on all ranks (possibly with different values).
    This function handles model saving for sharding training.
"""
# TODO (JZ-LIANG) revise this for uniform mixed parallelism
if main_program._pipeline_opt:
main_program = main_program._pipeline_opt['section_program']
def is_opt_vars(var):
# NOTE(JZ-LIANG): The checks should be updated when add new compatible optimizer
# now only Momentum and adam are compatible with sharding,
# support EMA optimizer with '_ema_0',
# support offload with '@offload_0' and '.cast_fp16'
checks = [
"_moment1_0", "_moment2_0", "_beta1_pow_acc_0", "_beta2_pow_acc_0",
"_velocity_0", "_ema_0", "@offload_0", ".cast_fp16"
]
for check in checks:
if var.name.endswith(check) and var.persistable:
return True
return False
def is_gradient_merge_vars(var):
        # NOTE(JZ-LIANG): revise the save/load logic in the framework instead of writing this naive rule
return var.name.endswith("@GradiantMerge")
def is_trainable(var):
return isinstance(var,
paddle.fluid.framework.Parameter) and var.trainable
def sharding_predicate(var):
return is_trainable(var) or is_opt_vars(var) or is_gradient_merge_vars(
var)
if int(os.environ.get('PADDLE_TRAINER_ID', 0)) == 0:
paddle.fluid.io.save_persistables(
exe, dirname, main_program=main_program, filename=None)
else:
paddle.fluid.io.save_vars(
exe,
dirname,
main_program=main_program,
predicate=sharding_predicate,
filename=None)
return
def append_naive_sync(block, sync_var, ring_id):
    # NOTE (JZ-LIANG) update this to use barrier sync for more elegant logic
# sync within global
block.append_op(
type="fill_constant",
outputs={"Out": sync_var},
attrs={
"shape": sync_var.shape,
"dtype": sync_var.dtype,
"value": int(1),
})
block.append_op(
type='c_allreduce_sum',
inputs={'X': sync_var},
outputs={'Out': sync_var},
attrs={
'ring_id': ring_id,
'use_calc_stream': True,
OP_ROLE_KEY: OpRole.Forward
})
block.append_op(
type='c_sync_calc_stream',
inputs={'X': [sync_var]},
outputs={'Out': [sync_var]},
attrs={OP_ROLE_KEY: OpRole.Forward})
|
the-stack_0_4457 | """Module to define main fnet model wrapper class."""
from pathlib import Path
from typing import Callable, Iterator, List, Optional, Sequence, Tuple, Union
import logging
import math
import os
from scipy.ndimage import zoom
import numpy as np
import tifffile
import torch
from fnet.metrics import corr_coef
from fnet.predict_piecewise import predict_piecewise as _predict_piecewise_fn
from fnet.transforms import flip_y, flip_x, norm_around_center
from fnet.utils.general_utils import get_args, retry_if_oserror, str_to_object
from fnet.utils.model_utils import move_optim
logger = logging.getLogger(__name__)
def _weights_init(m):
classname = m.__class__.__name__
if classname.startswith("Conv"):
m.weight.data.normal_(0.0, 0.02)
elif classname.find("BatchNorm") != -1:
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
def get_per_param_options(module, wd):
"""Returns list of per parameter group options.
Applies the specified weight decay (wd) to parameters except parameters
within batch norm layers and bias parameters.
"""
if wd == 0:
return module.parameters()
with_decay = list()
without_decay = list()
for idx_m, (name_m, module_sub) in enumerate(module.named_modules()):
if list(module_sub.named_children()):
continue # Skip "container" modules
if isinstance(module_sub, torch.nn.modules.batchnorm._BatchNorm):
for param in module_sub.parameters():
without_decay.append(param)
continue
for name_param, param in module_sub.named_parameters():
if "weight" in name_param:
with_decay.append(param)
elif "bias" in name_param:
without_decay.append(param)
# Check that no parameters were missed or duplicated
n_param_module = len(list(module.parameters()))
n_param_lists = len(with_decay) + len(without_decay)
n_elem_module = sum([p.numel() for p in module.parameters()])
n_elem_lists = sum([p.numel() for p in with_decay + without_decay])
assert n_param_module == n_param_lists
assert n_elem_module == n_elem_lists
per_param_options = [
{"params": with_decay, "weight_decay": wd},
{"params": without_decay, "weight_decay": 0.0},
]
return per_param_options
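# Illustrative example (an assumption, not from the original file): for
# torch.nn.Sequential(torch.nn.Linear(4, 4), torch.nn.BatchNorm1d(4)), the
# linear weight lands in the weight-decay group, while the linear bias and
# both batch-norm parameters land in the zero-weight-decay group.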
class Model:
"""Class that encompasses a pytorch network and its optimizer.
"""
def __init__(
self,
betas=(0.5, 0.999),
criterion_class="fnet.losses.WeightedMSE",
init_weights=True,
lr=0.001,
nn_class="fnet.nn_modules.fnet_nn_3d.Net",
nn_kwargs={},
scheduler=None,
weight_decay=0,
gpu_ids=-1,
):
self.betas = betas
self.criterion = str_to_object(criterion_class)()
self.gpu_ids = [gpu_ids] if isinstance(gpu_ids, int) else gpu_ids
self.init_weights = init_weights
self.lr = lr
self.nn_class = nn_class
self.nn_kwargs = nn_kwargs
self.scheduler = scheduler
self.weight_decay = weight_decay
self.count_iter = 0
self.device = (
torch.device("cuda", self.gpu_ids[0])
if self.gpu_ids[0] >= 0
else torch.device("cpu")
)
self.optimizer = None
self._init_model()
self.fnet_model_kwargs, self.fnet_model_posargs = get_args()
self.fnet_model_kwargs.pop("self")
def _init_model(self):
self.net = str_to_object(self.nn_class)(**self.nn_kwargs)
if self.init_weights:
self.net.apply(_weights_init)
self.net.to(self.device)
self.optimizer = torch.optim.Adam(
get_per_param_options(self.net, wd=self.weight_decay),
lr=self.lr,
betas=self.betas,
)
if self.scheduler is not None:
if self.scheduler[0] == "snapshot":
period = self.scheduler[1]
self.scheduler = torch.optim.lr_scheduler.LambdaLR(
self.optimizer,
lambda x: (
0.01
+ (1 - 0.01)
* (0.5 + 0.5 * math.cos(math.pi * (x % period) / period))
),
)
elif self.scheduler[0] == "step":
step_size = self.scheduler[1]
self.scheduler = torch.optim.lr_scheduler.StepLR(
self.optimizer, step_size
)
else:
raise NotImplementedError
def __str__(self):
out_str = [
f"*** {self.__class__.__name__} ***",
f"{self.nn_class}(**{self.nn_kwargs})",
f"iter: {self.count_iter}",
f"gpu: {self.gpu_ids}",
]
return os.linesep.join(out_str)
def get_state(self):
return {
"fnet_model_class": (self.__module__ + "." + self.__class__.__qualname__),
"fnet_model_kwargs": self.fnet_model_kwargs,
"fnet_model_posargs": self.fnet_model_posargs,
"nn_state": self.net.state_dict(),
"optimizer_state": self.optimizer.state_dict(),
"count_iter": self.count_iter,
}
def to_gpu(self, gpu_ids: Union[int, List[int]]) -> None:
"""Move network to specified GPU(s).
Parameters
----------
gpu_ids
GPU(s) on which to perform training or prediction.
"""
if isinstance(gpu_ids, int):
gpu_ids = [gpu_ids]
self.gpu_ids = gpu_ids
self.device = (
torch.device("cuda", self.gpu_ids[0])
if self.gpu_ids[0] >= 0
else torch.device("cpu")
)
self.net.to(self.device)
if self.optimizer is not None:
move_optim(self.optimizer, self.device)
def save(self, path_save: str):
"""Saves model to disk.
Parameters
----------
path_save
Filename to which model is saved.
"""
dirname = os.path.dirname(path_save)
if not os.path.exists(dirname):
os.makedirs(dirname)
logger.info(f"Created: {dirname}")
curr_gpu_ids = self.gpu_ids
self.to_gpu(-1)
retry_if_oserror(torch.save)(self.get_state(), path_save)
self.to_gpu(curr_gpu_ids)
def load_state(self, state: dict, no_optim: bool = False):
self.count_iter = state["count_iter"]
self.net.load_state_dict(state["nn_state"])
if no_optim:
self.optimizer = None
return
self.optimizer.load_state_dict(state["optimizer_state"])
def train_on_batch(
self,
x_batch: torch.Tensor,
y_batch: torch.Tensor,
weight_map_batch: Optional[torch.Tensor] = None,
) -> float:
"""Update model using a batch of inputs and targets.
Parameters
----------
x_batch
Batched input.
y_batch
Batched target.
weight_map_batch
Optional batched weight map.
Returns
-------
float
Loss as determined by self.criterion.
"""
if self.scheduler is not None:
self.scheduler.step()
self.net.train()
x_batch = x_batch.to(dtype=torch.float32, device=self.device)
y_batch = y_batch.to(dtype=torch.float32, device=self.device)
if len(self.gpu_ids) > 1:
module = torch.nn.DataParallel(self.net, device_ids=self.gpu_ids)
else:
module = self.net
self.optimizer.zero_grad()
y_hat_batch = module(x_batch)
args = [y_hat_batch, y_batch]
if weight_map_batch is not None:
args.append(weight_map_batch)
loss = self.criterion(*args)
loss.backward()
self.optimizer.step()
self.count_iter += 1
return loss.item()
def _predict_on_batch_tta(self, x_batch: torch.Tensor) -> torch.Tensor:
"""Performs model prediction using test-time augmentation."""
augs = [None, [flip_y], [flip_x], [flip_y, flip_x]]
x_batch = x_batch.numpy()
y_hat_batch_mean = None
for aug in augs:
x_batch_aug = x_batch.copy()
if aug is not None:
for trans in aug:
x_batch_aug = trans(x_batch_aug)
y_hat_batch = self.predict_on_batch(x_batch_aug.copy()).numpy()
if aug is not None:
for trans in aug:
y_hat_batch = trans(y_hat_batch)
if y_hat_batch_mean is None:
y_hat_batch_mean = np.zeros(y_hat_batch.shape, dtype=np.float32)
y_hat_batch_mean += y_hat_batch
y_hat_batch_mean /= len(augs)
return torch.tensor(
y_hat_batch_mean, dtype=torch.float32, device=torch.device("cpu")
)
def predict_on_batch(self, x_batch: torch.Tensor) -> torch.Tensor:
"""Performs model prediction on a batch of data.
Parameters
----------
x_batch
Batch of input data.
Returns
-------
torch.Tensor
Batch of model predictions.
"""
x_batch = torch.tensor(x_batch, dtype=torch.float32, device=self.device)
if len(self.gpu_ids) > 1:
network = torch.nn.DataParallel(self.net, device_ids=self.gpu_ids)
else:
network = self.net
network.eval()
with torch.no_grad():
y_hat_batch = network(x_batch).cpu()
network.train()
return y_hat_batch
def predict(
self, x: Union[torch.Tensor, np.ndarray], tta: bool = False
) -> torch.Tensor:
"""Performs model prediction on a single example.
Parameters
----------
x
Input data.
tta
Set to use test-time augmentation.
Returns
-------
torch.Tensor
Model prediction.
"""
x_batch = torch.unsqueeze(torch.tensor(x), 0)
if tta:
return self._predict_on_batch_tta(x_batch).squeeze(0)
return self.predict_on_batch(x_batch).squeeze(0)
def predict_piecewise(
self, x: Union[torch.Tensor, np.ndarray], **predict_kwargs
) -> torch.Tensor:
"""Performs model prediction piecewise on a single example.
        Predicts on patches of the input and stitches the predictions together.
Parameters
----------
x
Input data.
**predict_kwargs
Kwargs to pass to predict method.
Returns
-------
torch.Tensor
Model prediction.
"""
if isinstance(x, np.ndarray):
x = torch.from_numpy(x)
if len(x.size()) == 4:
dims_max = [None, 32, 512, 512]
elif len(x.size()) == 3:
dims_max = [None, 1024, 1024]
y_hat = _predict_piecewise_fn(
self, x, dims_max=dims_max, overlaps=16, **predict_kwargs
)
return y_hat
def test_on_batch(
self,
x_batch: torch.Tensor,
y_batch: torch.Tensor,
weight_map_batch: Optional[torch.Tensor] = None,
) -> float:
"""Test model on a batch of inputs and targets.
Parameters
----------
x_batch
Batched input.
y_batch
Batched target.
weight_map_batch
Optional batched weight map.
Returns
-------
float
Loss as evaluated by self.criterion.
"""
y_hat_batch = self.predict_on_batch(x_batch)
args = [y_hat_batch, y_batch]
if weight_map_batch is not None:
args.append(weight_map_batch)
loss = self.criterion(*args)
return loss.item()
def test_on_iterator(self, iterator: Iterator, **kwargs: dict) -> float:
"""Test model on iterator which has items to be passed to
test_on_batch.
Parameters
----------
iterator
Iterator that generates items to be passed to test_on_batch.
kwargs
Additional keyword arguments to be passed to test_on_batch.
Returns
-------
float
Mean loss for items in iterable.
"""
loss_sum = 0
for item in iterator:
loss_sum += self.test_on_batch(*item, **kwargs)
return loss_sum / len(iterator)
def evaluate(
self,
x: torch.Tensor,
y: torch.Tensor,
        metric: Optional[Callable] = None,
piecewise: bool = False,
**kwargs,
) -> Tuple[float, torch.Tensor]:
"""Evaluates model output using a metric function.
Parameters
----------
x
Input data.
y
Target data.
metric
Metric function. If None, uses fnet.metrics.corr_coef.
piecewise
Set to perform predictions piecewise.
**kwargs
Additional kwargs to be passed to predict() method.
Returns
-------
float
Evaluation as determined by metric function.
torch.Tensor
Model prediction.
"""
if metric is None:
metric = corr_coef
if piecewise:
y_hat = self.predict_piecewise(x, **kwargs)
else:
y_hat = self.predict(x, **kwargs)
if y is None:
return None, y_hat
evaluation = metric(y, y_hat)
return evaluation, y_hat
def apply_on_single_zstack(
self,
input_img: Optional[np.ndarray] = None,
filename: Optional[Union[Path, str]] = None,
inputCh: Optional[int] = None,
normalization: Optional[Callable] = None,
already_normalized: bool = False,
ResizeRatio: Optional[Sequence[float]] = None,
cutoff: Optional[float] = None,
) -> np.ndarray:
"""Applies model to a single z-stack input.
This assumes the loaded network architecture can receive 3d grayscale
images as input.
Parameters
----------
input_img
3d or 4d image with shape (Z, Y, X) or (C, Z, Y, X) respectively.
filename
Path to input image. Ignored if input_img is supplied.
inputCh
Selected channel if filename is a path to a 4d image.
normalization
Input image normalization function.
already_normalized
Set to skip input normalization.
ResizeRatio
            Resizes each dimension of the input image by the specified
factor if specified.
cutoff
If specified, converts the output to a binary image with cutoff as
threshold value.
Returns
-------
np.ndarray
Predicted image with shape (Z, Y, X). If cutoff is set, dtype will
be numpy.uint8. Otherwise, dtype will be numpy.float.
Raises
------
ValueError
If parameters are invalid.
FileNotFoundError
If specified file does not exist.
IndexError
If inputCh is invalid.
"""
if input_img is None:
if filename is None:
raise ValueError("input_img or filename must be specified")
input_img = tifffile.imread(str(filename))
if inputCh is not None:
if input_img.ndim != 4:
raise ValueError("input_img must be 4d if inputCh specified")
input_img = input_img[inputCh,]
if input_img.ndim != 3:
raise ValueError("input_img must be 3d")
normalization = normalization or norm_around_center
if not already_normalized:
input_img = normalization(input_img)
if ResizeRatio is not None:
if len(ResizeRatio) != 3:
raise ValueError("ResizeRatio must be length 3")
input_img = zoom(input_img, zoom=ResizeRatio, mode="nearest")
yhat = (
self.predict_piecewise(input_img[np.newaxis,], tta=True)
.squeeze(dim=0)
.numpy()
)
if cutoff is not None:
yhat = (yhat >= cutoff).astype(np.uint8) * 255
return yhat
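# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# The function below is a hypothetical example of how the prediction API above
# might be driven on a trained model instance; the TIFF path and the 0.5 cutoff
# are assumptions, not values taken from the original code.
def _example_segment_zstack(model, tiff_path: str) -> np.ndarray:
    """Run a trained model on a single z-stack TIFF and return a binary mask."""
    # cutoff=0.5 converts the continuous prediction into a uint8 mask (0 or 255).
    return model.apply_on_single_zstack(filename=tiff_path, cutoff=0.5)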
|
the-stack_0_4462 | # Copyright (c) 2012-2017 Snowflake Computing Inc. All rights reserved.
"""
test_tokens.py - This defines a series of tests to ascertain that we are
capable of renewing JWT tokens
"""
from snowflake.ingest.utils import SecurityManager
from snowflake.ingest.error import IngestClientError
from snowflake.ingest.errorcode import ERR_INVALID_PRIVATE_KEY
from datetime import timedelta
from time import sleep
import os
import pytest
def test_same_token(test_util):
"""
Tests that we aren't immediately regenerating the key after each request
"""
private_key, _ = test_util.generate_key_pair()
sec_man = SecurityManager("testaccount", "snowman", private_key,
renewal_delay=timedelta(seconds=3))
assert sec_man.get_token() == sec_man.get_token()
def test_regenerate_token(test_util):
"""
Tests that the security manager generates new tokens after we
cross the set renewal threshold
"""
private_key, _ = test_util.generate_key_pair()
sec_man = SecurityManager("testaccount", "snowman", private_key,
renewal_delay=timedelta(seconds=3))
old_token = sec_man.get_token()
sleep(5)
assert old_token != sec_man.get_token()
def test_calculate_public_key_fingerprint(test_util):
with open(os.path.join(test_util.get_data_dir(), 'test_rsa_key'), 'r') as key_file:
private_key = key_file.read()
sec_man = SecurityManager("testaccount", "snowman", private_key,
renewal_delay=timedelta(minutes=3))
public_key_fingerprint = sec_man.calculate_public_key_fingerprint(private_key)
assert public_key_fingerprint == 'SHA256:QKX8hnXHVAVXp7mLdCAF+vjU2A8RBuRSpgdRjPHhVWY='
def test_invalid_private_key():
sec_man = SecurityManager("testaccount", "snowman", 'invalid_private_key',
renewal_delay=timedelta(minutes=3))
with pytest.raises(IngestClientError) as client_error:
sec_man.get_token()
assert client_error.value.code == ERR_INVALID_PRIVATE_KEY
|
the-stack_0_4463 | """
Ethereum Virtual Machine (EVM) Keccak Instructions
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. contents:: Table of Contents
:backlinks: none
:local:
Introduction
------------
Implementations of the EVM keccak instructions.
"""
from ethereum.base_types import U256, Uint
from ethereum.crypto.hash import keccak256
from ethereum.utils.numeric import ceil32
from ethereum.utils.safe_arithmetic import u256_safe_add, u256_safe_multiply
from ...vm.error import OutOfGasError
from .. import Evm
from ..gas import (
GAS_KECCAK256,
GAS_KECCAK256_WORD,
calculate_gas_extend_memory,
subtract_gas,
)
from ..memory import extend_memory, memory_read_bytes
from ..stack import pop, push
def keccak(evm: Evm) -> None:
"""
Pushes to the stack the Keccak-256 hash of a region of memory.
This also expands the memory, in case the memory is insufficient to
access the data's memory location.
Parameters
----------
evm :
The current EVM frame.
Raises
------
:py:class:`~ethereum.dao_fork.vm.error.StackUnderflowError`
If `len(stack)` is less than `2`.
"""
# Converting memory_start_index to Uint as memory_end_index can
# overflow U256.
memory_start_index = Uint(pop(evm.stack))
size = pop(evm.stack)
words = ceil32(Uint(size)) // 32
word_gas_cost = u256_safe_multiply(
GAS_KECCAK256_WORD,
words,
exception_type=OutOfGasError,
)
memory_extend_gas_cost = calculate_gas_extend_memory(
evm.memory, memory_start_index, size
)
total_gas_cost = u256_safe_add(
GAS_KECCAK256,
word_gas_cost,
memory_extend_gas_cost,
exception_type=OutOfGasError,
)
evm.gas_left = subtract_gas(evm.gas_left, total_gas_cost)
extend_memory(evm.memory, memory_start_index, size)
data = memory_read_bytes(evm.memory, memory_start_index, size)
hash = keccak256(data)
push(evm.stack, U256.from_be_bytes(hash))
evm.pc += 1
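# Illustrative worked example (added for clarity; not part of the original
# module): before any memory-expansion charge, keccak() above charges
# GAS_KECCAK256 + GAS_KECCAK256_WORD * (ceil32(size) // 32). For a 64-byte
# input that is ceil32(64) // 32 = 2 words, i.e. GAS_KECCAK256 plus twice
# GAS_KECCAK256_WORD, and calculate_gas_extend_memory() then adds whatever it
# costs to grow memory up to memory_start_index + size.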
|
the-stack_0_4464 | from typing import Any, Dict, Optional, Union
from uuid import uuid4
from sqlalchemy.orm import Session
from app.crud.base import CRUDBase
from app.crud.crud_user import user
from app.models.login_link import LoginLink
from app.schemas.login_link import LoginLinkCreate, LoginLinkUpdate
class CRUDLoginLink(CRUDBase[LoginLink, LoginLinkCreate, LoginLinkUpdate]):
def get_by_code(self, db: Session, *, code: str) -> Optional[LoginLink]:
return db.query(LoginLink).filter(LoginLink.code == code).first()
def create(self, db: Session, *, obj_in: LoginLinkCreate) -> LoginLink:
code = str(uuid4())[:8]
link = self.get_by_code(db, code=code)
while link:
code = str(uuid4())[:8]
link = self.get_by_code(db, code=code)
found_user = user.get(db, id=obj_in.user.id)
assert found_user is not None
db_obj = LoginLink(code=code.upper(), user_id=found_user.id)
db.add(db_obj)
db.commit()
db.refresh(db_obj)
return db_obj
def update(
self,
db: Session,
*,
db_obj: LoginLink,
obj_in: Union[LoginLinkUpdate, Dict[str, Any]]
) -> LoginLink:
if isinstance(obj_in, dict):
update_data = obj_in
else:
update_data = obj_in.dict(exclude_unset=True)
return super().update(db, db_obj=db_obj, obj_in=update_data)
def is_active(self, db_obj: LoginLink) -> bool:
return db_obj.active
def disable(self, db: Session, *, db_obj: LoginLink) -> bool:
setattr(db_obj, "active", False)
db.add(db_obj)
db.commit()
db.refresh(db_obj)
return db_obj.active
login_link = CRUDLoginLink(LoginLink)
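# Illustrative usage sketch (added for clarity; not part of the original
# module). Assuming a SQLAlchemy session `db` and a LoginLinkCreate payload
# wrapping an existing user, a login code could be issued and later disabled:
#
#     link = login_link.create(db, obj_in=LoginLinkCreate(user=some_user))
#     print(link.code)                     # unique 8-character upper-cased code
#     login_link.disable(db, db_obj=link)  # returns the updated "active" flag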
|
the-stack_0_4466 | import random
import torch
import sys
import torch.nn as nn
import torchaudio
import bz2
import pickle
import torchvision.transforms as transforms
import cv2
import math
import os
import numpy as np
from torch.utils.data import Dataset, DataLoader
from logging import Logger
from torchvision.transforms.transforms import Lambda
try:
from datasets import MelSpectrogram, align_and_crop_face
except:
sys.path.extend(['..'])
from spectograms import MelSpectrogram
from face_utils import align_and_crop_face
def av_speech_collate_fn_pad(batch):
lower_faces, speeches, melspecs, face_crop = zip(*batch)
max_frames_in_batch = max([l.shape[0] for l in lower_faces])
max_samples_in_batch = max([s.shape[1] for s in speeches])
max_melspec_samples_in_batch = max([m.shape[1] for m in melspecs])
padded_lower_faces = torch.zeros(len(lower_faces), max_frames_in_batch, *tuple(lower_faces[0].shape[1:]))
padded_speeches = torch.zeros(len(speeches), 1, max_samples_in_batch)
padded_melspecs = torch.zeros(len(melspecs), melspecs[0].shape[0], max_melspec_samples_in_batch)
mel_gate_padded = torch.zeros(len(melspecs), max_melspec_samples_in_batch)
video_lengths = list()
audio_lengths = list()
melspec_lengths = list()
for idx, (lower_face, speech, melspec) in enumerate(zip(lower_faces, speeches, melspecs)):
T = lower_face.shape[0]
video_lengths.append(T)
padded_lower_faces[idx, :T, :, :, :] = lower_face
S = speech.shape[-1]
audio_lengths.append(S)
padded_speeches[idx, :, :S] = speech
M = melspec.shape[-1]
melspec_lengths.append(M)
padded_melspecs[idx, :, :M] = melspec
mel_gate_padded[idx, M-1:] = 1.0
face_crop_tensor = torch.cat([f.unsqueeze(0) for f in face_crop], dim=0)
padded_lower_faces = padded_lower_faces.permute(0, 2, 1, 3, 4)
padded_speeches = padded_speeches.squeeze(1)
video_lengths = torch.tensor(video_lengths)
audio_lengths = torch.tensor(audio_lengths)
melspec_lengths = torch.tensor(melspec_lengths)
return (padded_lower_faces, video_lengths), (padded_speeches, audio_lengths), (padded_melspecs, melspec_lengths, mel_gate_padded), face_crop_tensor
def x_round(x):
return math.floor(x * 4) / 4
def loadframes(filename):
with bz2.BZ2File(filename, 'r') as f:
data = pickle.load(f)
return [cv2.cvtColor(cv2.imdecode(imn, cv2.IMREAD_COLOR), cv2.COLOR_BGR2RGB) for imn in data]
class WILD(Dataset):
def __init__(self, rootpth, face_size=(96, 96), mode='train', demo=False, duration=1, face_augmentation=None, *args, **kwargs):
super(WILD, self).__init__(*args, **kwargs)
assert mode in ('train', 'test')
self.rootpth = rootpth
self.face_recog_resize = transforms.Compose([
transforms.Resize((160, 160)),
transforms.Lambda(lambda im: (im.float() - 127.5) / 128.0),
])
self.face_size = face_size
self.face_resize = transforms.Compose([
transforms.Resize(face_size),
transforms.Lambda(lambda im: im.float() / 255.0),
transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
])
if face_augmentation is None:
self.face_augmentation = nn.Identity()
else:
self.face_augmentation = face_augmentation
self.mode = mode
self.demo = demo
self.items = dict()
index = 0
for root, _, filenames in os.walk(self.rootpth):
for filename in filenames:
if filename.endswith(('.mp4', '.mov', '.mpg')):
if filename.endswith('.mov'):
format = '.mov'
elif filename.endswith('.mpg'):
format = '.mpg'
elif filename.endswith('.mp4'):
format = '.mp4'
video_path = os.path.join(root, filename)
audio_path = os.path.join(root, filename.replace(format, '.wav'))
frame_info_path = os.path.join(root, filename.replace(format, '.json'))
spec_path = os.path.join(root, filename.replace(format, '.npz'))
face_path = video_path[:-4] + '_face.npz'
if os.path.isfile(audio_path) and os.path.isfile(frame_info_path) and os.path.isfile(spec_path):
self.items[index] = [video_path, audio_path, spec_path, face_path, frame_info_path]
index += 1
self.len = len(self.items)
self.duration = duration
print(f'Size of {type(self).__name__}: {self.len}')
random.shuffle(self.items)
def __len__(self):
return self.len
def __getitem__(self, idx):
video_path, audio_path, spec_path, face_path, frame_info_path = self.items[idx]
speech, sampling_rate = torchaudio.load(audio_path, normalize=True, format='wav')
melspec = torch.from_numpy(np.load(spec_path)['data'])
melspec = melspec.squeeze(0)
faces = [torch.from_numpy(face).permute(2, 0, 1) for face in loadframes(face_path)]
faces = self.face_augmentation(faces)
face_indices = (torch.rand(2) * len(faces)).int()
face_crop = torch.cat([self.face_recog_resize(faces[f_id]).unsqueeze(0) for f_id in face_indices], dim=0)
lower_faces = list()
for face in faces:
C, H, W = face.shape
lower_face = face[:, H//2:, :]
lower_faces.append(self.face_resize(lower_face).unsqueeze(0))
lower_faces = torch.cat(lower_faces, dim=0)
if self.demo:
return lower_faces, speech, melspec, face_crop, audio_path
return lower_faces, speech, melspec, face_crop
def main():
ds = WILD('/media/ssd/christen-rnd/Experiments/Lip2Speech/Datasets/WILD', mode='test', duration=1)
dl = DataLoader(ds,
batch_size=8,
shuffle=False,
num_workers=0,
pin_memory=False,
drop_last=True,
collate_fn=av_speech_collate_fn_pad)
from IPython.display import Audio, display
for bdx, batch in enumerate(dl):
(video, video_lengths), (speeches, audio_lengths), (melspecs, melspec_lengths, mel_gates), faces = batch
frames = video
print('video.shape', video.shape)
print('faces.shape ', faces.shape)
print('frames[0][0].shape ', frames[0][0].shape)
print('melspecs.shape ', melspecs.shape)
# print('speech.shape ', speech.shape)
# continue
B, C, T, H, W = video.shape
for k in range(B):
face = faces[k, 0, :, :, :].permute(1, 2, 0).numpy()
face = ((face * 128.0) + 127.5).astype(dtype=np.uint8)
cv2.imshow('face', face[:, :, :: -1])
for i in range(T):
image = frames[k, :, i, :, :].permute(1, 2, 0).numpy()
image = image * np.array([0.229, 0.224, 0.225]) + np.array([0.485, 0.456, 0.406])
print(k, i, image.shape)
cv2.imshow('image', image[:, :, :: -1])
if ord('q') == cv2.waitKey(16):
exit()
# sample_rate = 16000
# effects = [
# ["lowpass", "-1", "700"], # apply single-pole lowpass filter
# # ["speed", "0.8"], # reduce the speed
# # This only changes sample rate, so it is necessary to
# # add `rate` effect with original sample rate after this.
# # ["rate", f"{sample_rate}"],
# # ["reverb", "-w"], # Reverbration gives some dramatic feeling
# ]
# aug_speech, sample_rate2 = torchaudio.sox_effects.apply_effects_tensor(
# speech[0], sample_rate, effects)
# torchaudio.save('test.wav', speech[0], 16000)
# torchaudio.save('aug_speech.wav', aug_speech, 16000)
# plot_waveform(waveform, sample_rate)
# plot_specgram(waveform, sample_rate)
# play_audio(waveform, sample_rate)
# images = images.numpy()
# lb = lb.numpy()
# for image, label in zip(images, lb):
# label = ds.vis_label(label)
# print(torch.unique(label))
# print(img.shape, label.shape)
if __name__ == "__main__":
main()
|
the-stack_0_4470 | from distutils import log
from weaverbird.backends.sql_translator.steps.utils.query_transformation import (
build_selection_query,
)
from weaverbird.backends.sql_translator.types import (
SQLPipelineTranslator,
SQLQuery,
SQLQueryDescriber,
SQLQueryExecutor,
SQLQueryRetriever,
)
from weaverbird.pipeline.steps import ReplaceStep
def translate_replace(
step: ReplaceStep,
query: SQLQuery,
index: int,
sql_query_retriever: SQLQueryRetriever = None,
sql_query_describer: SQLQueryDescriber = None,
sql_query_executor: SQLQueryExecutor = None,
sql_translate_pipeline: SQLPipelineTranslator = None,
subcall_from_other_pipeline_count: int = None,
) -> SQLQuery:
query_name = f'REPLACE_STEP_{index}'
log.debug(
'############################################################'
f'query_name: {query_name}\n'
'------------------------------------------------------------'
f'step.name: {step.name}\n'
f'step.search_column: {step.search_column}\n'
f'step.to_replace: {step.to_replace}\n'
f'query.transformed_query: {query.transformed_query}\n'
f'query.metadata_manager.query_metadata: {query.metadata_manager.retrieve_query_metadata()}\n'
)
    def _clean_str(value):
        # Quote and escape string values; numeric values are passed through as-is.
        if not isinstance(value, float) and not isinstance(value, int):
            value = value.strip('"').strip("'").replace('"', "\'").replace("'", "\\'")
            return f"'{value}'"
        return value
compiled_query: str = 'CASE '
for element_to_replace in step.to_replace:
from_value, to_value = element_to_replace
compiled_query += (
f'WHEN {step.search_column}={_clean_str(from_value)} THEN {_clean_str(to_value)} '
)
compiled_query += f'ELSE {step.search_column} END AS {step.search_column}'
completed_fields = query.metadata_manager.retrieve_query_metadata_columns_as_str(
columns_filter=[step.search_column]
)
new_query = SQLQuery(
query_name=query_name,
transformed_query=f"""{query.transformed_query}, {query_name} AS"""
f""" (SELECT {completed_fields},"""
f""" {compiled_query}"""
f""" FROM {query.query_name})""",
selection_query=build_selection_query(
query.metadata_manager.retrieve_query_metadata_columns(), query_name
),
metadata_manager=query.metadata_manager,
)
log.debug(
'------------------------------------------------------------'
f'SQLquery: {new_query.transformed_query}'
'############################################################'
)
return new_query
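# Illustrative note (added for clarity; not part of the original module): for a
# hypothetical ReplaceStep(search_column='COUNTRY', to_replace=[('FR', 'France')]),
# the fragment compiled above would look roughly like
#
#     CASE WHEN COUNTRY='FR' THEN 'France' ELSE COUNTRY END AS COUNTRY
#
# and is wrapped into a new REPLACE_STEP_<index> common table expression that
# selects every other column unchanged from the previous query.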
|
the-stack_0_4474 | # URL of the list of equipment used by enemy (abyssal) ships
from typing import Dict, Tuple, List
import lxml.html
import requests
from model.weapon import Weapon
from model.weapon_type import WeaponType
ENEMY_WEAPON_DATA_URL = 'https://kancolle.fandom.com/wiki/List_of_equipment_used_by_the_enemy'
# Mapping table from equipment-type text to WeaponType
WEAPON_TYPE_DICT: Dict[str, WeaponType] = {
'Small Caliber Main Gun': WeaponType.SCMG,
'Medium Caliber Main Gun': WeaponType.MCMG,
'Large Caliber Main Gun': WeaponType.LCMG,
'Secondary Gun': WeaponType.SG,
'Torpedo': WeaponType.TORPEDO,
'Midget Submarine': WeaponType.MS,
'Carrier-based Fighter Aircraft': WeaponType.CFA,
'Carrier-based Dive Bomber': WeaponType.CDB,
'Seaplane Bomber': WeaponType.SB,
'Carrier-based Torpedo Bomber': WeaponType.CTB,
'Reconnaissance Seaplane': WeaponType.MS,
'Small Radar': WeaponType.SR,
'Large Radar': WeaponType.LR,
'Engine Improvement': WeaponType.EI,
'Anti-Aircraft Shell': WeaponType.AAS,
'Armor Piercing Shell': WeaponType.APS,
'Anti-Aircraft Gun': WeaponType.AAG,
'Depth Charge': WeaponType.DC,
'Sonar': WeaponType.SONAR,
'Searchlight': WeaponType.S_LIGHT,
}
def read_weapon_name(td_tag: lxml.html.HtmlElement) -> str:
"""装備名を算出する
Parameters
----------
td_tag: lxml.html.HtmlElement
TDタグの中身
Returns
-------
装備名
"""
link_name: str = td_tag.cssselect('a')[0].text
name = td_tag.text_content().replace(link_name, '', 1)
return name.strip()
def read_weapon_parameters(td_tag: lxml.html.HtmlElement) -> Dict[str, int]:
"""装備のパラメーターを読み取る
Parameters
----------
td_tag: lxml.html.HtmlElement
TDタグの中身
Returns
-------
装備のパラメーター
"""
    # Read the icon info and the value info.
    # The values are split by exploiting the fact that every element is always
    # delimited by '+', '-', or one of a few fixed strings.
icon_list = [x.get('title', '') for x in td_tag.cssselect('a')]
value_list = td_tag.text_content().replace('+', '\n+').replace('-', '\n+') \
.replace('Very Long', '\nVL').replace('Short', '\nShort')\
.replace('Medium', '\nMedium').replace('Long', '\nLong') \
.replace('VL', 'Very Long').split('\n')
value_list = [x for x in value_list if x != '']
    # Store the parsed values in a dict
parameters: Dict[str, int] = {}
for icon, value in zip(icon_list, value_list):
if icon == 'Range':
continue
parameters[icon] = int(value)
return parameters
def get_enemy_weapon_list() -> Tuple[List[Weapon], Dict[str, int]]:
"""深海棲艦の装備の一覧を取得する
Returns
-------
weapon_list[index] = 装備情報
weapon_url_dict[装備URL] = 装備ID
"""
    # Fetch the URL and parse the HTML
response = requests.get(ENEMY_WEAPON_DATA_URL)
dom: lxml.html.HtmlElement = lxml.html.fromstring(response.text)
    # Read each table row and store it in weapon_list as equipment data
weapon_list: List[Weapon] = [Weapon(
id=0,
name='',
type=WeaponType.NONE,
attack=0,
torpedo=0,
bomber=0,
anti_air=0,
anti_sub=0)]
weapon_url_dict: Dict[str, int] = {}
for tr_tag in dom.cssselect('table.wikitable tr'):
        # Since this is a table, it can be split column by column
tr_tag: lxml.html.HtmlElement = tr_tag
td_tag_list: List[lxml.html.HtmlElement] = tr_tag.cssselect('td')
if len(td_tag_list) < 6:
continue
        # Read the equipment ID
weapon_id = int(td_tag_list[0].text)
        # Read the equipment name
weapon_name = read_weapon_name(td_tag_list[2])
        # Read the equipment URL
weapon_url = td_tag_list[2].cssselect('a')[0].get('href', '')
weapon_url_dict[weapon_url] = weapon_id
        # Read the equipment type
raw_weapon_type = td_tag_list[3].text.strip()
weapon_type = WEAPON_TYPE_DICT.get(raw_weapon_type, WeaponType.NONE)
        # Read the other parameters
parameters = read_weapon_parameters(td_tag_list[4])
weapon_attack = parameters.get('Firepower', 0)
weapon_torpedo = parameters.get('Torpedo', 0)
weapon_bomber = parameters.get('Bombing', 0)
weapon_antiair = parameters.get('AA', 0)
weapon_anti_sub = parameters.get('ASW', 0)
        # Build the equipment record and append it
weapon = Weapon(
id=weapon_id,
name=weapon_name,
type=weapon_type,
attack=weapon_attack,
torpedo=weapon_torpedo,
bomber=weapon_bomber,
anti_air=weapon_antiair,
anti_sub=weapon_anti_sub)
weapon_list.append(weapon)
return weapon_list, weapon_url_dict
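# Illustrative usage sketch (added for clarity; not part of the original module):
#
#     weapons, url_to_id = get_enemy_weapon_list()
#     # weapons[0] is a placeholder entry; scraped equipment starts at index 1,
#     # and url_to_id maps each wiki equipment URL to its equipment ID.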
|
the-stack_0_4475 | from django.db import models
from django.utils.translation import ugettext_lazy as _
from foundation_tenant.models.base.abstract_thing import AbstractThing
class TagManager(models.Manager):
def delete_all(self):
items = Tag.objects.all()
for item in items.all():
item.delete()
class Tag(models.Model):
class Meta:
app_label = 'foundation_tenant'
db_table = 'smeg_tags'
verbose_name = _('Tag')
verbose_name_plural = _('Tags')
objects = TagManager()
name = models.CharField(
_("Name"),
max_length=127,
help_text=_('The name of the Tag item.'),
unique=True,
)
is_program = models.BooleanField(
_("Is program"),
help_text=_('Indicates if this Tag is to be used for programs.'),
default=False,
blank=True,
)
# DEVELOPERS NOTES:
    # - These fields should be set in an ETL.
entrepreneurs_count = models.PositiveSmallIntegerField(
_("entrepreneurs_count"),
        help_text=_('Keep track of how many entrepreneurs are assigned to this tag.'),
default=0,
null=True
)
entrepreneurs_average_stage_num = models.FloatField(
_("Entrepreneurs Average Stage"),
        help_text=_('Keep track of the average stage number that most entrepreneurs belonging to this tag fall under.'),
default=0,
null=True
)
def __str__(self):
return str(self.name)
|
the-stack_0_4477 | from fasteve import Fasteve, BaseSchema, Resource, ObjectID, SubResource
from fasteve.utils import Unique, DataRelation
from typing import Optional, List, NewType, Union, Any
from pydantic import EmailStr, SecretStr, Field, BaseModel
from datetime import datetime
from time import sleep
class Data(BaseSchema):
date: datetime # datetime.date not supported by mongo
confirmed: int
deaths: int
recovered: int
country_id: ObjectID
data = Resource(
schema=Data, resource_methods=["GET", "POST", "DELETE"], item_name="datum"
)
class Leader(BaseSchema):
name: str
age: int
leader = Resource(schema=Leader, resource_methods=["GET", "POST", "DELETE"])
class Countries(BaseSchema):
name: Unique[str]
# leader: DataRelation = leader
data_sub_resource = SubResource(resource=data, id_field="country_id", name="data")
countries = Resource(
schema=Countries,
resource_methods=["GET", "POST", "DELETE"],
item_name="country",
alt_id="name",
sub_resources=[data_sub_resource], # GET /countries/<country_id|name>/data
)
resources = [countries, leader, data]
app = Fasteve(resources=resources, cors_origins=["*"])
@app.repeat_every(seconds=60 * 60 * 24) # every day
async def count_countries_in_db() -> None:
data, count = await app.data.find(countries)
print(f"There are {count} countries in the database!")
@app.get("/custom_endpoint")
def custom_endpoint():
return {"custom": "endpoint"}
|
the-stack_0_4479 | import json
from os import urandom
import urllib
import urlparse
import flask
import requests
from requests_oauthlib import OAuth1 as OAuth1Manager
from oauthlib.oauth1.rfc5849 import SIGNATURE_HMAC, SIGNATURE_TYPE_AUTH_HEADER
from oauthlib.oauth2.draft25 import tokens
from werkzeug.urls import url_decode
from foauth import OAuthError
BEARER = 'BEARER'
BEARER_HEADER = 'HEADER'
BEARER_BODY = 'BODY'
BEARER_URI = 'URI'
BEARER_TYPES = (BEARER_HEADER, BEARER_BODY, BEARER_URI)
class Bearer(object):
def __init__(self, token, bearer_type=BEARER_HEADER):
self.token = token
if bearer_type in BEARER_TYPES or callable(bearer_type):
self.bearer_type = bearer_type
else:
raise ValueError('Unknown bearer type %s' % bearer_type)
def __call__(self, r):
if self.bearer_type == BEARER_HEADER:
r.headers = tokens.prepare_bearer_headers(self.token, r.headers)
elif self.bearer_type == BEARER_BODY:
r.data = tokens.prepare_bearer_body(self.token, r.data)
elif self.bearer_type == BEARER_URI:
r.url = tokens.prepare_bearer_uri(self.token, r.url)
elif callable(self.bearer_type):
r = self.bearer_type(self.token, r)
return r
class OAuthMeta(type):
def __init__(cls, name, bases, attrs):
if 'alias' not in attrs:
cls.alias = cls.__name__.lower()
if 'api_domain' in attrs and 'api_domains' not in attrs:
cls.api_domains = [cls.api_domain]
if 'provider_url' in attrs and 'favicon_url' not in attrs:
# Use a favicon service when no favicon is supplied
primary = 'https://getfavicon.appspot.com/%s' % cls.provider_url
domain = urlparse.urlparse(cls.provider_url).netloc
backup = 'https://www.google.com/s2/favicons?domain=%s' % domain
cls.favicon_url = '%s?defaulticon=%s' % (primary, urllib.quote(backup))
if 'name' not in attrs:
cls.name = cls.__name__
class OAuth(object):
__metaclass__ = OAuthMeta
https = True
verify = True
signature_method = SIGNATURE_HMAC
signature_type = SIGNATURE_TYPE_AUTH_HEADER
permissions_widget = 'checkbox'
description = ''
disclaimer = ''
def __init__(self, client_id, client_secret):
self.client_id = client_id
self.client_secret = client_secret
def get_request_token_url(self):
return self.request_token_url
def get_access_token_url(self):
return self.access_token_url
def get_scope_string(self, scopes):
return ''
def get_authorize_url(self, redirect_uri, scopes):
params = self.get_authorize_params(redirect_uri=redirect_uri,
scopes=scopes)
req = requests.Request(url=self.authorize_url, params=params)
return req.prepare().url
def get_login_uri(self, redirect_uri):
params = self.get_authorize_params(redirect_uri=redirect_uri,
scopes=[])
req = requests.Request(url=self.authorize_url, params=params)
return req.prepare().url
# The remainder of the API must be implemented for each flavor of OAuth
def callback(self, data, redirect_uri):
"""
Receives the full callback from the service and returns a 2-tuple
containing the user token and user secret (if applicable).
"""
raise NotImplementedError("callback() must be defined in a subclass")
def api(self, key, domain, path, method='GET', params=None, data=None):
"""
Passes along an API request to the service and returns the response.
"""
raise NotImplementedError("api() must be defined in a subclass")
class OAuth1(OAuth):
returns_token = True
def parse_token(self, content):
content = url_decode(content)
return {
'access_token': content['oauth_token'],
'secret': content['oauth_token_secret'],
}
def get_request_token_params(self, redirect_uri, scopes):
return {}
def get_request_token_response(self, redirect_uri, scopes):
auth = OAuth1Manager(client_key=self.client_id,
client_secret=self.client_secret,
callback_uri=redirect_uri,
signature_method=self.signature_method,
signature_type=self.signature_type)
return requests.post(self.get_request_token_url(), auth=auth,
params=self.get_request_token_params(redirect_uri, scopes),
verify=self.verify)
def get_authorize_params(self, redirect_uri, scopes):
resp = self.get_request_token_response(redirect_uri, scopes)
try:
data = self.parse_token(resp.content)
except Exception:
raise OAuthError('Unable to parse access token')
flask.session['%s_temp_secret' % self.alias] = data['secret']
if not self.returns_token:
redirect_uri += ('?oauth_token=%s' % data['access_token'])
return {
'oauth_token': data['access_token'],
'oauth_callback': redirect_uri,
}
def get_access_token_response(self, token, secret, verifier=None):
auth = OAuth1Manager(client_key=self.client_id,
client_secret=self.client_secret,
resource_owner_key=token,
resource_owner_secret=secret,
verifier=verifier,
signature_method=self.signature_method,
signature_type=self.signature_type)
return requests.post(self.get_access_token_url(), auth=auth,
verify=self.verify)
def callback(self, data, redirect_uri):
token = data['oauth_token']
verifier = data.get('oauth_verifier', None)
secret = flask.session['%s_temp_secret' % self.alias]
del flask.session['%s_temp_secret' % self.alias]
resp = self.get_access_token_response(token, secret, verifier)
try:
return self.parse_token(resp.content)
except Exception:
raise OAuthError('Unable to parse access token')
def api(self, key, domain, path, method='GET', params=None, data=None,
headers=None):
protocol = self.https and 'https' or 'http'
url = '%s://%s%s' % (protocol, domain, path)
auth = OAuth1Manager(client_key=self.client_id,
client_secret=self.client_secret,
resource_owner_key=key.access_token,
resource_owner_secret=key.secret,
signature_method=self.signature_method,
signature_type=self.signature_type)
return requests.request(method, url, auth=auth, params=params or {},
data=data or {}, headers=headers or {},
verify=self.verify, stream=True)
class OAuth2(OAuth):
token_type = BEARER
bearer_type = BEARER_HEADER
supports_state = True
auth = None
def parse_token(self, content):
return json.loads(content)
def get_scope_string(self, scopes):
return ' '.join(scopes)
def get_authorize_params(self, redirect_uri, scopes):
state = ''.join('%02x' % ord(x) for x in urandom(16))
flask.session['%s_state' % self.alias] = state
if not self.supports_state:
redirect_uri += ('?state=%s' % state)
params = {
'client_id': self.client_id,
'response_type': 'code',
'redirect_uri': redirect_uri,
'state': state,
}
if any(scopes):
params['scope'] = self.get_scope_string(scopes)
return params
def get_access_token_response(self, redirect_uri, data):
return requests.post(self.get_access_token_url(), {
'client_id': self.client_id,
'client_secret': self.client_secret,
'grant_type': 'authorization_code',
'code': data['code'],
'redirect_uri': redirect_uri
}, verify=self.verify, auth=self.auth)
def callback(self, data, redirect_uri):
state = flask.session['%s_state' % self.alias]
if 'state' in data and state != data['state']:
flask.abort(403)
del flask.session['%s_state' % self.alias]
if not self.supports_state:
redirect_uri += ('?state=%s' % state)
resp = self.get_access_token_response(redirect_uri, data)
return self.parse_token(resp.content)
def refresh_token(self, token):
resp = requests.post(self.get_access_token_url(), {
'client_id': self.client_id,
'client_secret': self.client_secret,
'grant_type': 'refresh_token',
'refresh_token': token
}, verify=self.verify, auth=self.auth)
return self.parse_token(resp.content)
def api(self, key, domain, path, method='GET', params=None, data=None,
headers=None):
protocol = self.https and 'https' or 'http'
url = '%s://%s%s' % (protocol, domain, path)
if self.token_type == BEARER:
auth = Bearer(key.access_token, bearer_type=self.bearer_type)
return requests.request(method, url, auth=auth, params=params or {},
data=data or {}, headers=headers or {},
verify=self.verify, stream=True)
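# Illustrative sketch (added for clarity; not part of the original module): a
# hypothetical OAuth2 provider would be declared by subclassing OAuth2 and
# filling in the endpoints used by the code above, e.g.
#
#     class ExampleService(OAuth2):
#         provider_url = 'https://example.com'
#         authorize_url = 'https://example.com/oauth/authorize'
#         access_token_url = 'https://example.com/oauth/token'
#         api_domain = 'api.example.com'
#
# The OAuthMeta metaclass then derives `alias`, `api_domains` and a favicon URL
# automatically; any other attributes used elsewhere in this project are not
# implied by this snippet.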
|
the-stack_0_4480 |
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from tensorflow.python.keras import Sequential
from tensorflow.python.keras.layers import Dense
from tensorflow.keras.optimizers import Adam
from replay_buffer import ReplayBuffer
def DeepQNetwork(lr, num_actions, input_dims, fc1, fc2):
q_net = Sequential()
q_net.add(Dense(fc1, input_dim=input_dims, activation='relu'))
q_net.add(Dense(fc2, activation='relu'))
q_net.add(Dense(num_actions, activation=None))
q_net.compile(optimizer=Adam(learning_rate=lr), loss='mse')
return q_net
class Agent:
def __init__(self, lr, discount_factor, num_actions, epsilon, batch_size, input_dims):
self.action_space = [i for i in range(num_actions)]
self.discount_factor = discount_factor
self.epsilon = epsilon
self.batch_size = batch_size
self.epsilon_decay = 0.001
self.epsilon_final = 0.01
self.update_rate = 100
self.step_counter = 0
self.buffer = ReplayBuffer(1000000, input_dims)
self.q_net = DeepQNetwork(lr, num_actions, input_dims, 256, 256)
self.q_target_net = DeepQNetwork(lr, num_actions, input_dims, 256, 256)
def store_tuple(self, state, action, reward, new_state, done):
self.buffer.store_tuples(state, action, reward, new_state, done)
def policy(self, observation):
if np.random.random() < self.epsilon:
action = np.random.choice(self.action_space)
else:
state = np.array([observation])
actions = self.q_net(state)
action = tf.math.argmax(actions, axis=1).numpy()[0]
return action
def train(self):
if self.buffer.counter < self.batch_size:
return
if self.step_counter % self.update_rate == 0:
self.q_target_net.set_weights(self.q_net.get_weights())
state_batch, action_batch, reward_batch, new_state_batch, done_batch = \
self.buffer.sample_buffer(self.batch_size)
q_predicted = self.q_net(state_batch)
q_next = self.q_target_net(new_state_batch)
q_max_next = tf.math.reduce_max(q_next, axis=1, keepdims=True).numpy()
q_target = np.copy(q_predicted)
for idx in range(done_batch.shape[0]):
target_q_val = reward_batch[idx]
if not done_batch[idx]:
target_q_val += self.discount_factor*q_max_next[idx]
q_target[idx, action_batch[idx]] = target_q_val
self.q_net.train_on_batch(state_batch, q_target)
self.epsilon = self.epsilon - self.epsilon_decay if self.epsilon > self.epsilon_final else self.epsilon_final
self.step_counter += 1
def train_model(self, env, num_episodes, graph):
scores, episodes, avg_scores, obj = [], [], [], []
goal = -110
f = 0
txt = open("saved_networks.txt", "w")
for i in range(num_episodes):
done = False
score = 0.0
state = env.reset()
while not done:
action = self.policy(state)
new_state, reward, done, _ = env.step(action)
score += reward
self.store_tuple(state, action, reward, new_state, done)
state = new_state
self.train()
scores.append(score)
obj.append(goal)
episodes.append(i)
avg_score = np.mean(scores[-100:])
avg_scores.append(avg_score)
print("Episode {0}/{1}, Score: {2} ({3}), AVG Score: {4}".format(i, num_episodes, score, self.epsilon,
avg_score))
if avg_score >= -110 and score >= -108:
self.q_net.save(("saved_networks/dqn_model{0}".format(f)))
self.q_net.save_weights(("saved_networks/dqn_model{0}/net_weights{0}.h5".format(f)))
txt.write("Save {0} - Episode {1}/{2}, Score: {3} ({4}), AVG Score: {5}\n".format(f, i, num_episodes,
score, self.epsilon,
avg_score))
f += 1
print("Network saved")
txt.close()
if graph:
df = pd.DataFrame({'x': episodes, 'Score': scores, 'Average Score': avg_scores, 'Solved Requirement': obj})
plt.plot('x', 'Score', data=df, marker='', color='blue', linewidth=2, label='Score')
plt.plot('x', 'Average Score', data=df, marker='', color='orange', linewidth=2, linestyle='dashed',
label='AverageScore')
plt.plot('x', 'Solved Requirement', data=df, marker='', color='red', linewidth=2, linestyle='dashed',
label='Solved Requirement')
plt.legend()
plt.savefig('MountainCar_Train.png')
def test(self, env, num_episodes, file_type, file, graph):
if file_type == 'tf':
self.q_net = tf.keras.models.load_model(file)
elif file_type == 'h5':
self.train_model(env, 5, False)
self.q_net.load_weights(file)
self.epsilon = 0.0
scores, episodes, avg_scores, obj = [], [], [], []
goal = -110
score = 0.0
for i in range(num_episodes):
state = env.reset()
done = False
episode_score = 0.0
while not done:
env.render()
action = self.policy(state)
new_state, reward, done, _ = env.step(action)
episode_score += reward
state = new_state
score += episode_score
scores.append(episode_score)
obj.append(goal)
episodes.append(i)
avg_score = np.mean(scores[-100:])
avg_scores.append(avg_score)
if graph:
df = pd.DataFrame({'x': episodes, 'Score': scores, 'Average Score': avg_scores, 'Solved Requirement': obj})
plt.plot('x', 'Score', data=df, marker='', color='blue', linewidth=2, label='Score')
plt.plot('x', 'Average Score', data=df, marker='', color='orange', linewidth=2, linestyle='dashed',
label='AverageScore')
plt.plot('x', 'Solved Requirement', data=df, marker='', color='red', linewidth=2, linestyle='dashed',
label='Solved Requirement')
plt.legend()
plt.savefig('MountainCar_Test.png')
env.close()
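# Illustrative usage sketch (added for clarity; not part of the original
# module). The hyperparameters below are placeholders, not tuned values:
#
#     import gym
#     env = gym.make('MountainCar-v0')
#     agent = Agent(lr=0.001, discount_factor=0.99,
#                   num_actions=env.action_space.n, epsilon=1.0,
#                   batch_size=64, input_dims=env.observation_space.shape[0])
#     agent.train_model(env, num_episodes=500, graph=True)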
|
the-stack_0_4481 | import re
from lxml import etree
from pyramid.settings import asbool
from .exception import ConfigurationError
def clean_oai_settings(settings):
"""Parse and validate OAI app settings in a dictionary.
Check that the settings required by the OAI app are in the settings
dictionary and have valid values. Convert them to correct types.
Required settings are:
admin_emails
deleted_records
item_list_limit
logging_config
repository_descriptions
repository_name
sqlalchemy.url
Parameters
----------
settings: dict from str to str
The settings dictionary.
Raises
------
ConfigurationError:
If some setting is missing or has an invalid value.
"""
cleaners = {
'admin_emails': _clean_admin_emails,
'deleted_records': _clean_deleted_records,
'item_list_limit': _clean_item_list_limit,
'logging_config': _clean_unicode,
'repository_descriptions': _load_repository_descriptions,
'repository_name': _clean_unicode,
'sqlalchemy.url': _clean_unicode,
}
_clean_settings(settings, cleaners)
def clean_importer_settings(settings):
"""Parse and validate metadata importer settings in a dictionary.
Check that the settings required by the metadata importer are in the
settings dictionary and have valid values. Convert them to correct
types. Required settings are:
deleted_records
dry_run
force_update
logging_config
sqlalchemy.url
timestamp_file
metadata_provider_class
metadata_provider_args
Parameters
----------
settings: dict from str to str
The settings dictionary.
Raises
------
ConfigurationError:
If some setting is missing or has an invalid value.
"""
cleaners = {
'deleted_records': _clean_deleted_records,
'dry_run': _clean_boolean,
'force_update': _clean_boolean,
'logging_config': _clean_unicode,
'sqlalchemy.url': _clean_unicode,
'timestamp_file': _clean_unicode,
'metadata_provider_args': _clean_unicode,
'metadata_provider_class': _clean_provider_class,
}
return _clean_settings(settings, cleaners)
def _clean_settings(settings, cleaners):
"""Check that settings are ok.
The parameter `cleaners` is a dict from setting names to functions.
Each cleaner function is called with the value of the corresponding
setting. The cleaners should raise an exception if the value is invalid
and otherwise return a cleaned value. The old value gets replaced by
the cleaned value.
Parameters
----------
settings: dict from str to str
The settings dictionary.
cleaners: dict from str to callable
Mapping from setting names to cleaner functions.
Raises
------
ConfigurationError:
If any setting is missing or invalid.
"""
for name, func in cleaners.items():
if name not in settings:
raise ConfigurationError('missing setting {0}'.format(name))
try:
cleaned = func(settings[name])
settings[name] = cleaned
except Exception as error:
raise ConfigurationError(
'invalid {0} setting: {1}'.format(name, error)
)
def _clean_admin_emails(value):
"""Check that the value is a list of valid email addresses."""
# email regex pattern defined in the OAI-PMH XML schema
pattern = re.compile(r'^\S+@(\S+\.)+\S+$', flags=re.UNICODE)
emails = _clean_unicode(value).split()
if not emails:
raise ValueError('no emails')
for email in emails:
if re.match(pattern, email) is None:
raise ValueError(
'invalid email address: {0}'
''.format(repr(email))
)
return emails
def _clean_deleted_records(value):
"""Check that value is one of "no", "transient", "persistent"."""
allowed_values = ['no', 'transient', 'persistent']
if value not in allowed_values:
raise ValueError('deleted_records must be one of {0}'.format(
allowed_values
))
return str(value)
def _clean_boolean(value):
"""Return the value as a bool."""
return asbool(value)
def _clean_item_list_limit(value):
"""Check that value is a positive integer."""
int_value = int(value)
if int_value <= 0:
raise ValueError('item_list_limit must be positive')
return int_value
def _clean_unicode(value):
"""Return the value as a unicode."""
if isinstance(value, bytes):
return value.decode('utf-8')
else:
return str(value)
def _clean_provider_class(value):
"""Split the value to module name and classname."""
modulename, classname = value.split(':')
if len(modulename) == 0:
raise ValueError('empty module name')
if len(classname) == 0:
raise ValueError('empty class name')
return (modulename, classname)
def _load_repository_descriptions(value):
"""Load XML fragments from files."""
def load_description(path):
"""Load a single description."""
with open(path, 'r') as file_:
contents = file_.read()
try:
doc = etree.fromstring(contents.encode('utf-8'))
except Exception as error:
raise ValueError(
'ill-formed XML in repository description {0}: '
'{1}'.format(repr(path), error)
)
xsi_ns = 'http://www.w3.org/2001/XMLSchema-instance'
if doc.get('{{{0}}}schemaLocation'.format(xsi_ns)) is None:
raise ValueError('no schema location')
return contents
paths = value.split()
return list(map(load_description, paths))
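# Illustrative example (added for clarity; not part of the original module):
# clean_importer_settings() converts values in place, so a hypothetical
#
#     settings = {'deleted_records': 'no', 'dry_run': 'false',
#                 'force_update': 'true', 'logging_config': 'logging.yaml',
#                 'sqlalchemy.url': 'sqlite:///oai.db',
#                 'timestamp_file': 'stamp.txt', 'metadata_provider_args': '',
#                 'metadata_provider_class': 'my_module:MyProvider'}
#
# ends up with dry_run=False, force_update=True and
# metadata_provider_class=('my_module', 'MyProvider'); a missing key or an
# invalid value raises ConfigurationError instead.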
|
the-stack_0_4482 | import pygame.font
class Button():
"""Basic button, since pygame doesn't have it built-in"""
def __init__(self, ai_game, message):
"""Initialize button attributes"""
self.screen = ai_game.screen
self.screen_rect = self.screen.get_rect()
self.width, self.height = 200, 50
self.button_color = (0, 255, 0)
self.text_color = (255, 255, 255)
self.font = pygame.font.SysFont(None, 48)
self.rect = pygame.Rect(0, 0, self.width, self.height)
self.rect.center = self.screen_rect.center
self._render_message(message)
def _render_message(self, message):
"""Turn message into a rendered image on center of the button"""
self.message_image = self.font.render(
message, True, self.text_color, self.button_color)
self.message_image_rect = self.message_image.get_rect()
self.message_image_rect.center = self.rect.center
def draw(self):
"""Draw button with message"""
self.screen.fill(self.button_color, self.rect)
self.screen.blit(self.message_image, self.message_image_rect)
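# Illustrative usage sketch (added for clarity; not part of the original
# module). In a typical Pygame event loop the button is drawn every frame and
# clicks are tested against its rect; `ai_game` is assumed to be any object
# exposing a `screen` surface:
#
#     play_button = Button(ai_game, "Play")
#     play_button.draw()
#     if event.type == pygame.MOUSEBUTTONDOWN:
#         if play_button.rect.collidepoint(pygame.mouse.get_pos()):
#             start_game()  # hypothetical callback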
|
the-stack_0_4483 | #! /usr/bin/python
import argparse
import glob
import os
import sys
import tarfile
def parse_args():
parser = argparse.ArgumentParser()
products = ["rdn", "obs_ort", "loc", "igm", "glt"]
formats = ["envi", "hdf"]
parser.add_argument("-p", "--product",
help=("Choose one of the following product types: " + ", ".join(products)))
parser.add_argument("-f", "--format",
help=("Choose one of the following formats: " + ", ".join(formats)))
args = parser.parse_args()
if args.product:
if args.product not in products:
print("ERROR: Product \"%s\" is not a valid product choice." % args.product)
sys.exit(1)
if args.format:
if args.format not in formats:
print("ERROR: Format \"%s\" is not a valid format choice." % f)
sys.exit(1)
return args
def main():
args = parse_args()
# Unzip and untar granules
input_dir = "input"
granule_paths = glob.glob(os.path.join(input_dir, "*.tar.gz"))
for g in granule_paths:
tar_file = tarfile.open(g)
tar_file.extractall(input_dir)
tar_file.close()
os.remove(g)
dirs = [d for d in os.listdir(input_dir) if os.path.isdir(os.path.join(input_dir, d))]
instrument = "PRISMA" if dirs[0][:3] == "PRS" else "AVIRIS"
# Get paths based on product type file matching
paths = []
if instrument == "PRISMA":
if args.product == "rdn":
paths = glob.glob(os.path.join(input_dir, "*", "*rdn_prj"))
elif args.product == "obs_ort":
paths = glob.glob(os.path.join(input_dir, "*", "*obs_prj"))
elif args.product == "loc":
paths = glob.glob(os.path.join(input_dir, "*", "*loc_prj"))
elif instrument == "AVIRIS":
if args.product == "rdn":
paths = glob.glob(os.path.join(input_dir, "*rdn*", "*rdn*img"))
elif args.product == "obs_ort":
paths = glob.glob(os.path.join(input_dir, "*rdn*", "*obs_ort"))
elif args.product == "loc":
paths = glob.glob(os.path.join(input_dir, "*rdn*", "*loc"))
elif args.product == "igm":
paths = glob.glob(os.path.join(input_dir, "*rdn*", "*igm"))
elif args.product == "glt":
paths = glob.glob(os.path.join(input_dir, "*rdn*", "*glt"))
print(",".join(paths))
if __name__ == "__main__":
main()
|
the-stack_0_4484 | import logging
import os
from pythonjsonlogger import jsonlogger
def setup_logging(log_level):
logger = logging.getLogger()
logger.setLevel(log_level)
handler = logging.StreamHandler()
handler.setFormatter(
jsonlogger.JsonFormatter(
fmt='%(asctime)s %(levelname)s %(lambda)s %(message)s'
)
)
logger.addHandler(handler)
logger.removeHandler(logger.handlers[0])
def get_logger():
logger = logging.getLogger()
logger = logging.LoggerAdapter(
logger,
{'lambda': os.environ.get('AWS_LAMBDA_FUNCTION_NAME', '')}
)
return logger
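# Illustrative usage sketch (added for clarity; not part of the original
# module). Inside an AWS Lambda handler one might write:
#
#     setup_logging(logging.INFO)
#     logger = get_logger()
#     logger.info("handler invoked")
#
# which emits a JSON record containing asctime, levelname, the Lambda function
# name (from AWS_LAMBDA_FUNCTION_NAME) and the message.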
|
the-stack_0_4485 | import speech_recognition
import re
name = re.compile(r'(name is | nome é)(.*)', re.IGNORECASE)
goodbye = re.compile(r'(.*)(goodbye)(.*)', re.IGNORECASE)
recognizer = speech_recognition.Recognizer()
with speech_recognition.Microphone() as source:
print("Say something!")
audio = recognizer.listen(source)
print("Google Speech Recognition thinks you said:")
print(recognizer.recognize_google(audio))
words = recognizer.recognize_google(audio)
if mo := name.search(words):
print(f"Hello, {mo.group(2)}")
elif mo := goodbye.search(words):
print(f"{mo.group(2)} to you!")
|
the-stack_0_4487 | """
:ref:`chainladder.methods<methods>`.MackChainladder
===================================================
:ref:`MackChainladder<mack>` produces the same IBNR results as the deterministic
approach, but ldf selection happens in a regression framework that allows for
the calculation of prediction errors. The Mack Chainladder technique is the OG
stochastic method.
"""
import numpy as np
import pandas as pd
import copy
from chainladder.methods import Chainladder
class MackChainladder(Chainladder):
""" Basic stochastic chainladder method popularized by Thomas Mack
Parameters
----------
None
Attributes
----------
triangle
returns **X**
ultimate_
The ultimate losses per the method
ibnr_
The IBNR per the method
full_expectation_
The ultimates back-filled to each development period in **X** replacing
the known data
full_triangle_
The ultimates back-filled to each development period in **X** retaining
the known data
summary_
summary of the model
full_std_err_
The full standard error
total_process_risk_
The total process error
total_parameter_risk_
The total parameter error
mack_std_err_
The total prediction error by origin period
total_mack_std_err_
The total prediction error across all origin periods
"""
def fit(self, X, y=None, sample_weight=None):
"""Fit the model with X.
Parameters
----------
X : Triangle-like
Data to which the model will be applied.
y : Ignored
sample_weight : Ignored
Returns
-------
self : object
Returns the instance itself.
"""
super().fit(X, y, sample_weight)
self._mack_recursion('param_risk')
self._mack_recursion('process_risk')
self._mack_recursion('total_param_risk')
return self
@property
def full_std_err_(self):
obj = copy.deepcopy(self.X_)
tri_array = self.full_triangle_.triangle
weight_dict = {'regression': 2, 'volume': 1, 'simple': 0}
val = np.array([weight_dict.get(item.lower(), 2)
for item in list(self.average_) + ['volume']])
for i in [2, 1, 0]:
val = np.repeat(np.expand_dims(val, 0), tri_array.shape[i], axis=0)
k, v, o, d = val.shape
weight = np.sqrt(tri_array[..., :len(self.X_.ddims)]**(2-val))
weight[weight == 0] = np.nan
obj.triangle = self.X_.sigma_.triangle / weight
w = np.concatenate((self.X_.w_, np.ones((k, v, o, 1))*np.nan), axis=3)
w[np.isnan(w)] = 1
obj.triangle = np.nan_to_num(obj.triangle) * w
obj.nan_override = True
return obj
@property
def total_process_risk_(self):
obj = copy.deepcopy(self.process_risk_)
obj.triangle = np.sqrt(np.nansum(self.process_risk_.triangle**2, 2))
obj.triangle = np.expand_dims(obj.triangle, 2)
obj.odims = ['tot_proc_risk']
return obj
def _mack_recursion(self, est):
obj = copy.deepcopy(self.X_)
# replace this with nan_x_latest
nans = np.expand_dims(np.expand_dims(self.X_.nan_triangle(), 0), 0)
k, v, o, d = self.X_.shape
nans = nans * np.ones((k, v, o, d))
nans = np.concatenate((nans, np.ones((k, v, o, 1))*np.nan), 3)
nans = 1-np.nan_to_num(nans)
properties = self.full_triangle_
obj.ddims, obj.valuation = properties.ddims, properties.valuation
obj.nan_override = True
risk_arr = np.zeros((k, v, o, 1))
if est == 'param_risk':
obj.triangle = self._get_risk(nans, risk_arr,
obj.std_err_.triangle)
self.parameter_risk_ = obj
elif est == 'process_risk':
obj.triangle = self._get_risk(nans, risk_arr,
self.full_std_err_.triangle)
self.process_risk_ = obj
else:
risk_arr = risk_arr[..., 0:1, :]
obj.triangle = self._get_tot_param_risk(risk_arr)
obj.odims = ['Total param risk']
self.total_parameter_risk_ = obj
def _get_risk(self, nans, risk_arr, std_err):
full_tri = self.full_triangle_.triangle[..., :len(self.X_.ddims)]
t1_t = (full_tri * std_err)**2
extend = self.X_.ldf_.shape[-1]-self.X_.shape[-1]+1
ldf = self.X_.ldf_.triangle[..., :len(self.X_.ddims)-1]
ldf = np.concatenate(
(ldf, np.prod(self.X_.ldf_.triangle[..., -extend:], -1,
keepdims=True)), -1)
for i in range(len(self.X_.ddims)):
t1 = t1_t[..., i:i+1]
t2 = (ldf[..., i:i+1] * risk_arr[..., i:i+1])**2
t_tot = np.sqrt(t1+t2)*nans[..., i+1:i+2]
risk_arr = np.concatenate((risk_arr, t_tot), 3)
return risk_arr
def _get_tot_param_risk(self, risk_arr):
""" This assumes triangle symmertry """
t1 = self.full_triangle_.triangle[..., :len(self.X_.ddims)] - \
np.nan_to_num(self.X_.triangle) + \
np.nan_to_num(self.X_.get_latest_diagonal(False).triangle)
t1 = np.expand_dims(np.sum(t1*self.X_.std_err_.triangle, 2), 2)
extend = self.X_.ldf_.shape[-1]-self.X_.shape[-1]+1
ldf = self.X_.ldf_.triangle[..., :len(self.X_.ddims)-1]
ldf = np.concatenate(
(ldf, np.prod(self.X_.ldf_.triangle[..., -extend:], -1,
keepdims=True)), -1)
ldf = np.unique(ldf, axis=-2)
for i in range(self.full_triangle_.shape[-1]-1):
t_tot = np.sqrt((t1[..., i:i+1])**2 + (ldf[..., i:i+1] *
risk_arr[..., -1:])**2)
risk_arr = np.concatenate((risk_arr, t_tot), -1)
return risk_arr
@property
def mack_std_err_(self):
obj = copy.deepcopy(self.parameter_risk_)
obj.triangle = np.sqrt(self.parameter_risk_.triangle**2 +
self.process_risk_.triangle**2)
return obj
@property
def total_mack_std_err_(self):
# This might be better as a dataframe
obj = copy.deepcopy(self.X_.latest_diagonal)
obj.triangle = np.sqrt(self.total_process_risk_.triangle**2 +
self.total_parameter_risk_.triangle**2)
obj.triangle = obj.triangle[..., -1:]
obj.ddims = ['Total Mack Std Err']
obj.odims = ['Total']
return obj
@property
def summary_(self):
# This might be better as a dataframe
obj = copy.deepcopy(self.X_)
obj.triangle = np.concatenate(
(self.X_.latest_diagonal.triangle,
self.ibnr_.triangle,
self.ultimate_.triangle,
self.mack_std_err_.triangle[..., -1:]), 3)
obj.ddims = ['Latest', 'IBNR', 'Ultimate', 'Mack Std Err']
obj.nan_override = True
return obj
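# Illustrative usage sketch (added for clarity; not part of the original
# module). Given a loss Triangle `X` prepared elsewhere, the model is fit like
# the deterministic method and then exposes the stochastic diagnostics defined
# above:
#
#     model = MackChainladder().fit(X)
#     model.summary_             # Latest / IBNR / Ultimate / Mack Std Err
#     model.total_mack_std_err_  # total prediction error across all origins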
|
the-stack_0_4488 | """
pygments.cmdline
~~~~~~~~~~~~~~~~
Command line interface.
:copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import os
import sys
import getopt
from textwrap import dedent
from pygments import __version__, highlight
from pygments.util import ClassNotFound, OptionError, docstring_headline, \
guess_decode, guess_decode_from_terminal, terminal_encoding, \
UnclosingTextIOWrapper
from pygments.lexers import get_all_lexers, get_lexer_by_name, guess_lexer, \
load_lexer_from_file, get_lexer_for_filename, find_lexer_class_for_filename
from pygments.lexers.special import TextLexer
from pygments.formatters.latex import LatexEmbeddedLexer, LatexFormatter
from pygments.formatters import get_all_formatters, get_formatter_by_name, \
load_formatter_from_file, get_formatter_for_filename, find_formatter_class
from pygments.formatters.terminal import TerminalFormatter
from pygments.formatters.terminal256 import Terminal256Formatter
from pygments.filters import get_all_filters, find_filter_class
from pygments.styles import get_all_styles, get_style_by_name
USAGE = """\
Usage: %s [-l <lexer> | -g] [-F <filter>[:<options>]] [-f <formatter>]
[-O <options>] [-P <option=value>] [-s] [-v] [-x] [-o <outfile>] [<infile>]
%s -S <style> -f <formatter> [-a <arg>] [-O <options>] [-P <option=value>]
%s -L [<which> ...]
%s -N <filename>
%s -C
%s -H <type> <name>
%s -h | -V
Highlight the input file and write the result to <outfile>.
If no input file is given, use stdin, if -o is not given, use stdout.
If -s is passed, lexing will be done in "streaming" mode, reading and
highlighting one line at a time. This will only work properly with
lexers that have no constructs spanning multiple lines!
<lexer> is a lexer name (query all lexer names with -L). If -l is not
given, the lexer is guessed from the extension of the input file name
(this obviously doesn't work if the input is stdin). If -g is passed,
attempt to guess the lexer from the file contents, or pass through as
plain text if this fails (this can work for stdin).
Likewise, <formatter> is a formatter name, and will be guessed from
the extension of the output file name. If no output file is given,
the terminal formatter will be used by default.
The additional option -x allows custom lexers and formatters to be
loaded from a .py file relative to the current working directory. For
example, ``-l ./customlexer.py -x``. By default, this option expects a
file with a class named CustomLexer or CustomFormatter; you can also
specify your own class name with a colon (``-l ./lexer.py:MyLexer``).
Users should be very careful not to use this option with untrusted files,
because it will import and run them.
With the -O option, you can give the lexer and formatter a comma-
separated list of options, e.g. ``-O bg=light,python=cool``.
The -P option adds lexer and formatter options like the -O option, but
you can only give one option per -P. That way, the option value may
contain commas and equals signs, which it can't with -O, e.g.
``-P "heading=Pygments, the Python highlighter".
With the -F option, you can add filters to the token stream, you can
give options in the same way as for -O after a colon (note: there must
not be spaces around the colon).
The -O, -P and -F options can be given multiple times.
With the -S option, print out style definitions for style <style>
for formatter <formatter>. The argument given by -a is formatter
dependent.
The -L option lists lexers, formatters, styles or filters -- set
`which` to the thing you want to list (e.g. "styles"), or omit it to
list everything.
The -N option guesses and prints out a lexer name based solely on
the given filename. It does not take input or highlight anything.
If no specific lexer can be determined "text" is returned.
The -C option is like -N, but prints out a lexer name based solely on
a given content from standard input.
The -H option prints detailed help for the object <name> of type <type>,
where <type> is one of "lexer", "formatter" or "filter".
The -s option processes lines one at a time until EOF, rather than
waiting to process the entire file. This only works for stdin, and
is intended for streaming input such as you get from 'tail -f'.
Example usage: "tail -f sql.log | pygmentize -s -l sql"
The -v option prints a detailed traceback on unhandled exceptions,
which is useful for debugging and bug reports.
The -h option prints this help.
The -V option prints the package version.
"""
def _parse_options(o_strs):
opts = {}
if not o_strs:
return opts
for o_str in o_strs:
if not o_str.strip():
continue
o_args = o_str.split(',')
for o_arg in o_args:
o_arg = o_arg.strip()
try:
o_key, o_val = o_arg.split('=', 1)
o_key = o_key.strip()
o_val = o_val.strip()
except ValueError:
opts[o_arg] = True
else:
opts[o_key] = o_val
return opts
def _parse_filters(f_strs):
filters = []
if not f_strs:
return filters
for f_str in f_strs:
if ':' in f_str:
fname, fopts = f_str.split(':', 1)
filters.append((fname, _parse_options([fopts])))
else:
filters.append((f_str, {}))
return filters
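# Illustrative example (added for clarity; not part of the original module):
# _parse_options(['bg=light,python=cool', 'linenos']) returns
# {'bg': 'light', 'python': 'cool', 'linenos': True}, while
# _parse_filters(['keywordcase:case=upper']) returns
# [('keywordcase', {'case': 'upper'})].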
def _print_help(what, name):
try:
if what == 'lexer':
cls = get_lexer_by_name(name)
print("Help on the %s lexer:" % cls.name)
print(dedent(cls.__doc__))
elif what == 'formatter':
cls = find_formatter_class(name)
print("Help on the %s formatter:" % cls.name)
print(dedent(cls.__doc__))
elif what == 'filter':
cls = find_filter_class(name)
print("Help on the %s filter:" % name)
print(dedent(cls.__doc__))
return 0
except (AttributeError, ValueError):
print("%s not found!" % what, file=sys.stderr)
return 1
def _print_list(what):
if what == 'lexer':
print()
print("Lexers:")
print("~~~~~~~")
info = []
for fullname, names, exts, _ in get_all_lexers():
tup = (', '.join(names)+':', fullname,
exts and '(filenames ' + ', '.join(exts) + ')' or '')
info.append(tup)
info.sort()
for i in info:
print(('* %s\n %s %s') % i)
elif what == 'formatter':
print()
print("Formatters:")
print("~~~~~~~~~~~")
info = []
for cls in get_all_formatters():
doc = docstring_headline(cls)
tup = (', '.join(cls.aliases) + ':', doc, cls.filenames and
'(filenames ' + ', '.join(cls.filenames) + ')' or '')
info.append(tup)
info.sort()
for i in info:
print(('* %s\n %s %s') % i)
elif what == 'filter':
print()
print("Filters:")
print("~~~~~~~~")
for name in get_all_filters():
cls = find_filter_class(name)
print("* " + name + ':')
print(" %s" % docstring_headline(cls))
elif what == 'style':
print()
print("Styles:")
print("~~~~~~~")
for name in get_all_styles():
cls = get_style_by_name(name)
print("* " + name + ':')
print(" %s" % docstring_headline(cls))
def main_inner(popts, args, usage):
opts = {}
O_opts = []
P_opts = []
F_opts = []
for opt, arg in popts:
if opt == '-O':
O_opts.append(arg)
elif opt == '-P':
P_opts.append(arg)
elif opt == '-F':
F_opts.append(arg)
opts[opt] = arg
if opts.pop('-h', None) is not None:
print(usage)
return 0
if opts.pop('-V', None) is not None:
print('Pygments version %s, (c) 2006-2021 by Georg Brandl.' % __version__)
return 0
# handle ``pygmentize -L``
L_opt = opts.pop('-L', None)
if L_opt is not None:
if opts:
print(usage, file=sys.stderr)
return 2
# print version
main(['', '-V'])
if not args:
args = ['lexer', 'formatter', 'filter', 'style']
for arg in args:
_print_list(arg.rstrip('s'))
return 0
# handle ``pygmentize -H``
H_opt = opts.pop('-H', None)
if H_opt is not None:
if opts or len(args) != 2:
print(usage, file=sys.stderr)
return 2
what, name = args # pylint: disable=unbalanced-tuple-unpacking
if what not in ('lexer', 'formatter', 'filter'):
print(usage, file=sys.stderr)
return 2
return _print_help(what, name)
# parse -O options
parsed_opts = _parse_options(O_opts)
opts.pop('-O', None)
# parse -P options
for p_opt in P_opts:
try:
name, value = p_opt.split('=', 1)
except ValueError:
parsed_opts[p_opt] = True
else:
parsed_opts[name] = value
opts.pop('-P', None)
# encodings
inencoding = parsed_opts.get('inencoding', parsed_opts.get('encoding'))
outencoding = parsed_opts.get('outencoding', parsed_opts.get('encoding'))
# handle ``pygmentize -N``
infn = opts.pop('-N', None)
if infn is not None:
lexer = find_lexer_class_for_filename(infn)
if lexer is None:
lexer = TextLexer
print(lexer.aliases[0])
return 0
# handle ``pygmentize -C``
infc = opts.pop('-C', None)
if infc is not None:
inp = sys.stdin.buffer.read()
try:
lexer = guess_lexer(inp, inencoding=inencoding)
except ClassNotFound:
lexer = TextLexer
print(lexer.aliases[0])
return 0
# handle ``pygmentize -S``
S_opt = opts.pop('-S', None)
a_opt = opts.pop('-a', None)
if S_opt is not None:
f_opt = opts.pop('-f', None)
if not f_opt:
print(usage, file=sys.stderr)
return 2
if opts or args:
print(usage, file=sys.stderr)
return 2
try:
parsed_opts['style'] = S_opt
fmter = get_formatter_by_name(f_opt, **parsed_opts)
except ClassNotFound as err:
print(err, file=sys.stderr)
return 1
print(fmter.get_style_defs(a_opt or ''))
return 0
# if no -S is given, -a is not allowed
if a_opt is not None:
print(usage, file=sys.stderr)
return 2
# parse -F options
F_opts = _parse_filters(F_opts)
opts.pop('-F', None)
allow_custom_lexer_formatter = False
# -x: allow custom (eXternal) lexers and formatters
if opts.pop('-x', None) is not None:
allow_custom_lexer_formatter = True
# select lexer
lexer = None
# given by name?
lexername = opts.pop('-l', None)
if lexername:
# custom lexer, located relative to user's cwd
if allow_custom_lexer_formatter and '.py' in lexername:
try:
filename = None
name = None
if ':' in lexername:
filename, name = lexername.rsplit(':', 1)
if '.py' in name:
# This can happen on Windows: If the lexername is
# C:\lexer.py -- return to normal load path in that case
name = None
if filename and name:
lexer = load_lexer_from_file(filename, name,
**parsed_opts)
else:
lexer = load_lexer_from_file(lexername, **parsed_opts)
except ClassNotFound as err:
print('Error:', err, file=sys.stderr)
return 1
else:
try:
lexer = get_lexer_by_name(lexername, **parsed_opts)
except (OptionError, ClassNotFound) as err:
print('Error:', err, file=sys.stderr)
return 1
# read input code
code = None
if args:
if len(args) > 1:
print(usage, file=sys.stderr)
return 2
if '-s' in opts:
print('Error: -s option not usable when input file specified',
file=sys.stderr)
return 2
infn = args[0]
try:
with open(infn, 'rb') as infp:
code = infp.read()
except Exception as err:
print('Error: cannot read infile:', err, file=sys.stderr)
return 1
if not inencoding:
code, inencoding = guess_decode(code)
# do we have to guess the lexer?
if not lexer:
try:
lexer = get_lexer_for_filename(infn, code, **parsed_opts)
except ClassNotFound as err:
if '-g' in opts:
try:
lexer = guess_lexer(code, **parsed_opts)
except ClassNotFound:
lexer = TextLexer(**parsed_opts)
else:
print('Error:', err, file=sys.stderr)
return 1
except OptionError as err:
print('Error:', err, file=sys.stderr)
return 1
elif '-s' not in opts: # treat stdin as full file (-s support is later)
# read code from terminal, always in binary mode since we want to
# decode ourselves and be tolerant with it
code = sys.stdin.buffer.read() # use .buffer to get a binary stream
if not inencoding:
code, inencoding = guess_decode_from_terminal(code, sys.stdin)
# else the lexer will do the decoding
if not lexer:
try:
lexer = guess_lexer(code, **parsed_opts)
except ClassNotFound:
lexer = TextLexer(**parsed_opts)
else: # -s option needs a lexer with -l
if not lexer:
print('Error: when using -s a lexer has to be selected with -l',
file=sys.stderr)
return 2
# process filters
for fname, fopts in F_opts:
try:
lexer.add_filter(fname, **fopts)
except ClassNotFound as err:
print('Error:', err, file=sys.stderr)
return 1
# select formatter
outfn = opts.pop('-o', None)
fmter = opts.pop('-f', None)
if fmter:
# custom formatter, located relative to user's cwd
if allow_custom_lexer_formatter and '.py' in fmter:
try:
filename = None
name = None
if ':' in fmter:
# Same logic as above for custom lexer
filename, name = fmter.rsplit(':', 1)
if '.py' in name:
name = None
if filename and name:
fmter = load_formatter_from_file(filename, name,
**parsed_opts)
else:
fmter = load_formatter_from_file(fmter, **parsed_opts)
except ClassNotFound as err:
print('Error:', err, file=sys.stderr)
return 1
else:
try:
fmter = get_formatter_by_name(fmter, **parsed_opts)
except (OptionError, ClassNotFound) as err:
print('Error:', err, file=sys.stderr)
return 1
if outfn:
if not fmter:
try:
fmter = get_formatter_for_filename(outfn, **parsed_opts)
except (OptionError, ClassNotFound) as err:
print('Error:', err, file=sys.stderr)
return 1
try:
outfile = open(outfn, 'wb')
except Exception as err:
print('Error: cannot open outfile:', err, file=sys.stderr)
return 1
else:
if not fmter:
if '256' in os.environ.get('TERM', ''):
fmter = Terminal256Formatter(**parsed_opts)
else:
fmter = TerminalFormatter(**parsed_opts)
outfile = sys.stdout.buffer
# determine output encoding if not explicitly selected
if not outencoding:
if outfn:
# output file? use lexer encoding for now (can still be None)
fmter.encoding = inencoding
else:
# else use terminal encoding
fmter.encoding = terminal_encoding(sys.stdout)
# provide coloring under Windows, if possible
if not outfn and sys.platform in ('win32', 'cygwin') and \
fmter.name in ('Terminal', 'Terminal256'): # pragma: no cover
# unfortunately colorama doesn't support binary streams on Py3
outfile = UnclosingTextIOWrapper(outfile, encoding=fmter.encoding)
fmter.encoding = None
try:
import colorama.initialise
except ImportError:
pass
else:
outfile = colorama.initialise.wrap_stream(
outfile, convert=None, strip=None, autoreset=False, wrap=True)
# When using the LaTeX formatter and the option `escapeinside` is
# specified, we need a special lexer which collects escaped text
# before running the chosen language lexer.
escapeinside = parsed_opts.get('escapeinside', '')
if len(escapeinside) == 2 and isinstance(fmter, LatexFormatter):
left = escapeinside[0]
right = escapeinside[1]
lexer = LatexEmbeddedLexer(left, right, lexer)
# ... and do it!
if '-s' not in opts:
# process whole input as per normal...
try:
highlight(code, lexer, fmter, outfile)
finally:
if outfn:
outfile.close()
return 0
else:
# line by line processing of stdin (eg: for 'tail -f')...
try:
while 1:
line = sys.stdin.buffer.readline()
if not line:
break
if not inencoding:
line = guess_decode_from_terminal(line, sys.stdin)[0]
highlight(line, lexer, fmter, outfile)
if hasattr(outfile, 'flush'):
outfile.flush()
return 0
except KeyboardInterrupt: # pragma: no cover
return 0
finally:
if outfn:
outfile.close()
def main(args=sys.argv):
"""
Main command line entry point.
"""
usage = USAGE % ((args[0],) * 7)
try:
popts, args = getopt.getopt(args[1:], "l:f:F:o:O:P:LS:a:N:CvhVHgsx")
except getopt.GetoptError:
print(usage, file=sys.stderr)
return 2
try:
return main_inner(popts, args, usage)
except Exception:
if '-v' in dict(popts):
print(file=sys.stderr)
print('*' * 65, file=sys.stderr)
print('An unhandled exception occurred while highlighting.',
file=sys.stderr)
print('Please report the whole traceback to the issue tracker at',
file=sys.stderr)
print('<https://github.com/pygments/pygments/issues>.',
file=sys.stderr)
print('*' * 65, file=sys.stderr)
print(file=sys.stderr)
raise
import traceback
info = traceback.format_exception(*sys.exc_info())
msg = info[-1].strip()
if len(info) >= 3:
# extract relevant file and position info
msg += '\n (f%s)' % info[-2].split('\n')[0].strip()[1:]
print(file=sys.stderr)
print('*** Error while highlighting:', file=sys.stderr)
print(msg, file=sys.stderr)
print('*** If this is a bug you want to report, please rerun with -v.',
file=sys.stderr)
return 1
|
the-stack_0_4489 | # Copyright 2019 Open End AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import py.test
import os
from bson.objectid import ObjectId
from pytransact import commit, context
from pytransact.contextbroker import ContextBroker
from pytransact.testsupport import ContextTests
blmdir = os.path.join(os.path.dirname(__file__), 'blm')
import blm
def setup_module(module):
global blm
from pytransact import blm
blm.addBlmPath(blmdir)
from blm import fundamental, testcommit
def teardown_module(module):
blm.removeBlmPath(blmdir)
blm.clear()
class FakeUser(object):
@classmethod
def _create(cls, user):
return user
class TestContext(ContextTests):
def setup_method(self, method):
super(TestContext, self).setup_method(method)
self.user = blm.fundamental.AccessHolder()
def test_createQuery(self):
query = self.ctx.createQuery(blm.testcommit.Test, {'name': 'test'})
assert isinstance(query, context.ServiceQuery)
def test_query_invisible(self):
toi = blm.testcommit.Test(name=['test'])
self.sync()
cctx = self.pushnewctx(user=self.user)
r = blm.testcommit.Test._query(name='test').run()
assert r == []
q = blm.testcommit.Test._query()
q.clear()
r = q.run()
assert r == []
def test_query_visible(self):
toi = blm.testcommit.Test(name=['test'], allowRead=[self.user])
self.sync()
cctx = self.pushnewctx(user=self.user)
r = blm.testcommit.Test._query(name='test').run()
assert r == [toi]
assert not r[0]._phantom
def test_query_with_data_fetching(self):
blm.testcommit.Test(name=['test'], reorder=['foo', 'bar'], unique=['baz'])
self.sync()
cctx = self.pushnewctx()
query = blm.testcommit.Test._query(name='test')
query.attrList = {'name', 'reorder', 'toirefmap'}
toi, = query.run()
assert toi._attrData == {'name': ['test'], 'reorder': ['foo', 'bar'],
'toirefmap': {}}
assert not toi._phantom
query = blm.testcommit.Test._query(name='test')
query.attrList = {'unique'}
toi, = query.run()
assert toi._attrData == {'name': ['test'], 'reorder': ['foo', 'bar'],
'toirefmap': {},
'unique': ['baz']}
# test that we don't explode on non existing attributes in attrlist
query = blm.testcommit.Test._query(name='test')
query.attrList = {'doesnotexist'}
toi, = query.run() # don't explode
assert toi._attrData == {'name': ['test'], 'reorder': ['foo', 'bar'],
'toirefmap': {},
'unique': ['baz']}
def test_clearTois(self):
toi = blm.testcommit.Test(name=['test'])
assert 'name' in toi._attrData
self.ctx.clearTois()
assert toi._attrData == {}
def test_clone(self):
clone = self.ctx.clone()
assert id(clone) != id(self.ctx)
assert type(clone) == type(self.ctx)
assert clone.user == self.ctx.user
assert clone.database == self.ctx.database
class OtherContext(context.ReadonlyContext):
pass
clone = OtherContext.clone()
assert type(clone) == OtherContext
assert clone.user == self.ctx.user
assert clone.database == self.ctx.database
clone = OtherContext.clone(self.ctx)
assert type(clone) == OtherContext
assert clone.user == self.ctx.user
assert clone.database == self.ctx.database
clone = OtherContext.clone(self.ctx, user=self.user)
assert type(clone) == OtherContext
assert clone.user == self.user
assert clone.database == self.ctx.database
def test_requestAttribute(self):
toi = blm.testcommit.Test(name=['foo'], reorder=['bar'], unique=['baz'])
id = toi.id[0]
self.sync()
ctx = self.pushnewctx()
toi = blm.testcommit.Test._create(id)
assert toi._phantom # toi is not known yet
assert toi.name == ['foo']
assert toi.toirefmap == {} # default for maps is a dict, not a list
assert not toi._phantom # toi is known
# toi not in the db and not newly created
toi = blm.testcommit.Test._create(ObjectId())
assert toi._phantom # toi is not known yet
assert toi.name == []
assert toi._phantom # toi is still not known
def test_requestAttribute_copy_default(self):
toi1 = blm.testcommit.Test()
toi2 = blm.testcommit.Test()
id1, id2 = toi1.id[0], toi2.id[0]
self.sync()
ctx = self.pushnewctx()
toi1, = blm.testcommit.Test._query(id=id1).run()
name = toi1.name.value
name.append('foo')
toi1.name = name
assert toi1.name == ['foo'] # sanity
toi2, = blm.testcommit.Test._query(id=id2).run()
# if we are not careful with *copying* the default value above
# we may end up with toi2.name == ['foo']
assert toi2.name == []
def test_preload(self):
toi = blm.testcommit.Test(name=['foo'], reorder=['bar'], unique=['baz'])
id = toi.id[0]
self.sync()
ctx = self.pushnewctx()
toi, = blm.testcommit.Test._query(id=id).run()
assert not toi._attrData
toi._preload(['reorder', 'unique'])
assert not toi._attrData
assert toi.name.value # load
assert toi._attrData == {
'name': ['foo'],
'reorder': ['bar'],
'unique': ['baz'],
}
def test_setUser(self):
# Make sure contexts have user objects that are reliable from
# within the context itself:
# context.user should give you a TO which is equivalent to the
# one you'd get from a blm.User query
# Specifically, we do not want any stale data from an outdated
# context to linger in the object.
# Thus, we make sure to always create a fresh, context specific
# copy of the user TO in setUser().
user = blm.testcommit.User(name=['foo'])
user.allowRead = [user]
self.sync()
ctx = self.pushnewctx(ContextClass=commit.CommitContext, user=user)
user = ctx.user
user.name = ['not commited!']
with self.pushnewctx(user=user) as newctx:
assert newctx.user.id == user.id
assert newctx.user is not user
assert newctx.user.name == ['foo']
assert newctx.user.name != ['not commited!']
assert newctx.user in newctx.__instances__
class TestMaybeWithContext(object):
def test_with_no_context(self):
py.test.raises(Exception, lambda: ContextBroker().context) # sanity
database = object()
@context.maybe_with_context()
def foo(arg):
assert isinstance(ContextBroker().context, context.ReadonlyContext)
assert ContextBroker().context.database is database
return arg
obj = object()
assert foo(obj, database=database) is obj
def test_with_factory(self):
class MyContext(context.ReadonlyContext):
pass
@context.maybe_with_context(MyContext)
def foo():
assert isinstance(ContextBroker().context, MyContext)
foo(database=object())
def test_with_correct_context_class(self):
@context.maybe_with_context()
def foo():
return ContextBroker().context
with context.ReadonlyContext(object()) as ctx:
assert foo() is ctx
class MyContext(context.ReadonlyContext):
pass
with MyContext(object()) as ctx:
assert foo() is ctx
class WeirdContext(object):
database = object()
user = FakeUser()
def __enter__(self):
ContextBroker().pushContext(self)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
assert ContextBroker().context == self
ContextBroker().popContext()
with WeirdContext():
ctx = foo()
assert isinstance(ctx, context.ReadonlyContext)
assert ctx.database is WeirdContext.database
def test_with_custom_context_class(self):
class MyContext(context.ReadonlyContext):
pass
database = object()
user = FakeUser()
@context.maybe_with_context(MyContext)
def foo():
return ContextBroker().context
with context.ReadonlyContext(database, user):
ctx = foo()
assert isinstance(ctx, MyContext)
assert ctx.database is database
assert ctx.user is user
def test_no_database(self):
@context.maybe_with_context()
def foo():
return ContextBroker().context
py.test.raises(ValueError, foo)
|
the-stack_0_4490 | """
A stress-test of sorts for LLDB's handling of threads in the inferior.
This test sets a breakpoint in the main thread where test parameters (numbers of
threads) can be adjusted, runs the inferior to that point, and modifies the
locals that control the event thread counts. This test also sets a breakpoint in
breakpoint_func (the function executed by each 'breakpoint' thread) and a
watchpoint on a global modified in watchpoint_func. The inferior is continued
until exit or a crash takes place, and the number of events seen by LLDB is
verified to match the expected number of events.
"""
from __future__ import print_function
import unittest2
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class ConcurrentEventsBase(TestBase):
# Concurrency is the primary test factor here, not debug info variants.
NO_DEBUG_INFO_TESTCASE = True
def setUp(self):
# Call super's setUp().
super(ConcurrentEventsBase, self).setUp()
# Find the line number for our breakpoint.
self.filename = 'main.cpp'
self.thread_breakpoint_line = line_number(
self.filename, '// Set breakpoint here')
self.setup_breakpoint_line = line_number(
self.filename, '// Break here and adjust num')
self.finish_breakpoint_line = line_number(
self.filename, '// Break here and verify one thread is active')
def describe_threads(self):
ret = []
for x in self.inferior_process:
id = x.GetIndexID()
reason = x.GetStopReason()
status = "stopped" if x.IsStopped() else "running"
reason_str = lldbutil.stop_reason_to_str(reason)
if reason == lldb.eStopReasonBreakpoint:
bpid = x.GetStopReasonDataAtIndex(0)
bp = self.inferior_target.FindBreakpointByID(bpid)
reason_str = "%s hit %d times" % (
lldbutil.get_description(bp), bp.GetHitCount())
elif reason == lldb.eStopReasonWatchpoint:
watchid = x.GetStopReasonDataAtIndex(0)
watch = self.inferior_target.FindWatchpointByID(watchid)
reason_str = "%s hit %d times" % (
lldbutil.get_description(watch), watch.GetHitCount())
elif reason == lldb.eStopReasonSignal:
signals = self.inferior_process.GetUnixSignals()
signal_name = signals.GetSignalAsCString(
x.GetStopReasonDataAtIndex(0))
reason_str = "signal %s" % signal_name
location = "\t".join([lldbutil.get_description(
x.GetFrameAtIndex(i)) for i in range(x.GetNumFrames())])
ret.append(
"thread %d %s due to %s at\n\t%s" %
(id, status, reason_str, location))
return ret
def add_breakpoint(self, line, descriptions):
""" Adds a breakpoint at self.filename:line and appends its description to descriptions, and
returns the LLDB SBBreakpoint object.
"""
bpno = lldbutil.run_break_set_by_file_and_line(
self, self.filename, line, num_expected_locations=-1)
bp = self.inferior_target.FindBreakpointByID(bpno)
        descriptions.append(
            ": file = 'main.cpp', line = %d" % line)
return bp
def inferior_done(self):
""" Returns true if the inferior is done executing all the event threads (and is stopped at self.finish_breakpoint,
or has terminated execution.
"""
return self.finish_breakpoint.GetHitCount() > 0 or \
self.crash_count > 0 or \
self.inferior_process.GetState() == lldb.eStateExited
def count_signaled_threads(self):
count = 0
for thread in self.inferior_process:
if thread.GetStopReason() == lldb.eStopReasonSignal and thread.GetStopReasonDataAtIndex(
0) == self.inferior_process.GetUnixSignals().GetSignalNumberFromName('SIGUSR1'):
count += 1
return count
def do_thread_actions(self,
num_breakpoint_threads=0,
num_signal_threads=0,
num_watchpoint_threads=0,
num_crash_threads=0,
num_delay_breakpoint_threads=0,
num_delay_signal_threads=0,
num_delay_watchpoint_threads=0,
num_delay_crash_threads=0):
""" Sets a breakpoint in the main thread where test parameters (numbers of threads) can be adjusted, runs the inferior
to that point, and modifies the locals that control the event thread counts. Also sets a breakpoint in
breakpoint_func (the function executed by each 'breakpoint' thread) and a watchpoint on a global modified in
watchpoint_func. The inferior is continued until exit or a crash takes place, and the number of events seen by LLDB
is verified to match the expected number of events.
"""
exe = self.getBuildArtifact("a.out")
self.runCmd("file " + exe, CURRENT_EXECUTABLE_SET)
# Get the target
self.inferior_target = self.dbg.GetSelectedTarget()
expected_bps = []
# Initialize all the breakpoints (main thread/aux thread)
self.setup_breakpoint = self.add_breakpoint(
self.setup_breakpoint_line, expected_bps)
self.finish_breakpoint = self.add_breakpoint(
self.finish_breakpoint_line, expected_bps)
# Set the thread breakpoint
if num_breakpoint_threads + num_delay_breakpoint_threads > 0:
self.thread_breakpoint = self.add_breakpoint(
self.thread_breakpoint_line, expected_bps)
# Verify breakpoints
self.expect(
"breakpoint list -f",
"Breakpoint locations shown correctly",
substrs=expected_bps)
# Run the program.
self.runCmd("run", RUN_SUCCEEDED)
# Check we are at line self.setup_breakpoint
self.expect("thread backtrace", STOPPED_DUE_TO_BREAKPOINT,
substrs=["stop reason = breakpoint 1."])
# Initialize the (single) watchpoint on the global variable (g_watchme)
if num_watchpoint_threads + num_delay_watchpoint_threads > 0:
self.runCmd("watchpoint set variable g_watchme")
for w in self.inferior_target.watchpoint_iter():
self.thread_watchpoint = w
self.assertTrue(
"g_watchme" in str(
self.thread_watchpoint),
"Watchpoint location not shown correctly")
# Get the process
self.inferior_process = self.inferior_target.GetProcess()
# We should be stopped at the setup site where we can set the number of
# threads doing each action (break/crash/signal/watch)
self.assertEqual(
self.inferior_process.GetNumThreads(),
1,
'Expected to stop before any additional threads are spawned.')
self.runCmd("expr num_breakpoint_threads=%d" % num_breakpoint_threads)
self.runCmd("expr num_crash_threads=%d" % num_crash_threads)
self.runCmd("expr num_signal_threads=%d" % num_signal_threads)
self.runCmd("expr num_watchpoint_threads=%d" % num_watchpoint_threads)
self.runCmd(
"expr num_delay_breakpoint_threads=%d" %
num_delay_breakpoint_threads)
self.runCmd(
"expr num_delay_crash_threads=%d" %
num_delay_crash_threads)
self.runCmd(
"expr num_delay_signal_threads=%d" %
num_delay_signal_threads)
self.runCmd(
"expr num_delay_watchpoint_threads=%d" %
num_delay_watchpoint_threads)
# Continue the inferior so threads are spawned
self.runCmd("continue")
# Make sure we see all the threads. The inferior program's threads all synchronize with a pseudo-barrier; that is,
# the inferior program ensures all threads are started and running
# before any thread triggers its 'event'.
num_threads = self.inferior_process.GetNumThreads()
expected_num_threads = num_breakpoint_threads + num_delay_breakpoint_threads \
+ num_signal_threads + num_delay_signal_threads \
+ num_watchpoint_threads + num_delay_watchpoint_threads \
+ num_crash_threads + num_delay_crash_threads + 1
self.assertEqual(
num_threads,
expected_num_threads,
'Expected to see %d threads, but seeing %d. Details:\n%s' %
(expected_num_threads,
num_threads,
"\n\t".join(
self.describe_threads())))
self.signal_count = self.count_signaled_threads()
self.crash_count = len(
lldbutil.get_crashed_threads(
self, self.inferior_process))
# Run to completion (or crash)
while not self.inferior_done():
if self.TraceOn():
self.runCmd("thread backtrace all")
self.runCmd("continue")
self.signal_count += self.count_signaled_threads()
self.crash_count += len(
lldbutil.get_crashed_threads(
self, self.inferior_process))
if num_crash_threads > 0 or num_delay_crash_threads > 0:
# Expecting a crash
self.assertTrue(
self.crash_count > 0,
"Expecting at least one thread to crash. Details: %s" %
"\t\n".join(
self.describe_threads()))
# Ensure the zombie process is reaped
self.runCmd("process kill")
elif num_crash_threads == 0 and num_delay_crash_threads == 0:
# There should be a single active thread (the main one) which hit
# the breakpoint after joining
self.assertEqual(
1,
self.finish_breakpoint.GetHitCount(),
"Expected main thread (finish) breakpoint to be hit once")
num_threads = self.inferior_process.GetNumThreads()
self.assertEqual(
1,
num_threads,
"Expecting 1 thread but seeing %d. Details:%s" %
(num_threads,
"\n\t".join(
self.describe_threads())))
self.runCmd("continue")
# The inferior process should have exited without crashing
self.assertEqual(
0,
self.crash_count,
"Unexpected thread(s) in crashed state")
self.assertEqual(
self.inferior_process.GetState(),
lldb.eStateExited,
PROCESS_EXITED)
# Verify the number of actions took place matches expected numbers
expected_breakpoint_threads = num_delay_breakpoint_threads + num_breakpoint_threads
breakpoint_hit_count = self.thread_breakpoint.GetHitCount(
) if expected_breakpoint_threads > 0 else 0
self.assertEqual(
expected_breakpoint_threads,
breakpoint_hit_count,
"Expected %d breakpoint hits, but got %d" %
(expected_breakpoint_threads,
breakpoint_hit_count))
expected_signal_threads = num_delay_signal_threads + num_signal_threads
self.assertEqual(
expected_signal_threads,
self.signal_count,
"Expected %d stops due to signal delivery, but got %d" %
(expected_signal_threads,
self.signal_count))
expected_watchpoint_threads = num_delay_watchpoint_threads + num_watchpoint_threads
watchpoint_hit_count = self.thread_watchpoint.GetHitCount(
) if expected_watchpoint_threads > 0 else 0
self.assertEqual(
expected_watchpoint_threads,
watchpoint_hit_count,
"Expected %d watchpoint hits, got %d" %
(expected_watchpoint_threads,
watchpoint_hit_count))
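# A sketch (not part of the original file) of how a concrete test case would drive
# do_thread_actions(); the class name below is illustrative.
#
# class ConcurrentTwoBreakpointThreads(ConcurrentEventsBase):
#     def test(self):
#         self.build()
#         self.do_thread_actions(num_breakpoint_threads=2)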
|
the-stack_0_4491 | import socket
host = '127.0.0.1'
port = 4000
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client.connect((host, port))
client.send(b'Hello! Is there anybody in there?!')
response = client.recv(4096)
print(response)
|
the-stack_0_4496 | import argparse
import datetime
import logging
import requests
import rdflib
import sys
import tempfile
import time
import urllib
import xml.etree.ElementTree as ET
def run():
parser = argparse.ArgumentParser(description='Finds vocabulary concepts (identifying them by a namespace) in a triplestore and enriches the triplestore with full concept definitions fetched from their URIs (assuming requesting concept\'s URI with HTTP Accept text/turtle header will provide concept\'s data in the turtle format)')
parser.add_argument('sparqlUrl', help="Triplestore's SPARQL endpoint URL")
parser.add_argument('conceptsNamespace', help="URI namespace of RDF nodes to be processed")
parser.add_argument('--sparqlUser', help='HTTP basic auth user name to be used when communicating with the triplestore')
parser.add_argument('--sparqlPswd', help='HTTP basic auth password to be used when communicating with the triplestore')
parser.add_argument('--sparqlGraph', help='Process only a given triplestore graph')
parser.add_argument('--verbose', action='store_true')
args = parser.parse_args()
logging.getLogger("urllib3").setLevel(logging.WARNING)
logging.getLogger("requests").setLevel(logging.WARNING)
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG if args.verbose else logging.INFO)
harvester = VocabularyHarvester(args)
harvester.harvest()
class VocabularyHarvester:
sparqlUrl = None
sparqlAuth = None
sparqlGraph = None
conceptsNmsp = None
def __init__(self, args):
self.sparqlUrl = args.sparqlUrl
if args.sparqlGraph:
self.sparqlGraph = rdflib.term.URIRef(args.sparqlGraph).n3()
self.conceptsNmsp = rdflib.term.Literal(args.conceptsNamespace).n3()
        if args.sparqlUser and args.sparqlPswd:
self.sparqlAuth = requests.auth.HTTPBasicAuth(args.sparqlUser, args.sparqlPswd)
def harvest(self):
fromGraph = "from named %s" % self.sparqlGraph if self.sparqlGraph else ''
query = """
select distinct ?g ?o %s
where {
graph ?g {
?s ?p ?o
filter strstarts(str(?o), %s)
}
}
""" % (fromGraph, self.conceptsNmsp)
response = requests.post(self.sparqlUrl, data={"query": query}, headers={"Accept": "application/json"}, auth=self.sparqlAuth)
if response.status_code != 200:
logging.error("Failed to find concepts in the triplestore with status code %d and response body: %s" % (response.status_code, response.text))
data = response.json()
for i in data['results']['bindings']:
logging.info("Fetching concept %s" % i['o']['value'])
try:
conceptGraph = self.fetchConcept(i['o']['value'])
#print(conceptGraph.serialize(format='turtle'))
self.updateTriplestore(conceptGraph, i['g']['value'])
except Exception as e:
logging.warning("Failed to fetch data for concept %s:\n %s" % (i['o']['value'], str(e)))
def fetchConcept(self, url):
response = requests.get(url, headers={"Accept": "text/turtle"})
graph = rdflib.Graph()
graph.parse(data=response.text, format='turtle')
return graph
def updateTriplestore(self, conceptGraph, graph):
graph = rdflib.term.URIRef(graph).n3()
query = "INSERT DATA { GRAPH " + graph + " { " + conceptGraph.serialize(format='nt') + " } }"
response = requests.post(self.sparqlUrl, data={'update': query}, auth=self.sparqlAuth)
if response.status_code != 200:
raise Exception("Sending data to the triplestore failed with code %d and response body: %s" % (response.status_code, response.text))
|
the-stack_0_4497 | # stdlib
from typing import Any
from typing import Optional
# third party
from torch import device
# relative
from ...core.common.serde.serializable import serializable
from ...proto.lib.torch.device_pb2 import Device as Device_PB
# use -2 to represent index=None
INDEX_NONE = -2
def object2proto(obj: device) -> "Device_PB":
proto = Device_PB()
proto.type = obj.type
proto.index = INDEX_NONE if obj.index is None else obj.index
return proto
def proto2object(proto: "Device_PB") -> Any:
device_type = proto.type
index: Optional[int] = None if proto.index == INDEX_NONE else proto.index
obj = device(device_type, index)
return obj
serializable(generate_wrapper=True)(
wrapped_type=device,
import_path="torch.device",
protobuf_scheme=Device_PB,
type_object2proto=object2proto,
type_proto2object=proto2object,
)
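# Illustrative round trip (not in the original source):
# proto = object2proto(device("cuda", 0))    # proto.type == "cuda", proto.index == 0
# proto2object(proto)                        # -> device(type='cuda', index=0)
# object2proto(device("cpu")).index          # -> INDEX_NONE (-2), i.e. index is None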
|
the-stack_0_4500 | from base64 import b64encode
from base64 import b64decode
from threading import local
import boto3
import six
__all__ = [
'_as_bytes',
'b64_str',
'from_b64_str',
'_get_client',
'_prefix_alias',
]
thread_local = local()
thread_local.sessions = {}
def _as_bytes(value):
if isinstance(value, six.string_types):
value = value.encode('utf-8')
return value
def b64_str(value: bytes):
return b64encode(value).decode('utf-8')
def from_b64_str(value: str):
value = value.encode('utf-8')
return b64decode(value)
def _get_client(region: str = None, profile: str = None):
    key = f'{region}-{profile}'
    # Attributes on a threading.local are per-thread; the module-level initialisation
    # above only covers the importing thread, so create the cache lazily elsewhere.
    sessions = getattr(thread_local, 'sessions', None)
    if sessions is None:
        sessions = thread_local.sessions = {}
    client = sessions.get(key)
    if not client:
        session = boto3.Session(region_name=region, profile_name=profile)
        client = session.client('kms')
        sessions[key] = client
return client
def _prefix_alias(alias: str):
if not alias.startswith('alias/'):
alias = f'alias/{alias}'
return alias
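# Illustrative usage (not in the original module):
# token = b64_str(_as_bytes('secret'))    # str -> bytes -> base64 str
# assert from_b64_str(token) == b'secret'
# _prefix_alias('my-key')                 # -> 'alias/my-key'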
|
the-stack_0_4502 | #! /usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import math
import unittest
import torch
from botorch import fit_gpytorch_model
from botorch.models import SingleTaskGP
from botorch.optim.fit import (
OptimizationIteration,
fit_gpytorch_scipy,
fit_gpytorch_torch,
)
from gpytorch.mlls.exact_marginal_log_likelihood import ExactMarginalLogLikelihood
NOISE = [0.127, -0.113, -0.345, -0.034, -0.069, -0.272, 0.013, 0.056, 0.087, -0.081]
class TestFitGPyTorchModel(unittest.TestCase):
def _getModel(self, double=False, cuda=False):
device = torch.device("cuda") if cuda else torch.device("cpu")
dtype = torch.double if double else torch.float
train_x = torch.linspace(0, 1, 10, device=device, dtype=dtype).unsqueeze(-1)
noise = torch.tensor(NOISE, device=device, dtype=dtype)
train_y = torch.sin(train_x.view(-1) * (2 * math.pi)) + noise
model = SingleTaskGP(train_x, train_y)
mll = ExactMarginalLogLikelihood(model.likelihood, model)
return mll.to(device=device, dtype=dtype)
def test_fit_gpytorch_model(self, cuda=False, optimizer=fit_gpytorch_scipy):
options = {"disp": False, "maxiter": 5}
for double in (False, True):
mll = self._getModel(double=double, cuda=cuda)
mll = fit_gpytorch_model(mll, optimizer=optimizer, options=options)
model = mll.model
# Make sure all of the parameters changed
self.assertGreater(model.likelihood.raw_noise.abs().item(), 1e-3)
self.assertLess(model.mean_module.constant.abs().item(), 0.1)
self.assertGreater(
model.covar_module.base_kernel.raw_lengthscale.abs().item(), 0.1
)
self.assertGreater(model.covar_module.raw_outputscale.abs().item(), 1e-3)
# test overriding the default bounds with user supplied bounds
mll = self._getModel(double=double, cuda=cuda)
mll = fit_gpytorch_model(
mll,
optimizer=optimizer,
options=options,
bounds={"likelihood.noise_covar.raw_noise": (1e-1, None)},
)
model = mll.model
self.assertGreaterEqual(model.likelihood.raw_noise.abs().item(), 1e-1)
self.assertLess(model.mean_module.constant.abs().item(), 0.1)
self.assertGreater(
model.covar_module.base_kernel.raw_lengthscale.abs().item(), 0.1
)
self.assertGreater(model.covar_module.raw_outputscale.abs().item(), 1e-3)
# test tracking iterations
mll = self._getModel(double=double, cuda=cuda)
if optimizer is fit_gpytorch_torch:
options["disp"] = True
mll, iterations = optimizer(mll, options=options, track_iterations=True)
self.assertEqual(len(iterations), options["maxiter"])
self.assertIsInstance(iterations[0], OptimizationIteration)
# test extra param that does not affect loss
options["disp"] = False
mll = self._getModel(double=double, cuda=cuda)
mll.register_parameter(
"dummy_param",
torch.nn.Parameter(
torch.tensor(
[5.0],
dtype=torch.double if double else torch.float,
device=torch.device("cuda" if cuda else "cpu"),
)
),
)
mll = fit_gpytorch_model(mll, optimizer=optimizer, options=options)
self.assertTrue(mll.dummy_param.grad is None)
def test_fit_gpytorch_model_scipy_cuda(self):
if torch.cuda.is_available():
self.test_fit_gpytorch_model(cuda=True)
def test_fit_gpytorch_model_torch(self, cuda=False):
self.test_fit_gpytorch_model(cuda=cuda, optimizer=fit_gpytorch_torch)
def test_fit_gpytorch_model_torch_cuda(self):
if torch.cuda.is_available():
self.test_fit_gpytorch_model_torch(cuda=True)
|
the-stack_0_4503 | # SPDX-License-Identifier: Apache-2.0
#
# Copyright (C) 2015, ARM Limited, Google and contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import re
import os
import logging
from time import sleep
from target_script import TargetScript
from android import Screen, System
from android.workload import Workload
class CameraStartup(Workload):
"""
Android CameraStartup workload
This workload is intended to be used to collect traces of the camera starting
up to debug issues related to camera startup. For this reason, the camera app
is started after sleeping for 3 seconds after tracingStart. For this same
reason, energy cannot be collected since that disconnects USB.
"""
# Package required by this workload
package = 'com.google.android.GoogleCamera'
action = 'android.intent.action.MAIN'
def __init__(self, test_env):
super(CameraStartup, self).__init__(test_env)
self._log = logging.getLogger('CameraStartup')
self._log.debug('Workload created')
def run(self, out_dir, duration_s=10, collect='systrace'):
"""
Run a camera startup workload
:param out_dir: Path to experiment directory where to store results.
:type out_dir: str
:param duration_s: Duration of test
:type duration_s: int
:param collect: Specifies what to collect. Possible values:
- 'energy'
- 'systrace'
- 'ftrace'
- any combination of the above
:type collect: list(str)
"""
if 'energy' in collect:
raise RuntimeError('CameraStartup cannot do energy collection as app is started after tracingStart')
self._log.info("Running CameraStartup for {}s and collecting {}".format(duration_s, collect))
# Keep track of mandatory parameters
self.out_dir = out_dir
self.collect = collect
# Unlock device screen (assume no password required)
Screen.unlock(self._target)
# Set airplane mode
System.set_airplane_mode(self._target, on=True)
# Set min brightness
Screen.set_brightness(self._target, auto=False, percent=0)
# Force screen in PORTRAIT mode
Screen.set_orientation(self._target, portrait=True)
sleep(1)
self.tracingStart()
# Wait for a few seconds so that you can clear see start of trace and start of camera app
sleep(3)
# Use the monkey tool to start CameraStartup
System.monkey(self._target, self.package)
sleep(duration_s)
self.tracingStop()
# Close the app without clearing the local data to
# avoid the dialog to select the account at next start
System.force_stop(self._target, self.package, clear=False)
# Go back to home screen
System.home(self._target)
# Set brightness back to auto
Screen.set_brightness(self._target, auto=True)
# Switch back to screen auto rotation
Screen.set_orientation(self._target, auto=True)
# Switch off airplane mode
System.set_airplane_mode(self._target, on=False)
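# Illustrative usage from a LISA test environment `te` (hypothetical):
#   CameraStartup(te).run(out_dir='/tmp/camera_startup', duration_s=10,
#                         collect='systrace')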
|
the-stack_0_4504 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# #
# RMG - Reaction Mechanism Generator #
# #
# Copyright (c) 2002-2018 Prof. William H. Green ([email protected]), #
# Prof. Richard H. West ([email protected]) and the RMG Team ([email protected]) #
# #
# Permission is hereby granted, free of charge, to any person obtaining a #
# copy of this software and associated documentation files (the 'Software'), #
# to deal in the Software without restriction, including without limitation #
# the rights to use, copy, modify, merge, publish, distribute, sublicense, #
# and/or sell copies of the Software, and to permit persons to whom the #
# Software is furnished to do so, subject to the following conditions: #
# #
# The above copyright notice and this permission notice shall be included in #
# all copies or substantial portions of the Software. #
# #
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING #
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER #
# DEALINGS IN THE SOFTWARE. #
# #
###############################################################################
import os
import numpy as np
import logging
import shutil
from copy import deepcopy
import rmgpy
from rmgpy.rmg.main import RMG
from rmgpy.rmg.model import CoreEdgeReactionModel
from rmgpy.data.rmg import getDB
from rmgpy.exceptions import InputError
################################################################################
class ExplorerJob(object):
def __init__(self, source, pdepjob, explore_tol, energy_tol=np.inf, flux_tol=0.0,
bathGas=None, maximumRadicalElectrons=np.inf):
self.source = source
self.explore_tol = explore_tol
self.energy_tol = energy_tol
self.flux_tol = flux_tol
self.maximumRadicalElectrons = maximumRadicalElectrons
self.pdepjob = pdepjob
if not hasattr(self.pdepjob,'outputFile'):
self.pdepjob.outputFile = None
if bathGas:
self.bathGas = bathGas
elif self.pdepjob.network and self.pdepjob.network.bathGas:
self.bathGas = self.pdepjob.network.bathGas
else:
raise InputError('bathGas not specified in explorer block')
def copy(self):
"""
Return a copy of the explorer job.
"""
return ExplorerJob(
source=deepcopy(self.source),
pdepjob=self.pdepjob,
explore_tol=self.explore_tol,
energy_tol=self.energy_tol,
flux_tol=self.flux_tol
)
def execute(self, outputFile, plot, format='pdf', print_summary=True, speciesList=None, thermoLibrary=None, kineticsLibrary=None):
logging.info('Exploring network...')
rmg = RMG()
rmg.speciesConstraints = {'allowed' : ['input species', 'seed mechanisms', 'reaction libraries'], 'maximumRadicalElectrons' : self.maximumRadicalElectrons, 'explicitlyAllowedMolecules': []}
rmgpy.rmg.input.rmg = rmg
reaction_model = CoreEdgeReactionModel()
reaction_model.pressureDependence = self.pdepjob
reaction_model.pressureDependence.rmgmode = True
if outputFile:
reaction_model.pressureDependence.outputFile = os.path.dirname(outputFile)
kineticsDatabase = getDB('kinetics')
thermoDatabase = getDB('thermo')
thermoDatabase.libraries['thermojobs'] = thermoLibrary
thermoDatabase.libraryOrder.insert(0,'thermojobs')
kineticsDatabase.libraries['kineticsjobs'] = kineticsLibrary
kineticsDatabase.libraryOrder.insert(0,('kineticsjobs','Reaction Library'))
jobRxns = [rxn for rxn in reaction_model.core.reactions]
self.jobRxns = jobRxns
if outputFile is not None:
if not os.path.exists(os.path.join(reaction_model.pressureDependence.outputFile,'pdep')):
os.mkdir(os.path.join(reaction_model.pressureDependence.outputFile,'pdep'))
else:
shutil.rmtree(os.path.join(reaction_model.pressureDependence.outputFile,'pdep'))
os.mkdir(os.path.join(reaction_model.pressureDependence.outputFile,'pdep'))
# get the molecular formula for the network
mmol = None
for spc in self.source:
if mmol:
mmol.merge(spc.molecule[0])
else:
mmol = spc.molecule[0]
form = mmol.getFormula()
for spec in self.bathGas.keys()+self.source:
nspec,isNew = reaction_model.makeNewSpecies(spec,reactive=False)
flags = np.array([s.molecule[0].getFormula()==form for s in reaction_model.core.species])
reaction_model.enlarge(nspec,reactEdge=False,unimolecularReact=flags,
bimolecularReact=np.zeros((len(reaction_model.core.species),len(reaction_model.core.species))))
reaction_model.addSeedMechanismToCore('kineticsjobs')
for lib in kineticsDatabase.libraryOrder:
if lib[0] != 'kineticsjobs':
reaction_model.addReactionLibraryToEdge(lib[0])
for spc in reaction_model.core.species:
for i,item in enumerate(self.source):
if spc.isIsomorphic(item):
self.source[i] = spc
# react initial species
flags = np.array([s.molecule[0].getFormula()==form for s in reaction_model.core.species])
reaction_model.enlarge(reactEdge=True,unimolecularReact=flags,
bimolecularReact=np.zeros((len(reaction_model.core.species),len(reaction_model.core.species))))
# find the network we're interested in
for nwk in reaction_model.networkList:
if set(nwk.source) == set(self.source):
self.source = nwk.source
network = nwk
break
else:
raise ValueError('Did not generate a network with the requested source. This usually means no unimolecular'
'reactions were generated for the source. Note that library reactions that are not'
' properly flagged as elementary_high_p can replace RMG generated reactions that would'
' otherwise be part of networks.')
network.bathGas = self.bathGas
self.network = network
# determine T and P combinations
if self.pdepjob.Tlist:
Tlist = self.pdepjob.Tlist.value_si
else:
Tlist = np.linspace(self.pdepjob.Tmin.value_si,self.pdepjob.Tmax.value_si,self.pdepjob.Tcount)
if self.pdepjob.Plist:
Plist = self.pdepjob.Plist.value_si
else:
Plist = np.linspace(self.pdepjob.Pmin.value_si,self.pdepjob.Pmax.value_si,self.pdepjob.Pcount)
# generate the network
forbiddenStructures = getDB('forbidden')
incomplete = True
while incomplete:
incomplete = False
for T in Tlist:
for P in Plist:
if network.getLeakCoefficient(T=T,P=P) > self.explore_tol:
incomplete = True
spc = network.getMaximumLeakSpecies(T=T,P=P)
if forbiddenStructures.isMoleculeForbidden(spc.molecule[0]):
reaction_model.removeSpeciesFromEdge(reaction_model.reactionSystems,spc)
reaction_model.removeEmptyPdepNetworks()
logging.error(spc.label)
else:
logging.info('adding new isomer {0} to network'.format(spc))
flags = np.array([s.molecule[0].getFormula()==form for s in reaction_model.core.species])
reaction_model.enlarge((network,spc),reactEdge=False,unimolecularReact=flags,
bimolecularReact=np.zeros((len(reaction_model.core.species),len(reaction_model.core.species))))
flags = np.array([s.molecule[0].getFormula()==form for s in reaction_model.core.species])
reaction_model.enlarge(reactEdge=True,unimolecularReact=flags,
bimolecularReact=np.zeros((len(reaction_model.core.species),len(reaction_model.core.species))))
rmRxns = []
for rxn in network.pathReactions: # remove reactions with forbidden species
for r in rxn.reactants+rxn.products:
if forbiddenStructures.isMoleculeForbidden(r.molecule[0]):
rmRxns.append(rxn)
for rxn in rmRxns:
logging.info('Removing forbidden reaction: {0}'.format(rxn))
network.pathReactions.remove(rxn)
# clean up output files
if outputFile is not None:
path = os.path.join(reaction_model.pressureDependence.outputFile,'pdep')
for name in os.listdir(path):
if name.endswith('.py') and '_' in name:
if name.split('_')[-1].split('.')[0] != str(len(network.isomers)):
os.remove(os.path.join(path,name))
else:
os.rename(os.path.join(path,name),os.path.join(path,'network_full.py'))
warns = []
for rxn in jobRxns:
if rxn not in network.pathReactions:
warns.append('Reaction {0} in the input file was not explored during network expansion and was not included in the full network. This is likely because your explore_tol value is too high.'.format(rxn))
# reduction process
if self.energy_tol != np.inf or self.flux_tol != 0.0:
rxnSet = None
for T in Tlist:
if self.energy_tol != np.inf:
rxns = network.get_energy_filtered_reactions(T,self.energy_tol)
if rxnSet is not None:
rxnSet &= set(rxns)
else:
rxnSet = set(rxns)
for P in Plist:
if self.flux_tol != 0.0:
rxns = network.get_rate_filtered_reactions(T,P,self.flux_tol)
if rxnSet is not None:
rxnSet &= set(rxns)
else:
rxnSet = set(rxns)
logging.info('removing reactions during reduction:')
for rxn in rxnSet:
logging.info(rxn)
network.remove_reactions(reaction_model,list(rxnSet))
for rxn in jobRxns:
if rxn not in network.pathReactions:
warns.append('Reaction {0} in the input file was not included in the reduced model.'.format(rxn))
self.network = network
self.pdepjob.network = network
self.pdepjob.execute(outputFile, plot, format='pdf', print_summary=True)
if warns != []:
logging.info('\nOUTPUT WARNINGS:\n')
for w in warns:
logging.warning(w)
|
the-stack_0_4505 |
import psycopg2
import psycopg2.extras
from ..sql import SqlMinqlClient
class PostgresqlMinqlClient(SqlMinqlClient):
def __init__(self, address, name, user, password, *args, **kwargs):
url, port = address.split(':')
params = "dbname='%s' user='%s' password='%s' host='%s' port='%s'" % (name, user, password, url, port)
self.connection = psycopg2.connect(params)
self.print_values_query = False
super(PostgresqlMinqlClient, self).__init__(*args, **kwargs)
# TODO find out how to do the same in hyperdex
# and add it to the interface?
    def get_tables(self):
cur = self.connection.cursor()
cur.execute("""SELECT datname from pg_database""")
rows = cur.fetchall()
tables = []
for row in rows:
tables.append(row[0])
return tables
def create_table(self, table_name, schema):
print('Creating PostgreSQL table %s' % table_name)
attrs = []
for key, value in schema.iteritems():
attr = '"%s" ' % key
if value['type'] == 'string':
attr += 'VARCHAR(500)'
elif value['type'] == 'float':
attr += 'REAL'
elif value['type'] == 'int':
attr += 'INT'
elif value['type'] == 'text':
attr += 'TEXT'
else:
raise NotImplementedError
if value['required']:
attr += ' NOT NULL'
attrs.append(attr)
query = '''
CREATE TABLE "%s" (
id VARCHAR(100) PRIMARY KEY NOT NULL''' % table_name
if attrs:
query += ', \n' + ', \n'.join(attrs)
query += ');'
cur = self.connection.cursor()
print(query)
cur.execute(query)
self.connection.commit()
for key, value in schema.iteritems():
if 'index' in value and value['index']:
query = 'CREATE INDEX "%s_%s_index" ON "%s" ("%s");' % (
table_name, key, table_name, key)
print(query)
cur = self.connection.cursor()
cur.execute(query)
self.connection.commit()
# TODO postgres doesn't drop tables
def _drop_table(self, table_name):
print('postgres drop table', table_name)
query = 'DROP TABLE IF EXISTS "%s"' % table_name
print(query)
cur = self.connection.cursor()
self.connection.set_isolation_level(0)
# cur.execute('ALTER TABLE "%s" DROP CONSTRAINT "%s"' % (
# table_name, table_name))
cur.execute(query)
self.connection.commit()
def get_criteria_string(self, criteria):
if criteria:
crit = []
for attr, value in criteria.iteritems():
if type(value) is dict:
# TODO replace 'ge' with '>=' everywhere
if 'ge' in value:
criterion = '%s >= %s' % (attr, str(value['ge']))
if 'le' in value:
criterion = '%s <= %s' % (attr, str(value['le']))
if 'gt' in value:
criterion = '%s > %s' % (attr, str(value['gt']))
if 'lt' in value:
criterion = '%s < %s' % (attr, str(value['lt']))
elif isinstance(value, basestring):
criterion = "%s = '%s'" % (attr, value)
else:
criterion = '%s = %s' % (attr, str(value))
crit.append(criterion)
return ' where ' + ' and '.join(crit)
else:
return ''
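    # Illustrative mapping (not in the original; dict iteration order may vary):
    # get_criteria_string({'age': {'ge': 21}, 'name': 'ada'})
    # -> " where age >= 21 and name = 'ada'"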
def search(self, table_name, criteria={}):
cur = self.connection.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
query = 'SELECT * from "%s"' % table_name
query += self.get_criteria_string(criteria)
print(query)
cur.execute(query)
return cur.fetchall()
def delete(self, table_name, criteria):
cur = self.connection.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
query = 'DELETE FROM "%s"' % table_name
query += self.get_criteria_string(criteria)
print(query)
cur.execute(query)
def update(self, table_name, row):
assert 'id' in row and row['id'], 'The row needs an id field.'
cur = self.connection.cursor()
updates = []
for key, value in row.iteritems():
if key != 'id':
if isinstance(value, basestring):
val = "'%s'" % value
else:
val = str(value)
updates.append('%s = %s' % (key, val) )
prequery = 'UPDATE "%s"' % table_name
query = '%s SET %s' % (
prequery, ', '.join(updates))
postquery = " where id = '%s'" % row['id']
query += postquery
if self.print_values_query:
print(query)
else:
print(prequery, postquery)
cur.execute(query)
self.connection.commit()
return row
def insert(self, table_name, row):
cur = self.connection.cursor()
values = []
for value in row.values():
if isinstance(value, basestring):
values.append( "'%s'" % value )
else:
values.append( str(value) )
query = 'INSERT INTO "%s" (%s)' % (
table_name,
', '.join(row.keys()),
)
print(query)
query = '%s VALUES (%s)' % (
query,
', '.join(values)
)
if self.print_values_query:
print(query)
cur.execute(query)
self.connection.commit()
return row
def _get(self, table_name, id):
cur = self.connection.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
query = 'SELECT * from "%s"' % table_name
query += " where id = '%s'" % id
print(query)
cur.execute(query)
return cur.fetchone()
|
the-stack_0_4506 | import random
from models import model
def create_room(room_type, room_name, dojo):
"""
    input : room_type -> string representing the type of room
            room_name -> string representing the name of the room
    output : returns -> a Room of that type named room_name,
                        'duplicates' if room_name already exists in the dojo,
                        'Invalid name' if room_name is empty or not alphanumeric
             Raises -> TypeError if room_type is empty or not a known room type
"""
# remove excess white charcters
room_name_stripped = room_name.strip()
room_type_stripped = room_type.strip()
if len(room_type_stripped) == 0:
raise TypeError
room_type_cleaned = room_type_stripped
if len(room_name_stripped) == 0 or not room_name_stripped.isalnum():
return 'Invalid name'
room_name_cleaned = room_name_stripped
# map room_type to respective data type
datatype = {'office': model.Office, 'livingspace': model.LivingSpace}
if not room_type_cleaned.lower() in datatype:
raise TypeError
if room_name_cleaned in dojo.takken_names:
return 'duplicates'
return datatype[room_type_cleaned.lower()](room_name_cleaned)
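# Illustrative calls (hypothetical `dojo` instance):
# create_room('office', 'Blue', dojo)   # -> model.Office('Blue')
# create_room('office', 'Blue', dojo)   # -> 'duplicates' if 'Blue' is in dojo.takken_names
# create_room('office', '***', dojo)    # -> 'Invalid name'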
def add_person(names, person_type, wants_livingspace='N'):
"""
input: firstname lastname Fellow/Staff [Y]
"""
# validate fields data types
if not isinstance(names, tuple) or not isinstance(person_type, str) or\
not isinstance(wants_livingspace, str):
raise TypeError
# validate person_type
person_type = person_type.lower().strip()
if person_type not in ["fellow", "staff"]:
raise TypeError
# validate name
name1 = names[0].strip().lower()
name2 = names[1].strip().lower()
if not name1.isalnum() or not name2.isalnum():
return "Invalid name"
name = name1 + " " + name2
# validate wants_livingspace
wants_livingspace = wants_livingspace.strip().lower()
if wants_livingspace not in 'yn' and person_type == "fellow":
return "Invalid choice"
choice = True if wants_livingspace == 'y' else False
if person_type == 'staff':
new_person = model.Staff(name)
new_person.office = False
else:
new_person = model.Fellow(name, choice)
new_person.livingspace = False
new_person.office = False
new_person.wants_living = False
if choice:
new_person.wants_living = True
return new_person
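# Illustrative calls (not part of the original module):
# add_person(('Jane', 'Doe'), 'fellow', 'Y')   # Fellow who wants a livingspace
# add_person(('John', 'Doe'), 'staff')         # Staff, office allocation only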
def allocate_room(new_person, dojo):
"""
allocates a room to new_person
Returns a dictionary of status messages about success of adding to rooms
"""
status_messages = {'office': None, 'livingspace': None}
if new_person == 'Invalid name':
status_messages['status'] = 'Invalid name'
return status_messages
elif new_person == "Invalid choice":
status_messages['status'] = 'Invalid choice'
return status_messages
elif isinstance(new_person, model.Fellow):
if new_person.wants_living:
status_messages['livingspace'] = allocate_livingspace(new_person,
dojo=dojo)
dojo.add_fellow(new_person)
status_messages['person_type'] = 'fellow'
else:
dojo.add_staff(new_person)
status_messages['person_type'] = 'staff'
status_messages['office'] = allocate_office(new_person, dojo=dojo)
return status_messages
def allocate_office(new_person, dojo, name_office=None):
'''
allocates office to new person_type
Returns name of office if added else None
'''
if not name_office:
name_office = choose_office_random(dojo)
office = dojo.get_office(name_office)
if name_office != "NoRoomException" and not office.is_full():
dojo.add_person_office(name_office, new_person)
new_person.office = True
name_office = office.name
else:
name_office = None
return name_office
def allocate_livingspace(new_person, dojo, name_livingspace=None):
'''
allocates livingspace to new_person
Returns name of living space if added else None
'''
if not name_livingspace:
name_livingspace = choose_living_space_random(dojo)
livingspace = dojo.get_livingspace(name_livingspace)
if name_livingspace == "NoRoomException" or livingspace.is_full():
name_livingspace = None
elif new_person.wants_living:
dojo.add_fellow_living(name_livingspace, new_person)
new_person.livingspace = True
name_livingspace = livingspace.name
else:
name_livingspace = None
return name_livingspace
def choose_office_random(dojo):
"""
choose an office at random
"""
number_of_offices = len(dojo.office)
if number_of_offices > 0:
index = random.randrange(number_of_offices)
else:
return "NoRoomException"
list_offices = list(dojo.office)
return list_offices[index].name
def choose_living_space_random(dojo):
"""
choose a livingspace at random
"""
number_of_livingspace = len(dojo.livingspace)
if number_of_livingspace > 0:
index = random.randrange(number_of_livingspace)
else:
return "NoRoomException"
list_livingspace = list(dojo.livingspace)
return list_livingspace[index].name
class NoRoomException(Exception):
pass
def save_data_text(file_name, data, mode='wt'):
if file_name[len(file_name) - 4:] != '.txt':
file_name = file_name + '.txt'
file_out = open(file_name, mode)
for name in data:
print(name, file=file_out)
file_out.close()
def load_data_text(file_name):
data = []
raw_data = open(file_name, 'rt')
while True:
line = raw_data.readline()
if not line:
break
data.append(line.split())
return data
def deallocate_person(room_type, person, office=None, livingspace=None):
deallocation = None
if room_type == 'O' and office:
deallocation = deallocate_office(person, office)
elif room_type == 'L' and livingspace:
deallocation = deallocate_livingspace(person, livingspace)
elif room_type == 'L' and isinstance(person, model.Staff):
deallocation = 'Invalid Operation'
return deallocation
def get_roomname_type(room_name, dojo):
status_messages = {}
room_name = room_name.strip().lower()
if room_name not in dojo.takken_names:
status_messages['status'] = "Room not found"
else:
office = dojo.get_office(room_name.strip().lower())
livingspace = dojo.get_livingspace(room_name.strip().lower())
# we can only reallocate one room at a time office or livingspace
status_messages['in'] = (office, 'O') if office else (livingspace, 'L')
status_messages['status'] = 'ok'
return status_messages
def deallocate_livingspace(person, room):
if isinstance(person, model.Staff):
return 'Invalid Operation'
if person.is_allocated_living() and person.wants_living:
room.remove_occupant(person)
person.livingspace = False
elif not person.wants_living:
return 'Invalid Operation'
return 'Done'
def deallocate_office(person, room):
room.remove_occupant(person)
person.office = False
return 'Done'
|
the-stack_0_4507 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import logging
import os
from dataclasses import dataclass
from typing import Any, Dict, List, Optional, Type
import pytorch_lightning as pl # type: ignore
from d2go.config import CfgNode, temp_defrost, auto_scale_world_size
from d2go.runner import create_runner
from d2go.runner.callbacks.quantization import (
QuantizationAwareTraining,
)
from d2go.runner.lightning_task import GeneralizedRCNNTask
from d2go.setup import basic_argument_parser
from d2go.utils.misc import dump_trained_model_configs
from detectron2.utils.events import EventStorage
from detectron2.utils.file_io import PathManager
from pytorch_lightning.callbacks import Callback
from pytorch_lightning.callbacks import LearningRateMonitor
from pytorch_lightning.callbacks.model_checkpoint import ModelCheckpoint
from pytorch_lightning.loggers import TensorBoardLogger
from torch.distributed import get_rank
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("detectron2go.lightning.train_net")
FINAL_MODEL_CKPT = f"model_final{ModelCheckpoint.FILE_EXTENSION}"
@dataclass
class TrainOutput:
output_dir: str
accuracy: Optional[Dict[str, Any]] = None
tensorboard_log_dir: Optional[str] = None
model_configs: Optional[Dict[str, str]] = None
def maybe_override_output_dir(cfg: CfgNode, output_dir: Optional[str]) -> None:
"""Overrides the output directory if `output_dir` is not None. """
if output_dir is not None and output_dir != cfg.OUTPUT_DIR:
cfg.OUTPUT_DIR = output_dir
logger.warning(
f"Override cfg.OUTPUT_DIR ({cfg.OUTPUT_DIR}) to be the same as "
f"output_dir {output_dir}"
)
def _get_trainer_callbacks(cfg: CfgNode) -> List[Callback]:
"""Gets the trainer callbacks based on the given D2Go Config.
Args:
cfg: The normalized ConfigNode for this D2Go Task.
Returns:
A list of configured Callbacks to be used by the Lightning Trainer.
"""
callbacks: List[Callback] = [
LearningRateMonitor(logging_interval="step"),
ModelCheckpoint(
dirpath=cfg.OUTPUT_DIR,
save_last=True,
),
]
if cfg.QUANTIZATION.QAT.ENABLED:
callbacks.append(QuantizationAwareTraining.from_config(cfg))
return callbacks
def _get_accelerator(use_cpu: bool) -> str:
return "ddp_cpu" if use_cpu else "ddp"
def get_trainer_params(cfg: CfgNode, num_machines: int, num_processes: int) -> Dict[str, Any]:
use_cpu = cfg.MODEL.DEVICE.lower() == "cpu"
return {
# training loop is bounded by max steps, use a large max_epochs to make
# sure max_steps is met first
"max_epochs": 10 ** 8,
"max_steps": cfg.SOLVER.MAX_ITER,
"val_check_interval": cfg.TEST.EVAL_PERIOD
if cfg.TEST.EVAL_PERIOD > 0
else cfg.SOLVER.MAX_ITER,
"num_nodes": num_machines,
"gpus": None if use_cpu else num_processes,
"num_processes": num_processes,
"accelerator": _get_accelerator(use_cpu),
"callbacks": _get_trainer_callbacks(cfg),
"logger": TensorBoardLogger(save_dir=cfg.OUTPUT_DIR),
"num_sanity_val_steps": 0,
"progress_bar_refresh_rate": 10,
"terminate_on_nan": True,
"replace_sampler_ddp": False,
}
def do_train(
cfg: CfgNode, trainer: pl.Trainer, task: GeneralizedRCNNTask
) -> Dict[str, str]:
"""Runs the training loop with given trainer and task.
Args:
cfg: The normalized ConfigNode for this D2Go Task.
trainer: PyTorch Lightning trainer.
task: Lightning module instance.
Returns:
A map of model name to trained model config path.
"""
with EventStorage() as storage:
task.storage = storage
trainer.fit(task)
final_ckpt = os.path.join(cfg.OUTPUT_DIR, FINAL_MODEL_CKPT)
trainer.save_checkpoint(final_ckpt) # for validation monitor
trained_cfg = cfg.clone()
with temp_defrost(trained_cfg):
trained_cfg.MODEL.WEIGHTS = final_ckpt
model_configs = dump_trained_model_configs(
cfg.OUTPUT_DIR, {"model_final": trained_cfg}
)
return model_configs
def do_test(trainer: pl.Trainer, task: GeneralizedRCNNTask):
"""Runs the evaluation with a pre-trained model.
Args:
cfg: The normalized ConfigNode for this D2Go Task.
trainer: PyTorch Lightning trainer.
task: Lightning module instance.
"""
with EventStorage() as storage:
task.storage = storage
trainer.test(task)
def main(
cfg: CfgNode,
output_dir: Optional[str] = None,
task_cls: Type[GeneralizedRCNNTask] = GeneralizedRCNNTask,
eval_only: bool = False,
num_machines: int = 1,
num_processes: int = 1,
) -> TrainOutput:
"""Main function for launching a training with lightning trainer
Args:
cfg: D2go config node
num_machines: Number of nodes used for distributed training
num_processes: Number of processes on each node.
eval_only: True if run evaluation only.
"""
auto_scale_world_size(cfg, num_machines * num_processes)
maybe_override_output_dir(cfg, output_dir)
task = task_cls.from_config(cfg, eval_only)
trainer_params = get_trainer_params(cfg, num_machines, num_processes)
last_checkpoint = os.path.join(cfg.OUTPUT_DIR, "last.ckpt")
if PathManager.exists(last_checkpoint):
# resume training from checkpoint
trainer_params["resume_from_checkpoint"] = last_checkpoint
logger.info(f"Resuming training from checkpoint: {last_checkpoint}.")
trainer = pl.Trainer(**trainer_params)
model_configs = None
if eval_only:
do_test(trainer, task)
else:
model_configs = do_train(cfg, trainer, task)
return TrainOutput(
output_dir=cfg.OUTPUT_DIR,
tensorboard_log_dir=trainer_params["logger"].log_dir,
accuracy=task.eval_res,
model_configs=model_configs,
)
def build_config(
config_file: str,
task_cls: Type[GeneralizedRCNNTask],
opts: Optional[List[str]] = None,
) -> CfgNode:
"""Build config node from config file
Args:
config_file: Path to a D2go config file
output_dir: When given, this will override the OUTPUT_DIR in the config
opts: A list of config overrides. e.g. ["SOLVER.IMS_PER_BATCH", "2"]
"""
cfg = task_cls.get_default_cfg()
cfg.merge_from_file(config_file)
if opts:
cfg.merge_from_list(opts)
return cfg
def argument_parser():
parser = basic_argument_parser(distributed=True, requires_output_dir=False)
parser.add_argument(
"--num-gpus", type=int, default=0, help="number of GPUs per machine"
)
return parser
if __name__ == "__main__":
args = argument_parser().parse_args()
task_cls = create_runner(args.runner) if args.runner else GeneralizedRCNNTask
cfg = build_config(args.config_file, task_cls, args.opts)
ret = main(
cfg,
args.output_dir,
task_cls,
eval_only=False, # eval_only
num_machines=args.num_machines,
num_processes=args.num_processes,
)
if get_rank() == 0:
print(ret)
|
the-stack_0_4509 | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tests.helpers.pipelines as tpipes
import tests.helpers.utils as tutils
from pytorch_lightning.callbacks import EarlyStopping
from pytorch_lightning.trainer import Trainer
from pytorch_lightning.utilities import memory
from tests.helpers import BoringModel
from tests.helpers.datamodules import ClassifDataModule
from tests.helpers.runif import RunIf
from tests.helpers.simple_models import ClassificationModel
@RunIf(min_gpus=2)
def test_multi_gpu_early_stop_ddp_spawn(tmpdir):
tutils.set_random_main_port()
trainer_options = dict(
default_root_dir=tmpdir,
callbacks=[EarlyStopping(monitor="train_acc")],
max_epochs=50,
limit_train_batches=10,
limit_val_batches=10,
gpus=[0, 1],
strategy="ddp_spawn",
)
dm = ClassifDataModule()
model = ClassificationModel()
tpipes.run_model_test(trainer_options, model, dm)
@RunIf(min_gpus=2)
def test_multi_gpu_model_ddp_spawn(tmpdir):
tutils.set_random_main_port()
trainer_options = dict(
default_root_dir=tmpdir,
max_epochs=1,
limit_train_batches=10,
limit_val_batches=10,
gpus=[0, 1],
strategy="ddp_spawn",
enable_progress_bar=False,
)
model = BoringModel()
tpipes.run_model_test(trainer_options, model)
# test memory helper functions
memory.get_memory_profile("min_max")
@RunIf(min_gpus=2)
def test_ddp_all_dataloaders_passed_to_fit(tmpdir):
"""Make sure DDP works with dataloaders passed to fit()"""
tutils.set_random_main_port()
model = BoringModel()
trainer = Trainer(
default_root_dir=tmpdir,
enable_progress_bar=False,
max_epochs=1,
limit_train_batches=0.2,
limit_val_batches=0.2,
gpus=[0, 1],
strategy="ddp_spawn",
)
trainer.fit(model, train_dataloaders=model.train_dataloader(), val_dataloaders=model.val_dataloader())
assert trainer.state.finished, "DDP doesn't work with dataloaders passed to fit()."
|
the-stack_0_4510 | import numpy as np
import pandas as pd
from hamcrest import assert_that, has_item
import cifrum as lib
from conftest import decimal_places
from cifrum._settings import _MONTHS_PER_YEAR
__asset_name = 'index/OKID10'
def test__present_in_available_names():
sym_ids = [x.fin_sym_id.format() for x in lib.available_names(namespace='index')]
assert_that(sym_ids, has_item(__asset_name))
def test__have_valid_max_period_range():
okid10 = lib.portfolio_asset(name=__asset_name)
cbr_top10 = lib.information(name='cbr/TOP_rates')
assert okid10.close().start_period == cbr_top10.start_period + _MONTHS_PER_YEAR
assert (cbr_top10.end_period - okid10.close().end_period).n < 2
def test__have_valid_selected_period_range():
start_period = pd.Period('2013-1', freq='M')
end_period = pd.Period('2015-3', freq='M')
okid10 = lib.portfolio_asset(name=__asset_name, start_period=str(start_period), end_period=str(end_period))
assert okid10.close().start_period == start_period
assert okid10.close().end_period == end_period
def test__have_correct_values():
okid10 = lib.portfolio_asset(name=__asset_name, end_period='2018-12')
np.testing.assert_almost_equal(okid10.close()[:5].values,
[100., 100.9854, 101.9356, 102.8515, 103.7328], decimal_places)
np.testing.assert_almost_equal(okid10.close()[-5:].values,
[212.0694, 213.2737, 214.4767, 215.6832, 216.8961], decimal_places)
def test__compute_correctly_in_other_currencies():
okid10_usd = lib.portfolio_asset(name=__asset_name, end_period='2018-12', currency='usd')
okid10_rub = lib.portfolio_asset(name=__asset_name, end_period='2018-12', currency='rub')
okid10_currency_rate = okid10_usd.close() / okid10_rub.close()
vs_rub = lib.portfolio_asset(name='cbr/RUB',
start_period=okid10_currency_rate.start_period,
end_period=okid10_currency_rate.end_period,
currency='usd').close()
np.testing.assert_almost_equal(okid10_currency_rate.values, vs_rub.values, decimal_places)
|
the-stack_0_4516 | #!/usr/bin/env python
from __future__ import print_function
import thread
import socket
import argparse
import sys, time, os, glob, shutil, math, datetime
from tmuxsend import TmuxSend
def run_server(port):
# Create a TCP/IP socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
#sock.settimeout(3)
# Bind the socket to the port
server_address = ('', port)
sock.bind(server_address)
sock.listen(1)
print("Speech server started on port %d ..." %port)
print("Speech server commands available [@audio, @audiokill]")
print("Example: echo \"@audio\" | netcat -w 1 localhost %d" %port)
print("TTS command: echo \"TTS[en] hello!\" | netcat -w 1 localhost 9001")
tmux = TmuxSend('bringup', ['audio server','cmd'])
connected = False
dorun = True
while dorun:
if not connected:
print("-- Waiting for connection ...")
while (dorun and not connected):
try:
# Wait for a connection
connection, client_address = sock.accept()
connected = True
print ('-- Connection from %s' %client_address[0])
except KeyboardInterrupt:
print("User interrupt (quit)")
dorun = False
except Exception as e:
print(e)
pass # keep listening
if not dorun:
return
# print("-- Waiting for data...")
data = None
while dorun and connected and data is None:
# receive data
try:
#connection.settimeout(3) # timeout when listening (exit with CTRL+C)
data = connection.recv(320) # blocking
data = data.strip()
except KeyboardInterrupt:
print("User interrupt (quit)")
dorun = False
except socket.timeout:
data = None
print("socket timeout")
if data is not None:
if len(data)==0:
connected = False
else:
print(data)
folder = "~/src/marrtino_apps/audio"
if data=='@audio':
tmux.cmd(0,'cd %s' %folder)
tmux.cmd(0,'python audio_server.py')
elif data=='@audiokill':
tmux.Cc(0)
else:
                    print('Unknown command %s' % data)
if __name__ == '__main__':
default_port = 9239
parser = argparse.ArgumentParser(description='speech bringup')
parser.add_argument('-server_port', type=int, default=default_port, help='server port')
args = parser.parse_args()
run_server(args.server_port)
|
the-stack_0_4517 | #!/usr/bin/env python
import click
import json
import os
import shutil
import subprocess
import uuid
LOGISTICIAN_ROOT = os.path.dirname(os.path.abspath(__file__))
CONFIG_PATH = os.path.expanduser("~/.logistician/")
def random_id():
return str(uuid.uuid4()).split("-")[0]
def write_to_file(path, contents):
f = open(path, "w")
f.write(contents)
f.close()
def from_template_file(template_file, vars):
f = open(template_file, "r")
template = f.read()
f.close()
return template % vars
def create_config_directory():
if not os.path.exists(CONFIG_PATH):
os.makedirs(CONFIG_PATH)
def load_params(experiment_path):
params_filename = os.path.join(experiment_path, "parameters.json")
g = open(params_filename)
params = json.load(g)
g.close()
return params
def echo_command_string(s):
click.secho(s, fg='green')
def verbose_call(cmd):
echo_command_string(subprocess.list2cmdline(cmd))
subprocess.call(cmd)
def local_docker_command(params):
return params.get("local_docker_command", "docker")
def remote_docker_command(params):
return params.get("remote_docker_command", "docker")
def config():
"""
Interactively create config file
"""
create_config_directory()
docker_username = click.prompt("Please enter your Docker Hub username")
docker_repository = click.prompt("Please enter your preferred Docker repository name", "experiments")
aws_access_key = click.prompt("Please enter your AWS access key (e.g., AKIAIOSFODNN7EXAMPLE)")
aws_secret_key = click.prompt("Please enter your AWS secret key (e.g., wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY)")
configuration = {
"aws_access_key": aws_access_key,
"aws_secret_key": aws_secret_key,
"docker_username": docker_username,
"docker_repository": docker_repository
}
f = open(os.path.join(CONFIG_PATH, "config.json"), "w")
json.dump(configuration, f)
f.close()
def create_ssh_key():
"""
Create and store SSH key
"""
create_config_directory()
private_key_path = os.path.join(CONFIG_PATH, "ssh-key")
if os.path.exists(private_key_path):
click.echo("File already exists at {0}".format(private_key_path))
else:
verbose_call(["ssh-keygen", "-t", "rsa", "-b", "4096", "-f", private_key_path, "-P", ""])
def build(experiment_path):
"""
Build Docker image for experiment
"""
params = load_params(experiment_path)
experiment_name = params["experiment_name"]
click.echo("Building Docker image for {0}".format(experiment_name))
verbose_call([local_docker_command(params), "build", "-t", experiment_name, experiment_path])
click.echo("Docker build done.")
def get_project_path(file_path):
cmd = "cd '{0}' && git rev-parse --show-toplevel".format(file_path)
echo_command_string(cmd)
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
p.wait()
    return p.stdout.read().decode().strip()
ExperimentPathType = click.Path(exists=True, file_okay=False, dir_okay=True, writable=True, readable=True, resolve_path=True)
@click.group()
def cli():
pass
@click.command()
@click.pass_context
def setup(ctx):
"""
Run initial interactive setup for Logistician
"""
click.echo("This is the interactive setup for Logistician.")
config()
create_ssh_key()
click.echo("Configuration done.")
@click.command()
@click.argument('experiment_path', type=ExperimentPathType, default=lambda: os.getcwd())
def sync(experiment_path):
"""
Sync all data from cloud to local machine
"""
# Load IP addresses
machines_filename = os.path.join(experiment_path, "machines.txt")
if not os.path.exists(machines_filename):
click.echo("Machine file {0} does not exist. Can't sync.".format(machines_filename))
return
f = open(machines_filename)
machines = [line.strip().split(", ") for line in f.read().strip().split("\n")]
f.close()
# Load AWS AMI user
params = load_params(experiment_path)
aws_ami_user = params["aws_ami_user"]
# Create data folder if it doesn't exist
verbose_call(["mkdir", "-p", os.path.join(experiment_path, "data/")])
docker_command = remote_docker_command(params)
for (ip, condition) in machines:
remote_address = "{0}@{1}".format(aws_ami_user, ip)
local_path = os.path.join(experiment_path, "data/", condition)
click.echo("Syncing {0} to {1}".format(remote_address, local_path))
# Copy latest Docker logs to remote data directory
verbose_call(["ssh", "-o", "StrictHostKeyChecking no", "-i", "~/.logistician/ssh-key", remote_address,
"sudo bash -c '" + docker_command + " logs `" + docker_command + " ps -aq | head -n 1` > /data/logs/docker.txt'"])
# Retrieve remote data directory
verbose_call(["rsync", "-azvv", "-e", "ssh -i ~/.logistician/ssh-key", "{0}:/data/".format(remote_address), local_path])
click.echo("Syncing done.")
@click.command()
@click.option('--options', '-o', help='Options to pass to experiment script', default='')
@click.option('--data_readonly', help='Data folder to read from (optional)', type=click.Path(exists=True, file_okay=False, dir_okay=True, writable=False, readable=True, resolve_path=True), default=None)
@click.option('--clone/--no-clone', help='Clone from remote repo, don\'t use project folder', default=False)
@click.argument('experiment_path', type=ExperimentPathType, default=lambda: os.getcwd())
def run(experiment_path, clone, options, data_readonly):
"""
Run experiment locally
"""
build(experiment_path)
params = load_params(experiment_path)
experiment_name = params["experiment_name"]
click.echo("Running {0} with options '{1}'".format(experiment_name, options))
if clone:
# If we don't mount project volume, it will be cloned
clone_args = []
else:
project_path = get_project_path(experiment_path)
clone_args = ["-v", "{0}:/project".format(project_path)]
if data_readonly:
data_args = ["-v", "{0}:/data:ro".format(data_readonly)]
else:
data_args = []
cmd = [local_docker_command(params), "run"] + clone_args + data_args + ["-e", "OPTIONS={0}".format(options), "-it", experiment_name]
verbose_call(cmd)
click.echo("Experiment done.")
@click.command()
@click.option('--volume/--no-volume', help='Mount project folder as /project volume in Docker', default=True)
@click.argument('experiment_path', type=ExperimentPathType, default=lambda: os.getcwd())
def shell(experiment_path, volume=True):
"""
Open shell in experiment environment
"""
build(experiment_path)
params = load_params(experiment_path)
experiment_name = params["experiment_name"]
docker_command = local_docker_command(params)
click.echo("Opening shell for {0}".format(experiment_name))
if volume:
project_path = get_project_path(experiment_path)
verbose_call([docker_command, "run", "-v", "{0}:/project".format(project_path), "-it",
experiment_name, "bash", "-c", "cd /project && bash"])
else:
verbose_call([docker_command, "run", "-it", experiment_name, "bash"])
click.echo("Shell exited.")
@click.command()
@click.argument('experiment_path', type=ExperimentPathType, default=lambda: os.getcwd())
def deploy(experiment_path):
"""
Run experiment in the cloud
"""
build(experiment_path)
click.echo("Deploying {0} to cloud".format(experiment_path))
params_file = os.path.join(experiment_path, "parameters.json")
config_file = os.path.join(CONFIG_PATH, "config.json")
terraform_aws_path = os.path.join(LOGISTICIAN_ROOT, "terraform/aws")
verbose_call(["terraform", "apply", '-var-file={0}'.format(params_file), '-var-file={0}'.format(config_file), terraform_aws_path])
click.echo("Deployment done.")
@click.command()
@click.argument('experiment_path', type=ExperimentPathType, default=lambda: os.getcwd())
def status(experiment_path):
"""
Show deployment status
"""
verbose_call(["terraform", "show", os.path.join(experiment_path, "terraform.tfstate")])
@click.command()
@click.argument('experiment_path', type=ExperimentPathType, default=lambda: os.getcwd())
def terminate(experiment_path):
"""
Terminate cloud experiment
"""
click.echo("Terminating {0} in cloud".format(experiment_path))
params_file = os.path.join(experiment_path, "parameters.json")
config_file = os.path.join(CONFIG_PATH, "config.json")
terraform_aws_path = os.path.join(LOGISTICIAN_ROOT, "terraform/aws")
verbose_call(["terraform", "destroy", '-var-file={0}'.format(params_file), '-var-file={0}'.format(config_file), terraform_aws_path])
click.echo("Experiment terminated.")
@click.command()
@click.option('--base', help='Path to previous experiment used as base (optional)', type=click.Path(exists=True, file_okay=False, dir_okay=True, readable=True, resolve_path=True), default=None)
@click.argument('experiment_path', type=click.Path(exists=False), default=lambda: None)
def create(experiment_path, base):
"""
Run interactive setup for a new experiment
"""
if not experiment_path:
experiment_path = click.prompt("Path for new experiment", default=os.path.join(os.getcwd(), random_id()))
if os.path.exists(experiment_path):
click.echo("Experiment path should not exist")
return
click.echo("This script will interactively create a new experiment stored at:")
click.echo(os.path.abspath(experiment_path) + "\n")
# Create folder for new experiment
os.makedirs(experiment_path)
# Get experiment name
dirname = os.path.basename(os.path.dirname(os.path.join(experiment_path, '')))
experiment_name = click.prompt("Globally unique experiment name", default=dirname)
if base:
create_derived_experiment(experiment_path, experiment_name, base)
else:
create_fresh_experiment(experiment_path, experiment_name)
def create_derived_experiment(experiment_path, experiment_name, base):
# Copy over Dockerfile
dockerfile_path = os.path.join(experiment_path, "Dockerfile")
shutil.copyfile(os.path.join(base, "Dockerfile"), dockerfile_path);
# Copy over parameter.json, replacing experiment_name with new one
f = open(os.path.join(base, "parameters.json"))
params = json.load(f)
f.close()
params["experiment_name"] = experiment_name
parameters_path = os.path.join(experiment_path, "parameters.json")
f = open(parameters_path, "w")
json.dump(params, f, indent=2, sort_keys=True)
f.close()
show_experiment_info(experiment_path, dockerfile_path, parameters_path)
def create_fresh_experiment(experiment_path, experiment_name):
    git_remote_url = subprocess.check_output(["git", "config", "--get", "remote.origin.url"]).decode().strip()
project_git_url = click.prompt("Remote Git URL", default=git_remote_url)
experiment_cmd = click.prompt("Experiment command (relative to project root)")
settings = {
"experiment_name": experiment_name,
"project_git_url": project_git_url,
"experiment_cmd": experiment_cmd
}
# Create Dockerfile
dockerfile_template_path = os.path.join(LOGISTICIAN_ROOT, "templates/experiment/Dockerfile")
dockerfile_contents = from_template_file(dockerfile_template_path, settings)
dockerfile_path = os.path.join(experiment_path, "Dockerfile")
write_to_file(dockerfile_path, dockerfile_contents)
# Create parameters.json
parameters_template_path = os.path.join(LOGISTICIAN_ROOT, "templates/experiment/parameters.json")
parameters_contents = from_template_file(parameters_template_path, settings)
parameters_path = os.path.join(experiment_path, "parameters.json")
write_to_file(parameters_path, parameters_contents)
show_experiment_info(experiment_path, dockerfile_path, parameters_path)
def show_experiment_info(experiment_path, dockerfile_path, parameters_path):
# Instruct user to edit Dockerfile
click.echo("\nExperiment created.")
click.echo("\nYou can now edit the Dockerfile and parameters:")
click.echo("Dockerfile: {0}".format(dockerfile_path))
click.echo("Parameters: {0}".format(parameters_path))
click.echo("\nOnce done editing, you can run your experiment:")
click.echo("logistician run {0}".format(os.path.relpath(experiment_path)))
cli.add_command(create)
cli.add_command(deploy)
cli.add_command(run)
cli.add_command(setup)
cli.add_command(shell)
cli.add_command(status)
cli.add_command(sync)
cli.add_command(terminate) |
the-stack_0_4519 | """
This script takes a pre-trained Spatial Transformer and applies it to an unaligned dataset to create an aligned and
filtered dataset in an unsupervised fashion. By default, this script will only use the similarity transformation
portion of the Spatial Transformer (rotation + crop) to avoid introducing warping artifacts.
"""
import os
import sys
sys.path.insert(1, os.path.dirname(sys.path[0]))
import torch
import numpy as np
from PIL import Image
from tqdm import tqdm
from prepare_data import create_dataset, border_pad
from models import ComposedSTN
from models.spatial_transformers.warping_heads import SimilarityHead
from applications import base_eval_argparse, load_stn, determine_flips
from applications.flow_scores import filter_dataset
from utils.distributed import setup_distributed, primary, get_rank, all_gatherv, synchronize, get_world_size
from datasets import MultiResolutionDataset
def apply_congealing(args, dataset, stn, stn_full, out_path, device, rank, n_processes, **stn_args):
def prepro(x, from_np=False):
if from_np:
x = np.asarray(x)
return torch.from_numpy(x).float().div_(255.0).add_(-0.5).mul_(2.0).permute(2, 0, 1).unsqueeze_(0).to(device)
total = 0
prefix = chr(ord('a') + rank)
print(f'({rank}) Using prefix {prefix}')
pbar = tqdm if rank == 0 else lambda x: x
indices = torch.arange(rank, len(dataset), n_processes)
one_hot = torch.tensor([[[0, 0, 1]]], dtype=torch.float, device=device)
used_indices = []
for i in pbar(indices):
with torch.no_grad():
x = dataset[i.item()] # (1, C, H, W)
w, h = x.size
size = max(w, h)
x_big = prepro(border_pad(x, size, resize=False, to_pil=False)) # (1, C, size, size)
x_in = prepro(border_pad(x, args.flow_size, to_pil=False)) # (1, C, flow_size, flow_size)
x_in, flip_indices, warp_policy = determine_flips(args, stn_full, None, x_in)
x_big = torch.where(flip_indices.view(-1, 1, 1, 1), x_big.flip(3,), x_big)
image_bounds = torch.tensor([[h, w]], dtype=torch.float, device='cuda')
try:
aligned, M, oob = stn(x_in, return_flow=True, return_out_of_bounds=True, input_img_for_sampling=x_big,
output_resolution=args.output_resolution, image_bounds=image_bounds, **stn_args)
except RuntimeError:
print(f'Rank {rank}: WARNING: Ran out of GPU memory, skipping...')
continue
# The scale of the similarity transform can be extracted from our affine matrix
# by taking the square-root of its determinant:
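            # (For a 3x3 similarity matrix [[s*cos t, -s*sin t, tx], [s*sin t, s*cos t, ty],
            #  [0, 0, 1]], det(M) = s**2, so sqrt(det(M)) recovers the scale s.)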
M = torch.cat([M, one_hot], 1)
scale = torch.det(M).sqrt_()
too_low_res = (scale.item() * min(w, h)) < args.min_effective_resolution
# We don't want to include images that can only be aligned by extrapolating a significant number of pixels
# beyond the image boundary:
if not (too_low_res or oob.item()):
used_indices.append(i)
write_image_batch(aligned, out_path, start_index=total, prefix=prefix)
total += aligned.size(0)
print(f'({rank}) Saved {total} images')
used_indices = torch.stack(used_indices).to(device)
return used_indices
def write_image_batch(images, out_path, start_index=0, prefix=''):
def norm(img, min, max):
img.clamp_(min=min, max=max)
img.add_(-min).div_(max - min)
norm(images, -1, 1)
ndarr = images.mul(255).add_(0.5).clamp_(0, 255).permute(0, 2, 3, 1).to('cpu', torch.uint8).numpy()
for i in range(ndarr.shape[0]):
index = i + start_index
Image.fromarray(ndarr[i]).save(f'{out_path}/{prefix}{index:07}.png')
def align_and_filter_dataset(args, t):
# The aligned + filtered images will be saved directly as pngs to temp_folder below:
temp_folder = f'{args.out}_imagefolder'
if primary():
os.makedirs(temp_folder, exist_ok=True)
os.makedirs(args.out, exist_ok=True)
# Step 1: Apply the STN to every image in the dataset
dataset = MultiResolutionDataset(args.real_data_path, resolution=args.real_size, transform=None)
if args.flow_scores is not None: # Filter the dataset based on flow scores:
dataset = filter_dataset(dataset, args.flow_scores, args.fraction_retained)
if isinstance(t, ComposedSTN):
t_sim = t.stns[0] # Only use the similarity transformation
else:
t_sim = t
assert isinstance(t_sim.warp_head, SimilarityHead), 'Currently only similarity transformations are supported ' \
'for this script'
used_indices = apply_congealing(args, dataset, t_sim, t, temp_folder, 'cuda', get_rank(), get_world_size(),
iters=args.iters, padding_mode=args.padding_mode)
synchronize()
used_indices = all_gatherv(used_indices)
# Step 2: Create an lmdb from temp_folder:
if primary():
create_dataset(args.out, temp_folder, size=args.output_resolution, format='png')
used_indices = used_indices.sort().values.cpu()
print(f'Saving indices of images (size={used_indices.size(0)})')
torch.save(used_indices, f'{args.out}/dataset_indices.pt')
print('Done.')
if __name__ == '__main__':
parser = base_eval_argparse()
# Dataset congealing + creation hyperparameters:
parser.add_argument("--out", type=str, help='Directory to save output aligned dataset', required=True)
parser.add_argument("--output_resolution", type=int, default=256, help='Resolution of output aligned images')
parser.add_argument("--flow_scores", default=None, type=str,
help='Path to pre-computed flow scores to filter dataset (see flow_scores.py for more info)')
parser.add_argument("--fraction_retained", default=1.0, type=float,
help='Fraction of dataset images to retain based on flow scores')
# Also see --fraction_retained in base_eval_argparse()
parser.add_argument("--min_effective_resolution", type=int, default=192,
help='Some images will have small objects that the STN successfully aligns. But, you may not '
'want these aligned images in your dataset because the STN will have produced a large '
'zoom that yields a low resolution image when resized to output_resolution. Any aligned '
'image with size less than min_effective_resolution will be excluded from the output '
'dataset.')
args = parser.parse_args()
assert args.num_heads == 1, 'Clustering not currently supported for congeal_dataset.py'
args.distributed = setup_distributed(args.local_rank)
t_ema = load_stn(args)
align_and_filter_dataset(args, t_ema)
|
the-stack_0_4521 | from glob import glob
import pandas as pd
import process_trial
import json, os
from tqdm import tqdm
print(os.getcwd())
print('Process testset')
for test_group in tqdm(glob('data/raw/*/metrics_*.csv')):
try:
platform = test_group.split('/')[2]
df = pd.read_csv(test_group)
df['init_time'] = pd.to_datetime(df['init_time'])
df.reset_index(inplace=True)
for index, row in df.iterrows():
row['times'] = json.loads(row['times'])
hw_log = test_group.replace('metrics_', 'hardware_log_')
process_trial.compute(row, hw_log, index, platform)
    except Exception as e:
        print(f'ERROR: {test_group}: {e}')
print('Process testset completed') |
the-stack_0_4523 | #!/usr/bin/env python3
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
import argparse
import os
import platform
import shutil
import subprocess
import sys
import time
from vmstat import capture_sample
from vmstat import plot_output
from vmstat import print_output_to_file
def main():
args = parse_args()
ret = os.system(f"cd ../examples && b2 release {args.toolset} stage_client_test stage_connection_tester")
save_dir = args.directory
print(f"save dir: {save_dir}")
if ret != 0:
print('ERROR: build failed: %d' % ret)
sys.exit(1)
rm_file_or_dir(".ses_state")
rm_file_or_dir(".resume")
if not os.path.exists('checking_benchmark.torrent'):
ret = os.system('../examples/connection_tester gen-torrent -s 10000 -n 15 -t checking_benchmark.torrent')
if ret != 0:
print('ERROR: connection_tester failed: %d' % ret)
sys.exit(1)
if not os.path.exists(f"{save_dir}/checking_benchmark"):
cmd_line = f'../examples/connection_tester gen-data -t checking_benchmark.torrent -P {save_dir}'
print(cmd_line)
ret = os.system(cmd_line)
if ret != 0:
print('ERROR: connection_tester failed: %d' % ret)
sys.exit(1)
for threads in [1, 2, 4, 8, 16, 32, 64]:
print("drop caches now. e.g. \"echo 1 | sudo tee /proc/sys/vm/drop_caches\"")
input("Press Enter to continue...")
run_test(f"{threads}", f"--hashing_threads={threads}", save_dir)
def run_test(name, client_arg, save_dir: str):
output_dir = 'logs_checking_%s' % name
timing_path = os.path.join(output_dir, 'timing.txt')
if os.path.exists(timing_path):
print('file "{path}" exists, skipping test "{name}"'.format(path=timing_path, name=name))
return
rm_file_or_dir(output_dir)
try:
os.mkdir(output_dir)
except Exception:
pass
rm_file_or_dir(f"{save_dir}/.resume")
client_cmd = ('../examples/client_test checking_benchmark.torrent '
'--enable_dht=0 --enable_lsd=0 --enable_upnp=0 --enable_natpmp=0 '
f'-1 {client_arg} -s {save_dir} -f {output_dir}/events.log --alert_mask=all')
client_out = open('%s/client.out' % output_dir, 'w+')
print('client_cmd: "{cmd}"'.format(cmd=client_cmd))
c = subprocess.Popen(client_cmd.split(' '), stdout=client_out, stderr=client_out, stdin=subprocess.PIPE)
start = time.monotonic()
if platform.system() == "Linux":
out = {}
while c.returncode is None:
capture_sample(c.pid, start, out)
time.sleep(0.1)
c.poll()
stats_filename = f"{output_dir}/memory_stats.log"
keys = print_output_to_file(out, stats_filename)
plot_output(stats_filename, keys)
else:
c.wait()
client_out.close()
start_time = 0
end_time = 0
for l in open('%s/events.log' % output_dir, 'r'):
if 'checking_benchmark: start_checking, m_checking_piece: ' in l \
and start_time == 0:
start_time = int(l.split(' ')[0][1:-1])
if 'state changed to: finished' in l \
and start_time != 0:
end_time = int(l.split(' ')[0][1:-1])
print('%s: %d' % (name, end_time - start_time))
with open('%s/timing.txt' % output_dir, 'w+') as f:
f.write('%s: %d\n' % (name, end_time - start_time))
def rm_file_or_dir(path):
""" Attempt to remove file or directory at path
"""
try:
shutil.rmtree(path)
except Exception:
pass
try:
os.remove(path)
except Exception:
pass
def parse_args():
p = argparse.ArgumentParser()
p.add_argument('--toolset', default="")
p.add_argument('--directory', default=".")
return p.parse_args()
if __name__ == '__main__':
main()
|
the-stack_0_4525 | import json
from contextlib import contextmanager
from datetime import datetime, timedelta
from xml.sax.saxutils import unescape
from mock import patch
from casexml.apps.case.models import CommCareCase
from casexml.apps.case.sharedmodels import CommCareCaseIndex
from corehq.apps.domain.shortcuts import create_domain
from corehq.apps.tzmigration.timezonemigration import MISSING
from corehq.form_processor.backends.couch.dbaccessors import CaseAccessorCouch
from corehq.form_processor.interfaces.dbaccessors import FormAccessors
from corehq.form_processor.models import CommCareCaseIndexSQL
from corehq.form_processor.utils.general import (
clear_local_domain_sql_backend_override,
)
from corehq.util.dates import iso_string_to_datetime
from corehq.util.test_utils import capture_log_output
from .test_migration import BaseMigrationTestCase, Diff, IGNORE, make_test_form
from .. import casediff
from .. import casedifftool as mod
from ..diffrule import ANY
from ..statedb import open_state_db
class TestCouchSqlDiff(BaseMigrationTestCase):
def test_diff(self):
self.submit_form(make_test_form("form-1", case_id="case-1"))
self.do_migration(case_diff="none")
clear_local_domain_sql_backend_override(self.domain_name)
with self.augmented_couch_case("case-1") as case:
case.age = '35'
case.save()
self.do_case_diffs()
self.compare_diffs([
Diff('case-1', 'diff', ['age'], old='35', new='27'),
])
self.do_migration(forms="missing", case_diff="patch")
def test_diff_specific_case(self):
self.submit_form(make_test_form("form-1", case_id="case-1"))
self.do_migration(case_diff="none")
clear_local_domain_sql_backend_override(self.domain_name)
with self.augmented_couch_case("case-1") as case:
case.age = '35'
case.save()
self.do_case_diffs(cases="case-1")
self.compare_diffs([
Diff('case-1', 'diff', ['age'], old='35', new='27'),
])
def test_pending_diff(self):
def diff_none(case_ids, log_cases=None):
return casediff.DiffData([])
self.submit_form(make_test_form("form-1", case_id="case-1"))
self.do_migration(case_diff='none')
clear_local_domain_sql_backend_override(self.domain_name)
with self.augmented_couch_case("case-1") as case:
case.age = '35'
case.save()
with patch("corehq.apps.couch_sql_migration.casedifftool.diff_cases", diff_none):
result = self.do_case_diffs()
self.assertEqual(result, mod.PENDING_WARNING)
self.do_case_diffs(cases="pending")
self.compare_diffs([
Diff('case-1', 'diff', ['age'], old='35', new='27'),
])
def test_live_diff(self):
# do not diff case modified since most recent case created in SQL
self.submit_form(make_test_form("form-1", case_id="case-1"), timedelta(minutes=-90))
self.submit_form(make_test_form("form-2", case_id="case-1", age=35))
self.do_migration(live=True, chunk_size=1, case_diff="none")
self.assert_backend("sql")
case = self._get_case("case-1")
self.assertEqual(case.dynamic_case_properties()["age"], '27')
self.do_case_diffs()
self.compare_diffs(ignore_fail=True)
def test_failed_diff(self):
self.pool_mock.stop()
self.addCleanup(self.pool_mock.start)
self.submit_form(make_test_form("form-1", case_id="case-1"))
self.do_migration(case_diff="none")
# patch init_worker to make subprocesses use the same database
# connections as this process (which is operating in a transaction)
init_worker_path = "corehq.apps.couch_sql_migration.casedifftool.init_worker"
with patch(init_worker_path, mod.global_diff_state), \
patch("corehq.apps.couch_sql_migration.casediff.diff_case") as mock, \
capture_log_output("corehq.apps.couch_sql_migration.parallel") as log:
mock.side_effect = Exception("diff failed!")
self.do_case_diffs()
logs = log.get_output()
self.assertIn("error processing item in worker", logs)
self.assertIn("Exception: diff failed!", logs)
self.compare_diffs()
db = open_state_db(self.domain_name, self.state_dir)
self.assertEqual(list(db.iter_undiffed_case_ids()), ["case-1"])
def test_reconcile_transaction_order(self):
from ..rebuildcase import SortTransactionsRebuild
form1 = make_test_form("form-1", age="33", date="2016-08-04T18:25:56.656Z")
form2 = make_test_form("form-2", age="32", date="2015-08-04T18:25:56.656Z")
self.submit_form(form1)
self.submit_form(form2)
self.assertEqual(self._get_case("test-case").age, "33")
with self.diff_without_rebuild():
self.do_migration()
self.compare_diffs([
Diff('test-case', 'diff', ['age'], old='33', new='32'),
])
clear_local_domain_sql_backend_override(self.domain_name)
self.do_case_diffs(cases="with-diffs")
sql_case = self._get_case("test-case")
self.assertEqual(sql_case.dynamic_case_properties()["age"], "33")
self.compare_diffs()
details = sql_case.transactions[-1].details
self.assertEqual(details["reason"], SortTransactionsRebuild._REASON)
server_dates = details["original_server_dates"]
self.assertEqual(len(server_dates), 1, server_dates)
def test_couch_with_missing_forms(self):
form1 = make_test_form("form-1", age="33", date="2016-08-04T18:25:56.656Z")
form2 = make_test_form("form-2", age="32", date="2015-08-04T18:25:56.656Z")
self.submit_form(THING_FORM)
self.submit_form(form1)
self.submit_form(form2)
case = self._get_case("test-case")
self.assertEqual(case.age, "33")
self.assertEqual(case.thing, "1")
del case.thing
case.actions = [a for a in case.actions if a.form_id != "thing-form"]
case.save()
with self.assertRaises(AttributeError):
self._get_case("test-case").thing
with self.diff_without_rebuild():
self.do_migration()
self.compare_diffs([
Diff('test-case', 'diff', ['age'], old='33', new='32'),
Diff('test-case', 'missing', ['thing'], old=MISSING, new='1'),
])
clear_local_domain_sql_backend_override(self.domain_name)
self.do_case_diffs(cases="with-diffs")
sql_case = self._get_case("test-case")
self.assertEqual(sql_case.dynamic_case_properties()["age"], "33")
self.compare_diffs(changes=[
Diff('test-case', 'missing', ['thing'], old=MISSING, new='1', reason='rebuild case'),
])
self.do_migration(patch=True, diffs=[])
def test_couch_missing_create_case(self):
with self.skip_case_and_ledger_updates("thing-form"):
self.submit_form(THING_FORM)
self.submit_form(UPDATE_FORM)
case = self._get_case("test-case")
# simulate null properties seen in the wild
object.__setattr__(case, "name", None)
object.__setattr__(case, "type", None)
case.save()
with self.diff_without_rebuild():
self.do_migration()
self.compare_diffs([
Diff('test-case', 'missing', ['thing'], old=MISSING, new='1'),
Diff('test-case', 'set_mismatch', ['xform_ids', '[*]'], old='', new='thing-form'),
Diff('test-case', 'type', ['name'], old=None, new='Thing'),
Diff('test-case', 'type', ['type'], old=None, new='testing'),
])
self.do_migration(patch=True, diffs=[])
case = self._get_case("test-case")
self.assertEqual(case.name, "")
self.assertEqual(case.type, "")
self.assertEqual(case.dynamic_case_properties()["thing"], "")
self.assertEqual(case.xform_ids, ['thing-form', 'update-form', ANY])
def test_case_with_deleted_form(self):
# form state=normal / deleted -> missing case
one = self.submit_form(make_test_form("one", age=27))
FormAccessors(self.domain_name).soft_delete_forms(
[one.form_id], datetime.utcnow(), 'test-deletion')
self.do_migration()
self.compare_diffs(changes=[
Diff('test-case', 'missing', ['*'], old='*', new=MISSING, reason="deleted forms"),
])
def test_diff_case_with_wrong_domain(self):
wrong_domain = create_domain("wrong")
self.addCleanup(wrong_domain.delete)
self.submit_form(make_test_form("form-1"), domain="wrong")
self.do_migration(case_diff="none", domain="wrong")
self.do_migration(case_diff="none")
clear_local_domain_sql_backend_override(self.domain_name)
with capture_log_output("corehq.apps.couch_sql_migration") as log, \
self.augmented_couch_case("test-case") as case:
# modify case so it would have a diff (if it were diffed)
case.age = '35'
case.save()
# try to diff case in wrong domain
self.do_case_diffs(cases="test-case")
self.compare_diffs([
Diff('test-case', 'diff', ['domain'], old='wrong', new=self.domain_name),
])
logs = log.get_output()
self.assertIn("couch case test-case has wrong domain: wrong", logs)
def test_ledger_dup_transaction_diff(self):
product_id = self.create_form_with_duplicate_stock_transaction()
self.do_migration(case_diff='none')
self.compare_diffs(ignore_fail=True)
clear_local_domain_sql_backend_override(self.domain_name)
self.do_case_diffs()
self.compare_diffs(changes=[Diff(
f"test-case/things/{product_id}",
reason="duplicate stock transaction",
type="diff",
path=["balance"],
old=2,
new=1,
kind="stock state",
)])
def test_patch_known_properties(self):
self.submit_form(make_test_form("form-1", case_id="case-1"))
self.do_migration(case_diff="none")
clear_local_domain_sql_backend_override(self.domain_name)
open_date = datetime(2010, 9, 8)
with self.augmented_couch_case("case-1") as case:
case.name = "Zena"
case.type = "old-type"
case.user_id = "old-user"
case.owner_id = "old-owner"
case.opened_on = open_date
case.save()
self.do_case_diffs()
self.compare_diffs([
Diff('case-1', 'diff', ['name'], old='Zena', new='Xeenax'),
Diff('case-1', 'diff', ['owner_id'], old='old-owner', new='3fae4ea4af440efaa53441b5'),
Diff('case-1', 'diff', ['type'], old='old-type', new='testing'),
Diff('case-1', 'diff', ['user_id'], old='old-user', new='3fae4ea4af440efaa53441b5'),
])
self.do_migration(forms="missing", case_diff="patch")
self.assertEqual(self._get_case("case-1").opened_on, open_date)
def test_unpatchable_properties(self):
date1 = "2018-07-13T11:20:11.381000Z"
self.submit_form(make_test_form("form-1", case_id="case-1"))
case = self._get_case("case-1")
user = case.user_id
case.closed = True
case.closed_by = "someone"
case.closed_on = iso_string_to_datetime(date1)
case.external_id = "ext"
case.name = "Zena"
case.opened_by = "someone"
case.server_modified_on = iso_string_to_datetime(date1)
case.user_id = "person"
case.save()
self.do_migration(diffs=[
Diff('case-1', 'diff', ['closed'], old=True, new=False),
Diff('case-1', 'diff', ['closed_by'], old='someone', new=''),
Diff('case-1', 'diff', ['external_id'], old='ext', new=''),
Diff('case-1', 'diff', ['name'], old='Zena', new='Xeenax'),
Diff('case-1', 'diff', ['opened_by'], old='someone', new=user),
Diff('case-1', 'diff', ['user_id'], old='person', new=user),
Diff('case-1', 'type', ['closed_on'], old=date1, new=None),
])
self.do_migration(patch=True, diffs=[])
close2 = iso_string_to_datetime("2015-08-04T18:25:56.656Z")
case = self._get_case("case-1")
self.assertEqual(case.closed, True) # patched
self.assertEqual(case.closed_by, "person") # unpatched
self.assertEqual(case.closed_on, close2) # unpatched
self.assertEqual(case.external_id, 'ext') # patched, not sure how/why
self.assertEqual(case.name, "Zena") # patched
self.assertEqual(case.opened_by, user) # unpatched
self.assertEqual(case.user_id, "person") # patched
self.assertNotEqual(case.server_modified_on,
iso_string_to_datetime(date1)) # unpatched
form = self._get_form(case.xform_ids[-1])
diffs = json.loads(unescape(form.form_data["diff"]))
self.assertEqual(diffs, {
"case_id": "case-1",
"diffs": [
{"path": ["closed"], "old": True, "new": False, "patch": True},
{"path": ["closed_by"], "old": "someone", "new": "", "patch": False},
{"path": ["closed_on"], "old": date1, "new": None, "patch": False},
{"path": ["external_id"], "old": "ext", "new": "", "patch": False},
{"path": ["name"], "old": "Zena", "new": "Xeenax", "patch": True},
{"path": ["opened_by"], "old": "someone", "new": user, "patch": False},
{"path": ["user_id"], "old": "person", "new": user, "patch": True},
],
})
def test_patch_closed_case(self):
from casexml.apps.case.cleanup import close_case
self.submit_form(make_test_form("form-1", case_id="case-1"))
close_case("case-1", self.domain_name, "system", "test")
self.do_migration(case_diff="none")
clear_local_domain_sql_backend_override(self.domain_name)
with self.augmented_couch_case("case-1") as case:
case.name = "Zena"
case.save()
self.do_case_diffs()
self.compare_diffs([
Diff('case-1', 'diff', ['name'], old='Zena', new='Xeenax'),
])
self.do_migration(forms="missing", case_diff="patch")
self.assertEqual(self._get_case("case-1").closed, True)
self.assert_patched_cases(["case-1"])
def test_patch_case_needing_sql_rebuild(self):
with self.skip_case_and_ledger_updates("form-1"):
self.submit_form(make_test_form("form-1", age=30))
self.submit_form(make_test_form("form-2"))
with self.diff_without_rebuild():
self.do_migration()
with patch.object(mod.CaseDiffTool, "diff_cases"):
self.do_case_patch()
self.compare_diffs([
Diff('test-case', 'set_mismatch', ['xform_ids', '[*]'], old='', new='form-1'),
])
case = self._get_case("test-case")
case.case_json["age"] = "30" # diff -> reubild SQL case
case.save()
self.do_case_diffs("pending")
self.compare_diffs([])
self.assert_patched_cases(["test-case"])
def test_cannot_patch_case_missing_in_couch(self):
self.submit_form(make_test_form("form-1", case_id="case-1"))
self.do_migration(case_diff="none")
CommCareCase.get_db().delete_doc("case-1")
self.do_migration(forms="missing", case_diff="patch", diffs=[
Diff('case-1', 'missing', ['*'], old=MISSING, new='present'),
])
self.assert_patched_cases()
def test_convert_error_form_for_case_missing_in_couch(self):
def find_forms(case_id):
return ["form-1"]
self.submit_form(make_test_form("form-1", case_id="case-1"))
self.do_migration(case_diff="none")
CommCareCase.get_db().delete_doc("case-1")
clear_local_domain_sql_backend_override(self.domain_name)
form = self._get_form("form-1")
form.problem = "something went wrong"
form.save()
self.do_case_diffs("pending")
self.compare_diffs([
Diff('case-1', 'missing', ['*'], old=MISSING, new='present'),
])
with patch.object(casediff, "find_form_ids_updating_case", find_forms):
self.do_migration(forms="missing", diffs=[])
def test_patch_case_closed_in_couch_not_sql(self):
self.submit_form(make_test_form("form-1", case_id="case-1"))
self.do_migration(case_diff="none")
with self.augmented_couch_case("case-1") as case:
case.closed = True
case.closed_by = "system"
case.closed_on = datetime(2010, 9, 8, 7, 6, 5)
case.user_id = "system"
case.save()
self.do_case_diffs()
self.compare_diffs([
Diff('case-1', 'diff', ['closed'], old=True, new=False),
Diff('case-1', 'diff', ['user_id'], old='system', new='3fae4ea4af440efaa53441b5'),
Diff('case-1', 'type', ['closed_by'], old='system', new=None),
Diff('case-1', 'type', ['closed_on'], old='2010-09-08T07:06:05.000000Z', new=None),
])
self.do_case_patch()
self.compare_diffs()
self.assert_patched_cases(["case-1"])
def test_patch_case_index(self):
self.submit_form(make_test_form("form-1", case_id="case-1"))
self.do_migration(case_diff="none")
index = {
"doc_type": "CommCareCaseIndex",
"identifier": "parent",
"referenced_type": "household",
"referenced_id": "a53346d5",
"relationship": "child",
}
with self.augmented_couch_case("case-1") as case:
case.indices = [CommCareCaseIndex.wrap(index)]
case.save()
self.do_case_diffs()
self.compare_diffs([
Diff('case-1', 'missing', ['indices', '[*]'], old=index, new=MISSING),
])
self.do_case_patch()
self.compare_diffs()
self.assert_patched_cases(["case-1"])
def test_patch_missing_case_index(self):
self.submit_form(make_test_form("form-1", case_id="case-1"))
self.do_migration(case_diff="none")
CommCareCaseIndexSQL(
domain=self.domain_name,
case_id="case-1",
identifier="parent",
referenced_id="a53346d5",
referenced_type="household",
relationship_id=CommCareCaseIndexSQL.CHILD,
).save()
with self.diff_without_rebuild():
self.do_case_diffs()
index = {
"case_id": "case-1",
"identifier": "parent",
"referenced_id": "a53346d5",
"referenced_type": "household",
"relationship": "child",
}
self.compare_diffs([
Diff('case-1', 'missing', ['indices', '[*]'], old=MISSING, new=index),
])
with self.diff_without_rebuild():
self.do_case_patch()
self.compare_diffs()
self.assert_patched_cases(["case-1"])
def test_patch_missing_case_with_index(self):
self.submit_form(make_test_form("form-1", case_id="case-1"))
case = CaseAccessorCouch.get_case("case-1")
case.indices = [CommCareCaseIndex.wrap({
"doc_type": "CommCareCaseIndex",
"identifier": "parent",
"referenced_type": "household",
"referenced_id": "a53346d5",
"relationship": "child",
})]
case.save()
FormAccessors(self.domain_name).soft_delete_forms(
['form-1'], datetime.utcnow(), 'test-deletion')
self.do_migration(diffs=IGNORE)
self.compare_diffs(changes=[
Diff('case-1', 'missing', ['*'], old='*', new=MISSING, reason="deleted forms"),
])
self.do_case_patch()
self.compare_diffs()
self.assert_patched_cases(["case-1"])
def test_patch_cases_with_diffs(self):
self.do_migration_with_diffs_and_changes()
self.do_case_patch(cases="with-diffs")
self.assert_patched_cases(["diff-case"])
self.compare_diffs(changes=[
Diff('change-case', 'missing', ['*'], old='*', new=MISSING, reason="deleted forms"),
])
def test_patch_cases_with_changes(self):
self.do_migration_with_diffs_and_changes()
self.do_case_patch(cases="with-changes")
self.assert_patched_cases(["change-case"])
self.compare_diffs([
Diff('diff-case', 'diff', ['age'], old='30', new='27'),
Diff('diff-case', 'set_mismatch', ['xform_ids', '[*]'], old='one', new=''),
])
def do_migration_with_diffs_and_changes(self):
self.submit_form(make_test_form("zero", case_id="diff-case", age=27))
one = self.submit_form(make_test_form("one", case_id="diff-case", age=30))
one.initial_processing_complete = False
one.save()
two = self.submit_form(make_test_form("two", case_id="change-case", age=27))
FormAccessors(self.domain_name).soft_delete_forms(
[two.form_id], datetime.utcnow(), 'test-deletion')
self.do_migration(diffs=IGNORE)
self.compare_diffs(diffs=[
Diff('diff-case', 'diff', ['age'], old='30', new='27'),
Diff('diff-case', 'set_mismatch', ['xform_ids', '[*]'], old='one', new=''),
], changes=[
Diff('change-case', 'missing', ['*'], old='*', new=MISSING, reason="deleted forms"),
])
def create_form_with_duplicate_stock_transaction(self):
from corehq.apps.commtrack.helpers import make_product
from corehq.apps.commtrack.processing import process_stock
thing1 = make_product(self.domain_name, 'thing-1', 'thing-1')
self.submit_form(LEDGER_FORM.replace("thing-1", thing1._id))
stock_result = process_stock([self._get_form("ledger-form")])
stock_result.populate_models()
for model in stock_result.models_to_save:
model.save()
return thing1._id
def do_migration(self, *args, **kw):
if kw.get("case_diff") != "patch":
kw.setdefault("diffs", IGNORE)
return super().do_migration(*args, **kw)
def do_case_diffs(self, cases=None, stop=False):
self.migration_success = True # clear migration failure on diff cases
migrator = mod.get_migrator(self.domain_name, self.state_dir)
return mod.do_case_diffs(migrator, cases, stop=stop, batch_size=100)
def do_case_patch(self, cases=None, stop=False):
self.migration_success = True # clear migration failure on diff cases
migrator = mod.get_migrator(self.domain_name, self.state_dir)
return mod.do_case_patch(migrator, cases, stop=stop, batch_size=100)
@contextmanager
def augmented_couch_case(self, case_id):
case = CaseAccessorCouch.get_case(case_id)
with self.diff_without_rebuild():
yield case
def assert_patched_cases(self, case_ids=None):
statedb = open_state_db(self.domain_name, self.state_dir)
self.assertEqual(list(statedb.iter_patched_case_ids()), case_ids or [])
self.assertFalse(list(statedb.iter_undiffed_case_ids()))
THING_FORM = """
<?xml version="1.0" ?>
<data
name="Thing"
uiVersion="1"
version="11"
xmlns="http://openrosa.org/formdesigner/thing-form"
xmlns:jrm="http://dev.commcarehq.org/jr/xforms"
>
<thing>1</thing>
<n0:case
case_id="test-case"
date_modified="2014-08-04T18:25:56.656Z"
user_id="a362027f228d"
xmlns:n0="http://commcarehq.org/case/transaction/v2"
>
<n0:create>
<n0:case_name>Thing</n0:case_name>
<n0:owner_id>a362027f228d</n0:owner_id>
<n0:case_type>testing</n0:case_type>
</n0:create>
<n0:update>
<n0:thing>1</n0:thing>
</n0:update>
</n0:case>
<n1:meta xmlns:n1="http://openrosa.org/jr/xforms">
<n1:deviceID>cloudcare</n1:deviceID>
<n1:timeStart>2014-07-13T11:20:11.381Z</n1:timeStart>
<n1:timeEnd>2014-08-04T18:25:56.656Z</n1:timeEnd>
<n1:username>thing</n1:username>
<n1:userID>a362027f228d</n1:userID>
<n1:instanceID>thing-form</n1:instanceID>
<n2:appVersion xmlns:n2="http://commcarehq.org/xforms">2.0</n2:appVersion>
</n1:meta>
</data>
""".strip()
UPDATE_FORM = """
<?xml version="1.0" ?>
<data
name="Update"
uiVersion="1"
version="11"
xmlns="http://openrosa.org/formdesigner/update-form"
xmlns:jrm="http://dev.commcarehq.org/jr/xforms"
>
<age>27</age>
<n0:case
case_id="test-case"
date_modified="2015-08-04T18:25:56.656Z"
user_id="3fae4ea4af440efaa53441b5"
xmlns:n0="http://commcarehq.org/case/transaction/v2"
>
<n0:update>
<n0:age>27</n0:age>
</n0:update>
</n0:case>
<n1:meta xmlns:n1="http://openrosa.org/jr/xforms">
<n1:deviceID>cloudcare</n1:deviceID>
<n1:timeStart>2015-07-13T11:20:11.381Z</n1:timeStart>
<n1:timeEnd>2015-08-04T18:25:56.656Z</n1:timeEnd>
<n1:username>jeremy</n1:username>
<n1:userID>3fae4ea4af440efaa53441b5</n1:userID>
<n1:instanceID>update-form</n1:instanceID>
<n2:appVersion xmlns:n2="http://commcarehq.org/xforms">2.0</n2:appVersion>
</n1:meta>
</data>
""".strip()
LEDGER_FORM = """
<?xml version="1.0" ?>
<data
name="Ledger"
uiVersion="1"
version="11"
xmlns="http://openrosa.org/formdesigner/ledger-form"
xmlns:jrm="http://dev.commcarehq.org/jr/xforms"
>
<thing>1</thing>
<n2:transfer
date="2014-08-04"
dest="test-case"
section-id="things"
type="write_things_to_ledger"
xmlns:n2="http://commcarehq.org/ledger/v1"
>
<n2:entry id="thing-1" quantity="1"/>
</n2:transfer>
<n0:case
case_id="test-case"
date_modified="2014-08-04T18:25:56.656Z"
user_id="a362027f228d"
xmlns:n0="http://commcarehq.org/case/transaction/v2"
>
<n0:create>
<n0:case_name>Ledger</n0:case_name>
<n0:owner_id>a362027f228d</n0:owner_id>
<n0:case_type>testing</n0:case_type>
</n0:create>
<n0:update>
<n0:thing>1</n0:thing>
</n0:update>
</n0:case>
<n1:meta xmlns:n1="http://openrosa.org/jr/xforms">
<n1:deviceID>cloudcare</n1:deviceID>
<n1:timeStart>2014-07-13T11:20:11.381Z</n1:timeStart>
<n1:timeEnd>2014-08-04T18:25:56.656Z</n1:timeEnd>
<n1:username>thing</n1:username>
<n1:userID>a362027f228d</n1:userID>
<n1:instanceID>ledger-form</n1:instanceID>
<n2:appVersion xmlns:n2="http://commcarehq.org/xforms">2.0</n2:appVersion>
</n1:meta>
</data>
""".strip()
|
the-stack_0_4528 | import http.server

class MyHandler(http.server.SimpleHTTPRequestHandler):
    def do_GET(self):
        # Request paths always begin with "/", so match "/good" here.
        if self.path == "/good":
            self.send_response(200)
            self.send_header("Content-type", "text/plain")
            self.end_headers()
            self.wfile.write(b"A good request")
            return
        self.send_response(400)
        self.send_header("Content-type", "text/plain")
        self.end_headers()
        self.wfile.write(b"A bad request")

http.server.HTTPServer(('', 8000), MyHandler).serve_forever()
|
the-stack_0_4531 | import argparse
import logging
from dvc.command.base import CmdBase, append_doc_link
from dvc.exceptions import DvcException
logger = logging.getLogger(__name__)
class CmdRun(CmdBase):
def run(self):
if not any(
[
self.args.deps,
self.args.outs,
self.args.outs_no_cache,
self.args.metrics,
self.args.metrics_no_cache,
self.args.plots,
self.args.plots_no_cache,
self.args.outs_persist,
self.args.outs_persist_no_cache,
self.args.params,
self.args.command,
]
): # pragma: no cover
logger.error(
"too few arguments. Specify at least one: `-d`, `-o`, `-O`, "
"`-m`, `-M`, `-p`, `--plots`, `--plots-no-cache`, "
"`--outs-persist`, `--outs-persist-no-cache`, `command`."
)
return 1
try:
self.repo.run(
cmd=self._parsed_cmd(),
outs=self.args.outs,
outs_no_cache=self.args.outs_no_cache,
metrics=self.args.metrics,
metrics_no_cache=self.args.metrics_no_cache,
plots=self.args.plots,
plots_no_cache=self.args.plots_no_cache,
deps=self.args.deps,
params=self.args.params,
fname=self.args.file,
wdir=self.args.wdir,
no_exec=self.args.no_exec,
force=self.args.force,
run_cache=not self.args.no_run_cache,
no_commit=self.args.no_commit,
outs_persist=self.args.outs_persist,
outs_persist_no_cache=self.args.outs_persist_no_cache,
always_changed=self.args.always_changed,
name=self.args.name,
single_stage=self.args.single_stage,
external=self.args.external,
)
except DvcException:
logger.exception("")
return 1
return 0
def _parsed_cmd(self):
"""
We need to take into account two cases:
- ['python code.py foo bar']: Used mainly with dvc as a library
- ['echo', 'foo bar']: List of arguments received from the CLI
The second case would need quoting, as it was passed through:
dvc run echo "foo bar"
"""
if len(self.args.command) < 2:
return " ".join(self.args.command)
return " ".join(self._quote_argument(arg) for arg in self.args.command)
def _quote_argument(self, argument):
if " " not in argument or '"' in argument:
return argument
return f'"{argument}"'
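# --- Hedged illustration (standalone; mirrors the two command cases documented
# in CmdRun._parsed_cmd above, so the quoting rules can be tried without a dvc
# repo; the helper name is local to this sketch) -----------------------------
def _example_parsed_cmd(command):
    def _quote(argument):
        if " " not in argument or '"' in argument:
            return argument
        return f'"{argument}"'
    if len(command) < 2:
        return " ".join(command)
    return " ".join(_quote(arg) for arg in command)
# _example_parsed_cmd(["python code.py foo bar"]) -> 'python code.py foo bar'
# _example_parsed_cmd(["echo", "foo bar"])        -> 'echo "foo bar"'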
def add_parser(subparsers, parent_parser):
RUN_HELP = "Generate a stage file from a command and execute the command."
run_parser = subparsers.add_parser(
"run",
parents=[parent_parser],
description=append_doc_link(RUN_HELP, "run"),
help=RUN_HELP,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
run_parser.add_argument(
"-d",
"--deps",
action="append",
default=[],
help="Declare dependencies for reproducible cmd.",
metavar="<path>",
)
run_parser.add_argument(
"-n", "--name", help="Stage name.",
)
run_parser.add_argument(
"-o",
"--outs",
action="append",
default=[],
help="Declare output file or directory.",
metavar="<filename>",
)
run_parser.add_argument(
"-O",
"--outs-no-cache",
action="append",
default=[],
help="Declare output file or directory "
"(do not put into DVC cache).",
metavar="<filename>",
)
run_parser.add_argument(
"-p",
"--params",
action="append",
default=[],
help="Declare parameter to use as additional dependency.",
metavar="[<filename>:]<params_list>",
)
run_parser.add_argument(
"-m",
"--metrics",
action="append",
default=[],
help="Declare output metric file.",
metavar="<path>",
)
run_parser.add_argument(
"-M",
"--metrics-no-cache",
action="append",
default=[],
help="Declare output metric file (do not put into DVC cache).",
metavar="<path>",
)
run_parser.add_argument(
"--plots",
action="append",
default=[],
help="Declare output plot file.",
metavar="<path>",
)
run_parser.add_argument(
"--plots-no-cache",
action="append",
default=[],
help="Declare output plot file (do not put into DVC cache).",
metavar="<path>",
)
run_parser.add_argument(
"--file",
help="Specify name of the DVC-file this command will generate.",
metavar="<filename>",
)
run_parser.add_argument(
"-w",
"--wdir",
help="Directory within your repo to run your command in.",
metavar="<path>",
)
run_parser.add_argument(
"--no-exec",
action="store_true",
default=False,
help="Only create stage file without actually running it.",
)
run_parser.add_argument(
"-f",
"--force",
action="store_true",
default=False,
help="Overwrite existing stage",
)
run_parser.add_argument(
"--no-run-cache",
action="store_true",
default=False,
help=(
"Execute the command even if this stage has already been run "
"with the same command/dependencies/outputs/etc before."
),
)
run_parser.add_argument(
"--no-commit",
action="store_true",
default=False,
help="Don't put files/directories into cache.",
)
run_parser.add_argument(
"--outs-persist",
action="append",
default=[],
help="Declare output file or directory that will not be "
"removed upon repro.",
metavar="<filename>",
)
run_parser.add_argument(
"--outs-persist-no-cache",
action="append",
default=[],
help="Declare output file or directory that will not be "
"removed upon repro (do not put into DVC cache).",
metavar="<filename>",
)
run_parser.add_argument(
"--always-changed",
action="store_true",
default=False,
help="Always consider this DVC-file as changed.",
)
run_parser.add_argument(
"--single-stage",
action="store_true",
default=False,
help=argparse.SUPPRESS,
)
run_parser.add_argument(
"--external",
action="store_true",
default=False,
help="Allow outputs that are outside of the DVC repository.",
)
run_parser.add_argument(
"command", nargs=argparse.REMAINDER, help="Command to execute."
)
run_parser.set_defaults(func=CmdRun)
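# --- Hedged usage sketch: wiring this subparser into a CLI. The program name
# and the empty parent parser are assumptions; dvc's real CLI supplies its own
# parent parser with global options. -----------------------------------------
if __name__ == "__main__":
    _parent = argparse.ArgumentParser(add_help=False)
    _root = argparse.ArgumentParser(prog="dvc-example")
    add_parser(_root.add_subparsers(dest="cmd"), _parent)
    _args = _root.parse_args(["run", "-n", "train", "python", "train.py"])
    print(_args.command)  # ['python', 'train.py']
    print(_args.func)     # CmdRun class, set via set_defaults above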
|
the-stack_0_4532 | from functools import lru_cache as memoized
import os
from os import path
import sys
import yaml
import geopandas as gpd
from mapshader.colors import colors
from mapshader.io import load_raster
from mapshader.io import load_vector
from mapshader.transforms import get_transform_by_name
import spatialpandas
class MapSource(object):
def __init__(self,
name=None,
description=None,
filepath=None,
legend=None,
config_path=None,
data=None,
geometry_type=None,
key=None,
text=None,
fields=None,
span=None,
route=None,
geometry_field='geometry',
xfield='geometry',
yfield='geometry',
zfield=None,
agg_func=None,
raster_interpolate='linear',
shade_how='linear',
cmap=colors['viridis'],
color_key=None,
dynspread=None,
extras=None,
raster_padding=0,
service_types=None,
full_extent=None,
default_extent=None,
default_height=256,
default_width=256,
overviews=None,
transforms=None,
attrs=None,
preload=False):
if fields is None and isinstance(data, (gpd.GeoDataFrame)):
fields = [geometry_field]
if zfield:
fields.append(zfield)
if extras is None:
extras = []
if transforms is None:
transforms = []
if overviews is None:
overviews = {}
if service_types is None:
service_types = ('tile', 'image', 'wms', 'geojson')
if span == 'min/max' and zfield is None and geometry_type != 'raster':
raise ValueError('You must include a zfield for min/max scan calculation')
if legend is not None and geometry_type == 'raster':
if legend[0].get('value') is not None:
cmap = {}
for leg in legend:
cor = leg['color']
val = leg['value']
if isinstance(val, (list, tuple)):
val = tuple(val)
cmap[val] = cor
val = 20037508.3427892
if default_extent is None:
default_extent = [-val, -val, val, val]
self.name = name
self.description = description
self.filepath = filepath
self.config_path = config_path
self.geometry_type = geometry_type
self.key = key
self.text = text
self.legend = legend
self.fields = fields
self.span = span
self.route = route
self.xfield = xfield
self.raster_padding = raster_padding
self.yfield = yfield
self.zfield = zfield
self.agg_func = agg_func
self.overviews = overviews
self.raster_agg_func = raster_interpolate
self.shade_how = shade_how
self.cmap = cmap
self.color_key = color_key
self.dynspread = dynspread
self.extras = extras
self.service_types = service_types
self.transforms = transforms
self.default_extent = default_extent
self.default_width = default_width
self.default_height = default_height
self.preload = preload
self.geometry_field = geometry_field
self.is_loaded = False
self.data = data
# autoload if overviews are present
contains_overviews = bool(len([t for t in transforms if 'overviews' in t['name']]))
if self.preload or contains_overviews:
self.load()
@property
def load_func(self):
raise NotImplementedError()
def get_full_extent(self):
raise NotImplementedError()
def load(self):
if self.is_loaded:
return self
if self.data is None:
if self.config_path:
ogcwd = os.getcwd()
config_dir = path.abspath(path.dirname(self.config_path))
os.chdir(config_dir)
try:
data_path = path.abspath(path.expanduser(self.filepath))
finally:
os.chdir(ogcwd)
elif self.filepath.startswith('zip'):
print('Zipfile Path', file=sys.stdout)
data_path = self.filepath
elif not path.isabs(self.filepath):
print('Not Absolute', file=sys.stdout)
data_path = path.abspath(path.expanduser(self.filepath))
else:
print(f'Using given filepath unmodified: {self.filepath}', file=sys.stdout)
data_path = self.filepath
data = self.load_func(data_path)
else:
data = self.data
if self.fields:
data = data[self.fields]
self.data = data
self._finish_load()
return self
def _finish_load(self):
if self.is_loaded:
return self
self._apply_transforms()
self.is_loaded = True
def _apply_transforms(self):
print('# ----------------------', file=sys.stdout)
print(f'# APPLYING TRANSFORMS {self.name}', file=sys.stdout)
print('# ----------------------', file=sys.stdout)
for trans in self.transforms:
transform_name = trans['name']
print(f'\tApplying {transform_name}', file=sys.stdout)
func = get_transform_by_name(transform_name)
args = trans.get('args', {})
if 'overviews' in transform_name:
self.overviews = func(self.data, **args)
else:
self.data = func(self.data, **args)
# apply transforms to overviews if they exist
for level, overview_data in self.overviews.items():
self.overviews[level] = func(overview_data, **args)
return self
@staticmethod
def from_obj(obj: dict):
transforms = obj.get('transforms')
if transforms and isinstance(transforms, (list, tuple)):
n = 'raster_to_categorical_points'
has_to_vector = len([t for t in transforms if t['name'] == n])
else:
has_to_vector = False
if obj['geometry_type'] == 'raster' or has_to_vector:
return RasterSource(**obj)
else:
return VectorSource(**obj)
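# --- Hedged usage sketch (standalone illustration; world_countries_source()
# is defined further down in this module and relies on geopandas' bundled
# naturalearth_lowres dataset) -------------------------------------------------
def _example_load_world_countries():
    obj = world_countries_source()
    source = MapSource.from_obj(obj)  # geometry_type 'polygon' -> VectorSource
    return source.load()              # reads the file and applies the transforms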
class RasterSource(MapSource):
@property
def load_func(self):
return load_raster
@property
@memoized()
def full_extent(self):
return (self.data.coords['x'].min().compute().item(),
self.data.coords['y'].min().compute().item(),
self.data.coords['x'].max().compute().item(),
self.data.coords['y'].max().compute().item())
class VectorSource(MapSource):
@property
def load_func(self):
return load_vector
@property
@memoized()
def full_extent(self):
if isinstance(self.data, spatialpandas.GeoDataFrame):
return self.data.to_geopandas()[self.geometry_field].total_bounds
else:
return self.data[self.geometry_field].total_bounds
class MapService():
def __init__(self, source: MapSource, renderers=[]):
self.source = source
self.renderers = renderers
@property
def key(self):
return f'{self.source.key}-{self.service_type}'
@property
def name(self):
return f'{self.source.name} {self.service_type}'
@property
def legend_name(self):
return f'{self.name}-legend'
@property
def default_extent(self):
return self.source.default_extent
@property
def default_width(self):
return self.source.default_width
@property
def default_height(self):
return self.source.default_height
@property
def service_page_url(self):
return f'/{self.key}'
@property
def legend_url(self):
return f'/{self.key}/legend'
@property
def service_page_name(self):
return f'/{self.key}-{self.service_type}'
@property
def service_url(self):
raise NotImplementedError()
@property
def client_url(self):
raise NotImplementedError()
@property
def default_url(self):
raise NotImplementedError()
@property
def service_type(self):
raise NotImplementedError()
class TileService(MapService):
@property
def service_url(self):
return f'/{self.key}' + '/tile/<z>/<x>/<y>'
@property
def client_url(self):
return f'/{self.key}' + '/tile/{z}/{x}/{y}'
@property
def default_url(self):
return f'/{self.key}' + '/tile/0/0/0'
@property
def service_type(self):
return 'tile'
class ImageService(MapService):
@property
def service_url(self):
url = (f'/{self.key}'
'/image'
'/<xmin>/<ymin>/<xmax>/<ymax>'
'/<width>/<height>')
return url
@property
def client_url(self):
return f'/{self.key}' + '/image/{XMIN}/{YMIN}/{XMAX}/{YMAX}/{width}/{height}'
@property
def default_url(self):
xmin = self.default_extent[0]
ymin = self.default_extent[1]
xmax = self.default_extent[2]
ymax = self.default_extent[3]
width = self.default_width
height = self.default_height
return f'/{self.key}/image/{xmin}/{ymin}/{xmax}/{ymax}/{width}/{height}'
@property
def service_type(self):
return 'image'
class WMSService(MapService):
@property
def service_url(self):
url = f'/{self.key}/wms'
return url
@property
def client_url(self, width=256, height=256):
url = f'/{self.key}'
url += '?bbox={XMIN},{YMIN},{XMAX},{YMAX}'
url += f'&width={width}&height={height}'
return url
@property
def default_url(self):
xmin = self.default_extent[0]
ymin = self.default_extent[1]
xmax = self.default_extent[2]
ymax = self.default_extent[3]
width = self.default_width
height = self.default_height
return f'/{self.key}?bbox={xmin},{ymin},{xmax},{ymax}&width={width}&height={height}'
@property
def service_type(self):
return 'wms'
class GeoJSONService(MapService):
@property
def service_url(self):
url = f'/{self.key}/geojson'
return url
@property
def client_url(self):
url = f'/{self.key}/geojson'
return url
@property
def default_url(self):
return f'/{self.key}/geojson'
@property
def service_type(self):
return 'geojson'
# ----------------------------------------------------------------------------
# DEFAULT MAP SOURCES
# ----------------------------------------------------------------------------
def world_countries_source():
# construct transforms
select_by_attrs_transform = dict(name='select_by_attributes',
args=dict(field='name',
value=['Antarctica', 'Fr. S. Antarctic Lands'],
operator='NOT IN'))
reproject_transform = dict(name='reproject_vector', args=dict(epsg=3857))
sp_transform = dict(name='to_spatialpandas', args=dict(geometry_field='geometry'))
overviews_transform = dict(name='build_vector_overviews',
args=dict(levels={'0': 10000,
'1': 2500,
'2': 1250,
'3': 650,
'4': 300,
'5': 150,
'6': 75,
'7': 32,
'8': 20,
'9': 10,
'10': 5},
geometry_field='geometry'))
transforms = [select_by_attrs_transform,
reproject_transform,
overviews_transform,
sp_transform]
# construct value obj
source_obj = dict()
source_obj['name'] = 'World Countries'
source_obj['key'] = 'world-countries'
source_obj['text'] = 'World Countries'
source_obj['description'] = 'World Country Polygons'
source_obj['geometry_type'] = 'polygon'
source_obj['agg_func'] = 'max'
source_obj['shade_how'] = 'linear'
source_obj['span'] = 'min/max'
source_obj['raster_interpolate'] = 'linear'
source_obj['xfield'] = 'x'
source_obj['yfield'] = 'y'
source_obj['zfield'] = 'pop_est'
source_obj['filepath'] = gpd.datasets.get_path('naturalearth_lowres')
source_obj['transforms'] = transforms
source_obj['service_types'] = ['tile', 'wms', 'image', 'geojson']
return source_obj
def world_boundaries_source():
# construct transforms
select_by_attrs_transform = dict(name='select_by_attributes',
args=dict(field='name',
value=['Antarctica', 'Fr. S. Antarctic Lands'],
operator='NOT IN'))
reproject_transform = dict(name='reproject_vector', args=dict(epsg=3857))
polygon_to_line_transform = dict(name='polygon_to_line', args=dict(geometry_field='geometry'))
sp_transform = dict(name='to_spatialpandas', args=dict(geometry_field='geometry'))
transforms = [select_by_attrs_transform,
polygon_to_line_transform,
reproject_transform,
sp_transform]
# construct value obj
source_obj = dict()
source_obj['name'] = 'World Boundaries'
source_obj['key'] = 'world-boundaries'
source_obj['text'] = 'World Boundaries'
source_obj['description'] = 'World Country Boundaries'
source_obj['geometry_type'] = 'line'
source_obj['agg_func'] = 'max'
source_obj['shade_how'] = 'linear'
source_obj['cmap'] = ['aqua', 'aqua']
source_obj['raster_interpolate'] = 'linear'
source_obj['xfield'] = 'x'
source_obj['yfield'] = 'y'
source_obj['filepath'] = gpd.datasets.get_path('naturalearth_lowres')
source_obj['transforms'] = transforms
source_obj['service_types'] = ['tile', 'wms', 'image', 'geojson']
return source_obj
def world_cities_source():
# construct transforms
reproject_transform = dict(name='reproject_vector', args=dict(epsg=3857))
add_xy_fields_transform = dict(name='add_xy_fields', args=dict(geometry_field='geometry'))
sp_transform = dict(name='to_spatialpandas', args=dict(geometry_field='geometry'))
transforms = [reproject_transform, add_xy_fields_transform, sp_transform]
# construct value obj
source_obj = dict()
source_obj['name'] = 'World Cities'
source_obj['key'] = 'world-cities'
source_obj['text'] = 'World Cities'
source_obj['description'] = 'World Cities Point Locations'
source_obj['geometry_type'] = 'point'
source_obj['agg_func'] = 'max'
source_obj['cmap'] = ['aqua', 'aqua']
source_obj['shade_how'] = 'linear'
source_obj['dynspread'] = 2
source_obj['raster_interpolate'] = 'linear'
source_obj['xfield'] = 'X'
source_obj['yfield'] = 'Y'
source_obj['filepath'] = gpd.datasets.get_path('naturalearth_cities')
source_obj['transforms'] = transforms
source_obj['service_types'] = ['tile', 'wms', 'image', 'geojson']
return source_obj
def nybb_source():
# construct transforms
reproject_transform = dict(name='reproject_vector', args=dict(epsg=3857))
sp_transform = dict(name='to_spatialpandas', args=dict(geometry_field='geometry'))
transforms = [reproject_transform, sp_transform]
# construct value obj
source_obj = dict()
source_obj['name'] = 'NYC Admin'
source_obj['key'] = 'nyc-boroughs'
source_obj['text'] = 'NYC Boroughs'
source_obj['description'] = 'New York City Boroughs'
source_obj['geometry_type'] = 'polygon'
source_obj['agg_func'] = 'max'
source_obj['shade_how'] = 'linear'
source_obj['span'] = 'min/max'
source_obj['dynspread'] = None
source_obj['raster_interpolate'] = 'linear'
source_obj['xfield'] = 'geometry'
source_obj['yfield'] = 'geometry'
source_obj['zfield'] = 'BoroCode'
source_obj['filepath'] = gpd.datasets.get_path('nybb')
source_obj['transforms'] = transforms
source_obj['service_types'] = ['tile', 'wms', 'image', 'geojson']
return source_obj
def elevation_source():
# find data path
HERE = path.abspath(path.dirname(__file__))
FIXTURES_DIR = path.join(HERE, 'tests', 'fixtures')
elevation_path = path.join(FIXTURES_DIR, 'elevation.tif')
# construct transforms
squeeze_transform = dict(name='squeeze', args=dict(dim='band'))
cast_transform = dict(name='cast', args=dict(dtype='float64'))
orient_transform = dict(name='orient_array')
flip_transform = dict(name='flip_coords', args=dict(dim='y'))
reproject_transform = dict(name='reproject_raster', args=dict(epsg=3857))
transforms = [squeeze_transform,
cast_transform,
orient_transform,
flip_transform,
reproject_transform]
# construct value obj
source_obj = dict()
source_obj['name'] = 'Elevation'
source_obj['key'] = 'elevation'
source_obj['text'] = 'Elevation'
source_obj['description'] = 'Global Elevation Dataset'
source_obj['geometry_type'] = 'raster'
source_obj['shade_how'] = 'linear'
source_obj['cmap'] = ['white', 'black']
source_obj['span'] = (58, 248)
source_obj['raster_padding'] = 0
source_obj['raster_interpolate'] = 'linear'
source_obj['xfield'] = 'geometry'
source_obj['yfield'] = 'geometry'
source_obj['filepath'] = elevation_path
source_obj['transforms'] = transforms
source_obj['service_types'] = ['tile', 'wms', 'image', 'geojson']
return source_obj
def elevation_source_netcdf():
# find data path
HERE = path.abspath(path.dirname(__file__))
FIXTURES_DIR = path.join(HERE, 'tests', 'fixtures')
elevation_path = path.join(FIXTURES_DIR, 'elevation.nc')
# construct transforms
transforms = []
# construct value obj
source_obj = dict()
source_obj['name'] = 'Elevation NetCDF'
source_obj['key'] = 'elevation-netcdf'
source_obj['text'] = 'Elevation NetCDF'
source_obj['description'] = 'Global Elevation Dataset (NetCDF)'
source_obj['geometry_type'] = 'raster'
source_obj['shade_how'] = 'linear'
source_obj['cmap'] = ['white', 'black']
source_obj['span'] = (58, 248)
source_obj['raster_padding'] = 0
source_obj['raster_interpolate'] = 'linear'
source_obj['xfield'] = 'geometry'
source_obj['yfield'] = 'geometry'
source_obj['filepath'] = elevation_path
source_obj['transforms'] = transforms
source_obj['service_types'] = ['tile', 'wms', 'image', 'geojson']
return source_obj
def parse_sources(source_objs, config_path=None, contains=None):
service_classes = {
'tile': TileService,
'wms': WMSService,
'image': ImageService,
'geojson': GeoJSONService,
}
for source in source_objs:
for service_type in source['service_types']:
source['config_path'] = config_path
if contains and contains not in source.get('key'):
continue
# create sources
source_obj = MapSource.from_obj(source)
# create services
ServiceKlass = service_classes[service_type]
# TODO: add renderers here...
yield ServiceKlass(source=source_obj)
def get_services(config_path=None, include_default=True, contains=None, sources=None):
source_objs = None
if sources is not None:
source_objs = sources
elif config_path is None:
print('No Config Found...using default services...', file=sys.stdout)
source_objs = [world_countries_source(),
world_boundaries_source(),
world_cities_source(),
nybb_source(),
elevation_source(),
elevation_source_netcdf()]
else:
with open(config_path, 'r') as f:
content = f.read()
config_obj = yaml.safe_load(content)
source_objs = config_obj['sources']
if include_default:
source_objs += [world_countries_source(),
world_boundaries_source(),
world_cities_source(),
nybb_source(),
elevation_source()]
for service in parse_sources(source_objs, config_path=config_path, contains=contains):
yield service
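# --- Hedged usage sketch (not part of mapshader itself): list the default
# services built from the bundled geopandas datasets and test fixtures. -------
if __name__ == "__main__":
    for service in get_services(include_default=True):
        print(service.name, '->', service.default_url)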
|
the-stack_0_4535 | # Copyright 2002 by Andrew Dalke. All rights reserved.
# Revisions 2007-2016 copyright by Peter Cock. All rights reserved.
# Revisions 2008-2009 copyright by Cymon J. Cox. All rights reserved.
#
# This file is part of the Biopython distribution and governed by your
# choice of the "Biopython License Agreement" or the "BSD 3-Clause License".
# Please see the LICENSE file that should have been included as part of this
# package.
#
# Note that BioSQL (including the database schema and scripts) is
# available and licensed separately. Please consult www.biosql.org
"""Implementations of Biopython-like Seq objects on top of BioSQL.
This allows retrieval of items stored in a BioSQL database using
a biopython-like SeqRecord and Seq interface.
Note: Currently we do not support recording per-letter-annotations
(like quality scores) in BioSQL.
"""
from Bio.Seq import Seq, UnknownSeq
from Bio.SeqRecord import SeqRecord, _RestrictedDict
from Bio import SeqFeature
class DBSeq(Seq):
"""BioSQL equivalent of the Biopython Seq object."""
def __init__(self, primary_id, adaptor, alphabet=None, start=0, length=0):
"""Create a new DBSeq object referring to a BioSQL entry.
You wouldn't normally create a DBSeq object yourself, this is done
for you when retrieving a DBSeqRecord object from the database.
"""
if alphabet is not None:
raise ValueError("The alphabet argument is no longer supported")
self.primary_id = primary_id
self.adaptor = adaptor
self._length = length
self.start = start
def __len__(self):
"""Return the length of the sequence."""
return self._length
def __getitem__(self, index): # Seq API requirement
"""Return a subsequence or single letter."""
if isinstance(index, int):
# Return a single letter as a string
i = index
if i < 0:
if -i > self._length:
raise IndexError(i)
i = i + self._length
elif i >= self._length:
raise IndexError(i)
return self.adaptor.get_subseq_as_string(
self.primary_id, self.start + i, self.start + i + 1
)
if not isinstance(index, slice):
raise TypeError("Unexpected index type")
# Return the (sub)sequence as another DBSeq or Seq object
# (see the Seq object's __getitem__ method)
if index.start is None:
i = 0
else:
i = index.start
if i < 0:
# Map to equivalent positive index
if -i > self._length:
raise IndexError(i)
i = i + self._length
elif i >= self._length:
# Trivial case, should return empty string!
i = self._length
if index.stop is None:
j = self._length
else:
j = index.stop
if j < 0:
# Map to equivalent positive index
if -j > self._length:
raise IndexError(j)
j = j + self._length
elif j >= self._length:
j = self._length
if i >= j:
# Trivial case, empty string.
return Seq("")
elif index.step is None or index.step == 1:
# Easy case - can return a DBSeq with the start and end adjusted
return self.__class__(
self.primary_id, self.adaptor, None, self.start + i, j - i
)
else:
# Tricky. Will have to create a Seq object because of the stride
full = self.adaptor.get_subseq_as_string(
self.primary_id, self.start + i, self.start + j
)
return Seq(full[:: index.step])
def tostring(self):
"""Return the full sequence as a python string (DEPRECATED).
You are now encouraged to use str(my_seq) instead of
my_seq.tostring().
"""
import warnings
warnings.warn(
"This method is obsolete; please use str(my_seq) "
"instead of my_seq.tostring().",
PendingDeprecationWarning,
)
return self.adaptor.get_subseq_as_string(
self.primary_id, self.start, self.start + self._length
)
def __str__(self):
"""Return the full sequence as a python string."""
return self.adaptor.get_subseq_as_string(
self.primary_id, self.start, self.start + self._length
)
data = property(tostring, doc="Sequence as string (DEPRECATED)")
def toseq(self):
"""Return the full sequence as a Seq object."""
# Note - the method name copies that of the MutableSeq object
return Seq(str(self))
def __add__(self, other):
"""Add another sequence or string to this sequence.
The sequence is first converted to a Seq object before the addition.
The returned object is a Seq object, not a DBSeq object.
"""
return self.toseq() + other
def __radd__(self, other):
"""Add another sequence or string to the left.
The sequence is first converted to a Seq object before the addition.
The returned object is a Seq object, not a DBSeq object.
"""
return other + self.toseq()
def __mul__(self, other):
"""Multiply sequence by an integer.
The sequence is first converted to a Seq object before multiplication.
The returned object is a Seq object, not a DBSeq object.
"""
return self.toseq() * other
def __rmul__(self, other):
"""Multiply integer by a sequence.
The sequence is first converted to a Seq object before multiplication.
The returned object is a Seq object, not a DBSeq object.
"""
return other * self.toseq()
def __imul__(self, other):
"""Multiply sequence by integer in-place.
The sequence is first converted to a Seq object before multiplication.
The returned object is a Seq object, not a DBSeq object.
"""
return self.toseq() * other
def _retrieve_seq_len(adaptor, primary_id):
# The database schema ensures there will be only one matching row
seqs = adaptor.execute_and_fetchall(
"SELECT length FROM biosequence WHERE bioentry_id = %s", (primary_id,)
)
if not seqs:
return None
assert len(seqs) == 1
(given_length,) = seqs[0]
return int(given_length)
def _retrieve_seq(adaptor, primary_id):
# The database schema ensures there will be only one matching
# row in the table.
# If an UnknownSeq was recorded, seq will be NULL,
# but length will be populated. This means length(seq)
# will return None.
seqs = adaptor.execute_and_fetchall(
"SELECT alphabet, length, length(seq) FROM biosequence WHERE bioentry_id = %s",
(primary_id,),
)
if not seqs:
return
assert len(seqs) == 1
moltype, given_length, length = seqs[0]
try:
length = int(length)
given_length = int(given_length)
assert length == given_length
have_seq = True
except TypeError:
assert length is None
seqs = adaptor.execute_and_fetchall(
"SELECT alphabet, length, seq FROM biosequence WHERE bioentry_id = %s",
(primary_id,),
)
assert len(seqs) == 1
moltype, given_length, seq = seqs[0]
assert seq is None or seq == ""
length = int(given_length)
have_seq = False
del seq
del given_length
if have_seq:
return DBSeq(primary_id, adaptor, alphabet=None, start=0, length=int(length))
else:
if moltype in ("dna", "rna"):
character = "N"
elif moltype == "protein":
character = "X"
else:
character = "?"
return UnknownSeq(length, character=character)
def _retrieve_dbxrefs(adaptor, primary_id):
"""Retrieve the database cross references for the sequence (PRIVATE)."""
_dbxrefs = []
dbxrefs = adaptor.execute_and_fetchall(
"SELECT dbname, accession, version"
" FROM bioentry_dbxref join dbxref using (dbxref_id)"
" WHERE bioentry_id = %s"
' ORDER BY "rank"',
(primary_id,),
)
for dbname, accession, version in dbxrefs:
if version and version != "0":
v = "%s.%s" % (accession, version)
else:
v = accession
_dbxrefs.append("%s:%s" % (dbname, v))
return _dbxrefs
def _retrieve_features(adaptor, primary_id):
sql = (
'SELECT seqfeature_id, type.name, "rank"'
" FROM seqfeature join term type on (type_term_id = type.term_id)"
" WHERE bioentry_id = %s"
' ORDER BY "rank"'
)
results = adaptor.execute_and_fetchall(sql, (primary_id,))
seq_feature_list = []
for seqfeature_id, seqfeature_type, seqfeature_rank in results:
# Get qualifiers [except for db_xref which is stored separately]
qvs = adaptor.execute_and_fetchall(
"SELECT name, value"
" FROM seqfeature_qualifier_value join term using (term_id)"
" WHERE seqfeature_id = %s"
' ORDER BY "rank"',
(seqfeature_id,),
)
qualifiers = {}
for qv_name, qv_value in qvs:
qualifiers.setdefault(qv_name, []).append(qv_value)
# Get db_xrefs [special case of qualifiers]
qvs = adaptor.execute_and_fetchall(
"SELECT dbxref.dbname, dbxref.accession"
" FROM dbxref join seqfeature_dbxref using (dbxref_id)"
" WHERE seqfeature_dbxref.seqfeature_id = %s"
' ORDER BY "rank"',
(seqfeature_id,),
)
for qv_name, qv_value in qvs:
value = "%s:%s" % (qv_name, qv_value)
qualifiers.setdefault("db_xref", []).append(value)
# Get locations
results = adaptor.execute_and_fetchall(
"SELECT location_id, start_pos, end_pos, strand"
" FROM location"
" WHERE seqfeature_id = %s"
' ORDER BY "rank"',
(seqfeature_id,),
)
locations = []
# convert to Python standard form
# Convert strand = 0 to strand = None
# re: comment in Loader.py:
# Biopython uses None when we don't know strand information but
# BioSQL requires something (non null) and sets this as zero
# So we'll use the strand or 0 if Biopython spits out None
for location_id, start, end, strand in results:
if start:
start -= 1
if strand == 0:
strand = None
if strand not in (+1, -1, None):
raise ValueError(
"Invalid strand %s found in database for "
"seqfeature_id %s" % (strand, seqfeature_id)
)
if start is not None and end is not None and end < start:
import warnings
from Bio import BiopythonWarning
warnings.warn(
"Inverted location start/end (%i and %i) for "
"seqfeature_id %s" % (start, end, seqfeature_id),
BiopythonWarning,
)
# For SwissProt unknown positions (?)
if start is None:
start = SeqFeature.UnknownPosition()
if end is None:
end = SeqFeature.UnknownPosition()
locations.append((location_id, start, end, strand))
# Get possible remote reference information
remote_results = adaptor.execute_and_fetchall(
"SELECT location_id, dbname, accession, version"
" FROM location join dbxref using (dbxref_id)"
" WHERE seqfeature_id = %s",
(seqfeature_id,),
)
lookup = {}
for location_id, dbname, accession, version in remote_results:
if version and version != "0":
v = "%s.%s" % (accession, version)
else:
v = accession
# subfeature remote location db_refs are stored as an empty string
# when not present
if dbname == "":
dbname = None
lookup[location_id] = (dbname, v)
feature = SeqFeature.SeqFeature(type=seqfeature_type)
# Store the key as a private property
feature._seqfeature_id = seqfeature_id
feature.qualifiers = qualifiers
if len(locations) == 0:
pass
elif len(locations) == 1:
location_id, start, end, strand = locations[0]
# See Bug 2677, we currently don't record the location_operator
# For consistency with older versions Biopython, default to "".
feature.location_operator = _retrieve_location_qualifier_value(
adaptor, location_id
)
dbname, version = lookup.get(location_id, (None, None))
feature.location = SeqFeature.FeatureLocation(start, end)
feature.strand = strand
feature.ref_db = dbname
feature.ref = version
else:
locs = []
for location in locations:
location_id, start, end, strand = location
dbname, version = lookup.get(location_id, (None, None))
locs.append(
SeqFeature.FeatureLocation(
start, end, strand=strand, ref=version, ref_db=dbname
)
)
# Locations are typically in biological in order (see negative
# strands below), but because of remote locations for
# sub-features they are not necessarily in numerical order:
strands = {l.strand for l in locs}
if len(strands) == 1 and -1 in strands:
# Evil hack time for backwards compatibility
# TODO - Check if BioPerl and (old) Biopython did the same,
# we may have an existing incompatibility lurking here...
locs = locs[::-1]
feature.location = SeqFeature.CompoundLocation(locs, "join")
# TODO - See Bug 2677 - we don't yet record location operator,
# so for consistency with older versions of Biopython default
# to assuming its a join.
seq_feature_list.append(feature)
return seq_feature_list
def _retrieve_location_qualifier_value(adaptor, location_id):
value = adaptor.execute_and_fetch_col0(
"SELECT value FROM location_qualifier_value WHERE location_id = %s",
(location_id,),
)
try:
return value[0]
except IndexError:
return ""
def _retrieve_annotations(adaptor, primary_id, taxon_id):
annotations = {}
annotations.update(_retrieve_alphabet(adaptor, primary_id))
annotations.update(_retrieve_qualifier_value(adaptor, primary_id))
annotations.update(_retrieve_reference(adaptor, primary_id))
annotations.update(_retrieve_taxon(adaptor, primary_id, taxon_id))
annotations.update(_retrieve_comment(adaptor, primary_id))
# Convert values into strings in cases of unicode from the database.
# BioSQL could eventually be expanded to be unicode aware.
str_anns = {}
for key, val in annotations.items():
if isinstance(val, list):
val = [_make_unicode_into_string(x) for x in val]
elif isinstance(val, str):
val = str(val)
str_anns[key] = val
return str_anns
def _make_unicode_into_string(text):
if isinstance(text, str):
return str(text)
else:
return text
def _retrieve_alphabet(adaptor, primary_id):
results = adaptor.execute_and_fetchall(
"SELECT alphabet FROM biosequence WHERE bioentry_id = %s", (primary_id,),
)
assert len(results) == 1
alphabets = results[0]
assert len(alphabets) == 1
alphabet = alphabets[0]
if alphabet == "dna":
molecule_type = "DNA"
elif alphabet == "rna":
molecule_type = "RNA"
elif alphabet == "protein":
molecule_type = "protein"
else:
molecule_type = None
if molecule_type is not None:
return {"molecule_type": molecule_type}
else:
return {}
def _retrieve_qualifier_value(adaptor, primary_id):
qvs = adaptor.execute_and_fetchall(
"SELECT name, value"
" FROM bioentry_qualifier_value JOIN term USING (term_id)"
" WHERE bioentry_id = %s"
' ORDER BY "rank"',
(primary_id,),
)
qualifiers = {}
for name, value in qvs:
if name == "keyword":
name = "keywords"
# See handling of "date" in Loader.py
elif name == "date_changed":
name = "date"
elif name == "secondary_accession":
name = "accessions"
qualifiers.setdefault(name, []).append(value)
return qualifiers
def _retrieve_reference(adaptor, primary_id):
# XXX dbxref_qualifier_value
refs = adaptor.execute_and_fetchall(
"SELECT start_pos, end_pos, "
" location, title, authors,"
" dbname, accession"
" FROM bioentry_reference"
" JOIN reference USING (reference_id)"
" LEFT JOIN dbxref USING (dbxref_id)"
" WHERE bioentry_id = %s"
' ORDER BY "rank"',
(primary_id,),
)
references = []
for start, end, location, title, authors, dbname, accession in refs:
reference = SeqFeature.Reference()
# If the start/end are missing, reference.location is an empty list
if (start is not None) or (end is not None):
if start is not None:
start -= 1 # python counting
reference.location = [SeqFeature.FeatureLocation(start, end)]
# Don't replace the default "" with None.
if authors:
reference.authors = authors
if title:
reference.title = title
reference.journal = location
if dbname == "PUBMED":
reference.pubmed_id = accession
elif dbname == "MEDLINE":
reference.medline_id = accession
references.append(reference)
if references:
return {"references": references}
else:
return {}
def _retrieve_taxon(adaptor, primary_id, taxon_id):
a = {}
common_names = adaptor.execute_and_fetch_col0(
"SELECT name FROM taxon_name WHERE taxon_id = %s"
" AND name_class = 'genbank common name'",
(taxon_id,),
)
if common_names:
a["source"] = common_names[0]
scientific_names = adaptor.execute_and_fetch_col0(
"SELECT name FROM taxon_name WHERE taxon_id = %s"
" AND name_class = 'scientific name'",
(taxon_id,),
)
if scientific_names:
a["organism"] = scientific_names[0]
ncbi_taxids = adaptor.execute_and_fetch_col0(
"SELECT ncbi_taxon_id FROM taxon WHERE taxon_id = %s", (taxon_id,)
)
if ncbi_taxids and ncbi_taxids[0] and ncbi_taxids[0] != "0":
a["ncbi_taxid"] = ncbi_taxids[0]
# Old code used the left/right values in the taxon table to get the
# taxonomy lineage in one SQL command. This was actually very slow,
# and would fail if the (optional) left/right values were missing.
#
# The following code is based on a contribution from Eric Gibert, and
# relies on the taxon table's parent_taxon_id field only (ignoring the
# optional left/right values). This means that it has to make a
# separate SQL query for each entry in the lineage, but it does still
# appear to be *much* faster. See Bug 2494.
taxonomy = []
while taxon_id:
name, rank, parent_taxon_id = adaptor.execute_one(
"SELECT taxon_name.name, taxon.node_rank, taxon.parent_taxon_id"
" FROM taxon, taxon_name"
" WHERE taxon.taxon_id=taxon_name.taxon_id"
" AND taxon_name.name_class='scientific name'"
" AND taxon.taxon_id = %s",
(taxon_id,),
)
if taxon_id == parent_taxon_id:
# If the taxon table has been populated by the BioSQL script
# load_ncbi_taxonomy.pl this is how top parent nodes are stored.
# Personally, I would have used a NULL parent_taxon_id here.
break
taxonomy.insert(0, name)
taxon_id = parent_taxon_id
if taxonomy:
a["taxonomy"] = taxonomy
return a
def _retrieve_comment(adaptor, primary_id):
qvs = adaptor.execute_and_fetchall(
'SELECT comment_text FROM comment WHERE bioentry_id=%s ORDER BY "rank"',
(primary_id,),
)
comments = [comm[0] for comm in qvs]
# Don't want to add an empty list...
if comments:
return {"comment": comments}
else:
return {}
class DBSeqRecord(SeqRecord):
"""BioSQL equivalent of the Biopython SeqRecord object."""
def __init__(self, adaptor, primary_id):
"""Create a DBSeqRecord object.
Arguments:
- adaptor - A BioSQL.BioSeqDatabase.Adaptor object
- primary_id - An internal integer ID used by BioSQL
You wouldn't normally create a DBSeqRecord object yourself,
this is done for you when using a BioSeqDatabase object
"""
self._adaptor = adaptor
self._primary_id = primary_id
(
self._biodatabase_id,
self._taxon_id,
self.name,
accession,
version,
self._identifier,
self._division,
self.description,
) = self._adaptor.execute_one(
"SELECT biodatabase_id, taxon_id, name, accession, version,"
" identifier, division, description"
" FROM bioentry"
" WHERE bioentry_id = %s",
(self._primary_id,),
)
if version and version != "0":
self.id = "%s.%s" % (accession, version)
else:
self.id = accession
# We don't yet record any per-letter-annotations in the
# BioSQL database, but we should set this property up
# for completeness (and the __str__ method).
# We do NOT want to load the sequence from the DB here!
length = _retrieve_seq_len(adaptor, primary_id)
self._per_letter_annotations = _RestrictedDict(length=length)
def __get_seq(self):
if not hasattr(self, "_seq"):
self._seq = _retrieve_seq(self._adaptor, self._primary_id)
return self._seq
def __set_seq(self, seq):
# TODO - Check consistent with self._per_letter_annotations
self._seq = seq
def __del_seq(self):
del self._seq
seq = property(__get_seq, __set_seq, __del_seq, "Seq object")
def __get_dbxrefs(self):
if not hasattr(self, "_dbxrefs"):
self._dbxrefs = _retrieve_dbxrefs(self._adaptor, self._primary_id)
return self._dbxrefs
def __set_dbxrefs(self, dbxrefs):
self._dbxrefs = dbxrefs
def __del_dbxrefs(self):
del self._dbxrefs
dbxrefs = property(
__get_dbxrefs, __set_dbxrefs, __del_dbxrefs, "Database cross references"
)
def __get_features(self):
if not hasattr(self, "_features"):
self._features = _retrieve_features(self._adaptor, self._primary_id)
return self._features
def __set_features(self, features):
self._features = features
def __del_features(self):
del self._features
features = property(__get_features, __set_features, __del_features, "Features")
def __get_annotations(self):
if not hasattr(self, "_annotations"):
self._annotations = _retrieve_annotations(
self._adaptor, self._primary_id, self._taxon_id
)
if self._identifier:
self._annotations["gi"] = self._identifier
if self._division:
self._annotations["data_file_division"] = self._division
return self._annotations
def __set_annotations(self, annotations):
self._annotations = annotations
def __del_annotations(self):
del self._annotations
annotations = property(
__get_annotations, __set_annotations, __del_annotations, "Annotations"
)
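# --- Hedged usage sketch: looking up a DBSeqRecord. It assumes an already
# populated BioSQL database; the driver, credentials, namespace and accession
# below are placeholders, not values taken from this module. ------------------
if __name__ == "__main__":
    from BioSQL import BioSeqDatabase
    server = BioSeqDatabase.open_database(
        driver="MySQLdb", user="biosql", passwd="secret",
        host="localhost", db="bioseqdb",
    )
    db = server["example_namespace"]
    record = db.lookup(accession="X55053")  # returns a DBSeqRecord
    print(record.id, len(record.seq))       # seq is a lazily fetched DBSeq
    print(record.seq[:10])                  # slicing fetches only that subsequence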
|
the-stack_0_4536 | # coding: utf-8
"""
Module containing various definitions of Stores.
Stores are a default access pattern to data and provide
various utilities
"""
import json
import yaml
from itertools import chain, groupby
from socket import socket
from typing import Any, Dict, Iterator, List, Optional, Tuple, Union
import mongomock
from monty.dev import deprecated
from monty.io import zopen
from monty.json import MSONable, jsanitize
from monty.serialization import loadfn
from pydash import get, has, set_
from pymongo import MongoClient, ReplaceOne, uri_parser
from pymongo.errors import ConfigurationError, DocumentTooLarge, OperationFailure
from sshtunnel import SSHTunnelForwarder
from maggma.core import Sort, Store, StoreError
from maggma.utils import confirm_field_index
class SSHTunnel(MSONable):
__TUNNELS: Dict[str, SSHTunnelForwarder] = {}
def __init__(
self,
tunnel_server_address: str,
remote_server_address: str,
username: Optional[str] = None,
password: Optional[str] = None,
private_key: Optional[str] = None,
**kwargs,
):
"""
Args:
tunnel_server_address: string address with port for the SSH tunnel server
remote_server_address: string address with port for the server to connect to
username: optional username for the ssh tunnel server
password: optional password for the ssh tunnel server; If a private_key is
supplied this password is assumed to be the private key password
private_key: ssh private key to authenticate to the tunnel server
kwargs: any extra args passed to the SSHTunnelForwarder
"""
self.tunnel_server_address = tunnel_server_address
self.remote_server_address = remote_server_address
self.username = username
self.password = password
self.private_key = private_key
self.kwargs = kwargs
if remote_server_address in SSHTunnel.__TUNNELS:
self.tunnel = SSHTunnel.__TUNNELS[remote_server_address]
else:
open_port = _find_free_port("127.0.0.1")
local_bind_address = ("127.0.0.1", open_port)
ssh_address, ssh_port = tunnel_server_address.split(":")
ssh_port = int(ssh_port) # type: ignore
remote_bind_address, remote_bind_port = remote_server_address.split(":")
remote_bind_port = int(remote_bind_port) # type: ignore
if private_key is not None:
ssh_password = None
ssh_private_key_password = password
else:
ssh_password = password
ssh_private_key_password = None
self.tunnel = SSHTunnelForwarder(
ssh_address_or_host=(ssh_address, ssh_port),
local_bind_address=local_bind_address,
remote_bind_address=(remote_bind_address, remote_bind_port),
ssh_username=username,
ssh_password=ssh_password,
ssh_private_key_password=ssh_private_key_password,
ssh_pkey=private_key,
**kwargs,
)
def start(self):
if not self.tunnel.is_active:
self.tunnel.start()
def stop(self):
if self.tunnel.tunnel_is_up:
self.tunnel.stop()
@property
def local_address(self) -> Tuple[str, int]:
return self.tunnel.local_bind_address
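# --- Hedged usage sketch (hostnames, ports, credentials and key paths below
# are placeholders; MongoStore is defined just after this class). -------------
def _example_tunneled_store():
    tunnel = SSHTunnel(
        tunnel_server_address="jump.example.com:22",
        remote_server_address="mongodb.internal:27017",
        username="deploy",
        private_key="~/.ssh/id_rsa",
    )
    store = MongoStore(
        database="prod_db",
        collection_name="tasks",
        ssh_tunnel=tunnel,
    )
    store.connect()  # starts the tunnel and connects through its local port
    return store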
class MongoStore(Store):
"""
A Store that connects to a Mongo collection
"""
def __init__(
self,
database: str,
collection_name: str,
host: str = "localhost",
port: int = 27017,
username: str = "",
password: str = "",
ssh_tunnel: Optional[SSHTunnel] = None,
safe_update: bool = False,
**kwargs,
):
"""
Args:
database: The database name
collection_name: The collection name
host: Hostname for the database
port: TCP port to connect to
username: Username for the collection
password: Password to connect with
safe_update: fail gracefully on DocumentTooLarge errors on update
"""
self.database = database
self.collection_name = collection_name
self.host = host
self.port = port
self.username = username
self.password = password
self.ssh_tunnel = ssh_tunnel
self.safe_update = safe_update
self._collection = None # type: Any
self.kwargs = kwargs
super().__init__(**kwargs)
@property
def name(self) -> str:
"""
Return a string representing this data source
"""
return f"mongo://{self.host}/{self.database}/{self.collection_name}"
def connect(self, force_reset: bool = False):
"""
Connect to the source data
"""
if not self._collection or force_reset:
if self.ssh_tunnel is None:
conn = MongoClient(self.host, self.port)
else:
self.ssh_tunnel.start()
host, port = self.ssh_tunnel.local_address
conn = MongoClient(host=host, port=port)
db = conn[self.database]
if self.username != "":
db.authenticate(self.username, self.password)
self._collection = db[self.collection_name]
def __hash__(self) -> int:
"""Hash for MongoStore"""
return hash((self.database, self.collection_name, self.last_updated_field))
@classmethod
def from_db_file(cls, filename: str):
"""
Convenience method to construct MongoStore from db_file
from old QueryEngine format
"""
kwargs = loadfn(filename)
if "collection" in kwargs:
kwargs["collection_name"] = kwargs.pop("collection")
# Get rid of aliases from traditional query engine db docs
kwargs.pop("aliases", None)
return cls(**kwargs)
@classmethod
def from_launchpad_file(cls, lp_file, collection_name):
"""
Convenience method to construct MongoStore from a launchpad file
Note: A launchpad file is a specially formatted yaml file used in fireworks
Returns:
a MongoStore pointed at the named collection, using the launchpad's credentials
"""
with open(lp_file, 'r') as f:
lp_creds = yaml.safe_load(f)
db_creds = lp_creds.copy()
db_creds['database'] = db_creds['name']
for key in list(db_creds.keys()):
if key not in ['database', 'host', 'port', 'username', 'password']:
db_creds.pop(key)
db_creds['collection_name'] = collection_name
return cls(**db_creds)
def distinct(
self, field: str, criteria: Optional[Dict] = None, all_exist: bool = False
) -> List:
"""
Get all distinct values for a field
Args:
field: the field(s) to get distinct values for
criteria: PyMongo filter for documents to search in
"""
criteria = criteria or {}
try:
distinct_vals = self._collection.distinct(field, criteria)
except (OperationFailure, DocumentTooLarge):
distinct_vals = [
d["_id"]
for d in self._collection.aggregate(
[{"$match": criteria}, {"$group": {"_id": f"${field}"}}]
)
]
if all(isinstance(d, list) for d in filter(None, distinct_vals)): # type: ignore
distinct_vals = list(chain.from_iterable(filter(None, distinct_vals)))
return distinct_vals if distinct_vals is not None else []
def groupby(
self,
keys: Union[List[str], str],
criteria: Optional[Dict] = None,
properties: Union[Dict, List, None] = None,
sort: Optional[Dict[str, Union[Sort, int]]] = None,
skip: int = 0,
limit: int = 0,
) -> Iterator[Tuple[Dict, List[Dict]]]:
"""
Simple grouping function that will group documents
by keys.
Args:
keys: fields to group documents
criteria: PyMongo filter for documents to search in
properties: properties to return in grouped documents
sort: Dictionary of sort order for fields. Keys are field names and
values are 1 for ascending or -1 for descending.
skip: number documents to skip
limit: limit on total number of documents returned
Returns:
generator returning tuples of (key, list of docs)
"""
pipeline = []
if isinstance(keys, str):
keys = [keys]
if properties is None:
properties = []
if isinstance(properties, dict):
properties = list(properties.keys())
if criteria is not None:
pipeline.append({"$match": criteria})
if len(properties) > 0:
pipeline.append({"$project": {p: 1 for p in properties + keys}})
alpha = "abcdefghijklmnopqrstuvwxyz"
group_id = {letter: f"${key}" for letter, key in zip(alpha, keys)}
pipeline.append({"$group": {"_id": group_id, "docs": {"$push": "$$ROOT"}}})
for d in self._collection.aggregate(pipeline, allowDiskUse=True):
id_doc = {} # type: Dict[str,Any]
for letter, key in group_id.items():
if has(d["_id"], letter):
set_(id_doc, key[1:], d["_id"][letter])
yield (id_doc, d["docs"])
@classmethod
def from_collection(cls, collection):
"""
Generates a MongoStore from a pymongo collection object
This is not a fully safe operation as it gives dummy information to the MongoStore
As a result, this will not serialize and can not reset its connection
Args:
collection: the PyMongo collection to create a MongoStore around
"""
# TODO: How do we make this safer?
coll_name = collection.name
db_name = collection.database.name
store = cls(db_name, coll_name)
store._collection = collection
return store
@property # type: ignore
@deprecated(message="This will be removed in the future")
def collection(self):
"""Property referring to underlying pymongo collection"""
if self._collection is None:
raise StoreError("Must connect Mongo-like store before attempting to use it")
return self._collection
def count(self, criteria: Optional[Dict] = None) -> int:
"""
Counts the number of documents matching the query criteria
Args:
criteria: PyMongo filter for documents to count in
"""
criteria = criteria if criteria else {}
return self._collection.find(filter=criteria).count()
def query(
self,
criteria: Optional[Dict] = None,
properties: Union[Dict, List, None] = None,
sort: Optional[Dict[str, Union[Sort, int]]] = None,
skip: int = 0,
limit: int = 0,
) -> Iterator[Dict]:
"""
Queries the Store for a set of documents
Args:
criteria: PyMongo filter for documents to search in
properties: properties to return in grouped documents
sort: Dictionary of sort order for fields. Keys are field names and
values are 1 for ascending or -1 for descending.
skip: number documents to skip
limit: limit on total number of documents returned
"""
if isinstance(properties, list):
properties = {p: 1 for p in properties}
sort_list = (
[
(k, Sort(v).value) if isinstance(v, int) else (k, v.value)
for k, v in sort.items()
]
if sort
else None
)
for d in self._collection.find(
filter=criteria,
projection=properties,
skip=skip,
limit=limit,
sort=sort_list,
):
yield d
def ensure_index(self, key: str, unique: Optional[bool] = False) -> bool:
"""
Tries to create an index and returns True if it succeeded
Args:
key: single key to index
unique: Whether or not this index contains only unique keys
Returns:
bool indicating if the index exists/was created
"""
if confirm_field_index(self._collection, key):
return True
else:
try:
self._collection.create_index(key, unique=unique, background=True)
return True
except Exception:
return False
def update(self, docs: Union[List[Dict], Dict], key: Union[List, str, None] = None):
"""
Update documents into the Store
Args:
docs: the document or list of documents to update
key: field name(s) to determine uniqueness for a
document, can be a list of multiple fields,
a single field, or None if the Store's key
field is to be used
"""
requests = []
if not isinstance(docs, list):
docs = [docs]
for d in docs:
d = jsanitize(d, allow_bson=True)
# document-level validation is optional
validates = True
if self.validator:
validates = self.validator.is_valid(d)
if not validates:
if self.validator.strict:
raise ValueError(self.validator.validation_errors(d))
else:
self.logger.error(self.validator.validation_errors(d))
if validates:
key = key or self.key
if isinstance(key, list):
search_doc = {k: d[k] for k in key}
else:
search_doc = {key: d[key]}
requests.append(ReplaceOne(search_doc, d, upsert=True))
if len(requests) > 0:
try:
self._collection.bulk_write(requests, ordered=False)
except (OperationFailure, DocumentTooLarge) as e:
if self.safe_update:
for req in requests:
try:
self._collection.bulk_write([req], ordered=False)
except (OperationFailure, DocumentTooLarge):
self.logger.error(
f"Could not upload document for {req._filter} as it was too large for Mongo"
)
else:
raise e
def remove_docs(self, criteria: Dict):
"""
Remove docs matching the query dictionary
Args:
criteria: query dictionary to match
"""
self._collection.delete_many(filter=criteria)
def close(self):
"""Close up all collections"""
self._collection.database.client.close()
if self.ssh_tunnel is not None:
self.ssh_tunnel.stop()
def __eq__(self, other: object) -> bool:
"""
Check equality for MongoStore
other: other mongostore to compare with
"""
if not isinstance(other, MongoStore):
return False
fields = ["database", "collection_name", "host", "port", "last_updated_field"]
return all(getattr(self, f) == getattr(other, f) for f in fields)
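# --- Hedged usage sketch (the key names below follow what from_db_file
# expects -- an old QueryEngine-style db file; the values are placeholders). --
def _example_store_from_db_file():
    import tempfile
    creds = {"database": "test_db", "collection": "tasks",
             "host": "localhost", "port": 27017}
    with tempfile.NamedTemporaryFile("w", suffix=".json", delete=False) as f:
        json.dump(creds, f)
        path = f.name
    return MongoStore.from_db_file(path)  # no connection is made until .connect()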
class MongoURIStore(MongoStore):
"""
A Store that connects to a Mongo collection via a URI
This is expected to be a special mongodb+srv:// URIs that include
client parameters via TXT records
"""
def __init__(
self,
uri: str,
collection_name: str,
database: str = None,
ssh_tunnel: Optional[SSHTunnel] = None,
**kwargs,
):
"""
Args:
uri: MongoDB+SRV URI
database: database to connect to
collection_name: The collection name
"""
self.uri = uri
self.ssh_tunnel = ssh_tunnel
# parse the dbname from the uri
if database is None:
d_uri = uri_parser.parse_uri(uri)
if d_uri["database"] is None:
raise ConfigurationError(
"If database name is not supplied, a database must be set in the uri"
)
self.database = d_uri["database"]
else:
self.database = database
self.collection_name = collection_name
self.kwargs = kwargs
self._collection = None
super(MongoStore, self).__init__(**kwargs) # lgtm
@property
def name(self) -> str:
"""
Return a string representing this data source
"""
# TODO: This is not very safe since it exposes the username/password info
return self.uri
def connect(self, force_reset: bool = False):
"""
Connect to the source data
"""
if not self._collection or force_reset:
conn = MongoClient(self.uri)
db = conn[self.database]
self._collection = db[self.collection_name]
class MemoryStore(MongoStore):
"""
An in-memory Store that functions similarly
to a MongoStore
"""
def __init__(self, collection_name: str = "memory_db", **kwargs):
"""
Initializes the Memory Store
Args:
collection_name: name for the collection in memory
"""
self.collection_name = collection_name
self._collection = None
self.ssh_tunnel = None # This is to fix issues with the tunnel on close
self.kwargs = kwargs
super(MongoStore, self).__init__(**kwargs) # noqa
def connect(self, force_reset: bool = False):
"""
Connect to the source data
"""
if not self._collection or force_reset:
self._collection = mongomock.MongoClient().db[self.name]
@property
def name(self):
"""Name for the store"""
return f"mem://{self.collection_name}"
def __hash__(self):
"""Hash for the store"""
return hash((self.name, self.last_updated_field))
def groupby(
self,
keys: Union[List[str], str],
criteria: Optional[Dict] = None,
properties: Union[Dict, List, None] = None,
sort: Optional[Dict[str, Union[Sort, int]]] = None,
skip: int = 0,
limit: int = 0,
) -> Iterator[Tuple[Dict, List[Dict]]]:
"""
Simple grouping function that will group documents
by keys.
Args:
keys: fields to group documents
criteria: PyMongo filter for documents to search in
properties: properties to return in grouped documents
sort: Dictionary of sort order for fields. Keys are field names and
values are 1 for ascending or -1 for descending.
skip: number documents to skip
limit: limit on total number of documents returned
Returns:
generator returning tuples of (key, list of elements)
"""
keys = keys if isinstance(keys, list) else [keys]
data = [
doc
for doc in self.query(properties=keys, criteria=criteria)
if all(has(doc, k) for k in keys)
]
def grouping_keys(doc):
return tuple(get(doc, k) for k in keys)
for vals, group in groupby(sorted(data, key=grouping_keys), key=grouping_keys):
doc = {} # type: Dict[Any,Any]
for k, v in zip(keys, vals):
set_(doc, k, v)
yield doc, list(group)
def __eq__(self, other: object) -> bool:
"""
Check equality for MemoryStore
other: other MemoryStore to compare with
"""
if not isinstance(other, MemoryStore):
return False
fields = ["collection_name", "last_updated_field"]
return all(getattr(self, f) == getattr(other, f) for f in fields)
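# A minimal usage sketch for MemoryStore and its groupby(), not part of the
# original module; it assumes the update()/query() API defined on MongoStore:
#
#     store = MemoryStore("scratch")
#     store.connect()
#     store.update([{"task_id": 1, "group": "a"},
#                   {"task_id": 2, "group": "a"}], key="task_id")
#     for group_doc, docs in store.groupby("group"):
#         print(group_doc, len(docs))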
class JSONStore(MemoryStore):
"""
A Store for access to a single or multiple JSON files
"""
def __init__(self, paths: Union[str, List[str]], **kwargs):
"""
Args:
paths: paths for json files to turn into a Store
"""
paths = paths if isinstance(paths, (list, tuple)) else [paths]
self.paths = paths
self.kwargs = kwargs
super().__init__(collection_name="collection", **kwargs)
def connect(self, force_reset=False):
"""
Loads the files into the collection in memory
"""
super().connect(force_reset=force_reset)
for path in self.paths:
with zopen(path) as f:
data = f.read()
data = data.decode() if isinstance(data, bytes) else data
objects = json.loads(data)
objects = [objects] if not isinstance(objects, list) else objects
self.update(objects)
def __hash__(self):
return hash((*self.paths, self.last_updated_field))
def __eq__(self, other: object) -> bool:
"""
Check equality for JSONStore
Args:
other: other JSONStore to compare with
"""
if not isinstance(other, JSONStore):
return False
fields = ["paths", "last_updated_field"]
return all(getattr(self, f) == getattr(other, f) for f in fields)
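# A minimal usage sketch for JSONStore, not part of the original module; the file
# path is hypothetical, connect() loads its contents into the in-memory collection,
# and query_one() is assumed from the Store base class:
#
#     store = JSONStore("data/tasks.json")
#     store.connect()
#     doc = store.query_one(criteria={"task_id": 1})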
def _find_free_port(address="0.0.0.0"):
s = socket()
s.bind((address, 0)) # Bind to a free port provided by the host.
return s.getsockname()[1] # Return the port number assigned.
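# Sketch of how _find_free_port is typically used (an illustration, not original
# code). The helper does not keep the socket open, so another process could claim
# the port before the caller binds to it:
#
#     port = _find_free_port()
#     serve_on(port)  # hypothetical server start-up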
|
the-stack_0_4538 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django import forms
from django.contrib.auth import get_user_model
from django.forms.utils import ErrorDict
from django.utils.translation import ugettext_lazy as _
from shop.conf import app_settings as shop_settings
from shop.modifiers.pool import cart_modifiers_pool
from shopit.conf import app_settings
from shopit.forms.account import AccountDetailsForm, CleanEmailMixin
from shopit.models.address import ISO_3166_CODES, BillingAddress, ShippingAddress
from shopit.models.cart import CartDiscountCode
from shopit.models.customer import Customer
from shopit.models.modifier import DiscountCode
from shopit.utils import get_error_message as em
class CartDiscountCodeForm(forms.ModelForm):
"""
Form that handles entering a cart modifier code.
"""
_discount_code = None
class Meta:
model = CartDiscountCode
fields = ['code']
def __init__(self, *args, **kwargs):
self.cart = kwargs.pop('cart')
kwargs['instance'] = CartDiscountCode(cart=self.cart)
super(CartDiscountCodeForm, self).__init__(*args, **kwargs)
self.fields['code'].required = False
self.fields['code'].label = _('Discount code')
def clean_code(self):
code = self.cleaned_data.get('code', None)
if code:
cart_codes = self.cart.get_discount_codes().values_list('code', flat=True)
if code in cart_codes:
raise forms.ValidationError(em('cart_discount_code_exists'))
try:
dc = DiscountCode.objects.valid().get(code=code)
except DiscountCode.DoesNotExist:
raise forms.ValidationError(em('cart_discount_code_invalid'))
if dc.customer and code not in self.cart.customer.get_discount_codes().values_list('code', flat=True):
raise forms.ValidationError(em('cart_discount_code_wrong_customer'))
self._discount_code = dc
return code
def save(self, commit=True):
if self._discount_code is not None:
self._discount_code.use() # increment `num_uses` field on DiscountCode.
return super(CartDiscountCodeForm, self).save(commit)
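# A minimal usage sketch for CartDiscountCodeForm in a view, not part of the
# original module; how `cart` is obtained is left to the caller:
#
#     form = CartDiscountCodeForm(request.POST, cart=cart)
#     if form.is_valid():
#         form.save()  # attaches the code to the cart and bumps its usage count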
class CheckoutFormMixin(object):
"""
Checkout form mixin ensures request and cart are passed in.
"""
def __init__(self, *args, **kwargs):
self.request = kwargs.pop('request')
self.cart = kwargs.pop('cart')
super(CheckoutFormMixin, self).__init__(*args, **kwargs)
class CustomerForm(CheckoutFormMixin, AccountDetailsForm):
def __init__(self, *args, **kwargs):
self.cart = kwargs.pop('cart')
        # Call AccountDetailsForm directly, bypassing CheckoutFormMixin.__init__.
        AccountDetailsForm.__init__(self, *args, **kwargs)
def save(self, commit=True):
self.instance.recognize_as_registered()
return super(CustomerForm, self).save(commit)
class GuestForm(CheckoutFormMixin, CleanEmailMixin, forms.ModelForm):
email = forms.EmailField(label=_('Email address'))
phone_number = forms.CharField(label=_('Phone number'))
class Meta:
model = get_user_model()
fields = ['email']
def __init__(self, *args, **kwargs):
super(GuestForm, self).__init__(*args, **kwargs)
self.customer = Customer.objects.get_from_request(self.request)
self.instance = self.customer.user
self.fields['email'].initial = self.instance.email
self.fields['phone_number'].initial = self.customer.phone_number
self.fields['phone_number'].required = app_settings.PHONE_NUMBER_REQUIRED
def save(self, commit=True):
self.customer.recognize_as_guest()
self.instance.is_active = shop_settings.SHOP_GUEST_IS_ACTIVE_USER
if self.instance.is_active:
password = get_user_model().objects.make_random_password(length=30)
self.instance.set_password(password)
self.customer.phone_number = self.cleaned_data.get('phone_number', '')
self.customer.save()
return super(GuestForm, self).save(commit)
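# A minimal usage sketch for GuestForm, not part of the original module; like the
# other checkout forms it is bound with `request` and `cart` keyword arguments:
#
#     form = GuestForm(request.POST or None, request=request, cart=cart)
#     if form.is_valid():
#         form.save()  # marks the customer as a guest and stores the phone number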
class AddressForm(CheckoutFormMixin, forms.ModelForm):
priority = forms.IntegerField(
required=False,
widget=forms.HiddenInput,
)
existant = forms.ModelChoiceField(
required=False,
queryset=None,
label=_('Use existant address'),
)
# Field decides if a primary address should be used instead.
# Primary address is set to either 'shipping' or 'billing' using `PRIMARY_ADDRESS` setting.
use_primary_address = forms.BooleanField(
required=False,
initial=True,
)
class Meta:
exclude = ['customer']
def __init__(self, *args, **kwargs):
self.field_order = ['existant'] # Place `existant` field at the top.
super(AddressForm, self).__init__(*args, **kwargs)
self.customer = Customer.objects.get_from_request(self.request)
# Set existant addresses choices.
addresses = self.Meta.model.objects.filter(customer=self.customer).order_by('-priority')
self.fields['existant'].queryset = addresses
if not addresses.exists():
self.fields['existant'].widget = forms.HiddenInput()
# Set country choices based on `ADDRESS_COUNTRIES` setting.
if app_settings.ADDRESS_COUNTRIES:
countries = [('', '---------')] + [x for x in ISO_3166_CODES if x in app_settings.ADDRESS_COUNTRIES]
self.fields['country'].widget = forms.Select(choices=countries)
self.fields['country'].choices = countries
if self.is_primary:
self.fields.pop('use_primary_address') # remove field from primary address.
else:
self.fields['use_primary_address'].initial = \
getattr(self.cart, '%s_address' % self.address_type, None) is None
if hasattr(self, 'use_primary_address_label'):
self.fields['use_primary_address'].label = self.use_primary_address_label
# If current address is set to the cart, use it as existant one.
cart_address = getattr(self.cart, '%s_address' % self.address_type, None)
if cart_address:
self.fields['existant'].initial = cart_address
for fname in [f.name for f in cart_address._meta.get_fields() if f.name in self.fields]:
self.fields[fname].initial = getattr(cart_address, fname, '')
def full_clean(self):
super(AddressForm, self).full_clean()
if not self.is_primary:
if self.is_bound and self['use_primary_address'].value():
self._errors = ErrorDict()
def is_valid(self):
if not self.is_primary:
return self['use_primary_address'].value() or super(AddressForm, self).is_valid()
return super(AddressForm, self).is_valid()
def clean(self):
existant = self.cleaned_data['existant']
if existant:
self.instance = existant # Set existant as an instance if selected.
self.cleaned_data['priority'] = existant.priority
# Populate missing fields in `cleaned_data` with existant data and skip validation.
for field in [x for x in self.fields if x not in self.cleaned_data]:
self.cleaned_data[field] = getattr(existant, field)
del self._errors[field]
else:
self.cleaned_data['priority'] = self.Meta.model.objects.get_max_priority(self.customer) + 1
return super(AddressForm, self).clean()
def save(self, commit=True):
if self.is_primary or not self['use_primary_address'].value():
instance = super(AddressForm, self).save(commit=False)
instance.customer = self.customer
instance.priority = self.cleaned_data['priority']
instance.save()
return instance
@property
    def address_type(self):
        # Lower-cased model name minus the 'Address' suffix, e.g. 'shipping' or 'billing'.
        # Note that rstrip() strips a character set rather than a suffix; it works here
        # because both model names end in 'Address' and the preceding letter ('g') is
        # outside that set.
        return self.Meta.model.__name__.lower().rstrip('address')
@property
def is_primary(self):
return app_settings.PRIMARY_ADDRESS == self.address_type
class ShippingAddressForm(AddressForm):
use_primary_address_label = _('Use billing address for shipping')
class Meta(AddressForm.Meta):
model = ShippingAddress
class BillingAddressForm(AddressForm):
use_primary_address_label = _('Use shipping address for billing')
class Meta(AddressForm.Meta):
model = BillingAddress
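# A minimal usage sketch for the address forms, not part of the original module.
# save() returns None on the secondary form when "use primary address" is checked:
#
#     shipping_form = ShippingAddressForm(request.POST or None, request=request, cart=cart)
#     billing_form = BillingAddressForm(request.POST or None, request=request, cart=cart)
#     if shipping_form.is_valid() and billing_form.is_valid():
#         cart.shipping_address = shipping_form.save()
#         cart.billing_address = billing_form.save()
#         cart.save()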
class PaymentMethodForm(CheckoutFormMixin, forms.Form):
payment_modifier = forms.ChoiceField(
label=_('Payment method'),
widget=forms.RadioSelect,
)
def __init__(self, *args, **kwargs):
super(PaymentMethodForm, self).__init__(*args, **kwargs)
choices = [x.get_choice() for x in cart_modifiers_pool.get_payment_modifiers() if not x.is_disabled(self.cart)]
self.fields['payment_modifier'].choices = choices
if len(choices) == 1:
self.fields['payment_modifier'].initial = choices[0][0]
class DeliveryMethodForm(CheckoutFormMixin, forms.Form):
shipping_modifier = forms.ChoiceField(
label=_('Delivery method'),
widget=forms.RadioSelect,
)
def __init__(self, *args, **kwargs):
super(DeliveryMethodForm, self).__init__(*args, **kwargs)
choices = [x.get_choice() for x in cart_modifiers_pool.get_shipping_modifiers()
if not x.is_disabled(self.cart)]
self.fields['shipping_modifier'].choices = choices
if len(choices) == 1:
self.fields['shipping_modifier'].initial = choices[0][0]
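# A minimal usage sketch for the modifier forms, not part of the original module;
# persisting the choice in `cart.extra` mirrors the usual django-shop flow, but the
# exact key used here is an assumption:
#
#     payment_form = PaymentMethodForm(request.POST or None, request=request, cart=cart)
#     if payment_form.is_valid():
#         cart.extra['payment_modifier'] = payment_form.cleaned_data['payment_modifier']
#         cart.save()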
class ExtraAnnotationForm(CheckoutFormMixin, forms.Form):
annotation = forms.CharField(
label=_('Extra annotation for this order'),
required=False,
widget=forms.Textarea,
)
class AcceptConditionForm(CheckoutFormMixin, forms.Form):
accept = forms.BooleanField(
label=_('Accept'),
required=True,
widget=forms.CheckboxInput,
)
|