ext (string, 9 classes) | sha (string, 40 chars) | content (string, 3–1.04M chars)
---|---|---|
py | 1a3ece65c2140bc2f1d04dc5b9e93b026466a330 | # -*- coding: utf-8 -*-
'''
Utilities to enable exception reraising across the master commands
'''
# Import python libs
import exceptions
# Import salt libs
import salt.exceptions
import salt.utils.event
def raise_error(name=None, args=None, message=''):
    '''
    Raise an exception with __name__ from name and args from args.
    If args is None, use message from message instead.
    If name is empty then use "Exception".
    '''
name = name or 'Exception'
if hasattr(salt.exceptions, name):
ex = getattr(salt.exceptions, name)
elif hasattr(exceptions, name):
ex = getattr(exceptions, name)
else:
name = 'SaltException'
ex = getattr(salt.exceptions, name)
if args is not None:
raise ex(*args)
else:
raise ex(message)
def pack_exception(exc):
if hasattr(exc, 'pack'):
packed_exception = exc.pack()
else:
packed_exception = {'message': exc.__unicode__(), 'args': exc.args}
return packed_exception
def fire_exception(exc, opts, job=None, node='minion'):
'''
Fire raw exception across the event bus
'''
if job is None:
job = {}
event = salt.utils.event.SaltEvent(node, opts=opts)
    event.fire_event(pack_exception(exc), '_salt_error')
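# Hedged usage sketch (not part of the original salt module): shows how raise_error
# falls back to SaltException when the requested exception name is unknown.
# Illustrative only; assumes salt is importable in this environment.
if __name__ == '__main__':
    try:
        raise_error(name='NoSuchError', message='example failure')
    except salt.exceptions.SaltException as exc:
        print(pack_exception(exc) if hasattr(exc, 'pack') else exc.args)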
|
py | 1a3eceafed393dbeb42239cd2eb973ea93b91a80 | # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class DanmakuAggregateItem(scrapy.Item):
aid = scrapy.Field()
p_name = scrapy.Field()
page_number = scrapy.Field()
word_frequency = scrapy.Field()
danmaku_density = scrapy.Field()
duration = scrapy.Field()
object_id = scrapy.Field()
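# Hedged usage sketch (not part of the original file): how a spider callback might
# populate this item. The field values below are placeholders, not real data.
if __name__ == '__main__':
    item = DanmakuAggregateItem(aid=170001, page_number=1, duration=1435)
    item['p_name'] = 'part 1'
    print(dict(item))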
|
py | 1a3ecf3c0fc1a610e94b04de3f7c15c5c1beed73 | # import matplotlib
# matplotlib.use("Agg")
# import matplotlib.pylab as plt
# import argparse
# import os
#
# import numpy as np
# import time
# from numpy import finfo
# import torch
#
# from hparams import create_hparams
# from layers import TacotronSTFT
# from audio_processing import griffin_lim, mel_denormalize
# #from train import load_model
#
# from model import Tacotron2
# from text import text_to_sequence
# from scipy.io.wavfile import write
# from distributed import apply_gradient_allreduce
#
# def plot_data(data, index, output_dir="", figsize=(16, 4)):
# fig, axes = plt.subplots(1, len(data), figsize=figsize)
# for i in range(len(data)):
# axes[i].imshow(data[i], aspect='auto', origin='bottom',
# interpolation='none')
# plt.savefig(os.path.join(output_dir, 'sentence_{}.png'.format(index)))
#
# def generate_mels(hparams, checkpoint_path, sentences, cleaner, removing_silence_mel_padding, adding_silence_mel_padding, is_GL, output_dir,args):
# model = load_model(hparams,args)
# try:
# model = model.module
# except:
# pass
# model.load_state_dict({k.replace('module.', ''): v for k, v in torch.load(checkpoint_path)['state_dict'].items()})
# _ = model.eval()
#
# output_mels = []
# for i, s in enumerate(sentences):
# sequence = np.array(text_to_sequence(s, cleaner))[None, :]
# sequence = torch.autograd.Variable(torch.from_numpy(sequence)).cuda().long()
#
# stime = time.time()
# _, mel_outputs_postnet, _, alignments = model.inference(sequence)
# mel = mel_outputs_postnet.data.cpu().numpy()[0][:,:-removing_silence_mel_padding]
# mel = np.append(mel, np.ones((80,adding_silence_mel_padding),dtype=np.float32)*-4.0, axis=1)
# if(is_GL):
# plot_data((mel,
# alignments.data.cpu().numpy()[0].T), i, output_dir)
# inf_time = time.time() - stime
# print("{}th sentence, Infenrece time: {:.2f}s, len_mel: {}".format(i, inf_time, mel_outputs_postnet.size(2)))
# output_mels.append(mel)
# return output_mels
#
# def mels_to_wavs_GL(hparams, mels, taco_stft, output_dir="", ref_level_db = 0, magnitude_power=1.5):
# for i, mel in enumerate(mels):
# stime = time.time()
# mel_decompress = mel_denormalize(torch.from_numpy(mel).cuda().unsqueeze(0))
# mel_decompress = taco_stft.spectral_de_normalize(mel_decompress + ref_level_db) ** (1/magnitude_power)
# mel_decompress_ = mel_decompress.transpose(1, 2).data.cpu()
# spec_from_mel_scaling = 1000
# spec_from_mel = torch.mm(mel_decompress_[0], taco_stft.mel_basis)
# spec_from_mel = spec_from_mel.transpose(0, 1).unsqueeze(0)
# spec_from_mel = spec_from_mel * spec_from_mel_scaling
# waveform = griffin_lim(torch.autograd.Variable(spec_from_mel[:, :, :]),
# taco_stft.stft_fn, 60)
# waveform = waveform[0].data.cpu().numpy()
# dec_time = time.time() - stime
# len_audio = float(len(waveform)) / float(hparams.sampling_rate)
# str = "{}th sentence, audio length: {:.2f} sec, mel_to_wave time: {:.2f}".format(i, len_audio, dec_time)
# print(str)
# write(os.path.join(output_dir,"sentence_{}.wav".format(i)), hparams.sampling_rate, waveform)
#
# def load_model(hparams,args):
# model = Tacotron2(hparams).cuda()
# if hparams.fp16_run:
# #model = batchnorm_to_float(model.half())
# #model.decoder.attention_layer.score_mask_value = float(finfo('float16').min)
# model.decoder.attention_layer.score_mask_value = finfo('float16').min
#
#
# if hparams.distributed_run:
# model = apply_gradient_allreduce(model,args)
#
# return model
#
# def run(hparams, checkpoint_path, sentence_path, clenaer, removing_silence_mel_padding, adding_silence_mel_padding, is_GL, is_melout, is_metaout, output_dir,args):
# f = open(sentence_path, 'r', encoding='utf-8')
# sentences = [x.strip() for x in f.readlines()]
# print('All sentences to infer:',sentences)
# f.close()
# os.makedirs(output_dir, exist_ok=True)
#
# stft = TacotronSTFT(
# hparams.filter_length, hparams.hop_length, hparams.win_length,
# hparams.n_mel_channels, hparams.sampling_rate, hparams.mel_fmin,
# hparams.mel_fmax)
#
# mels = generate_mels(hparams, checkpoint_path, sentences, clenaer, removing_silence_mel_padding, adding_silence_mel_padding, is_GL, output_dir,args)
# if(is_GL): mels_to_wavs_GL(hparams, mels, stft, output_dir)
#
# mel_paths = []
# if is_melout:
# mel_dir = os.path.join(output_dir, 'mels')
# os.makedirs(mel_dir, exist_ok=True)
#
# for i, mel in enumerate(mels):
# mel_path = os.path.join(output_dir, 'mels/', "mel-{}.npy".format(i))
# mel_paths.append(mel_path)
# if(list(mel.shape)[1] >= hparams.max_decoder_steps - removing_silence_mel_padding):
# continue
# np.save(mel_path, mel)
#
#
# if is_metaout:
# with open(os.path.join(output_dir, 'metadata.csv'), 'w', encoding='utf-8') as file:
# lines = []
# for i, s in enumerate(sentences):
# mel_path = mel_paths[i]
# if (list(mels[i].shape)[1] >= hparams.max_decoder_steps - removing_silence_mel_padding):
# continue
# lines.append('{}|{}\n'.format(mel_path,s))
# file.writelines(lines)
#
# if __name__ == '__main__':
# """
# usage
# python inference.py -o=synthesis/80000 -c=nam_h_ep8/checkpoint_80000 -s=test.txt --silence_mel_padding=3 --is_GL
# -> wave, figure
# python inference.py -o=kss_mels_given_park_text -c=kakao_kss_model_checkpoint_23500 -s=skip_review_percentile_metadata_n.csv --silence_mel_padding=3 --is_melout --is_metaout
# -> mels, metadata.csv
# """
# parser = argparse.ArgumentParser()
# parser.add_argument('-o', '--output_directory', type=str,
# help='directory to save wave and fig')
# parser.add_argument('-c', '--checkpoint_path', type=str, default=None,
# required=True, help='checkpoint path')
# parser.add_argument('-s', '--sentence_path', type=str, default=None,
# required=True, help='sentence path')
# parser.add_argument('--removing_silence_mel_padding', type=int, default=1,
# help='removing existing silence_mel_padding, silence audio size is hop_length * silence mel padding')
# parser.add_argument('--adding_silence_mel_padding', type=int, default=0,
# help='adding silence_mel_padding, silence audio size is hop_length * silence mel padding')
# parser.add_argument('--hparams', type=str,
# required=False, help='comma separated name=value pairs')
# parser.add_argument('--is_GL', action="store_true", help='Whether to do Giffin & Lim inference or not ')
# parser.add_argument('--is_melout', action="store_true", help='Whether to save melspectrogram file or not ')
# parser.add_argument('--is_metaout', action="store_true", help='Whether to save metadata.csv file for (mel, text) tuple or not ')
# parser.add_argument('--n_gpus', type=int, default=0,
# help='number of gpus')
# parser.add_argument('--group_name', type=str, default="",
# help='group name')
# parser.add_argument('--rank', type=int, default=0,
# help='rank')
#
#
# args = parser.parse_args()
# hparams = create_hparams(args.hparams)
# hparams.sampling_rate = 22050
# hparams.filter_length = 1024
# hparams.hop_length = 256
# hparams.win_length = 1024
#
#
#
# torch.backends.cudnn.enabled = hparams.cudnn_enabled
# torch.backends.cudnn.benchmark = hparams.cudnn_benchmark
#
# run(hparams, args.checkpoint_path, args.sentence_path, hparams.text_cleaners, args.removing_silence_mel_padding, args.adding_silence_mel_padding, args.is_GL, args.is_melout, args.is_metaout, args.output_directory,args)
#
#
import matplotlib
matplotlib.use("Agg")
import matplotlib.pylab as plt
import argparse
import os
import numpy as np
import time
import torch
from hparams import create_hparams
from layers import TacotronSTFT
from audio_processing import griffin_lim, mel_denormalize
from train import load_model
from text import text_to_sequence
from scipy.io.wavfile import write
def plot_data(data, index, output_dir="", figsize=(16, 4)):
fig, axes = plt.subplots(1, len(data), figsize=figsize)
for i in range(len(data)):
axes[i].imshow(data[i], aspect='auto', origin='bottom',
interpolation='none')
plt.savefig(os.path.join(output_dir, 'sentence_{}.png'.format(index)))
def generate_mels(hparams, checkpoint_path, sentences, cleaner, removing_silence_mel_padding, adding_silence_mel_padding, is_GL, output_dir=""):
model = load_model(hparams)
try:
model = model.module
except:
pass
model.load_state_dict({k.replace('module.', ''): v for k, v in torch.load(checkpoint_path)['state_dict'].items()})
_ = model.eval()
output_mels = []
for i, s in enumerate(sentences):
sequence = np.array(text_to_sequence(s, cleaner))[None, :]
sequence = torch.autograd.Variable(torch.from_numpy(sequence)).cuda().long()
stime = time.time()
_, mel_outputs_postnet, _, alignments = model.inference(sequence)
mel = mel_outputs_postnet.data.cpu().numpy()[0][:,:-removing_silence_mel_padding]
mel = np.append(mel, np.ones((80,adding_silence_mel_padding),dtype=np.float32)*-4.0, axis=1)
if(is_GL):
plot_data((mel,
alignments.data.cpu().numpy()[0].T), i, output_dir)
inf_time = time.time() - stime
print("{}th sentence, Infenrece time: {:.2f}s, len_mel: {}".format(i, inf_time, mel_outputs_postnet.size(2)))
output_mels.append(mel)
return output_mels
def mels_to_wavs_GL(hparams, mels, taco_stft, output_dir="", ref_level_db = 0, magnitude_power=1.5):
for i, mel in enumerate(mels):
stime = time.time()
mel_decompress = mel_denormalize(torch.from_numpy(mel).cuda().unsqueeze(0))
mel_decompress = taco_stft.spectral_de_normalize(mel_decompress + ref_level_db) ** (1/magnitude_power)
mel_decompress_ = mel_decompress.transpose(1, 2).data.cpu()
spec_from_mel_scaling = 1000
spec_from_mel = torch.mm(mel_decompress_[0], taco_stft.mel_basis)
spec_from_mel = spec_from_mel.transpose(0, 1).unsqueeze(0)
spec_from_mel = spec_from_mel * spec_from_mel_scaling
waveform = griffin_lim(torch.autograd.Variable(spec_from_mel[:, :, :]),
taco_stft.stft_fn, 60)
waveform = waveform[0].data.cpu().numpy()
dec_time = time.time() - stime
len_audio = float(len(waveform)) / float(hparams.sampling_rate)
str = "{}th sentence, audio length: {:.2f} sec, mel_to_wave time: {:.2f}".format(i, len_audio, dec_time)
print(str)
write(os.path.join(output_dir,"sentence_{}.wav".format(i)), hparams.sampling_rate, waveform)
def run(hparams, checkpoint_path, sentence_path, cleaner, removing_silence_mel_padding, adding_silence_mel_padding, is_GL, is_melout, is_metaout, output_dir):
f = open(sentence_path, 'r', encoding='utf-8')
sentences = [x.strip() for x in f.readlines()]
print('All sentences to infer:',sentences)
f.close()
os.makedirs(output_dir, exist_ok=True)
stft = TacotronSTFT(
hparams.filter_length, hparams.hop_length, hparams.win_length,
hparams.n_mel_channels, hparams.sampling_rate, hparams.mel_fmin,
hparams.mel_fmax)
    mels = generate_mels(hparams, checkpoint_path, sentences, cleaner, removing_silence_mel_padding, adding_silence_mel_padding, is_GL, output_dir)
if(is_GL): mels_to_wavs_GL(hparams, mels, stft, output_dir)
mel_paths = []
if is_melout:
mel_dir = os.path.join(output_dir, 'mels')
os.makedirs(mel_dir, exist_ok=True)
for i, mel in enumerate(mels):
mel_path = os.path.join(output_dir, 'mels/', "mel-{}.npy".format(i))
mel_paths.append(mel_path)
if(list(mel.shape)[1] >= hparams.max_decoder_steps - removing_silence_mel_padding):
continue
np.save(mel_path, mel)
if is_metaout:
with open(os.path.join(output_dir, 'metadata.csv'), 'w', encoding='utf-8') as file:
lines = []
for i, s in enumerate(sentences):
mel_path = mel_paths[i]
if (list(mels[i].shape)[1] >= hparams.max_decoder_steps - removing_silence_mel_padding):
continue
lines.append('{}|{}\n'.format(mel_path,s))
file.writelines(lines)
if __name__ == '__main__':
"""
usage
python inference.py -o=synthesis/80000 -c=nam_h_ep8/checkpoint_80000 -s=test.txt --silence_mel_padding=3 --is_GL
-> wave, figure
python inference.py -o=kss_mels_given_park_text -c=kakao_kss_model_checkpoint_23500 -s=skip_review_percentile_metadata_n.csv --silence_mel_padding=3 --is_melout --is_metaout
-> mels, metadata.csv
"""
parser = argparse.ArgumentParser()
parser.add_argument('-o', '--output_directory', type=str,
help='directory to save wave and fig')
parser.add_argument('-c', '--checkpoint_path', type=str, default=None,
required=True, help='checkpoint path')
parser.add_argument('-s', '--sentence_path', type=str, default=None,
required=True, help='sentence path')
parser.add_argument('--removing_silence_mel_padding', type=int, default=1,
help='removing existing silence_mel_padding, silence audio size is hop_length * silence mel padding')
parser.add_argument('--adding_silence_mel_padding', type=int, default=0,
help='adding silence_mel_padding, silence audio size is hop_length * silence mel padding')
parser.add_argument('--hparams', type=str,
required=False, help='comma separated name=value pairs')
    parser.add_argument('--is_GL', action="store_true", help='Whether to do Griffin-Lim inference or not')
parser.add_argument('--is_melout', action="store_true", help='Whether to save melspectrogram file or not ')
parser.add_argument('--is_metaout', action="store_true", help='Whether to save metadata.csv file for (mel, text) tuple or not ')
args = parser.parse_args()
hparams = create_hparams(args.hparams)
hparams.sampling_rate = 22050
hparams.filter_length = 1024
hparams.hop_length = 256
hparams.win_length = 1024
torch.backends.cudnn.enabled = hparams.cudnn_enabled
torch.backends.cudnn.benchmark = hparams.cudnn_benchmark
run(hparams, args.checkpoint_path, args.sentence_path, hparams.text_cleaners, args.removing_silence_mel_padding, args.adding_silence_mel_padding, args.is_GL, args.is_melout, args.is_metaout, args.output_directory)
|
py | 1a3ed07950366d48f38a7bf9ccb4cadd2a33681d | from pdb import set_trace as breakpoint
class Dog():
def __init__(self, name, age, housebroken = True):
self.name = name
self.age = age
self.housebroken = housebroken
def bark(self):
print(f'{self.name} likes to bark!')
class Beagle(Dog):
def __init__(self, name, age, housebroken=True, barks_alot=True):
super().__init__(name, age, housebroken)
self.barks_alot = barks_alot
def bark(self):
        if self.barks_alot:
            print(f'{self.name} likes to bark!')
        else:
            print(f'{self.name} hates to bark!')
if __name__ == "__main__":
lucky = Dog("Lucky", 3)
spike = Beagle("Spike", 7, barks_alot=False)
    breakpoint()
|
py | 1a3ed13ac743a5fdf856ff81ad2a93ad5a001c5f | from typing import Any, List, Sequence
from boa3.builtin.interop.contract.callflagstype import CallFlags
from boa3.builtin.interop.contract.contract import Contract
from boa3.builtin.type import ECPoint, UInt160
def call_contract(script_hash: UInt160, method: str, args: Sequence = (), call_flags: CallFlags = CallFlags.ALL) -> Any:
"""
Calls a smart contract given the method and the arguments.
:param script_hash: the target smart contract's script hash
:type script_hash: UInt160
:param method: the name of the method to be executed
:type method: str
:param args: the specified method's arguments
:type args: Sequence[Any]
:param call_flags: the CallFlags to be used to call the contract
:type call_flags: CallFlags
:return: the result of the specified method
:rtype: Any
    :raise Exception: raised if there isn't a valid CallFlags, the script hash is not a valid smart contract, the
        method was not found, or the arguments aren't valid for the specified method.
"""
pass
def create_contract(nef_file: bytes, manifest: bytes, data: Any = None) -> Contract:
"""
Creates a smart contract given the script and the manifest.
:param nef_file: the target smart contract's compiled nef
:type nef_file: bytes
:param manifest: the manifest.json that describes how the script should behave
:type manifest: bytes
:param data: the parameters for the _deploy function
:type data: Any
:return: the contract that was created
:rtype: Contract
:raise Exception: raised if the nef or the manifest are not a valid smart contract.
"""
pass
def update_contract(nef_file: bytes, manifest: bytes, data: Any = None):
"""
Updates the executing smart contract given the script and the manifest.
:param nef_file: the new smart contract's compiled nef
:type nef_file: bytes
:param manifest: the new smart contract's manifest
:type manifest: bytes
:param data: the parameters for the _deploy function
:type data: Any
:raise Exception: raised if the nef and the manifest are not a valid smart contract or the new contract is the
same as the old one.
"""
pass
def destroy_contract():
"""
Destroy the executing smart contract.
"""
pass
def get_minimum_deployment_fee() -> int:
"""
Gets the minimum fee of contract deployment.
:return: the minimum fee of contract deployment
"""
pass
def get_call_flags() -> CallFlags:
"""
Gets the CallFlags in the current context.
"""
pass
def create_standard_account(pub_key: ECPoint) -> UInt160:
"""
Calculates the script hash from a public key.
:param pub_key: the given public key
:type pub_key: ECPoint
:return: the corresponding script hash of the public key
:rtype: UInt160
"""
pass
def create_multisig_account(m: int, pub_keys: List[ECPoint]) -> UInt160:
"""
Calculates corresponding multisig account script hash for the given public keys.
:param m: the minimum number of correct signatures need to be provided in order for the verification to pass.
:type m: int
:param pub_keys: the public keys of the account
:type pub_keys: List[ECPoint]
:return: the hash of the corresponding account
:rtype: UInt160
"""
pass
NEO: UInt160 = UInt160()
"""
NEO's token script hash.
:meta hide-value:
"""
GAS: UInt160 = UInt160()
"""
GAS' token script hash.
:meta hide-value:
"""
|
py | 1a3ed2e0c5029b418e8a8df6b5129688380b2201 | # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, too-few-public-methods, bad-continuation
"""Test cases for the mlnx_ofed module"""
from __future__ import unicode_literals
from __future__ import print_function
import logging # pylint: disable=unused-import
import unittest
from helpers import aarch64, centos, centos8, docker, ppc64le, ubuntu, ubuntu18, ubuntu20, x86_64
from hpccm.building_blocks.mlnx_ofed import mlnx_ofed
class Test_mlnx_ofed(unittest.TestCase):
def setUp(self):
"""Disable logging output messages"""
logging.disable(logging.ERROR)
@x86_64
@ubuntu
@docker
def test_defaults_ubuntu(self):
"""Default mlnx_ofed building block"""
mofed = mlnx_ofed()
self.assertEqual(str(mofed),
r'''# Mellanox OFED version 5.2-2.2.0.0
RUN apt-get update -y && \
DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
ca-certificates \
gnupg \
wget && \
rm -rf /var/lib/apt/lists/*
RUN wget -qO - https://www.mellanox.com/downloads/ofed/RPM-GPG-KEY-Mellanox | apt-key add - && \
mkdir -p /etc/apt/sources.list.d && wget -q -nc --no-check-certificate -P /etc/apt/sources.list.d https://linux.mellanox.com/public/repo/mlnx_ofed/5.2-2.2.0.0/ubuntu16.04/mellanox_mlnx_ofed.list && \
apt-get update -y && \
DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
ibverbs-providers \
ibverbs-utils \
libibmad-dev \
libibmad5 \
libibumad-dev \
libibumad3 \
libibverbs-dev \
libibverbs1 \
librdmacm-dev \
librdmacm1 && \
rm -rf /var/lib/apt/lists/*''')
@x86_64
@ubuntu18
@docker
def test_defaults_ubuntu18(self):
"""Default mlnx_ofed building block"""
mofed = mlnx_ofed()
self.assertEqual(str(mofed),
r'''# Mellanox OFED version 5.2-2.2.0.0
RUN apt-get update -y && \
DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
ca-certificates \
gnupg \
wget && \
rm -rf /var/lib/apt/lists/*
RUN wget -qO - https://www.mellanox.com/downloads/ofed/RPM-GPG-KEY-Mellanox | apt-key add - && \
mkdir -p /etc/apt/sources.list.d && wget -q -nc --no-check-certificate -P /etc/apt/sources.list.d https://linux.mellanox.com/public/repo/mlnx_ofed/5.2-2.2.0.0/ubuntu18.04/mellanox_mlnx_ofed.list && \
apt-get update -y && \
DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
ibverbs-providers \
ibverbs-utils \
libibmad-dev \
libibmad5 \
libibumad-dev \
libibumad3 \
libibverbs-dev \
libibverbs1 \
librdmacm-dev \
librdmacm1 && \
rm -rf /var/lib/apt/lists/*''')
@x86_64
@ubuntu20
@docker
def test_defaults_ubuntu20(self):
"""Default mlnx_ofed building block"""
mofed = mlnx_ofed()
self.assertEqual(str(mofed),
r'''# Mellanox OFED version 5.2-2.2.0.0
RUN apt-get update -y && \
DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
ca-certificates \
gnupg \
wget && \
rm -rf /var/lib/apt/lists/*
RUN wget -qO - https://www.mellanox.com/downloads/ofed/RPM-GPG-KEY-Mellanox | apt-key add - && \
mkdir -p /etc/apt/sources.list.d && wget -q -nc --no-check-certificate -P /etc/apt/sources.list.d https://linux.mellanox.com/public/repo/mlnx_ofed/5.2-2.2.0.0/ubuntu20.04/mellanox_mlnx_ofed.list && \
apt-get update -y && \
DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
ibverbs-providers \
ibverbs-utils \
libibmad-dev \
libibmad5 \
libibumad-dev \
libibumad3 \
libibverbs-dev \
libibverbs1 \
librdmacm-dev \
librdmacm1 && \
rm -rf /var/lib/apt/lists/*''')
@x86_64
@centos
@docker
def test_defaults_centos(self):
"""Default mlnx_ofed building block"""
mofed = mlnx_ofed()
self.assertEqual(str(mofed),
r'''# Mellanox OFED version 5.2-2.2.0.0
RUN yum install -y \
ca-certificates \
gnupg \
wget && \
rm -rf /var/cache/yum/*
RUN rpm --import https://www.mellanox.com/downloads/ofed/RPM-GPG-KEY-Mellanox && \
yum install -y yum-utils && \
yum-config-manager --add-repo https://linux.mellanox.com/public/repo/mlnx_ofed/5.2-2.2.0.0/rhel7.2/mellanox_mlnx_ofed.repo && \
yum install -y \
libibumad \
libibverbs \
libibverbs-utils \
librdmacm \
rdma-core \
rdma-core-devel && \
rm -rf /var/cache/yum/*''')
@x86_64
@centos8
@docker
def test_defaults_centos8(self):
"""Default mlnx_ofed building block"""
mofed = mlnx_ofed()
self.assertEqual(str(mofed),
r'''# Mellanox OFED version 5.2-2.2.0.0
RUN yum install -y \
ca-certificates \
gnupg \
wget && \
rm -rf /var/cache/yum/*
RUN rpm --import https://www.mellanox.com/downloads/ofed/RPM-GPG-KEY-Mellanox && \
yum install -y dnf-utils && \
yum-config-manager --add-repo https://linux.mellanox.com/public/repo/mlnx_ofed/5.2-2.2.0.0/rhel8.0/mellanox_mlnx_ofed.repo && \
yum install -y \
libibumad \
libibverbs \
libibverbs-utils \
librdmacm \
rdma-core \
rdma-core-devel && \
rm -rf /var/cache/yum/*''')
@x86_64
@ubuntu
@docker
def test_prefix_ubuntu(self):
"""Prefix option"""
mofed = mlnx_ofed(prefix='/opt/ofed', version='4.6-1.0.1.1')
self.assertEqual(str(mofed),
r'''# Mellanox OFED version 4.6-1.0.1.1
RUN apt-get update -y && \
DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
ca-certificates \
gnupg \
libnl-3-200 \
libnl-route-3-200 \
libnuma1 \
wget && \
rm -rf /var/lib/apt/lists/*
RUN wget -qO - https://www.mellanox.com/downloads/ofed/RPM-GPG-KEY-Mellanox | apt-key add - && \
mkdir -p /etc/apt/sources.list.d && wget -q -nc --no-check-certificate -P /etc/apt/sources.list.d https://linux.mellanox.com/public/repo/mlnx_ofed/4.6-1.0.1.1/ubuntu16.04/mellanox_mlnx_ofed.list && \
apt-get update -y && \
mkdir -m 777 -p /var/tmp/packages_download && cd /var/tmp/packages_download && \
DEBIAN_FRONTEND=noninteractive apt-get download -y --no-install-recommends \
ibverbs-utils \
libibmad \
libibmad-devel \
libibumad \
libibumad-devel \
libibverbs-dev \
libibverbs1 \
libmlx4-1 \
libmlx4-dev \
libmlx5-1 \
libmlx5-dev \
librdmacm-dev \
librdmacm1 && \
mkdir -p /opt/ofed && \
find /var/tmp/packages_download -regextype posix-extended -type f -regex "/var/tmp/packages_download/(ibverbs-utils|libibmad|libibmad-devel|libibumad|libibumad-devel|libibverbs-dev|libibverbs1|libmlx4-1|libmlx4-dev|libmlx5-1|libmlx5-dev|librdmacm-dev|librdmacm1).*deb" -exec dpkg --extract {} /opt/ofed \; && \
rm -rf /var/tmp/packages_download && \
rm -f /etc/apt/sources.list.d/mellanox_mlnx_ofed.list && \
rm -rf /var/lib/apt/lists/*
RUN mkdir -p /etc/libibverbs.d''')
@x86_64
@centos
@docker
def test_prefix_centos(self):
"""Prefix option"""
mofed = mlnx_ofed(prefix='/opt/ofed', version='4.6-1.0.1.1')
self.assertEqual(str(mofed),
r'''# Mellanox OFED version 4.6-1.0.1.1
RUN yum install -y \
ca-certificates \
gnupg \
libnl \
libnl3 \
numactl-libs \
wget && \
rm -rf /var/cache/yum/*
RUN rpm --import https://www.mellanox.com/downloads/ofed/RPM-GPG-KEY-Mellanox && \
yum install -y yum-utils && \
yum-config-manager --add-repo https://linux.mellanox.com/public/repo/mlnx_ofed/4.6-1.0.1.1/rhel7.2/mellanox_mlnx_ofed.repo && \
yum install -y yum-utils && \
mkdir -p /var/tmp/packages_download && \
yumdownloader --destdir=/var/tmp/packages_download -x \*i?86 --archlist=x86_64 \
libibmad \
libibmad-devel \
libibumad \
libibumad-devel \
libibverbs \
libibverbs-devel \
libibverbs-utils \
libmlx4 \
libmlx4-devel \
libmlx5 \
libmlx5-devel \
librdmacm \
librdmacm-devel && \
mkdir -p /opt/ofed && cd /opt/ofed && \
find /var/tmp/packages_download -regextype posix-extended -type f -regex "/var/tmp/packages_download/(libibmad|libibmad-devel|libibumad|libibumad-devel|libibverbs|libibverbs-devel|libibverbs-utils|libmlx4|libmlx4-devel|libmlx5|libmlx5-devel|librdmacm|librdmacm-devel).*rpm" -exec sh -c "rpm2cpio {} | cpio -idm" \; && \
rm -rf /var/tmp/packages_download && \
rm -rf /var/cache/yum/*
RUN mkdir -p /etc/libibverbs.d''')
@aarch64
@centos
@docker
def test_aarch64_centos(self):
"""aarch64"""
mofed = mlnx_ofed(version='4.6-1.0.1.1')
self.assertEqual(str(mofed),
r'''# Mellanox OFED version 4.6-1.0.1.1
RUN yum install -y \
ca-certificates \
gnupg \
wget && \
rm -rf /var/cache/yum/*
RUN rpm --import https://www.mellanox.com/downloads/ofed/RPM-GPG-KEY-Mellanox && \
yum install -y yum-utils && \
yum-config-manager --add-repo https://linux.mellanox.com/public/repo/mlnx_ofed/4.6-1.0.1.1/rhel7.6alternate/mellanox_mlnx_ofed.repo && \
yum install -y \
libibmad \
libibmad-devel \
libibumad \
libibumad-devel \
libibverbs \
libibverbs-devel \
libibverbs-utils \
libmlx4 \
libmlx4-devel \
libmlx5 \
libmlx5-devel \
librdmacm \
librdmacm-devel && \
rm -rf /var/cache/yum/*''')
@x86_64
@ubuntu
@docker
def test_runtime(self):
"""Runtime"""
mofed = mlnx_ofed(version='5.0-2.1.8.0')
r = mofed.runtime()
self.assertEqual(r,
r'''# Mellanox OFED version 5.0-2.1.8.0
RUN apt-get update -y && \
DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
ca-certificates \
gnupg \
wget && \
rm -rf /var/lib/apt/lists/*
RUN wget -qO - https://www.mellanox.com/downloads/ofed/RPM-GPG-KEY-Mellanox | apt-key add - && \
mkdir -p /etc/apt/sources.list.d && wget -q -nc --no-check-certificate -P /etc/apt/sources.list.d https://linux.mellanox.com/public/repo/mlnx_ofed/5.0-2.1.8.0/ubuntu16.04/mellanox_mlnx_ofed.list && \
apt-get update -y && \
DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
ibverbs-providers \
ibverbs-utils \
libibmad-dev \
libibmad5 \
libibumad-dev \
libibumad3 \
libibverbs-dev \
libibverbs1 \
librdmacm-dev \
librdmacm1 && \
rm -rf /var/lib/apt/lists/*''')
@x86_64
@ubuntu
@docker
def test_prefix_runtime(self):
"""Prefix runtime"""
mofed = mlnx_ofed(prefix='/opt/ofed', version='5.0-2.1.8.0')
r = mofed.runtime()
self.assertEqual(r,
r'''# Mellanox OFED version 5.0-2.1.8.0
RUN apt-get update -y && \
DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
libnl-3-200 \
libnl-route-3-200 \
libnuma1 && \
rm -rf /var/lib/apt/lists/*
RUN mkdir -p /etc/libibverbs.d
COPY --from=0 /opt/ofed /opt/ofed''')
|
py | 1a3ed4232f778e3263f6e3ad8264bde1ddb2cb33 | """
Created on Mar 26, 2010
@author: Barthelemy Dagenais
"""
from __future__ import unicode_literals, absolute_import
import unittest
from jt.py4j.java_gateway import JavaGateway, GatewayParameters
from .java_gateway_test import ( # <AK> was: from py4j.tests.
start_example_app_process, safe_shutdown, sleep)
class AutoConvertTest(unittest.TestCase):
def setUp(self):
self.p = start_example_app_process()
self.gateway = JavaGateway(
gateway_parameters=GatewayParameters(auto_convert=True))
def tearDown(self):
safe_shutdown(self)
self.p.join()
sleep()
def testAutoConvert(self):
sj = self.gateway.jvm.java.util.HashSet()
sj.add("b")
sj.add(1)
sp = {1, "b"}
self.assertTrue(sj.equals(sp))
class SetTest(unittest.TestCase):
def setUp(self):
self.p = start_example_app_process()
self.gateway = JavaGateway()
def tearDown(self):
safe_shutdown(self)
self.p.join()
sleep()
def testTreeSet(self):
set1 = set()
set2 = self.gateway.jvm.java.util.TreeSet()
set1.add("a")
set2.add("a")
self.assertEqual(len(set1), len(set2))
self.assertEqual("a" in set1, "a" in set2)
self.assertEqual(repr(set1), repr(set2))
set1.add("b")
set2.add("b")
self.assertEqual(len(set1), len(set2))
self.assertEqual("a" in set1, "a" in set2)
self.assertEqual("b" in set1, "b" in set2)
# not a good assumption with Python 3.3. Oh dear.
# self.assertEqual(repr(set1), repr(set2))
set1.remove("a")
set2.remove("a")
self.assertEqual(len(set1), len(set2))
self.assertEqual("a" in set1, "a" in set2)
self.assertEqual("b" in set1, "b" in set2)
# self.assertEqual(repr(set1), repr(set2))
set1.clear()
set2.clear()
self.assertEqual(len(set1), len(set2))
self.assertEqual("a" in set1, "a" in set2)
self.assertEqual("b" in set1, "b" in set2)
# self.assertEqual(repr(set1), repr(set2))
def testHashSet(self):
set1 = set()
set2 = self.gateway.jvm.java.util.HashSet()
set1.add("a")
set2.add("a")
set1.add(1)
set2.add(1)
set1.add("b")
set2.add("b")
self.assertEqual(len(set1), len(set2))
self.assertEqual("a" in set1, "a" in set2)
self.assertEqual("b" in set1, "b" in set2)
self.assertEqual(1 in set1, 1 in set2)
set1.remove(1)
set2.remove(1)
self.assertEqual(len(set1), len(set2))
self.assertEqual("a" in set1, "a" in set2)
self.assertEqual("b" in set1, "b" in set2)
self.assertEqual(1 in set1, 1 in set2)
set1.clear()
set2.clear()
self.assertEqual(len(set1), len(set2))
self.assertEqual("a" in set1, "a" in set2)
self.assertEqual("b" in set1, "b" in set2)
self.assertEqual(1 in set1, 1 in set2)
if __name__ == "__main__":
unittest.main()
|
py | 1a3ed45fa92b2299caea877ff11b041736263a71 | # -*- coding: utf-8 -*-
#
# Copyright 2017 Ricequant, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import six
from rqalpha.model.base_account import BaseAccount
from rqalpha.environment import Environment
from rqalpha.events import EVENT
from rqalpha.const import DEFAULT_ACCOUNT_TYPE, POSITION_EFFECT, SIDE
from rqalpha.utils.i18n import gettext as _
from rqalpha.utils.logger import user_system_log
from ..api.api_future import order
def margin_of(order_book_id, quantity, price):
env = Environment.get_instance()
margin_info = env.data_proxy.get_margin_info(order_book_id)
margin_multiplier = env.config.base.margin_multiplier
margin_rate = margin_info['long_margin_ratio'] * margin_multiplier
contract_multiplier = env.get_instrument(order_book_id).contract_multiplier
return quantity * contract_multiplier * price * margin_rate
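# Worked example for margin_of (illustrative numbers only): with a contract
# multiplier of 10, a price of 3500, a long_margin_ratio of 0.1 and a
# margin_multiplier of 1.0, the margin for 2 lots is
#     2 * 10 * 3500 * (0.1 * 1.0) = 7000.0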
class FutureAccount(BaseAccount):
__abandon_properties__ = [
"daily_holding_pnl",
"daily_realized_pnl"
]
def register_event(self):
event_bus = Environment.get_instance().event_bus
event_bus.add_listener(EVENT.SETTLEMENT, self._settlement)
event_bus.add_listener(EVENT.ORDER_PENDING_NEW, self._on_order_pending_new)
event_bus.add_listener(EVENT.ORDER_CREATION_REJECT, self._on_order_creation_reject)
event_bus.add_listener(EVENT.ORDER_CANCELLATION_PASS, self._on_order_unsolicited_update)
event_bus.add_listener(EVENT.ORDER_UNSOLICITED_UPDATE, self._on_order_unsolicited_update)
event_bus.add_listener(EVENT.TRADE, self._on_trade)
if self.AGGRESSIVE_UPDATE_LAST_PRICE:
event_bus.add_listener(EVENT.BAR, self._on_bar)
event_bus.add_listener(EVENT.TICK, self._on_tick)
def fast_forward(self, orders, trades=list()):
        # Compute positions
for trade in trades:
if trade.exec_id in self._backward_trade_set:
continue
self._apply_trade(trade)
        # Compute frozen cash
self._frozen_cash = sum(self._frozen_cash_of_order(order) for order in orders if order.is_active())
def order(self, order_book_id, quantity, style, target=False):
position = self.positions[order_book_id]
if target:
# For order_to
quantity = quantity - position.buy_quantity + position.sell_quantity
orders = []
if quantity > 0:
            # Close yesterday's positions first
if position.sell_old_quantity > 0:
soq = position.sell_old_quantity
orders.append(order(
order_book_id,
min(quantity, position.sell_old_quantity),
SIDE.BUY,
POSITION_EFFECT.CLOSE,
style
))
quantity -= soq
if quantity <= 0:
return orders
            # Then close today's positions
if position.sell_today_quantity > 0:
stq = position.sell_today_quantity
orders.append(order(
order_book_id,
min(quantity, position.sell_today_quantity),
SIDE.BUY,
POSITION_EFFECT.CLOSE_TODAY,
style
))
quantity -= stq
if quantity <= 0:
return orders
            # Open a long position with whatever remains
orders.append(order(
order_book_id,
quantity,
SIDE.BUY,
POSITION_EFFECT.OPEN,
style
))
return orders
else:
            # Close yesterday's positions first
quantity *= -1
if position.buy_old_quantity > 0:
boq = position.buy_old_quantity
orders.append(order(
order_book_id,
min(quantity, position.buy_old_quantity),
SIDE.SELL,
POSITION_EFFECT.CLOSE,
style
))
quantity -= boq
if quantity <= 0:
return orders
            # Then close today's positions
if position.buy_today_quantity > 0:
btq = position.buy_today_quantity
orders.append(order(
order_book_id,
min(quantity, position.buy_today_quantity),
SIDE.SELL,
POSITION_EFFECT.CLOSE_TODAY,
style
))
quantity -= btq
if quantity <= 0:
return orders
            # Open a short position with whatever remains
orders.append(order(
order_book_id,
quantity,
SIDE.SELL,
POSITION_EFFECT.OPEN,
style
))
return orders
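    # Worked example for order() (hypothetical quantities): holding
    # sell_old_quantity=3 and sell_today_quantity=2, asking to buy 7 lots yields
    # three orders: CLOSE 3, CLOSE_TODAY 2, then OPEN 2 on the long side.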
def get_state(self):
return {
'positions': {
order_book_id: position.get_state()
for order_book_id, position in six.iteritems(self._positions)
},
'frozen_cash': self._frozen_cash,
'total_cash': self._total_cash,
'backward_trade_set': list(self._backward_trade_set),
'transaction_cost': self._transaction_cost,
}
def set_state(self, state):
self._frozen_cash = state['frozen_cash']
self._backward_trade_set = set(state['backward_trade_set'])
self._transaction_cost = state['transaction_cost']
margin_changed = 0
self._positions.clear()
for order_book_id, v in six.iteritems(state['positions']):
position = self._positions.get_or_create(order_book_id)
position.set_state(v)
if 'margin_rate' in v and abs(v['margin_rate'] - position.margin_rate) > 1e-6:
margin_changed += position.margin * (v['margin_rate'] - position.margin_rate) / position.margin_rate
self._total_cash = state['total_cash'] + margin_changed
@property
def type(self):
return DEFAULT_ACCOUNT_TYPE.FUTURE.name
@staticmethod
def _frozen_cash_of_order(order):
if order.position_effect == POSITION_EFFECT.OPEN:
return margin_of(order.order_book_id, order.unfilled_quantity, order.frozen_price)
else:
return 0
@staticmethod
def _frozen_cash_of_trade(trade):
if trade.position_effect == POSITION_EFFECT.OPEN:
return margin_of(trade.order_book_id, trade.last_quantity, trade.frozen_price)
else:
return 0
@property
def total_value(self):
return self._total_cash + self.margin + self.holding_pnl
# -- Margin 相关
@property
def margin(self):
"""
        [float] total margin
"""
return sum(position.margin for position in six.itervalues(self._positions))
@property
def buy_margin(self):
"""
        [float] margin on the long (buy) side
"""
return sum(position.buy_margin for position in six.itervalues(self._positions))
@property
def sell_margin(self):
"""
        [float] margin on the short (sell) side
"""
return sum(position.sell_margin for position in six.itervalues(self._positions))
# -- PNL 相关
@property
def daily_pnl(self):
"""
        [float] daily P&L
"""
return self.realized_pnl + self.holding_pnl - self.transaction_cost
@property
def holding_pnl(self):
"""
        [float] holding (unrealized) P&L
"""
return sum(position.holding_pnl for position in six.itervalues(self._positions))
@property
def realized_pnl(self):
"""
        [float] realized P&L
"""
return sum(position.realized_pnl for position in six.itervalues(self._positions))
def _settlement(self, event):
total_value = self.total_value
for position in list(self._positions.values()):
order_book_id = position.order_book_id
if position.is_de_listed() and position.buy_quantity + position.sell_quantity != 0:
user_system_log.warn(
_(u"{order_book_id} is expired, close all positions by system").format(order_book_id=order_book_id))
del self._positions[order_book_id]
elif position.buy_quantity == 0 and position.sell_quantity == 0:
del self._positions[order_book_id]
else:
position.apply_settlement()
self._total_cash = total_value - self.margin - self.holding_pnl
        self._transaction_cost = 0  # TODO: revise settlement handling
        # If total_value <= 0, treat the account as blown up: clear all positions and zero out the cash
if total_value <= 0:
self._positions.clear()
self._total_cash = 0
self._backward_trade_set.clear()
def _on_bar(self, event):
for position in self._positions.values():
position.update_last_price()
def _on_tick(self, event):
for position in self._positions.values():
position.update_last_price()
def _on_order_pending_new(self, event):
if self != event.account:
return
self._frozen_cash += self._frozen_cash_of_order(event.order)
def _on_order_creation_reject(self, event):
if self != event.account:
return
self._frozen_cash -= self._frozen_cash_of_order(event.order)
def _on_order_unsolicited_update(self, event):
if self != event.account:
return
self._frozen_cash -= self._frozen_cash_of_order(event.order)
def _on_trade(self, event):
if self != event.account:
return
self._apply_trade(event.trade)
def _apply_trade(self, trade):
if trade.exec_id in self._backward_trade_set:
return
order_book_id = trade.order_book_id
position = self._positions.get_or_create(order_book_id)
delta_cash = position.apply_trade(trade)
self._transaction_cost += trade.transaction_cost
self._total_cash -= trade.transaction_cost
self._total_cash += delta_cash
self._frozen_cash -= self._frozen_cash_of_trade(trade)
self._backward_trade_set.add(trade.exec_id)
# ------------------------------------ Abandon Property ------------------------------------
@property
def daily_holding_pnl(self):
"""
        [Deprecated] use holding_pnl instead
"""
user_system_log.warn(_(u"[abandon] {} is no longer used.").format('future_account.daily_holding_pnl'))
return self.holding_pnl
@property
def daily_realized_pnl(self):
"""
        [Deprecated] use realized_pnl instead
"""
user_system_log.warn(_(u"[abandon] {} is no longer used.").format('future_account.daily_realized_pnl'))
return self.realized_pnl
|
py | 1a3ed488c611dfee1d83b9b214ec322c3b4531cf | # -*- coding: utf-8 -*-
import numpy as np
from .metrics import accuracy_score
class LogisticRegression(object):
def __init__(self):
"""初始化逻辑回归模型"""
self.coef_ = None
self.intercept_ = None
self._theta = None
def _sigmoid(self, t):
return 1. / (1. + np.exp(-t))
def fit(self, X_train, y_train, eta=0.01, n_iters=1e4):
"""根据训练数据集X_train,y_train,使用梯度下降法训练逻辑回归模型"""
assert X_train.shape[0] == y_train.shape[0], '训练集与结果集的样本数必须一致'
def J(theta, X_b, y):
"""定义损失函数"""
y_hat = self._sigmoid(X_b.dot(theta))
try:
return np.sum(np.dot(y, np.log(y_hat)) + np.dot((1 - y), np.log(1 - y_hat))) / -len(y)
except:
return float('inf')
def derivative_J(theta, X_b, y):
"""求逻辑回归的梯度"""
return X_b.T.dot(self._sigmoid(X_b.dot(theta)) - y) / len(X_b)
def gradient_descent(X_b, y, initial_theta, eta, n_iters=1e4, epsilon=1e-8):
"""梯度下降法求θ"""
theta = initial_theta
iters = 0
while iters < n_iters:
gradient = derivative_J(theta, X_b, y)
last_theta = theta
theta = theta - eta * gradient
if (abs(J(theta, X_b, y) - J(last_theta, X_b, y)) < epsilon):
break
iters += 1
return theta
X_b = np.hstack((np.ones((len(X_train), 1)), X_train))
        initial_theta = np.zeros(X_b.shape[1])  # the initial theta vector is all zeros
self._theta = gradient_descent(X_b, y_train, initial_theta, eta, n_iters)
self.intercept_ = self._theta[0]
self.coef_ = self._theta[1:]
return self
def predict_proba(self, X_predict):
"""给定待预测数据集X_predict,返回表示X_predict的结果概率向量"""
X_b = np.hstack([np.ones(shape=(X_predict.shape[0], 1)), X_predict])
return self._sigmoid(X_b.dot(self._theta))
def predict(self, X_predict):
proba = self.predict_proba(X_predict)
        return np.array(proba >= .5, dtype=int)  # convert the True/False vector into a 1/0 vector
def score(self, X_test, y_test):
y_predict = self.predict(X_test)
return accuracy_score(y_test, y_predict)
def __repr__(self):
        return 'LogisticRegression()'
|
py | 1a3ed4b6b0dad5fdd4c5cfade2b0d76c1ba9a86a | def build_poetry_assistant(words_to_phonemes):
# complete the function body (8 MARKS)
''' (dict of {str: list of str}) -> dict of {tuple of str: list of str}
Return a poetry assistant dictionary from the words to phonemes in
words_to_phonemes.
    >>> words_to_phonemes = {'BEFORE': ['B', 'IH0', 'F', 'AO1', 'R'],'THE': ['DH', 'AH0'],
... 'A': ['AH0'],
... 'POEM': ['P', 'OW1', 'AH0', 'M'],
... 'OR': ['AO1', 'R']}
>>> actual = build_poetry_assistant(words_to_phonemes)
    >>> expected = {('AH0',): ['THE', 'A'], ('AH0', 'M'): ['POEM'],
... ('AO1', 'R'): ['BEFORE', 'OR']}
>>> actual == expected
True
'''
rhyme_dict = {}
    for word in words_to_phonemes:
        # extract the word's rhyming ending (its last phonemes) as a tuple
end = tuple(last_phonemes(words_to_phonemes[word]))
if end not in rhyme_dict:
# insert an entry of the tuple in the dictionary
rhyme_dict[end] = []
        # append the word to the list of words that share this rhyme ending
rhyme_dict[end].append(word)
return rhyme_dict
def last_phonemes(phoneme_list):
''' (list of str) -> list of str
Return the last vowel phoneme and any subequent consonant phoneme(s) from
phoneme_list, in the same order as they appear in phoneme_list.
>>> last_phonemes(['AE1', 'B', 'S', 'IH0', 'N', 'TH'])
['IH0', 'N', 'TH']
>>> last_phonemes(['IH0', 'N'])
['IH0', 'N']
'''
    vowels = 'AEIOU'
    last_phonemes_list = []
    for i in range(len(phoneme_list)):
        if phoneme_list[i][0] in vowels:
            # keep the slice starting at the most recent vowel phoneme seen so far
            last_phonemes_list = phoneme_list[i:]
    return last_phonemes_list
def find_rhymes(phonemes_to_words, word):
# complete the function body (4 MARKS)
''' (dict of {tuple of str: list of str}, str) -> list of str
Precondition: word.isalpha() and word.isupper() are True, and word appears
in exactly one value list in the phonemes_to_words dictionary
Return a list of all words in phonemes_to_words that rhyme with word.
Do not include word in the list.
>>> phonemes_to_words = {('AO1', 'R'): ['BEFORE', 'OR'],
... ('AH0', 'M'): ['POEM'], ('AH0',): ['THE', 'A']}
>>> find_rhymes(phonemes_to_words, 'OR')
['BEFORE']
>>> find_rhymes(phonemes_to_words, 'POEM')
[]
'''
rhymes = []
for phoneme in phonemes_to_words:
if word in phonemes_to_words[phoneme]:
            for words in phonemes_to_words[phoneme]:
if words != word:
                    rhymes.append(words)
return rhymes
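# Hedged end-to-end sketch (not part of the original exercise): ties the two
# functions together using the same toy pronunciation data as the doctests.
if __name__ == '__main__':
    words_to_phonemes = {'BEFORE': ['B', 'IH0', 'F', 'AO1', 'R'],
                         'THE': ['DH', 'AH0'],
                         'A': ['AH0'],
                         'POEM': ['P', 'OW1', 'AH0', 'M'],
                         'OR': ['AO1', 'R']}
    phonemes_to_words = build_poetry_assistant(words_to_phonemes)
    print(find_rhymes(phonemes_to_words, 'OR'))  # expected: ['BEFORE']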
|
py | 1a3ed4ffc6bfde3b09c3d833c0b038c0b466b8ca | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for api module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import gc
import imp
import os
import re
import textwrap
import types
import numpy as np
from tensorflow.python.autograph import utils
from tensorflow.python.autograph.core import ag_ctx
from tensorflow.python.autograph.core import converter
from tensorflow.python.autograph.impl import api
from tensorflow.python.autograph.pyct import inspect_utils
from tensorflow.python.autograph.pyct import parser
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.eager import def_function
from tensorflow.python.eager import function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import test_util
from tensorflow.python.keras.engine import sequential
from tensorflow.python.keras.layers import core
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.util import function_utils
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect
tf = utils.fake_tf()
global_n = 2
class TestResource(object):
def __init__(self):
self.x = 3
class ApiTest(test.TestCase):
@test_util.run_deprecated_v1
def test_decorator_recursive(self):
class TestClass(object):
def called_member(self, a):
if a < 0:
a = -a
return a
@api.convert(recursive=True)
def test_method(self, x, s, a):
while tf.reduce_sum(x) > s:
x //= self.called_member(a)
return x
tc = TestClass()
with self.cached_session() as sess:
x = tc.test_method(
constant_op.constant([2, 4]), constant_op.constant(1),
constant_op.constant(-2))
self.assertListEqual([0, 1], self.evaluate(x).tolist())
@test_util.run_deprecated_v1
def test_decorator_not_recursive(self):
class TestClass(object):
def called_member(self, a):
return tf.negative(a)
@api.convert(recursive=False)
def test_method(self, x, s, a):
while tf.reduce_sum(x) > s:
x //= self.called_member(a)
return x
tc = TestClass()
with self.cached_session() as sess:
x = tc.test_method(
constant_op.constant([2, 4]), constant_op.constant(1),
constant_op.constant(-2))
self.assertListEqual([0, 1], self.evaluate(x).tolist())
@test_util.run_deprecated_v1
def test_convert_then_do_not_convert(self):
class TestClass(object):
@api.do_not_convert
def called_member(self, a):
return tf.negative(a)
@api.convert(recursive=True)
def test_method(self, x, s, a):
while tf.reduce_sum(x) > s:
x //= self.called_member(a)
return x
tc = TestClass()
x = tc.test_method(
constant_op.constant((2, 4)), constant_op.constant(1),
constant_op.constant(-2))
self.assertAllEqual((0, 1), self.evaluate(x))
@test_util.run_deprecated_v1
def test_decorator_calls_decorated(self):
class TestClass(object):
@api.convert()
def called_member(self, a):
if a < 0:
a = -a
return a
@api.convert(recursive=True)
def test_method(self, x, s, a):
while tf.reduce_sum(x) > s:
x //= self.called_member(a)
return x
tc = TestClass()
with self.cached_session() as sess:
x = tc.test_method(
constant_op.constant([2, 4]), constant_op.constant(1),
constant_op.constant(-2))
self.assertListEqual([0, 1], self.evaluate(x).tolist())
def test_decorator_preserves_argspec(self):
class TestClass(object):
def test_method(self, a):
if a < 0:
a = -a
return a
test_method_converted = api.convert()(test_method)
tc = TestClass()
self.assertListEqual(
list(tf_inspect.getfullargspec(tc.test_method)),
list(tf_inspect.getfullargspec(tc.test_method_converted)))
def test_do_not_convert_argspec(self):
class TestClass(object):
def test_method(self, x, y):
z = x + y
return z
test_method_whitelisted = api.do_not_convert(test_method)
tc = TestClass()
self.assertTrue(tf_inspect.ismethod(tc.test_method_whitelisted))
# Because the wrapped function is not generated, we can't preserve its
# arg spec.
self.assertEqual((),
tuple(function_utils.fn_args(tc.test_method_whitelisted)))
def test_do_not_convert_callable_object(self):
class TestClass(object):
def __call__(self):
return 1
tc = TestClass()
self.assertEqual(1, api.do_not_convert(tc)())
@test_util.run_deprecated_v1
def test_convert_call_site_decorator(self):
class TestClass(object):
def called_member(self, a):
if a < 0:
a = -a
return a
@api.convert(recursive=True)
def test_method(self, x, s, a):
while tf.reduce_sum(x) > s:
x //= api.converted_call(self.called_member,
converter.ConversionOptions(recursive=True),
(a,), {})
return x
tc = TestClass()
x = tc.test_method(
constant_op.constant([2, 4]), constant_op.constant(1),
constant_op.constant(-2))
self.assertListEqual([0, 1], self.evaluate(x).tolist())
def test_converted_call_builtin(self):
x = api.converted_call(range, converter.ConversionOptions(recursive=True),
(3,), {})
self.assertEqual((0, 1, 2), tuple(x))
x = api.converted_call(re.compile,
converter.ConversionOptions(recursive=True),
('mnas_v4_a.*\\/.*(weights|kernel):0$',), {})
self.assertIsNotNone(x.match('mnas_v4_a/weights:0'))
def test_converted_call_function(self):
def test_fn(x):
if x < 0:
return -x
return x
x = api.converted_call(test_fn, converter.ConversionOptions(recursive=True),
(constant_op.constant(-1),), {})
self.assertEqual(1, self.evaluate(x))
@test_util.run_v1_only('b/120545219')
def test_converted_call_functools_partial(self):
def test_fn(x, y, z):
if x < 0:
return -x, -y, -z
return x, y, z
x = api.converted_call(
functools.partial(test_fn, constant_op.constant(-1), z=-3),
converter.ConversionOptions(recursive=True),
(constant_op.constant(-2),), {})
self.assertEqual((1, 2, 3), self.evaluate(x))
x = api.converted_call(
functools.partial(
functools.partial(test_fn, constant_op.constant(-1)), z=-3),
converter.ConversionOptions(recursive=True),
(constant_op.constant(-2),), {})
self.assertEqual((1, 2, 3), self.evaluate(x))
def test_converted_call_method(self):
class TestClass(object):
def __init__(self, x):
self.x = x
def test_method(self):
if self.x < 0:
return -self.x
return self.x
tc = TestClass(constant_op.constant(-1))
x = api.converted_call(tc.test_method,
converter.ConversionOptions(recursive=True), (), {})
self.assertEqual(1, self.evaluate(x))
def test_converted_call_synthetic_method(self):
class TestClass(object):
def __init__(self, x):
self.x = x
def test_function(self):
if self.x < 0:
return -self.x
return self.x
tc = TestClass(constant_op.constant(-1))
test_method = types.MethodType(test_function, tc)
x = api.converted_call(test_method,
converter.ConversionOptions(recursive=True), (), {})
self.assertEqual(1, self.evaluate(x))
def test_converted_call_method_wrapper(self):
class TestClass(object):
def foo(self):
pass
tc = TestClass()
# `method.__get__()` returns a so-called method-wrapper.
wrapper = api.converted_call(tc.foo.__get__,
converter.ConversionOptions(recursive=True),
(tc,), {})
self.assertEqual(wrapper, tc.foo)
def test_converted_call_method_as_object_attribute(self):
class AnotherClass(object):
def __init__(self):
self.another_class_attr = constant_op.constant(1)
def method(self):
if self.another_class_attr > 0:
return self.another_class_attr + 1
return self.another_class_attr + 10
class TestClass(object):
def __init__(self, another_obj_method):
self.another_obj_method = another_obj_method
obj = AnotherClass()
tc = TestClass(obj.method)
x = api.converted_call(tc.another_obj_method,
converter.ConversionOptions(recursive=True), (), {})
self.assertEqual(self.evaluate(x), 2)
def test_converted_call_method_converts_recursively(self):
class TestClass(object):
def __init__(self, x):
self.x = x
def other_method(self):
if self.x < 0:
return -self.x
return self.x
def test_method(self):
return self.other_method()
tc = TestClass(constant_op.constant(-1))
x = api.converted_call(tc.test_method,
converter.ConversionOptions(recursive=True), (), {})
self.assertEqual(1, self.evaluate(x))
def test_converted_call_method_by_class(self):
class TestClass(object):
def __init__(self, x):
self.x = x
def test_method(self):
if self.x < 0:
return -self.x
return self.x
tc = TestClass(constant_op.constant(-1))
x = api.converted_call(TestClass.test_method,
converter.ConversionOptions(recursive=True), (tc,),
{})
self.assertEqual(1, self.evaluate(x))
def test_converted_call_callable_object(self):
class TestClass(object):
def __init__(self, x):
self.x = x
def __call__(self):
if self.x < 0:
return -self.x
return self.x
tc = TestClass(constant_op.constant(-1))
x = api.converted_call(tc, converter.ConversionOptions(recursive=True), (),
{})
self.assertEqual(1, self.evaluate(x))
def test_converted_call_callable_metaclass(self):
class TestMetaclass(type):
x = constant_op.constant(-1)
def __call__(cls):
if cls.x < 0:
cls.x = -cls.x
return cls
tc = TestMetaclass('TestClass', (), {})
# This functools.partial will hide the class form the constructor
# check. Not ideal. See b/120224672.
tc = functools.partial(tc)
converted_tc = api.converted_call(
tc, converter.ConversionOptions(recursive=True), (), {})
self.assertIsInstance(converted_tc, TestMetaclass)
self.assertEqual(1, self.evaluate(converted_tc.x))
@test_util.run_deprecated_v1
def test_converted_call_constructor(self):
class TestClass(object):
def __init__(self, x):
self.x = x
def test_method(self):
if self.x < 0:
return -self.x
return self.x
tc = api.converted_call(TestClass,
converter.ConversionOptions(recursive=True),
(constant_op.constant(-1),), {})
# tc is still a TestClass - constructors are whitelisted.
# TODO(b/124016764): Support this use case.
# The error below is specific to the `if` statement not being converted.
with self.assertRaises(TypeError):
tc.test_method()
def test_converted_call_mangled_properties(self):
class TestClass(object):
def __init__(self, x):
self.__private = x
def test_method(self):
if self.__private < 0:
return self.__private
return self.__private
tc = TestClass(constant_op.constant(-1))
# The error below is specific to the `if` statement not being converted.
with self.assertRaisesRegex(NotImplementedError, 'Mangled names'):
api.converted_call(tc.test_method,
converter.ConversionOptions(recursive=True), (), {})
tc.test_method()
def test_converted_call_already_converted(self):
def f(x):
return x == 0
x = api.converted_call(f, converter.ConversionOptions(recursive=True),
(constant_op.constant(0),), {})
self.assertTrue(self.evaluate(x))
converted_f = api.to_graph(
f, experimental_optional_features=converter.Feature.ALL)
x = api.converted_call(converted_f,
converter.ConversionOptions(recursive=True),
(constant_op.constant(0),), {})
self.assertTrue(self.evaluate(x))
def test_converted_call_then_already_converted_dynamic(self):
@api.convert()
def g(x):
if x > 0:
return x
else:
return -x
def f(g, x):
return g(x)
x = api.converted_call(f, converter.ConversionOptions(recursive=True),
(g, constant_op.constant(1)), {})
self.assertEqual(self.evaluate(x), 1)
def test_converted_call_forced_when_explicitly_whitelisted(self):
@api.do_not_convert()
def f(x):
return x + 1
x = api.converted_call(
f, converter.ConversionOptions(recursive=True, user_requested=True),
(constant_op.constant(0),), {})
self.assertTrue(self.evaluate(x))
converted_f = api.to_graph(
f, experimental_optional_features=converter.Feature.ALL)
x = api.converted_call(converted_f,
converter.ConversionOptions(recursive=True), (0,),
{})
self.assertEqual(x, 1)
@test_util.run_deprecated_v1
def test_converted_call_no_user_code(self):
def f(x):
return len(x)
opts = converter.ConversionOptions(internal_convert_user_code=False)
# f should not be converted, causing len to error out.
with self.assertRaisesRegexp(Exception, 'len is not well defined'):
api.converted_call(f, opts, (constant_op.constant([0]),), {})
# len on the other hand should work fine.
x = api.converted_call(len, opts, (constant_op.constant([0]),), {})
# The constant has static shape so the result is a primitive not a Tensor.
self.assertEqual(x, 1)
def test_converted_call_no_kwargs_allowed(self):
def f(*args):
# Note: np.broadcast rejects any **kwargs, even *{}
return np.broadcast(args[:1])
opts = converter.ConversionOptions(internal_convert_user_code=False)
self.assertIsNotNone(api.converted_call(f, opts, (1, 2, 3, 4), None))
def test_converted_call_whitelisted_method(self):
opts = converter.ConversionOptions(recursive=True)
model = sequential.Sequential([core.Dense(2)])
x = api.converted_call(model.call, opts, (constant_op.constant([[0.0]]),),
{'training': True})
self.evaluate(variables.global_variables_initializer())
self.assertAllEqual([[0.0, 0.0]], self.evaluate(x))
def test_converted_call_whitelisted_method_via_owner(self):
opts = converter.ConversionOptions(recursive=True)
model = sequential.Sequential([core.Dense(2)])
x = api.converted_call(model.call, opts, (constant_op.constant([[0.0]]),),
{'training': True})
self.evaluate(variables.global_variables_initializer())
self.assertAllEqual([[0.0, 0.0]], self.evaluate(x))
def test_converted_call_numpy(self):
opts = converter.ConversionOptions(recursive=True)
x = api.converted_call(np.arange, opts, (5,), {})
self.assertAllEqual(x, list(range(5)))
def test_converted_call_tf_op_forced(self):
# TODO(mdan): Add the missing level of support to LOGICAL_EXPRESSIONS.
opts = converter.ConversionOptions(
user_requested=True, optional_features=None)
x = api.converted_call(gen_math_ops.add, opts, (1, 1), {})
self.assertAllEqual(self.evaluate(x), 2)
def test_converted_call_exec_generated_code(self):
temp_mod = imp.new_module('test_module')
dynamic_code = """
def foo(x):
return x + 1
"""
exec(textwrap.dedent(dynamic_code), temp_mod.__dict__) # pylint:disable=exec-used
opts = converter.ConversionOptions(optional_features=None)
x = api.converted_call(temp_mod.foo, opts, (1,), {})
self.assertAllEqual(x, 2)
def test_converted_call_namedtuple(self):
opts = converter.ConversionOptions(recursive=True)
x = api.converted_call(collections.namedtuple, opts,
('TestNamedtuple', ('a', 'b')), {})
self.assertTrue(inspect_utils.isnamedtuple(x))
def test_converted_call_namedtuple_via_collections(self):
opts = converter.ConversionOptions(recursive=True)
x = api.converted_call(collections.namedtuple, opts,
('TestNamedtuple', ('a', 'b')), {})
self.assertTrue(inspect_utils.isnamedtuple(x))
def test_converted_call_namedtuple_subclass_bound_method(self):
class TestClass(collections.namedtuple('TestNamedtuple', ('a', 'b'))):
def test_method(self, x):
while tf.reduce_sum(x) > self.a:
x //= self.b
return x
opts = converter.ConversionOptions(recursive=True)
obj = TestClass(5, 2)
x = api.converted_call(obj.test_method, opts,
(constant_op.constant([2, 4]),), {})
self.assertAllEqual(self.evaluate(x), [1, 2])
def test_converted_call_namedtuple_method(self):
class TestClass(collections.namedtuple('TestNamedtuple', ('a', 'b'))):
pass
opts = converter.ConversionOptions(recursive=True)
obj = TestClass(5, 2)
# _asdict is a documented method of namedtuple.
x = api.converted_call(obj._asdict, opts, (), {})
self.assertDictEqual(x, {'a': 5, 'b': 2})
def test_converted_call_namedtuple_subclass_unbound_method(self):
class TestClass(collections.namedtuple('TestNamedtuple', ('a', 'b'))):
def test_method(self, x):
while tf.reduce_sum(x) > self.a:
x //= self.b
return x
opts = converter.ConversionOptions(recursive=True)
obj = TestClass(5, 2)
x = api.converted_call(TestClass.test_method, opts,
(obj, constant_op.constant([2, 4])), {})
self.assertAllEqual(self.evaluate(x), [1, 2])
def test_converted_call_lambda(self):
opts = converter.ConversionOptions(recursive=True)
l = lambda x: x == 0
x = api.converted_call(l, opts, (constant_op.constant(0),), {})
self.evaluate(variables.global_variables_initializer())
self.assertAllEqual(True, self.evaluate(x))
def test_converted_call_defun_object_method(self):
opts = converter.ConversionOptions(recursive=True)
# pylint:disable=method-hidden
class TestClass(object):
def method(self):
return 1
def prepare(self):
self.method = function.defun(self.method)
# pylint:enable=method-hidden
tc = TestClass()
tc.prepare()
x = api.converted_call(tc.method, opts, (), {})
self.assertAllEqual(1, self.evaluate(x))
def test_converted_call_through_tf_dataset(self):
def other_fn(x):
if x > 0:
return x
return -x
def f():
return dataset_ops.Dataset.range(-3, 3).map(other_fn)
# Dataset iteration only works inside tf.
@def_function.function
def graph_fn():
opts = converter.ConversionOptions(recursive=True)
ds = api.converted_call(f, opts, (), {})
itr = iter(ds)
return next(itr), next(itr), next(itr)
self.assertAllEqual(self.evaluate(graph_fn()), (3, 2, 1))
def assertNoMemoryLeaks(self, f):
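    """Run `f` and assert it leaked no TestResource objects (checked via gc)."""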
object_ids_before = {id(o) for o in gc.get_objects()}
f()
gc.collect()
objects_after = tuple(
o for o in gc.get_objects() if id(o) not in object_ids_before)
self.assertEmpty(
tuple(o for o in objects_after if isinstance(o, TestResource)))
def test_converted_call_no_leaks_via_closure(self):
def test_fn():
res = TestResource()
def f(y):
return res.x + y
opts = converter.ConversionOptions(recursive=True)
api.converted_call(f, opts, (1,), {})
self.assertNoMemoryLeaks(test_fn)
def test_converted_call_no_leaks_via_inner_function_closure(self):
def test_fn():
res = TestResource()
def f(y):
def inner_f():
return res.x + y
return inner_f
opts = converter.ConversionOptions(recursive=True)
api.converted_call(f, opts, (1,), {})()
self.assertNoMemoryLeaks(test_fn)
def test_context_tracking_direct_calls(self):
@api.do_not_convert()
def unconverted_fn():
self.assertEqual(ag_ctx.control_status_ctx().status,
ag_ctx.Status.DISABLED)
@api.convert()
def converted_fn():
self.assertEqual(ag_ctx.control_status_ctx().status,
ag_ctx.Status.ENABLED)
unconverted_fn()
self.assertEqual(ag_ctx.control_status_ctx().status,
ag_ctx.Status.ENABLED)
self.assertEqual(ag_ctx.control_status_ctx().status,
ag_ctx.Status.UNSPECIFIED)
converted_fn()
self.assertEqual(ag_ctx.control_status_ctx().status,
ag_ctx.Status.UNSPECIFIED)
@api.call_with_unspecified_conversion_status
def unspecified_fn():
self.assertEqual(ag_ctx.control_status_ctx().status,
ag_ctx.Status.UNSPECIFIED)
unspecified_fn()
def test_to_graph_basic(self):
def test_fn(x, s):
while tf.reduce_sum(x) > s:
x //= 2
return x
compiled_fn = api.to_graph(test_fn)
with tf.Graph().as_default():
x = compiled_fn(constant_op.constant((4, 8)), 4)
self.assertAllEqual(self.evaluate(x), (1, 2))
@test_util.run_deprecated_v1
def test_to_graph_with_defaults(self):
foo = 4
def test_fn(x, s=foo):
while tf.reduce_sum(x) > s:
x //= 2
return x
compiled_fn = api.to_graph(test_fn)
with self.cached_session() as sess:
x = compiled_fn(constant_op.constant([4, 8]))
self.assertListEqual([1, 2], self.evaluate(x).tolist())
def test_to_graph_with_globals(self):
def test_fn(x):
global global_n
global_n = x + global_n
return global_n
converted_fn = api.to_graph(test_fn)
prev_val = global_n
converted_fn(10)
self.assertGreater(global_n, prev_val)
def test_to_graph_with_kwargs_clashing_converted_call(self):
def called_fn(**kwargs):
return kwargs['f'] + kwargs['owner']
def test_fn():
# These arg names intentionally match converted_call's
return called_fn(f=1, owner=2)
compiled_fn = api.to_graph(test_fn)
self.assertEqual(compiled_fn(), 3)
def test_to_graph_with_kwargs_clashing_unconverted_call(self):
@api.do_not_convert
def called_fn(**kwargs):
return kwargs['f'] + kwargs['owner']
def test_fn():
# These arg names intentionally match _call_unconverted's
return called_fn(f=1, owner=2)
compiled_fn = api.to_graph(test_fn)
self.assertEqual(compiled_fn(), 3)
def test_to_graph_caching(self):
def test_fn(x):
if x > 0:
return x
else:
return -x
converted_functions = tuple(api.to_graph(test_fn) for _ in (-1, 0, 1))
# All outputs are from the same module. We can't use __module__ because
# that's reset when we instantiate the function (see conversion.py).
# TODO(mdan): Can and should we overwrite __module__ instead?
module_names = frozenset(f.ag_module for f in converted_functions)
self.assertEqual(len(module_names), 1)
self.assertNotIn('__main__', module_names)
self.assertEqual(len(frozenset(id(f) for f in converted_functions)), 3)
def test_to_graph_caching_different_options(self):
def called_fn():
pass
def test_fn():
return called_fn()
converted_recursive = api.to_graph(test_fn, recursive=True)
converted_non_recursive = api.to_graph(test_fn, recursive=False)
self.assertNotEqual(converted_recursive.ag_module,
converted_non_recursive.ag_module)
self.assertRegex(tf_inspect.getsource(converted_recursive),
'FunctionScope(.*recursive=True.*)')
self.assertRegex(tf_inspect.getsource(converted_non_recursive),
'FunctionScope(.*recursive=False.*)')
def test_to_graph_preserves_bindings(self):
y = 3
def test_fn():
return y
converted = api.to_graph(test_fn)
self.assertEqual(converted(), 3)
y = 7
self.assertEqual(converted(), 7)
def test_to_graph_source_map(self):
def test_fn(y):
return y**2
self.assertTrue(hasattr(api.to_graph(test_fn), 'ag_source_map'))
def test_to_graph_sets_conversion_context(self):
def g():
self.assertEqual(ag_ctx.control_status_ctx().status,
ag_ctx.Status.ENABLED)
return 0
    # Note: the autograph=False sets the context to Status.DISABLED. The test
# verifies that to_graph overrides that.
@def_function.function(autograph=False)
def f():
converted_g = api.to_graph(g)
converted_g()
f()
def test_to_code_basic(self):
def test_fn(x, s):
while tf.reduce_sum(x) > s:
x /= 2
return x
# Just check that the output is parseable Python code.
self.assertIsNotNone(parser.parse_str(api.to_code(test_fn)))
def test_to_code_with_wrapped_function(self):
@def_function.function
def test_fn(x, s):
while tf.reduce_sum(x) > s:
x /= 2
return x
with self.assertRaisesRegex(Exception, 'try passing.*python_function'):
api.to_code(test_fn)
def test_tf_convert_direct(self):
def f():
if tf.reduce_sum([1, 2]) > 0:
return -1
return 1
# Note: the autograph setting of tf.function has nothing to do with the
# test case. We just disable it to avoid confusion.
@def_function.function(autograph=False)
def test_fn(ctx):
return api.tf_convert(f, ctx)()
self.assertEqual(
self.evaluate(
test_fn(ag_ctx.ControlStatusCtx(status=ag_ctx.Status.ENABLED))), -1)
with self.assertRaisesRegex(TypeError, 'tf.Tensor.*bool'):
# The code in `f` is only valid with AutoGraph.
test_fn(ag_ctx.ControlStatusCtx(status=ag_ctx.Status.DISABLED))
def test_tf_convert_unspecified_not_converted_by_default(self):
def f():
self.assertEqual(ag_ctx.control_status_ctx().status,
ag_ctx.Status.UNSPECIFIED)
if tf.reduce_sum([1, 2]) > 0:
return -1
return 1
@def_function.function
def test_fn(ctx):
return api.tf_convert(f, ctx, convert_by_default=False)()
with self.assertRaisesRegex(TypeError, 'tf.Tensor.*bool'):
# The code in `f` is only valid with AutoGraph.
test_fn(ag_ctx.ControlStatusCtx(status=ag_ctx.Status.UNSPECIFIED))
def test_tf_convert_whitelisted_method(self):
model = sequential.Sequential([core.Dense(2)])
converted_call = api.tf_convert(
model.call, ag_ctx.ControlStatusCtx(status=ag_ctx.Status.ENABLED))
_, converted_target = tf_decorator.unwrap(converted_call)
self.assertIs(converted_target.__func__, model.call.__func__)
def test_tf_convert_wrapped(self):
def f():
if tf.reduce_sum([1, 2]) > 0:
return -1
return 1
@functools.wraps(f)
def wrapper(*args, **kwargs):
return wrapper.__wrapped__(*args, **kwargs)
decorated_f = tf_decorator.make_decorator(f, wrapper)
# Note: the autograph setting of tf has nothing to do with the
# test case. We just disable it to avoid confusion.
@def_function.function(autograph=False)
def test_fn(ctx):
return api.tf_convert(decorated_f, ctx)()
self.assertEqual(
self.evaluate(
test_fn(ag_ctx.ControlStatusCtx(status=ag_ctx.Status.ENABLED))), -1)
# tf_convert mutates the decorator, so we need to create a new one for
# another test.
decorated_f = tf_decorator.make_decorator(f, wrapper)
with self.assertRaisesRegex(TypeError, 'tf.Tensor.*bool'):
# The code in `f` is only valid with AutoGraph.
test_fn(ag_ctx.ControlStatusCtx(status=ag_ctx.Status.DISABLED))
def test_super_with_one_arg(self):
test_case_self = self
class TestBase(object):
def plus_three(self, x):
return x + 3
class TestSubclass(TestBase):
def plus_three(self, x):
test_case_self.fail('This should never be called.')
def one_arg(self, x):
test_base_unbound = super(TestSubclass)
test_base = test_base_unbound.__get__(self, TestSubclass)
return test_base.plus_three(x)
tc = api.converted_call(TestSubclass,
converter.ConversionOptions(recursive=True), (), {})
self.assertEqual(5, tc.one_arg(2))
def test_super_with_two_args(self):
test_case_self = self
class TestBase(object):
def plus_three(self, x):
return x + 3
class TestSubclass(TestBase):
def plus_three(self, x):
test_case_self.fail('This should never be called.')
def two_args(self, x):
return super(TestSubclass, self).plus_three(x)
tc = api.converted_call(TestSubclass,
converter.ConversionOptions(recursive=True), (), {})
self.assertEqual(5, tc.two_args(2))
if __name__ == '__main__':
os.environ['AUTOGRAPH_STRICT_CONVERSION'] = '1'
test.main()
|
py | 1a3ed6b739786eeca92f4907406029a44b750bbb | import datetime
import functools
import json
import operator
import re
import requests
from django.conf import settings
from django.contrib import auth
from django.core import signing
from django.db import transaction
from django.db.models import Q, F
from django.http import Http404, HttpResponseForbidden, HttpResponse
from django.shortcuts import get_object_or_404, redirect
from django.urls import reverse
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from django.views import generic
from password_reset.views import Recover
from tagging.models import Tag, TaggedItem
from tagging.utils import calculate_cloud, get_tag
from . import utils
from .constants import (MACHINETAGS_FROM_FIELDS, IMPROVIDERS_DICT,
SERVICES_DICT)
from .forms import (SkillsForm, SignupForm, PortfolioForm, BioForm,
LocationForm, FindingForm, AccountForm, PasswordForm,
DeletionRequestForm, AccountDeletionForm)
from .models import DjangoPerson, Country, User, Region, PortfolioSite
from ..django_openidauth.models import associate_openid, UserOpenID
from ..machinetags.utils import tagdict
from ..machinetags.models import MachineTaggedItem
NOTALPHA_RE = re.compile('[^a-zA-Z0-9]')
@utils.simple_decorator
def must_be_owner(view):
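    """Only allow access when the logged-in user owns the profile: the
    request user's username must match the username captured from the URL
    (via kwargs or the first positional argument), otherwise return a 403."""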
def inner(request, *args, **kwargs):
if 'username' in kwargs:
if (not request.user or request.user.is_anonymous or
request.user.username != kwargs['username']):
return HttpResponseForbidden('Not allowed')
else:
if (
not request.user or
request.user.is_anonymous or
request.user.username != args[0]
):
return HttpResponseForbidden('Not allowed')
return view(request, *args, **kwargs)
return inner
class IndexView(generic.TemplateView):
template_name = 'index.html'
def get_context_data(self, **kwargs):
people = DjangoPerson.objects.all().select_related()
people = people.order_by('-id')[:100]
ctx = super().get_context_data(**kwargs)
ctx.update({
'people_list': people,
'people_list_limited': people[:4],
'total_people': DjangoPerson.objects.count(),
'countries': Country.objects.top_countries(),
'home': True,
})
return ctx
index = IndexView.as_view()
class AboutView(generic.TemplateView):
template_name = 'about.html'
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
ctx.update({
'total_people': DjangoPerson.objects.count(),
'countries': Country.objects.top_countries(),
})
return ctx
about = AboutView.as_view()
class RecentView(generic.TemplateView):
template_name = 'recent.html'
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
people = DjangoPerson.objects.all().select_related()
ctx.update({
'people': people.order_by('-auth_user.date_joined')[:50],
})
return ctx
recent = RecentView.as_view()
def redirect_to_logged_in_user_profile(request):
if request.user.is_authenticated:
url = reverse('user_profile', kwargs={'username': request.user})
else:
url = reverse('index')
return redirect(url)
def logout(request):
auth.logout(request)
request.session['openids'] = []
return redirect(reverse('index'))
class RecoverView(Recover):
search_fields = ['username']
recover = RecoverView.as_view()
class OpenIDWhatNext(generic.RedirectView):
"""
    If the user is already logged in, send them to /openid/associations/;
    otherwise, send them to the signup page.
"""
permanent = False
def get_redirect_url(self):
if not self.request.openid:
return reverse('index')
if self.request.user.is_anonymous:
# Have they logged in with an OpenID that matches an account?
try:
user_openid = UserOpenID.objects.get(
openid=str(self.request.openid),
)
except UserOpenID.DoesNotExist:
return reverse('signup')
# Log the user in
user = user_openid.user
user.backend = 'django.contrib.auth.backends.ModelBackend'
auth.login(self.request, user)
return reverse('user_profile', args=[user.username])
return reverse('openid_associations')
openid_whatnext = OpenIDWhatNext.as_view()
class SignupView(generic.FormView):
form_class = SignupForm
template_name = 'signup.html'
def dispatch(self, request, *args, **kwargs):
if not request.user.is_anonymous:
return redirect(reverse('index'))
return super().dispatch(request, *args, **kwargs)
def form_valid(self, form):
creation_args = {
'username': form.cleaned_data['username'],
'email': form.cleaned_data['email'],
}
user = User.objects.create(**creation_args)
if form.cleaned_data.get('password1'):
user.set_password(form.cleaned_data['password1'])
user.first_name = form.cleaned_data['first_name']
user.last_name = form.cleaned_data['last_name']
user.save()
if self.request.openid:
associate_openid(user, str(self.request.openid))
region = None
if form.cleaned_data['region']:
region = Region.objects.get(
country__iso_code=form.cleaned_data['country'],
code=form.cleaned_data['region'],
)
# Now create the DjangoPerson
person = DjangoPerson.objects.create(
user=user,
bio=form.cleaned_data['bio'],
country=Country.objects.get(
iso_code=form.cleaned_data['country'],
),
region=region,
latitude=form.cleaned_data['latitude'],
longitude=form.cleaned_data['longitude'],
location_description=form.cleaned_data['location_description'],
)
# Set up the various machine tags
for fieldname, (namespace,
predicate) in MACHINETAGS_FROM_FIELDS.items():
if (
fieldname in form.cleaned_data and
form.cleaned_data[fieldname].strip()
):
value = form.cleaned_data[fieldname].strip()
person.add_machinetag(namespace, predicate, value)
# Finally, set their skill tags
person.skilltags = form.cleaned_data['skilltags']
# Log them in and redirect to their profile page
user.backend = 'django.contrib.auth.backends.ModelBackend'
auth.login(self.request, user)
self.person = person
return super().form_valid(form)
def get_success_url(self):
return self.person.get_absolute_url()
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
if self.request.openid:
kwargs['openid'] = self.request.openid
return kwargs
def get_initial(self):
initial = super().get_initial()
if self.request.openid and self.request.openid.sreg:
sreg = self.request.openid.sreg
first_name = ''
last_name = ''
username = ''
if sreg.get('fullname'):
bits = sreg['fullname'].split()
first_name = bits[0]
if len(bits) > 1:
last_name = ' '.join(bits[1:])
# Find a not-taken username
if sreg.get('nickname'):
username = derive_username(sreg['nickname'])
initial.update({
'first_name': first_name,
'last_name': last_name,
'email': sreg.get('email', ''),
'username': username,
})
return initial
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
ctx.update({
'openid': self.request.openid,
})
return ctx
signup = SignupView.as_view()
signup = transaction.atomic(signup)
def derive_username(nickname):
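    """Derive an available username from an OpenID nickname: strip
    non-alphanumeric characters, then append an incrementing suffix until no
    existing DjangoPerson uses the name."""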
nickname = NOTALPHA_RE.sub('', nickname)
if not nickname:
return ''
base_nickname = nickname
to_add = 1
while True:
try:
DjangoPerson.objects.get(user__username=nickname)
except DjangoPerson.DoesNotExist:
break
nickname = base_nickname + str(to_add)
to_add += 1
return nickname
class CleverPaginator(object):
"""
A paginator that triggers pagination only if the 2nd page is
worth displaying.
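    For example, with paginate_by = 100 a count of up to 150 is rendered on
    a single page; anything larger is paginated at 100 items per page.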
"""
paginate_by = 100
def get_count(self):
raise NotImplementedError
def get_paginate_by(self, queryset):
count = self.get_count()
if count > self.paginate_by * 1.5:
return self.paginate_by
return count
class CountryView(CleverPaginator, generic.ListView):
template_name = 'country.html'
context_object_name = 'people_list'
def get_queryset(self):
self.country = get_object_or_404(
Country,
iso_code=self.kwargs['country_code'].upper()
)
self.all_people = self.country.djangoperson_set.select_related(
'country', 'user'
).order_by('user__first_name', 'user__last_name')
return self.all_people
def get_count(self):
return self.country.num_people
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context.update({
'regions': self.country.top_regions(),
'country': self.country,
'people_list': self.all_people,
})
return context
country = CountryView.as_view()
class RegionView(CleverPaginator, generic.ListView):
template_name = 'country.html'
def get_queryset(self):
self.region = get_object_or_404(
Region,
country__iso_code=self.kwargs['country_code'].upper(),
code=self.kwargs['region_code'].upper(),
)
self.all_people = self.region.djangoperson_set.select_related(
'user', 'country',
).order_by('user__first_name', 'user__last_name')
return self.all_people
def get_count(self):
return self.region.num_people
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context.update({
'country': self.region,
'people_list': self.all_people,
})
return context
region = RegionView.as_view()
class CountrySitesView(generic.ListView):
context_object_name = 'sites'
template_name = 'country_sites.html'
def get_queryset(self):
self.country = get_object_or_404(
Country, iso_code=self.kwargs['country_code'].upper(),
)
return PortfolioSite.objects.select_related().filter(
contributor__country=self.country,
).order_by('contributor')
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context.update({
'country': self.country,
})
return context
country_sites = CountrySitesView.as_view()
class ProfileView(generic.DetailView):
context_object_name = 'person'
template_name = 'profile.html'
def get_object(self):
person = get_object_or_404(DjangoPerson,
user__username=self.kwargs['username'])
DjangoPerson.objects.filter(pk=person.pk).update(
profile_views=F('profile_views') + 1,
)
return person
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
mtags = tagdict(self.object.machinetags.all())
# Set up convenient iterables for IM and services
ims = []
for key, value in mtags.get('im', {}).items():
shortname, name, icon = IMPROVIDERS_DICT.get(key, ('', '', ''))
if not shortname:
continue # Bad machinetag
ims.append({
'shortname': shortname,
'name': name,
'value': value,
})
ims.sort(key=lambda x: x['shortname'])
services = []
for key, value in mtags.get('services', {}).items():
shortname, name, icon = SERVICES_DICT.get(key, ('', '', ''))
if not shortname:
continue # Bad machinetag
services.append({
'shortname': shortname,
'name': name,
'value': value,
})
services.sort(key=lambda x: x['shortname'])
# Set up vars that control privacy stuff
privacy = {
'show_im': (
mtags['privacy']['im'] == 'public' or
not self.request.user.is_anonymous
),
'show_email': (
mtags['privacy']['email'] == 'public' or
(not self.request.user.is_anonymous and
mtags['privacy']['email'] == 'private')
),
'hide_from_search': mtags['privacy']['search'] != 'public',
'show_last_irc_activity': bool(self.object.last_active_on_irc and
self.object.irc_tracking_allowed()),
}
# Should we show the 'Finding X' section at all?
show_finding = (services or privacy['show_email'] or
(privacy['show_im'] and ims))
context.update({
'is_owner': self.request.user.username == self.kwargs['username'],
'skills_form': SkillsForm(instance=self.object),
'mtags': mtags,
'ims': ims,
'services': services,
'privacy': privacy,
'show_finding': show_finding,
'people_list': self.object.get_nearest(),
})
return context
profile = ProfileView.as_view()
class DjangoPersonEditViewBase(generic.UpdateView):
def get_object(self):
return get_object_or_404(DjangoPerson,
user__username=self.kwargs['username'])
def get_success_url(self):
return reverse('user_profile', args=[self.kwargs['username']])
class EditFindingView(DjangoPersonEditViewBase):
form_class = FindingForm
template_name = 'edit_finding.html'
def get_initial(self):
mtags = tagdict(self.object.machinetags.all())
initial = {
'email': self.object.user.email,
'first_name': self.object.user.first_name,
'last_name': self.object.user.last_name,
}
# Fill in other initial fields from machinetags
for fieldname, (namespace, predicate) in \
MACHINETAGS_FROM_FIELDS.items():
initial[fieldname] = mtags[namespace][predicate]
return initial
edit_finding = must_be_owner(EditFindingView.as_view())
class EditPortfolioView(DjangoPersonEditViewBase):
form_class = PortfolioForm
template_name = 'edit_portfolio.html'
edit_portfolio = must_be_owner(EditPortfolioView.as_view())
class EditAccountView(DjangoPersonEditViewBase):
form_class = AccountForm
template_name = 'edit_account.html'
edit_account = must_be_owner(EditAccountView.as_view())
class EditSkillsView(DjangoPersonEditViewBase):
form_class = SkillsForm
template_name = 'edit_skills.html'
edit_skills = must_be_owner(EditSkillsView.as_view())
class EditPassword(generic.UpdateView):
form_class = PasswordForm
template_name = 'edit_password.html'
def get_object(self):
return get_object_or_404(User, username=self.kwargs['username'])
def get_success_url(self):
return reverse('user_profile', args=[self.kwargs['username']])
edit_password = must_be_owner(EditPassword.as_view())
class EditBioView(DjangoPersonEditViewBase):
form_class = BioForm
template_name = 'edit_bio.html'
edit_bio = must_be_owner(EditBioView.as_view())
class EditLocationView(DjangoPersonEditViewBase):
form_class = LocationForm
template_name = 'edit_location.html'
def get_initial(self):
initial = super().get_initial()
initial.update({
'country': self.object.country.iso_code,
})
return initial
edit_location = must_be_owner(EditLocationView.as_view())
class SkillCloudView(generic.TemplateView):
template_name = 'skills.html'
def get_context_data(self, **kwargs):
tags = DjangoPerson.skilltags.cloud(steps=5)
calculate_cloud(tags, 5)
context = super().get_context_data(**kwargs)
context.update({
'tags': tags,
})
return context
skill_cloud = SkillCloudView.as_view()
class CountrySkillCloudView(generic.DetailView):
context_object_name = 'country'
template_name = 'skills.html'
def get_object(self):
return get_object_or_404(Country,
iso_code=self.kwargs['country_code'].upper())
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
tags = Tag.objects.cloud_for_model(DjangoPerson, steps=5, filters={
'country': self.object,
})
calculate_cloud(tags, 5)
context.update({
'tags': tags,
})
return context
country_skill_cloud = CountrySkillCloudView.as_view()
class TaggedObjectList(generic.ListView):
related_tags = False
related_tag_counts = True
select_related = False
def get_queryset(self):
self.tag_instance = get_tag(self.kwargs['tag'])
if self.tag_instance is None:
raise Http404(
_('No Tag found matching "%s".') % self.kwargs['tag']
)
queryset = TaggedItem.objects.get_by_model(self.model,
self.tag_instance)
if self.select_related:
queryset = queryset.select_related(*self.select_related)
filter_args = self.get_extra_filter_args()
if filter_args:
queryset = queryset.filter(**filter_args)
return queryset
def get_extra_filter_args(self):
return {}
def get_context_data(self, **kwargs):
kwargs.update({
'tag': self.kwargs['tag'],
})
if self.related_tags:
kwargs['related_tags'] = Tag.objects.related_for_model(
self.tag_instance,
self.model,
counts=self.related_tag_counts
)
ctx = super().get_context_data(**kwargs)
return ctx
class Skill(TaggedObjectList):
model = DjangoPerson
related_tags = True
template_name = 'skill.html'
context_object_name = 'people_list'
select_related = ['user', 'country']
skill = Skill.as_view()
class CountrySkill(TaggedObjectList):
model = DjangoPerson
related_tags = True
template_name = 'skill.html'
context_object_name = 'people_list'
def get_context_data(self, **kwargs):
kwargs['country'] = Country.objects.get(
iso_code=self.kwargs['country_code'].upper()
)
return super().get_context_data(**kwargs)
def get_extra_filter_args(self):
filters = super().get_extra_filter_args()
filters['country__iso_code'] = self.kwargs['country_code'].upper()
return filters
country_skill = CountrySkill.as_view()
class CountryLookingForView(generic.ListView):
context_object_name = 'people'
template_name = 'country_looking_for.html'
def get_queryset(self):
self.country = get_object_or_404(
Country, iso_code=self.kwargs['country_code'].upper(),
)
ids = [
o['object_id'] for o in MachineTaggedItem.objects.filter(
namespace='profile',
predicate='looking_for_work',
value=self.kwargs['looking_for'],
).values('object_id')
]
return DjangoPerson.objects.filter(country=self.country, id__in=ids)
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context.update({
'country': self.country,
'looking_for': self.kwargs['looking_for'],
})
return context
country_looking_for = CountryLookingForView.as_view()
class SearchView(generic.ListView):
context_object_name = 'people_list'
template_name = 'search.html'
def get_queryset(self):
self.q = self.request.GET.get('q', '')
self.has_badwords = [
w.strip() for w in self.q.split() if len(w.strip()) in (1, 2)
]
if self.q:
return self.search_people()
return []
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context.update({
'q': self.q,
'has_badwords': self.has_badwords
})
return context
def search_people(self):
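        """Combine per-word filters with AND; each word longer than two
        characters may match the username, first name or last name,
        case-insensitively."""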
words = [w.strip() for w in self.q.split() if len(w.strip()) > 2]
if not words:
return []
terms = []
for word in words:
terms.append(Q(
user__username__icontains=word) |
Q(user__first_name__icontains=word) |
Q(user__last_name__icontains=word)
)
combined = functools.reduce(operator.and_, terms)
return DjangoPerson.objects.filter(
combined,
).select_related().distinct()
search = SearchView.as_view()
class IRCActiveView(generic.ListView):
context_object_name = 'people_list'
template_name = 'irc_active.html'
def get_queryset(self):
results = DjangoPerson.objects.filter(
last_active_on_irc__gt=(timezone.now() -
datetime.timedelta(hours=1))
).order_by('-last_active_on_irc')
# Filter out the people who don't want to be tracked (inefficient)
return [r for r in results if r.irc_tracking_allowed()]
irc_active = IRCActiveView.as_view()
class RequestFormMixin(object):
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs['request'] = self.request
return kwargs
class DeletionRequestView(RequestFormMixin, generic.FormView):
form_class = DeletionRequestForm
template_name = 'delete_account_request.html'
def form_valid(self, form):
form.save()
return redirect(reverse('delete_account_next',
args=[self.request.user.username]))
delete_account_request = must_be_owner(DeletionRequestView.as_view())
class DeletionNext(generic.TemplateView):
template_name = 'delete_account_next.html'
delete_account_next = must_be_owner(DeletionNext.as_view())
class AccountDeletionView(RequestFormMixin, generic.FormView):
form_class = AccountDeletionForm
template_name = 'delete_account.html'
def dispatch(self, request, *args, **kwargs):
try:
self.key = signing.loads(kwargs['key'], max_age=3600,
salt='delete_account')
except signing.SignatureExpired:
return redirect(reverse('delete_account_request',
args=[request.user.username]))
except signing.BadSignature:
raise Http404
return super().dispatch(request, *args, **kwargs)
def form_valid(self, form):
form.save()
return redirect(reverse('delete_account_done',
args=[self.request.user.username]))
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
ctx['key'] = self.kwargs['key']
return ctx
delete_account = must_be_owner(AccountDeletionView.as_view())
class DeletionDone(generic.TemplateView):
template_name = 'delete_account_done.html'
def dispatch(self, request, *args, **kwargs):
if User.objects.filter(username=kwargs['username']).exists():
raise Http404
return super().dispatch(request, *args, **kwargs)
delete_account_done = DeletionDone.as_view()
def geonames(request):
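    """Proxy the request's query string (plus our GeoNames username) to the
    findNearbyPlaceNameJSON web service and return its JSON response."""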
params = dict(request.GET)
params['username'] = settings.GEONAMES_USERNAME
response = requests.get('http://ws.geonames.org/findNearbyPlaceNameJSON',
params=params)
return HttpResponse(json.dumps(response.json()),
content_type='application/json')
|
py | 1a3ed72e4e17afa3fb62c9b3cc5cffeb11c21e51 | import game, server, menu_utils, df_utils, items
from srabuilder import rules
import functools
import dragonfly as df
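# Dragonfly voice grammar for Stardew Valley's shipping bin menu: "item N" and
# "row N" move the inventory cursor, "ok" clicks the OK button, and "undo"
# clicks the last-shipped-item slot.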
wrapper = menu_utils.InventoryMenuWrapper()
async def get_shipping_menu():
menu = await menu_utils.get_active_menu(menu_type='itemsToGrabMenu')
if not menu['shippingBin']:
raise menu_utils.InvalidMenuOption()
return menu
async def focus_item(new_row, new_col):
menu = await get_shipping_menu()
submenu = menu['inventoryMenu']
await wrapper.focus_box(submenu, new_row, new_col)
mapping = {
"item <positive_index>": df_utils.async_action(focus_item, None, 'positive_index'),
"row <positive_index>": df_utils.async_action(focus_item, 'positive_index', None),
"ok": df_utils.async_action(menu_utils.click_menu_button, 'okButton', get_shipping_menu),
"undo": df_utils.async_action(menu_utils.click_menu_button, 'lastShippedHolder', get_shipping_menu),
}
@menu_utils.valid_menu_test
def is_active():
menu = game.get_context_menu('itemsToGrabMenu')
return menu['shippingBin']
def load_grammar():
grammar = df.Grammar("shipping_bin_menu")
main_rule = df.MappingRule(
name="shipping_bin_menu_rule",
mapping=mapping,
extras=[rules.num, df_utils.positive_index],
context=is_active
)
grammar.add_rule(main_rule)
grammar.load() |
py | 1a3ed76d0f25035671cd0db6916c2280410a7c9c | import pytest
from django.urls import resolve, reverse
from pinterest.users.models import User
pytestmark = pytest.mark.django_db
def test_detail(user: User):
assert (
reverse("users:detail", kwargs={"username": user.username})
== f"/users/{user.username}/"
)
assert resolve(f"/users/{user.username}/").view_name == "users:detail"
def test_update():
assert reverse("users:update") == "/users/~update/"
assert resolve("/users/~update/").view_name == "users:update"
def test_redirect():
assert reverse("users:redirect") == "/users/~redirect/"
assert resolve("/users/~redirect/").view_name == "users:redirect"
|
py | 1a3ed8296e337ea9c6dcb4ff2fe92990c1606359 | """SIGMET"""
# stdlib
from collections import defaultdict
# 3rd Party
import pytest
# this
from pyiem.exceptions import SIGMETException
from pyiem.nws.products.sigmet import parser, compute_esol
from pyiem.util import utc, get_test_file
def mydict():
"""return dict."""
return dict(lon=-85.50, lat=42.79)
NWSLI_PROVIDER = defaultdict(mydict)
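# Every station id looked up in this provider resolves to the same dummy
# coordinates, which is all the parsing tests below need.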
def test_opairs():
"""Test that exception is raised."""
utcnow = utc(2021, 1, 9, 7, 58)
with pytest.raises(SIGMETException):
parser(
get_test_file("SIGMETS/SIGAK3.txt"),
utcnow,
nwsli_provider=NWSLI_PROVIDER,
)
def test_190503_badgeom():
"""This SIGMET produced a traceback in prod."""
utcnow = utc(2019, 5, 3, 18, 25)
tp = parser(
get_test_file("SIGMETS/SIGC_badgeom.txt"),
utcnow,
nwsli_provider=NWSLI_PROVIDER,
)
assert len(tp.sigmets) == 4
def test_170815_pywwa_issue3():
"""This example was in pyWWA issues list, so lets test here"""
utcnow = utc(2015, 9, 30, 16, 56)
tp = parser(
get_test_file("SIGMETS/SIGE.txt"),
utcnow,
nwsli_provider=NWSLI_PROVIDER,
)
assert len(tp.sigmets) == 4
def test_150930_sigak2():
"""Got an error with this product"""
utcnow = utc(2015, 9, 30, 16, 56)
tp = parser(get_test_file("SIGMETS/SIGAK2.txt"), utcnow)
assert not tp.sigmets
def test_150921_sigpas():
"""Got an error with this product"""
utcnow = utc(2015, 9, 21, 10, 57)
tp = parser(get_test_file("SIGMETS/SIGPAS.txt"), utcnow)
assert len(tp.sigmets) == 1
def test_150917_cancel():
"""Don't error out on a CANCELs SIGMET"""
utcnow = utc(2015, 9, 17, 0, 0)
tp = parser(get_test_file("SIGMETS/SIGPAP_cancel.txt"), utcnow)
assert not tp.sigmets
def test_compute_esol():
"""Test our algo on either side of a line"""
pts = [[0, 0], [5, 0]]
pts = compute_esol(pts, 111)
print(pts)
assert abs(pts[0][0] - 0.00) < 0.01
assert abs(pts[0][1] - 1.00) < 0.01
assert abs(pts[1][0] - 5.00) < 0.01
assert abs(pts[1][1] - 1.00) < 0.01
assert abs(pts[2][0] - 5.00) < 0.01
assert abs(pts[2][1] - -1.00) < 0.01
assert abs(pts[3][0] - 0.00) < 0.01
assert abs(pts[3][1] - -1.00) < 0.01
assert abs(pts[4][0] - 0.00) < 0.01
assert abs(pts[4][1] - 1.00) < 0.01
def test_150915_line():
"""See about parsing a SIGMET LINE"""
utcnow = utc(2015, 9, 15, 2, 55)
ugc_provider = {}
nwsli_provider = {
"MSP": dict(lon=-83.39, lat=44.45),
"MCW": dict(lon=-85.50, lat=42.79),
}
tp = parser(
get_test_file("SIGMETS/SIGC_line.txt"),
utcnow,
ugc_provider,
nwsli_provider,
)
assert abs(tp.sigmets[0].geom.area - 0.47) < 0.01
def test_150915_isol():
"""See about parsing a SIGMET ISOL"""
utcnow = utc(2015, 9, 12, 23, 55)
ugc_provider = {}
nwsli_provider = {
"FTI": dict(lon=-83.39, lat=44.45),
"CME": dict(lon=-85.50, lat=42.79),
}
tp = parser(
get_test_file("SIGMETS/SIGC_ISOL.txt"),
utcnow,
ugc_provider,
nwsli_provider,
)
assert abs(tp.sigmets[0].geom.area - 0.30) < 0.01
assert abs(tp.sigmets[1].geom.area - 0.30) < 0.01
def test_150915_nospace():
"""See about parsing a SIGMET that has no spaces"""
utcnow = utc(2015, 9, 15, 15, 41)
tp = parser(get_test_file("SIGMETS/SIGAX.txt"), utcnow)
assert abs(tp.sigmets[0].geom.area - 23.47) < 0.01
def test_140907_circle():
"""See about parsing a SIGMET that is circle?"""
utcnow = utc(2014, 9, 6, 22, 15)
tp = parser(get_test_file("SIGMETS/SIGP0H.txt"), utcnow)
assert abs(tp.sigmets[0].geom.area - 11.70) < 0.01
def test_140813_line():
"""See about parsing a SIGMET that is a either side of line"""
utcnow = utc(2014, 8, 12, 13, 15)
tp = parser(get_test_file("SIGMETS/SIGP0A_line.txt"), utcnow)
assert abs(tp.sigmets[0].geom.area - 4.32) < 0.01
def test_140815_cancel():
"""See about parsing a SIGMET that is a either side of line"""
utcnow = utc(2014, 8, 15, 23, 41)
tp = parser(get_test_file("SIGMETS/SIG_cancel.txt"), utcnow)
assert not tp.sigmets
def test_sigaoa():
"""SIGAOA"""
utcnow = utc(2014, 8, 11, 19, 15)
tp = parser(get_test_file("SIGMETS/SIGA0A.txt"), utcnow)
assert abs(tp.sigmets[0].geom.area - 24.35) < 0.01
def test_sigaob():
"""See about parsing 50E properly"""
utcnow = utc(2014, 8, 11, 19, 15)
tp = parser(get_test_file("SIGMETS/SIGA0B.txt"), utcnow)
assert not tp.sigmets
@pytest.mark.parametrize("database", ["postgis"])
def test_50e(dbcursor):
"""See about parsing 50E properly"""
utcnow = utc(2014, 8, 11, 18, 55)
ugc_provider = {}
nwsli_provider = {
"ASP": dict(lon=-83.39, lat=44.45),
"ECK": dict(lon=-82.72, lat=43.26),
"GRR": dict(lon=-85.50, lat=42.79),
}
tp = parser(
get_test_file("SIGMETS/SIGE3.txt"),
utcnow,
ugc_provider,
nwsli_provider,
)
assert abs(tp.sigmets[0].geom.area - 2.15) < 0.01
tp.sql(dbcursor)
def test_sigc():
"""See about parsing SIGC"""
utcnow = utc(2014, 8, 11, 16, 55)
ugc_provider = {}
nwsli_provider = {}
for sid in (
"MSL,SJI,MLU,LIT,BTR,LEV,LCH,IAH,YQT,SAW,SAT,DYC,AXC,"
"ODI,DEN,TBE,ADM,JCT,INK,ELP"
).split(","):
nwsli_provider[sid] = dict(lon=-99, lat=45)
tp = parser(
get_test_file("SIGMETS/SIGC.txt"), utcnow, ugc_provider, nwsli_provider
)
j = tp.get_jabbers("http://localhost", "http://localhost")
assert tp.sigmets[0].ets == utc(2014, 8, 11, 18, 55)
ans = "KKCI issues SIGMET 62C for AL MS LA AR till 1855 UTC"
assert j[0][0] == ans
ans = (
"KKCI issues SIGMET 63C for LA TX AND MS LA TX CSTL WTRS till 1855 UTC"
)
assert j[1][0] == ans
def test_sigpat():
"""Make sure we don't have another failure with geom parsing"""
utcnow = utc(2014, 8, 11, 12, 34)
tp = parser(get_test_file("SIGMETS/SIGPAT.txt"), utcnow)
j = tp.get_jabbers("http://localhost", "http://localhost")
assert abs(tp.sigmets[0].geom.area - 33.71) < 0.01
assert tp.sigmets[0].sts == utc(2014, 8, 11, 12, 35)
assert tp.sigmets[0].ets == utc(2014, 8, 11, 16, 35)
assert j[0][0] == "PHFO issues SIGMET TANGO 1 till 1635 UTC"
|
py | 1a3ed86d8e83a518a2231e53d5edd49316f18ca3 | import hashlib
import json
from sanic import response
from datasette.utils import (
CustomJSONEncoder,
InterruptedError,
detect_primary_keys,
detect_fts,
)
from datasette.version import __version__
from .base import HASH_LENGTH, RenderMixin
class IndexView(RenderMixin):
name = "index"
def __init__(self, datasette):
self.ds = datasette
async def get(self, request, as_format):
databases = []
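        # Build a summary for each attached database: table row counts,
        # views, hidden tables, primary keys and FTS configuration.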
for name, db in self.ds.databases.items():
table_counts = await db.table_counts(5)
views = await db.view_names()
tables = {}
hidden_table_names = set(await db.hidden_table_names())
for table in table_counts:
table_columns = await self.ds.table_columns(name, table)
tables[table] = {
"name": table,
"columns": table_columns,
"primary_keys": await self.ds.execute_against_connection_in_thread(
name, lambda conn: detect_primary_keys(conn, table)
),
"count": table_counts[table],
"hidden": table in hidden_table_names,
"fts_table": await self.ds.execute_against_connection_in_thread(
name, lambda conn: detect_fts(conn, table)
),
}
hidden_tables = [t for t in tables.values() if t["hidden"]]
databases.append(
{
"name": name,
"hash": db.hash,
"color": db.hash[:6]
if db.hash
else hashlib.md5(name.encode("utf8")).hexdigest()[:6],
"path": self.database_url(name),
"tables_truncated": sorted(
tables.values(), key=lambda t: t["count"] or 0, reverse=True
)[:5],
"tables_count": len(tables),
"tables_more": len(tables) > 5,
"table_rows_sum": sum((t["count"] or 0) for t in tables.values()),
"hidden_table_rows_sum": sum(t["count"] for t in hidden_tables),
"hidden_tables_count": len(hidden_tables),
"views_count": len(views),
}
)
if as_format:
headers = {}
if self.ds.cors:
headers["Access-Control-Allow-Origin"] = "*"
return response.HTTPResponse(
json.dumps({db["name"]: db for db in databases}, cls=CustomJSONEncoder),
content_type="application/json",
headers=headers,
)
else:
return self.render(
["index.html"],
databases=databases,
metadata=self.ds.metadata(),
datasette_version=__version__,
)
|
py | 1a3ed88aab8d4b25ea5ef4df106d215b8be2bdab | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
#
# This file is auto-generated by h2o-3/h2o-bindings/bin/gen_python.py
# Copyright 2016 H2O.ai; Apache License Version 2.0 (see LICENSE for details)
#
from __future__ import absolute_import, division, print_function, unicode_literals
from h2o.estimators.estimator_base import H2OEstimator
from h2o.exceptions import H2OValueError
from h2o.frame import H2OFrame
from h2o.utils.typechecks import assert_is_type, Enum, numeric
class H2ODeepLearningEstimator(H2OEstimator):
"""
Deep Learning
Build a Deep Neural Network model using CPUs
Builds a feed-forward multilayer artificial neural network on an H2OFrame
Examples
--------
>>> import h2o
>>> from h2o.estimators.deeplearning import H2ODeepLearningEstimator
>>> h2o.connect()
>>> rows = [[1,2,3,4,0], [2,1,2,4,1], [2,1,4,2,1], [0,1,2,34,1], [2,3,4,1,0]] * 50
>>> fr = h2o.H2OFrame(rows)
>>> fr[4] = fr[4].asfactor()
>>> model = H2ODeepLearningEstimator()
>>> model.train(x=range(4), y=4, training_frame=fr)
"""
algo = "deeplearning"
def __init__(self, **kwargs):
super(H2ODeepLearningEstimator, self).__init__()
self._parms = {}
names_list = {"model_id", "training_frame", "validation_frame", "nfolds", "keep_cross_validation_models",
"keep_cross_validation_predictions", "keep_cross_validation_fold_assignment", "fold_assignment",
"fold_column", "response_column", "ignored_columns", "ignore_const_cols", "score_each_iteration",
"weights_column", "offset_column", "balance_classes", "class_sampling_factors",
"max_after_balance_size", "max_confusion_matrix_size", "max_hit_ratio_k", "checkpoint",
"pretrained_autoencoder", "overwrite_with_best_model", "use_all_factor_levels", "standardize",
"activation", "hidden", "epochs", "train_samples_per_iteration", "target_ratio_comm_to_comp",
"seed", "adaptive_rate", "rho", "epsilon", "rate", "rate_annealing", "rate_decay",
"momentum_start", "momentum_ramp", "momentum_stable", "nesterov_accelerated_gradient",
"input_dropout_ratio", "hidden_dropout_ratios", "l1", "l2", "max_w2",
"initial_weight_distribution", "initial_weight_scale", "initial_weights", "initial_biases",
"loss", "distribution", "quantile_alpha", "tweedie_power", "huber_alpha", "score_interval",
"score_training_samples", "score_validation_samples", "score_duty_cycle", "classification_stop",
"regression_stop", "stopping_rounds", "stopping_metric", "stopping_tolerance", "max_runtime_secs",
"score_validation_sampling", "diagnostics", "fast_mode", "force_load_balance",
"variable_importances", "replicate_training_data", "single_node_mode", "shuffle_training_data",
"missing_values_handling", "quiet_mode", "autoencoder", "sparse", "col_major",
"average_activation", "sparsity_beta", "max_categorical_features", "reproducible",
"export_weights_and_biases", "mini_batch_size", "categorical_encoding", "elastic_averaging",
"elastic_averaging_moving_rate", "elastic_averaging_regularization", "export_checkpoints_dir"}
if "Lambda" in kwargs: kwargs["lambda_"] = kwargs.pop("Lambda")
for pname, pvalue in kwargs.items():
if pname == 'model_id':
self._id = pvalue
self._parms["model_id"] = pvalue
elif pname in names_list:
# Using setattr(...) will invoke type-checking of the arguments
setattr(self, pname, pvalue)
else:
raise H2OValueError("Unknown parameter %s = %r" % (pname, pvalue))
if isinstance(self, H2OAutoEncoderEstimator): self._parms['autoencoder'] = True
@property
def training_frame(self):
"""
Id of the training data frame.
Type: ``H2OFrame``.
"""
return self._parms.get("training_frame")
@training_frame.setter
def training_frame(self, training_frame):
assert_is_type(training_frame, None, H2OFrame)
self._parms["training_frame"] = training_frame
@property
def validation_frame(self):
"""
Id of the validation data frame.
Type: ``H2OFrame``.
"""
return self._parms.get("validation_frame")
@validation_frame.setter
def validation_frame(self, validation_frame):
assert_is_type(validation_frame, None, H2OFrame)
self._parms["validation_frame"] = validation_frame
@property
def nfolds(self):
"""
Number of folds for K-fold cross-validation (0 to disable or >= 2).
Type: ``int`` (default: ``0``).
"""
return self._parms.get("nfolds")
@nfolds.setter
def nfolds(self, nfolds):
assert_is_type(nfolds, None, int)
self._parms["nfolds"] = nfolds
@property
def keep_cross_validation_models(self):
"""
Whether to keep the cross-validation models.
Type: ``bool`` (default: ``True``).
"""
return self._parms.get("keep_cross_validation_models")
@keep_cross_validation_models.setter
def keep_cross_validation_models(self, keep_cross_validation_models):
assert_is_type(keep_cross_validation_models, None, bool)
self._parms["keep_cross_validation_models"] = keep_cross_validation_models
@property
def keep_cross_validation_predictions(self):
"""
Whether to keep the predictions of the cross-validation models.
Type: ``bool`` (default: ``False``).
"""
return self._parms.get("keep_cross_validation_predictions")
@keep_cross_validation_predictions.setter
def keep_cross_validation_predictions(self, keep_cross_validation_predictions):
assert_is_type(keep_cross_validation_predictions, None, bool)
self._parms["keep_cross_validation_predictions"] = keep_cross_validation_predictions
@property
def keep_cross_validation_fold_assignment(self):
"""
Whether to keep the cross-validation fold assignment.
Type: ``bool`` (default: ``False``).
"""
return self._parms.get("keep_cross_validation_fold_assignment")
@keep_cross_validation_fold_assignment.setter
def keep_cross_validation_fold_assignment(self, keep_cross_validation_fold_assignment):
assert_is_type(keep_cross_validation_fold_assignment, None, bool)
self._parms["keep_cross_validation_fold_assignment"] = keep_cross_validation_fold_assignment
@property
def fold_assignment(self):
"""
Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified' option will stratify
the folds based on the response variable, for classification problems.
One of: ``"auto"``, ``"random"``, ``"modulo"``, ``"stratified"`` (default: ``"auto"``).
"""
return self._parms.get("fold_assignment")
@fold_assignment.setter
def fold_assignment(self, fold_assignment):
assert_is_type(fold_assignment, None, Enum("auto", "random", "modulo", "stratified"))
self._parms["fold_assignment"] = fold_assignment
@property
def fold_column(self):
"""
Column with cross-validation fold index assignment per observation.
Type: ``str``.
"""
return self._parms.get("fold_column")
@fold_column.setter
def fold_column(self, fold_column):
assert_is_type(fold_column, None, str)
self._parms["fold_column"] = fold_column
@property
def response_column(self):
"""
Response variable column.
Type: ``str``.
"""
return self._parms.get("response_column")
@response_column.setter
def response_column(self, response_column):
assert_is_type(response_column, None, str)
self._parms["response_column"] = response_column
@property
def ignored_columns(self):
"""
Names of columns to ignore for training.
Type: ``List[str]``.
"""
return self._parms.get("ignored_columns")
@ignored_columns.setter
def ignored_columns(self, ignored_columns):
assert_is_type(ignored_columns, None, [str])
self._parms["ignored_columns"] = ignored_columns
@property
def ignore_const_cols(self):
"""
Ignore constant columns.
Type: ``bool`` (default: ``True``).
"""
return self._parms.get("ignore_const_cols")
@ignore_const_cols.setter
def ignore_const_cols(self, ignore_const_cols):
assert_is_type(ignore_const_cols, None, bool)
self._parms["ignore_const_cols"] = ignore_const_cols
@property
def score_each_iteration(self):
"""
Whether to score during each iteration of model training.
Type: ``bool`` (default: ``False``).
"""
return self._parms.get("score_each_iteration")
@score_each_iteration.setter
def score_each_iteration(self, score_each_iteration):
assert_is_type(score_each_iteration, None, bool)
self._parms["score_each_iteration"] = score_each_iteration
@property
def weights_column(self):
"""
Column with observation weights. Giving some observation a weight of zero is equivalent to excluding it from the
dataset; giving an observation a relative weight of 2 is equivalent to repeating that row twice. Negative
weights are not allowed. Note: Weights are per-row observation weights and do not increase the size of the data
frame. This is typically the number of times a row is repeated, but non-integer values are supported as well.
During training, rows with higher weights matter more, due to the larger loss function pre-factor.
Type: ``str``.
"""
return self._parms.get("weights_column")
@weights_column.setter
def weights_column(self, weights_column):
assert_is_type(weights_column, None, str)
self._parms["weights_column"] = weights_column
@property
def offset_column(self):
"""
Offset column. This will be added to the combination of columns before applying the link function.
Type: ``str``.
"""
return self._parms.get("offset_column")
@offset_column.setter
def offset_column(self, offset_column):
assert_is_type(offset_column, None, str)
self._parms["offset_column"] = offset_column
@property
def balance_classes(self):
"""
Balance training data class counts via over/under-sampling (for imbalanced data).
Type: ``bool`` (default: ``False``).
"""
return self._parms.get("balance_classes")
@balance_classes.setter
def balance_classes(self, balance_classes):
assert_is_type(balance_classes, None, bool)
self._parms["balance_classes"] = balance_classes
@property
def class_sampling_factors(self):
"""
Desired over/under-sampling ratios per class (in lexicographic order). If not specified, sampling factors will
be automatically computed to obtain class balance during training. Requires balance_classes.
Type: ``List[float]``.
"""
return self._parms.get("class_sampling_factors")
@class_sampling_factors.setter
def class_sampling_factors(self, class_sampling_factors):
assert_is_type(class_sampling_factors, None, [float])
self._parms["class_sampling_factors"] = class_sampling_factors
@property
def max_after_balance_size(self):
"""
Maximum relative size of the training data after balancing class counts (can be less than 1.0). Requires
balance_classes.
Type: ``float`` (default: ``5``).
"""
return self._parms.get("max_after_balance_size")
@max_after_balance_size.setter
def max_after_balance_size(self, max_after_balance_size):
assert_is_type(max_after_balance_size, None, float)
self._parms["max_after_balance_size"] = max_after_balance_size
@property
def max_confusion_matrix_size(self):
"""
[Deprecated] Maximum size (# classes) for confusion matrices to be printed in the Logs.
Type: ``int`` (default: ``20``).
"""
return self._parms.get("max_confusion_matrix_size")
@max_confusion_matrix_size.setter
def max_confusion_matrix_size(self, max_confusion_matrix_size):
assert_is_type(max_confusion_matrix_size, None, int)
self._parms["max_confusion_matrix_size"] = max_confusion_matrix_size
@property
def max_hit_ratio_k(self):
"""
Max. number (top K) of predictions to use for hit ratio computation (for multi-class only, 0 to disable).
Type: ``int`` (default: ``0``).
"""
return self._parms.get("max_hit_ratio_k")
@max_hit_ratio_k.setter
def max_hit_ratio_k(self, max_hit_ratio_k):
assert_is_type(max_hit_ratio_k, None, int)
self._parms["max_hit_ratio_k"] = max_hit_ratio_k
@property
def checkpoint(self):
"""
Model checkpoint to resume training with.
Type: ``str``.
"""
return self._parms.get("checkpoint")
@checkpoint.setter
def checkpoint(self, checkpoint):
assert_is_type(checkpoint, None, str, H2OEstimator)
self._parms["checkpoint"] = checkpoint
@property
def pretrained_autoencoder(self):
"""
Pretrained autoencoder model to initialize this model with.
Type: ``str``.
"""
return self._parms.get("pretrained_autoencoder")
@pretrained_autoencoder.setter
def pretrained_autoencoder(self, pretrained_autoencoder):
assert_is_type(pretrained_autoencoder, None, str, H2OEstimator)
self._parms["pretrained_autoencoder"] = pretrained_autoencoder
@property
def overwrite_with_best_model(self):
"""
If enabled, override the final model with the best model found during training.
Type: ``bool`` (default: ``True``).
"""
return self._parms.get("overwrite_with_best_model")
@overwrite_with_best_model.setter
def overwrite_with_best_model(self, overwrite_with_best_model):
assert_is_type(overwrite_with_best_model, None, bool)
self._parms["overwrite_with_best_model"] = overwrite_with_best_model
@property
def use_all_factor_levels(self):
"""
Use all factor levels of categorical variables. Otherwise, the first factor level is omitted (without loss of
accuracy). Useful for variable importances and auto-enabled for autoencoder.
Type: ``bool`` (default: ``True``).
"""
return self._parms.get("use_all_factor_levels")
@use_all_factor_levels.setter
def use_all_factor_levels(self, use_all_factor_levels):
assert_is_type(use_all_factor_levels, None, bool)
self._parms["use_all_factor_levels"] = use_all_factor_levels
@property
def standardize(self):
"""
If enabled, automatically standardize the data. If disabled, the user must provide properly scaled input data.
Type: ``bool`` (default: ``True``).
"""
return self._parms.get("standardize")
@standardize.setter
def standardize(self, standardize):
assert_is_type(standardize, None, bool)
self._parms["standardize"] = standardize
@property
def activation(self):
"""
Activation function.
One of: ``"tanh"``, ``"tanh_with_dropout"``, ``"rectifier"``, ``"rectifier_with_dropout"``, ``"maxout"``,
``"maxout_with_dropout"`` (default: ``"rectifier"``).
"""
return self._parms.get("activation")
@activation.setter
def activation(self, activation):
assert_is_type(activation, None, Enum("tanh", "tanh_with_dropout", "rectifier", "rectifier_with_dropout", "maxout", "maxout_with_dropout"))
self._parms["activation"] = activation
@property
def hidden(self):
"""
Hidden layer sizes (e.g. [100, 100]).
Type: ``List[int]`` (default: ``[200, 200]``).
"""
return self._parms.get("hidden")
@hidden.setter
def hidden(self, hidden):
assert_is_type(hidden, None, [int])
self._parms["hidden"] = hidden
@property
def epochs(self):
"""
How many times the dataset should be iterated (streamed), can be fractional.
Type: ``float`` (default: ``10``).
"""
return self._parms.get("epochs")
@epochs.setter
def epochs(self, epochs):
assert_is_type(epochs, None, numeric)
self._parms["epochs"] = epochs
@property
def train_samples_per_iteration(self):
"""
Number of training samples (globally) per MapReduce iteration. Special values are 0: one epoch, -1: all
available data (e.g., replicated training data), -2: automatic.
Type: ``int`` (default: ``-2``).
"""
return self._parms.get("train_samples_per_iteration")
@train_samples_per_iteration.setter
def train_samples_per_iteration(self, train_samples_per_iteration):
assert_is_type(train_samples_per_iteration, None, int)
self._parms["train_samples_per_iteration"] = train_samples_per_iteration
@property
def target_ratio_comm_to_comp(self):
"""
Target ratio of communication overhead to computation. Only for multi-node operation and
train_samples_per_iteration = -2 (auto-tuning).
Type: ``float`` (default: ``0.05``).
"""
return self._parms.get("target_ratio_comm_to_comp")
@target_ratio_comm_to_comp.setter
def target_ratio_comm_to_comp(self, target_ratio_comm_to_comp):
assert_is_type(target_ratio_comm_to_comp, None, numeric)
self._parms["target_ratio_comm_to_comp"] = target_ratio_comm_to_comp
@property
def seed(self):
"""
Seed for random numbers (affects sampling) - Note: only reproducible when running single threaded.
Type: ``int`` (default: ``-1``).
"""
return self._parms.get("seed")
@seed.setter
def seed(self, seed):
assert_is_type(seed, None, int)
self._parms["seed"] = seed
@property
def adaptive_rate(self):
"""
Adaptive learning rate.
Type: ``bool`` (default: ``True``).
"""
return self._parms.get("adaptive_rate")
@adaptive_rate.setter
def adaptive_rate(self, adaptive_rate):
assert_is_type(adaptive_rate, None, bool)
self._parms["adaptive_rate"] = adaptive_rate
@property
def rho(self):
"""
Adaptive learning rate time decay factor (similarity to prior updates).
Type: ``float`` (default: ``0.99``).
"""
return self._parms.get("rho")
@rho.setter
def rho(self, rho):
assert_is_type(rho, None, numeric)
self._parms["rho"] = rho
@property
def epsilon(self):
"""
Adaptive learning rate smoothing factor (to avoid divisions by zero and allow progress).
Type: ``float`` (default: ``1e-08``).
"""
return self._parms.get("epsilon")
@epsilon.setter
def epsilon(self, epsilon):
assert_is_type(epsilon, None, numeric)
self._parms["epsilon"] = epsilon
@property
def rate(self):
"""
Learning rate (higher => less stable, lower => slower convergence).
Type: ``float`` (default: ``0.005``).
"""
return self._parms.get("rate")
@rate.setter
def rate(self, rate):
assert_is_type(rate, None, numeric)
self._parms["rate"] = rate
@property
def rate_annealing(self):
"""
Learning rate annealing: rate / (1 + rate_annealing * samples).
Type: ``float`` (default: ``1e-06``).
"""
return self._parms.get("rate_annealing")
@rate_annealing.setter
def rate_annealing(self, rate_annealing):
assert_is_type(rate_annealing, None, numeric)
self._parms["rate_annealing"] = rate_annealing
@property
def rate_decay(self):
"""
Learning rate decay factor between layers (N-th layer: rate * rate_decay ^ (n - 1)).
Type: ``float`` (default: ``1``).
"""
return self._parms.get("rate_decay")
@rate_decay.setter
def rate_decay(self, rate_decay):
assert_is_type(rate_decay, None, numeric)
self._parms["rate_decay"] = rate_decay
@property
def momentum_start(self):
"""
Initial momentum at the beginning of training (try 0.5).
Type: ``float`` (default: ``0``).
"""
return self._parms.get("momentum_start")
@momentum_start.setter
def momentum_start(self, momentum_start):
assert_is_type(momentum_start, None, numeric)
self._parms["momentum_start"] = momentum_start
@property
def momentum_ramp(self):
"""
Number of training samples for which momentum increases.
Type: ``float`` (default: ``1000000``).
"""
return self._parms.get("momentum_ramp")
@momentum_ramp.setter
def momentum_ramp(self, momentum_ramp):
assert_is_type(momentum_ramp, None, numeric)
self._parms["momentum_ramp"] = momentum_ramp
@property
def momentum_stable(self):
"""
Final momentum after the ramp is over (try 0.99).
Type: ``float`` (default: ``0``).
"""
return self._parms.get("momentum_stable")
@momentum_stable.setter
def momentum_stable(self, momentum_stable):
assert_is_type(momentum_stable, None, numeric)
self._parms["momentum_stable"] = momentum_stable
@property
def nesterov_accelerated_gradient(self):
"""
Use Nesterov accelerated gradient (recommended).
Type: ``bool`` (default: ``True``).
"""
return self._parms.get("nesterov_accelerated_gradient")
@nesterov_accelerated_gradient.setter
def nesterov_accelerated_gradient(self, nesterov_accelerated_gradient):
assert_is_type(nesterov_accelerated_gradient, None, bool)
self._parms["nesterov_accelerated_gradient"] = nesterov_accelerated_gradient
@property
def input_dropout_ratio(self):
"""
Input layer dropout ratio (can improve generalization, try 0.1 or 0.2).
Type: ``float`` (default: ``0``).
"""
return self._parms.get("input_dropout_ratio")
@input_dropout_ratio.setter
def input_dropout_ratio(self, input_dropout_ratio):
assert_is_type(input_dropout_ratio, None, numeric)
self._parms["input_dropout_ratio"] = input_dropout_ratio
@property
def hidden_dropout_ratios(self):
"""
Hidden layer dropout ratios (can improve generalization), specify one value per hidden layer, defaults to 0.5.
Type: ``List[float]``.
"""
return self._parms.get("hidden_dropout_ratios")
@hidden_dropout_ratios.setter
def hidden_dropout_ratios(self, hidden_dropout_ratios):
assert_is_type(hidden_dropout_ratios, None, [numeric])
self._parms["hidden_dropout_ratios"] = hidden_dropout_ratios
@property
def l1(self):
"""
L1 regularization (can add stability and improve generalization, causes many weights to become 0).
Type: ``float`` (default: ``0``).
"""
return self._parms.get("l1")
@l1.setter
def l1(self, l1):
assert_is_type(l1, None, numeric)
self._parms["l1"] = l1
@property
def l2(self):
"""
L2 regularization (can add stability and improve generalization, causes many weights to be small).
Type: ``float`` (default: ``0``).
"""
return self._parms.get("l2")
@l2.setter
def l2(self, l2):
assert_is_type(l2, None, numeric)
self._parms["l2"] = l2
@property
def max_w2(self):
"""
Constraint for squared sum of incoming weights per unit (e.g. for Rectifier).
Type: ``float`` (default: ``3.4028235e+38``).
"""
return self._parms.get("max_w2")
@max_w2.setter
def max_w2(self, max_w2):
assert_is_type(max_w2, None, float)
self._parms["max_w2"] = max_w2
@property
def initial_weight_distribution(self):
"""
Initial weight distribution.
One of: ``"uniform_adaptive"``, ``"uniform"``, ``"normal"`` (default: ``"uniform_adaptive"``).
"""
return self._parms.get("initial_weight_distribution")
@initial_weight_distribution.setter
def initial_weight_distribution(self, initial_weight_distribution):
assert_is_type(initial_weight_distribution, None, Enum("uniform_adaptive", "uniform", "normal"))
self._parms["initial_weight_distribution"] = initial_weight_distribution
@property
def initial_weight_scale(self):
"""
Uniform: -value...value, Normal: stddev.
Type: ``float`` (default: ``1``).
"""
return self._parms.get("initial_weight_scale")
@initial_weight_scale.setter
def initial_weight_scale(self, initial_weight_scale):
assert_is_type(initial_weight_scale, None, numeric)
self._parms["initial_weight_scale"] = initial_weight_scale
@property
def initial_weights(self):
"""
A list of H2OFrame ids to initialize the weight matrices of this model with.
Type: ``List[H2OFrame]``.
"""
return self._parms.get("initial_weights")
@initial_weights.setter
def initial_weights(self, initial_weights):
assert_is_type(initial_weights, None, [H2OFrame, None])
self._parms["initial_weights"] = initial_weights
@property
def initial_biases(self):
"""
A list of H2OFrame ids to initialize the bias vectors of this model with.
Type: ``List[H2OFrame]``.
"""
return self._parms.get("initial_biases")
@initial_biases.setter
def initial_biases(self, initial_biases):
assert_is_type(initial_biases, None, [H2OFrame, None])
self._parms["initial_biases"] = initial_biases
@property
def loss(self):
"""
Loss function.
One of: ``"automatic"``, ``"cross_entropy"``, ``"quadratic"``, ``"huber"``, ``"absolute"``, ``"quantile"``
(default: ``"automatic"``).
"""
return self._parms.get("loss")
@loss.setter
def loss(self, loss):
assert_is_type(loss, None, Enum("automatic", "cross_entropy", "quadratic", "huber", "absolute", "quantile"))
self._parms["loss"] = loss
@property
def distribution(self):
"""
Distribution function.
One of: ``"auto"``, ``"bernoulli"``, ``"multinomial"``, ``"gaussian"``, ``"poisson"``, ``"gamma"``,
``"tweedie"``, ``"laplace"``, ``"quantile"``, ``"huber"`` (default: ``"auto"``).
"""
return self._parms.get("distribution")
@distribution.setter
def distribution(self, distribution):
assert_is_type(distribution, None, Enum("auto", "bernoulli", "multinomial", "gaussian", "poisson", "gamma", "tweedie", "laplace", "quantile", "huber"))
self._parms["distribution"] = distribution
@property
def quantile_alpha(self):
"""
Desired quantile for Quantile regression, must be between 0 and 1.
Type: ``float`` (default: ``0.5``).
"""
return self._parms.get("quantile_alpha")
@quantile_alpha.setter
def quantile_alpha(self, quantile_alpha):
assert_is_type(quantile_alpha, None, numeric)
self._parms["quantile_alpha"] = quantile_alpha
@property
def tweedie_power(self):
"""
Tweedie power for Tweedie regression, must be between 1 and 2.
Type: ``float`` (default: ``1.5``).
"""
return self._parms.get("tweedie_power")
@tweedie_power.setter
def tweedie_power(self, tweedie_power):
assert_is_type(tweedie_power, None, numeric)
self._parms["tweedie_power"] = tweedie_power
@property
def huber_alpha(self):
"""
Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be between 0 and 1).
Type: ``float`` (default: ``0.9``).
"""
return self._parms.get("huber_alpha")
@huber_alpha.setter
def huber_alpha(self, huber_alpha):
assert_is_type(huber_alpha, None, numeric)
self._parms["huber_alpha"] = huber_alpha
@property
def score_interval(self):
"""
Shortest time interval (in seconds) between model scoring.
Type: ``float`` (default: ``5``).
"""
return self._parms.get("score_interval")
@score_interval.setter
def score_interval(self, score_interval):
assert_is_type(score_interval, None, numeric)
self._parms["score_interval"] = score_interval
@property
def score_training_samples(self):
"""
Number of training set samples for scoring (0 for all).
Type: ``int`` (default: ``10000``).
"""
return self._parms.get("score_training_samples")
@score_training_samples.setter
def score_training_samples(self, score_training_samples):
assert_is_type(score_training_samples, None, int)
self._parms["score_training_samples"] = score_training_samples
@property
def score_validation_samples(self):
"""
Number of validation set samples for scoring (0 for all).
Type: ``int`` (default: ``0``).
"""
return self._parms.get("score_validation_samples")
@score_validation_samples.setter
def score_validation_samples(self, score_validation_samples):
assert_is_type(score_validation_samples, None, int)
self._parms["score_validation_samples"] = score_validation_samples
@property
def score_duty_cycle(self):
"""
Maximum duty cycle fraction for scoring (lower: more training, higher: more scoring).
Type: ``float`` (default: ``0.1``).
"""
return self._parms.get("score_duty_cycle")
@score_duty_cycle.setter
def score_duty_cycle(self, score_duty_cycle):
assert_is_type(score_duty_cycle, None, numeric)
self._parms["score_duty_cycle"] = score_duty_cycle
@property
def classification_stop(self):
"""
Stopping criterion for classification error fraction on training data (-1 to disable).
Type: ``float`` (default: ``0``).
"""
return self._parms.get("classification_stop")
@classification_stop.setter
def classification_stop(self, classification_stop):
assert_is_type(classification_stop, None, numeric)
self._parms["classification_stop"] = classification_stop
@property
def regression_stop(self):
"""
Stopping criterion for regression error (MSE) on training data (-1 to disable).
Type: ``float`` (default: ``1e-06``).
"""
return self._parms.get("regression_stop")
@regression_stop.setter
def regression_stop(self, regression_stop):
assert_is_type(regression_stop, None, numeric)
self._parms["regression_stop"] = regression_stop
@property
def stopping_rounds(self):
"""
Early stopping based on convergence of stopping_metric. Stop if simple moving average of length k of the
stopping_metric does not improve for k:=stopping_rounds scoring events (0 to disable)
Type: ``int`` (default: ``5``).
"""
return self._parms.get("stopping_rounds")
@stopping_rounds.setter
def stopping_rounds(self, stopping_rounds):
assert_is_type(stopping_rounds, None, int)
self._parms["stopping_rounds"] = stopping_rounds
@property
def stopping_metric(self):
"""
Metric to use for early stopping (AUTO: logloss for classification, deviance for regression). Note that custom
and custom_increasing can only be used in GBM and DRF with the Python client.
One of: ``"auto"``, ``"deviance"``, ``"logloss"``, ``"mse"``, ``"rmse"``, ``"mae"``, ``"rmsle"``, ``"auc"``,
``"lift_top_group"``, ``"misclassification"``, ``"mean_per_class_error"``, ``"custom"``, ``"custom_increasing"``
(default: ``"auto"``).
"""
return self._parms.get("stopping_metric")
@stopping_metric.setter
def stopping_metric(self, stopping_metric):
assert_is_type(stopping_metric, None, Enum("auto", "deviance", "logloss", "mse", "rmse", "mae", "rmsle", "auc", "lift_top_group", "misclassification", "mean_per_class_error", "custom", "custom_increasing"))
self._parms["stopping_metric"] = stopping_metric
@property
def stopping_tolerance(self):
"""
Relative tolerance for metric-based stopping criterion (stop if relative improvement is not at least this much)
Type: ``float`` (default: ``0``).
"""
return self._parms.get("stopping_tolerance")
@stopping_tolerance.setter
def stopping_tolerance(self, stopping_tolerance):
assert_is_type(stopping_tolerance, None, numeric)
self._parms["stopping_tolerance"] = stopping_tolerance
@property
def max_runtime_secs(self):
"""
Maximum allowed runtime in seconds for model training. Use 0 to disable.
Type: ``float`` (default: ``0``).
"""
return self._parms.get("max_runtime_secs")
@max_runtime_secs.setter
def max_runtime_secs(self, max_runtime_secs):
assert_is_type(max_runtime_secs, None, numeric)
self._parms["max_runtime_secs"] = max_runtime_secs
@property
def score_validation_sampling(self):
"""
Method used to sample validation dataset for scoring.
One of: ``"uniform"``, ``"stratified"`` (default: ``"uniform"``).
"""
return self._parms.get("score_validation_sampling")
@score_validation_sampling.setter
def score_validation_sampling(self, score_validation_sampling):
assert_is_type(score_validation_sampling, None, Enum("uniform", "stratified"))
self._parms["score_validation_sampling"] = score_validation_sampling
@property
def diagnostics(self):
"""
Enable diagnostics for hidden layers.
Type: ``bool`` (default: ``True``).
"""
return self._parms.get("diagnostics")
@diagnostics.setter
def diagnostics(self, diagnostics):
assert_is_type(diagnostics, None, bool)
self._parms["diagnostics"] = diagnostics
@property
def fast_mode(self):
"""
Enable fast mode (minor approximation in back-propagation).
Type: ``bool`` (default: ``True``).
"""
return self._parms.get("fast_mode")
@fast_mode.setter
def fast_mode(self, fast_mode):
assert_is_type(fast_mode, None, bool)
self._parms["fast_mode"] = fast_mode
@property
def force_load_balance(self):
"""
Force extra load balancing to increase training speed for small datasets (to keep all cores busy).
Type: ``bool`` (default: ``True``).
"""
return self._parms.get("force_load_balance")
@force_load_balance.setter
def force_load_balance(self, force_load_balance):
assert_is_type(force_load_balance, None, bool)
self._parms["force_load_balance"] = force_load_balance
@property
def variable_importances(self):
"""
Compute variable importances for input features (Gedeon method) - can be slow for large networks.
Type: ``bool`` (default: ``True``).
"""
return self._parms.get("variable_importances")
@variable_importances.setter
def variable_importances(self, variable_importances):
assert_is_type(variable_importances, None, bool)
self._parms["variable_importances"] = variable_importances
@property
def replicate_training_data(self):
"""
Replicate the entire training dataset onto every node for faster training on small datasets.
Type: ``bool`` (default: ``True``).
"""
return self._parms.get("replicate_training_data")
@replicate_training_data.setter
def replicate_training_data(self, replicate_training_data):
assert_is_type(replicate_training_data, None, bool)
self._parms["replicate_training_data"] = replicate_training_data
@property
def single_node_mode(self):
"""
Run on a single node for fine-tuning of model parameters.
Type: ``bool`` (default: ``False``).
"""
return self._parms.get("single_node_mode")
@single_node_mode.setter
def single_node_mode(self, single_node_mode):
assert_is_type(single_node_mode, None, bool)
self._parms["single_node_mode"] = single_node_mode
@property
def shuffle_training_data(self):
"""
Enable shuffling of training data (recommended if training data is replicated and train_samples_per_iteration is
close to #nodes x #rows, or if using balance_classes).
Type: ``bool`` (default: ``False``).
"""
return self._parms.get("shuffle_training_data")
@shuffle_training_data.setter
def shuffle_training_data(self, shuffle_training_data):
assert_is_type(shuffle_training_data, None, bool)
self._parms["shuffle_training_data"] = shuffle_training_data
@property
def missing_values_handling(self):
"""
Handling of missing values. Either MeanImputation or Skip.
One of: ``"mean_imputation"``, ``"skip"`` (default: ``"mean_imputation"``).
"""
return self._parms.get("missing_values_handling")
@missing_values_handling.setter
def missing_values_handling(self, missing_values_handling):
assert_is_type(missing_values_handling, None, Enum("mean_imputation", "skip"))
self._parms["missing_values_handling"] = missing_values_handling
@property
def quiet_mode(self):
"""
Enable quiet mode for less output to standard output.
Type: ``bool`` (default: ``False``).
"""
return self._parms.get("quiet_mode")
@quiet_mode.setter
def quiet_mode(self, quiet_mode):
assert_is_type(quiet_mode, None, bool)
self._parms["quiet_mode"] = quiet_mode
@property
def autoencoder(self):
"""
Auto-Encoder.
Type: ``bool`` (default: ``False``).
"""
return self._parms.get("autoencoder")
@autoencoder.setter
def autoencoder(self, autoencoder):
assert_is_type(autoencoder, None, bool)
self._parms["autoencoder"] = autoencoder
@property
def sparse(self):
"""
Sparse data handling (more efficient for data with lots of 0 values).
Type: ``bool`` (default: ``False``).
"""
return self._parms.get("sparse")
@sparse.setter
def sparse(self, sparse):
assert_is_type(sparse, None, bool)
self._parms["sparse"] = sparse
@property
def col_major(self):
"""
#DEPRECATED Use a column major weight matrix for input layer. Can speed up forward propagation, but might slow
down backpropagation.
Type: ``bool`` (default: ``False``).
"""
return self._parms.get("col_major")
@col_major.setter
def col_major(self, col_major):
assert_is_type(col_major, None, bool)
self._parms["col_major"] = col_major
@property
def average_activation(self):
"""
Average activation for sparse auto-encoder. #Experimental
Type: ``float`` (default: ``0``).
"""
return self._parms.get("average_activation")
@average_activation.setter
def average_activation(self, average_activation):
assert_is_type(average_activation, None, numeric)
self._parms["average_activation"] = average_activation
@property
def sparsity_beta(self):
"""
Sparsity regularization. #Experimental
Type: ``float`` (default: ``0``).
"""
return self._parms.get("sparsity_beta")
@sparsity_beta.setter
def sparsity_beta(self, sparsity_beta):
assert_is_type(sparsity_beta, None, numeric)
self._parms["sparsity_beta"] = sparsity_beta
@property
def max_categorical_features(self):
"""
Max. number of categorical features, enforced via hashing. #Experimental
Type: ``int`` (default: ``2147483647``).
"""
return self._parms.get("max_categorical_features")
@max_categorical_features.setter
def max_categorical_features(self, max_categorical_features):
assert_is_type(max_categorical_features, None, int)
self._parms["max_categorical_features"] = max_categorical_features
@property
def reproducible(self):
"""
Force reproducibility on small data (will be slow - only uses 1 thread).
Type: ``bool`` (default: ``False``).
"""
return self._parms.get("reproducible")
@reproducible.setter
def reproducible(self, reproducible):
assert_is_type(reproducible, None, bool)
self._parms["reproducible"] = reproducible
@property
def export_weights_and_biases(self):
"""
Whether to export Neural Network weights and biases to H2O Frames.
Type: ``bool`` (default: ``False``).
"""
return self._parms.get("export_weights_and_biases")
@export_weights_and_biases.setter
def export_weights_and_biases(self, export_weights_and_biases):
assert_is_type(export_weights_and_biases, None, bool)
self._parms["export_weights_and_biases"] = export_weights_and_biases
@property
def mini_batch_size(self):
"""
Mini-batch size (smaller leads to better fit, larger can speed up and generalize better).
Type: ``int`` (default: ``1``).
"""
return self._parms.get("mini_batch_size")
@mini_batch_size.setter
def mini_batch_size(self, mini_batch_size):
assert_is_type(mini_batch_size, None, int)
self._parms["mini_batch_size"] = mini_batch_size
@property
def categorical_encoding(self):
"""
Encoding scheme for categorical features.
One of: ``"auto"``, ``"enum"``, ``"one_hot_internal"``, ``"one_hot_explicit"``, ``"binary"``, ``"eigen"``,
``"label_encoder"``, ``"sort_by_response"``, ``"enum_limited"`` (default: ``"auto"``).
"""
return self._parms.get("categorical_encoding")
@categorical_encoding.setter
def categorical_encoding(self, categorical_encoding):
assert_is_type(categorical_encoding, None, Enum("auto", "enum", "one_hot_internal", "one_hot_explicit", "binary", "eigen", "label_encoder", "sort_by_response", "enum_limited"))
self._parms["categorical_encoding"] = categorical_encoding
@property
def elastic_averaging(self):
"""
Elastic averaging between compute nodes can improve distributed model convergence. #Experimental
Type: ``bool`` (default: ``False``).
"""
return self._parms.get("elastic_averaging")
@elastic_averaging.setter
def elastic_averaging(self, elastic_averaging):
assert_is_type(elastic_averaging, None, bool)
self._parms["elastic_averaging"] = elastic_averaging
@property
def elastic_averaging_moving_rate(self):
"""
Elastic averaging moving rate (only if elastic averaging is enabled).
Type: ``float`` (default: ``0.9``).
"""
return self._parms.get("elastic_averaging_moving_rate")
@elastic_averaging_moving_rate.setter
def elastic_averaging_moving_rate(self, elastic_averaging_moving_rate):
assert_is_type(elastic_averaging_moving_rate, None, numeric)
self._parms["elastic_averaging_moving_rate"] = elastic_averaging_moving_rate
@property
def elastic_averaging_regularization(self):
"""
Elastic averaging regularization strength (only if elastic averaging is enabled).
Type: ``float`` (default: ``0.001``).
"""
return self._parms.get("elastic_averaging_regularization")
@elastic_averaging_regularization.setter
def elastic_averaging_regularization(self, elastic_averaging_regularization):
assert_is_type(elastic_averaging_regularization, None, numeric)
self._parms["elastic_averaging_regularization"] = elastic_averaging_regularization
@property
def export_checkpoints_dir(self):
"""
Automatically export generated models to this directory.
Type: ``str``.
"""
return self._parms.get("export_checkpoints_dir")
@export_checkpoints_dir.setter
def export_checkpoints_dir(self, export_checkpoints_dir):
assert_is_type(export_checkpoints_dir, None, str)
self._parms["export_checkpoints_dir"] = export_checkpoints_dir
class H2OAutoEncoderEstimator(H2ODeepLearningEstimator):
"""
Examples
--------
>>> import h2o as ml
>>> from h2o.estimators.deeplearning import H2OAutoEncoderEstimator
>>> ml.init()
>>> rows = [[1,2,3,4,0]*50, [2,1,2,4,1]*50, [2,1,4,2,1]*50, [0,1,2,34,1]*50, [2,3,4,1,0]*50]
>>> fr = ml.H2OFrame(rows)
>>> fr[4] = fr[4].asfactor()
>>> model = H2OAutoEncoderEstimator()
>>> model.train(x=range(4), training_frame=fr)
"""
pass
|
py | 1a3ed8a7867e85c8e00392f7b69ac1a1ab5a692f | from django.db import models
# Create your models here.
class BookInfo(models.Model):
"""书本类"""
name = models.CharField(max_length=20)
def __str__(self):
"""
Override the __str__ method to customize the information this class returns.
"""
return self.name
class PeopleInfo(models.Model):
"""人物类"""
name = models.CharField(max_length=10)
gender = models.BooleanField()
book = models.ForeignKey(BookInfo, on_delete=models.CASCADE)
def __str__(self):
"""
Override the __str__ method to customize the information this class returns.
"""
return self.name
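# A brief usage sketch (illustrative only; assumes a Django shell with migrations
# applied, and the object names below are placeholders):
#
#   >>> book = BookInfo.objects.create(name="Journey to the West")
#   >>> PeopleInfo.objects.create(name="Wukong", gender=True, book=book)
#   >>> book.peopleinfo_set.all()   # reverse lookup through the ForeignKey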
|
py | 1a3eda385a15f05e3df149d039a2413fb0229ab5 | # -*- coding: utf-8 -*-
#
# Copyright © Spyder Project Contributors
# Licensed under the terms of the MIT License
# (see spyder/__init__.py for details)
"""pydoc widget"""
# Standard library imports
import os.path as osp
import sys
# Third party imports
from qtpy.QtCore import Qt, QThread, QUrl, Signal
from qtpy.QtGui import QCursor
from qtpy.QtWidgets import QApplication
# Local imports
from spyder.config.base import _
from spyder.py3compat import PY3, to_text_string
from spyder.utils.misc import select_port
from spyder.widgets.browser import WebBrowser
class PydocServer(QThread):
"""Pydoc server"""
server_started = Signal()
def __init__(self, port=7464):
QThread.__init__(self)
self.port = port
self.server = None
self.complete = False
def run(self):
import pydoc
if PY3:
# Python 3
try:
self.callback(pydoc._start_server(pydoc._url_handler,
port=self.port))
except TypeError:
# Python 3.7
self.callback(pydoc._start_server(pydoc._url_handler,
hostname='localhost',
port=self.port))
else:
# Python 2
pydoc.serve(self.port, self.callback, self.completer)
def callback(self, server):
self.server = server
self.server_started.emit()
def completer(self):
self.complete = True
def quit_server(self):
if PY3:
# Python 3
if self.server.serving:
self.server.stop()
else:
# Python 2
self.server.quit = 1
class PydocBrowser(WebBrowser):
"""
pydoc widget
"""
DEFAULT_PORT = 30128
def __init__(self, parent, options_button=None):
WebBrowser.__init__(self, parent, options_button=options_button)
self.server = None
self.port = None
def initialize(self):
"""Start pydoc server"""
QApplication.setOverrideCursor(QCursor(Qt.WaitCursor))
QApplication.processEvents()
self.start_server()
# Initializing continues in `initialize_continued` method...
def initialize_continued(self):
"""Load home page"""
self.go_home()
QApplication.restoreOverrideCursor()
def is_server_running(self):
"""Return True if pydoc server is already running"""
return self.server is not None
def closeEvent(self, event):
self.server.quit_server()
# while not self.server.complete: #XXX Is it really necessary?
# pass
event.accept()
#------ Public API -----------------------------------------------------
def start_server(self):
"""Start pydoc server"""
if self.server is None:
self.port = select_port(default_port=self.DEFAULT_PORT)
self.set_home_url('http://localhost:%d/' % self.port)
elif self.server.isRunning():
self.server.server_started.disconnect(self.initialize_continued)
self.server.quit()
self.server = PydocServer(port=self.port)
self.server.server_started.connect(self.initialize_continued)
self.server.start()
#------ WebBrowser API -----------------------------------------------------
def get_label(self):
"""Return address label text"""
return _("Module or package:")
def reload(self):
"""Reload page"""
self.start_server()
WebBrowser.reload(self)
def text_to_url(self, text):
"""Convert text address into QUrl object"""
if text.startswith('/'):
text = text[1:]
return QUrl(self.home_url.toString()+text+'.html')
def url_to_text(self, url):
"""Convert QUrl object to displayed text in combo box"""
return osp.splitext(to_text_string(url.path()))[0][1:]
def test():
"""Run web browser"""
from spyder.utils.qthelpers import qapplication
app = qapplication(test_time=8)
widget = PydocBrowser(None)
widget.show()
widget.initialize()
sys.exit(app.exec_())
if __name__ == '__main__':
test()
|
py | 1a3edaa5aaec0303f9c50b676d9b20e8dbe4fdcb | import numpy as np
import zarr
from astropy.io import fits
from astropy.wcs import WCS
from astropy.coordinates import SkyCoord
import astropy.units as u
from tqdm import tqdm
if (__name__ == '__main__'):
cat = np.loadtxt('sky_ldev_truthcat.txt', skiprows=1)[:, 1:]
n_sources = cat.shape[0]
f = fits.open('sky_ldev.fits')
wcs = WCS(f[0].header)
nf, ny, nx = f[0].data.shape
fout = zarr.open('training.zarr', 'w')
ds_images = fout.create_dataset('images', shape=(n_sources, 32*32*128), dtype=np.float32)
batchsize = 64
n_batches = n_sources // batchsize
n_remaining = n_sources % batchsize
images = np.zeros((batchsize, 32*32*128), dtype=np.float32)
loop = 0
# All batches
for batch in tqdm(range(n_batches), desc='batch'):
lowb = loop
highb = loop + batchsize
for i in tqdm(range(batchsize), desc='object', leave=False):
ra = cat[loop, 0] * u.deg
dec = cat[loop, 1] * u.deg
coords = SkyCoord(ra, dec, unit="deg")
freq = cat[loop, 4] * u.Hz
coords = wcs.world_to_pixel(coords, freq)
lowx = int(coords[0]) - 16
highx = int(coords[0]) + 16
if (lowx < 0):
delta = -lowx
lowx += delta
highx += delta
if (highx >= nx):
delta = highx - nx
lowx -= delta
highx -= delta
lowy = int(coords[1]) - 16
highy = int(coords[1]) + 16
if (lowy < 0):
delta = -lowy
lowy += delta
highy += delta
if (highy >= ny):
delta = highy - ny
lowy -= delta
highy -= delta
lowf = int(coords[2]) - 64
highf = int(coords[2]) + 64
if (lowf < 0):
delta = -lowf
lowf += delta
highf += delta
if (highf >= nf):
delta = highf - nf
lowf -= delta
highf -= delta
cube = f[0].data[lowf:lowf+128, lowy:highy, lowx:highx].reshape((128*32*32))
images[i, :] = cube
loop += 1
ds_images[lowb:highb, :] = images[:]
# Remaining images
lowb = loop
highb = loop + n_remaining
images = np.zeros((n_remaining, 32*32*128), dtype=np.float32)
for i in tqdm(range(n_remaining), desc='object', leave=False):
ra = cat[loop, 0] * u.deg
dec = cat[loop, 1] * u.deg
coords = SkyCoord(ra, dec, unit="deg")
freq = cat[loop, 4] * u.Hz
coords = wcs.world_to_pixel(coords, freq)
lowx = int(coords[0]) - 16
highx = int(coords[0]) + 16
if (lowx < 0):
delta = -lowx
lowx += delta
highx += delta
if (highx >= nx):
delta = highx - nx
lowx -= delta
highx -= delta
lowy = int(coords[1]) - 16
highy = int(coords[1]) + 16
if (lowy < 0):
delta = -lowy
lowy += delta
highy += delta
if (highy >= ny):
delta = highy - ny
lowy -= delta
highy -= delta
lowf = int(coords[2]) - 64
highf = int(coords[2]) + 64
if (lowf < 0):
delta = -lowf
lowf += delta
highf += delta
if (highf >= nf):
delta = highf - nf
lowf -= delta
highf -= delta
cube = f[0].data[lowf:lowf+128, lowy:highy, lowx:highx].reshape((128*32*32))
images[i, :] = cube
loop += 1
ds_images[lowb:highb, :] = images[:]
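# Sketch of reading the exported cubes back for training (illustrative; assumes
# the 'training.zarr' store written above):
#
#   store = zarr.open('training.zarr', 'r')
#   first_cube = store['images'][0].reshape(128, 32, 32)   # (frequency, y, x)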
|
py | 1a3edaed6e0ccb7736222effa19be944f55c259c | import search
from base import BaseCrawler
from crawler import FBCrawler
from scraper import FBScraper
|
py | 1a3edb9fdcdf2a75e0033bb06fcf903b38234c5b | # <<BEGIN-copyright>>
# Copyright 2021, Lawrence Livermore National Security, LLC.
# See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: BSD-3-Clause
# <<END-copyright>>
"""
This module contains the classes representing the GNDS documentation nodes relatedItem and relatedItems.
"""
from .. import ancestry as ancestryModule
from .. import suite as suiteModule
from .. import text as textModule
from . import abstractClasses as abstractClassesModule
class RelationType:
allowed = ( 'IsCitedBy', 'Cites', 'IsSupplementTo', 'IsSupplementedBy', 'IsContinuedBy', 'Continues', 'Describes', 'IsDescribedBy',
'HasMetadata', 'IsMetadataFor', 'HasVersion', 'IsVersionOf', 'IsNewVersionOf', 'IsPreviousVersionOf', 'IsPartOf',
'HasPart', 'IsPublishedIn', 'IsReferencedBy', 'References', 'IsDocumentedBy', 'Documents', 'IsCompiledBy', 'Compiles',
'IsVariantFormOf', 'IsOriginalFormOf', 'IsIdenticalTo', 'IsReviewedBy', 'Reviews', 'IsDerivedFrom', 'IsSourceOf',
'IsRequiredBy', 'Requires', 'Obsoletes', 'IsObsoletedBy' )
class RelatedItem(ancestryModule.Ancestry2):
moniker = 'relatedItem'
keyName = 'name'
def __init__(self, name, href, relationType):
ancestryModule.Ancestry2.__init__(self)
self.__name = textModule.raiseIfNotString(name, 'name')
self.__href = textModule.raiseIfNotString(href, 'href')
self.__relationType = abstractClassesModule.raiseIfNotInList(relationType, RelationType.allowed, 'relationType')
@property
def name(self):
"""."""
return self.__name
@property
def href(self):
"""."""
return self.__href
@property
def relationType(self):
"""."""
return self.__relationType
def toXMLList(self, **kwargs):
indent = kwargs.get('indent', '')
attributes = ' name="%s"' % self.__name
if len(self.__href) > 0: attributes += ' href="%s"' % self.__href
if len(self.__relationType) > 0: attributes += ' relationType="%s"' % self.__relationType
return [ '%s<%s%s/>' % ( indent, self.moniker, attributes ) ]
@staticmethod
def parseConstructBareNodeInstance(node, xPath, linkData, **kwargs):
name = node.get('name')
href = node.get('href', '')
relationType = node.get('relationType', '')
return RelatedItem(name, href, relationType)
class RelatedItems(suiteModule.Suite):
moniker = 'relatedItems'
def __init__(self):
suiteModule.Suite.__init__(self, [ RelatedItem ])
def toXML(self, indent = '', **kwargs):
return '\n'.join(self.toXMLList(**kwargs))
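# Illustrative sketch (not part of the module): build a RelatedItem and render it
# with the toXMLList method defined above; the name and href are placeholder values.
#
#   item = RelatedItem('companion dataset', 'https://example.org/related', 'Cites')
#   print('\n'.join(item.toXMLList(indent='  ')))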
|
py | 1a3edc3836d230190a6f22aedeb7291f47f92426 |
original_int = 21000
forward_str = str(original_int)
reverse_str = ''
for char in forward_str:
reverse_str = char + reverse_str
print(reverse_str)
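# Equivalent one-liner using Python's slice notation, shown for comparison:
#   print(str(original_int)[::-1])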
|
py | 1a3eddab1b2cc326d8444065aa699f1d00d35f7c | """
Implement numerical maxabs scaler.
"""
from typing import Any, Union
import dask.dataframe as dd
class MaxAbsScaler:
"""Max Absolute Value Scaler for scaling numerical values
Attributes:
name
Name of scaler
maxabs
Max absolute value of provided data column
"""
def __init__(self) -> None:
"""
Initialize the numerical scaler.
"""
self.name = "maxabsScaler"
self.maxabs = 0
def fit(self, col_df: dd.Series) -> Any:
"""
Extract max absolute value for MaxAbs Scaler according to the provided column.
Parameters
----------
col_df
Provided data column.
"""
self.maxabs = max(abs(value) for value in col_df.drop_duplicates().compute().tolist())
return self
def transform(self, col_df: dd.Series) -> dd.Series:
"""
Transform the provided data column with the extracted max absolute value.
Parameters
----------
col_df
Provided data column.
"""
result = col_df.map(self.compute_val)
return result
def fit_transform(self, col_df: dd.Series) -> dd.Series:
"""
Extract max absolute value for MaxAbs Scaler according to the provided column.
Transform the provided data column with the extracted max absolute value.
Parameters
----------
col_df
Data column.
"""
return self.fit(col_df).transform(col_df)
def compute_val(self, val: Union[int, float]) -> Union[int, float]:
"""
Compute scaling value of provided value with fitted max absolute value.
Parameters
----------
val
Value to be scaled.
"""
return val / self.maxabs
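# Usage sketch (illustrative; assumes pandas and dask are installed and the column
# holds plain numeric values):
#
#   import pandas as pd
#   import dask.dataframe as dd
#   col = dd.from_pandas(pd.Series([-4.0, 2.0, 1.0]), npartitions=1)
#   scaled = MaxAbsScaler().fit_transform(col)
#   print(scaled.compute().tolist())   # each value divided by the max absolute value (4.0)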
|
py | 1a3edfc6b6bbc920abeecdfbdf74036d01793f9b | import datetime
import json
import logging
import os
from http.cookies import SimpleCookie
from time import time
from typing import Any # noqa
from typing import Dict # noqa
from typing import cast
from unittest.mock import Mock
from unittest.mock import patch
from urllib.parse import parse_qs
from urllib.parse import urlencode
from urllib.parse import urlparse
import pytest
import responses
from freezegun import freeze_time
from jwkest.jwe import JWEException
from jwkest.jwe import JWEnc
from requests import ConnectionError
from requests.exceptions import MissingSchema
from testfixtures import LogCapture
from oic import rndstr
from oic.exception import FailedAuthentication
from oic.exception import InvalidRequest
from oic.exception import RedirectURIError
from oic.oauth2.message import ErrorResponse
from oic.oic import DEF_SIGN_ALG
from oic.oic import make_openid_request
from oic.oic.consumer import Consumer
from oic.oic.message import AccessTokenRequest
from oic.oic.message import AccessTokenResponse
from oic.oic.message import AuthorizationRequest
from oic.oic.message import AuthorizationResponse
from oic.oic.message import CheckSessionRequest
from oic.oic.message import IdToken
from oic.oic.message import Message
from oic.oic.message import OpenIDSchema
from oic.oic.message import RefreshAccessTokenRequest
from oic.oic.message import RegistrationRequest
from oic.oic.message import RegistrationResponse
from oic.oic.message import TokenErrorResponse
from oic.oic.message import UserInfoRequest
from oic.oic.provider import InvalidRedirectURIError
from oic.oic.provider import InvalidSectorIdentifier
from oic.oic.provider import Provider
from oic.utils.authn.authn_context import AuthnBroker
from oic.utils.authn.client import verify_client
from oic.utils.authn.user import UserAuthnMethod
from oic.utils.authz import AuthzHandling
from oic.utils.http_util import CookieDealer
from oic.utils.http_util import Response
from oic.utils.http_util import SeeOther
from oic.utils.keyio import KeyBundle
from oic.utils.keyio import KeyJar
from oic.utils.keyio import ec_init
from oic.utils.keyio import keybundle_from_local_file
from oic.utils.sdb import AuthnEvent
from oic.utils.time_util import epoch_in_a_while
from oic.utils.userinfo import UserInfo
__author__ = 'rohe0002'
CONSUMER_CONFIG = {
"authz_page": "/authz",
"scope": ["openid"],
"response_type": ["code"],
"user_info": {
"name": None,
"email": None,
"nickname": None
},
"request_method": "param"
}
SERVER_INFO = {
"version": "3.0",
"issuer": "https://connect-op.heroku.com",
"authorization_endpoint": "http://localhost:8088/authorization",
"token_endpoint": "http://localhost:8088/token",
"flows_supported": ["code", "token", "code token"],
}
CLIENT_CONFIG = {
"client_id": "number5",
'config': {'issuer': SERVER_INFO["issuer"]}
}
CLIENT_CONFIG_2 = {
"client_id": "client0",
'config': {'issuer': SERVER_INFO["issuer"]}
}
CLIENT_SECRET = "abcdefghijklmnop"
CLIENT_ID = "client_1"
KC_SYM = KeyBundle([{"kty": "oct", "key": CLIENT_SECRET, "use": "ver"},
{"kty": "oct", "key": CLIENT_SECRET, "use": "sig"}])
KC_SYM2 = KeyBundle([{"kty": "oct", "key": "drickyoughurt", "use": "sig"},
{"kty": "oct", "key": "drickyoughurt", "use": "ver"}])
BASE_PATH = os.path.abspath(
os.path.join(os.path.dirname(__file__), "data/keys"))
KC_RSA = keybundle_from_local_file(os.path.join(BASE_PATH, "rsa.key"),
"RSA", ["ver", "sig"])
KEYJAR = KeyJar()
KEYJAR[CLIENT_ID] = [KC_SYM, KC_RSA]
KEYJAR["number5"] = [KC_SYM2, KC_RSA]
KEYJAR[""] = KC_RSA
CDB = {
"number5": {
"password": "hemligt",
"client_secret": "drickyoughurt",
"redirect_uris": [("http://localhost:8087/authz", None)],
"post_logout_redirect_uris": [
("https://example.com/post_logout", None)],
"client_salt": "salted",
'response_types': ['code', 'token', 'code id_token', 'none',
'code token', 'id_token']
},
"a1b2c3": {
"redirect_uris": [("http://localhost:8087/authz", None)],
"client_salt": "salted",
'client_secret': 'very_secret',
'response_types': ['code', 'token', 'code id_token']
},
"client0": {
"redirect_uris": [("http://www.example.org/authz", None)],
'client_secret': 'very_secret',
"post_logout_redirect_uris": [
("https://www.example.org/post_logout", None)],
"client_salt": "salted",
'response_types': ['code', 'token', 'code id_token']
},
CLIENT_ID: {
"client_secret": CLIENT_SECRET,
"redirect_uris": [("http://localhost:8087/authz", None)],
"client_salt": "salted",
'token_endpoint_auth_method': 'client_secret_post',
'response_types': ['code', 'token', 'code id_token']
}
} # type: Dict[str, Dict[str, Any]]
USERDB = {
"user": {
"name": "Hans Granberg",
"nickname": "Hasse",
"email": "[email protected]",
"verified": False,
"sub": "user"
},
"username": {
"name": "Linda Lindgren",
"nickname": "Linda",
"email": "[email protected]",
"verified": True,
"sub": "username",
"extra_claim": "extra_claim_value",
}
}
URLMAP = {CLIENT_ID: ["https://example.com/authz"]}
def _eq(l1, l2):
return set(l1) == set(l2)
class DummyAuthn(UserAuthnMethod):
def __init__(self, srv, user):
UserAuthnMethod.__init__(self, srv)
self.user = user
def authenticated_as(self, cookie=None, **kwargs):
if cookie == "FAIL":
return None, 0
else:
return {"uid": self.user}, time()
AUTHN_BROKER = AuthnBroker()
AUTHN_BROKER.add("UNDEFINED", DummyAuthn(None, "username"))
# dealing with authorization
AUTHZ = AuthzHandling()
SYMKEY = rndstr(16) # symmetric key used to encrypt cookie info
USERINFO = UserInfo(USERDB)
class TestProvider(object):
@pytest.fixture(autouse=True)
def create_provider(self, session_db_factory):
self.provider = Provider(SERVER_INFO["issuer"], session_db_factory(SERVER_INFO["issuer"]),
CDB,
AUTHN_BROKER, USERINFO,
AUTHZ, verify_client, SYMKEY, urlmap=URLMAP,
keyjar=KEYJAR)
self.provider.baseurl = self.provider.name
self.cons = Consumer({}, CONSUMER_CONFIG.copy(), CLIENT_CONFIG,
server_info=SERVER_INFO, )
self.cons.behaviour = {
"request_object_signing_alg": DEF_SIGN_ALG["openid_request_object"]}
self.cons.keyjar[""] = KC_RSA
self.cons.keyjar.import_jwks(self.provider.keyjar.export_jwks(),
self.cons.issuer)
self.cons2 = Consumer({}, CONSUMER_CONFIG.copy(), CLIENT_CONFIG_2,
server_info=SERVER_INFO, )
self.cons2.behaviour = {
"request_object_signing_alg": DEF_SIGN_ALG["openid_request_object"]}
self.cons2.keyjar[""] = KC_RSA
def test_authorization_endpoint(self):
bib = {"scope": ["openid"],
"state": "id-6da9ca0cc23959f5f33e8becd9b08cae",
"redirect_uri": "http://localhost:8087/authz",
"response_type": ["code"],
"client_id": "a1b2c3",
"nonce": "Nonce"}
arq = AuthorizationRequest(**bib)
resp = self.provider.authorization_endpoint(request=arq.to_urlencoded())
parsed = parse_qs(urlparse(resp.message).query)
assert parsed["scope"] == ["openid"]
assert parsed["state"][0] == "id-6da9ca0cc23959f5f33e8becd9b08cae"
assert "code" in parsed
def test_provider_features_extra_claims(self):
self.provider.extra_claims = ['claim_1', 'claim_2']
features = self.provider.provider_features()
assert 'claim_1' in features['claims_supported']
assert 'claim_2' in features['claims_supported']
def test_provider_features_extra_scopes(self):
self.provider.extra_scope_dict = {'my_scope': ['claim_1', 'claim_2']}
features = self.provider.provider_features()
assert 'my_scope' in features['scopes_supported']
assert 'claim_1' in features['claims_supported']
assert 'claim_2' in features['claims_supported']
def test_authorization_endpoint_request(self):
bib = {"scope": ["openid"],
"state": "id-6da9ca0cc23959f5f33e8becd9b08cae",
"redirect_uri": "http://localhost:8087/authz",
"response_type": ["code", "id_token"],
"client_id": "a1b2c3",
"nonce": "Nonce",
"prompt": ["none"]}
req = AuthorizationRequest(**bib)
# want to be someone else !
ic = {"sub": {"value": "userX"}}
_keys = self.provider.keyjar.get_signing_key(key_type="RSA")
req["request"] = make_openid_request(req, _keys, idtoken_claims=ic,
request_object_signing_alg="RS256")
with pytest.raises(FailedAuthentication):
self.provider.authorization_endpoint(request=req.to_urlencoded())
def test_authorization_endpoint_id_token(self):
bib = {"scope": ["openid"],
"state": "id-6da9ca0cc23959f5f33e8becd9b08cae",
"redirect_uri": "http://localhost:8087/authz",
"response_type": ["code", "id_token"],
"client_id": "a1b2c3",
"nonce": "Nonce",
"prompt": ["none"]}
req = AuthorizationRequest(**bib)
areq = AuthorizationRequest(response_type="code",
client_id="client_1",
redirect_uri="http://example.com/authz",
scope=["openid"], state="state000")
sdb = self.provider.sdb
ae = AuthnEvent("userX", "salt")
sid = sdb.create_authz_session(ae, areq)
sdb.do_sub(sid, "client_salt")
_info = sdb[sid]
# All this is jut removed when the id_token is constructed
# The proper information comes from the session information
_user_info = IdToken(iss="https://foo.example.om", sub="foo",
aud=bib["client_id"],
exp=epoch_in_a_while(minutes=10),
acr="2", nonce=bib["nonce"])
idt = self.provider.id_token_as_signed_jwt(_info,
access_token="access_token",
user_info=_user_info)
req["id_token"] = idt
query_string = req.to_urlencoded()
# client_id not in id_token["aud"] so login required
resp = self.provider.authorization_endpoint(request=query_string,
cookie="FAIL")
parsed_resp = parse_qs(urlparse(resp.message).fragment)
assert parsed_resp["error"][0] == "login_required"
req["client_id"] = "client_1"
query_string = req.to_urlencoded()
# client_id is in id_token["aud"] so no login required
resp = self.provider.authorization_endpoint(request=query_string,
cookie="FAIL")
assert resp.message.startswith("http://localhost:8087/authz")
def test_authorization_endpoint_bad_scope(self):
bib = {"scope": ["openid", "offline_access"],
"state": "id-6da9ca0cc23959f5f33e8becd9b08cae",
"redirect_uri": "http://localhost:8087/authz",
"response_type": ["code"],
"client_id": "a1b2c3"}
arq = AuthorizationRequest(**bib)
resp = self.provider.authorization_endpoint(request=arq.to_urlencoded())
assert resp.status_code == 303
parsed = parse_qs(urlparse(resp.message).query)
assert parsed["error"][0] == "invalid_request"
assert parsed["error_description"][0] == "consent in prompt"
def test_authenticated(self):
_state, location = self.cons.begin("openid", "code",
path="http://localhost:8087")
resp = self.provider.authorization_endpoint(
request=urlparse(location).query)
parsed = urlparse(resp.message)
assert "{}://{}{}".format(parsed.scheme, parsed.netloc,
parsed.path) == "http://localhost:8087/authz"
part = self.cons.parse_authz(query=resp.message)
aresp = part[0]
assert part[1] is None
assert part[2] is None
assert isinstance(aresp, AuthorizationResponse)
assert _eq(aresp.keys(), ['code', 'state', 'scope', 'client_id', 'iss'])
assert _eq(self.cons.grant[_state].keys(),
['code', 'tokens', 'id_token', 'exp_in', 'seed',
'grant_expiration_time'])
def test_authenticated_url(self):
state, location = self.cons.begin("openid", "code",
path="http://localhost:8087")
resp = self.provider.authorization_endpoint(
request=urlparse(location).query)
aresp = self.cons.parse_response(AuthorizationResponse, resp.message,
sformat="urlencoded")
assert isinstance(aresp, AuthorizationResponse)
assert _eq(aresp.keys(), ['code', 'state', 'scope', 'client_id', 'iss'])
def test_authenticated_hybrid(self):
_state, location = self.cons.begin(
scope="openid email claims_in_id_token",
response_type="code id_token",
path="http://localhost:8087")
resp = self.provider.authorization_endpoint(
request=urlparse(location).query)
part = self.cons.parse_authz(resp.message)
aresp = part[0]
assert part[1] is None
assert part[2] is not None
assert isinstance(aresp, AuthorizationResponse)
assert _eq(aresp.keys(), ['scope', 'state', 'id_token', 'client_id',
'code'])
assert _eq(self.cons.grant[_state].keys(),
['code', 'id_token', 'tokens',
'exp_in',
'grant_expiration_time', 'seed'])
id_token = part[2]
assert isinstance(id_token, IdToken)
assert _eq(id_token.keys(),
['nonce', 'c_hash', 'sub', 'iss', 'acr', 'exp', 'auth_time',
'iat', 'aud'])
def test_authenticated_token(self):
_state, location = self.cons.begin("openid", response_type="token",
path="http://localhost:8087")
resp = self.provider.authorization_endpoint(
request=urlparse(location).query)
parsed = parse_qs(urlparse(resp.message).fragment)
assert parsed["token_type"][0] == "Bearer"
assert "access_token" in parsed
def test_authenticated_none(self):
_state, location = self.cons.begin("openid", response_type="none",
path="http://localhost:8087")
resp = self.provider.authorization_endpoint(
request=location.split("?")[1])
parsed = urlparse(resp.message)
assert "{}://{}{}".format(parsed.scheme, parsed.netloc,
parsed.path) == "http://localhost:8087/authz"
assert "state" in parse_qs(parsed.query)
def test_code_grant_type_ok(self):
authreq = AuthorizationRequest(state="state",
redirect_uri="http://example.com/authz",
client_id=CLIENT_ID,
response_type="code",
scope=["openid"])
_sdb = self.provider.sdb
sid = _sdb.access_token.key(user="sub", areq=authreq)
access_grant = _sdb.access_token(sid=sid)
ae = AuthnEvent("user", "salt")
_sdb[sid] = {
"oauth_state": "authz",
"authn_event": ae.to_json(),
"authzreq": authreq.to_json(),
"client_id": CLIENT_ID,
"code": access_grant,
"code_used": False,
"scope": ["openid"],
"redirect_uri": "http://example.com/authz",
}
_sdb.do_sub(sid, 'client_salt')
# Construct Access token request
areq = AccessTokenRequest(code=access_grant, client_id=CLIENT_ID,
redirect_uri='http://example.com/authz',
client_secret=CLIENT_SECRET,
grant_type='authorization_code')
resp = self.provider.code_grant_type(areq)
atr = AccessTokenResponse().deserialize(resp.message, 'json')
assert _eq(atr.keys(), ['token_type', 'id_token', 'access_token', 'scope'])
def test_code_grant_type_missing_code(self):
# Construct Access token request
areq = AccessTokenRequest(client_id=CLIENT_ID,
redirect_uri='http://example.com/authz',
client_secret=CLIENT_SECRET,
grant_type='authorization_code')
resp = self.provider.code_grant_type(areq)
atr = TokenErrorResponse().deserialize(resp.message, 'json')
assert atr['error'] == 'invalid_request'
assert atr['error_description'] == 'Missing code'
def test_code_grant_type_revoked(self):
authreq = AuthorizationRequest(state="state",
redirect_uri="http://example.com/authz",
client_id=CLIENT_ID,
response_type="code",
scope=["openid"])
_sdb = self.provider.sdb
sid = _sdb.access_token.key(user="sub", areq=authreq)
access_grant = _sdb.access_token(sid=sid)
ae = AuthnEvent("user", "salt")
_sdb[sid] = {
"oauth_state": "authz",
"authn_event": ae.to_json(),
"authzreq": authreq.to_json(),
"client_id": CLIENT_ID,
"code": access_grant,
"revoked": True,
"scope": ["openid"],
"redirect_uri": "http://example.com/authz",
}
_sdb.do_sub(sid, 'client_salt')
# Construct Access token request
areq = AccessTokenRequest(code=access_grant, client_id=CLIENT_ID,
redirect_uri='http://example.com/authz',
client_secret=CLIENT_SECRET,
grant_type='authorization_code')
resp = self.provider.code_grant_type(areq)
atr = TokenErrorResponse().deserialize(resp.message, 'json')
assert atr['error'] == 'invalid_request'
assert atr['error_description'] == 'Token is revoked'
def test_code_grant_type_no_session(self):
# Construct Access token request
areq = AccessTokenRequest(code='some grant', client_id=CLIENT_ID,
redirect_uri='http://example.com/authz',
client_secret=CLIENT_SECRET,
grant_type='authorization_code')
resp = self.provider.code_grant_type(areq)
atr = TokenErrorResponse().deserialize(resp.message, 'json')
assert atr['error'] == 'invalid_request'
assert atr['error_description'] == 'Code is invalid'
def test_code_grant_type_missing_redirect_uri(self):
authreq = AuthorizationRequest(state="state",
redirect_uri="http://example.com/authz",
client_id=CLIENT_ID,
response_type="code",
scope=["openid"])
_sdb = self.provider.sdb
sid = _sdb.access_token.key(user="sub", areq=authreq)
access_grant = _sdb.access_token(sid=sid)
ae = AuthnEvent("user", "salt")
_sdb[sid] = {
"oauth_state": "authz",
"authn_event": ae.to_json(),
"authzreq": authreq.to_json(),
"client_id": CLIENT_ID,
"code": access_grant,
"code_used": False,
"scope": ["openid"],
"redirect_uri": "http://example.com/authz",
}
_sdb.do_sub(sid, 'client_salt')
# Construct Access token request
areq = AccessTokenRequest(code=access_grant, client_id=CLIENT_ID,
client_secret=CLIENT_SECRET,
grant_type='authorization_code')
resp = self.provider.code_grant_type(areq)
atr = TokenErrorResponse().deserialize(resp.message, 'json')
assert atr['error'] == 'invalid_request'
assert atr['error_description'] == 'Missing redirect_uri'
def test_code_grant_type_used(self):
authreq = AuthorizationRequest(state="state",
redirect_uri="http://example.com/authz",
client_id=CLIENT_ID,
response_type="code",
scope=["openid"])
_sdb = self.provider.sdb
sid = _sdb.access_token.key(user="sub", areq=authreq)
access_grant = _sdb.access_token(sid=sid)
ae = AuthnEvent("user", "salt")
_sdb[sid] = {
"oauth_state": "authz",
"authn_event": ae.to_json(),
"authzreq": authreq.to_json(),
"client_id": CLIENT_ID,
"code": access_grant,
"code_used": True,
"scope": ["openid"],
"redirect_uri": "http://example.com/authz",
}
_sdb.do_sub(sid, 'client_salt')
# Construct Access token request
areq = AccessTokenRequest(code=access_grant, client_id=CLIENT_ID,
redirect_uri='http://example.com/authz',
client_secret=CLIENT_SECRET,
grant_type='authorization_code')
resp = self.provider.code_grant_type(areq)
atr = TokenErrorResponse().deserialize(resp.message, 'json')
assert atr['error'] == 'access_denied'
assert atr['error_description'] == 'Access Code already used'
def test_code_grant_type_refresh(self):
authreq = AuthorizationRequest(state="state",
redirect_uri="http://example.com/authz",
client_id=CLIENT_ID,
response_type="code",
scope=["openid offline_access"],
prompt="consent")
_sdb = self.provider.sdb
sid = _sdb.access_token.key(user="sub", areq=authreq)
access_grant = _sdb.access_token(sid=sid)
ae = AuthnEvent("user", "salt")
_sdb[sid] = {
"oauth_state": "authz",
"authn_event": ae.to_json(),
"authzreq": authreq.to_json(),
"client_id": CLIENT_ID,
"code": access_grant,
"code_used": False,
"scope": ["openid", "offline_access"],
"redirect_uri": "http://example.com/authz",
}
_sdb.do_sub(sid, "client_salt")
# Construct Access token request
areq = AccessTokenRequest(code=access_grant, client_id=CLIENT_ID,
redirect_uri="http://example.com/authz",
client_secret=CLIENT_SECRET,
grant_type='authorization_code')
resp = self.provider.code_grant_type(areq)
atr = AccessTokenResponse().deserialize(resp.message, "json")
assert _eq(atr.keys(), ['token_type', 'id_token', 'access_token', 'scope', 'refresh_token'])
def test_client_credentials_grant_type(self):
resp = self.provider.client_credentials_grant_type(Message())
parsed = ErrorResponse().from_json(resp.message)
assert parsed['error'] == 'unsupported_grant_type'
assert parsed['error_description'] == 'Unsupported grant_type'
def test_password_grant_type(self):
resp = self.provider.password_grant_type(Message())
parsed = ErrorResponse().from_json(resp.message)
assert parsed['error'] == 'unsupported_grant_type'
assert parsed['error_description'] == 'Unsupported grant_type'
def test_authz_endpoint(self):
_state, location = self.cons.begin("openid",
response_type=["code", "token"],
path="http://localhost:8087")
resp = self.provider.authorization_endpoint(
request=urlparse(location).query)
parsed = parse_qs(urlparse(resp.message).fragment)
assert parsed["token_type"][0] == "Bearer"
assert "code" in parsed
def test_idtoken(self):
AREQ = AuthorizationRequest(response_type="code", client_id=CLIENT_ID,
redirect_uri="http://example.com/authz",
scope=["openid"], state="state000")
ae = AuthnEvent("sub", "salt")
sid = self.provider.sdb.create_authz_session(ae, AREQ)
self.provider.sdb.do_sub(sid, "client_salt")
session = self.provider.sdb[sid]
id_token = self.provider.id_token_as_signed_jwt(session)
assert len(id_token.split(".")) == 3
def test_idtoken_with_extra_claims(self):
areq = AuthorizationRequest(response_type="code", client_id=CLIENT_ID,
redirect_uri="http://example.com/authz",
scope=["openid"], state="state000")
aevent = AuthnEvent("sub", "salt")
sid = self.provider.sdb.create_authz_session(aevent, areq)
self.provider.sdb.do_sub(sid, "client_salt")
session = self.provider.sdb[sid]
claims = {'k1': 'v1', 'k2': 32}
id_token = self.provider.id_token_as_signed_jwt(session,
extra_claims=claims)
parsed = IdToken().from_jwt(id_token, keyjar=self.provider.keyjar)
for key, value in claims.items():
assert parsed[key] == value
def test_userinfo_endpoint(self):
self.cons.client_secret = "drickyoughurt"
self.cons.config["response_type"] = ["token"]
self.cons.config["request_method"] = "parameter"
state, location = self.cons.begin("openid", "token",
path="http://localhost:8087")
resp = self.provider.authorization_endpoint(
request=urlparse(location).query)
# redirect
atr = AuthorizationResponse().deserialize(
urlparse(resp.message).fragment, "urlencoded")
uir = UserInfoRequest(access_token=atr["access_token"], schema="openid")
resp = self.provider.userinfo_endpoint(request=uir.to_urlencoded())
ident = OpenIDSchema().deserialize(resp.message, "json")
assert _eq(ident.keys(), ['nickname', 'sub', 'name', 'email'])
def test_userinfo_endpoint_expired(self):
self.cons.client_secret = "drickyoughurt"
self.cons.config["response_type"] = ["token"]
self.cons.config["request_method"] = "parameter"
state, location = self.cons.begin("openid", "token", path="http://localhost:8087")
initial_datetime = datetime.datetime(2018, 2, 5, 10, 0, 0, 0)
final_datetime = datetime.datetime(2018, 2, 9, 10, 0, 0, 0)
with freeze_time(initial_datetime) as frozen:
resp = self.provider.authorization_endpoint(request=urlparse(location).query)
# redirect
atr = AuthorizationResponse().deserialize(urlparse(resp.message).fragment, "urlencoded")
frozen.move_to(final_datetime)
uir = UserInfoRequest(access_token=atr["access_token"], schema="openid")
resp = self.provider.userinfo_endpoint(request=uir.to_urlencoded())
message = json.loads(resp.message)
assert message['error'] == 'invalid_token'
assert message['error_description'] == 'Token is expired'
def test_userinfo_endpoint_extra_claim(self):
# We have to recreate the capabilities cache after changing extra_claims
self.provider.extra_claims = ['extra_claim']
self.provider.capabilities = self.provider.provider_features()
self.cons.client_secret = "drickyoughurt"
self.cons.config["response_type"] = ["token"]
self.cons.config["request_method"] = "parameter"
# Request the extra claim
self.cons.consumer_config['user_info'] = {'extra_claim': None}
state, location = self.cons.begin("openid", "token",
path="http://localhost:8087")
resp = self.provider.authorization_endpoint(
request=urlparse(location).query)
# redirect
atr = AuthorizationResponse().deserialize(
urlparse(resp.message).fragment, "urlencoded")
uir = UserInfoRequest(access_token=atr["access_token"], schema="openid")
resp = self.provider.userinfo_endpoint(request=uir.to_urlencoded())
ident = OpenIDSchema().deserialize(resp.message, "json")
assert _eq(ident.keys(), ['sub', 'extra_claim'])
def test_userinfo_endpoint_unknown_claim(self):
self.cons.client_secret = "drickyoughurt"
self.cons.config["response_type"] = ["token"]
self.cons.config["request_method"] = "parameter"
# Request the extra claim
self.cons.consumer_config['user_info'] = {'extra_claim': None}
state, location = self.cons.begin("openid", "token",
path="http://localhost:8087")
resp = self.provider.authorization_endpoint(
request=urlparse(location).query)
# redirect
atr = AuthorizationResponse().deserialize(
urlparse(resp.message).fragment, "urlencoded")
uir = UserInfoRequest(access_token=atr["access_token"], schema="openid")
resp = self.provider.userinfo_endpoint(request=uir.to_urlencoded())
ident = OpenIDSchema().deserialize(resp.message, "json")
assert _eq(ident.keys(), ['sub'])
def test_userinfo_endpoint_extra_scopes(self):
# We have to recreate the capabilities cache after changing extra_scope_dict
self.provider.extra_scope_dict = {'extra_scope': ['extra_claim']}
self.provider.capabilities = self.provider.provider_features()
self.cons.client_secret = "drickyoughurt"
self.cons.consumer_config['user_info'] = {'extra_claim': None}
self.cons.config["response_type"] = ["token"]
self.cons.config["request_method"] = "parameter"
# Request the extra scope
state, location = self.cons.begin("openid extra_scope", "token", path="http://localhost:8087")
resp = self.provider.authorization_endpoint(request=urlparse(location).query)
# redirect
atr = AuthorizationResponse().deserialize(urlparse(resp.message).fragment, "urlencoded")
uir = UserInfoRequest(access_token=atr["access_token"], schema="openid")
resp = self.provider.userinfo_endpoint(request=uir.to_urlencoded())
ident = OpenIDSchema().deserialize(resp.message, "json")
assert _eq(ident.keys(), ['sub', 'extra_claim'])
def test_userinfo_endpoint_authn(self):
self.cons.client_secret = "drickyoughurt"
self.cons.config["response_type"] = ["token"]
self.cons.config["request_method"] = "parameter"
state, location = self.cons.begin("openid", "token",
path="http://localhost:8087")
resp = self.provider.authorization_endpoint(
request=urlparse(location).query)
# redirect
atr = AuthorizationResponse().deserialize(
urlparse(resp.message).fragment, "urlencoded")
uir = UserInfoRequest(schema="openid")
resp = self.provider.userinfo_endpoint(request=uir.to_urlencoded(),
authn='Bearer ' + atr[
'access_token'])
ident = OpenIDSchema().deserialize(resp.message, "json")
assert _eq(ident.keys(), ['nickname', 'sub', 'name', 'email'])
def test_userinfo_endpoint_missing_client(self):
self.provider.cdb["unknownclient"] = {
"client_secret": "unknownclient",
"redirect_uris": [("http://localhost:8087/authz", None)],
"post_logout_redirect_uris": [("https://example.com/post_logout", None)],
"client_salt": "salted",
"response_types": ["code", "token", "code id_token", "none", "code token", "id_token"]
}
self.cons.client_id = "unknownclient"
self.cons.client_secret = "unknownclient"
self.cons.config["response_type"] = ["token"]
self.cons.config["request_method"] = "parameter"
state, location = self.cons.begin("openid", "token", path="http://localhost:8087")
resp = self.provider.authorization_endpoint(request=urlparse(location).query)
# redirect
atr = AuthorizationResponse().deserialize(urlparse(resp.message).fragment, "urlencoded")
uir = UserInfoRequest(schema="openid")
del self.provider.cdb["unknownclient"]
resp = self.provider.userinfo_endpoint(request=uir.to_urlencoded(), authn="Bearer " + atr["access_token"])
ident = OpenIDSchema().deserialize(resp.message, "json")
assert ident["error"] == "unauthorized_client"
def test_userinfo_endpoint_malformed(self):
uir = UserInfoRequest(schema="openid")
resp = self.provider.userinfo_endpoint(request=uir.to_urlencoded(),
authn='Not a token')
assert json.loads(resp.message) == {
'error_description': 'Token is malformed',
'error': 'invalid_request'}
def test_userinfo_endpoint_missing_authn(self):
authreq = AuthorizationRequest(state="state",
redirect_uri="http://example.com/authz",
client_id=CLIENT_ID,
response_type="code",
scope=["openid", 'offline_access'],
prompt='consent')
_sdb = self.provider.sdb
sid = _sdb.access_token.key(user="sub", areq=authreq)
access_grant = _sdb.access_token(sid=sid)
# authn_event is missing - this can happen for offline requests
_sdb[sid] = {
"sub": "my_sub",
"oauth_state": "authz",
"uid": "user",
"authzreq": authreq.to_json(),
"client_id": CLIENT_ID,
"code": access_grant,
"code_used": False,
"scope": ["openid", 'offline_access'],
"redirect_uri": "http://example.com/authz",
}
uir = UserInfoRequest(access_token=access_grant, schema="openid")
resp = self.provider.userinfo_endpoint(request=uir.to_urlencoded())
ident = OpenIDSchema().deserialize(resp.message, "json")
assert _eq(ident.keys(), ['sub'])
def test_check_session_endpoint(self):
session = {"sub": "UserID", "client_id": "number5"}
idtoken = self.provider.id_token_as_signed_jwt(session)
csr = CheckSessionRequest(id_token=idtoken)
info = self.provider.check_session_endpoint(request=csr.to_urlencoded())
idt = IdToken().deserialize(info.message, "json")
assert _eq(idt.keys(), ['sub', 'aud', 'iss', 'acr', 'exp', 'iat'])
assert idt["iss"] == self.provider.name
def test_response_mode_fragment(self):
areq = {'response_mode': 'fragment'}
assert self.provider.response_mode(areq, True) is None
with pytest.raises(InvalidRequest):
self.provider.response_mode(areq, False)
def test_response_mode_query(self):
areq = {'response_mode': 'query'}
assert self.provider.response_mode(areq, False) is None
with pytest.raises(InvalidRequest):
self.provider.response_mode(areq, True)
def test_response_mode_form_post(self):
areq = {'response_mode': 'form_post'}
aresp = AuthorizationResponse()
aresp['state'] = 'state'
response = self.provider.response_mode(areq, False, redirect_uri='http://example.com',
aresp=aresp, headers='')
assert 'Submit This Form' in response.message
assert 'http://example.com' in response.message
assert '<input type="hidden" name="state" value="state"/>' in response.message
def test_auth_init_invalid(self):
areq = {'response_mode': 'unknown',
'redirect_uri': 'http://localhost:8087/authz',
'client_id': 'number5',
'scope': 'openid',
'response_type': 'code',
'client_secret': 'drickyoghurt'}
response = self.provider.auth_init(areq)
assert isinstance(response, Response)
assert response.status_code == 400
assert json.loads(response.message) == {'error': 'invalid_request',
'error_description': 'Contains unsupported response mode'}
@patch('oic.oic.provider.utc_time_sans_frac', Mock(return_value=123456))
def test_client_secret_expiration_time(self):
exp_time = self.provider.client_secret_expiration_time()
assert exp_time == 209856
def test_registration_endpoint_post(self):
req = RegistrationRequest()
req["application_type"] = "web"
req["client_name"] = "My super service"
req["redirect_uris"] = ["http://example.com/authz"]
req["contacts"] = ["[email protected]"]
req["response_types"] = ["code"]
resp = self.provider.registration_endpoint(request=req.to_json())
regresp = RegistrationResponse().deserialize(resp.message, "json")
assert _eq(regresp.keys(),
['redirect_uris', 'contacts', 'application_type',
'client_name', 'registration_client_uri',
'client_secret_expires_at',
'registration_access_token',
'client_id', 'client_secret',
'client_id_issued_at', 'response_types'])
def test_registration_endpoint_post_unicode(self):
data = 'application_type=web&client_name=M%C3%A1+supe%C5%99+service&' \
'redirect_uris=http%3A%2F%2Fexample.com%2Fauthz&response_types=code'
resp = self.provider.registration_endpoint(request=data)
regresp = RegistrationResponse().deserialize(resp.message, "json")
assert _eq(regresp.keys(),
['redirect_uris', 'application_type',
'client_name', 'registration_client_uri',
'client_secret_expires_at',
'registration_access_token',
'client_id', 'client_secret',
'client_id_issued_at', 'response_types'])
def test_registration_endpoint_get(self):
rr = RegistrationRequest(operation="register", redirect_uris=["http://example.org/new"],
response_types=["code"])
registration_req = rr.to_json()
resp = self.provider.registration_endpoint(request=registration_req)
regresp = RegistrationResponse().from_json(resp.message)
authn = ' '.join(['Bearer', regresp['registration_access_token']])
query = '='.join(['client_id', regresp['client_id']])
resp = self.provider.registration_endpoint(request=query, authn=authn, method='GET')
assert json.loads(resp.message) == regresp.to_dict()
def test_registration_endpoint_put(self):
resp = self.provider.registration_endpoint(request='', method='PUT')
assert json.loads(resp.message) == {'error': 'Unsupported operation',
'error_description': 'Altering of the registration is not supported'}
def test_registration_endpoint_delete(self):
resp = self.provider.registration_endpoint(request='', method='DELETE')
assert json.loads(resp.message) == {'error': 'Unsupported operation',
'error_description': 'Deletion of the registration is not supported'}
def test_registration_endpoint_unsupported(self):
resp = self.provider.registration_endpoint(request='', method='HEAD')
assert json.loads(resp.message) == {'error': 'Unsupported method',
'error_description': 'Unsupported HTTP method'}
def test_do_client_registration_invalid_sector_uri(self):
rr = RegistrationRequest(operation='register', sector_identifier_uri='https://example.com',
redirect_uris=['http://example.com/changed'])
redirects = ['http://example.com/present']
with responses.RequestsMock() as rsps:
rsps.add(rsps.GET, 'https://example.com', body=json.dumps(redirects))
resp = self.provider.do_client_registration(rr, 'client0')
assert resp.status_code == 400
error = json.loads(resp.message)
assert error['error'] == 'invalid_configuration_parameter'
def test_registration_endpoint_with_non_https_redirect_uri_implicit_flow(
self):
params = {"application_type": "web",
"redirect_uris": ["http://example.com/authz"],
"response_types": ["id_token", "token"]}
req = RegistrationRequest(**params)
resp = self.provider.registration_endpoint(request=req.to_json())
assert resp.status_code == 400
error = json.loads(resp.message)
assert error["error"] == "invalid_redirect_uri"
def test_verify_redirect_uris_with_https_code_flow(self):
params = {"application_type": "web",
"redirect_uris": ["http://example.com/authz"],
"response_types": ["code"]}
request = RegistrationRequest(**params)
verified_uris = self.provider.verify_redirect_uris(request)
assert verified_uris == [("http://example.com/authz", None)]
def test_verify_redirect_uris_with_non_https_redirect_uri_implicit_flow(
self):
params = {"application_type": "web",
"redirect_uris": ["http://example.com/authz"],
"response_types": ["id_token", "token"]}
request = RegistrationRequest(**params)
with pytest.raises(InvalidRedirectURIError) as exc_info:
self.provider.verify_redirect_uris(request)
assert str(exc_info.value) == "None https redirect_uri not allowed"
def test_verify_redirect_uris_unicode(self):
url = 'http://example.com/a\xc5\xaf\xc5\xa5h\xc5\xbe'
params = {"application_type": "web",
"redirect_uris": [url],
"response_types": ["code"]}
request = RegistrationRequest(**params)
verified_uris = self.provider.verify_redirect_uris(request)
assert verified_uris == [(url, None)]
def test_provider_key_setup(self, tmpdir, session_db_factory):
path = tmpdir.strpath
# Path is really just a random name that we turn into a subpath of
# our current directory. That doesn't work with drive letters on
# Windows, so we throw them away and prefix '.' to keep the path local.
path = "." + os.path.splitdrive(path)[1].replace(os.path.sep, '/')
provider = Provider("pyoicserv", session_db_factory(SERVER_INFO["issuer"]), {},
None, None, None, None, None)
provider.baseurl = "http://www.example.com"
provider.key_setup(path, path, sig={"format": "jwk", "alg": "RSA"})
keys = provider.keyjar.get_signing_key("RSA")
assert len(keys) == 1
assert provider.jwks_uri == "http://www.example.com/{}/jwks".format(path)
@pytest.mark.parametrize("uri", [
"http://example.org/foo",
"http://example.com/cb",
"http://example.org/cb?got=you",
"http://example.org/cb/foo?got=you"
])
def test_verify_redirect_uri_faulty_without_query(self, uri):
rr = RegistrationRequest(operation="register",
redirect_uris=["http://example.org/cb"],
response_types=["code"])
registration_req = rr.to_json()
resp = self.provider.registration_endpoint(request=registration_req)
regresp = RegistrationResponse().from_json(resp.message)
cid = regresp["client_id"]
areq = AuthorizationRequest(redirect_uri=uri,
client_id=cid,
response_type="code",
scope="openid")
with pytest.raises(RedirectURIError):
self.provider._verify_redirect_uri(areq)
@pytest.mark.parametrize("uri", [
"http://example.org/foo",
"http://example.com/cb",
"http://example.org/cb?got=you",
"http://example.org/cb?test=fail",
"http://example.org/cb/foo?got=you"
])
def test_verify_redirect_uri_faulty_with_query(self, uri):
rr = RegistrationRequest(operation="register",
redirect_uris=["http://example.org/cb?test=test"],
response_types=["code"])
registration_req = rr.to_json()
resp = self.provider.registration_endpoint(request=registration_req)
regresp = RegistrationResponse().from_json(resp.message)
cid = regresp["client_id"]
areq = AuthorizationRequest(redirect_uri=uri,
client_id=cid,
response_type="code",
scope="openid")
with pytest.raises(RedirectURIError):
self.provider._verify_redirect_uri(areq)
@pytest.mark.parametrize("uri", [
"http://example.org/cb",
])
def test_verify_redirect_uri_correct_without_query(self, uri):
rr = RegistrationRequest(operation="register",
redirect_uris=["http://example.org/cb"],
response_types=["code"])
registration_req = rr.to_json()
resp = self.provider.registration_endpoint(request=registration_req)
regresp = RegistrationResponse().from_json(resp.message)
cid = regresp["client_id"]
areq = AuthorizationRequest(redirect_uri=uri,
client_id=cid,
response_type="code",
scope="openid")
self.provider._verify_redirect_uri(areq)
@pytest.mark.parametrize("uri", [
"http://example.org/cb",
"http://example.org/cb?test=test"
])
def test_verify_redirect_uri_correct_with_query(self, uri):
rr = RegistrationRequest(operation="register",
redirect_uris=["http://example.org/cb", "http://example.org/cb?test=test"],
response_types=["code"])
registration_req = rr.to_json()
resp = self.provider.registration_endpoint(request=registration_req)
regresp = RegistrationResponse().from_json(resp.message)
cid = regresp["client_id"]
areq = AuthorizationRequest(redirect_uri=uri,
client_id=cid,
response_type="code",
scope="openid")
self.provider._verify_redirect_uri(areq)
def test_verify_sector_identifier_no_scheme(self):
rr = RegistrationRequest(operation="register", sector_identifier_uri="example.com")
with LogCapture(level=logging.DEBUG) as logcap:
message = "Couldn't open sector_identifier_uri"
with pytest.raises(InvalidSectorIdentifier, match=message):
self.provider._verify_sector_identifier(rr)
assert len(logcap.records) == 2
# First log record is from server...
assert isinstance(logcap.records[1].msg, MissingSchema)
error = "Invalid URL 'example.com': No schema supplied. Perhaps you meant http://example.com?"
assert str(logcap.records[1].msg) == error
def test_verify_sector_identifier_nonreachable(self):
rr = RegistrationRequest(operation="register", sector_identifier_uri="https://example.com")
with responses.RequestsMock() as rsps, LogCapture(level=logging.DEBUG) as logcap:
rsps.add(rsps.GET, "https://example.com", status=404)
message = "Couldn't open sector_identifier_uri"
with pytest.raises(InvalidSectorIdentifier, match=message):
self.provider._verify_sector_identifier(rr)
assert len(logcap.records) == 0
def test_verify_sector_identifier_error(self):
rr = RegistrationRequest(operation="register", sector_identifier_uri="https://example.com")
error = ConnectionError('broken connection')
with responses.RequestsMock() as rsps, LogCapture(level=logging.DEBUG) as logcap:
rsps.add(rsps.GET, "https://example.com", body=error)
with pytest.raises(InvalidSectorIdentifier, match="Couldn't open sector_identifier_uri"):
self.provider._verify_sector_identifier(rr)
assert len(logcap.records) == 2
# First log record is from server...
assert logcap.records[1].msg == error
def test_verify_sector_identifier_malformed(self):
rr = RegistrationRequest(operation="register", sector_identifier_uri="https://example.com")
body = "This is not the JSON you are looking for"
with responses.RequestsMock() as rsps, LogCapture(level=logging.DEBUG) as logcap:
rsps.add(rsps.GET, "https://example.com", body=body)
with pytest.raises(InvalidSectorIdentifier, match="Error deserializing sector_identifier_uri content"):
self.provider._verify_sector_identifier(rr)
assert len(logcap.records) == 1
assert logcap.records[0].msg == "sector_identifier_uri => %s"
assert logcap.records[0].args == (body,)
def test_verify_sector_identifier_ru_missing_in_si(self):
"""Redirect_uris is not present in the sector_identifier_uri content."""
rr = RegistrationRequest(operation="register", sector_identifier_uri="https://example.com",
redirect_uris=["http://example.com/missing"])
with responses.RequestsMock() as rsps, LogCapture(level=logging.DEBUG) as logcap:
rsps.add(rsps.GET, "https://example.com",
body=json.dumps(["http://example.com/present"]))
with pytest.raises(InvalidSectorIdentifier, match="redirect_uri missing from sector_identifiers"):
self.provider._verify_sector_identifier(rr)
assert len(logcap.records) == 2
assert logcap.records[0].msg == "sector_identifier_uri => %s"
assert logcap.records[0].args == ('["http://example.com/present"]',)
assert logcap.records[1].msg == "redirect_uris: %s"
assert logcap.records[1].args == (["http://example.com/missing"],)
def test_verify_sector_identifier_ru_missing(self):
"""Redirect_uris is not present in the request."""
rr = RegistrationRequest(operation="register", sector_identifier_uri="https://example.com")
redirects = ["http://example.com/present"]
with responses.RequestsMock() as rsps, LogCapture(level=logging.DEBUG) as logcap:
rsps.add(rsps.GET, "https://example.com", body=json.dumps(redirects))
si_redirects, si_url = self.provider._verify_sector_identifier(rr)
assert si_url == "https://example.com"
assert si_redirects == redirects
assert len(logcap.records) == 1
assert logcap.records[0].msg == "sector_identifier_uri => %s"
assert logcap.records[0].args == ('["http://example.com/present"]',)
def test_verify_sector_identifier_ru_ok(self):
"""Redirect_uris is present in the sector_identifier_uri content."""
rr = RegistrationRequest(operation="register", sector_identifier_uri="https://example.com",
redirect_uris=["http://example.com/present"])
redirects = ["http://example.com/present"]
with responses.RequestsMock() as rsps, LogCapture(level=logging.DEBUG) as logcap:
rsps.add(rsps.GET, "https://example.com", body=json.dumps(redirects))
si_redirects, si_url = self.provider._verify_sector_identifier(rr)
assert si_url == "https://example.com"
assert si_redirects == redirects
assert len(logcap.records) == 2
assert logcap.records[0].msg == "sector_identifier_uri => %s"
assert logcap.records[0].args == ('["http://example.com/present"]',)
assert logcap.records[1].msg == "redirect_uris: %s"
assert logcap.records[1].args == (["http://example.com/present"],)
@pytest.mark.parametrize("uri", [
    "http://example.org/cb",
    "http://example.org/cb?got=you",
    "http://example.org/cb?foo=you",
    "http://example.org/cb?foo=bar&got=you",
    "http://example.org/cb?foo=you&foo=bar"
])
def test_registered_redirect_uri_faulty_with_query_component(self, uri):
rr = RegistrationRequest(operation="register",
redirect_uris=[
"http://example.org/cb?foo=bar"],
response_types=["code"])
registration_req = rr.to_json()
resp = self.provider.registration_endpoint(request=registration_req)
regresp = RegistrationResponse().from_json(resp.message)
cid = regresp["client_id"]
areq = AuthorizationRequest(redirect_uri=uri,
client_id=cid,
scope="openid",
response_type="code")
with pytest.raises(RedirectURIError):
self.provider._verify_redirect_uri(areq)
def test_registered_redirect_uri_correct_with_query_component(self):
rr = RegistrationRequest(operation="register",
redirect_uris=[
"http://example.org/cb?foo=bar"],
response_types=["code"])
registration_req = rr.to_json()
resp = self.provider.registration_endpoint(request=registration_req)
regresp = RegistrationResponse().from_json(resp.message)
cid = regresp["client_id"]
areq = AuthorizationRequest(
redirect_uri="http://example.org/cb?foo=bar",
client_id=cid, scope="openid",
response_type="code")
self.provider._verify_redirect_uri(areq)
def test_verify_redirect_uri_native_http_localhost(self):
areq = RegistrationRequest(
redirect_uris=["http://localhost/cb"],
application_type='native')
self.provider.verify_redirect_uris(areq)
def test_verify_redirect_uri_native_loopback(self):
areq = RegistrationRequest(
redirect_uris=["http://127.0.0.1/cb"],
application_type='native')
self.provider.verify_redirect_uris(areq)
def test_verify_redirect_uri_native_http_non_localhost(self):
areq = RegistrationRequest(
redirect_uris=["http://example.org/cb"],
application_type='native')
with pytest.raises(InvalidRedirectURIError):
    self.provider.verify_redirect_uris(areq)
def test_verify_redirect_uri_native_custom(self):
areq = RegistrationRequest(
redirect_uris=["com.example.app:/oauth2redirect"],
application_type='native')
self.provider.verify_redirect_uris(areq)
def test_verify_redirect_uri_native_https(self):
areq = RegistrationRequest(
redirect_uris=["https://example.org/cb"],
application_type='native')
with pytest.raises(InvalidRedirectURIError):
    self.provider.verify_redirect_uris(areq)
def test_read_registration(self):
rr = RegistrationRequest(operation="register",
redirect_uris=[
"http://example.org/new"],
response_types=["code"])
registration_req = rr.to_json()
resp = self.provider.registration_endpoint(request=registration_req)
regresp = RegistrationResponse().from_json(resp.message)
authn = ' '.join(['Bearer', regresp['registration_access_token']])
query = '='.join(['client_id', regresp['client_id']])
resp = self.provider.read_registration(authn, query)
assert json.loads(resp.message) == regresp.to_dict()
def test_read_registration_malformed_authn(self):
resp = self.provider.read_registration('wrong string', 'request')
assert resp.status_code == 400
assert json.loads(resp.message) == {'error': 'invalid_request',
'error_description': None}
def test_read_registration_missing_clientid(self):
resp = self.provider.read_registration('Bearer wrong string', 'request')
assert resp.status_code == 401
def test_read_registration_wrong_cid(self):
rr = RegistrationRequest(operation="register",
redirect_uris=["http://example.org/new"],
response_types=["code"])
registration_req = rr.to_json()
resp = self.provider.registration_endpoint(request=registration_req)
regresp = RegistrationResponse().from_json(resp.message)
authn = ' '.join(['Bearer', regresp['registration_access_token']])
query = '='.join(['client_id', '123456789012'])
resp = self.provider.read_registration(authn, query)
assert resp.status_code == 401
def test_read_registration_wrong_rat(self):
rr = RegistrationRequest(operation="register",
redirect_uris=["http://example.org/new"],
response_types=["code"])
registration_req = rr.to_json()
resp = self.provider.registration_endpoint(request=registration_req)
regresp = RegistrationResponse().from_json(resp.message)
authn = ' '.join(['Bearer', 'registration_access_token'])
query = '='.join(['client_id', regresp['client_id']])
resp = self.provider.read_registration(authn, query)
assert resp.status_code == 401
def test_key_rollover(self):
provider2 = Provider("FOOP", {}, {}, None, None, None, None, None)
provider2.keyjar = KEYJAR
# Number of KeyBundles
assert len(provider2.keyjar.issuer_keys[""]) == 1
kb = ec_init({"type": "EC", "crv": "P-256", "use": ["sig"]})
provider2.do_key_rollover(json.loads(kb.jwks()), "b%d")
assert len(provider2.keyjar.issuer_keys[""]) == 2
kb = ec_init({"type": "EC", "crv": "P-256", "use": ["sig"]})
provider2.do_key_rollover(json.loads(kb.jwks()), "b%d")
assert len(provider2.keyjar.issuer_keys[""]) == 3
provider2.remove_inactive_keys(-1)
assert len(provider2.keyjar.issuer_keys[""]) == 2
def test_end_session_endpoint(self):
# End session not allowed if no cookie and no id_token_hint is sent
# (can't determine session)
resp = self.provider.end_session_endpoint("", cookie="FAIL")
assert resp.status_code == 400
def _create_cookie(self, user, client_id, c_type='sso'):
cd = CookieDealer(self.provider)
set_cookie = cd.create_cookie('{}][{}'.format(user, client_id), c_type,
self.provider.sso_cookie_name)
cookies_string = set_cookie[1]
all_cookies = SimpleCookie()
try:
cookies_string = cookies_string.decode()
except (AttributeError, UnicodeDecodeError):
pass
all_cookies.load(cookies_string)
return all_cookies
def _code_auth(self):
state, location = self.cons.begin("openid", "code",
path="http://localhost:8087")
return self.provider.authorization_endpoint(
request=location.split("?")[1])
def _code_auth2(self):
state, location = self.cons2.begin("openid", "code",
path="http://www.example.org")
return self.provider.authorization_endpoint(
request=location.split("?")[1])
def test_end_session_endpoint_with_cookie(self):
self._code_auth()
cookie = self._create_cookie("username", "number5")
resp = self.provider.end_session_endpoint(
urlencode({"state": 'abcde'}),
cookie=cookie)
assert isinstance(resp, SeeOther)
assert 'state=abcde' in resp.message
assert 'username' not in self.provider.sdb.uid2sid
self._assert_cookies_expired(resp.headers)
def test_end_session_endpoint_with_wrong_cookie(self):
self._code_auth()
cookie = self._create_cookie("username", "number5", c_type='session')
resp = self.provider.end_session_endpoint(
urlencode({"state": 'abcde'}),
cookie=cookie)
assert isinstance(resp, Response)
_err = ErrorResponse().from_json(resp.message)
assert _err['error'] == "invalid_request"
def test_end_session_endpoint_with_cookie_wrong_user(self):
self._code_auth()
cookie = self._create_cookie("diggins", "number5")
resp = self.provider.end_session_endpoint(
urlencode({"state": 'abcde'}),
cookie=cookie)
assert isinstance(resp, Response)
_err = ErrorResponse().from_json(resp.message)
assert _err['error'] == "invalid_request"
def test_end_session_endpoint_with_cookie_wrong_client(self):
self._code_auth()
cookie = self._create_cookie("username", "a1b2c3")
resp = self.provider.end_session_endpoint(
urlencode({"state": 'abcde'}),
cookie=cookie)
assert isinstance(resp, Response)
_err = ErrorResponse().from_json(resp.message)
assert _err['error'] == "invalid_request"
def test_end_session_endpoint_with_cookie_dual_login(self):
self._code_auth()
self._code_auth2()
cookie = self._create_cookie("username", "client0")
resp = self.provider.end_session_endpoint(
urlencode({"state": 'abcde'}),
cookie=cookie)
assert isinstance(resp, SeeOther)
assert 'state=abcde' in resp.message
assert 'username' not in self.provider.sdb.uid2sid
self._assert_cookies_expired(resp.headers)
def test_end_session_endpoint_with_cookie_dual_login_wrong_client(self):
self._code_auth()
self._code_auth2()
cookie = self._create_cookie("username", "a1b2c3")
resp = self.provider.end_session_endpoint(
urlencode({"state": 'abcde'}),
cookie=cookie)
assert isinstance(resp, Response)
_err = ErrorResponse().from_json(resp.message)
assert _err['error'] == "invalid_request"
def test_end_session_endpoint_with_id_token_hint_only(self):
id_token = self._auth_with_id_token()
assert self.provider.sdb.get_sids_by_sub(
id_token["sub"]) # verify we got valid session
id_token_hint = id_token.to_jwt(algorithm="none")
resp = self.provider.end_session_endpoint(
urlencode({"id_token_hint": id_token_hint}))
assert isinstance(resp, SeeOther)
assert not self.provider.sdb.get_sids_by_sub(
id_token["sub"]) # verify session has been removed
self._assert_cookies_expired(resp.headers)
def test_end_session_endpoint_with_id_token_hint_and_cookie(self):
id_token = self._auth_with_id_token()
assert self.provider.sdb.get_sids_by_sub(
id_token["sub"]) # verify we got valid session
id_token_hint = id_token.to_jwt(algorithm="none")
cookie = self._create_cookie("username", "number5")
resp = self.provider.end_session_endpoint(
urlencode({"id_token_hint": id_token_hint}),
cookie=cookie)
assert isinstance(resp, SeeOther)
assert not self.provider.sdb.get_sids_by_sub(
id_token["sub"]) # verify session has been removed
self._assert_cookies_expired(resp.headers)
def test_end_session_endpoint_with_post_logout_redirect_uri(self):
self._code_auth()
# verify we got valid session
assert 'username' in self.provider.sdb.uid2sid
cookie = self._create_cookie("username", "number5")
client_id = cast(str, CLIENT_CONFIG["client_id"]) # type: str
post_logout_redirect_uri = \
CDB[client_id]["post_logout_redirect_uris"][0][0]
resp = self.provider.end_session_endpoint(urlencode(
{"post_logout_redirect_uri": post_logout_redirect_uri,
"state": 'abcde'}),
cookie=cookie)
assert isinstance(resp, SeeOther)
assert 'username' not in self.provider.sdb.uid2sid
self._assert_cookies_expired(resp.headers)
def test_end_session_endpoint_with_wrong_post_logout_redirect_uri(self):
self._code_auth()
# verify we got valid session
assert 'username' in self.provider.sdb.uid2sid
cookie = self._create_cookie("username", "number5")
post_logout_redirect_uri = 'https://www.example.com/logout'
resp = self.provider.end_session_endpoint(urlencode(
{"post_logout_redirect_uri": post_logout_redirect_uri,
"state": 'abcde'}),
cookie=cookie)
assert isinstance(resp, Response)
_err = ErrorResponse().from_json(resp.message)
assert _err['error'] == "invalid_request"
def test_session_state_in_auth_req_for_session_support(self, session_db_factory):
provider = Provider(SERVER_INFO["issuer"], session_db_factory(SERVER_INFO["issuer"]), CDB,
AUTHN_BROKER, USERINFO,
AUTHZ, verify_client, SYMKEY, urlmap=URLMAP,
keyjar=KEYJAR)
provider.capabilities.update({
"check_session_iframe": "https://op.example.com/check_session"})
req_args = {"scope": ["openid"],
"redirect_uri": "http://localhost:8087/authz",
"response_type": ["code"],
"client_id": "number5"
}
areq = AuthorizationRequest(**req_args)
resp = provider.authorization_endpoint(
request=areq.to_urlencoded())
aresp = self.cons.parse_response(AuthorizationResponse, resp.message,
sformat="urlencoded")
assert "session_state" in aresp
def _assert_cookies_expired(self, http_headers):
cookies_string = ";".join(
[c[1] for c in http_headers if c[0] == "Set-Cookie"])
all_cookies = SimpleCookie()
all_cookies.load(cookies_string)
now = datetime.datetime.utcnow()
for c in [self.provider.cookie_name, self.provider.session_cookie_name]:
dt = datetime.datetime.strptime(all_cookies[c]["expires"],
"%a, %d-%b-%Y %H:%M:%S GMT")
assert dt < now # make sure the cookies have expired to be cleared
def _auth_with_id_token(self):
state, location = self.cons.begin("openid", "id_token",
path="http://localhost:8087")
resp = self.provider.authorization_endpoint(
request=location.split("?")[1])
aresp = self.cons.parse_response(AuthorizationResponse, resp.message,
sformat="urlencoded")
return aresp["id_token"]
def test_id_token_RS512_sign(self):
self.provider.capabilities[
'id_token_signing_alg_values_supported'] = ['RS512']
self.provider.build_jwx_def()
id_token = self._auth_with_id_token()
assert id_token.jws_header['alg'] == "RS512"
def test_refresh_token_grant_type_ok(self):
authreq = AuthorizationRequest(state="state",
redirect_uri="http://example.com/authz",
client_id=CLIENT_ID,
response_type="code",
scope=["openid", 'offline_access'],
prompt='consent')
_sdb = self.provider.sdb
sid = _sdb.access_token.key(user="sub", areq=authreq)
access_grant = _sdb.access_token(sid=sid)
ae = AuthnEvent("user", "salt")
_sdb[sid] = {
"oauth_state": "authz",
"authn_event": ae.to_json(),
"authzreq": authreq.to_json(),
"client_id": CLIENT_ID,
"code": access_grant,
"code_used": False,
"scope": ["openid", 'offline_access'],
"redirect_uri": "http://example.com/authz",
}
_sdb.do_sub(sid, "client_salt")
info = _sdb.upgrade_to_token(access_grant, issue_refresh=True)
rareq = RefreshAccessTokenRequest(grant_type="refresh_token",
refresh_token=info['refresh_token'],
client_id=CLIENT_ID,
client_secret=CLIENT_SECRET,
scope=['openid'])
resp = self.provider.refresh_token_grant_type(rareq)
atr = AccessTokenResponse().deserialize(resp.message, "json")
assert atr['refresh_token'] is not None
assert atr['token_type'] == 'Bearer'
def test_refresh_token_grant_type_wrong_token(self):
rareq = RefreshAccessTokenRequest(grant_type="refresh_token",
refresh_token='some_other_refresh_token',
client_id=CLIENT_ID,
client_secret=CLIENT_SECRET,
scope=['openid'])
resp = self.provider.refresh_token_grant_type(rareq)
atr = TokenErrorResponse().deserialize(resp.message, "json")
assert atr['error'] == 'invalid_request'
assert atr['error_description'] == 'Not a refresh token'
def test_refresh_token_grant_type_expired(self):
# Missing refresh_token also raises Expired
rareq = RefreshAccessTokenRequest(grant_type="refresh_token",
refresh_token='Refresh_some_other_refresh_token',
client_id=CLIENT_ID,
client_secret=CLIENT_SECRET,
scope=['openid'])
resp = self.provider.refresh_token_grant_type(rareq)
atr = TokenErrorResponse().deserialize(resp.message, "json")
assert atr['error'] == 'invalid_request'
assert atr['error_description'] == 'Refresh token is expired'
def test_authorization_endpoint_faulty_request_uri(self):
bib = {"scope": ["openid"],
"state": "id-6da9ca0cc23959f5f33e8becd9b08cae",
"redirect_uri": "http://localhost:8087/authz",
"request_uri": "https://some-non-resolving.hostname.com/request_uri#1234",
# faulty request_uri
"response_type": ["code"],
"client_id": "a1b2c3"}
arq = AuthorizationRequest(**bib)
resp = self.provider.authorization_endpoint(request=arq.to_urlencoded())
assert resp.status_code == 400
msg = json.loads(resp.message)
assert msg["error"] == "invalid_request_uri"
def test_encrypt_missing_info(self):
payload = self.provider.encrypt('payload', {}, 'some_client')
assert payload == 'payload'
def test_encrypt_missing_recuperated(self):
self.provider.keyjar = KeyJar() # Empty keyjar, all keys are lost
with open(os.path.join(BASE_PATH, 'jwk_enc.json')) as keyf:
key = keyf.read()
info = {
'id_token_encrypted_response_alg': 'A128KW',
'id_token_encrypted_response_enc': 'A128CBC-HS256',
'client_secret': 'some_secret',
'jwks_uri': 'http://example.com/key'}
with responses.RequestsMock() as rsps:
rsps.add(responses.GET, 'http://example.com/key', body=key, content_type='application/json')
payload = self.provider.encrypt('payload', info, 'some_client')
token = JWEnc().unpack(payload)
headers = json.loads(token.protected_header().decode())
assert headers['alg'] == 'A128KW'
assert headers['enc'] == 'A128CBC-HS256'
def test_encrypt_missing_not_recuperated(self):
self.provider.keyjar = KeyJar() # Empty keyjar, all keys are lost
info = {
'id_token_encrypted_response_alg': 'RSA1_5',
'id_token_encrypted_response_enc': 'A128CBC-HS256',
'client_secret': 'some_secret'}
with pytest.raises(JWEException):
self.provider.encrypt('payload', info, 'some_client')
def test_encrypt_userinfo_missing_recuperated(self):
self.provider.keyjar = KeyJar() # Empty keyjar, all keys are lost
with open(os.path.join(BASE_PATH, 'jwk_enc.json')) as keyf:
key = keyf.read()
info = {
'userinfo_encrypted_response_alg': 'A128KW',
'userinfo_encrypted_response_enc': 'A128CBC-HS256',
'client_secret': 'some_secret',
'jwks_uri': 'http://example.com/key'}
with responses.RequestsMock() as rsps:
rsps.add(responses.GET, 'http://example.com/key', body=key, content_type='application/json')
payload = self.provider.encrypt('payload', info, 'some_client', val_type='userinfo')
token = JWEnc().unpack(payload)
headers = json.loads(token.protected_header().decode())
assert headers['alg'] == 'A128KW'
assert headers['enc'] == 'A128CBC-HS256'
def test_encrypt_missing_userinfo_not_recuperated(self):
self.provider.keyjar = KeyJar() # Empty keyjar, all keys are lost
info = {
'userinfo_encrypted_response_alg': 'RSA1_5',
'userinfo_encrypted_response_enc': 'A128CBC-HS256',
'client_secret': 'some_secret'}
with pytest.raises(JWEException):
self.provider.encrypt('payload', info, 'some_client', val_type='userinfo')
def test_recuperate_jwks(self):
self.provider.keyjar = KeyJar() # Empty keyjar, all keys are lost
with open(os.path.join(BASE_PATH, 'jwk_enc.json')) as keyf:
key = keyf.read()
info = {
'id_token_encrypted_response_alg': 'A128KW',
'id_token_encrypted_response_enc': 'A128CBC-HS256',
'client_secret': 'some_secret',
'jwks': json.loads(key)}
self.provider.recuperate_keys('some_client', info)
assert len(self.provider.keyjar.get_issuer_keys('some_client')) == 3
def test_recuperate_jwks_uri(self):
self.provider.keyjar = KeyJar() # Empty keyjar, all keys are lost
with open(os.path.join(BASE_PATH, 'jwk_enc.json')) as keyf:
key = keyf.read()
info = {
'id_token_encrypted_response_alg': 'A128KW',
'id_token_encrypted_response_enc': 'A128CBC-HS256',
'client_secret': 'some_secret',
'jwks_uri': 'http://example.com/key'}
with responses.RequestsMock() as rsps:
rsps.add(responses.GET, 'http://example.com/key', body=key, content_type='application/json')
self.provider.recuperate_keys('some_client', info)
assert len(self.provider.keyjar.get_issuer_keys('some_client')) == 3
def test_recuperate_none(self):
self.provider.keyjar = KeyJar() # Empty keyjar, all keys are lost
info = {
'id_token_encrypted_response_alg': 'A128KW',
'id_token_encrypted_response_enc': 'A128CBC-HS256',
'client_secret': 'some_secret'}
self.provider.recuperate_keys('some_client', info)
assert len(self.provider.keyjar.get_issuer_keys('some_client')) == 2
|
py | 1a3ee045da785c3f7257a5ab7ebf817bd50cc676 | import calendar
import itertools
# https://docs.python.org/2.7/library/itertools.html#recipes
astr = 'abc'
num_list = [42, 2001, 2112]
adict = {'key1': 0, 'key2': 1}
months = calendar.month_name[1:]
month_ends_in_r = [m[-1] == 'r' for m in months]
month_ends_in_y = [1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0]
#chain - iterator from multiple iterables
print list(itertools.chain(astr, num_list, adict))
#flatten
lst = [[0, 1], [2, 3], [4], [5, 6]]
tup_of_tups = ((0, 1), (2, 3), (4,), (5, 6))
print list(itertools.chain(*lst))
print list(itertools.chain.from_iterable(lst))
print list(itertools.chain.from_iterable(tup_of_tups))
#compress - iterator that returns elements if corresponding
#element in selector arg is true
print list(itertools.compress(months, month_ends_in_r))
print list(itertools.compress(months, month_ends_in_y))
#ifilter
print list(itertools.ifilter(lambda x: x[-1] == 'r', months))
#if predicate is None then return elements that are true
print list(itertools.ifilter(None, [True, 0, 1, [0], {}, (9,)]))
#ifilterfalse - same as ifilter but returns False elements
print list(itertools.ifilterfalse(lambda x: x[-1] == 'r', months))
#if predicate is None then return elements that are false
print list(itertools.ifilterfalse(None, [True, 0, 1, [0], {}, (9,), '']))
#takewhile- returns an iterator that returns elements from iterable
#as long as predicate is true (then stops)
lst = [1, 2, 3, 4, 5, 6, 0, 0, 0]
for val in itertools.takewhile(lambda x: x < 4, lst):
print val
print '--'
#dropwhile- returns an iterator that returns all remaining elements from iterable
#once predicate is false
for val in itertools.dropwhile(lambda x: x < 4, lst):
print val
#imap
print list(itertools.imap(lambda x: x*2, num_list))
#islice - negative start/stop/step disallowed
#itertools.islice(iterable, stop)
#itertools.islice(iterable, start, stop[, step])
print list(itertools.islice(months, 3))
print list(itertools.islice(months, 9, None))
print list(itertools.islice(months, 5, 8))
print list(itertools.islice(months, None, None, 2))
#izip - aggregate elements from iterables
print list(itertools.izip('abc', [1, 2, 3], ('i.', 'ii.', 'iii.')))
#izip_longest - iterates over longest iterable in case of uneven len
print list(itertools.izip_longest('abc', [1, 2], ('i.', 'ii.', 'iii.')))
print list(itertools.izip_longest('abc', [1, 2], ('i.', 'ii.', 'iii.'), fillvalue='fv'))
#Cartesian product, permutations, combinations
print list(itertools.product('abc', [42, 2112]))
print list(itertools.permutations('abc'))
print list(itertools.permutations('abc', 2))
print list(itertools.combinations('abc', 2))
#count, cycle, repeat are infinite iterators
#itertools.count(start=0, step=1)
print list(itertools.islice(itertools.count(), 5))
print list(itertools.islice(itertools.count(1, 0.25), 5))
print list(itertools.islice(itertools.cycle('abc'), 7))
#itertools.repeat(object[, times])
print list(itertools.repeat(9, 10))
print list(itertools.islice(itertools.repeat(9), 5))
#itertools.tee(iterable[, n=2])
a, b = itertools.tee(iter([42, 2112]))
print a.next()
print a.next()
print b.next()
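#grouper - one of the recipes from the docs page linked at the top (hedged sketch added
#here for illustration, kept in this file's Python 2 style): collect data into
#fixed-length chunks, padding the last chunk with fillvalue
def grouper(iterable, n, fillvalue=None):
    # grouper('ABCDEFG', 3, 'x') --> 'ABC' 'DEF' 'Gxx'
    args = [iter(iterable)] * n
    return itertools.izip_longest(fillvalue=fillvalue, *args)
print map(''.join, grouper('ABCDEFG', 3, 'x'))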
|
py | 1a3ee0ef39e90105e9aae43460548243e3ac5e86 | """Definition of the Element Summation Component."""
import collections
import numpy as np
from scipy import sparse as sp
from six import string_types
from openmdao.core.explicitcomponent import ExplicitComponent
class SumComp(ExplicitComponent):
r"""
Compute a vectorized summation.
Use the add_equation method to define any number of summations
User defines the names of the input and output variables using
add_equation(output_name='my_output', input_name='my_input')
Use option axis = None to sum over all array elements. Default
behavior sums along the columns.
    .. math::
        \textrm{result}_j = \sum_{i=1}^{\textrm{vec\_size}} a_{ij} \cdot \textrm{scaling factor}
    where
        - a is the input, of shape (vec_size, n)
    Result is of shape (1, n) or (1, )
Attributes
----------
_add_systems : list
List of equation systems to be initialized with the system.
"""
def __init__(self, output_name=None, input_name=None, vec_size=1, length=1,
val=1.0, scaling_factor=1, **kwargs):
"""
        Allow the user to create a summation system with a one-liner.
Parameters
----------
output_name : str
(required) name of the result variable in this component's namespace.
input_name : str
(required) name of the input variable for this system
vec_size : int
Length of the first dimension of the input and output vectors
(i.e number of rows, or vector length for a 1D vector)
Default is 1
length : int
            Length of the second dimension of the input and output vectors (i.e. number of columns)
Default is 1 which results in input/output vectors of size (vec_size,)
scaling_factor : numeric
Scaling factor to apply to the whole system
Default is 1
val : float or list or tuple or ndarray
The initial value of the variable being added in user-defined units. Default is 1.0.
**kwargs : str
Any other arguments to pass to the addition system
(same as add_output method for ExplicitComponent)
Examples include units (str or None), desc (str)
"""
axis = kwargs.pop('axis', 0)
super(SumComp, self).__init__(axis=axis)
self._add_systems = []
if isinstance(output_name, string_types):
self._add_systems.append((output_name, input_name, vec_size, length, val,
scaling_factor, kwargs))
elif isinstance(output_name, collections.Iterable):
raise NotImplementedError('Declaring multiple systems '
                                      'on instantiation is not implemented. '
                                      'Use a string to name a single summation relationship or use '
'multiple add_equation calls')
elif output_name is None:
pass
else:
raise ValueError(
"first argument to init must be either of type "
"`str' or 'None'")
def initialize(self):
"""
Declare options.
Parameters
----------
axis : int or None
Sum along this axis. Default 0 sums along first dimension.
None sums all elements into a scalar.
1 sums along rows.
"""
self.options.declare('axis', default=0,
desc="Axis along which to sum")
def add_equation(self, output_name, input_name, vec_size=1, length=1, val=1.0,
units=None, res_units=None, desc='', lower=None, upper=None, ref=1.0,
ref0=0.0, res_ref=None, scaling_factor=1):
"""
        Add a summation relation.
Parameters
----------
output_name : str
(required) name of the result variable in this component's namespace.
input_name : iterable of str
(required) names of the input variables for this system
vec_size : int
Length of the first dimension of the input and output vectors
(i.e number of rows, or vector length for a 1D vector)
Default is 1
length : int
            Length of the second dimension of the input and output vectors (i.e. number of columns)
Default is 1 which results in input/output vectors of size (vec_size,)
scaling_factor : numeric
Scaling factor to apply to the whole system
Default is 1
val : float or list or tuple or ndarray
The initial value of the variable being added in user-defined units. Default is 1.0.
units : str or None
Units in which the output variables will be provided to the component during execution.
Default is None, which means it has no units.
res_units : str or None
Units in which the residuals of this output will be given to the user when requested.
Default is None, which means it has no units.
desc : str
description of the variable.
lower : float or list or tuple or ndarray or Iterable or None
lower bound(s) in user-defined units. It can be (1) a float, (2) an array_like
consistent with the shape arg (if given), or (3) an array_like matching the shape of
val, if val is array_like. A value of None means this output has no lower bound.
Default is None.
        upper : float or list or tuple or ndarray or Iterable or None
upper bound(s) in user-defined units. It can be (1) a float, (2) an array_like
consistent with the shape arg (if given), or (3) an array_like matching the shape of
val, if val is array_like. A value of None means this output has no upper bound.
Default is None.
ref : float or ndarray
Scaling parameter. The value in the user-defined units of this output variable when
the scaled value is 1. Default is 1.
ref0 : float or ndarray
Scaling parameter. The value in the user-defined units of this output variable when
the scaled value is 0. Default is 0.
res_ref : float or ndarray
Scaling parameter. The value in the user-defined res_units of this output's residual
when the scaled value is 1. Default is 1.
"""
kwargs = {'units': units, 'res_units': res_units, 'desc': desc,
'lower': lower, 'upper': upper, 'ref': ref, 'ref0': ref0,
'res_ref': res_ref}
self._add_systems.append((output_name, input_name, vec_size, length, val,
scaling_factor, kwargs))
def add_output(self):
"""
Use add_equation instead of add_output to define equation systems.
"""
        raise NotImplementedError('Use the add_equation method, not add_output, '
                                  'to create a summation relation')
def setup(self):
"""
Set up the addition/subtraction system at run time.
"""
axis = self.options['axis']
for (output_name, input_name, vec_size, length, val,
scaling_factor, kwargs) in self._add_systems:
units = kwargs.get('units', None)
desc = kwargs.get('desc', '')
if length == 1:
shape = (vec_size,)
else:
shape = (vec_size, length)
self.add_input(input_name, shape=shape, units=units,
desc=desc + '_inp_' + input_name)
if axis is None:
rowidx = np.zeros(vec_size * length)
output_shape = (1,)
elif axis == 0:
output_arange = np.arange(0, length)
rowidx = np.tile(output_arange, vec_size)
if length == 1:
output_shape = (1,)
else:
output_shape = (1, length)
elif axis == 1:
output_arange = np.arange(0, vec_size)
rowidx = np.repeat(output_arange, length)
output_shape = (vec_size,)
else:
raise ValueError('Summation is allowed only over axis=0, 1 or None')
colidx = np.arange(0, vec_size * length)
self.declare_partials([output_name], [input_name],
rows=rowidx, cols=colidx,
val=scaling_factor * np.ones(vec_size * length))
super(SumComp, self).add_output(output_name, val,
shape=output_shape,
**kwargs)
def compute(self, inputs, outputs):
"""
Compute the summation using numpy.
Parameters
----------
inputs : Vector
unscaled, dimensional input variables read via inputs[key]
outputs : Vector
unscaled, dimensional output variables read via outputs[key]
"""
axis = self.options['axis']
for (output_name, input_name, vec_size, length, val, scaling_factor,
kwargs) in self._add_systems:
if axis is None:
output_shape = (1,)
elif axis == 0:
if length == 1:
output_shape = (1,)
else:
output_shape = (1, length)
elif axis == 1:
output_shape = (vec_size,)
result = np.sum(inputs[input_name], axis=axis) * scaling_factor
outputs[output_name] = result.reshape(output_shape)
|
py | 1a3ee1b3774ff2ac17892aabf997768820750cf5 | # Each items Text Image separately processed in early pipelines and then fused in attention blocks
# Try Fusing each items text early using [SEP] tokens as well
|
py | 1a3ee24010a46a1439a5ea25e1f1de24f44eea56 | # 不觉得代码顶头没有几句`import`很难受吗?
# 有条件者可使用PyPy运行。
result = set()
with open('words_alpha.txt', encoding='utf-8') as f:
for word in f.read().splitlines():
result.add(word)
with open('out.txt', 'wb') as f:
for word in sorted(result):
        if len(word) >= 5:  # filter words!
try:
f.write(word.encode('ascii'))
f.write(b'\n')
except Exception as e:
print(e, word)
exit() |
py | 1a3ee2a2ad198789e29ad46261de396e591306bc | """
Django settings for test_32680 project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import environ
import logging
from modules.manifest import get_modules
env = environ.Env()
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool("DEBUG", default=False)
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env.str("SECRET_KEY")
ALLOWED_HOSTS = env.list("HOST", default=["*"])
SITE_ID = 1
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
SECURE_SSL_REDIRECT = env.bool("SECURE_REDIRECT", default=False)
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites'
]
LOCAL_APPS = [
'home',
'users.apps.UsersConfig',
]
THIRD_PARTY_APPS = [
'rest_framework',
'rest_framework.authtoken',
'rest_auth',
'rest_auth.registration',
'bootstrap4',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.google',
'django_extensions',
'drf_yasg',
'storages',
]
MODULES_APPS = get_modules()
INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS + MODULES_APPS
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'test_32680.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'web_build')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'test_32680.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
if env.str("DATABASE_URL", default=None):
DATABASES = {
'default': env.db()
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
MIDDLEWARE += ['whitenoise.middleware.WhiteNoiseMiddleware']
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend'
)
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'static'), os.path.join(BASE_DIR, 'web_build/static')]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# allauth / users
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = "optional"
ACCOUNT_CONFIRM_EMAIL_ON_GET = True
ACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True
ACCOUNT_UNIQUE_EMAIL = True
LOGIN_REDIRECT_URL = "users:redirect"
ACCOUNT_ADAPTER = "users.adapters.AccountAdapter"
SOCIALACCOUNT_ADAPTER = "users.adapters.SocialAccountAdapter"
ACCOUNT_ALLOW_REGISTRATION = env.bool("ACCOUNT_ALLOW_REGISTRATION", True)
SOCIALACCOUNT_ALLOW_REGISTRATION = env.bool("SOCIALACCOUNT_ALLOW_REGISTRATION", True)
REST_AUTH_SERIALIZERS = {
# Replace password reset serializer to fix 500 error
"PASSWORD_RESET_SERIALIZER": "home.api.v1.serializers.PasswordSerializer",
}
REST_AUTH_REGISTER_SERIALIZERS = {
# Use custom serializer that has no username and matches web signup
"REGISTER_SERIALIZER": "home.api.v1.serializers.SignupSerializer",
}
# Custom user model
AUTH_USER_MODEL = "users.User"
EMAIL_HOST = env.str("EMAIL_HOST", "smtp.sendgrid.net")
EMAIL_HOST_USER = env.str("SENDGRID_USERNAME", "")
EMAIL_HOST_PASSWORD = env.str("SENDGRID_PASSWORD", "")
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# AWS S3 config
AWS_ACCESS_KEY_ID = env.str("AWS_ACCESS_KEY_ID", "")
AWS_SECRET_ACCESS_KEY = env.str("AWS_SECRET_ACCESS_KEY", "")
AWS_STORAGE_BUCKET_NAME = env.str("AWS_STORAGE_BUCKET_NAME", "")
AWS_STORAGE_REGION = env.str("AWS_STORAGE_REGION", "")
USE_S3 = (
AWS_ACCESS_KEY_ID and
AWS_SECRET_ACCESS_KEY and
AWS_STORAGE_BUCKET_NAME and
AWS_STORAGE_REGION
)
if USE_S3:
AWS_S3_CUSTOM_DOMAIN = env.str("AWS_S3_CUSTOM_DOMAIN", "")
AWS_S3_OBJECT_PARAMETERS = {"CacheControl": "max-age=86400"}
AWS_DEFAULT_ACL = env.str("AWS_DEFAULT_ACL", "public-read")
AWS_MEDIA_LOCATION = env.str("AWS_MEDIA_LOCATION", "media")
AWS_AUTO_CREATE_BUCKET = env.bool("AWS_AUTO_CREATE_BUCKET", True)
DEFAULT_FILE_STORAGE = env.str(
"DEFAULT_FILE_STORAGE", "home.storage_backends.MediaStorage"
)
MEDIA_URL = '/mediafiles/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'mediafiles')
# Swagger settings for api docs
SWAGGER_SETTINGS = {
"DEFAULT_INFO": f"{ROOT_URLCONF}.api_info",
}
if DEBUG or not (EMAIL_HOST_USER and EMAIL_HOST_PASSWORD):
# output email to console instead of sending
if not DEBUG:
logging.warning("You should setup `SENDGRID_USERNAME` and `SENDGRID_PASSWORD` env vars to send emails.")
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
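# Hedged local-development sketch (not part of the original settings): the values this module
# reads via `env` can be supplied through a .env file loaded with environ.Env.read_env(), e.g.
#   DEBUG=True
#   SECRET_KEY=dev-only-secret
#   DATABASE_URL=postgres://user:pass@localhost:5432/test_32680
#   HOST=localhost,127.0.0.1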
|
py | 1a3ee2b737a22a182f40921e036fabccb57434d5 | # Copyright (c) 2020, NVIDIA CORPORATION.
import itertools
import warnings
import numpy as np
import pandas as pd
import cudf
import cudf._lib as libcudf
from cudf._lib.join import compute_result_col_names
from cudf.core.dtypes import CategoricalDtype
class Merge(object):
def __init__(
self,
lhs,
rhs,
on,
left_on,
right_on,
left_index,
right_index,
how,
sort,
lsuffix,
rsuffix,
method,
indicator,
suffixes,
):
"""
Manage the merging of two Frames.
Parameters
----------
lhs : Series or DataFrame
The left operand of the merge
rhs : Series or DataFrame
The right operand of the merge
on : string or list like
A set of key columns in the left and right operands
elements must be common to both frames
left_on : string or list like
A set of key columns in the left operand. Must be
specified with right_on or right_index concurrently
right_on : string or list like
A set of key columns in the right operand. Must be
specified with left_on or left_index concurrently
left_index : bool
Boolean flag indicating the left index column or columns
are to be used as join keys in order.
right_index : bool
            Boolean flag indicating the right index column or columns
are to be used as join keys in order.
how : string
The type of join. Possible values are
'inner', 'outer', 'left', 'leftsemi' and 'leftanti'
sort : bool
Boolean flag indicating if the output Frame is to be
sorted on the output's join keys, in left to right order.
lsuffix : string
The suffix to be appended to left hand column names that
are found to exist in the right frame, but are not specified
as join keys themselves.
rsuffix : string
The suffix to be appended to right hand column names that
are found to exist in the left frame, but are not specified
as join keys themselves.
suffixes : list like
Left and right suffixes specified together, unpacked into lsuffix
and rsuffix.
"""
self.lhs = lhs
self.rhs = rhs
self.left_index = left_index
self.right_index = right_index
self.method = method
self.sort = sort
# check that the merge is valid
self.validate_merge_cfg(
lhs,
rhs,
on,
left_on,
right_on,
left_index,
right_index,
how,
lsuffix,
rsuffix,
suffixes,
)
self.how = how
self.preprocess_merge_params(
on, left_on, right_on, lsuffix, rsuffix, suffixes
)
def perform_merge(self):
"""
Call libcudf to perform a merge between the operands. If
necessary, cast the input key columns to compatible types.
Potentially also cast the output back to categorical.
"""
output_dtypes = self.compute_output_dtypes()
self.typecast_input_to_libcudf()
libcudf_result = libcudf.join.join(
self.lhs,
self.rhs,
self.how,
self.method,
left_on=self.left_on,
right_on=self.right_on,
left_index=self.left_index,
right_index=self.right_index,
)
result = self.out_class._from_table(libcudf_result)
result = self.typecast_libcudf_to_output(result, output_dtypes)
return result[compute_result_col_names(self.lhs, self.rhs, self.how)]
def preprocess_merge_params(
self, on, left_on, right_on, lsuffix, rsuffix, suffixes
):
"""
Translate a valid configuration of user input parameters into
the subset of input configurations handled by the cython layer.
Apply suffixes to columns.
"""
self.out_class = cudf.DataFrame
if on:
on = [on] if isinstance(on, str) else list(on)
left_on = right_on = on
else:
if left_on:
left_on = (
[left_on] if isinstance(left_on, str) else list(left_on)
)
if right_on:
right_on = (
[right_on] if isinstance(right_on, str) else list(right_on)
)
same_named_columns = set(self.lhs._data.keys()) & set(
self.rhs._data.keys()
)
if not (left_on or right_on) and not (
self.left_index and self.right_index
):
left_on = right_on = list(same_named_columns)
no_suffix_cols = []
if left_on and right_on:
no_suffix_cols = [
left_name
for left_name, right_name in zip(left_on, right_on)
if left_name == right_name and left_name in same_named_columns
]
if suffixes:
lsuffix, rsuffix = suffixes
for name in same_named_columns:
if name not in no_suffix_cols:
self.lhs.rename({name: f"{name}{lsuffix}"}, inplace=True)
self.rhs.rename({name: f"{name}{rsuffix}"}, inplace=True)
if left_on and name in left_on:
left_on[left_on.index(name)] = f"{name}{lsuffix}"
if right_on and name in right_on:
right_on[right_on.index(name)] = f"{name}{rsuffix}"
self.left_on = left_on if left_on is not None else []
self.right_on = right_on if right_on is not None else []
self.lsuffix = lsuffix
self.rsuffix = rsuffix
@staticmethod
def validate_merge_cfg(
lhs,
rhs,
on,
left_on,
right_on,
left_index,
right_index,
how,
lsuffix,
rsuffix,
suffixes,
):
"""
Error for various invalid combinations of merge input parameters
"""
# must actually support the requested merge type
if how not in {"left", "inner", "outer", "leftanti", "leftsemi"}:
raise NotImplementedError(f"{how} merge not supported yet")
# Passing 'on' with 'left_on' or 'right_on' is ambiguous
if on and (left_on or right_on):
raise ValueError(
'Can only pass argument "on" OR "left_on" '
'and "right_on", not a combination of both.'
)
# Can't merge on unnamed Series
if (isinstance(lhs, cudf.Series) and not lhs.name) or (
isinstance(rhs, cudf.Series) and not rhs.name
):
raise ValueError("Can not merge on unnamed Series")
# Keys need to be in their corresponding operands
if on:
if isinstance(on, str):
on_keys = [on]
elif isinstance(on, tuple):
on_keys = list(on)
else:
on_keys = on
for key in on_keys:
if not (key in lhs._data.keys() and key in rhs._data.keys()):
raise KeyError(f"on key {on} not in both operands")
elif left_on and right_on:
left_on_keys = (
[left_on] if not isinstance(left_on, list) else left_on
)
right_on_keys = (
[right_on] if not isinstance(right_on, list) else right_on
)
for key in left_on_keys:
if key not in lhs._data.keys():
raise KeyError(f'Key "{key}" not in left operand')
for key in right_on_keys:
if key not in rhs._data.keys():
raise KeyError(f'Key "{key}" not in right operand')
# Require same total number of columns to join on in both operands
len_left_on = 0
len_right_on = 0
if left_on:
len_left_on += (
len(left_on) if pd.api.types.is_list_like(left_on) else 1
)
if right_on:
len_right_on += (
len(right_on) if pd.api.types.is_list_like(right_on) else 1
)
if not (len_left_on + left_index * lhs._num_indices) == (
len_right_on + right_index * rhs._num_indices
):
raise ValueError(
"Merge operands must have same number of join key columns"
)
# If nothing specified, must have common cols to use implicitly
same_named_columns = set(lhs._data.keys()) & set(rhs._data.keys())
if (
not (left_index or right_index)
and not (left_on or right_on)
and len(same_named_columns) == 0
):
raise ValueError("No common columns to perform merge on")
if suffixes:
lsuffix, rsuffix = suffixes
for name in same_named_columns:
if name == left_on == right_on:
continue
elif left_on and right_on:
if (name in left_on and name in right_on) and (
left_on.index(name) == right_on.index(name)
):
continue
else:
if not (lsuffix or rsuffix):
raise ValueError(
"there are overlapping columns but "
"lsuffix and rsuffix are not defined"
)
def typecast_input_to_libcudf(self):
"""
Check each pair of join keys in the left and right hand
operands and apply casting rules to match their types
before passing the result to libcudf.
"""
lhs_keys, rhs_keys, lhs_cols, rhs_cols = [], [], [], []
if self.left_index:
lhs_keys.append(self.lhs.index._data.keys())
lhs_cols.append(self.lhs.index)
if self.right_index:
rhs_keys.append(self.rhs.index._data.keys())
rhs_cols.append(self.rhs.index)
if self.left_on:
lhs_keys.append(self.left_on)
lhs_cols.append(self.lhs)
if self.right_on:
rhs_keys.append(self.right_on)
rhs_cols.append(self.rhs)
for l_key_grp, r_key_grp, l_col_grp, r_col_grp in zip(
lhs_keys, rhs_keys, lhs_cols, rhs_cols
):
for l_key, r_key in zip(l_key_grp, r_key_grp):
to_dtype = self.input_to_libcudf_casting_rules(
l_col_grp._data[l_key], r_col_grp._data[r_key], self.how
)
l_col_grp._data[l_key] = l_col_grp._data[l_key].astype(
to_dtype
)
r_col_grp._data[r_key] = r_col_grp._data[r_key].astype(
to_dtype
)
def input_to_libcudf_casting_rules(self, lcol, rcol, how):
"""
Determine what dtype the left and right hand
input columns must be cast to for a libcudf
join to proceed.
"""
cast_warn = (
"can't safely cast column from {} with type"
" {} to {}, upcasting to {}"
)
ctgry_err = (
"can't implicitly cast column {0} to categories"
" from {1} during {1} join"
)
dtype_l = lcol.dtype
dtype_r = rcol.dtype
libcudf_join_type = None
if pd.api.types.is_dtype_equal(dtype_l, dtype_r):
# if categorical and equal, children passed to libcudf
libcudf_join_type = dtype_l
elif isinstance(dtype_l, CategoricalDtype) and isinstance(
dtype_r, CategoricalDtype
):
# categories are not equal
libcudf_join_type = np.dtype("O")
elif how == "left":
check_col = rcol.fillna(0)
if not check_col.can_cast_safely(dtype_l):
libcudf_join_type = self.input_to_libcudf_casting_rules(
lcol, rcol, "inner"
)
warnings.warn(
cast_warn.format(
"right", dtype_r, dtype_l, libcudf_join_type
)
)
else:
libcudf_join_type = dtype_l
elif how == "right":
check_col = lcol.fillna(0)
if not check_col.can_cast_safely(dtype_r):
libcudf_join_type = self.input_to_libcudf_casting_rules(
lcol, rcol, "inner"
)
warnings.warn(
cast_warn.format(
"left", dtype_l, dtype_r, libcudf_join_type
)
)
else:
libcudf_join_type = dtype_r
elif isinstance(dtype_l, CategoricalDtype):
if how == "right":
raise ValueError(ctgry_err.format(rcol, "right"))
libcudf_join_type = lcol.cat().categories.dtype
elif isinstance(dtype_r, CategoricalDtype):
if how == "left":
raise ValueError(ctgry_err.format(lcol, "left"))
libcudf_join_type = rcol.cat().categories.dtype
elif how in {"inner", "outer"}:
if (np.issubdtype(dtype_l, np.number)) and (
np.issubdtype(dtype_r, np.number)
):
if dtype_l.kind == dtype_r.kind:
# both ints or both floats
libcudf_join_type = max(dtype_l, dtype_r)
else:
libcudf_join_type = np.find_common_type(
[], [dtype_l, dtype_r]
)
elif np.issubdtype(dtype_l, np.datetime64) and np.issubdtype(
dtype_r, np.datetime64
):
libcudf_join_type = max(dtype_l, dtype_r)
return libcudf_join_type
def libcudf_to_output_casting_rules(self, lcol, rcol, how):
"""
Determine what dtype an output merge key column should be
cast to after it has been processed by libcudf. Determine
if a column should be promoted to a categorical datatype.
"""
dtype_l = lcol.dtype
dtype_r = rcol.dtype
merge_return_type = None
# we currently only need to do this for categorical variables
if isinstance(dtype_l, CategoricalDtype) and isinstance(
dtype_r, CategoricalDtype
):
if pd.api.types.is_dtype_equal(dtype_l, dtype_r):
if how in {"inner", "left"}:
merge_return_type = dtype_l
elif how == "outer" and not (
dtype_l.ordered or dtype_r.ordered
):
new_cats = cudf.concat(
dtype_l.categories, dtype_r.categories
).unique()
merge_return_type = cudf.core.dtypes.CategoricalDtype(
categories=new_cats
)
else:
merge_return_type = "category"
return merge_return_type
def compute_output_dtypes(self):
"""
Determine what datatypes should be applied to the result
        of a libcudf join, based on the original left and right
frames.
"""
index_dtypes = {}
l_data_join_cols = {}
r_data_join_cols = {}
data_dtypes = {
name: col.dtype
for name, col in itertools.chain(
self.lhs._data.items(), self.rhs._data.items()
)
}
if self.left_index and self.right_index:
l_idx_join_cols = list(self.lhs.index._data.values())
r_idx_join_cols = list(self.rhs.index._data.values())
elif self.left_on and self.right_index:
            # Keep the original dtypes in the LEFT index if possible
# should trigger a bunch of no-ops
l_idx_join_cols = list(self.lhs.index._data.values())
r_idx_join_cols = list(self.lhs.index._data.values())
for i, name in enumerate(self.left_on):
l_data_join_cols[name] = self.lhs._data[name]
r_data_join_cols[name] = list(self.rhs.index._data.values())[i]
elif self.left_index and self.right_on:
# see above
l_idx_join_cols = list(self.rhs.index._data.values())
r_idx_join_cols = list(self.rhs.index._data.values())
for i, name in enumerate(self.right_on):
l_data_join_cols[name] = list(self.lhs.index._data.values())[i]
r_data_join_cols[name] = self.rhs._data[name]
if self.left_on and self.right_on:
l_data_join_cols = self.lhs._data
r_data_join_cols = self.rhs._data
for i in range(
(self.left_index or self.right_index)
* len(self.lhs.index._data.items())
):
index_dtypes[i] = self.libcudf_to_output_casting_rules(
l_idx_join_cols[i], r_idx_join_cols[i], self.how
)
for name in itertools.chain(self.left_on, self.right_on):
if name in self.left_on and name in self.right_on:
data_dtypes[name] = self.libcudf_to_output_casting_rules(
l_data_join_cols[name], r_data_join_cols[name], self.how
)
return (index_dtypes, data_dtypes)
def typecast_libcudf_to_output(self, output, output_dtypes):
"""
Apply precomputed output index and data column data types
to the output of a libcudf join.
"""
index_dtypes, data_dtypes = output_dtypes
if output._index and len(index_dtypes) > 0:
for index_dtype, index_col_lbl, index_col in zip(
index_dtypes.values(),
output._index._data.keys(),
output._index._data.values(),
):
if index_dtype:
output._index._data[
index_col_lbl
] = self._build_output_col(index_col, index_dtype)
for data_col_lbl, data_col in output._data.items():
data_dtype = data_dtypes[data_col_lbl]
if data_dtype:
output._data[data_col_lbl] = self._build_output_col(
data_col, data_dtype
)
return output
def _build_output_col(self, col, dtype):
if isinstance(
dtype, (cudf.core.dtypes.CategoricalDtype, pd.CategoricalDtype)
):
outcol = cudf.core.column.build_categorical_column(
categories=dtype.categories,
codes=col.set_mask(None),
mask=col.base_mask,
)
else:
outcol = col.astype(dtype)
return outcol
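# Illustrative sketch of the mixed-kind promotion rule used in
# input_to_libcudf_casting_rules above; it only exercises numpy and is not part
# of the original module.
#
#   import numpy as np
#   np.find_common_type([], [np.dtype("int32"), np.dtype("float32")])  # -> float64
#   max(np.dtype("int32"), np.dtype("int64"))                          # -> int64 (same kind)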
|
py | 1a3ee3427727e94bf8c8804957cbfd3c0685cdc1 | """
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
SPDX-License-Identifier: MIT-0
"""
import re
from cfnlint.rules import CloudFormationLintRule
from cfnlint.rules import RuleMatch
class SubNeeded(CloudFormationLintRule):
"""Check if a substitution string exists without a substitution function"""
id = 'E1029'
shortdesc = 'Sub is required if a variable is used in a string'
description = 'If a substitution variable exists in a string but isn\'t wrapped with the Fn::Sub function the deployment will fail.'
source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-sub.html'
tags = ['functions', 'sub']
# Free-form text properties to exclude from this rule
# content is part of AWS::CloudFormation::Init
excludes = ['UserData', 'ZipFile', 'Condition', 'AWS::CloudFormation::Init',
'CloudWatchAlarmDefinition', 'TopicRulePayload']
api_excludes = ['Uri', 'Body']
# IAM Policy has special variables that don't require !Sub, Check for these
# https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_variables.html
# https://docs.aws.amazon.com/iot/latest/developerguide/basic-policy-variables.html
# https://docs.aws.amazon.com/iot/latest/developerguide/thing-policy-variables.html
# https://docs.aws.amazon.com/transfer/latest/userguide/users.html#users-policies-scope-down
# https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_iam-condition-keys.html
resource_excludes = ['${aws:CurrentTime}', '${aws:EpochTime}',
'${aws:TokenIssueTime}', '${aws:principaltype}',
'${aws:SecureTransport}', '${aws:SourceIp}',
'${aws:UserAgent}', '${aws:userid}',
'${aws:username}', '${ec2:SourceInstanceARN}',
'${iot:Connection.Thing.ThingName}',
'${iot:Connection.Thing.ThingTypeName}',
'${iot:Connection.Thing.IsAttached}',
'${iot:ClientId}', '${transfer:HomeBucket}',
'${transfer:HomeDirectory}', '${transfer:HomeFolder}',
'${transfer:UserName}', '${redshift:DbUser}',
'${cognito-identity.amazonaws.com:aud}',
'${cognito-identity.amazonaws.com:sub}',
'${cognito-identity.amazonaws.com:amr}']
# https://docs.aws.amazon.com/redshift/latest/mgmt/redshift-iam-access-control-identity-based.html
condition_excludes = [
'${redshift:DbUser}',
]
def _match_values(self, searchRegex, cfnelem, path):
"""Recursively search for values matching the searchRegex"""
values = []
if isinstance(cfnelem, dict):
for key in cfnelem:
pathprop = path[:]
pathprop.append(key)
values.extend(self._match_values(searchRegex, cfnelem[key], pathprop))
elif isinstance(cfnelem, list):
for index, item in enumerate(cfnelem):
pathprop = path[:]
pathprop.append(index)
values.extend(self._match_values(searchRegex, item, pathprop))
else:
# Leaf node
if isinstance(cfnelem, str) and re.match(searchRegex, cfnelem):
                # Get all variables as separate paths
regex = re.compile(r'(\$\{.*?\.?.*?})')
for variable in re.findall(regex, cfnelem):
values.append(path + [variable])
return values
def match_values(self, searchRegex, cfn):
"""
Search for values in all parts of the templates that match the searchRegex
"""
results = []
results.extend(self._match_values(searchRegex, cfn.template, []))
# Globals are removed during a transform. They need to be checked manually
results.extend(self._match_values(searchRegex, cfn.template.get('Globals', {}), []))
return results
def _api_exceptions(self, value):
""" Key value exceptions """
parameter_search = re.compile(r'^\$\{stageVariables\..*\}$')
return re.match(parameter_search, value)
def match(self, cfn):
"""Basic Rule Matching"""
matches = []
# Generic regex to match a string containing at least one ${parameter}
parameter_search = re.compile(r'^.*(\$\{.*\}.*(\$\{.*\}.*)*)$')
# Get a list of paths to every leaf node string containing at least one ${parameter}
parameter_string_paths = self.match_values(parameter_search, cfn)
# We want to search all of the paths to check if each one contains an 'Fn::Sub'
for parameter_string_path in parameter_string_paths:
            # Exclude the special IAM variables
variable = parameter_string_path[-1]
if 'Resource' in parameter_string_path:
if variable in self.resource_excludes:
continue
if 'Condition' in parameter_string_path:
if variable in self.condition_excludes:
continue
# Exclude literals (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-sub.html)
if variable.startswith('${!'):
continue
found_sub = False
# Does the path contain an 'Fn::Sub'?
for step in parameter_string_path:
if step in self.api_excludes:
if self._api_exceptions(parameter_string_path[-1]):
found_sub = True
elif step == 'Fn::Sub' or step in self.excludes:
found_sub = True
# If we didn't find an 'Fn::Sub' it means a string containing a ${parameter} may not be evaluated correctly
if not found_sub:
# Remove the last item (the variable) to prevent multiple errors on 1 line errors
path = parameter_string_path[:-1]
message = 'Found an embedded parameter outside of an "Fn::Sub" at {}'.format(
'/'.join(map(str, path)))
matches.append(RuleMatch(path, message))
return matches
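# Illustrative sketch (not part of the original rule) of the two regexes above:
#
#   import re
#   text = "arn:aws:s3:::${BucketName}/${!Literal}"
#   re.match(r'^.*(\$\{.*\}.*(\$\{.*\}.*)*)$', text) is not None  # -> True
#   re.findall(r'(\$\{.*?\.?.*?})', text)  # -> ['${BucketName}', '${!Literal}']
#   # '${!Literal}' is later skipped because it starts with '${!'.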
|
py | 1a3ee486cb89e3f7f8274cbe2fd0164c3acb9f90 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'app_win_frm_src.ui'
#
# Created by: PyQt5 UI code generator 5.7
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_window(object):
def setupUi(self, window):
window.setObjectName("window")
window.resize(273, 110)
self.centralWidget = QtWidgets.QWidget(window)
self.centralWidget.setObjectName("centralWidget")
self.cmdOk = QtWidgets.QPushButton(self.centralWidget)
self.cmdOk.setGeometry(QtCore.QRect(96, 41, 81, 22))
self.cmdOk.setObjectName("cmdOk")
self.txtName = QtWidgets.QLineEdit(self.centralWidget)
self.txtName.setGeometry(QtCore.QRect(118, 5, 146, 22))
self.txtName.setObjectName("txtName")
self.lblName = QtWidgets.QLabel(self.centralWidget)
self.lblName.setGeometry(QtCore.QRect(6, 8, 107, 16))
self.lblName.setObjectName("lblName")
window.setCentralWidget(self.centralWidget)
self.menuBar = QtWidgets.QMenuBar(window)
self.menuBar.setGeometry(QtCore.QRect(0, 0, 273, 19))
self.menuBar.setObjectName("menuBar")
window.setMenuBar(self.menuBar)
self.mainToolBar = QtWidgets.QToolBar(window)
self.mainToolBar.setObjectName("mainToolBar")
window.addToolBar(QtCore.Qt.TopToolBarArea, self.mainToolBar)
self.retranslateUi(window)
QtCore.QMetaObject.connectSlotsByName(window)
def retranslateUi(self, window):
_translate = QtCore.QCoreApplication.translate
window.setWindowTitle(_translate("window", "MainWindow"))
self.cmdOk.setText(_translate("window", "Ok"))
self.txtName.setText(_translate("window", "Max"))
self.lblName.setText(_translate("window", "Enter your name: "))
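# Illustrative usage sketch (not part of the generated file, which pyuic5 will
# overwrite on regeneration): the class is normally applied to a QMainWindow
# from separate application code.
#
#   import sys
#   from PyQt5 import QtWidgets
#   app = QtWidgets.QApplication(sys.argv)
#   window = QtWidgets.QMainWindow()
#   ui = Ui_window()
#   ui.setupUi(window)
#   window.show()
#   sys.exit(app.exec_())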
|
py | 1a3ee4c58c9706b854875a0e8bb6cb511065887f | import clara
import requests as r
import os
from flask import json
from flask import jsonify
from flask import Flask
from flask import request
app = Flask(__name__)
telegram_key = ''
@app.route("/")
def main():
return "Personal Clara instance."
@app.route("/new-message", methods=['POST'])
def handle_message():
message = request.json
    # `brain` is assumed to be a clara chatbot instance created elsewhere in the project.
    response = brain.get_response(message['text'].lower())
    data = {
        'chat_id': message['chat']['id'],
        'text': response
    }
    url = 'https://api.telegram.org/' + telegram_key + '/sendMessage'
    r.post(url, data)
return 'Thanks!'
if __name__ == "__main__":
port = int(os.environ.get("PORT", 2525))
app.run(host='0.0.0.0', port=port, debug=True)
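# Illustrative webhook payload sketch: the handler above only reads the 'text'
# and 'chat.id' fields; the values here are made up.
#
#   {"text": "hello", "chat": {"id": 123456789}}
#
# Telegram has to be pointed at the /new-message route (via its setWebhook API)
# for this handler to receive updates.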
|
py | 1a3ee625c077a8a22761f6fe1871236a576d44a9 | # coding=utf-8
# This is a sample Python script.
from aliyunIoT import Device
import ujson
import network
import utime as time
from driver import GPIO
from driver import UART
t1 = 30
gas_threshold = 5.0
liq_mdcn_alarm = False
gas_alarm = False
version = 'v0.0.1'
uart1 = UART('serial1')
liq_level = GPIO()
gpio = GPIO()
'''cloud_ctrl: 0 or 1 means cloud control, 2 means local control'''
cloud_ctrl = 2
g_connect_status = False
ini_file_name = '/user/cfg.txt'
def on_4g_cb(args):
global g_connect_status
pdp = args[0]
netwk_sta = args[1]
if netwk_sta == 1:
g_connect_status = True
else:
g_connect_status = False
def connect_network():
global on_4g_cb,g_connect_status
net = network.NetWorkClient()
g_register_network = False
if net._stagecode is not None and net._stagecode == 3 and net._subcode == 1:
g_register_network = True
else:
g_register_network = False
if g_register_network:
net.on(1,on_4g_cb)
net.connect(None)
else:
print('connect network failed')
for i in range(30):
if g_connect_status:
print('connect network success')
return True
time.sleep(1)
return False
def read_cfg_file():
global t1,gas_threshold,ini_file_name
try:
f = open(ini_file_name,'r')
except OSError:
cfg_dict = {'gasstr':1.0,'t1':60}
print('write',cfg_dict)
f = open(ini_file_name,'w+')
print(f)
f.write(ujson.dumps(cfg_dict))
else:
cfg_txt = f.read()
cfg_dict = ujson.loads(cfg_txt)
if isinstance(cfg_dict,dict) == False:
print('cfg_dict not a dict')
return
print('read',cfg_dict)
gas_threshold = cfg_dict['gasstr']
t1 = cfg_dict['t1']
print('gas',gas_threshold,'t1',t1)
finally:
f.close()
print('close')
return 0
def write_cfg_file(cloudstr):
global t1,gas_threshold,ini_file_name
if isinstance(cloudstr,str) == False:
return
try:
f = open(ini_file_name,'r')
except OSError:
pass
else:
cfg_txt = f.read()
f.close()
finally:
pass
try:
f = open(ini_file_name,'w+')
except OSError:
pass
else:
cfg_dict = ujson.loads(cfg_txt)
cloud_dict = ujson.loads(cloudstr)
if isinstance(cfg_dict,dict) == False:
print('cfg_dict not a dict')
return
if isinstance(cloud_dict,dict) == False:
print('cloud_dict not a dict')
return
for key in cloud_dict.keys():
if cfg_dict.get(key) != None:
cfg_dict[key] = cloud_dict[key]
if key == 'gasstr':
gas_threshold = cfg_dict[key]
if key == 't1':
t1 = cfg_dict[key]
f.seek(0)
f.write(ujson.dumps(cfg_dict))
print(cfg_dict)
pass
finally:
f.close()
print('cloud cfg file close')
return
def on_connect():
print('linkkit is connected')
def on_disconnect():
print('linkkit is disconnected')
def on_props(request):
print('clound req data is {}'.format(request))
global gpio
global cloud_ctrl
cloudmsg = ujson.loads(request)
if 'powerstate' in cloudmsg:
if cloudmsg['powerstate'] == 0:
gpio.write(0)
#pass
cloud_ctrl = 0
print('led state {}'.format(cloudmsg['powerstate']))
else:
cloud_ctrl = 1
gpio.write(1)
#pass
print('led state {}'.format(cloudmsg['powerstate']))
else:
write_cfg_file(request)
def on_service(id,request):
print('clound req id is {} , req is {}'.format(id,request))
def on_error(err):
print('err msg is {} '.format(err))
def gas_detec():
gas_val = 0.0
dotnum = 0
global uart1
len1 = 0
#sign = 0
uart1.open('serial1')
readbuf1 = bytearray(9)
writebuf1 = bytearray([0xd7])
readbuf2 = bytearray(13)
writebuf2 = bytearray([0xff,0x01,0x87,0x00,0x00,0x00,0x00,0x00,0x78])
uart1.write(writebuf1)
len1 = uart1.read(readbuf1)
print('dotnum:',end='')
print(readbuf1)
if len1 != len(readbuf1):
print('read dotnum err')
uart1.close()
return gas_val
uart1.write(writebuf2)
len1 = uart1.read(readbuf2)
print('readlen:',len1,'dotnum:',end='')
print(readbuf2)
if len1 != len(readbuf2):
print('read gas err')
uart1.close()
return gas_val
uart1.close()
dotnum = (readbuf1[6]&0xf0)>> 4
#sign = readbuf1[6]&0x0f
gas_val = (readbuf2[2]*256.000 + readbuf2[3])*1.000/10**dotnum
print('gasvalue:',end='')
print(gas_val)
return gas_val
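# Worked example of the frame arithmetic above (sample bytes are assumed, not
# captured from a real sensor): if readbuf1[6] == 0x10 the decimal-point field is
# (0x10 & 0xf0) >> 4 == 1, and a reply with readbuf2[2] == 0x00 and
# readbuf2[3] == 0x2a gives gas_val = (0*256 + 42) / 10**1 == 4.2, which is below
# the default gas_threshold of 5.0.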
def liquid_level_detec():
lowval = liq_level.read()
print('lowval',lowval)
liq_meicn_remain = False
if lowval == 1:
liq_meicn_remain = True
else:
liq_meicn_remain = False
return liq_meicn_remain
def main():
global liq_level,cloud_ctrl,t1,liq_mdcn_alarm,gas_alarm
ret = connect_network()
print('network register sta {}'.format(ret))
productKey = 'xxx'
productSecret = ''
deviceName = 'haas505_demo_sn1'
deviceSecret = 'xxx'
key_info = {
'region' : 'cn-shanghai',
'productKey' : productKey,
'deviceName' : deviceName,
'deviceSecret' : deviceSecret,
'productSecret' : productSecret,
'keepaliveSec': 60
}
device = Device()
device.on(device.ON_CONNECT,on_connect)
device.on(device.ON_DISCONNECT,on_disconnect)
device.on(device.ON_PROPS,on_props)
device.on(device.ON_SERVICE,on_service)
device.on(device.ON_ERROR,on_error)
device.connect(key_info)
send_info = {'ver':version,'name':key_info['deviceName']}
post_data = {'params':ujson.dumps(send_info)}
device.postProps(post_data)
read_cfg_file()
time.sleep(2)
led1 = GPIO()
pump = GPIO()
    '''liquid level detection indicator LED'''
led1.open('led1')
'''liquid level detec io'''
liq_level.open('liq_level')
'''control pump relay'''
pump.open('pump')
pump.write(1)
    '''cloud_flg is the indicator LED for cloud downlink data'''
gpio.open('cloud_flg')
time_cnt = 0
gas_value = 0.00
liq_mdcn_re_flg_chg = False
need_send = False
while True:
time.sleep_ms(1000)
time_cnt += 1
liq_mdcn_re_flg = liquid_level_detec()
if liq_mdcn_re_flg == False:
led1.write(0)
if liq_mdcn_re_flg_chg == True:
liq_mdcn_re_flg_chg = False
need_send = True
pass
else:
led1.write(1)
need_send = True
liq_mdcn_re_flg_chg = True
print('need send')
'''need send data to cloud'''
pass
if time_cnt%10 == 0:
gas_value = gas_detec()
if gas_value > gas_threshold:
'''need send data to cloud'''
gas_alarm = True
need_send = True
print('need send')
else:
gas_alarm = False
pass
if liq_mdcn_re_flg == True:
need_send = False
pump.write(1)
cloud_ctrl = 2
print('close pump')
post_data = {'params':{'liq_mdcn_re':0,'gasval':100,'gasalarm':0,'powerstate':0}}
post_data['params']['liq_mdcn_re'] = 0
gas_value = gas_detec()
post_data['params']['gasval'] = int(gas_value*100)
if gas_alarm == True:
post_data['params']['gasalarm'] = 1
post_data['params']['powerstate'] = gpio.read()
post_data_dict = {'params':ujson.dumps(post_data['params'])}
device.postProps(post_data_dict)
continue
if gas_alarm == False:
if time_cnt%t1 == 0:
if pump.read() == 1 :
pump.write(0)
print('open pump')
else:
pump.write(1)
print('close pump')
else:
pass
if cloud_ctrl == 0:
pump.write(1)
cloud_ctrl = 2
time_cnt = 0
print('cloud close pump')
elif cloud_ctrl == 1:
pump.write(0)
cloud_ctrl = 2
time_cnt = 0
print('cloud open pump')
elif gas_alarm == True:
pump.write(1)
print('gas alarm close pump')
if need_send == True:
need_send = False
post_data1 = {'params':{'liq_mdcn_re':0,'gasval':100,'gasalarm':0,'powerstate':0}}
if liq_mdcn_re_flg == True:
post_data1['params']['liq_mdcn_re'] = 0
else:
post_data1['params']['liq_mdcn_re'] = 1
post_data1['params']['gasval'] = int(gas_value*100)
if gas_alarm == True:
post_data1['params']['gasalarm'] = 1
post_data1['params']['powerstate'] = gpio.read()
post_data1_dict = {'params':ujson.dumps(post_data1['params'])}
device.postProps(post_data1_dict)
if __name__ == '__main__':
main()
|
py | 1a3ee6c8f9a89d9526ed5b9ad96e7192dde2f1c5 | # (C) Datadog, Inc. 2018
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
from .mixins import PrometheusScraperMixin
from ..base import AgentCheck
from ...errors import CheckException
from six import string_types
class PrometheusScraper(PrometheusScraperMixin):
"""
This class scrapes a prometheus endpoint and submits the metrics on behalf of a check. This class
is used by checks that scrape more than one prometheus endpoint.
"""
def __init__(self, check):
super(PrometheusScraper, self).__init__()
self.check = check
def _submit_rate(self, metric_name, val, metric, custom_tags=None, hostname=None):
"""
Submit a metric as a rate, additional tags provided will be added to
the ones from the label provided via the metrics object.
`custom_tags` is an array of 'tag:value' that will be added to the
metric when sending the rate to Datadog.
"""
_tags = self._metric_tags(metric_name, val, metric, custom_tags, hostname)
self.check.rate('{}.{}'.format(self.NAMESPACE, metric_name), val, _tags, hostname=hostname)
def _submit_gauge(self, metric_name, val, metric, custom_tags=None, hostname=None):
"""
Submit a metric as a gauge, additional tags provided will be added to
the ones from the label provided via the metrics object.
`custom_tags` is an array of 'tag:value' that will be added to the
metric when sending the gauge to Datadog.
"""
_tags = self._metric_tags(metric_name, val, metric, custom_tags, hostname)
self.check.gauge('{}.{}'.format(self.NAMESPACE, metric_name), val, _tags, hostname=hostname)
def _submit_monotonic_count(self, metric_name, val, metric, custom_tags=None, hostname=None):
"""
Submit a metric as a monotonic count, additional tags provided will be added to
the ones from the label provided via the metrics object.
`custom_tags` is an array of 'tag:value' that will be added to the
metric when sending the monotonic count to Datadog.
"""
_tags = self._metric_tags(metric_name, val, metric, custom_tags, hostname)
self.check.monotonic_count('{}.{}'.format(self.NAMESPACE, metric_name), val, _tags, hostname=hostname)
def _metric_tags(self, metric_name, val, metric, custom_tags=None, hostname=None):
_tags = []
if custom_tags is not None:
_tags += custom_tags
for label in metric.label:
if self.exclude_labels is None or label.name not in self.exclude_labels:
tag_name = label.name
if self.labels_mapper is not None and label.name in self.labels_mapper:
tag_name = self.labels_mapper[label.name]
_tags.append('{}:{}'.format(tag_name, label.value))
return self._finalize_tags_to_submit(
_tags, metric_name, val, metric, custom_tags=custom_tags, hostname=hostname
)
def _submit_service_check(self, *args, **kwargs):
self.check.service_check(*args, **kwargs)
class GenericPrometheusCheck(AgentCheck):
"""
    GenericPrometheusCheck is a class that helps instantiate PrometheusCheck using
    only YAML configurations. As each check has its own state, it maintains a map
    of all checks so that the one corresponding to the instance is executed.
Minimal example configuration:
instances:
- prometheus_url: http://foobar/endpoint
namespace: "foobar"
metrics:
- bar
- foo
"""
def __init__(self, name, init_config, agentConfig, instances=None, default_instances=None, default_namespace=""):
super(GenericPrometheusCheck, self).__init__(name, init_config, agentConfig, instances)
self.scrapers_map = {}
self.default_instances = default_instances if default_instances is not None else {}
self.default_namespace = default_namespace
for instance in instances:
self.get_scraper(instance)
def check(self, instance):
endpoint = instance["prometheus_url"]
scraper = self.get_scraper(instance)
if not scraper.metrics_mapper:
raise CheckException("You have to collect at least one metric from the endpoint: " + endpoint)
scraper.process(
endpoint,
send_histograms_buckets=instance.get('send_histograms_buckets', True),
send_monotonic_counter=instance.get('send_monotonic_counter', True),
instance=instance,
ignore_unmapped=True
)
def _extract_rate_metrics(self, type_overrides):
rate_metrics = []
for metric in type_overrides:
if type_overrides[metric] == "rate":
rate_metrics.append(metric)
type_overrides[metric] = "gauge"
return rate_metrics
def get_scraper(self, instance):
namespace = instance.get("namespace", "")
# Check if we have a namespace
if namespace == "":
if self.default_namespace == "":
raise CheckException("You have to define a namespace for each prometheus check")
namespace = self.default_namespace
# Retrieve potential default instance settings for the namespace
default_instance = self.default_instances.get(namespace, {})
endpoint = instance.get("prometheus_url", default_instance.get("prometheus_url", ""))
if endpoint == "":
raise CheckException("Unable to find prometheus URL in config file.")
# If we already created the corresponding scraper, return it
if endpoint in self.scrapers_map:
return self.scrapers_map[endpoint]
# Otherwise we create the scraper
scraper = PrometheusScraper(self)
scraper.NAMESPACE = namespace
# Metrics are preprocessed if no mapping
metrics_mapper = {}
        # We merge lists and dictionaries from optional defaults & instance settings
metrics = default_instance.get("metrics", []) + instance.get("metrics", [])
for metric in metrics:
if isinstance(metric, string_types):
metrics_mapper[metric] = metric
else:
metrics_mapper.update(metric)
scraper.metrics_mapper = metrics_mapper
scraper.labels_mapper = default_instance.get("labels_mapper", {})
scraper.labels_mapper.update(instance.get("labels_mapper", {}))
scraper.label_joins = default_instance.get("label_joins", {})
scraper.label_joins.update(instance.get("label_joins", {}))
scraper.rate_metrics = self._extract_rate_metrics(default_instance.get("type_overrides", {}))
scraper.rate_metrics.extend(self._extract_rate_metrics(instance.get("type_overrides", {})))
scraper.type_overrides = default_instance.get("type_overrides", {})
scraper.type_overrides.update(instance.get("type_overrides", {}))
scraper.exclude_labels = default_instance.get("exclude_labels", []) + instance.get("exclude_labels", [])
scraper.extra_headers = default_instance.get("extra_headers", {})
scraper.extra_headers.update(instance.get("extra_headers", {}))
# For simple values instance settings overrides optional defaults
scraper.prometheus_metrics_prefix = instance.get("prometheus_metrics_prefix", default_instance.get("prometheus_metrics_prefix", ''))
scraper.label_to_hostname = instance.get("label_to_hostname", default_instance.get("label_to_hostname", None))
scraper.health_service_check = instance.get("health_service_check", default_instance.get("health_service_check", True))
scraper.ssl_cert = instance.get("ssl_cert", default_instance.get("ssl_cert", None))
scraper.ssl_private_key = instance.get("ssl_private_key", default_instance.get("ssl_private_key", None))
scraper.ssl_ca_cert = instance.get("ssl_ca_cert", default_instance.get("ssl_ca_cert", None))
scraper.set_prometheus_timeout(instance, default_instance.get("prometheus_timeout", 10))
self.scrapers_map[endpoint] = scraper
return scraper
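# Illustrative instance sketch (values are placeholders) covering the optional
# keys read by get_scraper above, in addition to the minimal example in the
# class docstring:
#
#   instances:
#     - prometheus_url: http://foobar/endpoint
#       namespace: "foobar"
#       metrics:
#         - bar
#         - foo: renamed_foo
#       type_overrides:
#         bar: rate
#       labels_mapper:
#         pod_name: pod
#       exclude_labels:
#         - timestamp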
|
py | 1a3ee6d3e4b7b870fea0c3d4981d37640981f8fc | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
# CHANGED manage.py will use development settings by
# default. Change the DJANGO_SETTINGS_MODULE environment variable
# for using the environment specific settings file.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "dockyard.settings.development")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
py | 1a3ee81d6a383490ea585863e5b5465aacfec9dd | import numpy as np
from scipy.optimize import minimize
from learner.learner import Learner
class LinearSeparator(Learner):
""" This is currently broken.
"""
def __init__(self, dim_size):
self.dim_size = dim_size
def learn(self, loss):
def to_minimize(a_vector):
return loss(Linear(a_vector))
init_a = 2 * np.random.rand(self.dim_size + 1) - 1
res = minimize(to_minimize, init_a)
return Linear(res.x)
class Linear:
def __init__(self, a_vector):
self.a_vector = a_vector
def __call__(self, xs):
xs = np.array(xs)
ys = xs @ self.a_vector[1:] + self.a_vector[0]
return np.int_(ys > 0)
def value(self, xs):
xs = np.array(xs)
ys = xs @ self.a_vector[1:] + self.a_vector[0]
return ys |
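# Illustrative usage sketch (made-up data): the learner expects a loss that maps
# a candidate Linear classifier to a scalar, such as its error rate on a sample.
#
#   points = np.array([[0.0], [1.0], [2.0], [3.0]])
#   labels = np.array([0, 0, 1, 1])
#   loss = lambda h: float(np.mean(h(points) != labels))
#   classifier = LinearSeparator(dim_size=1).learn(loss)
#
# As the docstring notes, a gradient-based scipy.optimize.minimize struggles with
# this piecewise-constant loss, which is why the class is marked as broken.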
py | 1a3ee95823624ea5904641d57255180b645f64ba | # -*- coding: utf-8 -*-
# @File : api.py
# @Date : 2021/2/25
# @Desc :
import socket
from Lib.log import logger
def is_empty_ports(useport=None):
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(("0.0.0.0", useport))
sock.close()
return True, ""
except socket.error:
logger.warning(f"端口: {useport},已占用")
return False, ""
def data_return(code, message, data):
return {'code': code, 'message': message, 'data': data}
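# Illustrative sketch (made-up values):
#   is_empty_ports(useport=50050)      # -> (True, "") when the port can be bound
#   data_return(200, "ok", {"id": 1})  # -> {'code': 200, 'message': 'ok', 'data': {'id': 1}}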
|
gyp | 1a3ee9be811bd0d4e6138ecfd68e99d127d268e9 | {
'targets': [
{
'target_name': 'dmp',
'sources': [
'src/diff_match_patch.cpp',
'src/dmp.cc'
],
'cflags': [ '-std=c++11' ],
'cflags!': [ '-fno-exceptions' ],
'cflags_cc!': [ '-fno-exceptions'],
'conditions': [
['OS=="mac"', {
'include_dirs': [
'/usr/local/opt/qt/include',
'/usr/local/opt/qt/include/QtCore'
],
'libraries': [
'/usr/local/opt/qt/lib/QtCore.framework/QtCore'
],
'xcode_settings': {
'GCC_ENABLE_CPP_EXCEPTIONS': 'YES',
'MACOSX_DEPLOYMENT_TARGET': '10.12',
'OTHER_CPLUSPLUSFLAGS': [ '-std=c++11', '-stdlib=libc++' ],
'OTHER_LDFLAGS': [ '-stdlib=libc++' ]
}
}],
['OS=="linux"', {
'include_dirs': [
'/usr/local/include',
'/usr/local/include/QtCore'
],
'cflags': [
'<!@(pkg-config --cflags Qt5Core)'
],
'ldflags': [
'<!@(pkg-config --libs-only-L --libs-only-other Qt5Core)'
],
'libraries': [
'<!@(pkg-config --libs-only-l Qt5Core)'
]
}]
]
},
{
'target_name': 'dmp-test',
'type': 'executable',
'sources': [
'src/diff_match_patch_test.cpp',
'src/diff_match_patch.cpp'
],
'cflags': [ '-std=c++11' ],
'cflags!': [ '-fno-exceptions' ],
'cflags_cc!': [ '-fno-exceptions'],
'conditions': [
['OS=="mac"', {
'include_dirs': [
'/usr/local/opt/qt/include',
'/usr/local/opt/qt/include/QtCore'
],
'libraries': [
'/usr/local/opt/qt/lib/QtCore.framework/QtCore'
],
'xcode_settings': {
'GCC_ENABLE_CPP_EXCEPTIONS': 'YES',
'MACOSX_DEPLOYMENT_TARGET': '10.12',
'OTHER_CPLUSPLUSFLAGS': [ '-std=c++11', '-stdlib=libc++' ],
'OTHER_LDFLAGS': [ '-stdlib=libc++' ]
}
}],
['OS=="linux"', {
'include_dirs': [
'/usr/local/include',
'/usr/local/include/QtCore'
],
'cflags': [
'<!@(pkg-config --cflags Qt5Core)'
],
'ldflags': [
'<!@(pkg-config --libs-only-L --libs-only-other Qt5Core)'
],
'libraries': [
'<!@(pkg-config --libs-only-l Qt5Core)'
]
}]
]
}
]
}
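# Illustrative build sketch (not part of the original file): with Qt5Core and
# pkg-config available, `node-gyp configure` followed by `node-gyp build` should
# produce both the `dmp` addon and the standalone `dmp-test` executable.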
|
py | 1a3eea13560f6eef7294c68ec1326581d2a8c87a | # -*- coding: utf-8 -*-
# @Time : 2022/1/3 0:02
# @Author : WhaleFall
# @Site :
# @File : models.py
# @Software: PyCharm
# Holds the `sqlmodel` database models
from . import login_manager
from sqlmodel import Field, SQLModel
from typing import Optional
from datetime import datetime
# Provide a user_loader callback used to reload the user object from the user ID
# stored in the session. It should take the user's unicode ID and return the
# corresponding user object. If the ID is invalid the function should return None
# (instead of raising an exception), so the ID is removed from the session and
# the application can keep running.
@login_manager.user_loader
def load_user(user_id):
    # Stub: should look up and return the User matching user_id, or None if the ID is invalid.
    return None
class User(SQLModel, table=True):
"""用户类"""
id: Optional[int] = Field(default=None, primary_key=True)
username: str
password: str
reg_time: datetime = datetime.now()
@property
def is_active(self):
return True
@property
def is_authenticated(self):
return True
@property
def is_anonymous(self):
return False
def get_id(self):
return self.id
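# Illustrative sketch (the engine URL and calling code are assumptions, not part
# of this module): the model can be created and queried with plain sqlmodel.
#
#   from sqlmodel import create_engine, Session, select
#   engine = create_engine("sqlite:///app.db")
#   SQLModel.metadata.create_all(engine)
#   with Session(engine) as session:
#       session.add(User(username="alice", password="hashed-secret"))
#       session.commit()
#       user = session.exec(select(User).where(User.username == "alice")).first()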
|
py | 1a3eeb705cb20e99c8e854e9936703160858dbe7 | # -*- coding: utf-8 -*-
#
# Copyright 2014 Thomas Amland <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import logging
import threading
from collections import deque
from watchdog.utils import DaemonThread
from .inotify_c import Inotify
class _Worker(DaemonThread):
"""
Thread that reads events from `inotify` and writes to `queue`.
"""
def __init__(self, inotify, queue):
DaemonThread.__init__(self)
self._read_events = inotify.read_events
self._queue = queue
def run(self):
while self.should_keep_running():
inotify_events = self._read_events()
for inotify_event in inotify_events:
logging.debug("worker: in event %s", inotify_event)
if inotify_event.is_moved_to:
from_event = self._queue._catch(inotify_event.cookie)
if from_event:
self._queue._put((from_event, inotify_event))
else:
logging.debug("worker: could not find maching move_from event")
self._queue._put(inotify_event)
else:
self._queue._put(inotify_event)
class InotifyBuffer(object):
"""
A wrapper for `Inotify` that keeps events in memory for `delay` seconds.
IN_MOVED_FROM and IN_MOVED_TO events are paired during this time.
"""
def __init__(self, path, recursive=False):
self.delay = 0.5
self._lock = threading.Lock()
self._not_empty = threading.Condition(self._lock)
self._queue = deque()
self._inotify = Inotify(path, recursive)
self._worker = _Worker(self._inotify, self)
self._worker.start()
def read_event(self):
"""
Returns a single event or a tuple of from/to events in case of a
paired move event.
"""
while True:
# wait for queue
self._not_empty.acquire()
while len(self._queue) == 0:
self._not_empty.wait()
head, insert_time = self._queue[0]
self._not_empty.release()
# wait for delay
time_left = insert_time + self.delay - time.time()
while time_left > 0:
time.sleep(time_left)
time_left = insert_time + self.delay - time.time()
# return if event is still here
self._lock.acquire()
try:
if len(self._queue) > 0 and self._queue[0][0] is head:
self._queue.popleft()
return head
finally:
self._lock.release()
def close(self):
self._worker.stop()
self._inotify.close()
self._worker.join()
def _put(self, elem):
self._lock.acquire()
self._queue.append((elem, time.time()))
self._not_empty.notify()
self._lock.release()
def _catch(self, cookie):
self._lock.acquire()
ret = None
for i, elem in enumerate(self._queue):
event, _ = elem
try:
if event.is_moved_from and event.cookie == cookie:
ret = event
del self._queue[i]
break
except AttributeError:
pass
self._lock.release()
return ret
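# Illustrative usage sketch (the watched path is a placeholder; Linux only, since
# this sits on top of inotify):
#
#   buf = InotifyBuffer(b"/tmp/watched", recursive=True)
#   event = buf.read_event()   # a single event, or a (moved_from, moved_to) pair
#   buf.close()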
|
py | 1a3eed8add94ac6a43085567c42fb2753803d4cc | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
from concurrent import futures
import datetime
import functools
import inspect
import logging
import sys
import threading
import time
from typing import (
Any,
Callable,
Dict,
List,
Iterable,
Optional,
Sequence,
Tuple,
Type,
Union,
)
import proto
from google.api_core import operation
from google.auth import credentials as auth_credentials
from google.cloud.aiplatform import initializer
from google.cloud.aiplatform import utils
from google.cloud.aiplatform.compat.types import encryption_spec as gca_encryption_spec
logging.basicConfig(level=logging.INFO, stream=sys.stdout)
class Logger:
"""Logging wrapper class with high level helper methods."""
def __init__(self, name: str = ""):
"""Initializes logger with name.
Args:
name (str): Name to associate with logger.
"""
self._logger = logging.getLogger(name)
def log_create_with_lro(
self,
cls: Type["VertexAiResourceNoun"],
lro: Optional[operation.Operation] = None,
):
"""Logs create event with LRO.
Args:
cls (VertexAiResourceNoun):
Vertex AI Resource Noun class that is being created.
lro (operation.Operation):
Optional. Backing LRO for creation.
"""
self._logger.info(f"Creating {cls.__name__}")
if lro:
self._logger.info(
f"Create {cls.__name__} backing LRO: {lro.operation.name}"
)
def log_create_complete(
self,
cls: Type["VertexAiResourceNoun"],
resource: proto.Message,
variable_name: str,
):
"""Logs create event is complete.
Will also include code snippet to instantiate resource in SDK.
Args:
cls (VertexAiResourceNoun):
Vertex AI Resource Noun class that is being created.
resource (proto.Message):
                Vertex AI Resource proto.Message
variable_name (str): Name of variable to use for code snippet
"""
self._logger.info(f"{cls.__name__} created. Resource name: {resource.name}")
self._logger.info(f"To use this {cls.__name__} in another session:")
self._logger.info(
f"{variable_name} = aiplatform.{cls.__name__}('{resource.name}')"
)
def log_create_complete_with_getter(
self,
cls: Type["VertexAiResourceNoun"],
resource: proto.Message,
variable_name: str,
):
"""Logs create event is complete.
Will also include code snippet to instantiate resource in SDK.
Args:
cls (VertexAiResourceNoun):
Vertex AI Resource Noun class that is being created.
resource (proto.Message):
                Vertex AI Resource proto.Message
variable_name (str): Name of variable to use for code snippet
"""
self._logger.info(f"{cls.__name__} created. Resource name: {resource.name}")
self._logger.info(f"To use this {cls.__name__} in another session:")
self._logger.info(
f"{variable_name} = aiplatform.{cls.__name__}.get('{resource.name}')"
)
def log_action_start_against_resource(
self, action: str, noun: str, resource_noun_obj: "VertexAiResourceNoun"
):
"""Logs intention to start an action against a resource.
Args:
action (str): Action to complete against the resource ie: "Deploying". Can be empty string.
noun (str): Noun the action acts on against the resource. Can be empty string.
resource_noun_obj (VertexAiResourceNoun):
Resource noun object the action is acting against.
"""
self._logger.info(
f"{action} {resource_noun_obj.__class__.__name__} {noun}: {resource_noun_obj.resource_name}"
)
def log_action_started_against_resource_with_lro(
self,
action: str,
noun: str,
cls: Type["VertexAiResourceNoun"],
lro: operation.Operation,
):
"""Logs an action started against a resource with lro.
Args:
action (str): Action started against resource. ie: "Deploy". Can be empty string.
noun (str): Noun the action acts on against the resource. Can be empty string.
cls (VertexAiResourceNoun):
Resource noun object the action is acting against.
lro (operation.Operation): Backing LRO for action.
"""
self._logger.info(
f"{action} {cls.__name__} {noun} backing LRO: {lro.operation.name}"
)
def log_action_completed_against_resource(
self, noun: str, action: str, resource_noun_obj: "VertexAiResourceNoun"
):
"""Logs action completed against resource.
Args:
noun (str): Noun the action acts on against the resource. Can be empty string.
action (str): Action started against resource. ie: "Deployed". Can be empty string.
resource_noun_obj (VertexAiResourceNoun):
Resource noun object the action is acting against
"""
self._logger.info(
f"{resource_noun_obj.__class__.__name__} {noun} {action}. Resource name: {resource_noun_obj.resource_name}"
)
def __getattr__(self, attr: str):
"""Forward remainder of logging to underlying logger."""
return getattr(self._logger, attr)
_LOGGER = Logger(__name__)
class FutureManager(metaclass=abc.ABCMeta):
"""Tracks concurrent futures against this object."""
def __init__(self):
self.__latest_future_lock = threading.Lock()
# Always points to the latest future. All submitted futures will always
# form a dependency on the latest future.
self.__latest_future = None
# Caches Exception of any executed future. Once one exception occurs
# all additional futures should fail and any additional invocations will block.
self._exception = None
def _raise_future_exception(self):
"""Raises exception if one of the object's futures has raised."""
with self.__latest_future_lock:
if self._exception:
raise self._exception
def _complete_future(self, future: futures.Future):
"""Checks for exception of future and removes the pointer if it's still
latest.
Args:
future (futures.Future): Required. A future to complete.
"""
with self.__latest_future_lock:
try:
future.result() # raises
except Exception as e:
self._exception = e
if self.__latest_future is future:
self.__latest_future = None
def _are_futures_done(self) -> bool:
"""Helper method to check to all futures are complete.
Returns:
True if no latest future.
"""
with self.__latest_future_lock:
return self.__latest_future is None
def wait(self):
"""Helper method to that blocks until all futures are complete."""
future = self.__latest_future
if future:
futures.wait([future], return_when=futures.FIRST_EXCEPTION)
self._raise_future_exception()
@property
def _latest_future(self) -> Optional[futures.Future]:
"""Get the latest future if it exists."""
with self.__latest_future_lock:
return self.__latest_future
@_latest_future.setter
def _latest_future(self, future: Optional[futures.Future]):
"""Optionally set the latest future and add a complete_future
callback."""
with self.__latest_future_lock:
self.__latest_future = future
if future:
future.add_done_callback(self._complete_future)
def _submit(
self,
method: Callable[..., Any],
args: Sequence[Any],
kwargs: Dict[str, Any],
additional_dependencies: Optional[Sequence[futures.Future]] = None,
callbacks: Optional[Sequence[Callable[[futures.Future], Any]]] = None,
internal_callbacks: Iterable[Callable[[Any], Any]] = None,
) -> futures.Future:
"""Submit a method as a future against this object.
Args:
method (Callable): Required. The method to submit.
args (Sequence): Required. The arguments to call the method with.
kwargs (dict): Required. The keyword arguments to call the method with.
additional_dependencies (Optional[Sequence[futures.Future]]):
Optional. Additional dependent futures to wait on before executing
method. Note: No validation is done on the dependencies.
callbacks (Optional[Sequence[Callable[[futures.Future], Any]]]):
Optional. Additional Future callbacks to execute once this created
Future is complete.
Returns:
future (Future): Future of the submitted method call.
"""
def wait_for_dependencies_and_invoke(
deps: Sequence[futures.Future],
method: Callable[..., Any],
args: Sequence[Any],
kwargs: Dict[str, Any],
internal_callbacks: Iterable[Callable[[Any], Any]],
) -> Any:
"""Wrapper method to wait on any dependencies before submitting
method.
Args:
deps (Sequence[futures.Future]):
Required. Dependent futures to wait on before executing method.
Note: No validation is done on the dependencies.
method (Callable): Required. The method to submit.
args (Sequence[Any]): Required. The arguments to call the method with.
kwargs (Dict[str, Any]):
Required. The keyword arguments to call the method with.
internal_callbacks: (Callable[[Any], Any]):
Callbacks that take the result of method.
"""
for future in set(deps):
future.result()
result = method(*args, **kwargs)
# call callbacks from within future
if internal_callbacks:
for callback in internal_callbacks:
callback(result)
return result
# Retrieves any dependencies from arguments.
deps = [
arg._latest_future
for arg in list(args) + list(kwargs.values())
if isinstance(arg, FutureManager)
]
# Retrieves exceptions and raises
# if any upstream dependency has an exception
exceptions = [
arg._exception
for arg in list(args) + list(kwargs.values())
if isinstance(arg, FutureManager) and arg._exception
]
if exceptions:
raise exceptions[0]
# filter out objects that do not have pending tasks
deps = [dep for dep in deps if dep]
if additional_dependencies:
deps.extend(additional_dependencies)
with self.__latest_future_lock:
# form a dependency on the latest future of this object
if self.__latest_future:
deps.append(self.__latest_future)
self.__latest_future = initializer.global_pool.submit(
wait_for_dependencies_and_invoke,
deps=deps,
method=method,
args=args,
kwargs=kwargs,
internal_callbacks=internal_callbacks,
)
future = self.__latest_future
# Clean up callback captures exception as well as removes future.
# May execute immediately and take lock.
future.add_done_callback(self._complete_future)
if callbacks:
for c in callbacks:
future.add_done_callback(c)
return future
@classmethod
@abc.abstractmethod
def _empty_constructor(cls) -> "FutureManager":
"""Should construct object with all non FutureManager attributes as
None."""
pass
@abc.abstractmethod
def _sync_object_with_future_result(self, result: "FutureManager"):
"""Should sync the object from _empty_constructor with result of
future."""
def __repr__(self) -> str:
if self._exception:
return f"{object.__repr__(self)} failed with {str(self._exception)}"
if self.__latest_future:
return f"{object.__repr__(self)} is waiting for upstream dependencies to complete."
return object.__repr__(self)
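# Illustrative sketch (hypothetical subclass, not part of this module) of how a
# FutureManager subclass typically queues work and later blocks on it:
#
#   class MyResource(FutureManager):
#       @classmethod
#       def _empty_constructor(cls):
#           return cls()
#       def _sync_object_with_future_result(self, result):
#           pass
#       def create_async(self):
#           return self._submit(method=self._create, args=(), kwargs={})
#
#   resource = MyResource()
#   resource.create_async()
#   resource.wait()   # re-raises any exception captured from the Future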
class VertexAiResourceNoun(metaclass=abc.ABCMeta):
"""Base class the Vertex AI resource nouns.
Subclasses require two class attributes:
client_class: The client to instantiate to interact with this resource noun.
_is_client_prediction_client: Flag to indicate if the client requires a prediction endpoint.
Subclass is required to populate private attribute _gca_resource which is the
service representation of the resource noun.
"""
@property
@classmethod
@abc.abstractmethod
def client_class(cls) -> Type[utils.VertexAiServiceClientWithOverride]:
"""Client class required to interact with resource with optional
overrides."""
pass
@property
@classmethod
@abc.abstractmethod
def _is_client_prediction_client(cls) -> bool:
"""Flag to indicate whether to use prediction endpoint with client."""
pass
@property
@abc.abstractmethod
def _getter_method(cls) -> str:
"""Name of getter method of client class for retrieving the
resource."""
pass
@property
@abc.abstractmethod
def _delete_method(cls) -> str:
"""Name of delete method of client class for deleting the resource."""
pass
@property
@abc.abstractmethod
def _resource_noun(cls) -> str:
"""Resource noun."""
pass
def __init__(
self,
project: Optional[str] = None,
location: Optional[str] = None,
credentials: Optional[auth_credentials.Credentials] = None,
resource_name: Optional[str] = None,
):
"""Initializes class with project, location, and api_client.
Args:
project(str): Project of the resource noun.
location(str): The location of the resource noun.
            credentials(google.auth.credentials.Credentials): Optional custom
                credentials to use when interacting with the resource noun.
resource_name(str): A fully-qualified resource name or ID.
"""
if resource_name:
project, location = self._get_and_validate_project_location(
resource_name=resource_name, project=project, location=location
)
self.project = project or initializer.global_config.project
self.location = location or initializer.global_config.location
self.credentials = credentials or initializer.global_config.credentials
self.api_client = self._instantiate_client(self.location, self.credentials)
@classmethod
def _instantiate_client(
cls,
location: Optional[str] = None,
credentials: Optional[auth_credentials.Credentials] = None,
) -> utils.VertexAiServiceClientWithOverride:
"""Helper method to instantiate service client for resource noun.
Args:
location (str): The location of the resource noun.
credentials (google.auth.credentials.Credentials):
                Optional custom credentials to use when interacting with the
                resource noun.
Returns:
client (utils.VertexAiServiceClientWithOverride):
Initialized service client for this service noun with optional overrides.
"""
return initializer.global_config.create_client(
client_class=cls.client_class,
credentials=credentials,
location_override=location,
prediction_client=cls._is_client_prediction_client,
)
def _get_and_validate_project_location(
self,
resource_name: str,
project: Optional[str] = None,
location: Optional[str] = None,
) -> Tuple:
"""Validate the project and location for the resource.
Args:
resource_name(str): Required. A fully-qualified resource name or ID.
project(str): Project of the resource noun.
location(str): The location of the resource noun.
Raises:
RuntimeError if location is different from resource location
"""
fields = utils.extract_fields_from_resource_name(
resource_name, self._resource_noun
)
if not fields:
return project, location
if location and fields.location != location:
raise RuntimeError(
f"location {location} is provided, but different from "
f"the resource location {fields.location}"
)
return fields.project, fields.location
def _get_gca_resource(self, resource_name: str) -> proto.Message:
"""Returns GAPIC service representation of client class resource."""
"""
Args:
resource_name (str):
Required. A fully-qualified resource name or ID.
"""
resource_name = utils.full_resource_name(
resource_name=resource_name,
resource_noun=self._resource_noun,
project=self.project,
location=self.location,
)
return getattr(self.api_client, self._getter_method)(name=resource_name)
def _sync_gca_resource(self):
"""Sync GAPIC service representation of client class resource."""
self._gca_resource = self._get_gca_resource(resource_name=self.resource_name)
@property
def name(self) -> str:
"""Name of this resource."""
self._assert_gca_resource_is_available()
return self._gca_resource.name.split("/")[-1]
@property
def resource_name(self) -> str:
"""Full qualified resource name."""
self._assert_gca_resource_is_available()
return self._gca_resource.name
@property
def display_name(self) -> str:
"""Display name of this resource."""
self._assert_gca_resource_is_available()
return self._gca_resource.display_name
@property
def create_time(self) -> datetime.datetime:
"""Time this resource was created."""
self._assert_gca_resource_is_available()
return self._gca_resource.create_time
@property
def update_time(self) -> datetime.datetime:
"""Time this resource was last updated."""
self._sync_gca_resource()
return self._gca_resource.update_time
@property
def encryption_spec(self) -> Optional[gca_encryption_spec.EncryptionSpec]:
"""Customer-managed encryption key options for this Vertex AI resource.
If this is set, then all resources created by this Vertex AI resource will
be encrypted with the provided encryption key.
"""
self._assert_gca_resource_is_available()
return getattr(self._gca_resource, "encryption_spec")
@property
def labels(self) -> Dict[str, str]:
"""User-defined labels containing metadata about this resource.
Read more about labels at https://goo.gl/xmQnxf
"""
self._assert_gca_resource_is_available()
return self._gca_resource.labels
@property
def gca_resource(self) -> proto.Message:
"""The underlying resource proto representation."""
self._assert_gca_resource_is_available()
return self._gca_resource
def _assert_gca_resource_is_available(self) -> None:
"""Helper method to raise when property is not accessible.
Raises:
            RuntimeError if _gca_resource has not been created.
"""
if self._gca_resource is None:
raise RuntimeError(
f"{self.__class__.__name__} resource has not been created"
)
def __repr__(self) -> str:
return f"{object.__repr__(self)} \nresource name: {self.resource_name}"
def optional_sync(
construct_object_on_arg: Optional[str] = None,
return_input_arg: Optional[str] = None,
bind_future_to_self: bool = True,
):
"""Decorator for VertexAiResourceNounWithFutureManager with optional sync
support.
Methods with this decorator should include a "sync" argument that defaults to
True. If called with sync=False this decorator will launch the method as a
concurrent Future in a separate Thread.
Note that this is only robust enough to support our current end to end patterns
and may not be suitable for new patterns.
Args:
construct_object_on_arg (str):
Optional. If provided, will only construct output object if arg is present.
Example: If custom training does not produce a model.
return_input_arg (str):
Optional. If provided will return passed in argument instead of
constructing.
Example: Model.deploy(Endpoint) returns the passed in Endpoint
bind_future_to_self (bool):
Whether to add this future to the calling object.
Example: Model.deploy(Endpoint) would be set to False because we only
want the deployment Future to be associated with Endpoint.
"""
def optional_run_in_thread(method: Callable[..., Any]):
"""Optionally run this method concurrently in separate Thread.
Args:
method (Callable[..., Any]): Method to optionally run in separate Thread.
"""
@functools.wraps(method)
def wrapper(*args, **kwargs):
"""Wraps method."""
sync = kwargs.pop("sync", True)
bound_args = inspect.signature(method).bind(*args, **kwargs)
self = bound_args.arguments.get("self")
calling_object_latest_future = None
# check to see if this object has any exceptions
if self:
calling_object_latest_future = self._latest_future
self._raise_future_exception()
# if sync then wait for any Futures to complete and execute
if sync:
if self:
self.wait()
return method(*args, **kwargs)
# callbacks to call within the Future (in same Thread)
internal_callbacks = []
# callbacks to add to the Future (may or may not be in same Thread)
callbacks = []
# additional Future dependencies to capture
dependencies = []
# all methods should have type signatures
return_type = get_annotation_class(
inspect.getfullargspec(method).annotations["return"]
)
# is a classmethod that creates the object and returns it
if args and inspect.isclass(args[0]):
# assumes classmethod is our resource noun
returned_object = args[0]._empty_constructor()
self = returned_object
else: # instance method
# object produced by the method
returned_object = bound_args.arguments.get(return_input_arg)
# if we're returning an input object
if returned_object and returned_object is not self:
# make sure the input object doesn't have any exceptions
# from previous futures
returned_object._raise_future_exception()
# if the future will be associated with both the returned object
# and calling object then we need to add additional callback
# to remove the future from the returned object
# if we need to construct a new empty returned object
should_construct = not returned_object and bound_args.arguments.get(
construct_object_on_arg, not construct_object_on_arg
)
if should_construct:
if return_type is not None:
returned_object = return_type._empty_constructor()
# if the future will be associated with both the returned object
# and calling object then we need to add additional callback
# to remove the future from the returned object
if returned_object and bind_future_to_self:
callbacks.append(returned_object._complete_future)
if returned_object:
# sync objects after future completes
internal_callbacks.append(
returned_object._sync_object_with_future_result
)
# If the future is not associated with the calling object
# then the return object future needs to form a dependency on the
# the latest future in the calling object.
if not bind_future_to_self:
if calling_object_latest_future:
dependencies.append(calling_object_latest_future)
self = returned_object
future = self._submit(
method=method,
callbacks=callbacks,
internal_callbacks=internal_callbacks,
additional_dependencies=dependencies,
args=[],
kwargs=bound_args.arguments,
)
# if the calling object is the one that submitted then add it's future
# to the returned object
if returned_object and returned_object is not self:
returned_object._latest_future = future
return returned_object
return wrapper
return optional_run_in_thread
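# Illustrative usage sketch for optional_sync (not part of the original module;
# the class and method names below are hypothetical and only show the calling
# pattern the decorator expects):
#
#     class MyResource(VertexAiResourceNounWithFutureManager):
#         @classmethod
#         @optional_sync()
#         def create(cls, display_name: str, sync: bool = True) -> "MyResource":
#             ...  # long-running API call that returns the created resource
#
#     resource = MyResource.create(display_name="x", sync=False)  # returns immediately
#     resource.wait()  # block until the concurrent Future has completed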
class VertexAiResourceNounWithFutureManager(VertexAiResourceNoun, FutureManager):
"""Allows optional asynchronous calls to this Vertex AI Resource
Nouns."""
def __init__(
self,
project: Optional[str] = None,
location: Optional[str] = None,
credentials: Optional[auth_credentials.Credentials] = None,
resource_name: Optional[str] = None,
):
"""Initializes class with project, location, and api_client.
Args:
project (str): Optional. Project of the resource noun.
location (str): Optional. The location of the resource noun.
            credentials(google.auth.credentials.Credentials):
                Optional. Custom credentials to use when interacting with this
                resource noun.
resource_name(str): A fully-qualified resource name or ID.
"""
VertexAiResourceNoun.__init__(
self,
project=project,
location=location,
credentials=credentials,
resource_name=resource_name,
)
FutureManager.__init__(self)
@classmethod
def _empty_constructor(
cls,
project: Optional[str] = None,
location: Optional[str] = None,
credentials: Optional[auth_credentials.Credentials] = None,
resource_name: Optional[str] = None,
) -> "VertexAiResourceNounWithFutureManager":
"""Initializes with all attributes set to None.
The attributes should be populated after a future is complete. This allows
scheduling of additional API calls before the resource is created.
Args:
project (str): Optional. Project of the resource noun.
location (str): Optional. The location of the resource noun.
            credentials(google.auth.credentials.Credentials):
                Optional. Custom credentials to use when interacting with this
                resource noun.
resource_name(str): A fully-qualified resource name or ID.
Returns:
An instance of this class with attributes set to None.
"""
self = cls.__new__(cls)
VertexAiResourceNoun.__init__(
self,
project=project,
location=location,
credentials=credentials,
resource_name=resource_name,
)
FutureManager.__init__(self)
self._gca_resource = None
return self
def _sync_object_with_future_result(
self, result: "VertexAiResourceNounWithFutureManager"
):
"""Populates attributes from a Future result to this object.
Args:
result: VertexAiResourceNounWithFutureManager
Required. Result of future with same type as this object.
"""
sync_attributes = [
"project",
"location",
"api_client",
"_gca_resource",
"credentials",
]
optional_sync_attributes = ["_prediction_client"]
for attribute in sync_attributes:
setattr(self, attribute, getattr(result, attribute))
for attribute in optional_sync_attributes:
value = getattr(result, attribute, None)
if value:
setattr(self, attribute, value)
@classmethod
def _construct_sdk_resource_from_gapic(
cls,
gapic_resource: proto.Message,
project: Optional[str] = None,
location: Optional[str] = None,
credentials: Optional[auth_credentials.Credentials] = None,
) -> VertexAiResourceNoun:
"""Given a GAPIC resource object, return the SDK representation.
Args:
gapic_resource (proto.Message):
A GAPIC representation of a Vertex AI resource, usually
retrieved by a get_* or in a list_* API call.
project (str):
Optional. Project to construct SDK object from. If not set,
project set in aiplatform.init will be used.
location (str):
Optional. Location to construct SDK object from. If not set,
location set in aiplatform.init will be used.
credentials (auth_credentials.Credentials):
Optional. Custom credentials to use to construct SDK object.
Overrides credentials set in aiplatform.init.
Returns:
VertexAiResourceNoun:
An initialized SDK object that represents GAPIC type.
"""
sdk_resource = cls._empty_constructor(
project=project, location=location, credentials=credentials
)
sdk_resource._gca_resource = gapic_resource
return sdk_resource
# TODO(b/144545165): Improve documentation for list filtering once available
# TODO(b/184910159): Expose `page_size` field in list method
@classmethod
def _list(
cls,
cls_filter: Callable[[proto.Message], bool] = lambda _: True,
filter: Optional[str] = None,
order_by: Optional[str] = None,
project: Optional[str] = None,
location: Optional[str] = None,
credentials: Optional[auth_credentials.Credentials] = None,
) -> List[VertexAiResourceNoun]:
"""Private method to list all instances of this Vertex AI Resource,
takes a `cls_filter` arg to filter to a particular SDK resource
subclass.
Args:
cls_filter (Callable[[proto.Message], bool]):
A function that takes one argument, a GAPIC resource, and returns
a bool. If the function returns False, that resource will be
excluded from the returned list. Example usage:
cls_filter = lambda obj: obj.metadata in cls.valid_metadatas
filter (str):
Optional. An expression for filtering the results of the request.
For field names both snake_case and camelCase are supported.
order_by (str):
Optional. A comma-separated list of fields to order by, sorted in
ascending order. Use "desc" after a field name for descending.
Supported fields: `display_name`, `create_time`, `update_time`
project (str):
Optional. Project to retrieve list from. If not set, project
set in aiplatform.init will be used.
location (str):
Optional. Location to retrieve list from. If not set, location
set in aiplatform.init will be used.
credentials (auth_credentials.Credentials):
Optional. Custom credentials to use to retrieve list. Overrides
credentials set in aiplatform.init.
Returns:
List[VertexAiResourceNoun] - A list of SDK resource objects
"""
resource = cls._empty_constructor(
project=project, location=location, credentials=credentials
)
# Fetch credentials once and re-use for all `_empty_constructor()` calls
creds = initializer.global_config.credentials
resource_list_method = getattr(resource.api_client, resource._list_method)
list_request = {
"parent": initializer.global_config.common_location_path(
project=project, location=location
),
"filter": filter,
}
if order_by:
list_request["order_by"] = order_by
resource_list = resource_list_method(request=list_request) or []
return [
cls._construct_sdk_resource_from_gapic(
gapic_resource, project=project, location=location, credentials=creds
)
for gapic_resource in resource_list
if cls_filter(gapic_resource)
]
@classmethod
def _list_with_local_order(
cls,
cls_filter: Callable[[proto.Message], bool] = lambda _: True,
filter: Optional[str] = None,
order_by: Optional[str] = None,
project: Optional[str] = None,
location: Optional[str] = None,
credentials: Optional[auth_credentials.Credentials] = None,
) -> List[VertexAiResourceNoun]:
"""Private method to list all instances of this Vertex AI Resource,
takes a `cls_filter` arg to filter to a particular SDK resource
subclass. Provides client-side sorting when a list API doesn't support
`order_by`.
Args:
cls_filter (Callable[[proto.Message], bool]):
A function that takes one argument, a GAPIC resource, and returns
a bool. If the function returns False, that resource will be
excluded from the returned list. Example usage:
cls_filter = lambda obj: obj.metadata in cls.valid_metadatas
filter (str):
Optional. An expression for filtering the results of the request.
For field names both snake_case and camelCase are supported.
order_by (str):
Optional. A comma-separated list of fields to order by, sorted in
ascending order. Use "desc" after a field name for descending.
Supported fields: `display_name`, `create_time`, `update_time`
project (str):
Optional. Project to retrieve list from. If not set, project
set in aiplatform.init will be used.
location (str):
Optional. Location to retrieve list from. If not set, location
set in aiplatform.init will be used.
credentials (auth_credentials.Credentials):
Optional. Custom credentials to use to retrieve list. Overrides
credentials set in aiplatform.init.
Returns:
List[VertexAiResourceNoun] - A list of SDK resource objects
"""
li = cls._list(
cls_filter=cls_filter,
filter=filter,
order_by=None, # This method will handle the ordering locally
project=project,
location=location,
credentials=credentials,
)
if order_by:
desc = "desc" in order_by
order_by = order_by.replace("desc", "")
order_by = order_by.split(",")
li.sort(
key=lambda x: tuple(getattr(x, field.strip()) for field in order_by),
reverse=desc,
)
return li
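    # Note on the ordering above (descriptive, no behavior change): with
    # order_by="create_time desc, display_name" the sort key per resource is
    # (create_time, display_name), and because "desc" is detected once for the
    # whole string, reverse=True is applied to the entire tuple comparison.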
@classmethod
def list(
cls,
filter: Optional[str] = None,
order_by: Optional[str] = None,
project: Optional[str] = None,
location: Optional[str] = None,
credentials: Optional[auth_credentials.Credentials] = None,
) -> List[VertexAiResourceNoun]:
"""List all instances of this Vertex AI Resource.
Example Usage:
aiplatform.BatchPredictionJobs.list(
filter='state="JOB_STATE_SUCCEEDED" AND display_name="my_job"',
)
aiplatform.Model.list(order_by="create_time desc, display_name")
Args:
filter (str):
Optional. An expression for filtering the results of the request.
For field names both snake_case and camelCase are supported.
order_by (str):
Optional. A comma-separated list of fields to order by, sorted in
ascending order. Use "desc" after a field name for descending.
Supported fields: `display_name`, `create_time`, `update_time`
project (str):
Optional. Project to retrieve list from. If not set, project
set in aiplatform.init will be used.
location (str):
Optional. Location to retrieve list from. If not set, location
set in aiplatform.init will be used.
credentials (auth_credentials.Credentials):
Optional. Custom credentials to use to retrieve list. Overrides
credentials set in aiplatform.init.
Returns:
List[VertexAiResourceNoun] - A list of SDK resource objects
"""
return cls._list(
filter=filter,
order_by=order_by,
project=project,
location=location,
credentials=credentials,
)
@optional_sync()
def delete(self, sync: bool = True) -> None:
"""Deletes this Vertex AI resource. WARNING: This deletion is
permanent.
Args:
sync (bool):
Whether to execute this deletion synchronously. If False, this method
will be executed in concurrent Future and any downstream object will
be immediately returned and synced when the Future has completed.
"""
_LOGGER.log_action_start_against_resource("Deleting", "", self)
lro = getattr(self.api_client, self._delete_method)(name=self.resource_name)
_LOGGER.log_action_started_against_resource_with_lro(
"Delete", "", self.__class__, lro
)
lro.result()
_LOGGER.log_action_completed_against_resource("deleted.", "", self)
def __repr__(self) -> str:
if self._gca_resource:
return VertexAiResourceNoun.__repr__(self)
return FutureManager.__repr__(self)
def _wait_for_resource_creation(self) -> None:
"""Wait until underlying resource is created.
Currently this should only be used on subclasses that implement the construct then
`run` pattern because the underlying sync=False implementation will not update
downstream resource noun object's _gca_resource until the entire invoked method is complete.
Ex:
job = CustomTrainingJob()
job.run(sync=False, ...)
job._wait_for_resource_creation()
Raises:
RuntimeError if the resource has not been scheduled to be created.
"""
        # If the user calls this but never invoked an API call that creates the resource
if self._are_futures_done() and not getattr(self._gca_resource, "name", None):
self._raise_future_exception()
raise RuntimeError(
f"{self.__class__.__name__} resource is not scheduled to be created."
)
while not getattr(self._gca_resource, "name", None):
# breaks out of loop if creation has failed async
if self._are_futures_done() and not getattr(
self._gca_resource, "name", None
):
self._raise_future_exception()
time.sleep(1)
def _assert_gca_resource_is_available(self) -> None:
"""Helper method to raise when accessing properties that do not exist.
Overrides VertexAiResourceNoun to provide a more informative exception if
resource creation has failed asynchronously.
Raises:
RuntimeError when resource has not been created.
"""
if not getattr(self._gca_resource, "name", None):
raise RuntimeError(
f"{self.__class__.__name__} resource has not been created."
+ (
f" Resource failed with: {self._exception}"
if self._exception
else ""
)
)
def get_annotation_class(annotation: type) -> type:
"""Helper method to retrieve type annotation.
Args:
annotation (type): Type hint
"""
# typing.Optional
if getattr(annotation, "__origin__", None) is Union:
return annotation.__args__[0]
else:
return annotation
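# Illustrative sketch of get_annotation_class (not part of the original module):
# Optional[X] is Union[X, None], so the first __args__ entry is returned, while
# plain classes pass through unchanged.
#
#     get_annotation_class(Optional[VertexAiResourceNoun])  # -> VertexAiResourceNoun
#     get_annotation_class(str)                             # -> str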
|
py | 1a3eeff0b8e97e5ba89305c59a525bc5c12cc12a | import logging
import os
import sys
import click
from rich.logging import RichHandler
from labfunctions.conf.server_settings import settings
# from labfunctions.control_plane import rqscheduler
from labfunctions.types.agent import AgentConfig
from labfunctions.utils import get_external_ip, get_hostname
hostname = get_hostname()
@click.group(name="agent")
def agentcli():
"""
Execute agent related actions
"""
pass
@agentcli.command(name="run")
@click.option("--workers", "-w", default=1, help="How many workers spawn")
@click.option("--redis", "-r", default=settings.QUEUE_REDIS, help="Redis full dsn")
@click.option(
"--qnames",
"-q",
default="default",
help="Comma separated list of queues to listen to",
)
@click.option(
"--cluster",
"-C",
default="default",
help="Cluster name, it will be added as qname",
)
@click.option(
"--ip-address",
"-i",
default=None,
help="IP address of the host",
)
@click.option(
"--agent-name",
"-a",
default=None,
help="Agent Name",
)
@click.option(
"--debug",
"-D",
is_flag=True,
default=False,
help="Debug log",
)
@click.option("--machine-id", "-m", default=f"localhost/ba/{hostname}")
def runcli(redis, workers, qnames, cluster, ip_address, agent_name, machine_id, debug):
"""Run the agent"""
# pylint: disable=import-outside-toplevel
# from labfunctions.control_plane import agent
from labfunctions.control import agent
level = "INFO"
if debug:
level = "DEBUG"
FORMAT = "%(message)s"
logging.basicConfig(
level=level,
format=FORMAT,
datefmt="[%X]",
handlers=[RichHandler(rich_tracebacks=True)],
)
agent.set_env(settings)
ip_address = ip_address or get_external_ip(settings.DNS_IP_ADDRESS)
queues = qnames.split(",")
conf = AgentConfig(
redis_dsn=redis,
cluster=cluster,
qnames=queues,
ip_address=ip_address,
machine_id=machine_id,
heartbeat_ttl=settings.AGENT_HEARTBEAT_TTL,
heartbeat_check_every=settings.AGENT_HEARTBEAT_CHECK,
agent_name=agent_name,
workers_n=workers,
)
agent.run(conf)
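# Illustrative invocation sketch (assumes this click group is mounted on the
# project's CLI entry point; the executable name "lab" below is hypothetical):
#
#     lab agent run -w 2 -q default,gpu -C mycluster --debug
#
# which would start two workers listening on the "default" and "gpu" queues for
# cluster "mycluster", with DEBUG-level logging enabled.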
|
py | 1a3ef1f1fef8a8706c6e93a7526606fced8821dd | from .ad_hoc import *
from nltk import WordNetLemmatizer
from config import cfg
def extract_tokens(sentence, str_list=None):
"""
Extract tokens among a sentences, meanwhile picking out the
proper nouns and numbers.
:param str sentence: The sentence to tokenize.
:param list str_list: Proper nouns.
:rtype: [list, list]
"""
str_list = str_list or list()
if len(sentence.split()) < cfg.len_threshold:
return None
tokens = []
slot = []
sentence += ' '
    sentence = sentence.replace('  ', ' ')  # collapse double spaces (assumed intent; the original call discarded its result)
while len(sentence) > 1:
to_continue = False
for template_str in str_list:
template_str += ' '
if sentence.startswith(template_str):
slot.append(template_str[:-1])
tokens.append('<STR>')
sentence = sentence[len(template_str):]
to_continue = True
break
if to_continue:
continue
space_idx = sentence.index(' ')
next_word = sentence[:space_idx]
sentence = sentence[space_idx+1:]
if next_word.isdigit():
slot.append(int(next_word))
tokens.append('<NUM>')
continue
if next_word.lower() in num_list:
slot.append(num_list.index(next_word.lower()))
tokens.append('<NUM>')
continue
if len(next_word) > 0:
tokens.append(next_word)
slot.append(None)
return tokens, slot
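# Illustrative example (assumes cfg.len_threshold <= 4 and that num_list from
# ad_hoc maps spelled-out numbers to their index, e.g. num_list.index('three') == 3):
#
#     extract_tokens("Alice bought three apples", ["Alice"])
#     # tokens -> ['<STR>', 'bought', '<NUM>', 'apples']
#     # slot   -> ['Alice', None, 3, None]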
def collide(l1, l2):
"""
Detect whether l1 and l2 have common elements.
:param list l1: List 1.
:param list l2: List 2.
:rtype: bool
"""
return len(set(l1).intersection(l2)) > 0
wnl = WordNetLemmatizer()
def lemmatize(word):
"""
Helper function of convert.
:param str word: word to convert.
:rtype: str
"""
if word.endswith('ly'):
word = word[:-2]
word = wnl.lemmatize(word, 'v')
word = wnl.lemmatize(word, 'n')
word = wnl.lemmatize(word, 'a')
word = wnl.lemmatize(word, 's')
return word
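# Illustrative examples of the normalization chain above (standard WordNet
# lemmatizer behaviour):
#
#     lemmatize("running")  # -> "run"   (verb lemma)
#     lemmatize("apples")   # -> "apple" (noun lemma)
#     lemmatize("quickly")  # -> "quick" (trailing "ly" stripped first)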
|
py | 1a3ef20fb6cb913cf5bc9d649e22a8b86ad1fef7 | from com.isecpartners.android.jdwp.pluginservice import AbstractJDIPlugin
from com.isecpartners.android.jdwp import DalvikUtils
import com.sun.jdi.event.Event
class TestJythonPlugin(AbstractJDIPlugin):
def __init__(self):
AbstractJDIPlugin.__init__(self,"TestJythonPlugin")
self.output("Python: initalized TestJythonPlugin")
def setupEvents(self):
self.output("Python: setupEvents")
self.createBreakpointRequest("android.util.Log.i")
self.createBreakpointRequest("android.util.Log.d")
self.createBreakpointRequest("android.util.Log.v")
self.createBreakpointRequest("android.util.Log.e")
self.createBreakpointRequest("android.util.Log.w")
def handleEvent(self, event):
# http://docs.oracle.com/javase/1.5.0/docs/guide/jpda/jdi/com/sun/jdi/event/BreakpointEvent.html
vm = event.virtualMachine();
thread = event.thread()
fr0 = thread.frames()[0]
location = fr0.location()
method = location.method()
name = method.name()
dalvikUtils = DalvikUtils(vm,thread)
args = method.variables()
self.output("="*20)
self.output("EVENT: \n\t%s\n" % ( event.toString()))
vals = []
self.output("VARIABLES:\n")
for arg in args:
val = fr0.getValue(arg)
self.output("\t%s = %s\n" % (arg,val))
vals.append(val)
self.output("="*20)
self.resumeEventSet()
|
py | 1a3ef228b1da31e318b0e08fb520086008a3a1ea | import time
from multiprocessing import Pool, cpu_count
import click
from lib.lsun_room_api.lsun_room.item import DataItems
def worker(item):
#item.remap_layout()
item.save_layout()
@click.command()
@click.option('--dataset_root', default='../data/lsun_room/')
def main(dataset_root):
for phase in ['train', 'val']:
print('==> re-label for data in %s phase' % phase)
s = time.time()
dataset = DataItems(root=dataset_root, phase=phase)
with Pool(cpu_count()) as pool:
pool.map(worker, dataset.items)
print('==> Done in %.4f sec.' % (time.time() - s))
if __name__ == '__main__':
main()
|
py | 1a3ef2b3fda539acb33db6f79bd75b36a0f79b07 | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import unittest
import tempfile
from test_dist_fleet_base import TestFleetBase
class TestDistMnistAsync2x2(TestFleetBase):
def _setup_config(self):
self._mode = "async"
self._reader = "pyreader"
def check_with_place(self,
model_file,
delta=1e-3,
check_error_log=False,
need_envs={}):
required_envs = {
"PATH": os.getenv("PATH", ""),
"PYTHONPATH": os.getenv("PYTHONPATH", ""),
"LD_LIBRARY_PATH": os.getenv("LD_LIBRARY_PATH", ""),
"FLAGS_rpc_deadline": "5000", # 5sec to fail fast
"http_proxy": "",
"CPU_NUM": "2"
}
required_envs.update(need_envs)
if check_error_log:
required_envs["GLOG_v"] = "3"
required_envs["GLOG_logtostderr"] = "1"
tr0_losses, tr1_losses = self._run_cluster(model_file, required_envs)
def test_dist_train(self):
self.check_with_place(
"dist_fleet_ctr.py", delta=1e-5, check_error_log=False)
class TestDistCtrHalfAsync2x2(TestFleetBase):
def _setup_config(self):
self._mode = "async"
self._reader = "pyreader"
def check_with_place(self,
model_file,
delta=1e-3,
check_error_log=False,
need_envs={}):
required_envs = {
"PATH": os.getenv("PATH", ""),
"PYTHONPATH": os.getenv("PYTHONPATH", ""),
"LD_LIBRARY_PATH": os.getenv("LD_LIBRARY_PATH", ""),
"FLAGS_rpc_deadline": "30000", # 5sec to fail fast
"http_proxy": "",
"FLAGS_communicator_send_queue_size": "2",
"FLAGS_communicator_max_merge_var_num": "2",
"CPU_NUM": "2",
"SAVE_MODEL": "0"
}
required_envs.update(need_envs)
if check_error_log:
required_envs["GLOG_v"] = "3"
required_envs["GLOG_logtostderr"] = "1"
tr0_losses, tr1_losses = self._run_cluster(model_file, required_envs)
def test_dist_train(self):
self.check_with_place(
"dist_fleet_ctr.py", delta=1e-5, check_error_log=False)
if __name__ == "__main__":
unittest.main()
|
py | 1a3ef2f810a38a7621a263c8af87900cd4661574 | from __future__ import print_function, absolute_import
import sys
import time
from collections import OrderedDict
import torch
import numpy as np
from .evaluation_metrics import cmc, mean_ap
from .tlift import TLift
def pre_tlift(gallery, query):
gal_cam_id = np.array([cam for _, _, cam, _ in gallery])
gal_time = np.array([frame_time for _, _, _, frame_time in gallery])
prob_cam_id = np.array([cam for _, _, cam, _ in query])
prob_time = np.array([frame_time for _, _, _, frame_time in query])
return {'gal_cam_id': gal_cam_id, 'gal_time': gal_time,
'prob_cam_id': prob_cam_id, 'prob_time': prob_time,
'num_cams': gal_cam_id.max() + 1}
def extract_cnn_feature(model, inputs):
model = model.cuda().eval()
with torch.no_grad():
outputs = model(inputs)
outputs = outputs.cpu()
return outputs
def extract_features(model, data_loader, verbose=False):
fea_time = 0
data_time = 0
features = OrderedDict()
labels = OrderedDict()
end = time.time()
if verbose:
print('Extract Features...', end='\t')
for i, (imgs, fnames, pids, _) in enumerate(data_loader):
data_time += time.time() - end
end = time.time()
outputs = extract_cnn_feature(model, imgs)
for fname, output, pid in zip(fnames, outputs, pids):
features[fname] = output
labels[fname] = pid
fea_time += time.time() - end
end = time.time()
if verbose:
print('Feature time: {:.3f} seconds. Data time: {:.3f} seconds.'.format(fea_time, data_time))
return features, labels
def pairwise_distance(matcher, prob_fea, gal_fea, gal_batch_size=4, prob_batch_size=4096):
with torch.no_grad():
num_gals = gal_fea.size(0)
num_probs = prob_fea.size(0)
score = torch.zeros(num_probs, num_gals, device=prob_fea.device)
matcher.eval()
for i in range(0, num_probs, prob_batch_size):
j = min(i + prob_batch_size, num_probs)
matcher.make_kernel(prob_fea[i: j, :, :, :].cuda())
for k in range(0, num_gals, gal_batch_size):
k2 = min(k + gal_batch_size, num_gals)
score[i: j, k: k2] = matcher(gal_fea[k: k2, :, :, :].cuda())
# scale matching scores to make them visually more recognizable
score = torch.sigmoid(score / 10)
return (1. - score).cpu() # [p, g]
def evaluate_all(distmat, query=None, gallery=None,
query_ids=None, gallery_ids=None,
query_cams=None, gallery_cams=None,
cmc_topk=(1, 5, 10, 20)):
if query is not None and gallery is not None:
query_ids = [pid for _, pid, _, _ in query]
gallery_ids = [pid for _, pid, _, _ in gallery]
query_cams = [cam for _, _, cam, _ in query]
gallery_cams = [cam for _, _, cam, _ in gallery]
else:
assert (query_ids is not None and gallery_ids is not None
and query_cams is not None and gallery_cams is not None)
# Compute mean AP
mAP = mean_ap(distmat, query_ids, gallery_ids, query_cams, gallery_cams)
print('Mean AP: {:4.1%}'.format(mAP))
# Compute CMC scores
cmc_configs = {
'market1501': dict(separate_camera_set=False,
single_gallery_shot=False,
first_match_break=True)}
cmc_scores = {name: cmc(distmat, query_ids, gallery_ids,
query_cams, gallery_cams, **params)
for name, params in cmc_configs.items()}
print('CMC Scores')
for k in cmc_topk:
print(' top-{:<4}{:12.1%}'
.format(k, cmc_scores['market1501'][k - 1]))
return cmc_scores['market1501'][0], mAP
def reranking(dist, query_num, k1=20, k2=6, lamda_value=0.3, verbose=False):
original_dist = dist.numpy()
all_num = original_dist.shape[0]
original_dist = np.transpose(original_dist / np.max(original_dist, axis=0))
V = np.zeros_like(original_dist).astype(np.float16)
initial_rank = np.argsort(original_dist).astype(np.int32)
if verbose:
print('starting re_ranking...', end='\t')
for i in range(all_num):
# k-reciprocal neighbors
forward_k_neigh_index = initial_rank[i, :k1 + 1]
backward_k_neigh_index = initial_rank[forward_k_neigh_index, :k1 + 1]
fi = np.where(backward_k_neigh_index == i)[0]
k_reciprocal_index = forward_k_neigh_index[fi]
k_reciprocal_expansion_index = k_reciprocal_index
for j in range(len(k_reciprocal_index)):
candidate = k_reciprocal_index[j]
candidate_forward_k_neigh_index = initial_rank[candidate, :int(np.around(k1 / 2)) + 1]
candidate_backward_k_neigh_index = initial_rank[candidate_forward_k_neigh_index,
:int(np.around(k1 / 2)) + 1]
fi_candidate = np.where(candidate_backward_k_neigh_index == candidate)[0]
candidate_k_reciprocal_index = candidate_forward_k_neigh_index[fi_candidate]
if len(np.intersect1d(candidate_k_reciprocal_index, k_reciprocal_index)) > 2 / 3 * len(
candidate_k_reciprocal_index):
k_reciprocal_expansion_index = np.append(k_reciprocal_expansion_index, candidate_k_reciprocal_index)
k_reciprocal_expansion_index = np.unique(k_reciprocal_expansion_index)
weight = np.exp(-original_dist[i, k_reciprocal_expansion_index])
V[i, k_reciprocal_expansion_index] = weight / np.sum(weight)
original_dist = original_dist[:query_num, ]
if k2 != 1:
V_qe = np.zeros_like(V, dtype=np.float16)
for i in range(all_num):
V_qe[i, :] = np.mean(V[initial_rank[i, :k2], :], axis=0)
V = V_qe
del V_qe
del initial_rank
invIndex = []
for i in range(all_num):
invIndex.append(np.where(V[:, i] != 0)[0])
jaccard_dist = np.zeros_like(original_dist, dtype=np.float16)
for i in range(query_num):
temp_min = np.zeros(shape=[1, all_num], dtype=np.float16)
indNonZero = np.where(V[i, :] != 0)[0]
indImages = []
indImages = [invIndex[ind] for ind in indNonZero]
for j in range(len(indNonZero)):
temp_min[0, indImages[j]] = temp_min[0, indImages[j]] + np.minimum(V[i, indNonZero[j]],
V[indImages[j], indNonZero[j]])
jaccard_dist[i] = 1 - temp_min / (2 - temp_min)
final_dist = jaccard_dist * (1 - lamda_value) + original_dist * lamda_value
del original_dist
del V
del jaccard_dist
final_dist = final_dist[:query_num, query_num:]
return final_dist
class Evaluator(object):
def __init__(self, model):
super(Evaluator, self).__init__()
self.model = model
def evaluate(self, matcher, testset, query_loader, gallery_loader, gal_batch_size=4,
prob_batch_size=4096, tau=100, sigma=200, K=10, alpha=0.2):
query = testset.query
gallery = testset.gallery
print('Compute similarity ...', end='\t')
start = time.time()
prob_fea, _ = extract_features(self.model, query_loader)
prob_fea = torch.cat([prob_fea[f].unsqueeze(0) for f, _, _, _ in query], 0)
num_prob = len(query)
num_gal = len(gallery)
batch_size = gallery_loader.batch_size
dist = torch.zeros(num_prob, num_gal)
for i, (imgs, fnames, pids, _) in enumerate(gallery_loader):
print('Compute similarity %d / %d. \t' % (i + 1, len(gallery_loader)), end='\r', file=sys.stdout.console)
gal_fea = extract_cnn_feature(self.model, imgs)
g0 = i * batch_size
g1 = min(num_gal, (i + 1) * batch_size)
dist[:, g0:g1] = pairwise_distance(matcher, prob_fea, gal_fea, batch_size, prob_batch_size) # [p, g]
print('Time: %.3f seconds.' % (time.time() - start))
rank1, mAP = evaluate_all(dist, query=query, gallery=gallery)
if testset.has_time_info:
num_gal = gal_fea.size(0)
num_prob = prob_fea.size(0)
num_all = num_gal + num_prob
dist_rerank = torch.zeros(num_all, num_all)
print('Compute similarity for rerank...', end='\t')
start = time.time()
with torch.no_grad():
dist_rerank[:num_prob, num_prob:] = dist
dist_rerank[num_prob:, :num_prob] = dist.t()
dist_rerank[:num_prob, :num_prob] = pairwise_distance(matcher, prob_fea, prob_fea, gal_batch_size,
prob_batch_size)
gal_fea, _ = extract_features(self.model, gallery_loader, verbose=True)
gal_fea = torch.cat([gal_fea[f].unsqueeze(0) for f, _, _, _ in gallery], 0)
dist_rerank[num_prob:, num_prob:] = pairwise_distance(matcher, gal_fea, gal_fea, gal_batch_size,
prob_batch_size)
dist_rerank = reranking(dist_rerank, num_prob, verbose=True)
print('Time: %.3f seconds.' % (time.time() - start))
rank1_rerank, mAP_rerank = evaluate_all(dist_rerank, query=query, gallery=gallery)
score_rerank = 1 - dist_rerank
print('Compute TLift...', end='\t')
start = time.time()
pre_tlift_dict = pre_tlift(gallery, query)
score_tlift = TLift(score_rerank, tau=tau, sigma=sigma, K=K, alpha=alpha,
**pre_tlift_dict)
print('Time: %.3f seconds.' % (time.time() - start))
dist_tlift = 1 - score_tlift
rank1_tlift, mAP_tlift = evaluate_all(dist_tlift, query=query, gallery=gallery)
else:
pre_tlift_dict = {'gal_time': 0, 'prob_time': 0}
dist_tlift = 0
dist_rerank = 0
rank1_rerank = 0
mAP_rerank = 0
rank1_tlift = 0
mAP_tlift = 0
return rank1, mAP, rank1_rerank, mAP_rerank, rank1_tlift, mAP_tlift, dist.numpy(), dist_rerank, \
dist_tlift, pre_tlift_dict
|
py | 1a3ef3b79de5d2e2378ec4ab828a5b8902de26c3 | # flake8: noqa
from catalyst.dl import SupervisedRunner as Runner
from experiment import Experiment
from model import Net |
py | 1a3ef45177685541c70b1d0a8d20a1296821b627 | # Copyright (c) 2013, TeamPRO and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from datetime import datetime
from calendar import monthrange
from frappe import _, msgprint
from frappe.utils import flt
def execute(filters=None):
if not filters:
filters = {}
columns = get_columns()
data = []
row = []
conditions, filters = get_conditions(filters)
total = 0
salary_slips = get_salary_slips(conditions, filters)
for ss in salary_slips:
row = []
if ss.client:
row = [ss.client]
else:
row = [""]
if ss.site:
row += [ss.site]
else:
row = [""]
basic = frappe.db.get_value("Salary Detail", {'abbr': 'B', 'parent': ss.name}, ['amount'])
if basic:
row += [basic]
else:
row += [0]
pf = frappe.db.get_value("Salary Detail", {'abbr': 'PF', 'parent': ss.name}, ['amount'])
        if pf:
row += [pf]
else:
row += [0]
esi = frappe.db.get_value("Salary Detail", {'abbr': 'ESI', 'parent': ss.name}, ['amount'])
if esi:
row += [esi]
else:
row += [0]
pt = frappe.db.get_value("Salary Detail", {'abbr': 'PT', 'parent': ss.name}, ['amount'])
if pt:
row += [pt]
else:
row += [0]
ctc = frappe.db.get_value("Salary Detail", {'abbr': 'CTC', 'parent': ss.name}, ['amount'])
if ctc:
row += [ctc]
else:
row += [0]
data.append(row)
return columns, data
def get_columns():
columns = [
_("Client") + ":Data:300",
_("Site") + ":Data:150",
_("Basic") + ":Currency:120",
_("PF") + ":Currency:120",
_("ESI") + ":Currency:120",
_("PT") + ":Currency:120",
_("CTC") + ":Currency:120"
]
return columns
def get_salary_slips(conditions, filters):
# salary_slips = frappe.db.sql("""select sum(`tabSalary Detail`.amount), ss.client_name as client,ss.site as site,ss.name as name from `tabSalary Slip` ss
# left join `tabSalary Detail` on ss.name = `tabSalary Detail`.parent
# where `tabSalary Detail`.salary %s order by site""" % conditions, filters, as_dict=1)
salary_slips = frappe.db.sql("""select ss.client_name as client,ss.site as site,ss.name as name from `tabSalary Slip` ss
where %s order by site""" % conditions, filters, as_dict=1)
return salary_slips
def get_conditions(filters):
    conditions = ["1=1"]
    if filters.get("from_date"):
        conditions.append("start_date >= %(from_date)s")
    if filters.get("to_date"):
        conditions.append("end_date >= %(to_date)s")
    if filters.get("client"):
        conditions.append("client_name = %(client)s")
    if filters.get("site"):
        conditions.append("site = %(site)s")
    return " and ".join(conditions), filters |
py | 1a3ef4ba1fb6ffb712e19f87939bdb7abe02408a | from html.parser import HTMLParser
class CommentHTMLParser(HTMLParser):
def __init__(self):
super(CommentHTMLParser, self).__init__()
self.data = ''
def handle_comment(self, comment):
self.data = comment.strip().rstrip() # Just save the last comment which contains the data
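# Minimal usage sketch (the sample HTML below is hypothetical); feed() is
# inherited from html.parser.HTMLParser and self.data ends up holding the last
# comment encountered, stripped of surrounding whitespace.
if __name__ == '__main__':
    parser = CommentHTMLParser()
    parser.feed('<div><!-- {"payload": 42} --></div>')
    print(parser.data)  # -> {"payload": 42}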
|
py | 1a3ef51f686c7651e497834b27e2083f92623039 |
from .client import Client, Repository
from .server import app
from .errors import *
from .run import run
#from .repository import Repository
|
py | 1a3ef54b296a5ea6a0fbbf21fe3f43800dfcd487 | from Section12_Proxy.VirtualProxy.Bitmap import Bitmap
class LazyBitmap:
def __init__(self, filename):
self.filename = filename
self.bitmap = None
def draw(self):
if not self.bitmap:
self.bitmap = Bitmap(self.filename)
self.bitmap.draw()
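# Minimal usage sketch (assumes Bitmap's constructor loads the image file;
# 'pokemon.jpg' is a hypothetical filename). The point of this virtual proxy is
# that the underlying Bitmap is only constructed on the first draw() call:
#
#     lazy = LazyBitmap('pokemon.jpg')   # nothing loaded yet
#     lazy.draw()                        # Bitmap('pokemon.jpg') created here, then drawn
#     lazy.draw()                        # reuses the already-constructed Bitmap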
|
py | 1a3ef72003535deb5bbfc9fa1a50729d288fa12d | # -*- coding: utf8 -*-
# Copyright (c) 2017-2021 THL A29 Limited, a Tencent company. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# CAM signature/authentication error.
AUTHFAILURE = 'AuthFailure'
# DryRun operation. It means the request would have succeeded, but the DryRun parameter was passed in addition.
DRYRUNOPERATION = 'DryRunOperation'
# Operation failed.
FAILEDOPERATION = 'FailedOperation'
# Internal error.
INTERNALERROR = 'InternalError'
# Database exception.
INTERNALERROR_DBERROR = 'InternalError.DBError'
# Parameter error.
INVALIDPARAMETER = 'InvalidParameter'
# Invalid parameter value.
INVALIDPARAMETERVALUE = 'InvalidParameterValue'
# Name conflict.
INVALIDPARAMETERVALUE_DUPLICATENAME = 'InvalidParameterValue.DuplicateName'
# Quota limit exceeded.
LIMITEXCEEDED = 'LimitExceeded'
# Missing parameter error.
MISSINGPARAMETER = 'MissingParameter'
# Operation denied.
OPERATIONDENIED = 'OperationDenied'
# The number of requests exceeds the frequency limit.
REQUESTLIMITEXCEEDED = 'RequestLimitExceeded'
# The resource is in use.
RESOURCEINUSE = 'ResourceInUse'
# Insufficient resources.
RESOURCEINSUFFICIENT = 'ResourceInsufficient'
# The resource does not exist.
RESOURCENOTFOUND = 'ResourceNotFound'
# The resource is unavailable.
RESOURCEUNAVAILABLE = 'ResourceUnavailable'
# Resources are sold out.
RESOURCESSOLDOUT = 'ResourcesSoldOut'
# Unauthorized operation.
UNAUTHORIZEDOPERATION = 'UnauthorizedOperation'
# Unknown parameter error.
UNKNOWNPARAMETER = 'UnknownParameter'
# Operation not supported.
UNSUPPORTEDOPERATION = 'UnsupportedOperation'
|
py | 1a3ef732d5d0fad3f64f1f4d9a7b91031f10c26e | from dataclasses import dataclass
from typing import List
from stai.types.blockchain_format.coin import Coin
from stai.types.header_block import HeaderBlock
from stai.util.streamable import Streamable, streamable
@dataclass(frozen=True)
@streamable
class HeaderBlockRecord(Streamable):
"""
These are values that are stored in the wallet database, corresponding to information
that the wallet cares about in each block
"""
header: HeaderBlock
additions: List[Coin] # A block record without additions is not finished
removals: List[Coin] # A block record without removals is not finished
@property
def header_hash(self):
return self.header.header_hash
@property
def prev_header_hash(self):
return self.header.prev_header_hash
@property
def height(self):
return self.header.height
@property
def transactions_filter(self):
return self.header.transactions_filter
|
py | 1a3ef752b3e74fe58696daffc1aebba4bdf5ceeb | #!/usr/bin/env python
# -*- coding: utf-8 -*-
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
with open('README.rst') as readme_file:
readme = readme_file.read()
with open('HISTORY.rst') as history_file:
history = history_file.read().replace('.. :changelog:', '')
requirements = [
# TODO: put package requirements here
]
test_requirements = [
# TODO: put package test requirements here
]
setup(
name='pyquestpc',
version='0.1.0',
description="http fetch / debug logging for Python 3.",
long_description=readme + '\n\n' + history,
author="Dmitriy Sintsov",
author_email='[email protected]',
url='https://github.com/Dmitri-Sintsov/pyquestpc',
packages=[
'pyquestpc',
],
package_dir={'pyquestpc':
'pyquestpc'},
include_package_data=True,
install_requires=requirements,
license="ISCL",
zip_safe=False,
keywords='pyquestpc',
classifiers=[
'Intended Audience :: Developers',
'License :: OSI Approved :: ISC License (ISCL)',
'Natural Language :: English',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
test_suite='tests',
tests_require=test_requirements
)
|
py | 1a3ef817d78425a4fb32432e202761a1004096b8 | import os
import numpy as np
import xarray as xr
import netCDF4 as nc
from glob import glob
from functools import partial
from os import makedirs as mkdir
from multiprocessing import get_context
from datetime import datetime, timedelta
os.environ['OMP_NUM_THREADS'] = '1'
# Set up init to use sys.argv later
init = datetime(2020, 10, 28, 0)
nlat, xlat = 30, 50
nlon, xlon = -130, -100
tmpdir = '/scratch/general/lustre/u1070830/nomads_nbm/tmp/'; mkdir(tmpdir, exist_ok=True)
datadir = '/scratch/general/lustre/u1070830/nomads_nbm/'; mkdir(datadir, exist_ok=True)
def download_grib(url, subset_str, tmp):
from subprocess import Popen, call
import requests
filename = url.split('file=')[1].split('&')[0]
filename = filename.replace('.co.', '.%s.'%subset_str)
if not os.path.isfile(tmp + filename):
print('Downloading %s'%filename)
r = requests.get(url, allow_redirects=True)
open(tmp + filename, 'wb').write(r.content)
cmd = 'wget -O "%s" "%s"'%(tmp + filename, url)
Popen(cmd, shell=True)
return filename
def repack_nbm_grib2(f):
import pygrib
import gc
print(f.split('/')[-1])
if not os.path.isfile(f+'.nc'):
try:
grb = pygrib.open(f)
msgs = grb.read()
init = str(msgs[0]).split(':')[-2].split(' ')[-1]
init = datetime.strptime(init, '%Y%m%d%H%M')
fhr = msgs[0]['endStep']
valid = np.datetime64(init + timedelta(hours=fhr))
lons, lats = msgs[0].data()[2], msgs[0].data()[1]
except:
raise
return None
else:
probability, probability_labels = [], []
percentile, percentile_labels = [], []
deterministic, deterministic_labels = [], []
got_deterministic = {i:False for i in [1, 6, 12, 24]}
for msg in msgs:
interval = msg['stepRange'].split('-')
interval = int(interval[1]) - int(interval[0])
if 'Probability of event' in str(msg):
# Probability of event above upper limit (> 0.254) NOT inclusive
threshold = round(msg['upperLimit']/25.4, 2)
probability.append([msg.values])
probability_labels.append([interval, threshold])
elif 'percentileValue' in msg.keys():
percentile.append([msg.values])
percentile_labels.append([interval, msg['percentileValue']])
else:
if got_deterministic[interval] == False:
deterministic_labels.append(interval)
deterministic.append(msg.values)
got_deterministic[interval] = True
else:
pass
grb.close()
gc.collect()
deterministic_labels = np.array(deterministic_labels)
deterministic_labels = deterministic_labels[np.argsort(deterministic_labels)]
deterministic = np.array(deterministic)[np.argsort(deterministic_labels)]
probability = np.array(probability, dtype=object).reshape(-1, lats.shape[0], lats.shape[1])
probability_labels = np.array(probability_labels)
percentile = np.array(percentile, dtype=object).reshape(-1, 99, lats.shape[0], lats.shape[1])
percentile_labels = np.array(percentile_labels)
deterministic = xr.DataArray(deterministic.astype(np.float32), name='pop',
dims=('interval', 'y', 'x'),
coords={'interval':('interval', deterministic_labels),
'lats':(('y', 'x'), lats), 'lons':(('y', 'x'), lons)})
pop = xr.DataArray(probability[:3].astype(np.float32), name='pop',
dims=('interval', 'y', 'x'),
coords={'interval':('interval', probability_labels[:3, 0]),
'lats':(('y', 'x'), lats), 'lons':(('y', 'x'), lons)})
probability = xr.DataArray([probability[2:].astype(np.float32)], name='probability',
dims=('interval', 'threshold', 'y', 'x'),
coords={'interval':('interval', [24]), 'threshold':('threshold', probability_labels[2:,1]),
'lats':(('y', 'x'), lats), 'lons':(('y', 'x'), lons)})
percentile = xr.DataArray(percentile.astype(np.float32), name='percentile',
dims=('interval', 'percentile', 'y', 'x'),
coords={'interval':('interval', np.unique(percentile_labels[:, 0])),
'percentile':('percentile', range(1, 100)),
'lats':(('y', 'x'), lats), 'lons':(('y', 'x'), lons)})
ds = xr.Dataset()
# ds['fhr'] = fhr
ds['time'] = valid
ds.attrs['InitTime'] = str(init)
ds['qpf'] = deterministic
ds['pop'] = pop
ds['probx'] = probability
ds['pqpf'] = percentile
ds.to_netcdf(f+'.nc')
del ds
gc.collect()
return None
else:
print('Found: %s, skipping'%f.split('/')[-1])
def write_output(output, ncfilename):
lat, lon = output['lats'], output['lons']
os.makedirs(tmpdir, exist_ok=True)
with nc.Dataset(tmpdir + ncfilename, 'w', format='NETCDF4') as ncfile:
ncfile.nx = str(lon.shape[1])
ncfile.ny = str(lon.shape[0])
ncfile.InitTime = output.attrs['InitTime']
# Lat Lon dimensions and data
ncfile.createDimension('lon', lon.shape[1])
ncfile.createDimension('lat', lon.shape[0])
ncfile.createDimension('time', None)
ncfile.createDimension('interval', output['interval'].size)
ncfile.createDimension('percentile', output['percentile'].size)
ncfile.createDimension('threshold', output['threshold'].size)
lon_nc = ncfile.createVariable('lon', 'f4', ('lat', 'lon'))
lon_nc.long_name = 'longitude'
lon_nc.units = 'degrees_east'
lon_nc.standard_name = 'longitude'
lon_nc._CoordinateAxisType = 'Lon'
lat_nc = ncfile.createVariable('lat', 'f4', ('lat', 'lon'))
lat_nc.long_name = 'latitude'
lat_nc.units = 'degrees_north'
lat_nc.standard_name = 'latitude'
lat_nc._CoordinateAxisType = 'Lat'
lon_nc[:] = output.lons.values
lat_nc[:] = output.lats.values
interval = ncfile.createVariable('interval', 'short', ('interval'))
interval.long_name = 'accumulation interval'
interval.units = 'hours'
interval.standard_name = 'interval'
interval[:] = output['interval'].values.astype(int)
percentile = ncfile.createVariable('percentile', 'short', ('percentile'),
zlib=True, complevel=9)
percentile.long_name = 'accumulation percentile'
percentile.units = 'none'
percentile.standard_name = 'percentile'
percentile[:] = output['percentile'].values.astype(int)
threshold = ncfile.createVariable('threshold', 'f4', ('threshold'),
zlib=True, complevel=9)
threshold.long_name = 'probabiity of exceedence threshold'
threshold.units = 'in'
threshold.standard_name = 'threshold'
threshold[:] = output['threshold'].values
# Write variable data
qpf_nc = ncfile.createVariable('qpf', 'f4', ('time', 'interval', 'lat', 'lon'),
fill_value=-9999.0, zlib=True, complevel=9)
qpf_nc.long_name = 'Deterministic QPF'
qpf_nc.level = '0'
qpf_nc.units = 'in'
qpf_nc[:] = output['qpf'].values
# pop_nc = ncfile.createVariable('pop', 'f4', ('time', 'interval', 'lat', 'lon'),
# fill_value=-9999.0, zlib=True, complevel=9)
# pop_nc.long_name = 'Probability of Precipitation (> 0.01")'
# pop_nc.level = '0'
# pop_nc.units = 'in'
# pop_nc[:] = output['pop'].values
pqpf_nc = ncfile.createVariable('pqpf', 'f4', ('time', 'interval', 'percentile', 'lat', 'lon'),
fill_value=-9999.0, zlib=True, complevel=9)
pqpf_nc.long_name = 'Probabilistic QPF'
pqpf_nc.level = '0'
pqpf_nc.units = 'in'
pqpf_nc[:] = output['pqpf'].values
probx_nc = ncfile.createVariable('probx', 'f4', ('time', 'interval', 'threshold', 'lat', 'lon'),
fill_value=-9999.0, zlib=True, complevel=9)
probx_nc.long_name = 'Probability of Exceedence'
probx_nc.level = '0'
probx_nc.units = '%'
probx_nc[:] = output['probx'].values
print(ncfile)
if __name__ == '__main__':
yyyy, mm, dd, hh = init.year, init.month, init.day, init.hour
base = 'https://nomads.ncep.noaa.gov/cgi-bin/filter_blend.pl?'
var = '&var_APCP=on'
region = '&subregion=&leftlon={:.2f}&rightlon={:.2f}&toplat={:.2f}&bottomlat={:.2f}'.format(nlon, xlon, xlat, nlat)
mdir = '&dir=%2Fblend.{:04d}{:02d}{:02d}%2F{:02d}%2Fqmd'.format(yyyy, mm, dd, hh)
url_list = []
# Need to fix the data processing below to allow for sub24 leads
for fhr in np.arange(24, 180+1, 6):
file = 'file=blend.t{:02d}z.qmd.f{:03d}.co.grib2'.format(hh, fhr)
url_list.append(base + file + var + region + mdir)
download_grib_mp = partial(download_grib, subset_str='WR', tmp=tmpdir)
with get_context('forkserver').Pool(len(url_list)) as p:
flist = p.imap_unordered(download_grib_mp, url_list, chunksize=1)
p.close()
p.join()
flist = sorted(flist)
filelist = sorted(glob(tmpdir + '*.grib2'))
print(filelist[0])
repack_nbm_grib2(filelist[0])
# with get_context('forkserver').Pool(6) as p:
# output = p.imap_unordered(repack_nbm_grib2, filelist, chunksize=1)
# p.close()
# p.join()
# output = [xr.open_dataset(f+'.nc') for f in filelist]
# output = xr.concat([i for i in output if i is not None], dim='time')
# write_output(output)
# compress = {'compression':'gzip', 'compression_opts':9}
# encoding = {var:compress for var in output.data_vars if var != 'time'}
# output.to_netcdf(tmpdir + './test_output.nc', engine='h5netcdf', encoding=encoding) |
py | 1a3ef82efa1c863352495901172c67952a8901f4 | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
from bpy.types import Header, Menu, Panel
from bl_ui.space_dopesheet import (
DopesheetFilterPopoverBase,
dopesheet_filter,
)
class GRAPH_HT_header(Header):
bl_space_type = 'GRAPH_EDITOR'
def draw(self, context):
layout = self.layout
tool_settings = context.tool_settings
st = context.space_data
layout.template_header()
# Now a exposed as a sub-space type
# layout.prop(st, "mode", text="")
GRAPH_MT_editor_menus.draw_collapsible(context, layout)
row = layout.row(align=True)
row.prop(st, "use_normalization", icon='NORMALIZE_FCURVES', text="Normalize", toggle=True)
sub = row.row(align=True)
sub.active = st.use_normalization
sub.prop(st, "use_auto_normalization", icon='FILE_REFRESH', text="", toggle=True)
layout.separator_spacer()
dopesheet_filter(layout, context)
row = layout.row(align=True)
if st.has_ghost_curves:
row.operator("graph.ghost_curves_clear", text="", icon='X')
else:
row.operator("graph.ghost_curves_create", text="", icon='FCURVE_SNAPSHOT')
layout.popover(
panel="GRAPH_PT_filters",
text="",
icon='FILTER',
)
layout.prop(st, "pivot_point", icon_only=True)
layout.prop(st, "auto_snap", text="")
row = layout.row(align=True)
row.prop(tool_settings, "use_proportional_fcurve", text="", icon_only=True)
sub = row.row(align=True)
sub.active = tool_settings.use_proportional_fcurve
sub.prop(tool_settings, "proportional_edit_falloff", text="", icon_only=True)
class GRAPH_PT_filters(DopesheetFilterPopoverBase, Panel):
bl_space_type = 'GRAPH_EDITOR'
bl_region_type = 'HEADER'
bl_label = "Filters"
def draw(self, context):
layout = self.layout
DopesheetFilterPopoverBase.draw_generic_filters(context, layout)
layout.separator()
DopesheetFilterPopoverBase.draw_search_filters(context, layout)
layout.separator()
DopesheetFilterPopoverBase.draw_standard_filters(context, layout)
class GRAPH_MT_editor_menus(Menu):
bl_idname = "GRAPH_MT_editor_menus"
bl_label = ""
def draw(self, _context):
layout = self.layout
layout.menu("GRAPH_MT_view")
layout.menu("GRAPH_MT_select")
layout.menu("GRAPH_MT_marker")
layout.menu("GRAPH_MT_channel")
layout.menu("GRAPH_MT_key")
class GRAPH_MT_view(Menu):
bl_label = "View"
def draw(self, context):
layout = self.layout
st = context.space_data
layout.prop(st, "show_region_ui")
layout.separator()
layout.prop(st, "use_realtime_update")
layout.prop(st, "show_cursor")
layout.prop(st, "show_sliders")
layout.prop(st, "show_group_colors")
layout.prop(st, "show_marker_lines")
layout.prop(st, "use_auto_merge_keyframes")
layout.separator()
layout.prop(st, "use_beauty_drawing")
layout.separator()
layout.prop(st, "show_handles")
layout.prop(st, "use_only_selected_curves_handles")
layout.prop(st, "use_only_selected_keyframe_handles")
layout.prop(st, "show_seconds")
layout.prop(st, "show_locked_time")
layout.separator()
layout.operator("anim.previewrange_set")
layout.operator("anim.previewrange_clear")
layout.operator("graph.previewrange_set")
layout.separator()
layout.operator("graph.view_all")
layout.operator("graph.view_selected")
layout.operator("graph.view_frame")
# Add this to show key-binding (reverse action in dope-sheet).
layout.separator()
props = layout.operator("wm.context_set_enum", text="Toggle Dope Sheet")
props.data_path = "area.type"
props.value = 'DOPESHEET_EDITOR'
layout.separator()
layout.menu("INFO_MT_area")
class GRAPH_MT_select(Menu):
bl_label = "Select"
def draw(self, _context):
layout = self.layout
layout.operator("graph.select_all", text="All").action = 'SELECT'
layout.operator("graph.select_all", text="None").action = 'DESELECT'
layout.operator("graph.select_all", text="Invert").action = 'INVERT'
layout.separator()
props = layout.operator("graph.select_box")
props.axis_range = False
props.include_handles = False
props = layout.operator("graph.select_box", text="Box Select (Axis Range)")
props.axis_range = True
props.include_handles = False
props = layout.operator("graph.select_box", text="Box Select (Include Handles)")
props.axis_range = False
props.include_handles = True
layout.operator("graph.select_circle")
layout.separator()
layout.operator("graph.select_column", text="Columns on Selected Keys").mode = 'KEYS'
layout.operator("graph.select_column", text="Column on Current Frame").mode = 'CFRA'
layout.operator("graph.select_column", text="Columns on Selected Markers").mode = 'MARKERS_COLUMN'
layout.operator("graph.select_column", text="Between Selected Markers").mode = 'MARKERS_BETWEEN'
layout.separator()
props = layout.operator("graph.select_leftright", text="Before Current Frame")
props.extend = False
props.mode = 'LEFT'
props = layout.operator("graph.select_leftright", text="After Current Frame")
props.extend = False
props.mode = 'RIGHT'
layout.separator()
layout.operator("graph.select_more")
layout.operator("graph.select_less")
layout.separator()
layout.operator("graph.select_linked")
class GRAPH_MT_marker(Menu):
bl_label = "Marker"
def draw(self, context):
layout = self.layout
from bl_ui.space_time import marker_menu_generic
marker_menu_generic(layout, context)
# TODO: pose markers for action edit mode only?
class GRAPH_MT_channel(Menu):
bl_label = "Channel"
def draw(self, context):
layout = self.layout
layout.operator_context = 'INVOKE_REGION_CHANNELS'
layout.operator("anim.channels_delete")
if context.space_data.mode == 'DRIVERS':
layout.operator("graph.driver_delete_invalid")
layout.separator()
layout.operator("anim.channels_group")
layout.operator("anim.channels_ungroup")
layout.separator()
layout.operator_menu_enum("anim.channels_setting_toggle", "type")
layout.operator_menu_enum("anim.channels_setting_enable", "type")
layout.operator_menu_enum("anim.channels_setting_disable", "type")
layout.separator()
layout.operator("anim.channels_editable_toggle")
layout.operator_menu_enum("graph.extrapolation_type", "type", text="Extrapolation Mode")
layout.separator()
layout.operator("graph.hide", text="Hide Selected Curves").unselected = False
layout.operator("graph.hide", text="Hide Unselected Curves").unselected = True
layout.operator("graph.reveal")
layout.separator()
layout.operator("anim.channels_expand")
layout.operator("anim.channels_collapse")
layout.separator()
layout.operator_menu_enum("anim.channels_move", "direction", text="Move...")
layout.separator()
layout.operator("anim.channels_fcurves_enable")
class GRAPH_MT_key(Menu):
bl_label = "Key"
def draw(self, _context):
layout = self.layout
layout.menu("GRAPH_MT_key_transform", text="Transform")
layout.operator_menu_enum("graph.snap", "type", text="Snap")
layout.operator_menu_enum("graph.mirror", "type", text="Mirror")
layout.separator()
layout.operator_menu_enum("graph.keyframe_insert", "type")
layout.operator_menu_enum("graph.fmodifier_add", "type")
layout.operator("graph.sound_bake")
layout.separator()
layout.operator("graph.frame_jump")
layout.separator()
layout.operator("graph.copy")
layout.operator("graph.paste")
layout.operator("graph.paste", text="Paste Flipped").flipped = True
layout.operator("graph.duplicate_move")
layout.operator("graph.delete")
layout.separator()
layout.operator_menu_enum("graph.handle_type", "type", text="Handle Type")
layout.operator_menu_enum("graph.interpolation_type", "type", text="Interpolation Mode")
layout.operator_menu_enum("graph.easing_type", "type", text="Easing Type")
layout.separator()
layout.operator("graph.clean").channels = False
layout.operator("graph.clean", text="Clean Channels").channels = True
layout.operator("graph.smooth")
layout.operator("graph.sample")
layout.operator("graph.bake")
layout.separator()
layout.operator("graph.euler_filter", text="Discontinuity (Euler) Filter")
class GRAPH_MT_key_transform(Menu):
bl_label = "Transform"
def draw(self, _context):
layout = self.layout
layout.operator("transform.translate", text="Move")
layout.operator("transform.transform", text="Extend").mode = 'TIME_EXTEND'
layout.operator("transform.rotate", text="Rotate")
layout.operator("transform.resize", text="Scale")
class GRAPH_MT_delete(Menu):
bl_label = "Delete"
def draw(self, _context):
layout = self.layout
layout.operator("graph.delete")
layout.separator()
layout.operator("graph.clean").channels = False
layout.operator("graph.clean", text="Clean Channels").channels = True
class GRAPH_MT_context_menu(Menu):
bl_label = "F-Curve Context Menu"
def draw(self, _context):
layout = self.layout
layout.operator_context = 'INVOKE_DEFAULT'
layout.operator("graph.copy", text="Copy", icon='COPYDOWN')
layout.operator("graph.paste", text="Paste", icon='PASTEDOWN')
layout.operator("graph.paste", text="Paste Flipped", icon='PASTEFLIPDOWN').flipped = True
layout.separator()
layout.operator_menu_enum("graph.handle_type", "type", text="Handle Type")
layout.operator_menu_enum("graph.interpolation_type", "type", text="Interpolation Mode")
layout.operator_menu_enum("graph.easing_type", "type", text="Easing Type")
layout.separator()
layout.operator("graph.keyframe_insert").type = 'SEL'
layout.operator("graph.duplicate_move")
layout.operator_context = 'EXEC_REGION_WIN'
layout.operator("graph.delete")
layout.separator()
layout.operator_menu_enum("graph.mirror", "type", text="Mirror")
layout.operator_menu_enum("graph.snap", "type", text="Snap")
class GRAPH_MT_pivot_pie(Menu):
bl_label = "Pivot Point"
def draw(self, context):
layout = self.layout
pie = layout.menu_pie()
pie.prop_enum(context.space_data, "pivot_point", value='BOUNDING_BOX_CENTER')
pie.prop_enum(context.space_data, "pivot_point", value='CURSOR')
pie.prop_enum(context.space_data, "pivot_point", value='INDIVIDUAL_ORIGINS')
class GRAPH_MT_snap_pie(Menu):
bl_label = "Snap"
def draw(self, _context):
layout = self.layout
pie = layout.menu_pie()
pie.operator("graph.snap", text="Current Frame").type = 'CFRA'
pie.operator("graph.snap", text="Cursor Value").type = 'VALUE'
pie.operator("graph.snap", text="Nearest Frame").type = 'NEAREST_FRAME'
pie.operator("graph.snap", text="Nearest Second").type = 'NEAREST_SECOND'
pie.operator("graph.snap", text="Nearest Marker").type = 'NEAREST_MARKER'
pie.operator("graph.snap", text="Flatten Handles").type = 'HORIZONTAL'
class GRAPH_MT_channel_context_menu(Menu):
bl_label = "F-Curve Channel Context Menu"
def draw(self, context):
layout = self.layout
st = context.space_data
layout.separator()
layout.operator("anim.channels_setting_enable", text="Mute Channels").type = 'MUTE'
layout.operator("anim.channels_setting_disable", text="Unmute Channels").type = 'MUTE'
layout.separator()
layout.operator("anim.channels_setting_enable", text="Protect Channels").type = 'PROTECT'
layout.operator("anim.channels_setting_disable", text="Unprotect Channels").type = 'PROTECT'
layout.separator()
layout.operator("anim.channels_group")
layout.operator("anim.channels_ungroup")
layout.separator()
layout.operator("anim.channels_editable_toggle")
layout.operator_menu_enum("graph.extrapolation_type", "type", text="Extrapolation Mode")
layout.separator()
layout.operator("graph.hide", text="Hide Selected Curves").unselected = False
layout.operator("graph.hide", text="Hide Unselected Curves").unselected = True
layout.operator("graph.reveal")
layout.separator()
layout.operator("anim.channels_expand")
layout.operator("anim.channels_collapse")
layout.separator()
layout.operator_menu_enum("anim.channels_move", "direction", text="Move...")
layout.separator()
layout.operator("anim.channels_delete")
if st.mode == 'DRIVERS':
layout.operator("graph.driver_delete_invalid")
classes = (
GRAPH_HT_header,
GRAPH_MT_editor_menus,
GRAPH_MT_view,
GRAPH_MT_select,
GRAPH_MT_marker,
GRAPH_MT_channel,
GRAPH_MT_key,
GRAPH_MT_key_transform,
GRAPH_MT_delete,
GRAPH_MT_context_menu,
GRAPH_MT_channel_context_menu,
GRAPH_MT_pivot_pie,
GRAPH_MT_snap_pie,
GRAPH_PT_filters,
)
if __name__ == "__main__": # only for live edit.
from bpy.utils import register_class
for cls in classes:
register_class(cls)
|
py | 1a3ef8611dd5997423b50cae59d544966daa9b68 | # coding: utf-8
"""
PoolStatistics.py
The Clear BSD License
Copyright (c) – 2016, NetApp, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted (subject to the limitations in the disclaimer below) provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
* Neither the name of NetApp, Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from pprint import pformat
from six import iteritems
class PoolStatistics(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self):
"""
PoolStatistics - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'observed_time': 'datetime', # (required parameter)
'observed_time_in_ms': 'int', # (required parameter)
'last_reset_time': 'datetime', # (required parameter)
'last_reset_time_in_ms': 'int', # (required parameter)
'array_id': 'str', # (required parameter)
'array_wwn': 'str', # (required parameter)
'member_ids_hash': 'str', # (required parameter)
'read_ops': 'float', # (required parameter)
'read_hit_ops': 'float', # (required parameter)
'read_hit_bytes': 'float', # (required parameter)
'read_time_total': 'float', # (required parameter)
'read_hit_time_total': 'float', # (required parameter)
'write_ops': 'float', # (required parameter)
'write_time_total': 'float', # (required parameter)
'write_hit_time_total': 'float', # (required parameter)
'err_redundancy_chk_indeterminate_reads': 'float', # (required parameter)
'err_redundancy_chk_recovered_reads': 'float', # (required parameter)
'err_redundancy_chk_unrecovered_reads': 'float', # (required parameter)
'idle_time': 'float', # (required parameter)
'other_ops': 'float', # (required parameter)
'other_time_max': 'float', # (required parameter)
'other_time_total': 'float', # (required parameter)
'read_bytes': 'float', # (required parameter)
'read_hit_time_max': 'float', # (required parameter)
'read_time_max': 'float', # (required parameter)
'write_bytes': 'float', # (required parameter)
'write_hit_bytes': 'float', # (required parameter)
'write_hit_ops': 'float', # (required parameter)
'write_hit_time_max': 'float', # (required parameter)
'write_time_max': 'float', # (required parameter)
'queue_depth_total': 'float', # (required parameter)
'queue_depth_max': 'float', # (required parameter)
'flash_cache_read_hit_ops': 'float', # (required parameter)
'flash_cache_read_hit_bytes': 'float', # (required parameter)
'flash_cache_read_hit_time_total': 'float', # (required parameter)
'flash_cache_read_hit_time_max': 'float', # (required parameter)
'pool_id': 'str'
}
self.attribute_map = {
'observed_time': 'observedTime', # (required parameter)
'observed_time_in_ms': 'observedTimeInMS', # (required parameter)
'last_reset_time': 'lastResetTime', # (required parameter)
'last_reset_time_in_ms': 'lastResetTimeInMS', # (required parameter)
'array_id': 'arrayId', # (required parameter)
'array_wwn': 'arrayWwn', # (required parameter)
'member_ids_hash': 'memberIdsHash', # (required parameter)
'read_ops': 'readOps', # (required parameter)
'read_hit_ops': 'readHitOps', # (required parameter)
'read_hit_bytes': 'readHitBytes', # (required parameter)
'read_time_total': 'readTimeTotal', # (required parameter)
'read_hit_time_total': 'readHitTimeTotal', # (required parameter)
'write_ops': 'writeOps', # (required parameter)
'write_time_total': 'writeTimeTotal', # (required parameter)
'write_hit_time_total': 'writeHitTimeTotal', # (required parameter)
'err_redundancy_chk_indeterminate_reads': 'errRedundancyChkIndeterminateReads', # (required parameter)
'err_redundancy_chk_recovered_reads': 'errRedundancyChkRecoveredReads', # (required parameter)
'err_redundancy_chk_unrecovered_reads': 'errRedundancyChkUnrecoveredReads', # (required parameter)
'idle_time': 'idleTime', # (required parameter)
'other_ops': 'otherOps', # (required parameter)
'other_time_max': 'otherTimeMax', # (required parameter)
'other_time_total': 'otherTimeTotal', # (required parameter)
'read_bytes': 'readBytes', # (required parameter)
'read_hit_time_max': 'readHitTimeMax', # (required parameter)
'read_time_max': 'readTimeMax', # (required parameter)
'write_bytes': 'writeBytes', # (required parameter)
'write_hit_bytes': 'writeHitBytes', # (required parameter)
'write_hit_ops': 'writeHitOps', # (required parameter)
'write_hit_time_max': 'writeHitTimeMax', # (required parameter)
'write_time_max': 'writeTimeMax', # (required parameter)
'queue_depth_total': 'queueDepthTotal', # (required parameter)
'queue_depth_max': 'queueDepthMax', # (required parameter)
'flash_cache_read_hit_ops': 'flashCacheReadHitOps', # (required parameter)
'flash_cache_read_hit_bytes': 'flashCacheReadHitBytes', # (required parameter)
'flash_cache_read_hit_time_total': 'flashCacheReadHitTimeTotal', # (required parameter)
'flash_cache_read_hit_time_max': 'flashCacheReadHitTimeMax', # (required parameter)
'pool_id': 'poolId'
}
self._observed_time = None
self._observed_time_in_ms = None
self._last_reset_time = None
self._last_reset_time_in_ms = None
self._array_id = None
self._array_wwn = None
self._member_ids_hash = None
self._read_ops = None
self._read_hit_ops = None
self._read_hit_bytes = None
self._read_time_total = None
self._read_hit_time_total = None
self._write_ops = None
self._write_time_total = None
self._write_hit_time_total = None
self._err_redundancy_chk_indeterminate_reads = None
self._err_redundancy_chk_recovered_reads = None
self._err_redundancy_chk_unrecovered_reads = None
self._idle_time = None
self._other_ops = None
self._other_time_max = None
self._other_time_total = None
self._read_bytes = None
self._read_hit_time_max = None
self._read_time_max = None
self._write_bytes = None
self._write_hit_bytes = None
self._write_hit_ops = None
self._write_hit_time_max = None
self._write_time_max = None
self._queue_depth_total = None
self._queue_depth_max = None
self._flash_cache_read_hit_ops = None
self._flash_cache_read_hit_bytes = None
self._flash_cache_read_hit_time_total = None
self._flash_cache_read_hit_time_max = None
self._pool_id = None
@property
def observed_time(self):
"""
Gets the observed_time of this PoolStatistics.
End time for this collection as measured by the number of seconds since baseTime.
:return: The observed_time of this PoolStatistics.
:rtype: datetime
:required/optional: required
"""
return self._observed_time
@observed_time.setter
def observed_time(self, observed_time):
"""
Sets the observed_time of this PoolStatistics.
End time for this collection as measured by the number of seconds since baseTime.
:param observed_time: The observed_time of this PoolStatistics.
:type: datetime
"""
self._observed_time = observed_time
@property
def observed_time_in_ms(self):
"""
Gets the observed_time_in_ms of this PoolStatistics.
:return: The observed_time_in_ms of this PoolStatistics.
:rtype: int
:required/optional: required
"""
return self._observed_time_in_ms
@observed_time_in_ms.setter
def observed_time_in_ms(self, observed_time_in_ms):
"""
Sets the observed_time_in_ms of this PoolStatistics.
:param observed_time_in_ms: The observed_time_in_ms of this PoolStatistics.
:type: int
"""
self._observed_time_in_ms = observed_time_in_ms
@property
def last_reset_time(self):
"""
Gets the last_reset_time of this PoolStatistics.
:return: The last_reset_time of this PoolStatistics.
:rtype: datetime
:required/optional: required
"""
return self._last_reset_time
@last_reset_time.setter
def last_reset_time(self, last_reset_time):
"""
Sets the last_reset_time of this PoolStatistics.
:param last_reset_time: The last_reset_time of this PoolStatistics.
:type: datetime
"""
self._last_reset_time = last_reset_time
@property
def last_reset_time_in_ms(self):
"""
Gets the last_reset_time_in_ms of this PoolStatistics.
:return: The last_reset_time_in_ms of this PoolStatistics.
:rtype: int
:required/optional: required
"""
return self._last_reset_time_in_ms
@last_reset_time_in_ms.setter
def last_reset_time_in_ms(self, last_reset_time_in_ms):
"""
Sets the last_reset_time_in_ms of this PoolStatistics.
:param last_reset_time_in_ms: The last_reset_time_in_ms of this PoolStatistics.
:type: int
"""
self._last_reset_time_in_ms = last_reset_time_in_ms
@property
def array_id(self):
"""
Gets the array_id of this PoolStatistics.
:return: The array_id of this PoolStatistics.
:rtype: str
:required/optional: required
"""
return self._array_id
@array_id.setter
def array_id(self, array_id):
"""
Sets the array_id of this PoolStatistics.
:param array_id: The array_id of this PoolStatistics.
:type: str
"""
self._array_id = array_id
@property
def array_wwn(self):
"""
Gets the array_wwn of this PoolStatistics.
:return: The array_wwn of this PoolStatistics.
:rtype: str
:required/optional: required
"""
return self._array_wwn
@array_wwn.setter
def array_wwn(self, array_wwn):
"""
Sets the array_wwn of this PoolStatistics.
:param array_wwn: The array_wwn of this PoolStatistics.
:type: str
"""
self._array_wwn = array_wwn
@property
def member_ids_hash(self):
"""
Gets the member_ids_hash of this PoolStatistics.
Hash of member volume ids.
:return: The member_ids_hash of this PoolStatistics.
:rtype: str
:required/optional: required
"""
return self._member_ids_hash
@member_ids_hash.setter
def member_ids_hash(self, member_ids_hash):
"""
Sets the member_ids_hash of this PoolStatistics.
Hash of member volume ids.
:param member_ids_hash: The member_ids_hash of this PoolStatistics.
:type: str
"""
self._member_ids_hash = member_ids_hash
@property
def read_ops(self):
"""
Gets the read_ops of this PoolStatistics.
Number of read operations
:return: The read_ops of this PoolStatistics.
:rtype: float
:required/optional: required
"""
return self._read_ops
@read_ops.setter
def read_ops(self, read_ops):
"""
Sets the read_ops of this PoolStatistics.
Number of read operations
:param read_ops: The read_ops of this PoolStatistics.
:type: float
"""
self._read_ops = read_ops
@property
def read_hit_ops(self):
"""
Gets the read_hit_ops of this PoolStatistics.
Number of read operations that hit cache
:return: The read_hit_ops of this PoolStatistics.
:rtype: float
:required/optional: required
"""
return self._read_hit_ops
@read_hit_ops.setter
def read_hit_ops(self, read_hit_ops):
"""
Sets the read_hit_ops of this PoolStatistics.
Number of read operations that hit cache
:param read_hit_ops: The read_hit_ops of this PoolStatistics.
:type: float
"""
self._read_hit_ops = read_hit_ops
@property
def read_hit_bytes(self):
"""
Gets the read_hit_bytes of this PoolStatistics.
Number of bytes read from cache
:return: The read_hit_bytes of this PoolStatistics.
:rtype: float
:required/optional: required
"""
return self._read_hit_bytes
@read_hit_bytes.setter
def read_hit_bytes(self, read_hit_bytes):
"""
Sets the read_hit_bytes of this PoolStatistics.
Number of bytes read from cache
:param read_hit_bytes: The read_hit_bytes of this PoolStatistics.
:type: float
"""
self._read_hit_bytes = read_hit_bytes
@property
def read_time_total(self):
"""
Gets the read_time_total of this PoolStatistics.
:return: The read_time_total of this PoolStatistics.
:rtype: float
:required/optional: required
"""
return self._read_time_total
@read_time_total.setter
def read_time_total(self, read_time_total):
"""
Sets the read_time_total of this PoolStatistics.
:param read_time_total: The read_time_total of this PoolStatistics.
:type: float
"""
self._read_time_total = read_time_total
@property
def read_hit_time_total(self):
"""
Gets the read_hit_time_total of this PoolStatistics.
:return: The read_hit_time_total of this PoolStatistics.
:rtype: float
:required/optional: required
"""
return self._read_hit_time_total
@read_hit_time_total.setter
def read_hit_time_total(self, read_hit_time_total):
"""
Sets the read_hit_time_total of this PoolStatistics.
:param read_hit_time_total: The read_hit_time_total of this PoolStatistics.
:type: float
"""
self._read_hit_time_total = read_hit_time_total
@property
def write_ops(self):
"""
Gets the write_ops of this PoolStatistics.
Number of write operations
:return: The write_ops of this PoolStatistics.
:rtype: float
:required/optional: required
"""
return self._write_ops
@write_ops.setter
def write_ops(self, write_ops):
"""
Sets the write_ops of this PoolStatistics.
Number of write operations
:param write_ops: The write_ops of this PoolStatistics.
:type: float
"""
self._write_ops = write_ops
@property
def write_time_total(self):
"""
Gets the write_time_total of this PoolStatistics.
:return: The write_time_total of this PoolStatistics.
:rtype: float
:required/optional: required
"""
return self._write_time_total
@write_time_total.setter
def write_time_total(self, write_time_total):
"""
Sets the write_time_total of this PoolStatistics.
:param write_time_total: The write_time_total of this PoolStatistics.
:type: float
"""
self._write_time_total = write_time_total
@property
def write_hit_time_total(self):
"""
Gets the write_hit_time_total of this PoolStatistics.
:return: The write_hit_time_total of this PoolStatistics.
:rtype: float
:required/optional: required
"""
return self._write_hit_time_total
@write_hit_time_total.setter
def write_hit_time_total(self, write_hit_time_total):
"""
Sets the write_hit_time_total of this PoolStatistics.
:param write_hit_time_total: The write_hit_time_total of this PoolStatistics.
:type: float
"""
self._write_hit_time_total = write_hit_time_total
@property
def err_redundancy_chk_indeterminate_reads(self):
"""
Gets the err_redundancy_chk_indeterminate_reads of this PoolStatistics.
:return: The err_redundancy_chk_indeterminate_reads of this PoolStatistics.
:rtype: float
:required/optional: required
"""
return self._err_redundancy_chk_indeterminate_reads
@err_redundancy_chk_indeterminate_reads.setter
def err_redundancy_chk_indeterminate_reads(self, err_redundancy_chk_indeterminate_reads):
"""
Sets the err_redundancy_chk_indeterminate_reads of this PoolStatistics.
:param err_redundancy_chk_indeterminate_reads: The err_redundancy_chk_indeterminate_reads of this PoolStatistics.
:type: float
"""
self._err_redundancy_chk_indeterminate_reads = err_redundancy_chk_indeterminate_reads
@property
def err_redundancy_chk_recovered_reads(self):
"""
Gets the err_redundancy_chk_recovered_reads of this PoolStatistics.
:return: The err_redundancy_chk_recovered_reads of this PoolStatistics.
:rtype: float
:required/optional: required
"""
return self._err_redundancy_chk_recovered_reads
@err_redundancy_chk_recovered_reads.setter
def err_redundancy_chk_recovered_reads(self, err_redundancy_chk_recovered_reads):
"""
Sets the err_redundancy_chk_recovered_reads of this PoolStatistics.
:param err_redundancy_chk_recovered_reads: The err_redundancy_chk_recovered_reads of this PoolStatistics.
:type: float
"""
self._err_redundancy_chk_recovered_reads = err_redundancy_chk_recovered_reads
@property
def err_redundancy_chk_unrecovered_reads(self):
"""
Gets the err_redundancy_chk_unrecovered_reads of this PoolStatistics.
:return: The err_redundancy_chk_unrecovered_reads of this PoolStatistics.
:rtype: float
:required/optional: required
"""
return self._err_redundancy_chk_unrecovered_reads
@err_redundancy_chk_unrecovered_reads.setter
def err_redundancy_chk_unrecovered_reads(self, err_redundancy_chk_unrecovered_reads):
"""
Sets the err_redundancy_chk_unrecovered_reads of this PoolStatistics.
:param err_redundancy_chk_unrecovered_reads: The err_redundancy_chk_unrecovered_reads of this PoolStatistics.
:type: float
"""
self._err_redundancy_chk_unrecovered_reads = err_redundancy_chk_unrecovered_reads
@property
def idle_time(self):
"""
Gets the idle_time of this PoolStatistics.
:return: The idle_time of this PoolStatistics.
:rtype: float
:required/optional: required
"""
return self._idle_time
@idle_time.setter
def idle_time(self, idle_time):
"""
Sets the idle_time of this PoolStatistics.
:param idle_time: The idle_time of this PoolStatistics.
:type: float
"""
self._idle_time = idle_time
@property
def other_ops(self):
"""
Gets the other_ops of this PoolStatistics.
:return: The other_ops of this PoolStatistics.
:rtype: float
:required/optional: required
"""
return self._other_ops
@other_ops.setter
def other_ops(self, other_ops):
"""
Sets the other_ops of this PoolStatistics.
:param other_ops: The other_ops of this PoolStatistics.
:type: float
"""
self._other_ops = other_ops
@property
def other_time_max(self):
"""
Gets the other_time_max of this PoolStatistics.
:return: The other_time_max of this PoolStatistics.
:rtype: float
:required/optional: required
"""
return self._other_time_max
@other_time_max.setter
def other_time_max(self, other_time_max):
"""
Sets the other_time_max of this PoolStatistics.
:param other_time_max: The other_time_max of this PoolStatistics.
:type: float
"""
self._other_time_max = other_time_max
@property
def other_time_total(self):
"""
Gets the other_time_total of this PoolStatistics.
:return: The other_time_total of this PoolStatistics.
:rtype: float
:required/optional: required
"""
return self._other_time_total
@other_time_total.setter
def other_time_total(self, other_time_total):
"""
Sets the other_time_total of this PoolStatistics.
:param other_time_total: The other_time_total of this PoolStatistics.
:type: float
"""
self._other_time_total = other_time_total
@property
def read_bytes(self):
"""
Gets the read_bytes of this PoolStatistics.
:return: The read_bytes of this PoolStatistics.
:rtype: float
:required/optional: required
"""
return self._read_bytes
@read_bytes.setter
def read_bytes(self, read_bytes):
"""
Sets the read_bytes of this PoolStatistics.
:param read_bytes: The read_bytes of this PoolStatistics.
:type: float
"""
self._read_bytes = read_bytes
@property
def read_hit_time_max(self):
"""
Gets the read_hit_time_max of this PoolStatistics.
:return: The read_hit_time_max of this PoolStatistics.
:rtype: float
:required/optional: required
"""
return self._read_hit_time_max
@read_hit_time_max.setter
def read_hit_time_max(self, read_hit_time_max):
"""
Sets the read_hit_time_max of this PoolStatistics.
:param read_hit_time_max: The read_hit_time_max of this PoolStatistics.
:type: float
"""
self._read_hit_time_max = read_hit_time_max
@property
def read_time_max(self):
"""
Gets the read_time_max of this PoolStatistics.
:return: The read_time_max of this PoolStatistics.
:rtype: float
:required/optional: required
"""
return self._read_time_max
@read_time_max.setter
def read_time_max(self, read_time_max):
"""
Sets the read_time_max of this PoolStatistics.
:param read_time_max: The read_time_max of this PoolStatistics.
:type: float
"""
self._read_time_max = read_time_max
@property
def write_bytes(self):
"""
Gets the write_bytes of this PoolStatistics.
:return: The write_bytes of this PoolStatistics.
:rtype: float
:required/optional: required
"""
return self._write_bytes
@write_bytes.setter
def write_bytes(self, write_bytes):
"""
Sets the write_bytes of this PoolStatistics.
:param write_bytes: The write_bytes of this PoolStatistics.
:type: float
"""
self._write_bytes = write_bytes
@property
def write_hit_bytes(self):
"""
Gets the write_hit_bytes of this PoolStatistics.
Number of bytes written to cache
:return: The write_hit_bytes of this PoolStatistics.
:rtype: float
:required/optional: required
"""
return self._write_hit_bytes
@write_hit_bytes.setter
def write_hit_bytes(self, write_hit_bytes):
"""
Sets the write_hit_bytes of this PoolStatistics.
Number of bytes written to cache
:param write_hit_bytes: The write_hit_bytes of this PoolStatistics.
:type: float
"""
self._write_hit_bytes = write_hit_bytes
@property
def write_hit_ops(self):
"""
Gets the write_hit_ops of this PoolStatistics.
Number of write operations that hit cache
:return: The write_hit_ops of this PoolStatistics.
:rtype: float
:required/optional: required
"""
return self._write_hit_ops
@write_hit_ops.setter
def write_hit_ops(self, write_hit_ops):
"""
Sets the write_hit_ops of this PoolStatistics.
Number of write operations that hit cache
:param write_hit_ops: The write_hit_ops of this PoolStatistics.
:type: float
"""
self._write_hit_ops = write_hit_ops
@property
def write_hit_time_max(self):
"""
Gets the write_hit_time_max of this PoolStatistics.
:return: The write_hit_time_max of this PoolStatistics.
:rtype: float
:required/optional: required
"""
return self._write_hit_time_max
@write_hit_time_max.setter
def write_hit_time_max(self, write_hit_time_max):
"""
Sets the write_hit_time_max of this PoolStatistics.
:param write_hit_time_max: The write_hit_time_max of this PoolStatistics.
:type: float
"""
self._write_hit_time_max = write_hit_time_max
@property
def write_time_max(self):
"""
Gets the write_time_max of this PoolStatistics.
:return: The write_time_max of this PoolStatistics.
:rtype: float
:required/optional: required
"""
return self._write_time_max
@write_time_max.setter
def write_time_max(self, write_time_max):
"""
Sets the write_time_max of this PoolStatistics.
:param write_time_max: The write_time_max of this PoolStatistics.
:type: float
"""
self._write_time_max = write_time_max
@property
def queue_depth_total(self):
"""
Gets the queue_depth_total of this PoolStatistics.
Total channel queue depth.
:return: The queue_depth_total of this PoolStatistics.
:rtype: float
:required/optional: required
"""
return self._queue_depth_total
@queue_depth_total.setter
def queue_depth_total(self, queue_depth_total):
"""
Sets the queue_depth_total of this PoolStatistics.
Total channel queue depth.
:param queue_depth_total: The queue_depth_total of this PoolStatistics.
:type: float
"""
self._queue_depth_total = queue_depth_total
@property
def queue_depth_max(self):
"""
Gets the queue_depth_max of this PoolStatistics.
Maximum channel queue depth.
:return: The queue_depth_max of this PoolStatistics.
:rtype: float
:required/optional: required
"""
return self._queue_depth_max
@queue_depth_max.setter
def queue_depth_max(self, queue_depth_max):
"""
Sets the queue_depth_max of this PoolStatistics.
Maximum channel queue depth.
:param queue_depth_max: The queue_depth_max of this PoolStatistics.
:type: float
"""
self._queue_depth_max = queue_depth_max
@property
def flash_cache_read_hit_ops(self):
"""
Gets the flash_cache_read_hit_ops of this PoolStatistics.
The number of flash cache read hit operations.
:return: The flash_cache_read_hit_ops of this PoolStatistics.
:rtype: float
:required/optional: required
"""
return self._flash_cache_read_hit_ops
@flash_cache_read_hit_ops.setter
def flash_cache_read_hit_ops(self, flash_cache_read_hit_ops):
"""
Sets the flash_cache_read_hit_ops of this PoolStatistics.
The number of flash cache read hit operations.
:param flash_cache_read_hit_ops: The flash_cache_read_hit_ops of this PoolStatistics.
:type: float
"""
self._flash_cache_read_hit_ops = flash_cache_read_hit_ops
@property
def flash_cache_read_hit_bytes(self):
"""
Gets the flash_cache_read_hit_bytes of this PoolStatistics.
The number of flash cache read hit bytes.
:return: The flash_cache_read_hit_bytes of this PoolStatistics.
:rtype: float
:required/optional: required
"""
return self._flash_cache_read_hit_bytes
@flash_cache_read_hit_bytes.setter
def flash_cache_read_hit_bytes(self, flash_cache_read_hit_bytes):
"""
Sets the flash_cache_read_hit_bytes of this PoolStatistics.
The number of flash cache read hit bytes.
:param flash_cache_read_hit_bytes: The flash_cache_read_hit_bytes of this PoolStatistics.
:type: float
"""
self._flash_cache_read_hit_bytes = flash_cache_read_hit_bytes
@property
def flash_cache_read_hit_time_total(self):
"""
Gets the flash_cache_read_hit_time_total of this PoolStatistics.
The sum of all response times for all reads that have cache hits, in microseconds
:return: The flash_cache_read_hit_time_total of this PoolStatistics.
:rtype: float
:required/optional: required
"""
return self._flash_cache_read_hit_time_total
@flash_cache_read_hit_time_total.setter
def flash_cache_read_hit_time_total(self, flash_cache_read_hit_time_total):
"""
Sets the flash_cache_read_hit_time_total of this PoolStatistics.
The sum of all response times for all reads that have cache hits, in microseconds
:param flash_cache_read_hit_time_total: The flash_cache_read_hit_time_total of this PoolStatistics.
:type: float
"""
self._flash_cache_read_hit_time_total = flash_cache_read_hit_time_total
@property
def flash_cache_read_hit_time_max(self):
"""
Gets the flash_cache_read_hit_time_max of this PoolStatistics.
Maximum I/O time in microseconds for any one read I/O that has a cache hit
:return: The flash_cache_read_hit_time_max of this PoolStatistics.
:rtype: float
:required/optional: required
"""
return self._flash_cache_read_hit_time_max
@flash_cache_read_hit_time_max.setter
def flash_cache_read_hit_time_max(self, flash_cache_read_hit_time_max):
"""
Sets the flash_cache_read_hit_time_max of this PoolStatistics.
Maximum I/O time in microseconds for any one read I/O that has a cache hit
:param flash_cache_read_hit_time_max: The flash_cache_read_hit_time_max of this PoolStatistics.
:type: float
"""
self._flash_cache_read_hit_time_max = flash_cache_read_hit_time_max
@property
def pool_id(self):
"""
Gets the pool_id of this PoolStatistics.
The storage pool or volume group of which the volume is a member.
:return: The pool_id of this PoolStatistics.
:rtype: str
:required/optional: required
"""
return self._pool_id
@pool_id.setter
def pool_id(self, pool_id):
"""
Sets the pool_id of this PoolStatistics.
The storage pool or volume group of which the volume is a member.
:param pool_id: The pool_id of this PoolStatistics.
:type: str
"""
self._pool_id = pool_id
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, PoolStatistics):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
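# --- Illustrative usage sketch (not part of the generated API model) ---
# A minimal example of how this model can be exercised: construct it, set a
# few of the properties defined above via their setters, and serialize it.
# The values below are made-up sample numbers, not real statistics.
if __name__ == "__main__":
    example = PoolStatistics()
    example.pool_id = "pool-1"    # sample identifier (hypothetical)
    example.read_ops = 1200.0     # sample counter values
    example.write_ops = 800.0
    print(example.to_str())       # pretty-printed dict of every field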
|
py | 1a3ef8be50935a58f3240436263e5927059dc87b | """
Copyright 2016 Rackspace
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import time
from cloudcafe.networking.networks.common.proxy_mgr.proxy_mgr \
import NetworkProxyMgr
from cloudroast.networking.networks.fixtures import NetworkingComputeFixture
from cloudcafe.networking.networks.personas import ServerPersona
from cloudroast.networking.networks.topologies.topology_routines \
import TopologyFixtureRoutines
class SpokeAndHubFixture(NetworkingComputeFixture, TopologyFixtureRoutines):
NUM_OF_SPOKES = 5
IP_VERSION = 4
SSH_PROVISION_DELAY = 20
@classmethod
def setUpClass(cls):
super(SpokeAndHubFixture, cls).setUpClass()
cls.iso_nets = []
cls.iso_subnets = []
# dictionary key = server_id
# Sub-dictionary key constants are defined in the
# TopologyFixtureRoutine class
# Dictionary Structure:
# <server_id>: {PERSONA: <persona_obj>,
# SERVER: <server_model>,
# PROXY: <proxy_model>}
cls.servers = {}
# Hub is a single server model representing the hub of the wheel
# (Every network gateway interface is defined on the hub, and all
# traffic goes through the hub). Dictionary is same as sub-dictionary
# in servers. (KEYS = PERSONA, SERVER, PROXY)
cls.hub = {}
cls.last_connectivity_check = {}
cls.base_iso_net = cls.net.subnets.config.ipv4_prefix.replace('*', '0')
cls.base_iso_subnet_mask = cls.determine_octet_mask(cls.base_iso_net)
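# The two assignments above derive the aggregate isolated-network prefix and
# mask; _build_spokes() later uses them to add a single static route on each
# spoke host so inter-spoke traffic leaves via the isolated interface instead
# of the default (public) route.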
def setUp(self):
super(SpokeAndHubFixture, self).setUp()
# If the hub is not defined, build the topology
if not self.hub.keys():
self.fixture_log.debug("NUMBER OF SPOKES ON HUB: {0}".format(
self.NUM_OF_SPOKES))
self.log_action('Build spoke and hub topology')
self.servers, self.iso_nets, self.iso_subnets = \
self._build_spokes()
self.hub = self._build_hub_router(network_spokes=self.iso_subnets)
self.log_action('Verify Spoke and Hub Connectivity')
connectivity = self.verify_ping_connectivity()
# If debugging, or staging for manual debugging (NUM_OF_SPOKES=1),
# run the topology debug routine instead; otherwise make sure
# connectivity is working...
if self.DEBUG and (not connectivity or self.NUM_OF_SPOKES == 1):
self.debug_topology_routine()
# If NUM_OF_SPOKES == 1, recommend using flat network fixture
elif self.NUM_OF_SPOKES > 1:
self.assertTrue(connectivity, self.connectivity_error())
def _build_spokes(self):
"""
Builds each spoke network (isolated network) and adds a host at
the end of each spoke. Each network gateway will be registered on
the hub router.
:return: (tuple), servers [Dict: end of spoke hosts],
iso_nets [List: isolated networks created]
iso_subnets [list: isolated network subnets created]
"""
# NOTE: Each spoke is a subnet on its own isolated network
# Check to see if any spokes are needed
if len(self.servers) >= self.NUM_OF_SPOKES:
return self.servers, self.iso_nets, self.iso_subnets
# Determine the network needed for the static route
network_for_static_route = '{net}/{snm}'.format(
net=self.base_iso_net, snm=self.base_iso_subnet_mask)
self.fixture_log.info('Network for static route: {0}'.format(
network_for_static_route))
# Determine how many spokes are needed and build the spokes
num_of_spokes = self.NUM_OF_SPOKES - len(self.servers)
for spoke in xrange(num_of_spokes):
svr_num = '{run_id!s}_{index!s}'.format(
index=spoke + len(self.servers), run_id=self.RUN_ID)
iso_net, iso_subnet, _ = self._build_isolated_network(
ip_version=self.IP_VERSION)
# Store ISOLATED Network/Subnet information
self.iso_nets.append(iso_net)
self.iso_subnets.append(iso_subnet)
# Build "End of the spoke" (non-hub) hosts
self._build_and_register_iso_net_server(
svr_id_num=svr_num, iso_network=iso_net)
# Wait for final spoke server to stabilize
time.sleep(self.SSH_PROVISION_DELAY)
# Add the generalized isolated-network static route so that traffic to any
# isolated subnet leaves via the local isolated network interface rather
# than the standard default route (the public network interface).
self.fixture_log.debug('\n\n**** Add Static Routes **** \n\n')
addressing_details = ''
for server_dict in self.servers.itervalues():
# Add a generalized static route for the general isolated networks
persona = server_dict[TopologyFixtureRoutines.PERSONA]
addressing_details += '{0!s}\n'.format(persona)
interface_to_use = self.get_vm_network_interface_for_ip(
server_dict=server_dict,
ip_address=persona.inet_fix_ipv4[0])
self.add_static_default_route(
svr_dict=server_dict, network_to_add=network_for_static_route,
interface=interface_to_use)
self.fixture_log.debug('\n\n**** SPOKE ADDRESSING DETAILS **** \n\n')
self.fixture_log.debug(addressing_details)
return self.servers, self.iso_nets, self.iso_subnets
def _build_hub_router(self, network_spokes):
"""
Build the hub router (host) with each spoke's gateway configured as
a network interface on the router.
:param network_spokes: [List] List of iso_subnets that define each
spoke
:return: [Dict] The hub entry (SERVER, PERSONA, PROXY keys) representing
the hub router
"""
port_ids = []
hub_name = 'HUB_{spokes}_{run_id}'.format(
spokes=len(network_spokes), run_id=self.RUN_ID)
hub = {}
# Iterate across spoke (subnets), and configure each GW IP as an
# interface on the 'hub' router.
for spoke in network_spokes:
network_id = spoke.network_id
fixed_ips = [{'ip_address': spoke.gateway_ip,
'subnet_id': spoke.id}]
port_resp = self.net.ports.behaviors.create_port(
network_id=network_id, admin_state_up=True,
fixed_ips=fixed_ips)
self.delete_ports.append(port_resp.response.entity.id)
port_ids.append(port_resp.response.entity.id)
# Add public and service networks to the hub router
attached_networks = [self.public_network_id, self.service_network_id]
hub_svr = self.net.behaviors.create_networking_server(
name=hub_name, admin_pass=self.ADMIN_PASS,
network_ids=attached_networks, port_ids=port_ids)
self.delete_servers.append(hub_svr.entity.id)
# Store HUB server information
hub[TopologyFixtureRoutines.SERVER] = hub_svr.entity
hub_persona = ServerPersona(server=hub_svr.entity)
hub[TopologyFixtureRoutines.PERSONA] = hub_persona
proxy = NetworkProxyMgr(use_proxy=False, debug=True)
proxy.set_proxy_server(hub_svr.entity)
hub[TopologyFixtureRoutines.PROXY] = proxy
self.fixture_log.debug("HUB INTERFACE INFO (PARTIAL)\n{0}".format(
hub_persona))
# Wait for VM's public network to come online by pinging the server's
# public interface
attempt = 0
max_attempts = 10
hub_available = False
while not hub_available and attempt < max_attempts:
attempt += 1
self.fixture_log.debug(
'Verifying hub router is online. Attempt: {0} of {1}'.format(
attempt, max_attempts))
try:
hub_available = proxy.ping(hub_persona.pnet_fix_ipv4[0])
except Exception as err:
self.fixture_log.info('PING EXCEPTION: {0}'.format(err))
hub_available = False
if not hub_available:
time.sleep(5)
if not hub_available:
self.assertClassSetupFailure(
'Hub router (hub & spoke topology) never came online. Unable '
'to proceed.')
# Give the SSH daemon time to start up. The network is active,
# but SSH is unstable at this point in the hub's provisioning.
time.sleep(self.SSH_PROVISION_DELAY)
# Enable the hub to do basic routing
self.enable_ip_forwarding(hub)
return hub
|
py | 1a3ef918d82bf57e91ff742e891132e40195472e | """
A circuit breaker implementation that works as a requests adapter
"""
from requests_circuit_breaker.circuit_breaker import CircuitBreakerAdapter, CircuitBreaker
from requests_circuit_breaker.circuit_breaker_percentage import PercentageCircuitBreaker
VERSION = __version__ = '0.1.0'
__author__ = 'Chris Tarttelin and Equal Experts LTD'
__email__ = '[email protected]'
__url__ = 'https://eelabs.github.io/circuit-breaker'
__all__ = ['CircuitBreaker', 'CircuitBreakerAdapter', 'PercentageCircuitBreaker']
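# Rough usage sketch (assumptions, not documented API): the exported classes
# are intended to be used as a requests transport adapter, e.g.
#
#   import requests
#   session = requests.Session()
#   adapter = CircuitBreakerAdapter(...)  # constructor arguments are an assumption
#   session.mount("http://", adapter)     # standard requests adapter mounting
#
# Only Session.mount() is standard requests API here; how the breaker classes
# are constructed and wired together is not shown in this module.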
|
py | 1a3ef955809bf3d338f2cd48320988b2fa09c47c | from libs import con_database
from libs import util
conf = util.conf_path()
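# Helpers for reading Inception's backup schema from the configured backup
# database: rollbackSQL() looks up, for a given opid_time, which per-operation
# backup table was recorded in $_$Inception_backup_information$_$, and roll()
# then reads the generated rollback statements out of that table.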
def rollbackSQL(db=None, opid=None):
with con_database.SQLgo(
ip=conf.backupdb,
user=conf.backupuser,
password=conf.backuppassword,
db=db,
port=conf.backupport
) as f:
data = f.execute(
sql=
'''
select tablename from $_$Inception_backup_information$_$ where opid_time =%s;
''' % opid)
return data[0][0]
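# Pull the rollback_statement rows recorded for the given opid_time out of the
# named backup table (backdb); note the table name and opid are interpolated
# directly into the SQL string.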
def roll(backdb=None, opid=None):
with con_database.SQLgo(
ip=conf.backupdb,
user=conf.backupuser,
password=conf.backuppassword,
port=conf.backupport
) as f:
data = f.dic_data(
sql=
'''
select rollback_statement from %s where opid_time =%s;
''' % (backdb, opid))
return data
|
py | 1a3ef95f33a95bc0e63e77a9c7c1df296580abec | from unittest import TestCase
from microfreshener.core.importer import YMLImporter
from microfreshener.core.exporter import YMLExporter
from microfreshener.core.model.type import MICROTOSCA_RELATIONSHIPS_INTERACT_WITH
from microfreshener.core.importer.ymltype import YML_INTERACTION
from microfreshener.core.importer.ymltype import YML_RELATIONSHIP_T, YML_RELATIONSHIP_D, YML_RELATIONSHIP_C, YML_RELATIONSHIP_CD, YML_RELATIONSHIP_TC, YML_RELATIONSHIP_TD, YML_RELATIONSHIP_TCD
from microfreshener.core.model.type import MICROTOSCA_RELATIONSHIPS_INTERACT_WITH_TIMEOUT_PROPERTY, MICROTOSCA_RELATIONSHIPS_INTERACT_WITH_DYNAMIC_DISCOVEY_PROPERTY,MICROTOSCA_RELATIONSHIPS_INTERACT_WITH_CIRCUIT_BREAKER_PROPERTY
class TestYmlExporterRelationships(TestCase):
@classmethod
def setUpClass(self):
file = 'data/tests/test_relationships_properties.yml'
importer = YMLImporter()
self.microtosca = importer.Import(file)
self.exporter = YMLExporter()
def test_relationship(self):
rel_dict = self._transform_relationship_from_source_to_target("source", "target")
self.assertEqual(rel_dict[YML_INTERACTION], "target")
def test_relationship_t(self):
rel_dict = self._transform_relationship_from_source_to_target("source", "target_t")
self.assertEqual(rel_dict[YML_INTERACTION]['node'], "target_t")
self.assertEqual(rel_dict[YML_INTERACTION]["relationship"], "t")
def test_relationship_c(self):
rel_dict = self._transform_relationship_from_source_to_target("source", "target_c")
self.assertEqual(rel_dict[YML_INTERACTION]['node'], "target_c")
self.assertEqual(rel_dict[YML_INTERACTION]["relationship"], "c")
def test_relationship_d(self):
rel_dict = self._transform_relationship_from_source_to_target("source", "target_d")
self.assertEqual(rel_dict[YML_INTERACTION]['node'], "target_d")
self.assertEqual(rel_dict[YML_INTERACTION]["relationship"], "d")
def test_relationship_tc(self):
rel_dict = self._transform_relationship_from_source_to_target("source", "target_tc")
self.assertEqual(rel_dict[YML_INTERACTION]['node'], "target_tc")
self.assertEqual(rel_dict[YML_INTERACTION]["relationship"], "tc")
def test_relationship_td(self):
rel_dict = self._transform_relationship_from_source_to_target("source", "target_td")
self.assertEqual(rel_dict[YML_INTERACTION]['node'], "target_td")
self.assertEqual(rel_dict[YML_INTERACTION]["relationship"], "td")
def test_relationship_cd(self):
rel_dict = self._transform_relationship_from_source_to_target("source", "target_cd")
self.assertEqual(rel_dict[YML_INTERACTION]['node'], "target_cd")
self.assertEqual(rel_dict[YML_INTERACTION]["relationship"], "cd")
def test_relationship_tcd(self):
rel_dict = self._transform_relationship_from_source_to_target("source", "target_tcd")
self.assertEqual(rel_dict[YML_INTERACTION]['node'], "target_tcd")
self.assertEqual(rel_dict[YML_INTERACTION]["relationship"], "tcd")
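# Shared helper for the tests above: locate the single interact-with link from
# source to target in the imported model, assert it is unique, and convert it
# with the exporter into the interaction dictionary under test.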
def _transform_relationship_from_source_to_target(self, source_name, target_name):
source = self.microtosca[source_name]
target = self.microtosca[target_name]
link_to_target = [
link for link in source.interactions if link.target == target]
self.assertEqual(len(link_to_target), 1)
rel_dict = self.exporter._transform_relationship(link_to_target[0])
return rel_dict
def test_build_relationship_templates_t(self):
rel = self.exporter.build_relationship_templates()
self.assertDictEqual(rel[YML_RELATIONSHIP_T], {"type": MICROTOSCA_RELATIONSHIPS_INTERACT_WITH, "properties": {MICROTOSCA_RELATIONSHIPS_INTERACT_WITH_TIMEOUT_PROPERTY: True}})
self.assertTrue(MICROTOSCA_RELATIONSHIPS_INTERACT_WITH_CIRCUIT_BREAKER_PROPERTY not in rel[YML_RELATIONSHIP_T])
self.assertTrue(MICROTOSCA_RELATIONSHIPS_INTERACT_WITH_DYNAMIC_DISCOVEY_PROPERTY not in rel[YML_RELATIONSHIP_T])
def test_build_relationship_templates_d(self):
rel = self.exporter.build_relationship_templates()
self.assertDictEqual(rel[YML_RELATIONSHIP_D], {"type": MICROTOSCA_RELATIONSHIPS_INTERACT_WITH, "properties": {MICROTOSCA_RELATIONSHIPS_INTERACT_WITH_DYNAMIC_DISCOVEY_PROPERTY: True}})
self.assertTrue(MICROTOSCA_RELATIONSHIPS_INTERACT_WITH_CIRCUIT_BREAKER_PROPERTY not in rel[YML_RELATIONSHIP_T])
self.assertTrue(MICROTOSCA_RELATIONSHIPS_INTERACT_WITH_TIMEOUT_PROPERTY not in rel[YML_RELATIONSHIP_T])
def test_build_relationship_templates_c(self):
rel = self.exporter.build_relationship_templates()
self.assertDictEqual(rel[YML_RELATIONSHIP_C], {"type": MICROTOSCA_RELATIONSHIPS_INTERACT_WITH, "properties": {MICROTOSCA_RELATIONSHIPS_INTERACT_WITH_CIRCUIT_BREAKER_PROPERTY: True}})
self.assertTrue(MICROTOSCA_RELATIONSHIPS_INTERACT_WITH_TIMEOUT_PROPERTY not in rel[YML_RELATIONSHIP_T])
self.assertTrue(MICROTOSCA_RELATIONSHIPS_INTERACT_WITH_DYNAMIC_DISCOVEY_PROPERTY not in rel[YML_RELATIONSHIP_T])
def test_build_relationship_templates_tc(self):
rel = self.exporter.build_relationship_templates()
self.assertDictEqual(rel[YML_RELATIONSHIP_TC], {"type": MICROTOSCA_RELATIONSHIPS_INTERACT_WITH, "properties": {MICROTOSCA_RELATIONSHIPS_INTERACT_WITH_TIMEOUT_PROPERTY: True, MICROTOSCA_RELATIONSHIPS_INTERACT_WITH_CIRCUIT_BREAKER_PROPERTY: True}})
self.assertTrue(MICROTOSCA_RELATIONSHIPS_INTERACT_WITH_DYNAMIC_DISCOVEY_PROPERTY not in rel[YML_RELATIONSHIP_T])
def test_build_relationship_templates_td(self):
rel = self.exporter.build_relationship_templates()
self.assertDictEqual(rel[YML_RELATIONSHIP_TD], {"type": MICROTOSCA_RELATIONSHIPS_INTERACT_WITH, "properties": {MICROTOSCA_RELATIONSHIPS_INTERACT_WITH_TIMEOUT_PROPERTY: True, MICROTOSCA_RELATIONSHIPS_INTERACT_WITH_DYNAMIC_DISCOVEY_PROPERTY: True}})
self.assertTrue(MICROTOSCA_RELATIONSHIPS_INTERACT_WITH_CIRCUIT_BREAKER_PROPERTY not in rel[YML_RELATIONSHIP_T])
def test_build_relationship_templates_cd(self):
rel = self.exporter.build_relationship_templates()
self.assertDictEqual(rel[YML_RELATIONSHIP_CD], {"type": MICROTOSCA_RELATIONSHIPS_INTERACT_WITH, "properties": {MICROTOSCA_RELATIONSHIPS_INTERACT_WITH_CIRCUIT_BREAKER_PROPERTY: True, MICROTOSCA_RELATIONSHIPS_INTERACT_WITH_DYNAMIC_DISCOVEY_PROPERTY: True}})
self.assertTrue(MICROTOSCA_RELATIONSHIPS_INTERACT_WITH_TIMEOUT_PROPERTY not in rel[YML_RELATIONSHIP_T])
def test_build_relationship_templates_tcd(self):
rel = self.exporter.build_relationship_templates()
self.assertDictEqual(rel[YML_RELATIONSHIP_TCD], {"type": MICROTOSCA_RELATIONSHIPS_INTERACT_WITH, "properties": {MICROTOSCA_RELATIONSHIPS_INTERACT_WITH_TIMEOUT_PROPERTY: True,MICROTOSCA_RELATIONSHIPS_INTERACT_WITH_CIRCUIT_BREAKER_PROPERTY: True, MICROTOSCA_RELATIONSHIPS_INTERACT_WITH_DYNAMIC_DISCOVEY_PROPERTY: True}})
|
py | 1a3efac68404b793e1e9ca6b3ae7556a627ea3ab | import textwrap
import unittest
from stone.backends.js_client import JavascriptClientBackend
from test.backend_test_util import _mock_output
from stone.ir import Api, ApiNamespace, ApiRoute, Void, Int32
from stone.ir.data_types import Struct
MYPY = False
if MYPY:
import typing # noqa: F401 # pylint: disable=import-error,unused-import,useless-suppression
class TestGeneratedJSClient(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(TestGeneratedJSClient, self).__init__(*args, **kwargs)
def _get_api(self):
# type: () -> Api
api = Api(version='0.1b1')
api.route_schema = Struct(u'Route', 'stone_cfg', None)
route1 = ApiRoute('get_metadata', 1, None)
route1.set_attributes(None, ':route:`get_metadata`', Void(), Void(), Void(), {})
route2 = ApiRoute('get_metadata', 2, None)
route2.set_attributes(None, ':route:`get_metadata:2`', Void(), Int32(), Void(), {})
route3 = ApiRoute('get_metadata', 3, None)
route3.set_attributes(None, ':route:`get_metadata:3`', Int32(), Int32(), Void(), {})
ns = ApiNamespace('files')
ns.add_route(route1)
ns.add_route(route2)
ns.add_route(route3)
api.namespaces[ns.name] = ns
return api, ns
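# Builds a minimal in-memory Api: a 'files' namespace exposing three versions
# of get_metadata (void -> void, void -> int32, int32 -> int32) that the tests
# below render through the JavaScript client backend.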
def test_route_versions(self):
# type: () -> None
api, _ = self._get_api()
backend = JavascriptClientBackend(
target_folder_path='output',
args=['files', '-c', 'DropboxBase'])
get_result = _mock_output(backend)
backend.generate(api)
result = get_result()
expected = textwrap.dedent('''\
// Auto-generated by Stone, do not modify.
var routes = {};
/**
* get_metadata
* @function DropboxBase#filesGetMetadata
* @returns {Promise.<void, Error.<void>>}
*/
routes.filesGetMetadata = function () {
return this.request("files/get_metadata", null);
};
/**
* get_metadata_v2
* @function DropboxBase#filesGetMetadataV2
* @returns {Promise.<number, Error.<void>>}
*/
routes.filesGetMetadataV2 = function () {
return this.request("files/get_metadata_v2", null);
};
/**
* get_metadata_v3
* @function DropboxBase#filesGetMetadataV3
* @arg {number} arg - The request parameters.
* @returns {Promise.<number, Error.<void>>}
*/
routes.filesGetMetadataV3 = function (arg) {
return this.request("files/get_metadata_v3", arg);
};
export { routes };
''')
assert result == expected
def test_wrap_response_in_flag(self):
# type: () -> None
api, _ = self._get_api()
backend = JavascriptClientBackend(
target_folder_path='output',
args=['files', '-c', 'DropboxBase', '--wrap-response-in', 'DropboxResponse'])
get_result = _mock_output(backend)
backend.generate(api)
result = get_result()
expected = textwrap.dedent('''\
// Auto-generated by Stone, do not modify.
var routes = {};
/**
* get_metadata
* @function DropboxBase#filesGetMetadata
* @returns {Promise.<DropboxResponse<void>, Error.<void>>}
*/
routes.filesGetMetadata = function () {
return this.request("files/get_metadata", null);
};
/**
* get_metadata_v2
* @function DropboxBase#filesGetMetadataV2
* @returns {Promise.<DropboxResponse<number>, Error.<void>>}
*/
routes.filesGetMetadataV2 = function () {
return this.request("files/get_metadata_v2", null);
};
/**
* get_metadata_v3
* @function DropboxBase#filesGetMetadataV3
* @arg {number} arg - The request parameters.
* @returns {Promise.<DropboxResponse<number>, Error.<void>>}
*/
routes.filesGetMetadataV3 = function (arg) {
return this.request("files/get_metadata_v3", arg);
};
export { routes };
''')
assert result == expected
def test_route_with_version_number_conflict(self):
# type: () -> None
api, ns = self._get_api()
# Add a conflicting route
route3 = ApiRoute('get_metadata_v2', 1, None)
route3.set_attributes(None, None, Void(), Int32(), Void(), {})
ns.add_route(route3)
backend = JavascriptClientBackend(
target_folder_path='output',
args=['files', '-c', 'DropboxBase'])
with self.assertRaises(RuntimeError) as cm:
backend.generate(api)
self.assertTrue(str(cm.exception).startswith(
'There is a name conflict between'))
|
py | 1a3eface546d1e618455f7adafe45a198833b2df | # Copyright 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""lbaas version 2 api
Revision ID: lbaasv2
Revises: start_neutron_lbaas
Create Date: 2014-06-18 10:50:15.606420
"""
# revision identifiers, used by Alembic.
revision = 'lbaasv2'
down_revision = 'start_neutron_lbaas'
from alembic import op
import sqlalchemy as sa
listener_protocols = sa.Enum("HTTP", "HTTPS", "TCP",
name="listener_protocolsv2")
pool_protocols = sa.Enum("HTTP", "HTTPS", "TCP",
name="pool_protocolsv2")
sesssionpersistences_type = sa.Enum("SOURCE_IP", "HTTP_COOKIE", "APP_COOKIE",
name="sesssionpersistences_typev2")
lb_algorithms = sa.Enum("ROUND_ROBIN", "LEAST_CONNECTIONS", "SOURCE_IP",
name="lb_algorithmsv2")
healthmonitors_type = sa.Enum("PING", "TCP", "HTTP", "HTTPS",
name="healthmonitors_typev2")
def upgrade():
op.create_table(
u'lbaas_healthmonitors',
sa.Column(u'tenant_id', sa.String(255), nullable=True),
sa.Column(u'id', sa.String(36), nullable=False),
sa.Column(u'type', healthmonitors_type, nullable=False),
sa.Column(u'delay', sa.Integer(), nullable=False),
sa.Column(u'timeout', sa.Integer(), nullable=False),
sa.Column(u'max_retries', sa.Integer(), nullable=False),
sa.Column(u'http_method', sa.String(16), nullable=True),
sa.Column(u'url_path', sa.String(255), nullable=True),
sa.Column(u'expected_codes', sa.String(64), nullable=True),
sa.Column(u'status', sa.String(16), nullable=False),
sa.Column(u'admin_state_up', sa.Boolean(), nullable=False),
sa.PrimaryKeyConstraint(u'id')
)
op.create_table(
u'lbaas_pools',
sa.Column(u'tenant_id', sa.String(255), nullable=True),
sa.Column(u'id', sa.String(36), nullable=False),
sa.Column(u'name', sa.String(255), nullable=True),
sa.Column(u'description', sa.String(255), nullable=True),
sa.Column(u'protocol', pool_protocols, nullable=False),
sa.Column(u'lb_algorithm', lb_algorithms, nullable=False),
sa.Column(u'healthmonitor_id', sa.String(36), nullable=True),
sa.Column(u'status', sa.String(16), nullable=False),
sa.Column(u'admin_state_up', sa.Boolean(), nullable=False),
sa.PrimaryKeyConstraint(u'id'),
sa.UniqueConstraint(u'healthmonitor_id'),
sa.ForeignKeyConstraint([u'healthmonitor_id'],
[u'lbaas_healthmonitors.id'])
)
op.create_table(
u'lbaas_sessionpersistences',
sa.Column(u'pool_id', sa.String(36), nullable=False),
sa.Column(u'type', sesssionpersistences_type, nullable=False),
sa.Column(u'cookie_name', sa.String(1024), nullable=True),
sa.ForeignKeyConstraint([u'pool_id'], [u'lbaas_pools.id']),
sa.PrimaryKeyConstraint(u'pool_id')
)
op.create_table(
u'lbaas_members',
sa.Column(u'tenant_id', sa.String(255), nullable=True),
sa.Column(u'id', sa.String(36), nullable=False),
sa.Column(u'pool_id', sa.String(36), nullable=False),
sa.Column(u'subnet_id', sa.String(36), nullable=True),
sa.Column(u'address', sa.String(64), nullable=False),
sa.Column(u'protocol_port', sa.Integer(), nullable=False),
sa.Column(u'weight', sa.Integer(), nullable=True),
sa.Column(u'status', sa.String(16), nullable=False),
sa.Column(u'admin_state_up', sa.Boolean(), nullable=False),
sa.PrimaryKeyConstraint(u'id'),
sa.ForeignKeyConstraint([u'pool_id'], [u'lbaas_pools.id']),
sa.UniqueConstraint(u'pool_id', u'address', u'protocol_port',
name=u'uniq_pool_address_port_v2')
)
op.create_table(
u'lbaas_loadbalancers',
sa.Column(u'tenant_id', sa.String(255), nullable=True),
sa.Column(u'id', sa.String(36), nullable=False),
sa.Column(u'name', sa.String(255), nullable=True),
sa.Column(u'description', sa.String(255), nullable=True),
sa.Column(u'vip_port_id', sa.String(36), nullable=True),
sa.Column(u'vip_subnet_id', sa.String(36), nullable=False),
sa.Column(u'vip_address', sa.String(36), nullable=True),
sa.Column(u'status', sa.String(16), nullable=False),
sa.Column(u'admin_state_up', sa.Boolean(), nullable=False),
sa.ForeignKeyConstraint([u'vip_port_id'], [u'ports.id'],
name=u'fk_lbaas_loadbalancers_ports_id'),
sa.PrimaryKeyConstraint(u'id')
)
op.create_table(
u'lbaas_listeners',
sa.Column(u'tenant_id', sa.String(255), nullable=True),
sa.Column(u'id', sa.String(36), nullable=False),
sa.Column(u'name', sa.String(255), nullable=True),
sa.Column(u'description', sa.String(255), nullable=True),
sa.Column(u'protocol', listener_protocols, nullable=False),
sa.Column(u'protocol_port', sa.Integer(), nullable=False),
sa.Column(u'connection_limit', sa.Integer(), nullable=True),
sa.Column(u'loadbalancer_id', sa.String(36), nullable=True),
sa.Column(u'default_pool_id', sa.String(36), nullable=True),
sa.Column(u'status', sa.String(16), nullable=False),
sa.Column(u'admin_state_up', sa.Boolean(), nullable=False),
sa.ForeignKeyConstraint([u'loadbalancer_id'],
[u'lbaas_loadbalancers.id']),
sa.ForeignKeyConstraint([u'default_pool_id'],
[u'lbaas_pools.id']),
sa.UniqueConstraint(u'default_pool_id'),
sa.UniqueConstraint(u'loadbalancer_id', u'protocol_port',
name=u'uniq_loadbalancer_listener_port'),
sa.PrimaryKeyConstraint(u'id')
)
op.create_table(
u'lbaas_loadbalancer_statistics',
sa.Column(u'loadbalancer_id', sa.String(36), nullable=False),
sa.Column(u'bytes_in', sa.BigInteger(), nullable=False),
sa.Column(u'bytes_out', sa.BigInteger(), nullable=False),
sa.Column(u'active_connections', sa.BigInteger(), nullable=False),
sa.Column(u'total_connections', sa.BigInteger(), nullable=False),
sa.PrimaryKeyConstraint(u'loadbalancer_id'),
sa.ForeignKeyConstraint([u'loadbalancer_id'],
[u'lbaas_loadbalancers.id'])
)
def downgrade():
op.drop_table(u'lbaas_loadbalancer_statistics')
op.drop_table(u'lbaas_listeners')
listener_protocols.drop(op.get_bind(), checkfirst=False)
op.drop_table(u'lbaas_loadbalancers')
op.drop_table(u'lbaas_members')
op.drop_table(u'lbaas_sessionpersistences')
sesssionpersistences_type.drop(op.get_bind(), checkfirst=False)
op.drop_table(u'lbaas_pools')
pool_protocols.drop(op.get_bind(), checkfirst=False)
lb_algorithms.drop(op.get_bind(), checkfirst=False)
op.drop_table(u'lbaas_healthmonitors')
healthmonitors_type.drop(op.get_bind(), checkfirst=False)
|
py | 1a3efaeff1da4cbb370204d6ed03f3dcda3935bb | '''Write a Python program to get the effective group id, effective
user id, real group id, a list of supplemental group ids associated with the current process.
Note: Availability: Unix.'''
import os
print("\nEffective group id: ",os.getegid())
print("Effective user id: ",os.geteuid())
print("Real group id: ",os.getgid())
print("List of supplemental group ids: ",os.getgroups())
|
py | 1a3efb6fea6065e1a4198a0e7a461fc7573301c4 | import json
import os
import sys
import argparse
import shutil
import logging
import re
from zipfile import ZipFile
from google.cloud.storage import Blob, Bucket
from Tests.scripts.utils.log_util import install_logging
from Tests.Marketplace.marketplace_services import init_storage_client, Pack, \
load_json, store_successful_and_failed_packs_in_ci_artifacts, \
get_upload_data
from Tests.Marketplace.marketplace_constants import PackStatus, GCPConfig, BucketUploadFlow, PACKS_FOLDER, \
PACKS_FULL_PATH, IGNORED_FILES
from Tests.Marketplace.upload_packs import extract_packs_artifacts, print_packs_summary, get_packs_summary
LATEST_ZIP_REGEX = re.compile(fr'^{GCPConfig.GCS_PUBLIC_URL}/[\w./-]+/content/packs/([A-Za-z0-9-_.]+/\d+\.\d+\.\d+/'
r'[A-Za-z0-9-_.]+\.zip$)')
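# Illustrative sketch of what LATEST_ZIP_REGEX is meant to capture. The bucket and pack names
# below are hypothetical, and it is assumed that GCPConfig.GCS_PUBLIC_URL is the public GCS URL
# prefix (e.g. "https://storage.googleapis.com"):
#   "https://storage.googleapis.com/some-bucket/content/packs/Base/1.4.0/Base.zip"
#   -> LATEST_ZIP_REGEX.findall(path) == ["Base/1.4.0/Base.zip"]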
def get_pack_names(target_packs: str) -> set:
"""
    Retrieves the names of all relevant packs (that aren't ignored)
Args:
target_packs (str): csv packs names or `All` for all available packs in content.
    Returns: The set of names of the packs
"""
if target_packs.lower() == "all":
if os.path.exists(PACKS_FULL_PATH):
all_packs = {p for p in os.listdir(PACKS_FULL_PATH) if p not in IGNORED_FILES}
logging.info(f"Number of selected packs to upload is: {len(all_packs)}")
# return all available packs names
return all_packs
else:
logging.error(f"Folder {PACKS_FOLDER} was not found at the following path: {PACKS_FULL_PATH}")
sys.exit(1)
elif target_packs and isinstance(target_packs, str):
modified_packs = {p.strip() for p in target_packs.split(',') if p not in IGNORED_FILES}
logging.info(f"Number of selected packs to upload is: {len(modified_packs)}")
# return only packs from csv list
return modified_packs
else:
logging.error("Not correct usage of flag -p. Please check help section of upload packs script.")
sys.exit(1)
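# For illustration (pack names are hypothetical): get_pack_names("Base, CommonScripts") returns
# {"Base", "CommonScripts"}, while get_pack_names("All") expands to every non-ignored directory
# found under PACKS_FULL_PATH.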
def copy_index(index_folder_path: str, build_index_blob: Blob, build_index_generation: str, production_bucket: Bucket,
build_bucket: Bucket, storage_base_path: str, build_bucket_base_path: str):
""" Copies the build bucket index to the production bucket index path.
Args:
index_folder_path (str): index folder full path.
build_index_blob (Blob): google cloud storage object that represents build index.zip blob.
build_index_generation (str): downloaded build index generation.
production_bucket (google.cloud.storage.bucket.Bucket): gcs bucket where index is copied to.
build_bucket (google.cloud.storage.bucket.Bucket): gcs bucket where index is copied from.
storage_base_path (str): the path to upload the index to.
build_bucket_base_path (str): the path in the build bucket of the index.
"""
try:
build_index_blob.reload()
build_current_index_generation = build_index_blob.generation
# disabling caching for prod index blob
prod_index_storage_path = os.path.join(storage_base_path, f"{GCPConfig.INDEX_NAME}.zip")
prod_index_blob = production_bucket.blob(prod_index_storage_path)
prod_index_blob.cache_control = "no-cache,max-age=0"
prod_index_json_storage_path = os.path.join(storage_base_path, f"{GCPConfig.INDEX_NAME}.json")
prod_index_json_blob = production_bucket.blob(prod_index_json_storage_path)
prod_index_json_blob.cache_control = "no-cache,max-age=0"
if build_current_index_generation == build_index_generation:
copied_index = build_bucket.copy_blob(
blob=build_index_blob, destination_bucket=production_bucket, new_name=prod_index_storage_path
)
if copied_index.exists():
logging.success(f"Finished uploading {GCPConfig.INDEX_NAME}.zip to storage.")
else:
logging.error("Failed copying index.zip from build index - blob does not exist.")
sys.exit(1)
copied_index_json_blob = build_bucket.blob(
os.path.join(build_bucket_base_path, f"{GCPConfig.INDEX_NAME}.json")
)
copied_index_json = build_bucket.copy_blob(
blob=copied_index_json_blob, destination_bucket=production_bucket, new_name=prod_index_json_storage_path
)
if copied_index_json.exists():
logging.success(f"Finished uploading {GCPConfig.INDEX_NAME}.json to storage.")
else:
logging.error("Failed copying index.json from build index - blob does not exist.")
sys.exit(1)
else:
logging.error(f"Failed in uploading {GCPConfig.INDEX_NAME}, mismatch in index file generation")
logging.error(f"Downloaded build index generation: {build_index_generation}")
logging.error(f"Current build index generation: {build_current_index_generation}")
sys.exit(1)
except Exception as e:
logging.exception(f"Failed copying {GCPConfig.INDEX_NAME}. Additional Info: {str(e)}")
sys.exit(1)
finally:
shutil.rmtree(index_folder_path)
def upload_core_packs_config(production_bucket: Bucket, build_number: str, extract_destination_path: str,
build_bucket: Bucket, storage_base_path: str, build_bucket_base_path: str):
"""Uploads the corepacks.json file to the target bucket. This files contains all of the server's core packs, under
the key corepacks, and specifies which core packs should be upgraded upon XSOAR upgrade, under the key upgradeCorePacks.
Args:
production_bucket (google.cloud.storage.bucket.Bucket): gcs bucket where core packs config is uploaded.
build_number (str): CircleCI build number.
extract_destination_path (str): Full path of folder to extract the corepacks file
build_bucket (google.cloud.storage.bucket.Bucket): gcs bucket where core packs config is downloaded from.
storage_base_path (str): the path to upload the corepacks.json to.
build_bucket_base_path (str): the path in the build bucket of the corepacks.json.
"""
# download the corepacks.json stored in the build bucket to temp dir
build_corepacks_file_path = os.path.join(build_bucket_base_path, GCPConfig.CORE_PACK_FILE_NAME)
build_corepacks_blob = build_bucket.blob(build_corepacks_file_path)
if not build_corepacks_blob.exists():
logging.critical(f"{GCPConfig.CORE_PACK_FILE_NAME} is missing in {build_bucket.name} bucket, exiting...")
sys.exit(1)
temp_corepacks_file_path = os.path.join(extract_destination_path, GCPConfig.CORE_PACK_FILE_NAME)
build_corepacks_blob.download_to_filename(temp_corepacks_file_path)
corepacks_file = load_json(temp_corepacks_file_path)
# change the storage paths to the prod bucket
corepacks_list = corepacks_file.get('corePacks', [])
try:
corepacks_list = [os.path.join(GCPConfig.GCS_PUBLIC_URL, production_bucket.name, storage_base_path,
LATEST_ZIP_REGEX.findall(corepack_path)[0]) for corepack_path in corepacks_list]
except IndexError:
corepacks_list_str = '\n'.join(corepacks_list)
logging.exception(f"GCS paths in build bucket corepacks.json file are not of format: "
f"{GCPConfig.GCS_PUBLIC_URL}/<BUCKET_NAME>/.../content/packs/...\n"
f"List of build bucket corepacks paths:\n{corepacks_list_str}")
sys.exit(1)
# construct core pack data with public gcs urls
core_packs_data = {
'corePacks': corepacks_list,
'upgradeCorePacks': corepacks_file.get('upgradeCorePacks', []),
'buildNumber': build_number
}
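    # Illustrative (hypothetical) shape of the payload uploaded below; the URL, pack name and
    # build number are invented for clarity and are not taken from a real bucket:
    # {
    #     "corePacks": ["https://storage.googleapis.com/<prod-bucket>/content/packs/Base/1.4.0/Base.zip"],
    #     "upgradeCorePacks": ["Base"],
    #     "buildNumber": "123456"
    # }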
# upload core pack json file to gcs
prod_corepacks_file_path = os.path.join(storage_base_path, GCPConfig.CORE_PACK_FILE_NAME)
prod_corepacks_blob = production_bucket.blob(prod_corepacks_file_path)
prod_corepacks_blob.upload_from_string(json.dumps(core_packs_data, indent=4))
logging.success(f"Finished uploading {GCPConfig.CORE_PACK_FILE_NAME} to storage.")
def download_and_extract_index(build_bucket: Bucket, extract_destination_path: str, build_bucket_base_path: str):
"""Downloads and extracts production and build indexes zip from cloud storage.
Args:
build_bucket (google.cloud.storage.bucket.Bucket): google storage bucket where build index.zip is stored.
extract_destination_path (str): the full path of extract folder.
build_bucket_base_path (str): the path in the build bucket of the index.
Returns:
str: extracted build index folder full path.
Blob: google cloud storage object that represents prod index.zip blob.
Blob: google cloud storage object that represents build index.zip blob.
str: downloaded prod index generation.
str: downloaded build index generation.
"""
build_index_storage_path = os.path.join(build_bucket_base_path, f"{GCPConfig.INDEX_NAME}.zip")
download_build_index_path = os.path.join(extract_destination_path, f"{GCPConfig.INDEX_NAME}.zip")
build_index_blob = build_bucket.blob(build_index_storage_path)
build_index_folder_path = os.path.join(extract_destination_path, GCPConfig.INDEX_NAME)
if not os.path.exists(extract_destination_path):
os.mkdir(extract_destination_path)
if not build_index_blob.exists():
logging.error(f"No build index was found in path: {build_index_storage_path}")
sys.exit(1)
build_index_blob.reload()
build_index_generation = build_index_blob.generation
build_index_blob.download_to_filename(download_build_index_path, if_generation_match=build_index_generation)
if os.path.exists(download_build_index_path):
with ZipFile(download_build_index_path, 'r') as index_zip:
index_zip.extractall(extract_destination_path)
if not os.path.exists(build_index_folder_path):
logging.error(f"Failed creating build {GCPConfig.INDEX_NAME} folder with extracted data.")
sys.exit(1)
os.remove(download_build_index_path)
logging.success(f"Finished downloading and extracting build {GCPConfig.INDEX_NAME} file to "
f"{extract_destination_path}")
return build_index_folder_path, build_index_blob, build_index_generation
else:
logging.error(f"Failed to download build {GCPConfig.INDEX_NAME}.zip file from cloud storage.")
sys.exit(1)
def copy_id_set(production_bucket: Bucket, build_bucket: Bucket, storage_base_path: str, build_bucket_base_path: str):
""" Copies the id_set.json artifact from the build bucket to the production bucket.
Args:
production_bucket (google.cloud.storage.bucket.Bucket): gcs bucket where id_set is copied to.
build_bucket (google.cloud.storage.bucket.Bucket): gcs bucket where id_set is copied from.
storage_base_path (str): the path to upload the id_set.json to.
build_bucket_base_path (str): the path in the build bucket of the id_set.json.
"""
build_id_set_path = os.path.join(os.path.dirname(build_bucket_base_path), 'id_set.json')
build_id_set_blob = build_bucket.blob(build_id_set_path)
if not build_id_set_blob.exists():
logging.error(f"id_set.json file does not exists in build bucket in path: {build_id_set_path}")
sys.exit(1)
prod_id_set_path = os.path.join(os.path.dirname(storage_base_path), 'id_set.json')
try:
copied_blob = build_bucket.copy_blob(
blob=build_id_set_blob, destination_bucket=production_bucket, new_name=prod_id_set_path
)
if not copied_blob.exists():
logging.error(f"Failed to upload id_set.json to {prod_id_set_path}")
sys.exit(1)
else:
logging.success("Finished uploading id_set.json to storage.")
except Exception as e:
logging.exception(f"Failed copying ID Set. Additional Info: {str(e)}")
sys.exit(1)
def verify_copy(successful_packs: list, pc_successful_packs_dict: dict):
""" Verify that all uploaded packs from Prepare were copied & verify that no packs were mistakenly copied
Args:
successful_packs: The packs that were copied successfully
pc_successful_packs_dict: The pack that were uploaded successfully in Prepare Content
"""
pc_successful_packs_names = {*pc_successful_packs_dict}
successful_packs_names = {pack.name for pack in successful_packs}
not_uploaded = [pack for pack in pc_successful_packs_names if pack not in successful_packs_names]
mistakenly_uploaded = [pack for pack in successful_packs_names if pack not in pc_successful_packs_names]
error_str = "Mismatch in Prepare Content successful packs and Upload successful packs\n"
error_str += f"Packs not copied: {', '.join(not_uploaded)}\n" if not_uploaded else ""
error_str += f"Packs mistakenly copied: {', '.join(mistakenly_uploaded)}\n" if mistakenly_uploaded else ""
assert not not_uploaded and not mistakenly_uploaded, error_str
def check_if_need_to_upload(pc_successful_packs_dict: dict, pc_failed_packs_dict: dict,
pc_successful_private_packs_dict: dict, pc_uploaded_images: dict):
""" If the three dicts are empty then no upload was done in Prepare Content step, so we need to skip uploading
Args:
pc_successful_packs_dict: The successful packs dict
pc_failed_packs_dict: The failed packs dict
pc_successful_private_packs_dict : The successful private packs dict
pc_uploaded_images: The image data dict
"""
if not pc_successful_packs_dict and not pc_failed_packs_dict and not pc_successful_private_packs_dict and not \
pc_uploaded_images:
logging.warning("Production bucket is updated with origin/master.")
logging.warning("Skipping Upload To Marketplace Storage Step.")
sys.exit(0)
def options_handler():
""" Validates and parses script arguments.
Returns:
Namespace: Parsed arguments object.
"""
parser = argparse.ArgumentParser(description="Store packs in cloud storage.")
# disable-secrets-detection-start
parser.add_argument('-a', '--artifacts_path', help="The full path of packs artifacts", required=True)
parser.add_argument('-e', '--extract_path', help="Full path of folder to extract wanted packs", required=True)
parser.add_argument('-pb', '--production_bucket_name', help="Production bucket name", required=True)
parser.add_argument('-bb', '--build_bucket_name', help="CircleCI Build bucket name", required=True)
parser.add_argument('-s', '--service_account',
help=("Path to gcloud service account, is for circleCI usage. "
"For local development use your personal account and "
"authenticate using Google Cloud SDK by running: "
"`gcloud auth application-default login` and leave this parameter blank. "
"For more information go to: "
"https://googleapis.dev/python/google-api-core/latest/auth.html"),
required=False)
parser.add_argument('-p', '--pack_names',
help=("Target packs to upload to gcs. Optional values are: `All`"
" or csv list of packs "
"Default is set to `All`"),
required=False, default="All")
parser.add_argument('-n', '--ci_build_number',
help="CircleCi build number (will be used as hash revision at index file)", required=True)
parser.add_argument('-c', '--circle_branch',
help="CircleCi branch of current build", required=True)
parser.add_argument('-pbp', '--production_base_path', help="Production base path of the directory to upload to.",
required=False)
# disable-secrets-detection-end
return parser.parse_args()
def main():
install_logging('Copy_and_Upload_Packs.log')
options = options_handler()
packs_artifacts_path = options.artifacts_path
extract_destination_path = options.extract_path
production_bucket_name = options.production_bucket_name
build_bucket_name = options.build_bucket_name
service_account = options.service_account
build_number = options.ci_build_number
circle_branch = options.circle_branch
production_base_path = options.production_base_path
target_packs = options.pack_names
# Google cloud storage client initialized
storage_client = init_storage_client(service_account)
production_bucket = storage_client.bucket(production_bucket_name)
build_bucket = storage_client.bucket(build_bucket_name)
# Initialize build and prod base paths
build_bucket_path = os.path.join(GCPConfig.BUILD_PATH_PREFIX, circle_branch, build_number)
build_bucket_base_path = os.path.join(build_bucket_path, GCPConfig.CONTENT_PACKS_PATH)
# Relevant when triggering test upload flow
if production_bucket_name:
GCPConfig.PRODUCTION_BUCKET = production_bucket_name
# Download and extract build index from build and prod buckets
build_index_folder_path, build_index_blob, build_index_generation = \
download_and_extract_index(build_bucket, extract_destination_path, build_bucket_base_path)
    # Get the successful and failed packs file from the Prepare Content step in the Create Instances job, if it exists
packs_results_file_path = os.path.join(os.path.dirname(packs_artifacts_path), BucketUploadFlow.PACKS_RESULTS_FILE)
pc_successful_packs_dict, pc_failed_packs_dict, pc_successful_private_packs_dict, \
pc_uploaded_images = get_upload_data(packs_results_file_path, BucketUploadFlow.PREPARE_CONTENT_FOR_TESTING)
logging.debug(f"Successful packs from Prepare Content: {pc_successful_packs_dict}")
logging.debug(f"Failed packs from Prepare Content: {pc_failed_packs_dict}")
logging.debug(f"Successful private packs from Prepare Content: {pc_successful_private_packs_dict}")
logging.debug(f"Images from Prepare Content: {pc_uploaded_images}")
# Check if needs to upload or not
check_if_need_to_upload(pc_successful_packs_dict, pc_failed_packs_dict, pc_successful_private_packs_dict,
pc_uploaded_images)
# Detect packs to upload
pack_names = get_pack_names(target_packs)
extract_packs_artifacts(packs_artifacts_path, extract_destination_path)
packs_list = [Pack(pack_name, os.path.join(extract_destination_path, pack_name)) for pack_name in pack_names
if os.path.exists(os.path.join(extract_destination_path, pack_name))]
# Starting iteration over packs
for pack in packs_list:
# Indicates whether a pack has failed to upload on Prepare Content step
task_status, pack_status = pack.is_failed_to_upload(pc_failed_packs_dict)
if task_status:
pack.status = pack_status
pack.cleanup()
continue
task_status = pack.load_user_metadata()
if not task_status:
pack.status = PackStatus.FAILED_LOADING_USER_METADATA.name
pack.cleanup()
continue
task_status = pack.copy_integration_images(
production_bucket, build_bucket, pc_uploaded_images, production_base_path, build_bucket_base_path)
if not task_status:
pack.status = PackStatus.FAILED_IMAGES_UPLOAD.name
pack.cleanup()
continue
task_status = pack.copy_author_image(
production_bucket, build_bucket, pc_uploaded_images, production_base_path, build_bucket_base_path)
if not task_status:
pack.status = PackStatus.FAILED_AUTHOR_IMAGE_UPLOAD.name
pack.cleanup()
continue
task_status, skipped_pack_uploading = pack.copy_and_upload_to_storage(
production_bucket, build_bucket, pc_successful_packs_dict, production_base_path, build_bucket_base_path)
if skipped_pack_uploading:
pack.status = PackStatus.PACK_ALREADY_EXISTS.name
pack.cleanup()
continue
if not task_status:
pack.status = PackStatus.FAILED_UPLOADING_PACK.name
pack.cleanup()
continue
pack.status = PackStatus.SUCCESS.name
# upload core packs json to bucket
upload_core_packs_config(production_bucket, build_number, extract_destination_path, build_bucket,
production_base_path, build_bucket_base_path)
# finished iteration over content packs
copy_index(build_index_folder_path, build_index_blob, build_index_generation, production_bucket,
build_bucket, production_base_path, build_bucket_base_path)
# upload id_set.json to bucket
copy_id_set(production_bucket, build_bucket, production_base_path, build_bucket_base_path)
# get the lists of packs divided by their status
successful_packs, skipped_packs, failed_packs = get_packs_summary(packs_list)
# Store successful and failed packs list in CircleCI artifacts
store_successful_and_failed_packs_in_ci_artifacts(
packs_results_file_path, BucketUploadFlow.UPLOAD_PACKS_TO_MARKETPLACE_STORAGE, successful_packs, failed_packs,
list(pc_successful_private_packs_dict)
)
    # verify that the packs that succeeded in Prepare Content are exactly the ones that were copied
verify_copy(successful_packs, pc_successful_packs_dict)
# summary of packs status
print_packs_summary(successful_packs, skipped_packs, failed_packs)
if __name__ == '__main__':
main()
|
py | 1a3efecd74780eea04b23c15fb0f682ae555fd52 | from django.apps import AppConfig
class GamesConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'games'
|
py | 1a3eff2ce57cf02df9b0cf49edc9c0d692c40fad | # (C) Datadog, Inc. 2019-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import os
import mock
import pytest
from datadog_checks.base.utils.common import get_docker_hostname
from datadog_checks.dev.kube_port_forward import port_forward
from datadog_checks.dev.terraform import terraform_run
from .common import ADDL_AGENT_METRICS, AGENT_DEFAULT_METRICS, OPERATOR_AWS_METRICS, OPERATOR_METRICS
try:
from contextlib import ExitStack
except ImportError:
from contextlib2 import ExitStack
HERE = os.path.dirname(os.path.abspath(__file__))
HOST = get_docker_hostname()
AGENT_PORT = 9090
OPERATOR_PORT = 6942
AGENT_URL = "http://{}:{}/metrics".format(HOST, AGENT_PORT)
OPERATOR_URL = "http://{}:{}/metrics".format(HOST, OPERATOR_PORT)
PORTS = [AGENT_PORT, OPERATOR_PORT]
@pytest.fixture(scope='session')
def dd_environment():
with terraform_run(os.path.join(HERE, 'terraform')) as outputs:
kubeconfig = outputs['kubeconfig']['value']
with ExitStack() as stack:
ip_ports = [
stack.enter_context(port_forward(kubeconfig, 'cilium', 'cilium-operator', port)) for port in PORTS
]
instances = {
'instances': [
{
'agent_endpoint': 'http://{}:{}/metrics'.format(*ip_ports[0]),
'metrics': ADDL_AGENT_METRICS + AGENT_DEFAULT_METRICS,
},
{
'operator_endpoint': 'http://{}:{}/metrics'.format(*ip_ports[1]),
'metrics': OPERATOR_METRICS + OPERATOR_AWS_METRICS,
},
]
}
yield instances
@pytest.fixture(scope="session")
def agent_instance():
return {'agent_endpoint': AGENT_URL, 'tags': ['pod_test']}
@pytest.fixture
def operator_instance():
return {'operator_endpoint': OPERATOR_URL, 'tags': ['operator_test']}
@pytest.fixture()
def mock_agent_data():
f_name = os.path.join(os.path.dirname(__file__), 'fixtures', 'agent_metrics.txt')
with open(f_name, 'r') as f:
text_data = f.read()
with mock.patch(
'requests.get',
return_value=mock.MagicMock(
status_code=200, iter_lines=lambda **kwargs: text_data.split("\n"), headers={'Content-Type': "text/plain"}
),
):
yield
@pytest.fixture()
def mock_operator_data():
f_name = os.path.join(os.path.dirname(__file__), 'fixtures', 'operator_metrics.txt')
with open(f_name, 'r') as f:
text_data = f.read()
with mock.patch(
'requests.get',
return_value=mock.MagicMock(
status_code=200, iter_lines=lambda **kwargs: text_data.split("\n"), headers={'Content-Type': "text/plain"}
),
):
yield
|
py | 1a3f013d413b62c5dc7c822bce1a2427c50342ee | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'asd.ui'
#
# Created by: PyQt5 UI code generator 5.10
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(754, 602)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.centralwidget)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.horizontalLayout_6 = QtWidgets.QHBoxLayout()
self.horizontalLayout_6.setObjectName("horizontalLayout_6")
self.label_3 = QtWidgets.QLabel(self.centralwidget)
self.label_3.setMaximumSize(QtCore.QSize(35, 16777215))
font = QtGui.QFont()
font.setFamily("NanumGothic")
font.setPointSize(12)
self.label_3.setFont(font)
self.label_3.setObjectName("label_3")
self.horizontalLayout_6.addWidget(self.label_3)
self.domain_edit = QtWidgets.QLineEdit(self.centralwidget)
self.domain_edit.setMaximumSize(QtCore.QSize(200, 16777215))
self.domain_edit.setObjectName("domain_edit")
self.horizontalLayout_6.addWidget(self.domain_edit)
self.label = QtWidgets.QLabel(self.centralwidget)
font = QtGui.QFont()
font.setFamily("Agency FB")
font.setPointSize(20)
self.label.setFont(font)
self.label.setAlignment(QtCore.Qt.AlignCenter)
self.label.setObjectName("label")
self.horizontalLayout_6.addWidget(self.label)
self.label_2 = QtWidgets.QLabel(self.centralwidget)
font = QtGui.QFont()
font.setFamily("NanumGothic")
font.setPointSize(11)
self.label_2.setFont(font)
self.label_2.setObjectName("label_2")
self.horizontalLayout_6.addWidget(self.label_2)
self.verticalLayout_2.addLayout(self.horizontalLayout_6)
self.horizontalLayout_3 = QtWidgets.QHBoxLayout()
self.horizontalLayout_3.setObjectName("horizontalLayout_3")
self.search_bar = QtWidgets.QLineEdit(self.centralwidget)
self.search_bar.setMinimumSize(QtCore.QSize(0, 30))
self.search_bar.setObjectName("search_bar")
self.horizontalLayout_3.addWidget(self.search_bar)
self.search_btn = QtWidgets.QPushButton(self.centralwidget)
self.search_btn.setMinimumSize(QtCore.QSize(120, 0))
self.search_btn.setObjectName("search_btn")
self.horizontalLayout_3.addWidget(self.search_btn)
self.verticalLayout_2.addLayout(self.horizontalLayout_3)
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
self.label_4 = QtWidgets.QLabel(self.centralwidget)
self.label_4.setMinimumSize(QtCore.QSize(20, 0))
self.label_4.setText("")
self.label_4.setObjectName("label_4")
self.horizontalLayout.addWidget(self.label_4)
self.checkBox = QtWidgets.QCheckBox(self.centralwidget)
self.checkBox.setObjectName("checkBox")
self.horizontalLayout.addWidget(self.checkBox)
self.verticalLayout_2.addLayout(self.horizontalLayout)
self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.toon_list = QtWidgets.QListWidget(self.centralwidget)
font = QtGui.QFont()
font.setPointSize(11)
self.toon_list.setFont(font)
self.toon_list.setObjectName("toon_list")
self.horizontalLayout_2.addWidget(self.toon_list)
self.toon_view = QtWidgets.QListWidget(self.centralwidget)
font = QtGui.QFont()
font.setPointSize(11)
self.toon_view.setFont(font)
self.toon_view.setObjectName("toon_view")
self.horizontalLayout_2.addWidget(self.toon_view)
self.verticalLayout_2.addLayout(self.horizontalLayout_2)
self.horizontalLayout_5 = QtWidgets.QHBoxLayout()
self.horizontalLayout_5.setObjectName("horizontalLayout_5")
self.progressBar = QtWidgets.QProgressBar(self.centralwidget)
self.progressBar.setProperty("value", 0)
self.progressBar.setObjectName("progressBar")
self.horizontalLayout_5.addWidget(self.progressBar)
self.verticalLayout = QtWidgets.QVBoxLayout()
self.verticalLayout.setObjectName("verticalLayout")
self.pdf_btn = QtWidgets.QRadioButton(self.centralwidget)
self.pdf_btn.setChecked(True)
self.pdf_btn.setObjectName("pdf_btn")
self.verticalLayout.addWidget(self.pdf_btn)
self.img_btn = QtWidgets.QRadioButton(self.centralwidget)
self.img_btn.setObjectName("img_btn")
self.verticalLayout.addWidget(self.img_btn)
self.horizontalLayout_5.addLayout(self.verticalLayout)
self.down_btn = QtWidgets.QCommandLinkButton(self.centralwidget)
self.down_btn.setObjectName("down_btn")
self.horizontalLayout_5.addWidget(self.down_btn)
self.verticalLayout_2.addLayout(self.horizontalLayout_5)
MainWindow.setCentralWidget(self.centralwidget)
self.retranslateUi(MainWindow)
self.search_btn.clicked.connect(MainWindow.search)
self.down_btn.clicked.connect(MainWindow.crawling)
self.search_bar.returnPressed.connect(self.search_btn.click)
self.checkBox.stateChanged['int'].connect(MainWindow.all_checking)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
MainWindow.setTabOrder(self.search_btn, self.toon_list)
MainWindow.setTabOrder(self.toon_list, self.down_btn)
MainWindow.setTabOrder(self.down_btn, self.domain_edit)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "툰코 만화 수집기 Ver 0.35"))
self.label_3.setText(_translate("MainWindow", "주소:"))
self.domain_edit.setText(_translate("MainWindow", "https://toonkor.show"))
self.label.setText(_translate("MainWindow", "Toonkor Collector"))
self.label_2.setText(_translate("MainWindow", "Author: IML\n"
"email: [email protected]\n"
"git: github.com\n/iml1111/Toonkor_Collector"))
self.search_btn.setText(_translate("MainWindow", "검색"))
self.checkBox.setText(_translate("MainWindow", "모두 선택"))
self.pdf_btn.setText(_translate("MainWindow", "PDF 파일"))
self.img_btn.setText(_translate("MainWindow", "이미지 파일"))
self.down_btn.setText(_translate("MainWindow", "다운로드 시작"))
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
MainWindow = QtWidgets.QMainWindow()
ui = Ui_MainWindow()
ui.setupUi(MainWindow)
MainWindow.show()
sys.exit(app.exec_())
|
py | 1a3f01726eab62eda3c1a048e6a3150d3d4fffbd | ##########################################################################
#
# Copyright (c) 2011-2012, John Haddon. All rights reserved.
# Copyright (c) 2011-2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
from TestCase import TestCase
from WidgetTest import WidgetTest
from MenuTest import MenuTest
from SplitContainerTest import SplitContainerTest
from WindowTest import WindowTest
from ListContainerTest import ListContainerTest
from EventSignalCombinerTest import EventSignalCombinerTest
from FrameTest import FrameTest
from NameGadgetTest import NameGadgetTest
from LinearContainerTest import LinearContainerTest
from NodeGadgetTest import NodeGadgetTest
from GadgetTest import GadgetTest
from TabbedContainerTest import TabbedContainerTest
from NodeGraphTest import NodeGraphTest
from WidgetSignalTest import WidgetSignalTest
from EventLoopTest import EventLoopTest
from TextWidgetTest import TextWidgetTest
from BoolWidgetTest import BoolWidgetTest
from ImageTest import ImageTest
from ButtonTest import ButtonTest
from CollapsibleTest import CollapsibleTest
from ImageGadgetTest import ImageGadgetTest
from StandardNodeGadgetTest import StandardNodeGadgetTest
from ColorSwatchTest import ColorSwatchTest
from VariantTest import VariantTest
from GridContainerTest import GridContainerTest
from NoduleTest import NoduleTest
from ProgressBarTest import ProgressBarTest
from ContainerWidgetTest import ContainerWidgetTest
from SelectionMenuTest import SelectionMenuTest
from StandardStyleTest import StandardStyleTest
from EditorWidgetTest import EditorWidgetTest
from NumericSliderTest import NumericSliderTest
from PlugValueWidgetTest import PlugValueWidgetTest
from PathListingWidgetTest import PathListingWidgetTest
from MultiLineTextWidgetTest import MultiLineTextWidgetTest
from LabelTest import LabelTest
from ScrolledContainerTest import ScrolledContainerTest
from NodeEditorTest import NodeEditorTest
from ScriptWindowTest import ScriptWindowTest
from CompoundEditorTest import CompoundEditorTest
from MultiSelectionMenuTest import MultiSelectionMenuTest
from StandardGraphLayoutTest import StandardGraphLayoutTest
from StandardNodeUITest import StandardNodeUITest
from ViewTest import ViewTest
from SliderTest import SliderTest
from NumericPlugValueWidgetTest import NumericPlugValueWidgetTest
from CompoundNumericPlugValueWidgetTest import CompoundNumericPlugValueWidgetTest
from NameLabelTest import NameLabelTest
from NameWidgetTest import NameWidgetTest
from GLWidgetTest import GLWidgetTest
from BookmarksTest import BookmarksTest
from PlaybackTest import PlaybackTest
from SpacerGadgetTest import SpacerGadgetTest
from BoxUITest import BoxUITest
from ConnectionGadgetTest import ConnectionGadgetTest
from AuxiliaryConnectionsGadgetTest import AuxiliaryConnectionsGadgetTest
from MessageWidgetTest import MessageWidgetTest
from ModuleTest import ModuleTest
from PlugLayoutTest import PlugLayoutTest
from ViewportGadgetTest import ViewportGadgetTest
from VectorDataWidgetTest import VectorDataWidgetTest
from DotNodeGadgetTest import DotNodeGadgetTest
from DocumentationTest import DocumentationTest
from LazyMethodTest import LazyMethodTest
from ReferenceUITest import ReferenceUITest
from CompoundDataPlugValueWidgetTest import CompoundDataPlugValueWidgetTest
from GraphGadgetTest import GraphGadgetTest
from MenuBarTest import MenuBarTest
from GadgetWidgetTest import GadgetWidgetTest
from CompoundNoduleTest import CompoundNoduleTest
from SwitchNodeGadgetTest import SwitchNodeGadgetTest
from NoduleLayoutTest import NoduleLayoutTest
from ErrorDialogueTest import ErrorDialogueTest
from WidgetAlgoTest import WidgetAlgoTest
from BackupsTest import BackupsTest
if __name__ == "__main__":
unittest.main()
|
py | 1a3f020ab405dabe5f2af10cea99f1b96fd11799 | import logging
from django.apps import apps
from django.db.utils import OperationalError, ProgrammingError
from django.utils import six
from django.utils.translation import ugettext_lazy as _
from mayan.apps.common.class_mixins import AppsModuleLoaderMixin
from mayan.apps.common.classes import PropertyHelper
from mayan.apps.templating.classes import Template
from .exceptions import WorkflowStateActionError
__all__ = ('WorkflowAction',)
logger = logging.getLogger(name=__name__)
class DocumentStateHelper(PropertyHelper):
@staticmethod
@property
def constructor(*args, **kwargs):
return DocumentStateHelper(*args, **kwargs)
def get_result(self, name):
return self.instance.workflows.get(workflow__internal_name=name)
class WorkflowActionMetaclass(type):
_registry = {}
def __new__(mcs, name, bases, attrs):
new_class = super(WorkflowActionMetaclass, mcs).__new__(
mcs, name, bases, attrs
)
if not new_class.__module__ == __name__:
mcs._registry[
'{}.{}'.format(new_class.__module__, name)
] = new_class
return new_class
class WorkflowActionBase(AppsModuleLoaderMixin):
fields = ()
class WorkflowAction(
six.with_metaclass(WorkflowActionMetaclass, WorkflowActionBase)
):
_loader_module_name = 'workflow_actions'
previous_dotted_paths = ()
@classmethod
def load_modules(cls):
super().load_modules()
for action_class in WorkflowAction.get_all():
action_class.migrate()
@classmethod
def clean(cls, request, form_data=None):
return form_data
@classmethod
def get(cls, name):
return cls._registry[name]
@classmethod
def get_all(cls):
return sorted(cls._registry.values(), key=lambda x: x.label)
@classmethod
def get_choices(cls):
apps_name_map = {
app.name: app for app in apps.get_app_configs()
}
# Match each workflow action to an app
apps_workflow_action_map = {}
for klass in WorkflowAction.get_all():
for app_name, app in apps_name_map.items():
if klass.__module__.startswith(app_name):
apps_workflow_action_map.setdefault(app, [])
apps_workflow_action_map[app].append((klass.id(), klass.label))
result = [
(app.verbose_name, workflow_actions) for app, workflow_actions in apps_workflow_action_map.items()
]
# Sort by app, then by workflow action
return sorted(result, key=lambda x: (x[0], x[1]))
@classmethod
def id(cls):
return '{}.{}'.format(cls.__module__, cls.__name__)
@classmethod
def migrate(cls):
WorkflowStateAction = apps.get_model(
app_label='document_states', model_name='WorkflowStateAction'
)
for previous_dotted_path in cls.previous_dotted_paths:
try:
WorkflowStateAction.objects.filter(
action_path=previous_dotted_path
).update(action_path=cls.id())
except (OperationalError, ProgrammingError):
# Ignore errors during the database migration and
# quit further attempts.
return
def __init__(self, form_data=None):
self.form_data = form_data
def get_form_schema(self, workflow_state, request=None):
result = {
'fields': self.fields or {},
'media': getattr(self, 'media', {}),
'widgets': getattr(self, 'widgets', {}),
}
if hasattr(self, 'field_order'):
result['field_order'] = self.field_order
return result
def render_field(self, field_name, context):
try:
result = Template(
template_string=self.form_data.get(field_name, '')
).render(
context=context
)
except Exception as exception:
raise WorkflowStateActionError(
_('%(field_name)s template error: %(exception)s') % {
'field_name': field_name, 'exception': exception
}
)
logger.debug('%s template result: %s', field_name, result)
return result
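# Hedged sketch of how the registry above is typically used: a WorkflowAction subclass declared in
# an app's `workflow_actions` module is auto-registered by the metaclass under its dotted path.
# The class below is illustrative only; its fields and execute() body are assumptions, not part of
# this module:
#
# class SampleLabelAction(WorkflowAction):
#     label = _('Append text to the document label')
#     fields = {
#         'label_text': {'label': _('Label text'), 'class': 'django.forms.CharField'},
#     }
#
#     def execute(self, context):
#         document = context['document']
#         document.label = self.render_field(field_name='label_text', context=context)
#         document.save()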
|
py | 1a3f02f06051179e57c0a6fe730b8ce2e37bc952 | # Gamma is a discrete RandomVariable that represents
# the instantaneous values of a model parameter
# to be embedded into continuous space
# parameters:
#
# stencil : list of values that the parameter takes
# alphas: probabilities of taking each value.
# For example, stencil = [2, 3] and alphas=[0.2, 0.8]
# means the random variable takes the value 2 with prob 0.2
# and the value 3 with prob 0.8.
import numpy as np
import math
import random
class Gamma():
def __init__(self, stencil, alphas):
self.stencil = stencil
self.alphas = alphas
assert(len(stencil)>0)
assert(len(alphas)==len(stencil))
        assert(sum(alphas) <= 1.0 + 1e-6)  # probabilities should sum to (at most) 1
#instantaneous and mean values
self.value = self.stencil[0]
        self.mean_value = sum(stencil[i] * alphas[i] for i in range(len(stencil)))
    # update and return the instantaneous value:
    def get(self):
        v = np.random.choice(self.stencil, p=self.alphas)
        self.value = v
return v
def Test():
gamma = Gamma([2,3,4],[0.4,0.4,0.2])
for i in range(20):
print (gamma.get())
print("Mean=", gamma.mean_value)
|
py | 1a3f04bbcb2bde5952880a7867014765dace30d3 | import tempfile
from typing import Optional, Union
class ModuleContent(object):
def __init__(self, dir: Optional[Union[tempfile.TemporaryDirectory, str]], next_url=None, exception=None) -> None:
self.dir = dir.replace('//', '/') if dir else None
self.next_url = next_url
def loaded(self) -> bool:
"""
Indicates whether or not the module content could be loaded. If False is returned, `path()` will return None.
"""
return self.dir is not None
def path(self) -> Optional[str]:
"""
Returns the directory path containing module resources.
"""
if isinstance(self.dir, tempfile.TemporaryDirectory):
return self.dir.name
else:
return self.dir
def cleanup(self):
"""
Clean up any temporary resources, if applicable.
"""
if isinstance(self.dir, tempfile.TemporaryDirectory):
self.dir.cleanup()
def __repr__(self) -> str:
return self.path()
def __enter__(self):
return self
def __exit__(self, exc, value, tb):
self.cleanup() |
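# Minimal usage sketch (the directory path is hypothetical, not part of this module):
#
#     with ModuleContent(dir="/tmp/module_cache//terraform-aws-vpc") as content:
#         if content.loaded():
#             print(content.path())  # -> "/tmp/module_cache/terraform-aws-vpc" (doubled slash collapsed)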
py | 1a3f05be11c507e1c13031e7e2af5f23a25fd24f | from typing import Any
import torch
from torch import Tensor
from torch.optim.optimizer import Optimizer
try:
from apex import amp
except ImportError:
APEX_AVAILABLE = False
else:
APEX_AVAILABLE = True
class ModelHooks(torch.nn.Module):
# TODO: remove in v0.9.0
def on_sanity_check_start(self):
"""
Called before starting evaluation.
Warning:
Deprecated. Will be removed in v0.9.0.
"""
def on_train_start(self) -> None:
"""
Called at the beginning of training before sanity check.
"""
# do something at the start of training
def on_train_end(self) -> None:
"""
Called at the end of training before logger experiment is closed.
"""
# do something at the end of training
def on_batch_start(self, batch: Any) -> None:
"""
Called in the training loop before anything happens for that batch.
If you return -1 here, you will skip training for the rest of the current epoch.
Args:
batch: The batched data as it is returned by the training DataLoader.
"""
# do something when the batch starts
def on_batch_end(self) -> None:
"""
Called in the training loop after the batch.
"""
# do something when the batch ends
def on_epoch_start(self) -> None:
"""
Called in the training loop at the very beginning of the epoch.
"""
# do something when the epoch starts
def on_epoch_end(self) -> None:
"""
Called in the training loop at the very end of the epoch.
"""
# do something when the epoch ends
def on_pre_performance_check(self) -> None:
"""
Called at the very beginning of the validation loop.
"""
# do something before validation starts
def on_post_performance_check(self) -> None:
"""
Called at the very end of the validation loop.
"""
# do something before validation end
def on_before_zero_grad(self, optimizer: Optimizer) -> None:
"""
Called after optimizer.step() and before optimizer.zero_grad().
Called in the training loop after taking an optimizer step and before zeroing grads.
Good place to inspect weight information with weights updated.
This is where it is called::
for optimizer in optimizers:
optimizer.step()
model.on_before_zero_grad(optimizer) # < ---- called here
                optimizer.zero_grad()
Args:
optimizer: The optimizer for which grads should be zeroed.
"""
# do something with the optimizer or inspect it.
def on_after_backward(self) -> None:
"""
Called in the training loop after loss.backward() and before optimizers do anything.
This is the ideal place to inspect or log gradient information.
Example::
def on_after_backward(self):
# example to inspect gradient information in tensorboard
if self.trainer.global_step % 25 == 0: # don't make the tf file huge
params = self.state_dict()
for k, v in params.items():
grads = v
name = k
self.logger.experiment.add_histogram(tag=name, values=grads,
global_step=self.trainer.global_step)
"""
def backward(self, trainer, loss: Tensor, optimizer: Optimizer, optimizer_idx: int) -> None:
"""
Override backward with your own implementation if you need to.
Args:
trainer: Pointer to the trainer
loss: Loss is already scaled by accumulated grads
optimizer: Current optimizer being used
optimizer_idx: Index of the current optimizer being used
Called to perform backward step.
Feel free to override as needed.
The loss passed in has already been scaled for accumulated gradients if requested.
Example::
def backward(self, use_amp, loss, optimizer):
if use_amp:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
"""
if trainer.precision == 16:
# .backward is not special on 16-bit with TPUs
if trainer.on_tpu:
return
if self.trainer.use_native_amp:
self.trainer.scaler.scale(loss).backward()
# TODO: remove in v0.8.0
else:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
|
py | 1a3f065f810854831b7cdb0bfea48fe1a8477b1a | '''
------------------------------------------------------------------------------
Copyright (c) 2015 Microsoft Corporation
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
------------------------------------------------------------------------------
'''
import os
import glob
modules = glob.glob(os.path.dirname(__file__)+"/*.py")
__all__ = [ os.path.basename(f)[:-3] for f in modules if not os.path.basename(f).startswith('_')] |
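# Illustrative note (file names are hypothetical): for a package directory containing foo.py,
# bar.py and _private.py, the expression above yields __all__ == ['foo', 'bar']; modules whose
# names start with '_' are skipped, and the ordering follows glob(), which is not guaranteed.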
py | 1a3f067bdc4d0c7ffbb58539f9e8d53074efc97d | #
# Copyright 2018 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import time
from itertools import chain
from pandas import DatetimeIndex
from pandas.tseries.holiday import (
GoodFriday,
USLaborDay,
USPresidentsDay,
USThanksgivingDay,
)
from pytz import timezone
from .trading_calendar import TradingCalendar, HolidayCalendar
from .us_holidays import (
USNewYearsDay,
USMartinLutherKingJrAfter1998,
USMemorialDay,
USIndependenceDay,
Christmas,
MonTuesThursBeforeIndependenceDay,
FridayAfterIndependenceDayExcept2013,
USBlackFridayBefore1993,
USBlackFridayInOrAfter1993,
September11Closings,
HurricaneSandyClosings,
USNationalDaysofMourning,
ChristmasEveBefore1993,
ChristmasEveInOrAfter1993,
)
# Useful resources for making changes to this file:
# http://www.nyse.com/pdfs/closings.pdf
# http://www.stevemorse.org/jcal/whendid.html
class XNYSExchangeCalendar(TradingCalendar):
"""
Exchange calendar for the New York Stock Exchange (XNYS).
Open Time: 9:31 AM, US/Eastern
Close Time: 4:00 PM, US/Eastern
Regularly-Observed Holidays:
- New Years Day (observed on monday when Jan 1 is a Sunday)
- Martin Luther King Jr. Day (3rd Monday in January, only after 1998)
- Washington's Birthday (aka President's Day, 3rd Monday in February)
- Good Friday (two days before Easter Sunday)
- Memorial Day (last Monday in May)
- Independence Day (observed on the nearest weekday to July 4th)
- Labor Day (first Monday in September)
- Thanksgiving (fourth Thursday in November)
- Christmas (observed on nearest weekday to December 25)
NOTE: The NYSE does not observe the following US Federal Holidays:
- Columbus Day
- Veterans Day
Regularly-Observed Early Closes:
- July 3rd (Mondays, Tuesdays, and Thursdays, 1995 onward)
- July 5th (Fridays, 1995 onward, except 2013)
- Christmas Eve (except on Fridays, when the exchange is closed entirely)
- Day After Thanksgiving (aka Black Friday, observed from 1992 onward)
NOTE: Until 1993, the standard early close time for the NYSE was 2:00 PM.
From 1993 onward, it has been 1:00 PM.
Additional Irregularities:
- Closed from 9/11/2001 to 9/16/2001 due to terrorist attacks in NYC.
- Closed on 10/29/2012 and 10/30/2012 due to Hurricane Sandy.
- Closed on 4/27/1994 due to Richard Nixon's death.
- Closed on 6/11/2004 due to Ronald Reagan's death.
- Closed on 1/2/2007 due to Gerald Ford's death.
- Closed at 1:00 PM on Wednesday, July 3rd, 2013
- Closed at 1:00 PM on Friday, December 31, 1999
- Closed at 1:00 PM on Friday, December 26, 1997
- Closed at 1:00 PM on Friday, December 26, 2003
NOTE: The exchange was **not** closed early on Friday December 26, 2008,
nor was it closed on Friday December 26, 2014. The next Thursday Christmas
will be in 2025. If someone is still maintaining this code in 2025, then
we've done alright...and we should check if it's a half day.
"""
regular_early_close = time(13)
name = 'XNYS'
tz = timezone('US/Eastern')
open_times = (
(None, time(9, 31)),
)
close_times = (
(None, time(16)),
)
@property
def regular_holidays(self):
return HolidayCalendar([
USNewYearsDay,
USMartinLutherKingJrAfter1998,
USPresidentsDay,
GoodFriday,
USMemorialDay,
USIndependenceDay,
USLaborDay,
USThanksgivingDay,
Christmas,
])
@property
def adhoc_holidays(self):
return list(chain(
September11Closings,
HurricaneSandyClosings,
USNationalDaysofMourning,
))
@property
def special_closes(self):
return [
(self.regular_early_close, HolidayCalendar([
MonTuesThursBeforeIndependenceDay,
FridayAfterIndependenceDayExcept2013,
USBlackFridayInOrAfter1993,
ChristmasEveInOrAfter1993
])),
(time(14), HolidayCalendar([
ChristmasEveBefore1993,
USBlackFridayBefore1993,
])),
]
@property
def special_closes_adhoc(self):
return [
(
self.regular_early_close,
DatetimeIndex(
[
'1997-12-26',
'1999-12-31',
'2003-12-26',
'2013-07-03',
],
tz='UTC',
)
)
]
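# Minimal usage sketch (hedged): it is assumed the surrounding trading-calendars package exposes
# the usual TradingCalendar session API; the dates below are arbitrary examples:
#
#     import pandas as pd
#     cal = XNYSExchangeCalendar()
#     sessions = cal.sessions_in_range(pd.Timestamp('2017-07-01', tz='UTC'),
#                                      pd.Timestamp('2017-07-08', tz='UTC'))
#     # 2017-07-04 (Independence Day) is absent, and 2017-07-03 (the Monday before it)
#     # is a 13:00 US/Eastern early close per MonTuesThursBeforeIndependenceDay.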
|
py | 1a3f077dfa95888fd37db0d86d6524a8aeb05f72 | # Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import unittest
import numpy as np
from extensions.ops.reorgyolo import ReorgYoloOp
from mo.front.common.extractors.utils import layout_attrs
from mo.graph.graph import Node
from unit_tests.utils.graph import build_graph
nodes_attributes = {'node_1': {'type': 'Identity', 'kind': 'op'},
'reorg': {'type': 'ReorgYolo', 'kind': 'op'},
'node_3': {'type': 'Identity', 'kind': 'op'},
'op_output': { 'kind': 'op', 'op': 'Result'}
}
def calculate_reorgyolo_output(input, stride):
output = np.full_like(input, -1, dtype=np.int64)
output[0] = input[0]
output[1] = input[1] * stride ** 2
output[2] = np.round(input[2] / stride)
output[3] = np.round(input[3] / stride)
return output
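# Worked example for the helper above, using the values from the test below: an input shape of
# [1, 3, 227, 227] with stride 2 gives [1, 3 * 2**2, round(227 / 2), round(227 / 2)], i.e.
# [1, 12, 114, 114] (np.round rounds half to even, so 113.5 becomes 114).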
class TestReorgYOLO(unittest.TestCase):
def test_reorgyolo_infer(self):
graph = build_graph(nodes_attributes,
[('node_1', 'reorg'),
('reorg', 'node_3'),
('node_3', 'op_output')
],
{'node_3': {'shape': None},
'node_1': {'shape': np.array([1, 3, 227, 227])},
'reorg': {'stride': 2,
**layout_attrs()}
})
reorg_node = Node(graph, 'reorg')
ReorgYoloOp.reorgyolo_infer(reorg_node)
exp_shape = calculate_reorgyolo_output(np.array([1, 3, 227, 227]), 2)
res_shape = graph.node['node_3']['shape']
for i in range(0, len(exp_shape)):
self.assertEqual(exp_shape[i], res_shape[i])
|
py | 1a3f07fcb340ee16cc4575b738f6fd0f2da6a73d | import numpy
import theano
from theano import tensor
from theano.tests.breakpoint import PdbBreakpoint
from theano.tests import unittest_tools as utt
from theano.tensor.tests import test_basic
import theano.sandbox.gpuarray
from .. import basic_ops
from ..type import GpuArrayType, gpuarray_shared_constructor, get_context
from ..basic_ops import GpuAlloc, GpuReshape, GpuFromHost, host_from_gpu
from ..elemwise import GpuCAReduceCuda, GpuCAReduceCPY, GpuElemwise
from ..subtensor import GpuSubtensor
from .config import mode_with_gpu, test_ctx_name
def test_local_assert():
x = theano.tensor.fmatrix()
a = theano.tensor.opt.assert_op(x, theano.tensor.eq(x, 0).any())
f = theano.function([x], a, mode=mode_with_gpu)
topo = f.maker.fgraph.toposort()
a_op = [n for n in topo if isinstance(n.op, theano.tensor.opt.Assert)]
assert len(a_op) == 1
assert isinstance(a_op[0].inputs[0].type, GpuArrayType)
def test_local_remove_all_assert():
x = theano.tensor.fmatrix()
a = theano.tensor.opt.assert_op(x, theano.tensor.eq(x, 0).any())
# By default `unsafe` should not be there
f = theano.function([x], a, mode=mode_with_gpu)
topo = f.maker.fgraph.toposort()
a_op = [n for n in topo if isinstance(n.op, theano.tensor.opt.Assert)]
assert len(a_op) == 1
# Put `unsafe`
f = theano.function([x], a, mode=mode_with_gpu.including('unsafe'))
topo = f.maker.fgraph.toposort()
a_op = [n for n in topo if isinstance(n.op, theano.tensor.opt.Assert)]
assert len(a_op) == 0
# Remove `unsafe`
f = theano.function([x], a, mode=mode_with_gpu.excluding('unsafe'))
topo = f.maker.fgraph.toposort()
a_op = [n for n in topo if isinstance(n.op, theano.tensor.opt.Assert)]
assert len(a_op) == 1
def test_local_gpu_contiguous_gpu_contiguous():
a = tensor.fmatrix()
o1 = basic_ops.gpu_contiguous(a)
o2 = basic_ops.gpu_contiguous(o1)
f1 = theano.function([a], o1, mode=mode_with_gpu)
f2 = theano.function([a], o2, mode=mode_with_gpu)
assert 1 == len([node for node in f1.maker.fgraph.toposort()
if isinstance(node.op, basic_ops.GpuContiguous)])
assert 1 == len([node for node in f2.maker.fgraph.toposort()
if isinstance(node.op, basic_ops.GpuContiguous)])
def test_flatten():
m = theano.tensor.fmatrix()
f = theano.function([m], m.flatten(), mode=mode_with_gpu)
val = numpy.random.rand(10, 11).astype("float32")
res = f(val)
utt.assert_allclose(res, val.flatten())
assert res.shape == val.flatten().shape
assert GpuReshape in [type(node.op)
for node in f.maker.fgraph.toposort()]
val = numpy.random.rand(10, 11).astype("float32")
res = f(val)
utt.assert_allclose(res, val.flatten())
assert res.shape == val.flatten().shape
assert GpuReshape in [type(node.op)
for node in f.maker.fgraph.toposort()]
f = theano.function([m], m.flatten(ndim=2), mode=mode_with_gpu)
val = numpy.random.rand(10, 11).astype("float32")
res = f(val)
utt.assert_allclose(res, val)
assert res.shape == val.shape
assert GpuReshape in [type(node.op)
for node in f.maker.fgraph.toposort()]
m = theano.tensor.tensor3()
f = theano.function([m], m.flatten(ndim=2), mode=mode_with_gpu)
val = numpy.random.rand(10, 11, 12).astype("float32")
res = f(val)
utt.assert_allclose(res, val.reshape(10, -1))
assert res.shape == val.reshape(10, -1).shape
assert GpuReshape in [type(node.op)
for node in f.maker.fgraph.toposort()]
def test_reduce():
kind = get_context(test_ctx_name).kind
for method, param in [('sum', dict(acc_dtype='float32')),
('prod', dict(acc_dtype='float32')),
('max', {}), ('min', {})]:
m = theano.tensor.fmatrix()
f = theano.function([m], getattr(m, method)(axis=0,
**param),
mode=mode_with_gpu)
val = numpy.random.rand(10, 11).astype("float32")
res = f(val)
utt.assert_allclose(res, getattr(val, method)(axis=0))
assert res.shape == (11,)
topo = f.maker.fgraph.toposort()
ops = [type(node.op) for node in topo]
if kind == 'opencl' and method in ["max", "min"]:
assert not(GpuCAReduceCuda in ops or GpuCAReduceCPY in ops)
else:
assert GpuCAReduceCuda in ops or GpuCAReduceCPY in ops
def test_local_gpualloc_memset_0():
i = theano.tensor.iscalar()
z = numpy.zeros((1,), dtype='float32')
o = numpy.ones((1,), dtype='float32')
ones = numpy.ones((2,), dtype='float32')
# Test with 0
a = GpuAlloc(test_ctx_name)(z, i)
f = theano.function([i], a, mode=mode_with_gpu)
topo = f.maker.fgraph.toposort()
assert len(topo) == 1
assert isinstance(topo[0].op, GpuAlloc) and topo[0].op.memset_0
assert (numpy.asarray(f(6)) == 0).all()
# Test with 1
a = GpuAlloc(test_ctx_name)(o, i)
f = theano.function([i], a, mode=mode_with_gpu)
topo = f.maker.fgraph.toposort()
assert len(topo) == 1
assert isinstance(topo[0].op, GpuAlloc)
assert not topo[0].op.memset_0
assert (numpy.asarray(f(6)) == 1).all()
# Test with 1, 1
a = GpuAlloc(test_ctx_name)(ones, i)
f = theano.function([i], a, mode=mode_with_gpu)
topo = f.maker.fgraph.toposort()
assert len(topo) == 1
assert isinstance(topo[0].op, GpuAlloc)
assert not topo[0].op.memset_0
assert (numpy.asarray(f(2)) == 1).all()
def test_rebroadcast():
d = numpy.random.rand(10, 10).astype('float32')
v = theano.tensor.fmatrix()
up = tensor.unbroadcast(v.sum().dimshuffle('x', 'x'), 0, 1)
f = theano.function([v], [up], mode=mode_with_gpu)
f(d)
topo = f.maker.fgraph.toposort()
rebrs = [node for node in topo if isinstance(node.op, tensor.Rebroadcast)]
assert len(rebrs) == 1
rebr = rebrs[0]
assert isinstance(rebr.inputs[0].type, GpuArrayType)
assert isinstance(rebr.outputs[0].type, GpuArrayType)
class TestSpecifyShape(test_basic.TestSpecifyShape):
mode = mode_with_gpu
input_type = GpuArrayType
def test_print_op():
""" Test that print ops don't block gpu optimization"""
b = tensor.fmatrix()
f = theano.function([b], theano.printing.Print()(b) * 2,
mode=mode_with_gpu)
topo = f.maker.fgraph.toposort()
assert isinstance(topo[0].op, GpuFromHost)
assert isinstance(topo[1].op, theano.printing.Print)
assert isinstance(topo[2].op, GpuElemwise)
assert topo[3].op == host_from_gpu
f(numpy.random.random((5, 5)).astype('float32'))
def test_pdbbreakpoint_op():
""" Test that PdbBreakpoint ops don't block gpu optimization"""
b = tensor.fmatrix()
# Create a function composed of a breakpoint followed by
# some computation
condition = tensor.gt(b.sum(), 0)
b_monitored = PdbBreakpoint(name='TestBreakpoint')(condition, b)
output = b_monitored ** 2
f = theano.function([b], output, mode=mode_with_gpu)
# Ensure that, in the compiled function, the computation following the
# breakpoint has been moved to the gpu.
topo = f.maker.fgraph.toposort()
assert isinstance(topo[-2].op, GpuElemwise)
assert topo[-1].op == host_from_gpu
def test_local_gpu_elemwise_careduce():
x = theano.tensor.matrix()
o = (x * x).sum()
f = theano.function([x], o, mode=mode_with_gpu)
topo = f.maker.fgraph.toposort()
assert len(topo) == 3
assert topo[1].op.pre_scalar_op == theano.scalar.sqr
f(numpy.random.rand(3, 4).astype(theano.config.floatX))
def test_local_gpu_subtensor():
# Test shared forced on CPU.
t = tensor._shared(numpy.zeros(20, "float32"))
f = theano.function([], t[3:4], mode=mode_with_gpu)
topo = f.maker.fgraph.toposort()
assert any([type(node.op) is tensor.Subtensor for node in topo])
assert not any([isinstance(node.op, GpuSubtensor) for node in topo])
# Test graph input.
t = tensor.fmatrix()
f = theano.function([t], t[3:4], mode=mode_with_gpu)
topo = f.maker.fgraph.toposort()
assert any([type(node.op) is tensor.Subtensor for node in topo])
assert not any([isinstance(node.op, GpuSubtensor) for node in topo])
# Test multiple use of the input
# We want the subtensor to be on the GPU to prevent multiple transfer.
t = tensor.fmatrix()
f = theano.function([t], [t[3:4], t + 1], mode=mode_with_gpu)
topo = f.maker.fgraph.toposort()
assert not any([type(node.op) is tensor.Subtensor for node in topo])
assert any([isinstance(node.op, GpuSubtensor) for node in topo])
# Test multiple use of the input + input as output
# We want the subtensor to be on the GPU to prevent multiple transfer.
t = tensor.fmatrix()
f = theano.function([t], [t[3:4], t + 1, t], mode=mode_with_gpu)
topo = f.maker.fgraph.toposort()
assert not any([type(node.op) is tensor.Subtensor for node in topo])
assert any([isinstance(node.op, GpuSubtensor) for node in topo])
    # Test shared forced on CPU and we do computation on the output of
# the subtensor.
t = tensor._shared(numpy.zeros(20, "float32"))
f = theano.function([], t[3:4] + 1, mode=mode_with_gpu)
topo = f.maker.fgraph.toposort()
assert any([type(node.op) is tensor.Subtensor for node in topo])
assert not any([isinstance(node.op, GpuSubtensor) for node in topo])
assert any([isinstance(node.op, GpuElemwise) for node in topo])
def test_local_gpu_elemwise():
"""
Test local_gpu_elemwise when there is a dtype upcastable to float32
"""
a = tensor.bmatrix()
b = tensor.fmatrix()
c = tensor.fmatrix()
a_v = (numpy.random.rand(4, 5) * 10).astype("int8")
b_v = (numpy.random.rand(4, 5) * 10).astype("float32")
c_v = (numpy.random.rand(4, 5) * 10).astype("float32")
# Due to optimization order, this composite is created when all
    # the ops are on the gpu.
f = theano.function([a, b, c], a + b + c, mode=mode_with_gpu)
topo = f.maker.fgraph.toposort()
assert sum(isinstance(node.op, GpuElemwise) for node in topo) == 1
assert sum(type(node.op) == tensor.Elemwise for node in topo) == 0
utt.assert_allclose(f(a_v, b_v, c_v), a_v + b_v + c_v)
# Now test with the composite already on the cpu before we move it
# to the gpu
a_s = theano.scalar.int8()
b_s = theano.scalar.float32()
c_s = theano.scalar.float32()
out_s = theano.scalar.Composite([a_s, b_s, c_s], [a_s + b_s + c_s])
out_op = tensor.Elemwise(out_s)
f = theano.function([a, b, c], out_op(a, b, c), mode=mode_with_gpu)
topo = f.maker.fgraph.toposort()
assert sum(isinstance(node.op, GpuElemwise) for node in topo) == 1
assert sum(type(node.op) == tensor.Elemwise for node in topo) == 0
utt.assert_allclose(f(a_v, b_v, c_v), a_v + b_v + c_v)
    return  # Not yet implemented
# Test multiple output
a_s = theano.scalar.float32()
a = tensor.fmatrix()
from theano.scalar.basic import identity
out_s = theano.scalar.Composite([a_s, b_s, c_s],
[identity(a_s), identity(c_s), identity(b_s)])
outs_op = tensor.Elemwise(out_s)
f = theano.function([a, b, c], outs_op(a, b, c), mode=mode_with_gpu)
topo = f.maker.fgraph.toposort()
assert sum(isinstance(node.op, GpuElemwise) for node in topo) == 1
assert sum(type(node.op) == tensor.Elemwise for node in topo) == 0
out = f(a_v, b_v, c_v)
utt.assert_allclose(out[0], a_v)
utt.assert_allclose(out[1], c_v)
utt.assert_allclose(out[2], b_v)
# Test multiple output
out_s = theano.scalar.Composite([a_s, b_s, c_s], [a_s + b_s, a_s * b_s])
outs_op = tensor.Elemwise(out_s)
f = theano.function([a, b, c], outs_op(a, b, c), mode=mode_with_gpu)
topo = f.maker.fgraph.toposort()
assert sum(isinstance(node.op, GpuElemwise) for node in topo) == 1
assert sum(type(node.op) == tensor.Elemwise for node in topo) == 0
out = f(a_v, b_v, c_v)
utt.assert_allclose(out[0], a_v + b_v)
utt.assert_allclose(out[1], a_v * c_v)
# Test non-contiguous input
c = gpuarray_shared_constructor(numpy.asarray(c_v, dtype='float32'))
f = theano.function([a, b], outs_op(a[::2], b[::2], c[::2]),
mode=mode_with_gpu)
out = f(a_v, b_v)
utt.assert_allclose(out[0], a_v[::2] + b_v[::2])
utt.assert_allclose(out[1], a_v[::2] * c_v[::2])
|
py | 1a3f091da65d0d2e7913a72d49db0bfeb1b271b1 | import math
import torch
import torch.nn as nn
from .utils import to_cpu
# This new loss function is based on https://github.com/ultralytics/yolov3/blob/master/utils/loss.py
def bbox_iou(box1, box2, x1y1x2y2=True, GIoU=False, DIoU=False, CIoU=False, eps=1e-9):
# Returns the IoU of box1 to box2. box1 is 4, box2 is nx4
box2 = box2.T
# Get the coordinates of bounding boxes
if x1y1x2y2: # x1, y1, x2, y2 = box1
b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3]
b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3]
else: # transform from xywh to xyxy
b1_x1, b1_x2 = box1[0] - box1[2] / 2, box1[0] + box1[2] / 2
b1_y1, b1_y2 = box1[1] - box1[3] / 2, box1[1] + box1[3] / 2
b2_x1, b2_x2 = box2[0] - box2[2] / 2, box2[0] + box2[2] / 2
b2_y1, b2_y2 = box2[1] - box2[3] / 2, box2[1] + box2[3] / 2
# Intersection area
inter = (torch.min(b1_x2, b2_x2) - torch.max(b1_x1, b2_x1)).clamp(0) * \
(torch.min(b1_y2, b2_y2) - torch.max(b1_y1, b2_y1)).clamp(0)
# Union Area
w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1 + eps
w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1 + eps
union = w1 * h1 + w2 * h2 - inter + eps
iou = inter / union
if GIoU or DIoU or CIoU:
# convex (smallest enclosing box) width
cw = torch.max(b1_x2, b2_x2) - torch.min(b1_x1, b2_x1)
ch = torch.max(b1_y2, b2_y2) - torch.min(b1_y1, b2_y1) # convex height
if CIoU or DIoU: # Distance or Complete IoU https://arxiv.org/abs/1911.08287v1
c2 = cw ** 2 + ch ** 2 + eps # convex diagonal squared
rho2 = ((b2_x1 + b2_x2 - b1_x1 - b1_x2) ** 2 +
(b2_y1 + b2_y2 - b1_y1 - b1_y2) ** 2) / 4 # center distance squared
if DIoU:
return iou - rho2 / c2 # DIoU
elif CIoU: # https://github.com/Zzh-tju/DIoU-SSD-pytorch/blob/master/utils/box/box_utils.py#L47
v = (4 / math.pi ** 2) * \
torch.pow(torch.atan(w2 / h2) - torch.atan(w1 / h1), 2)
with torch.no_grad():
alpha = v / ((1 + eps) - iou + v)
return iou - (rho2 / c2 + v * alpha) # CIoU
else: # GIoU https://arxiv.org/pdf/1902.09630.pdf
c_area = cw * ch + eps # convex area
return iou - (c_area - union) / c_area # GIoU
else:
return iou # IoU
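# --- Illustrative usage sketch, not part of the original file ---
# A minimal check of bbox_iou on hand-made boxes in (x1, y1, x2, y2) format.
# box1 is a single box and boxes2 an n x 4 tensor; the values are arbitrary
# and only demonstrate the call signature and the CIoU flag.
def _example_bbox_iou():
    box1 = torch.tensor([0.0, 0.0, 2.0, 2.0])
    boxes2 = torch.tensor([[1.0, 1.0, 3.0, 3.0],
                           [0.0, 0.0, 2.0, 2.0]])
    plain_iou = bbox_iou(box1, boxes2, x1y1x2y2=True)            # ~[0.143, 1.0]
    complete_iou = bbox_iou(box1, boxes2, x1y1x2y2=True, CIoU=True)
    return plain_iou, complete_iou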
def smooth_BCE(eps=0.1): # https://github.com/ultralytics/yolov3/issues/238#issuecomment-598028441
# return positive, negative label smoothing BCE targets
return 1.0 - 0.5 * eps, 0.5 * eps
class BCEBlurWithLogitsLoss(nn.Module):
# BCEwithLogitLoss() with reduced missing label effects.
def __init__(self, alpha=0.05):
super(BCEBlurWithLogitsLoss, self).__init__()
self.loss_fcn = nn.BCEWithLogitsLoss(reduction='none') # must be nn.BCEWithLogitsLoss()
self.alpha = alpha
def forward(self, pred, true):
loss = self.loss_fcn(pred, true)
pred = torch.sigmoid(pred) # prob from logits
dx = pred - true # reduce only missing label effects
# dx = (pred - true).abs() # reduce missing label and false label effects
alpha_factor = 1 - torch.exp((dx - 1) / (self.alpha + 1e-4))
loss *= alpha_factor
return loss.mean()
class FocalLoss(nn.Module):
# Wraps focal loss around existing loss_fcn(), i.e. criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5)
def __init__(self, loss_fcn, gamma=1.5, alpha=0.25):
super(FocalLoss, self).__init__()
self.loss_fcn = loss_fcn # must be nn.BCEWithLogitsLoss()
self.gamma = gamma
self.alpha = alpha
self.reduction = loss_fcn.reduction
self.loss_fcn.reduction = 'none' # required to apply FL to each element
def forward(self, pred, true):
loss = self.loss_fcn(pred, true)
# p_t = torch.exp(-loss)
# loss *= self.alpha * (1.000001 - p_t) ** self.gamma # non-zero power for gradient stability
# TF implementation https://github.com/tensorflow/addons/blob/v0.7.1/tensorflow_addons/losses/focal_loss.py
pred_prob = torch.sigmoid(pred) # prob from logits
p_t = true * pred_prob + (1 - true) * (1 - pred_prob)
alpha_factor = true * self.alpha + (1 - true) * (1 - self.alpha)
modulating_factor = (1.0 - p_t) ** self.gamma
loss *= alpha_factor * modulating_factor
if self.reduction == 'mean':
return loss.mean()
elif self.reduction == 'sum':
return loss.sum()
else: # 'none'
return loss
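# --- Illustrative usage sketch, not part of the original file ---
# FocalLoss wraps an existing BCEWithLogitsLoss; the logits and targets below
# are made up and only show how the wrapper is constructed and called
# (compute_loss enables it only when its local `gamma` is set > 0).
def _example_focal_loss():
    criterion = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5, alpha=0.25)
    logits = torch.tensor([[2.0, -1.0], [0.5, 0.0]])
    targets = torch.tensor([[1.0, 0.0], [0.0, 1.0]])
    return criterion(logits, targets)  # scalar (mean-reduced) focal loss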
class QFocalLoss(nn.Module):
# Wraps Quality focal loss around existing loss_fcn(), i.e. criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5)
def __init__(self, loss_fcn, gamma=1.5, alpha=0.25):
super(QFocalLoss, self).__init__()
self.loss_fcn = loss_fcn # must be nn.BCEWithLogitsLoss()
self.gamma = gamma
self.alpha = alpha
self.reduction = loss_fcn.reduction
self.loss_fcn.reduction = 'none' # required to apply FL to each element
def forward(self, pred, true):
loss = self.loss_fcn(pred, true)
pred_prob = torch.sigmoid(pred) # prob from logits
alpha_factor = true * self.alpha + (1 - true) * (1 - self.alpha)
modulating_factor = torch.abs(true - pred_prob) ** self.gamma
loss *= alpha_factor * modulating_factor
if self.reduction == 'mean':
return loss.mean()
elif self.reduction == 'sum':
return loss.sum()
else: # 'none'
return loss
def compute_loss(predictions, targets, model): # predictions, targets, model
device = targets.device
lcls, lbox, lobj = torch.zeros(1, device=device), torch.zeros(1, device=device), torch.zeros(1, device=device)
tcls, tbox, indices, anchors = build_targets(predictions, targets, model) # targets
# Define criteria
BCEcls = nn.BCEWithLogitsLoss(
pos_weight=torch.tensor([1.0], device=device))
BCEobj = nn.BCEWithLogitsLoss(
pos_weight=torch.tensor([1.0], device=device))
# Class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3
cp, cn = smooth_BCE(eps=0.0)
# Focal loss
gamma = 0 # focal loss gamma
if gamma > 0:
BCEcls, BCEobj = FocalLoss(BCEcls, gamma), FocalLoss(BCEobj, gamma)
# Losses
# layer index, layer predictions
for layer_index, layer_predictions in enumerate(predictions):
# image, anchor, gridy, gridx
b, anchor, grid_j, grid_i = indices[layer_index]
tobj = torch.zeros_like(layer_predictions[..., 0], device=device) # target obj
num_targets = b.shape[0] # number of targets
if num_targets:
# prediction subset corresponding to targets
ps = layer_predictions[b, anchor, grid_j, grid_i]
# Regression
pxy = ps[:, :2].sigmoid()
pwh = torch.exp(ps[:, 2:4]) * anchors[layer_index]
pbox = torch.cat((pxy, pwh), 1) # predicted box
# iou(prediction, target)
iou = bbox_iou(pbox.T, tbox[layer_index], x1y1x2y2=False, CIoU=True)
lbox += (1.0 - iou).mean() # iou loss
model.gr = 1
# Objectness
tobj[b, anchor, grid_j, grid_i] = \
(1.0 - model.gr) + model.gr * iou.detach().clamp(0).type(tobj.dtype) # iou ratio
# Classification
if ps.size(1) - 5 > 1:
t = torch.full_like(ps[:, 5:], cn, device=device) # targets
t[range(num_targets), tcls[layer_index]] = cp
lcls += BCEcls(ps[:, 5:], t) # BCE
lobj += BCEobj(layer_predictions[..., 4], tobj) # obj loss
lbox *= 0.05 * (3. / 2)
lobj *= (3. / 2)
lcls *= 0.31
batch_size = tobj.shape[0] # batch size
loss = lbox + lobj + lcls
return loss * batch_size, to_cpu(torch.cat((lbox, lobj, lcls, loss)))
def build_targets(p, targets, model):
# Build targets for compute_loss(), input targets(image,class,x,y,w,h)
na, nt = 3, targets.shape[0] # number of anchors, targets #TODO
tcls, tbox, indices, anch = [], [], [], []
gain = torch.ones(7, device=targets.device) # normalized to gridspace gain
ai = torch.arange(na, device=targets.device).float().view(na, 1).repeat(1, nt) # same as .repeat_interleave(nt)
# append anchor indices
targets = torch.cat((targets.repeat(na, 1, 1), ai[:, :, None]), 2)
g = 0.5 # bias
off = torch.tensor([[0, 0]], device=targets.device).float() * g # offsets
for i, yolo_layer in enumerate(model.yolo_layers):
anchors = yolo_layer.anchors / yolo_layer.stride
gain[2:6] = torch.tensor(p[i].shape)[[3, 2, 3, 2]] # xyxy gain
# Match targets to anchors
t = targets * gain
if nt:
# Matches
r = t[:, :, 4:6] / anchors[:, None] # wh ratio
j = torch.max(r, 1. / r).max(2)[0] < 4 # compare #TODO
# j = wh_iou(anchors, t[:, 4:6]) > model.hyp['iou_t'] # iou(3,n)=wh_iou(anchors(3,2), gwh(n,2))
t = t[j] # filter
# Offsets
gxy = t[:, 2:4] # grid xy
gxi = gain[[2, 3]] - gxy # inverse
j, k = ((gxy % 1. < g) & (gxy > 1.)).T
l, m = ((gxi % 1. < g) & (gxi > 1.)).T
j = torch.stack((torch.ones_like(j),))
t = t.repeat((off.shape[0], 1, 1))[j]
offsets = (torch.zeros_like(gxy)[None] + off[:, None])[j]
else:
t = targets[0]
offsets = 0
# Define
b, c = t[:, :2].long().T # image, class
gxy = t[:, 2:4] # grid xy
gwh = t[:, 4:6] # grid wh
gij = (gxy - offsets).long()
gi, gj = gij.T # grid xy indices
# Append
a = t[:, 6].long() # anchor indices
# image, anchor, grid indices
indices.append((b, a, gj.clamp_(0, gain[3] - 1), gi.clamp_(0, gain[2] - 1)))
tbox.append(torch.cat((gxy - gij, gwh), 1)) # box
anch.append(anchors[a]) # anchors
tcls.append(c) # class
return tcls, tbox, indices, anch
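# --- Illustrative note, not part of the original file ---
# build_targets/compute_loss expect `targets` as an (n, 6) tensor of
# (image_index, class, x, y, w, h) rows with box coordinates normalized to
# [0, 1]; the two rows below are made-up annotations for images 0 and 1.
def _example_targets_tensor():
    return torch.tensor([[0.0, 3.0, 0.50, 0.50, 0.25, 0.40],
                         [1.0, 0.0, 0.10, 0.80, 0.05, 0.10]])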
|
py | 1a3f091f988a37d0fe15cfc213816b7ee111dae4 | from flask import Flask, render_template, request, redirect, jsonify
from flask import make_response, url_for, flash
from flask import session as login_session
from sqlalchemy import create_engine, asc
from sqlalchemy.orm import sessionmaker
from database_setup import Base, Category, Item, User
from oauth2client.client import flow_from_clientsecrets
from oauth2client.client import FlowExchangeError
import random
import string
import httplib2
import json
import requests
app = Flask(__name__)
CLIENT_ID = json.loads(
open('client_secrets.json', 'r').read())['web']['client_id']
APPLICATION_NAME = "Catalog App"
# Connect to Database and create database session
engine = create_engine('postgresql://catalog:postgres@localhost/catalog')
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
session = DBSession()
# Create anti-forgery state token
@app.route('/login')
def showLogin():
state = ''.join(random.choice(string.ascii_uppercase + string.digits)
for x in range(32))
login_session['state'] = state
# return "The current session state is %s" % login_session['state']
return render_template('login.html', STATE=state)
@app.route('/gconnect', methods=['POST'])
def gconnect():
# Validate state token
if request.args.get('state') != login_session['state']:
response = make_response(json.dumps('Invalid state parameter.'), 401)
response.headers['Content-Type'] = 'application/json'
return response
# Obtain authorization code
code = request.data
try:
# Upgrade the authorization code into a credentials object
oauth_flow = flow_from_clientsecrets('client_secrets.json', scope='')
oauth_flow.redirect_uri = 'postmessage'
credentials = oauth_flow.step2_exchange(code)
except FlowExchangeError:
response = make_response(
json.dumps('Failed to upgrade the authorization code.'), 401)
response.headers['Content-Type'] = 'application/json'
return response
# Check that the access token is valid.
access_token = credentials.access_token
url = ('https://www.googleapis.com/oauth2/v1/tokeninfo?access_token=%s'
% access_token)
h = httplib2.Http()
result = json.loads(h.request(url, 'GET')[1])
# If there was an error in the access token info, abort.
if result.get('error') is not None:
response = make_response(json.dumps(result.get('error')), 500)
response.headers['Content-Type'] = 'application/json'
return response
# Verify that the access token is used for the intended user.
gplus_id = credentials.id_token['sub']
if result['user_id'] != gplus_id:
response = make_response(
json.dumps("Token's user ID doesn't match given user ID."), 401)
response.headers['Content-Type'] = 'application/json'
return response
# Verify that the access token is valid for this app.
if result['issued_to'] != CLIENT_ID:
response = make_response(
json.dumps("Token's client ID does not match app's."), 401)
print("Token's client ID does not match app's.")
response.headers['Content-Type'] = 'application/json'
return response
stored_access_token = login_session.get('access_token')
stored_gplus_id = login_session.get('gplus_id')
if stored_access_token is not None and gplus_id == stored_gplus_id:
response = make_response(json.dumps(
'Current user is already connected.'), 200)
response.headers['Content-Type'] = 'application/json'
return response
# Store the access token in the session for later use.
login_session['access_token'] = credentials.access_token
login_session['gplus_id'] = gplus_id
# Get user info
userinfo_url = "https://www.googleapis.com/oauth2/v1/userinfo"
params = {'access_token': credentials.access_token, 'alt': 'json'}
answer = requests.get(userinfo_url, params=params)
data = answer.json()
login_session['username'] = data['name']
login_session['picture'] = data['picture']
login_session['email'] = data['email']
# ADD PROVIDER TO LOGIN SESSION
login_session['provider'] = 'google'
# see if user exists, if it doesn't make a new one
user_id = getUserID(data["email"])
if not user_id:
user_id = createUser(login_session)
login_session['user_id'] = user_id
output = ''
output += '<h1>Welcome, '
output += login_session['username']
output += '!</h1>'
output += '<img src="'
output += login_session['picture']
output += ' " style = "width: 300px; height: 300px;border-radius: 150px;" \
"-webkit-border-radius: 150px;-moz-border-radius: 150px;"> '
flash("You are now logged in as %s" % login_session['username'])
print("Done!")
return output
# User Helper Functions
def createUser(login_session):
newUser = User(name=login_session['username'], email=login_session[
'email'], picture=login_session['picture'])
session.add(newUser)
session.commit()
user = session.query(User).filter_by(email=login_session['email']).one()
return user.id
def getUserInfo(user_id):
try:
user = session.query(User).filter_by(id=user_id).one()
return user
except:
return None
def getUserID(email):
try:
user = session.query(User).filter_by(email=email).one()
return user.id
except:
return None
# DISCONNECT - Revoke a current user's token and reset their login_session
@app.route('/gdisconnect')
def gdisconnect():
# Only disconnect a connected user.
access_token = login_session.get('access_token')
if access_token is None:
response = make_response(
json.dumps('Current user not connected.'), 401)
response.headers['Content-Type'] = 'application/json'
return response
url = 'https://accounts.google.com/o/oauth2/revoke?token=%s' % access_token
h = httplib2.Http()
result = h.request(url, 'GET')[0]
if result['status'] == '200':
del login_session['access_token']
del login_session['gplus_id']
del login_session['username']
del login_session['email']
del login_session['picture']
response = make_response(json.dumps('Successfully disconnected.'))
response.headers['Content-Type'] = 'application/json'
return response
else:
response = make_response(json.dumps(
'Failed to revoke token for given user.'))
response.headers['Content-Type'] = 'application/json'
return response
# JSON APIs to view Information
@app.route('/category/<int:category_id>/item/JSON')
def categoryItemJSON(category_id):
category = session.query(Category).filter_by(id=category_id).one()
items = session.query(Item).filter_by(
category_id=category_id).all()
return jsonify(Items=[i.serialize for i in items])
@app.route('/category/<int:category_id>/item/<int:item_id>/JSON')
def itemJSON(category_id, item_id):
    item = session.query(Item).filter_by(id=item_id).one()
    return jsonify(Item=item.serialize)
@app.route('/category/JSON')
def categoriesJSON():
    categories = session.query(Category).all()
    return jsonify(categories=[i.serialize for i in categories])
# Show all categories
@app.route('/')
@app.route('/category/')
def showCategories():
categories = session.query(Category).order_by(asc(Category.name))
if 'username' not in login_session:
return render_template('publiccategory.html', categories=categories)
else:
return render_template('category.html', categories=categories)
# Create a new category
@app.route('/category/new/', methods=['GET', 'POST'])
def newCategory():
if 'username' not in login_session:
return redirect('/login')
if request.method == 'POST':
newCategory = Category(
name=request.form['name'], user_id=login_session['user_id'])
session.add(newCategory)
flash('New Category (%s) Successfully Created' % newCategory.name)
session.commit()
return redirect(url_for('showCategories'))
else:
return render_template('newcategory.html')
# Edit a category
@app.route('/category/<int:category_id>/edit/', methods=['GET', 'POST'])
def editCategory(category_id):
editedCategory = session.query(
Category).filter_by(id=category_id).one()
if 'username' not in login_session:
return redirect('/login')
if editedCategory.user_id != login_session['user_id']:
return "<script>function mAlert() {alert('You are not authorized" \
" to edit this category. Please create your own category" \
" in order to edit.');}</script><body onload='mAlert()''>"
if request.method == 'POST':
if request.form['name']:
editedCategory.name = request.form['name']
flash('Category (%s) Successfully Edited' % editedCategory.name)
return redirect(url_for('showCategories'))
else:
return render_template('editcategory.html', category=editedCategory)
# Delete a category
@app.route('/category/<int:category_id>/delete/', methods=['GET', 'POST'])
def deleteCategory(category_id):
categoryToDelete = session.query(
Category).filter_by(id=category_id).one()
if 'username' not in login_session:
return redirect('/login')
if categoryToDelete.user_id != login_session['user_id']:
return "<script>function mAlert() {alert('You are not authorized" \
" to delete this category. Please create your own category" \
" in order to delete.');}</script><body onload='mAlert()''>"
if request.method == 'POST':
session.delete(categoryToDelete)
flash('Category (%s) Successfully Deleted' % categoryToDelete.name)
session.commit()
return redirect(url_for('showCategories', category_id=category_id))
else:
return render_template(
'deletecategory.html', category=categoryToDelete
)
# Show a categories items
@app.route('/category/<int:category_id>/')
@app.route('/category/<int:category_id>/item/')
def showItem(category_id):
category = session.query(Category).filter_by(id=category_id).one()
creator = getUserInfo(category.user_id)
    print(creator.picture)
items = session.query(Item).filter_by(
category_id=category_id).all()
if 'username' not in login_session or (
creator.id != login_session
['user_id']):
return render_template('publicitem.html',
items=items,
category=category,
creator=creator)
else:
return render_template('item.html',
items=items,
category=category,
creator=creator)
# Create a new item
@app.route('/category/<int:category_id>/item/new/', methods=['GET', 'POST'])
def newItem(category_id):
if 'username' not in login_session:
return redirect('/login')
category = session.query(Category).filter_by(id=category_id).one()
if login_session['user_id'] != category.user_id:
return "<script>function mAlert() {alert('You are not authorized" \
" to add items to this category. Please create your own category" \
" in order to add items.');}</script><body onload='mAlert()''>"
if request.method == 'POST':
newitem = Item(name=request.form['name'],
description=request.form['description'],
category_id=category_id,
user_id=category.user_id)
session.add(newitem)
session.commit()
flash('New Item (%s) Successfully Created' % newitem.name)
return redirect(url_for('showItem', category_id=category_id))
else:
return render_template('newitem.html', category_id=category_id)
# Edit an item
@app.route('/category/<int:category_id>/item/<int:item_id>/edit',
methods=['GET', 'POST'])
def editItem(category_id, item_id):
if 'username' not in login_session:
return redirect('/login')
editedItem = session.query(Item).filter_by(id=item_id).one()
category = session.query(Category).filter_by(id=category_id).one()
if login_session['user_id'] != category.user_id:
return "<script>function mAlert() {alert('You are not authorized to" \
" edit items to this category. Please create your own category" \
" in order to edit items.');}</script><body onload='mAlert()''>"
if request.method == 'POST':
if request.form['name']:
editedItem.name = request.form['name']
if request.form['description']:
editedItem.description = request.form['description']
session.add(editedItem)
session.commit()
flash('Item Successfully Edited')
return redirect(url_for('showItem', category_id=category_id))
else:
return render_template('edititem.html',
category_id=category_id,
item_id=item_id,
item=editedItem)
# Delete a item
@app.route('/category/<int:category_id>/item/<int:item_id>/delete',
methods=['GET', 'POST'])
def deleteItem(category_id, item_id):
if 'username' not in login_session:
return redirect('/login')
category = session.query(Category).filter_by(id=category_id).one()
itemToDelete = session.query(Item).filter_by(id=item_id).one()
if login_session['user_id'] != category.user_id:
script = "<script>function mAlert() {alert('You are not authorized" \
" to delete items to this category. Please create your own" \
" category in order to delete items.');} \
</script><body onload='mAlert()''>"
return script
if request.method == 'POST':
session.delete(itemToDelete)
session.commit()
flash('Item Successfully Deleted')
return redirect(url_for('showItem', category_id=category_id))
else:
return render_template('deleteitem.html', item=itemToDelete)
# Disconnect
@app.route('/disconnect')
def disconnect():
if 'provider' in login_session:
if login_session['provider'] == 'google':
gdisconnect()
if login_session.get('gplus_id'):
del login_session['gplus_id']
if login_session.get('credentials'):
del login_session['credentials']
flash("You have successfully been logged out.")
return redirect(url_for('showCategories'))
else:
flash("You were not logged in")
return redirect(url_for('showCategories'))
if __name__ == '__main__':
app.secret_key = 'super_secret_key'
app.debug = True
app.run(host='0.0.0.0', port=8000)
|
py | 1a3f094b95297c83c0ee157eb480c6c3272f5ebe | """A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, "README.rst"), encoding="utf-8") as f:
long_description = f.read()
setup(
name="adafruit-circuitpython-lis3dh",
use_scm_version=True,
setup_requires=["setuptools_scm"],
description="CircuitPython library for LIS3DH accelerometer.",
long_description=long_description,
long_description_content_type="text/x-rst",
# The project's main homepage.
url="https://github.com/adafruit/Adafruit_CircuitPython_LIS3DH",
# Author details
author="Adafruit Industries",
author_email="[email protected]",
install_requires=["Adafruit-Blinka"],
# Choose your license
license="MIT",
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"Topic :: Software Development :: Libraries",
"Topic :: System :: Hardware",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
],
# What does your project relate to?
keywords="adafruit accelerometer lis3dh acceleration hardware micropython circuitpython",
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
py_modules=["adafruit_lis3dh"],
)
|
py | 1a3f096f1786bd47f9084a559c2657f72a164da0 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Builder function for image resizing operations."""
import functools
import tensorflow.compat.v1 as tf
from object_detection.core import preprocessor
from object_detection.protos import image_resizer_pb2
def _tf_resize_method(resize_method):
"""Maps image resize method from enumeration type to TensorFlow.
Args:
resize_method: The resize_method attribute of keep_aspect_ratio_resizer or
fixed_shape_resizer.
Returns:
method: The corresponding TensorFlow ResizeMethod.
Raises:
ValueError: if `resize_method` is of unknown type.
"""
dict_method = {
image_resizer_pb2.BILINEAR:
tf.image.ResizeMethod.BILINEAR,
image_resizer_pb2.NEAREST_NEIGHBOR:
tf.image.ResizeMethod.NEAREST_NEIGHBOR,
image_resizer_pb2.BICUBIC:
tf.image.ResizeMethod.BICUBIC,
image_resizer_pb2.AREA:
tf.image.ResizeMethod.AREA
}
if resize_method in dict_method:
return dict_method[resize_method]
else:
raise ValueError('Unknown resize_method')
def build(image_resizer_config):
"""Builds callable for image resizing operations.
Args:
image_resizer_config: image_resizer.proto object containing parameters for
an image resizing operation.
Returns:
image_resizer_fn: Callable for image resizing. This callable always takes
a rank-3 image tensor (corresponding to a single image) and returns a
rank-3 image tensor, possibly with new spatial dimensions.
Raises:
ValueError: if `image_resizer_config` is of incorrect type.
    ValueError: if `image_resizer_config.image_resizer_oneof` is not of the
      expected type.
ValueError: if min_dimension > max_dimension when keep_aspect_ratio_resizer
is used.
"""
if not isinstance(image_resizer_config, image_resizer_pb2.ImageResizer):
raise ValueError('image_resizer_config not of type '
'image_resizer_pb2.ImageResizer.')
image_resizer_oneof = image_resizer_config.WhichOneof('image_resizer_oneof')
if image_resizer_oneof == 'keep_aspect_ratio_resizer':
keep_aspect_ratio_config = image_resizer_config.keep_aspect_ratio_resizer
if not (keep_aspect_ratio_config.min_dimension <=
keep_aspect_ratio_config.max_dimension):
raise ValueError('min_dimension > max_dimension')
method = _tf_resize_method(keep_aspect_ratio_config.resize_method)
per_channel_pad_value = (0, 0, 0)
if keep_aspect_ratio_config.per_channel_pad_value:
per_channel_pad_value = tuple(keep_aspect_ratio_config.
per_channel_pad_value)
image_resizer_fn = functools.partial(
preprocessor.resize_to_range,
min_dimension=keep_aspect_ratio_config.min_dimension,
max_dimension=keep_aspect_ratio_config.max_dimension,
method=method,
pad_to_max_dimension=keep_aspect_ratio_config.pad_to_max_dimension,
per_channel_pad_value=per_channel_pad_value)
if not keep_aspect_ratio_config.convert_to_grayscale:
return image_resizer_fn
elif image_resizer_oneof == 'fixed_shape_resizer':
fixed_shape_resizer_config = image_resizer_config.fixed_shape_resizer
method = _tf_resize_method(fixed_shape_resizer_config.resize_method)
image_resizer_fn = functools.partial(
preprocessor.resize_image,
new_height=fixed_shape_resizer_config.height,
new_width=fixed_shape_resizer_config.width,
method=method)
if not fixed_shape_resizer_config.convert_to_grayscale:
return image_resizer_fn
elif image_resizer_oneof == 'identity_resizer':
def image_resizer_fn(image, masks=None, **kwargs):
del kwargs
if masks is None:
return [image, tf.shape(image)]
else:
return [image, masks, tf.shape(image)]
return image_resizer_fn
elif image_resizer_oneof == 'conditional_shape_resizer':
conditional_shape_resize_config = (
image_resizer_config.conditional_shape_resizer)
method = _tf_resize_method(conditional_shape_resize_config.resize_method)
if conditional_shape_resize_config.condition == (
image_resizer_pb2.ConditionalShapeResizer.GREATER):
image_resizer_fn = functools.partial(
preprocessor.resize_to_max_dimension,
max_dimension=conditional_shape_resize_config.size_threshold,
method=method)
elif conditional_shape_resize_config.condition == (
image_resizer_pb2.ConditionalShapeResizer.SMALLER):
image_resizer_fn = functools.partial(
preprocessor.resize_to_min_dimension,
min_dimension=conditional_shape_resize_config.size_threshold,
method=method)
else:
raise ValueError(
'Invalid image resizer condition option for '
'ConditionalShapeResizer: \'%s\'.'
% conditional_shape_resize_config.condition)
if not conditional_shape_resize_config.convert_to_grayscale:
return image_resizer_fn
elif image_resizer_oneof == 'pad_to_multiple_resizer':
pad_to_multiple_resizer_config = (
image_resizer_config.pad_to_multiple_resizer)
if pad_to_multiple_resizer_config.multiple < 0:
raise ValueError('`multiple` for pad_to_multiple_resizer should be > 0.')
else:
image_resizer_fn = functools.partial(
preprocessor.resize_pad_to_multiple,
multiple=pad_to_multiple_resizer_config.multiple)
if not pad_to_multiple_resizer_config.convert_to_grayscale:
return image_resizer_fn
else:
raise ValueError(
'Invalid image resizer option: \'%s\'.' % image_resizer_oneof)
def grayscale_image_resizer(image, masks=None):
"""Convert to grayscale before applying image_resizer_fn.
Args:
image: A 3D tensor of shape [height, width, 3]
masks: (optional) rank 3 float32 tensor with shape [num_instances, height,
width] containing instance masks.
Returns:
Note that the position of the resized_image_shape changes based on whether
masks are present.
resized_image: A 3D tensor of shape [new_height, new_width, 1],
where the image has been resized (with bilinear interpolation) so that
min(new_height, new_width) == min_dimension or
max(new_height, new_width) == max_dimension.
resized_masks: If masks is not None, also outputs masks. A 3D tensor of
shape [num_instances, new_height, new_width].
resized_image_shape: A 1D tensor of shape [3] containing shape of the
resized image.
"""
# image_resizer_fn returns [resized_image, resized_image_shape] if
# mask==None, otherwise it returns
# [resized_image, resized_mask, resized_image_shape]. In either case, we
# only deal with first and last element of the returned list.
retval = image_resizer_fn(image, masks)
resized_image = retval[0]
resized_image_shape = retval[-1]
retval[0] = preprocessor.rgb_to_gray(resized_image)
retval[-1] = tf.concat([resized_image_shape[:-1], [1]], 0)
return retval
return functools.partial(grayscale_image_resizer)
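# --- Illustrative usage sketch, not part of the original file ---
# The callable returned by build() takes a rank-3 image tensor and returns
# [resized_image, resized_image_shape] (plus resized masks when given).
# The config field values below are assumptions, not taken from a real
# pipeline config.
def _example_keep_aspect_ratio_resizer():
    config = image_resizer_pb2.ImageResizer()
    config.keep_aspect_ratio_resizer.min_dimension = 600
    config.keep_aspect_ratio_resizer.max_dimension = 1024
    image_resizer_fn = build(config)
    image = tf.zeros([300, 400, 3], dtype=tf.float32)
    resized_image, resized_shape = image_resizer_fn(image)
    return resized_image, resized_shape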
|
py | 1a3f0b173c9d1f26af1b565041b80661d3a87f60 | """ Advent of code 2020 day 6/2 """
import math
from os import path
class Code(object):
def __init__(self, lines):
self.lines = lines
def match_everyone(self, people_answers):
answers = people_answers[0]
for answer in people_answers[1:]:
answers = answers.intersection(answer)
return len(answers)
def solve(self):
return sum(map(self.match_everyone, self.lines))
def preprocess(raw_data):
processed_data = [[set(x) for x in groups.split("\n")]
for groups in raw_data.split("\n\n")]
return processed_data
def solution(data):
""" Solution to the problem """
lines = preprocess(data)
solver = Code(lines)
return solver.solve()
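# --- Illustrative check, not part of the original file ---
# Tiny hand-made input: in the first group "ab" and "ac" share only "a" (1),
# the second group is a single person answering "b" (1), so the total is 2.
def _example_solution():
    sample = "ab\nac\n\nb"
    assert solution(sample) == 2
    return solution(sample)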
if __name__ == "__main__":
with(open(path.join(path.dirname(__file__), 'input.txt'), 'r')) as input_file:
print(solution(input_file.read()))
|
py | 1a3f0bf5c7d5206b1c262019985faf86fc66b559 | from pathlib import Path
from ruamel.yaml import YAML
def load_yaml_file(yaml_file_path: str) -> dict:
"""
Loads a yaml file and returns the content as nested dictionary.
:return: nested dictionary as the content of the yaml file
"""
path = Path(yaml_file_path)
yaml = YAML()
yaml.boolean_representation = ["False", "True"] # type: ignore
return yaml.load(path)
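# --- Illustrative usage sketch, not part of the original file ---
# The file name and keys below are hypothetical; any YAML file on disk works.
# load_yaml_file returns the parsed document as a (possibly nested) mapping.
def _example_load_yaml_file() -> dict:
    config = load_yaml_file("config.yml")  # hypothetical path
    # e.g. config["database"]["host"] for a nested key, assuming it exists
    return config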
|
py | 1a3f0cf046a7698b1d86ca4e273a4675185e4f43 | # -*- coding: utf-8 -*-
"""Top-level package for subs2network."""
__author__ = """Dima Kagan"""
__email__ = '[email protected]'
__version__ = '0.4.2'
|
py | 1a3f0d01f2962a1b0f203c1c0b6dd74f0487156e |
# Copyright 1997 - 2018 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class RxFilters(Base):
"""The RxFilters class encapsulates a required rxFilters node in the ixnetwork hierarchy.
An instance of the class can be obtained by accessing the RxFilters property from a parent instance.
The internal properties list will contain one and only one set of properties which is populated when the property is accessed.
"""
_SDM_NAME = 'rxFilters'
def __init__(self, parent):
super(RxFilters, self).__init__(parent)
@property
def FilterPalette(self):
"""An instance of the FilterPalette class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.l1config.rxfilters.filterpalette.filterpalette.FilterPalette)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.l1config.rxfilters.filterpalette.filterpalette import FilterPalette
return FilterPalette(self)._select()
@property
def Uds(self):
"""An instance of the Uds class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.l1config.rxfilters.uds.uds.Uds)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.l1config.rxfilters.uds.uds import Uds
return Uds(self)
|
py | 1a3f0deb1cc6d82e7ac22d4a14f8eaa6145a6c78 | # -*- coding: UTF-8 -*-
def decorated_1(func):
func.__doc__ += '\nDecorator one'
print ("This is decorated one")
return func
def decorated_2(func):
func.__doc__ += '\nDecorator two'
print ("This is decorated two")
return func
@decorated_2
@decorated_1
def add(x, y):
"""Return the sum of x and y."""
return x + y
if __name__ == '__main__':
print (add(3, 5))
print (help(add))
|
py | 1a3f0e484e36192cae84a46fd74c952c77fa085f | # Generated by Django 2.2.3 on 2019-09-13 22:29
import datetime
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('products', '0017_auto_20190822_1509'),
]
operations = [
migrations.AlterField(
model_name='product',
name='category_id',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='products.Product_categories'),
),
migrations.AlterField(
model_name='product',
name='date',
field=models.DateTimeField(blank=True, default=datetime.datetime(2019, 9, 13, 23, 29, 51, 470649)),
),
migrations.AlterField(
model_name='subproduct',
name='date',
field=models.DateTimeField(blank=True, default=datetime.datetime(2019, 9, 13, 23, 29, 51, 470649)),
),
migrations.AlterField(
model_name='subproduct',
name='parent_product',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='products.Product'),
),
]
|
py | 1a3f0e741bf9c317a1549907f03490fc643c7372 | import argparse
import datetime
import gym
import envs
import numpy as np
import torch
import imageio
import itertools
from rl.model import GaussianPolicy, QNetwork, DeterministicPolicy
from transformer_split.util import getGraphStructure
from transformer_split.vae_model import VAE_Model
from torch.nn import functional as F
from transformer_vae import util
parser = argparse.ArgumentParser(description='PyTorch Soft Actor-Critic Args')
parser.add_argument('--env1-name', default="ant",
                    help='Mujoco Gym environment (default: ant)')
parser.add_argument('--env2-name', default="ant3",
                    help='Mujoco Gym environment (default: ant3)')
parser.add_argument('--model_path', default="runs/2021-05-19_13-46-41_VAE_ant-v0_both/",
help='model path')
parser.add_argument('--seed', type=int, default=123456, metavar='N',
help='random seed (default: 123456)')
parser.add_argument('--policy_hidden_size', type=int, default=256, metavar='N',
help='hidden size (default: 256)')
parser.add_argument('--latent_dim', type=int, default=128,
help='Encoder latent dimension')
parser.add_argument('--cuda', action="store_true",
help='run on CUDA (default: False)')
parser.add_argument('--agent_memory1', default='data/ant_jump.memory',
help='Path for saved replay memory')
parser.add_argument('--video_file_name', default="ant_turn.mp4",
help='output file name')
parser.add_argument('--msg_dim', type=int, default=32,
                    help='message dimension (default: 32)')
parser.add_argument('--batch_size', type=int, default=1,
                    help='batch size (default: 1)')
parser.add_argument('--actor_path',
                    help='path to the pretrained actor checkpoint')
parser.add_argument('--num_episodes', type=int, default=3, metavar='N',
                    help='number of evaluation episodes (default: 3)')
parser.add_argument('--root_size', type=int, default=11,
                    help='root dimension')
parser.add_argument('--lr', type=float, default=1e-4, metavar='N',
                    help='learning rate (default: 1e-4)')
parser.add_argument(
"--transformer_norm", default=0, type=int, help="Use layernorm",
)
parser.add_argument(
"--beta",
type=float,
default=.1,
help="beta coefficient of KL divergence",
)
parser.add_argument(
"--gradient_penalty",
type=float,
default=10,
help="beta coefficient of KL divergence",
)
parser.add_argument(
"--discriminator_limiting_accuracy",
type=float,
default=0.7,
help="beta coefficient of KL divergence",
)
parser.add_argument(
"--attention_layers",
default=3,
type=int,
help="How many attention layers to stack",
)
parser.add_argument(
"--attention_heads",
default=2,
type=int,
help="How many attention heads to stack",
)
parser.add_argument(
"--attention_hidden_size",
type=int,
default=128,
help="Hidden units in an attention block",
)
parser.add_argument(
"--attention_embedding_size",
type=int,
default=128,
help="Hidden units in an attention block",
)
parser.add_argument(
"--dropout_rate",
type=float,
default=0.0,
help="How much to drop if drop in transformers",
)
args = parser.parse_args()
torch.manual_seed(args.seed)
np.random.seed(args.seed)
# Agent
device = torch.device("cuda" if args.cuda else "cpu")
env_names = ["ant-v0", "ant3-v0", "ant_a-v0"]
train_envs = [gym.make(n) for n in env_names]
graphs = [getGraphStructure(e.xml) for e in train_envs]
# All environments have the same dimension per limb.
num_limbs = len(graphs[0]) #torso + body limbs
body_limbs = num_limbs - 1
dim_per_limb = int((train_envs[0].observation_space.shape[0] - args.root_size) / (body_limbs - 1))
max_num_limbs = max(len(g) for g in graphs)
args.dim_per_limb = dim_per_limb
args.max_num_limbs = max_num_limbs
root_dir = util.get_project_root()
render_env = train_envs[2]
render_topology = graphs[2]
render_limbs = len(render_topology)
expert_env = train_envs[0]
expert_topology = graphs[0]
policy = GaussianPolicy(
expert_env.observation_space.shape[0],
expert_env.action_space.shape[0],
args.policy_hidden_size,
expert_env.action_space).to(device)
policy.load_state_dict(torch.load(args.actor_path))
vae_model = VAE_Model(args)
vae_model.load_model(args.model_path)
def pad_state(data, state_size, max_num_limbs):
max_dim = args.root_size + state_size * (max_num_limbs - 1)
output = torch.zeros(max_dim)
output[:data.shape[0]] = torch.tensor(data)
return output
def pad_topology(top, max_num_limbs):
topology = torch.full((max_num_limbs,), -1, dtype=torch.int32)
topology[:len(top)] = torch.tensor(top, dtype=torch.int32)
return topology
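# --- Illustrative sanity check, not part of the original file ---
# pad_state pads a flat observation up to args.root_size + state_size *
# (max_num_limbs - 1) entries, and pad_topology pads a parent-index list with
# -1 up to max_num_limbs.  The sizes below are arbitrary and only illustrate
# the resulting shapes (root of 11 entries plus three limbs of size 2).
def _example_padding():
    obs = np.zeros(11 + 2 * 3, dtype=np.float32)
    padded_obs = pad_state(obs, 2, 5)         # -> shape (11 + 2 * 4,)
    padded_top = pad_topology([-1, 0, 0], 5)  # -> tensor([-1, 0, 0, -1, -1])
    return padded_obs.shape, padded_top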
# Evaluation loop
total_numsteps = 0
avg_reward = 0.
state = render_env.reset()
with imageio.get_writer(args.video_file_name, fps=30) as video:
for i_episode in itertools.count(1):
episode_reward = 0
episode_steps = 0
done = False
state = render_env.reset()
video.append_data(render_env.render('rgb_array'))
done = False
while not done:
state = pad_state(state, dim_per_limb, max_num_limbs).unsqueeze(0)
src_topology = pad_topology(render_topology, max_num_limbs).unsqueeze(0)
tgt_topology = pad_topology(expert_topology, max_num_limbs).unsqueeze(0)
x_hat = vae_model.transfer(state, tgt_topology)
x_hat = x_hat.detach().cpu()
x_hat = x_hat[:(render_limbs-1)]
x_hat = torch.FloatTensor(x_hat).to(device).unsqueeze(0)
action, _, _ = policy.sample(x_hat)
action = action.detach().cpu().numpy()[0]
next_state, reward, done, _ = render_env.step(action[0][:7])
video.append_data(render_env.render('rgb_array'))
episode_reward += reward
state = next_state
avg_reward += episode_reward
if i_episode > args.num_episodes:
break
|
py | 1a3f0ed0394df92e3f9cfc7101bf9c308e11deb2 | # ===================================================================================== #
# Module for solving Ising models exactly.
#
# Distributed with ConIII.
#
# NOTE: This code needs cleanup.
#
# Author : Edward Lee, [email protected]
# ===================================================================================== #
#
# MIT License
#
# Copyright (c) 2019 Edward D. Lee, Bryan C. Daniels
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import numpy as np
import mpmath as mp
import scipy.special as ss
from itertools import combinations
import sys
np.set_printoptions(threshold=sys.maxsize)
def write_eqns(n, sym, corrTermsIx, suffix='', high_prec=False):
"""Create strings for writing out the equations and then write them to file.
TODO: This code needs some cleanup.
Parameters
----------
n : int
number of spins
sym : int
value of 1 will use {-1,1} formulation, 0 means {0,1}
corrTermsIx : list of ndarrays
Allows specification of arbitrary correlations to constrain using an index based
structure. These should be index arrays as would be returned by np.where that
specify which correlations to write down. Each consecutive array should specify
a matrix of sequentially increasing dimension.
[Nx1, NxN, NxNxN, ...]
suffix : str, ''
high_prec : bool, False
"""
import re
assert sym in [0,1], "sym argument must be 0 or 1."
abc = 'HJKLMNOPQRSTUVWXYZABCDE'
expterms = [] # 2**N exponential corrTermsIx
binstates = [] # all binary states as strings
signs = [] # coefficient for all numerator terms when computing correlations
br = "[]"
ix0 = 0
# default suffix for high precision files
if high_prec:
suffix += '_hp'
# Collect all corrTermsIx in the partition function.
for state in range(2**n):
binstates.append("{0:b}".format(state))
if len(binstates[state])<n:
binstates[state] = "0"*(n-len(binstates[state])) + binstates[state]
expterms.append( '' )
# Get corrTermsIx corresponding to each of the ith order term.
if sym:
for i in range(len(corrTermsIx)):
expterms[state] += get_terms11(corrTermsIx[i], abc[i], binstates[state], br, ix0)
else:
for i in range(len(corrTermsIx)):
expterms[state] += get_terms01(corrTermsIx[i], abc[i], binstates[state], br, ix0)
expterms[state] = re.sub(r'\+0\+','+', expterms[state])
expterms[state] = re.sub(r'\)\+0',')', expterms[state])
expterms[state] += ', '
# Collect all terms with corresponding prefix in the equation to solve.
for state in range(2**n):
for i in range(len(corrTermsIx)):
if state==0:
signs.append([])
# Get corrTermsIx corresponding to each of the ith order term.
if sym:
signs_ = _compute_signs(corrTermsIx[i], expterms[state], binstates[state])
else:
signs_ = _compute_signs(corrTermsIx[i], expterms[state], binstates[state], False)
# expand the length of signs if we haven't reached those constraints yet before
if len(signs[i])<signs_.size:
for j in range(signs_.size-len(signs[i])):
signs[i].append(np.zeros(0, dtype=int))
for j in range(signs_.size):
signs[i][j] = np.append(signs[i][j], signs_[j])
Z = ''.join(expterms)
    # Account for the fact that the symmetric formulation inverts the order of the states.
if sym:
extra = '\n Pout = Pout[::-1]'
else:
extra = ''
# write to files
write_py(n, sym, corrTermsIx, signs, expterms, Z,
extra=extra,
suffix=suffix,
high_prec=high_prec)
def write_py(n, sym, contraintTermsIx, signs, expterms, Z,
extra='',
suffix='',
high_prec=False):
"""
Write out Ising equations for Python.
Parameters
----------
n : int
System size.
contraintTermsIx : list of str
signs : list of ndarray
Sign for each term in the numerator when computing correlations.
expterms : list of str
Every single energy term.
Z : str
Energies for all states that will be put into partition function.
extra : str, ''
any extra lines to add at the end
suffix : str, ''
high_prec : bool, False
If True, write version that uses mpmath for high precision calculations.
"""
import time
import os
abc = 'HJKLMNOPQRSTUVWXYZABCDE'
fname = 'ising_eqn/ising_eqn_%d%s.py'%(n,suffix)
print("Generating file ./%s"%fname)
if not os.path.isdir('./ising_eqn'):
os.makedirs('./ising_eqn')
f = open(fname,'w')
# insert license
try:
license = open('../LICENSE.txt','r').readlines()
for el in license:
el = '# '+el
f.write(el)
f.write('\n')
except FileNotFoundError:
print("License file not found...")
f.write("# Equations for %d-spin Ising model.\n\n"%n)
f.write("# ")
f.write(time.strftime("Written on %Y/%m/%d.")+"\n")
if high_prec:
f.write("from numpy import zeros, array, prod\n")
f.write("from ..enumerate import mp_fast_logsumexp as fast_logsumexp\n")
f.write("from mpmath import exp, isnan\n\n")
else:
f.write("from numpy import zeros, exp, array, prod, isnan\n")
f.write("from ..enumerate import fast_logsumexp\n\n")
# Keep these as string because they need to grow in the loop and then can just be
# added all at once at the end.
fargs = "def calc_observables(params):\n"
if high_prec:
vardec = ' Cout = zeros(('+str(sum([len(i) for i in signs]))+'), dtype=object)\n' # string of variable declarations
else:
vardec = ' Cout = zeros(('+str(sum([len(i) for i in signs]))+'))\n' # string of variable declarations
eqns = '' # string of equations to compute
ix = np.hstack(( 0, np.cumsum([len(i) for i in signs]) ))
for i in range(len(contraintTermsIx)):
vardec += ' '+abc[i]+' = params['+str(ix[i])+':'+str(ix[i+1])+']\n'
if sym:
k = 0
for i in range(len(contraintTermsIx)):
for j in range(len(signs[i])):
eqns += (" num = fast_logsumexp(energyTerms, "+
str(signs[i][j]).replace('1 ','1,').replace('1\n','1,\n')+
")\n Cout["+str(k)+"] = exp( num[0] - logZ ) * num[1]\n")
k += 1
else:
k = 0
for i in range(len(contraintTermsIx)):
for j in range(len(signs[i])):
eqns += (" num = fast_logsumexp(energyTerms, "+
str(signs[i][j]).replace('0 ','0,').replace('1 ','1,').replace('0\n','0,\n').replace('1\n','1,\n')+
")\n Cout["+str(k)+"] = exp( num[0] - logZ ) * num[1]\n")
k += 1
# Write out correlation terms
f.write(fargs)
f.write((" \"\"\"\n Give all parameters concatenated into one array from lowest to highest order.\n"+
" Returns all correlations.\n \"\"\"\n"))
f.write(vardec)
_write_energy_terms(f, Z)
f.write(eqns)
if high_prec:
f.write(" for i in range(Cout.size):\n if isnan(Cout[i]):\n Cout[i] = 0.\n")
else:
f.write(" Cout[isnan(Cout)] = 0.\n")
f.write(" return(Cout)\n\n")
# Write equations for probabilities of all states.
#f.write("def p("+string.join([i+"," for i in abc[:len(contraintTermsIx)]])+"):\n")
f.write("def p(params):\n")
f.write((" \"\"\"\n Give all parameters concatenated into one array from lowest to highest order.\n"+
" Returns probabilities of all configurations.\n \"\"\"\n"))
f.write(vardec)
# Output variable decs and put params into explicit parameters.
ix = np.hstack(( 0, np.cumsum([len(i) for i in signs]) ))
vardec = ''
for i in range(len(contraintTermsIx)):
vardec += ' '+abc[i]+' = params['+str(ix[i])+':'+str(ix[i+1])+']\n'
if high_prec:
vardec += ' Pout = zeros(('+str(2**n)+'), dtype=object)\n' # string of variable declarations
else:
vardec += ' Pout = zeros(('+str(2**n)+'))\n' # string of variable declarations
f.write(vardec)
_write_energy_terms(f, Z)
# each probability equation
for i in range(len(expterms)):
f.write(' Pout['+str(i)+'] = exp( '+expterms[i][:-2]+' - logZ )\n')
f.write(extra)
f.write("\n return(Pout)\n")
f.close()
def _write_energy_terms(f, Z):
"""Split expression for energy terms for each term in Z into multiple lines and write
out nicely into file.
Parameters
----------
f : file
Z : list of str
Energy terms to write out.
"""
f.write(' energyTerms = array([')
i=0
while i<len(Z):
iend=i+100
# end line on a +
while iend<len(Z) and Z[iend-1]!='+':
iend+=1
if iend>=len(Z):
# ignore comma at end of line
f.write(' '+Z[i:-1]+'])\n logZ = fast_logsumexp(energyTerms)[0]\n')
else:
f.write(' '+Z[i:iend]+'\n')
i=iend
def _compute_signs(subix, expterm, binstate, sym=True):
"""Iterate through terms that belong in the numerator for each constraint and keep
track of the sign of those terms.
Parameters
----------
subix : list
expterm : list of str
binstate : list of str
sym : bool, True
Returns
-------
ndarray
Sign of each exponential term in numerator.
"""
if len(subix)==0:
return
if sym:
downSpin = -1
signs = np.ones(len(subix[0]), dtype=int)
for i in range(len(subix[0])):
if np.mod( sum([binstate[k[i]]=="1" for k in subix]),2 ):
signs[i] = downSpin
else:
downSpin = 0
signs = np.ones(len(subix[0]), dtype=int)
for i in range(len(subix[0])):
if np.mod( any([binstate[k[i]]=="0" for k in subix]),2 ):
signs[i] = downSpin
return signs
def get_terms11(subix, prefix, binstate, br, ix0):
"""
Specific to {-1,1}.
"""
j = 0
s = ''
if len(subix)==0:
return s
for i in range(len(subix[0])):
if np.mod( sum([binstate[k[j]]=="1" for k in subix]),2 ):
s += '-'
else:
s += '+'
s += prefix+br[0]+str(j+ix0)+br[1]
j += 1
return s
def get_terms01(subix, prefix, binstate, br, ix0):
"""
Specific to {0,1}.
"""
j = 0
s = ''
if len(subix)==0:
return s
for i in range(len(subix[0])):
if np.all( [binstate[k[j]]=="1" for k in subix] ):
s += '+'+prefix+br[0]+str(j+ix0)+br[1]
j += 1
if s=='':
s = '+0'
return s
def get_terms(subix, prefix, binstate, br, ix0):
"""
Spins are put in explicitly
"""
j = 0
s = ''
if len(subix)==0:
return s
for i in range(len(subix[0])):
s += '+'+prefix+br[0]+str(j+ix0)+br[1]
for k in range(len(subix)):
s += '*s'+br[0]+str(subix[k][i])+br[1]
j += 1
if s=='':
s = '+0'
return s
def get_3idx(n):
"""Get binary 3D matrix with truth values where index values correspond to the index
of all possible ijk parameters. We can do this by recognizing that the pattern along
each plane in the third dimension is like the upper triangle pattern that just moves
up and over by one block each cut lower into the box.
"""
b = np.zeros((n,n,n))
c = np.triu(np.ones((n-1,n-1))==1,1)
for i in range(n-1):
# shunt this diagonal matrix over every descent into a lower plane in the box
# the plane xz
if i==0:
b[i,(1+i):,(1+i):] = c
else:
b[i,(1+i):,(1+i):] = c[:-i,:-i]
return b
def get_nidx(k, n):
"""
Get the kth order indices corresponding to all the states in which k elements
are firing up out of n spins. The ordering corresponds to that returned by
bin_states().
One can check this code for correctness by comparing with get_3idx()
>>>>>
print where(exact.get_3idx(4))
print where(exact.get_nidx(3,4))
<<<<<
"""
if k==n:
return np.reshape(list(range(n)),(n,1))
elif k<n:
allStates = bin_states(n)
statesix = np.sum(allStates,1)==k
ix = []
for s in allStates[statesix,:]:
j = 0
for i in np.argwhere(s==1).flatten():
if len(ix)<(j+1):
ix.append([])
ix[j].append(i)
j += 1
return np.array(ix)[:,::-1] # make sure last idx increases first
def pairwise(n, sym=0, **kwargs):
"""Wrapper for writing pairwise maxent model (Ising) files.
Parameters
----------
n : int
System size.
sym : int, 0
Can be 0 or 1.
**kwargs
Returns
-------
None
"""
assert sym==0 or sym==1
print("Writing equations for pairwise Ising model with %d spins."%n)
if sym:
write_eqns(n, sym, [np.where(np.ones((n))==1),
np.where(np.triu(np.ones((n,n)),k=1)==1)],
suffix='_sym',
**kwargs)
else:
write_eqns(n, sym, [np.where(np.ones((n))==1),
np.where(np.triu(np.ones((n,n)),k=1)==1)],
**kwargs)
def triplet(n, sym=0, **kwargs):
"""Wrapper for writing triplet-order maxent model.
Parameters
----------
n : int
System size.
sym : int, 0
Can be 0 or 1.
**kwargs
Returns
-------
None
"""
assert sym==0 or sym==1
print("Writing equations for Ising model with triplet interactions and %d spins."%n)
if sym:
write_eqns(n,sym,[(range(n),),
list(zip(*list(combinations(range(n),2)))),
list(zip(*list(combinations(range(n),3))))],
suffix='_sym_triplet',
**kwargs)
else:
write_eqns(n,sym,[(range(n),),
list(zip(*list(combinations(range(n),2)))),
list(zip(*list(combinations(range(n),3))))],
suffix='_triplet',
**kwargs)
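# Hedged usage sketch (not part of the original module; assumes it is imported and that
# write_eqns follows the ising_eqn_[n][suffix] naming convention described below):
#   pairwise(3, sym=1)   # writes ising_eqn_3_sym.py exposing calc_observables(params) and p(params)
#   triplet(4, sym=0)    # writes ising_eqn_4_triplet.py for the {0,1} basis with third-order terms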
def _write_matlab(n, terms, fitterms, expterms, Z, suffix=''):
"""
DEPRECATED: code here for future referencing
Write out equations to solve for matlab.
"""
import time
abc = 'HJKLMNOPQRSTUVWXYZABCDE'
vardec = ''
# Write function to solve to file.
f = open('ising_eqn_%d%s.m'%(n,suffix),'w')
f.write("% Equations of %d-spin Ising model.\n\n"%n)
f.write(time.strftime("%Y/%m/%d")+"\n")
f.write("% Give each set of parameters concatenated into one array.\n\n")
# Keep these as string because they need to grow in the loop and then can just be
# added all at once at the end.
f.write("function Cout = calc_observables(params)\n")
f.write('\tCout = zeros('+str(sum([len(i) for i in fitterms]))+',1);\n') # string of variable declarations
eqns = '' # string of equations to compute
ix = np.hstack(( 0,np.cumsum([len(i) for i in fitterms]) ))+1
for i in range(len(terms)):
vardec += '\t'+abc[i]+' = params('+str(ix[i])+':'+str(ix[i+1]-1)+');\n'
k = 0
for i in range(len(terms)):
for j in range(len(fitterms[i])):
eqns += "\tCout("+str(k+1)+") = ("+fitterms[i][j]+")/Z;\n"
k += 1
f.write(vardec)
f.write("\tZ = "+Z+";\n")
f.write(eqns)
f.close()
g = open('probs'+str(n)+'.m','w')
g.write("% File for getting the probabilities of Ising model.\n% ")
g.write(time.strftime("%Y/%m/%d")+"\n")
# Write equations for probabilities of all states.
g.write("function Pout = p(params)\n")
g.write(vardec)
g.write(' Pout = zeros('+str(2**n)+',1);\n') # string of variable declarations
g.write(' Z = '+Z+';\n')
for i in range(len(expterms)):
g.write(' Pout('+str(i+1)+') = '+expterms[i]+'/Z;\n')
g.close()
def fast_logsumexp(X, coeffs=None):
"""Simplified version of logsumexp to do correlation calculation in Ising equation
files. Scipy's logsumexp can be around 10x slower in comparison.
Parameters
----------
X : ndarray
Terms inside logs.
coeffs : ndarray
Factors in front of exponentials.
Returns
-------
float
Value of magnitude of quantity inside log (the sum of exponentials).
float
Sign.
"""
Xmx = max(X)
if coeffs is None:
y = np.exp(X-Xmx).sum()
else:
y = np.exp(X-Xmx).dot(coeffs)
if y<0:
return np.log(np.abs(y))+Xmx, -1.
return np.log(y)+Xmx, 1.
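# Hedged example (not part of the original module; illustrative values only): shows the
# (log-magnitude, sign) return convention of fast_logsumexp.
def _example_fast_logsumexp():
    """The signed sum of coeffs_i * exp(X_i) is recovered as sign * exp(logmag)."""
    logmag, sign = fast_logsumexp(np.array([0., 1.]), coeffs=np.array([1., -1.]))
    return sign * np.exp(logmag)  # equals 1*exp(0) - 1*exp(1) = 1 - e ~ -1.718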
def mp_fast_logsumexp(X, coeffs=None):
"""fast_logsumexp for high precision numbers using mpmath.
Parameters
----------
X : ndarray
Terms inside logs.
coeffs : ndarray
Factors in front of exponentials.
Returns
-------
float
Value of magnitude of quantity inside log (the sum of exponentials).
float
Sign.
"""
Xmx = max(X)
if coeffs is None:
y = sum(map(mp.exp, X-Xmx))
else:
y = np.array(coeffs).dot(list(map(mp.exp, X-Xmx)))
if y<0:
return mp.log(abs(y))+Xmx, -1.
return mp.log(y)+Xmx, 1.
if __name__=='__main__':
"""When run with Python, this will write the equations for the Ising model
into file ising_eqn_[n][_sym] where n will be replaced by the system size
and the suffix '_sym' is included if the equations are written in the
{-1,+1} basis.
To write the Ising model equations for a system of size 3 in the {0,1} basis, call
>>> python enumerate.py 3
For the {-1,1} basis, call
>>> python enumerate.py 3 1
To include triplet order interactions, include a 3 at the very end
>>> python enumerate.py 3 0 3
To write high precision, include an '-hp=true' as the last argument.
>>> python enumerate.py 3 0 3 -hp=true
"""
import sys
args = [i for i in sys.argv if '-'!=i[0]]
kwargs = [i for i in sys.argv if '-'==i[0]]
n = int(args[1])
if len(args)==2:
sym = 0
order = 2
elif len(args)==3:
sym = int(args[2])
assert sym==0 or sym==1
order = 2
elif len(args)==4:
sym = int(args[2])
order = int(args[3])
else:
raise Exception("Unrecognized arguments.")
# parse kwargs
if len(kwargs):
if '-hp='==kwargs[0][:4]:
if kwargs[0][4:].lower()=='true':
high_prec = True
elif kwargs[0][4:].lower()=='false':
high_prec = False
else:
raise Exception("Unrecognized value for hp.")
else:
high_prec = False
else:
# default kwargs
high_prec = False
if order==2:
pairwise(n, sym, high_prec=high_prec)
elif order==3:
triplet(n, sym, high_prec=high_prec)
else:
raise NotImplementedError("Only order up to 3 implemented for this convenient interface.")
|
py | 1a3f10bf52d7f846b33fe3d9a1841607aac1f822 | #!/usr/bin/env python3
"""
GTSAM Copyright 2010-2020, Georgia Tech Research Corporation,
Atlanta, Georgia 30332-0415
All Rights Reserved
See LICENSE for the license information
Code generator for wrapping a C++ module with Pybind11
Author: Duy Nguyen Ta, Fan Jiang, Matthew Sklar, Varun Agrawal, and Frank Dellaert
"""
# pylint: disable=too-many-arguments, too-many-instance-attributes, no-self-use, no-else-return, unused-format-string-argument, line-too-long
import re
from pathlib import Path
from typing import List
import gtwrap.interface_parser as parser
import gtwrap.template_instantiator as instantiator
class PybindWrapper:
"""
Class to generate binding code for Pybind11 specifically.
"""
def __init__(self,
module_name,
top_module_namespaces='',
use_boost=False,
ignore_classes=(),
module_template=""):
self.module_name = module_name
self.top_module_namespaces = top_module_namespaces
self.use_boost = use_boost
self.ignore_classes = ignore_classes
self._serializing_classes = []
self.module_template = module_template
self.python_keywords = [
'lambda', 'False', 'def', 'if', 'raise', 'None', 'del', 'import',
'return', 'True', 'elif', 'in', 'try', 'and', 'else', 'is',
'while', 'as', 'except', 'with', 'assert', 'finally',
'nonlocal', 'yield', 'break', 'for', 'not', 'class', 'from', 'or',
'continue', 'global', 'pass'
]
# amount of indentation to add before each function/method declaration.
self.method_indent = '\n' + (' ' * 8)
# Special methods which are leveraged by ipython/jupyter notebooks
self._ipython_special_methods = [
"svg", "png", "jpeg", "html", "javascript", "markdown", "latex"
]
def _py_args_names(self, args):
"""Set the argument names in Pybind11 format."""
names = args.names()
if names:
py_args = []
for arg in args.list():
if arg.default is not None:
default = ' = {arg.default}'.format(arg=arg)
else:
default = ''
argument = 'py::arg("{name}"){default}'.format(
name=arg.name, default='{0}'.format(default))
py_args.append(argument)
return ", " + ", ".join(py_args)
else:
return ''
def _method_args_signature(self, args):
"""Generate the argument types and names as per the method signature."""
cpp_types = args.to_cpp(self.use_boost)
names = args.names()
types_names = [
"{} {}".format(ctype, name)
for ctype, name in zip(cpp_types, names)
]
return ', '.join(types_names)
def wrap_ctors(self, my_class):
"""Wrap the constructors."""
res = ""
for ctor in my_class.ctors:
res += (
self.method_indent + '.def(py::init<{args_cpp_types}>()'
'{py_args_names})'.format(
args_cpp_types=", ".join(ctor.args.to_cpp(self.use_boost)),
py_args_names=self._py_args_names(ctor.args),
))
return res
def _wrap_serialization(self, cpp_class):
"""Helper method to add serialize, deserialize and pickle methods to the wrapped class."""
if cpp_class not in self._serializing_classes:
self._serializing_classes.append(cpp_class)
serialize_method = self.method_indent + \
".def(\"serialize\", []({class_inst} self){{ return gtsam::serialize(*self); }})".format(class_inst=cpp_class + '*')
deserialize_method = self.method_indent + \
'.def("deserialize", []({class_inst} self, string serialized)' \
'{{ gtsam::deserialize(serialized, *self); }}, py::arg("serialized"))' \
.format(class_inst=cpp_class + '*')
# Since this class supports serialization, we also add the pickle method.
pickle_method = self.method_indent + \
".def(py::pickle({indent} [](const {cpp_class} &a){{ /* __getstate__: Returns a string that encodes the state of the object */ return py::make_tuple(gtsam::serialize(a)); }},{indent} [](py::tuple t){{ /* __setstate__ */ {cpp_class} obj; gtsam::deserialize(t[0].cast<std::string>(), obj); return obj; }}))"
return serialize_method + deserialize_method + \
pickle_method.format(cpp_class=cpp_class, indent=self.method_indent)
def _wrap_print(self, ret: str, method: parser.Method, cpp_class: str,
args_names: List[str], args_signature_with_names: str,
py_args_names: str, prefix: str, suffix: str):
"""
Update the print method to print to the output stream and append a __repr__ method.
Args:
ret (str): The result of the parser.
method (parser.Method): The method to be wrapped.
cpp_class (str): The C++ name of the class to which the method belongs.
args_names (List[str]): List of argument variable names passed to the method.
args_signature_with_names (str): C++ arguments containing their names and type signatures.
py_args_names (str): The pybind11 formatted version of the argument list.
prefix (str): Prefix to add to the wrapped method when writing to the cpp file.
suffix (str): Suffix to add to the wrapped method when writing to the cpp file.
Returns:
str: The wrapped print method.
"""
# Redirect stdout - see pybind docs for why this is a good idea:
# https://pybind11.readthedocs.io/en/stable/advanced/pycpp/utilities.html#capturing-standard-output-from-ostream
ret = ret.replace('self->print',
'py::scoped_ostream_redirect output; self->print')
# Make __repr__() call .print() internally
ret += '''{prefix}.def("__repr__",
[](const {cpp_class}& self{opt_comma}{args_signature_with_names}){{
gtsam::RedirectCout redirect;
self.{method_name}({method_args});
return redirect.str();
}}{py_args_names}){suffix}'''.format(
prefix=prefix,
cpp_class=cpp_class,
opt_comma=', ' if args_names else '',
args_signature_with_names=args_signature_with_names,
method_name=method.name,
method_args=", ".join(args_names) if args_names else '',
py_args_names=py_args_names,
suffix=suffix)
return ret
def _wrap_method(self,
method,
cpp_class,
prefix,
suffix,
method_suffix=""):
"""
Wrap the `method` for the class specified by `cpp_class`.
Args:
method: The method to wrap.
cpp_class: The C++ name of the class to which the method belongs.
prefix: Prefix to add to the wrapped method when writing to the cpp file.
suffix: Suffix to add to the wrapped method when writing to the cpp file.
method_suffix: A string to append to the wrapped method name.
"""
py_method = method.name + method_suffix
cpp_method = method.to_cpp()
args_names = method.args.names()
py_args_names = self._py_args_names(method.args)
args_signature_with_names = self._method_args_signature(method.args)
# Special handling for the serialize/serializable method
if cpp_method in ["serialize", "serializable"]:
return self._wrap_serialization(cpp_class)
# Special handling of ipython specific methods
# https://ipython.readthedocs.io/en/stable/config/integrating.html
if cpp_method in self._ipython_special_methods:
idx = self._ipython_special_methods.index(cpp_method)
py_method = f"_repr_{self._ipython_special_methods[idx]}_"
# Add underscore to disambiguate if the method name matches a python keyword
if py_method in self.python_keywords:
py_method = py_method + "_"
is_method = isinstance(
method, (parser.Method, instantiator.InstantiatedMethod))
is_static = isinstance(
method,
(parser.StaticMethod, instantiator.InstantiatedStaticMethod))
return_void = method.return_type.is_void()
caller = cpp_class + "::" if not is_method else "self->"
function_call = ('{opt_return} {caller}{method_name}'
'({args_names});'.format(
opt_return='return' if not return_void else '',
caller=caller,
method_name=cpp_method,
args_names=', '.join(args_names),
))
ret = ('{prefix}.{cdef}("{py_method}",'
'[]({opt_self}{opt_comma}{args_signature_with_names}){{'
'{function_call}'
'}}'
'{py_args_names}){suffix}'.format(
prefix=prefix,
cdef="def_static" if is_static else "def",
py_method=py_method,
opt_self="{cpp_class}* self".format(
cpp_class=cpp_class) if is_method else "",
opt_comma=', ' if is_method and args_names else '',
args_signature_with_names=args_signature_with_names,
function_call=function_call,
py_args_names=py_args_names,
suffix=suffix,
))
# Create __repr__ override
# We allow all arguments to .print() and let the compiler handle type mismatches.
if method.name == 'print':
ret = self._wrap_print(ret, method, cpp_class, args_names,
args_signature_with_names, py_args_names,
prefix, suffix)
return ret
def wrap_methods(self,
methods,
cpp_class,
prefix='\n' + ' ' * 8,
suffix=''):
"""
Wrap all the methods in the `cpp_class`.
"""
res = ""
for method in methods:
# To avoid type confusion for insert
if method.name == 'insert' and cpp_class == 'gtsam::Values':
name_list = method.args.names()
type_list = method.args.to_cpp(self.use_boost)
# inserting non-wrapped value types
if type_list[0].strip() == 'size_t':
method_suffix = '_' + name_list[1].strip()
res += self._wrap_method(method=method,
cpp_class=cpp_class,
prefix=prefix,
suffix=suffix,
method_suffix=method_suffix)
res += self._wrap_method(
method=method,
cpp_class=cpp_class,
prefix=prefix,
suffix=suffix,
)
return res
def wrap_variable(self,
namespace,
module_var,
variable,
prefix='\n' + ' ' * 8):
"""
Wrap a variable that's not part of a class (i.e. global)
"""
variable_value = ""
if variable.default is None:
variable_value = variable.name
else:
variable_value = variable.default
return '{prefix}{module_var}.attr("{variable_name}") = {namespace}{variable_value};'.format(
prefix=prefix,
module_var=module_var,
variable_name=variable.name,
namespace=namespace,
variable_value=variable_value)
def wrap_properties(self, properties, cpp_class, prefix='\n' + ' ' * 8):
"""Wrap all the properties in the `cpp_class`."""
res = ""
for prop in properties:
res += ('{prefix}.def_{property}("{property_name}", '
'&{cpp_class}::{property_name})'.format(
prefix=prefix,
property="readonly"
if prop.ctype.is_const else "readwrite",
cpp_class=cpp_class,
property_name=prop.name,
))
return res
def wrap_operators(self, operators, cpp_class, prefix='\n' + ' ' * 8):
"""Wrap all the overloaded operators in the `cpp_class`."""
res = ""
template = "{prefix}.def({{0}})".format(prefix=prefix)
for op in operators:
if op.operator == "[]": # __getitem__
res += "{prefix}.def(\"__getitem__\", &{cpp_class}::operator[])".format(
prefix=prefix, cpp_class=cpp_class)
elif op.operator == "()": # __call__
res += "{prefix}.def(\"__call__\", &{cpp_class}::operator())".format(
prefix=prefix, cpp_class=cpp_class)
elif op.is_unary:
res += template.format("{0}py::self".format(op.operator))
else:
res += template.format("py::self {0} py::self".format(
op.operator))
return res
def wrap_enum(self, enum, class_name='', module=None, prefix=' ' * 4):
"""
Wrap an enum.
Args:
enum: The parsed enum to wrap.
class_name: The class under which the enum is defined.
module: The Pybind11 module variable to add the enum to; derived from the enum's namespaces when None.
prefix: The amount of indentation.
"""
if module is None:
module = self._gen_module_var(enum.namespaces())
cpp_class = enum.cpp_typename().to_cpp()
if class_name:
# If class_name is provided, add that as the namespace
cpp_class = class_name + "::" + cpp_class
res = '{prefix}py::enum_<{cpp_class}>({module}, "{enum.name}", py::arithmetic())'.format(
prefix=prefix, module=module, enum=enum, cpp_class=cpp_class)
for enumerator in enum.enumerators:
res += '\n{prefix} .value("{enumerator.name}", {cpp_class}::{enumerator.name})'.format(
prefix=prefix, enumerator=enumerator, cpp_class=cpp_class)
res += ";\n\n"
return res
def wrap_enums(self, enums, instantiated_class, prefix=' ' * 4):
"""Wrap multiple enums defined in a class."""
cpp_class = instantiated_class.to_cpp()
module_var = instantiated_class.name.lower()
res = ''
for enum in enums:
res += "\n" + self.wrap_enum(
enum, class_name=cpp_class, module=module_var, prefix=prefix)
return res
def wrap_instantiated_class(
self, instantiated_class: instantiator.InstantiatedClass):
"""Wrap the class."""
module_var = self._gen_module_var(instantiated_class.namespaces())
cpp_class = instantiated_class.to_cpp()
if cpp_class in self.ignore_classes:
return ""
if instantiated_class.parent_class:
class_parent = "{instantiated_class.parent_class}, ".format(
instantiated_class=instantiated_class)
else:
class_parent = ''
if instantiated_class.enums:
# If class has enums, define an instance and set module_var to the instance
instance_name = instantiated_class.name.lower()
class_declaration = (
'\n py::class_<{cpp_class}, {class_parent}'
'{shared_ptr_type}::shared_ptr<{cpp_class}>> '
'{instance_name}({module_var}, "{class_name}");'
'\n {instance_name}').format(
shared_ptr_type=('boost' if self.use_boost else 'std'),
cpp_class=cpp_class,
class_name=instantiated_class.name,
class_parent=class_parent,
instance_name=instance_name,
module_var=module_var)
module_var = instance_name
else:
class_declaration = (
'\n py::class_<{cpp_class}, {class_parent}'
'{shared_ptr_type}::shared_ptr<{cpp_class}>>({module_var}, "{class_name}")'
).format(shared_ptr_type=('boost' if self.use_boost else 'std'),
cpp_class=cpp_class,
class_name=instantiated_class.name,
class_parent=class_parent,
module_var=module_var)
return ('{class_declaration}'
'{wrapped_ctors}'
'{wrapped_methods}'
'{wrapped_static_methods}'
'{wrapped_properties}'
'{wrapped_operators};\n'.format(
class_declaration=class_declaration,
wrapped_ctors=self.wrap_ctors(instantiated_class),
wrapped_methods=self.wrap_methods(
instantiated_class.methods, cpp_class),
wrapped_static_methods=self.wrap_methods(
instantiated_class.static_methods, cpp_class),
wrapped_properties=self.wrap_properties(
instantiated_class.properties, cpp_class),
wrapped_operators=self.wrap_operators(
instantiated_class.operators, cpp_class)))
def wrap_instantiated_declaration(
self, instantiated_decl: instantiator.InstantiatedDeclaration):
"""Wrap the class."""
module_var = self._gen_module_var(instantiated_decl.namespaces())
cpp_class = instantiated_decl.to_cpp()
if cpp_class in self.ignore_classes:
return ""
res = (
'\n py::class_<{cpp_class}, '
'{shared_ptr_type}::shared_ptr<{cpp_class}>>({module_var}, "{class_name}")'
).format(shared_ptr_type=('boost' if self.use_boost else 'std'),
cpp_class=cpp_class,
class_name=instantiated_decl.name,
module_var=module_var)
return res
def wrap_stl_class(self, stl_class):
"""Wrap STL containers."""
module_var = self._gen_module_var(stl_class.namespaces())
cpp_class = stl_class.to_cpp()
if cpp_class in self.ignore_classes:
return ""
return (
'\n py::class_<{cpp_class}, {class_parent}'
'{shared_ptr_type}::shared_ptr<{cpp_class}>>({module_var}, "{class_name}")'
'{wrapped_ctors}'
'{wrapped_methods}'
'{wrapped_static_methods}'
'{wrapped_properties};\n'.format(
shared_ptr_type=('boost' if self.use_boost else 'std'),
cpp_class=cpp_class,
class_name=stl_class.name,
class_parent=str(stl_class.parent_class) +
(', ' if stl_class.parent_class else ''),
module_var=module_var,
wrapped_ctors=self.wrap_ctors(stl_class),
wrapped_methods=self.wrap_methods(stl_class.methods,
cpp_class),
wrapped_static_methods=self.wrap_methods(
stl_class.static_methods, cpp_class),
wrapped_properties=self.wrap_properties(
stl_class.properties, cpp_class),
))
def wrap_functions(self,
functions,
namespace,
prefix='\n' + ' ' * 8,
suffix=''):
"""
Wrap all the global functions.
"""
res = ""
for function in functions:
function_name = function.name
# Add underscore to disambiguate if the function name matches a python keyword
python_keywords = self.python_keywords + ['print']
if function_name in python_keywords:
function_name = function_name + "_"
cpp_method = function.to_cpp()
is_static = isinstance(function, parser.StaticMethod)
return_void = function.return_type.is_void()
args_names = function.args.names()
py_args_names = self._py_args_names(function.args)
args_signature = self._method_args_signature(function.args)
caller = namespace + "::"
function_call = ('{opt_return} {caller}{function_name}'
'({args_names});'.format(
opt_return='return'
if not return_void else '',
caller=caller,
function_name=cpp_method,
args_names=', '.join(args_names),
))
ret = ('{prefix}.{cdef}("{function_name}",'
'[]({args_signature}){{'
'{function_call}'
'}}'
'{py_args_names}){suffix}'.format(
prefix=prefix,
cdef="def_static" if is_static else "def",
function_name=function_name,
args_signature=args_signature,
function_call=function_call,
py_args_names=py_args_names,
suffix=suffix))
res += ret
return res
def _partial_match(self, namespaces1, namespaces2):
for i in range(min(len(namespaces1), len(namespaces2))):
if namespaces1[i] != namespaces2[i]:
return False
return True
def _gen_module_var(self, namespaces):
"""Get the Pybind11 module name from the namespaces."""
# We skip the first value in namespaces since it is empty
sub_module_namespaces = namespaces[len(self.top_module_namespaces):]
return "m_{}".format('_'.join(sub_module_namespaces))
def _add_namespaces(self, name, namespaces):
if namespaces:
# Ignore the first empty global namespace.
idx = 1 if not namespaces[0] else 0
return '::'.join(namespaces[idx:] + [name])
else:
return name
def wrap_namespace(self, namespace):
"""Wrap the complete `namespace`."""
wrapped = ""
includes = ""
namespaces = namespace.full_namespaces()
if not self._partial_match(namespaces, self.top_module_namespaces):
return "", ""
if len(namespaces) < len(self.top_module_namespaces):
for element in namespace.content:
if isinstance(element, parser.Include):
include = "{}\n".format(element)
# replace the angle brackets with quotes
include = include.replace('<', '"').replace('>', '"')
includes += include
if isinstance(element, parser.Namespace):
(
wrapped_namespace,
includes_namespace,
) = self.wrap_namespace( # noqa
element)
wrapped += wrapped_namespace
includes += includes_namespace
else:
module_var = self._gen_module_var(namespaces)
if len(namespaces) > len(self.top_module_namespaces):
wrapped += (
' ' * 4 + 'pybind11::module {module_var} = '
'{parent_module_var}.def_submodule("{namespace}", "'
'{namespace} submodule");\n'.format(
module_var=module_var,
namespace=namespace.name,
parent_module_var=self._gen_module_var(
namespaces[:-1]),
))
# Wrap an include statement, namespace, class or enum
for element in namespace.content:
if isinstance(element, parser.Include):
include = "{}\n".format(element)
# replace the angle brackets with quotes
include = include.replace('<', '"').replace('>', '"')
includes += include
elif isinstance(element, parser.Namespace):
wrapped_namespace, includes_namespace = self.wrap_namespace(
element)
wrapped += wrapped_namespace
includes += includes_namespace
elif isinstance(element, instantiator.InstantiatedClass):
wrapped += self.wrap_instantiated_class(element)
wrapped += self.wrap_enums(element.enums, element)
elif isinstance(element, instantiator.InstantiatedDeclaration):
wrapped += self.wrap_instantiated_declaration(element)
elif isinstance(element, parser.Variable):
variable_namespace = self._add_namespaces('', namespaces)
wrapped += self.wrap_variable(namespace=variable_namespace,
module_var=module_var,
variable=element,
prefix='\n' + ' ' * 4)
elif isinstance(element, parser.Enum):
wrapped += self.wrap_enum(element)
# Global functions.
all_funcs = [
func for func in namespace.content
if isinstance(func, (parser.GlobalFunction,
instantiator.InstantiatedGlobalFunction))
]
wrapped += self.wrap_functions(
all_funcs,
self._add_namespaces('', namespaces)[:-2],
prefix='\n' + ' ' * 4 + module_var,
suffix=';',
)
return wrapped, includes
def wrap_file(self, content, module_name=None, submodules=None):
"""
Wrap the code in the interface file.
Args:
content: The contents of the interface file.
module_name: The name of the module.
submodules: List of other interface file names that should be linked to.
"""
# Parse the contents of the interface file
module = parser.Module.parseString(content)
# Instantiate all templates
module = instantiator.instantiate_namespace(module)
wrapped_namespace, includes = self.wrap_namespace(module)
# Export classes for serialization.
boost_class_export = ""
for cpp_class in self._serializing_classes:
new_name = cpp_class
# The boost's macro doesn't like commas, so we have to typedef.
if ',' in cpp_class:
new_name = re.sub("[,:<> ]", "", cpp_class)
boost_class_export += "typedef {cpp_class} {new_name};\n".format( # noqa
cpp_class=cpp_class, new_name=new_name)
boost_class_export += "BOOST_CLASS_EXPORT({new_name})\n".format(
new_name=new_name, )
# Reset the serializing classes list
self._serializing_classes = []
holder_type = "PYBIND11_DECLARE_HOLDER_TYPE(TYPE_PLACEHOLDER_DONOTUSE, " \
"{shared_ptr_type}::shared_ptr<TYPE_PLACEHOLDER_DONOTUSE>);"
include_boost = "#include <boost/shared_ptr.hpp>" if self.use_boost else ""
submodules_init = []
if submodules is not None:
module_def = "PYBIND11_MODULE({0}, m_)".format(module_name)
for idx, submodule in enumerate(submodules):
submodules[idx] = "void {0}(py::module_ &);".format(submodule)
submodules_init.append("{0}(m_);".format(submodule))
else:
module_def = "void {0}(py::module_ &m_)".format(module_name)
submodules = []
return self.module_template.format(
include_boost=include_boost,
module_def=module_def,
module_name=module_name,
includes=includes,
holder_type=holder_type.format(
shared_ptr_type=('boost' if self.use_boost else 'std'))
if self.use_boost else "",
wrapped_namespace=wrapped_namespace,
boost_class_export=boost_class_export,
submodules="\n".join(submodules),
submodules_init="\n".join(submodules_init),
)
def wrap_submodule(self, source):
"""
Wrap a list of submodule files, i.e. a set of interface files which are
in support of a larger wrapping project.
E.g. This is used in GTSAM where we have a main gtsam.i, but various smaller .i files
which are the submodules.
The benefit of this scheme is that it reduces compute and memory usage during compilation.
Args:
source: Interface file which forms the submodule.
"""
filename = Path(source).name
module_name = Path(source).stem
# Read in the complete interface (.i) file
with open(source, "r") as f:
content = f.read()
# Wrap the read-in content
cc_content = self.wrap_file(content, module_name=module_name)
# Generate the C++ code which Pybind11 will use.
with open(filename.replace(".i", ".cpp"), "w") as f:
f.write(cc_content)
def wrap(self, sources, main_module_name):
"""
Wrap all the main interface file.
Args:
sources: List of all interface files.
The first file should be the main module.
main_module_name: The name for the main module.
"""
main_module = sources[0]
# Get all the submodule names.
submodules = []
for source in sources[1:]:
module_name = Path(source).stem
submodules.append(module_name)
with open(main_module, "r") as f:
content = f.read()
cc_content = self.wrap_file(content,
module_name=self.module_name,
submodules=submodules)
# Generate the C++ code which Pybind11 will use.
with open(main_module_name, "w") as f:
f.write(cc_content)
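# Hedged usage sketch (not part of this module; names, file paths and the template string
# are illustrative assumptions):
#   wrapper = PybindWrapper(module_name="example",
#                           use_boost=False,
#                           module_template=template_str)      # template_str read from a .tpl file by the caller
#   wrapper.wrap_submodule("geometry.i")                        # emits geometry.cpp for the submodule
#   wrapper.wrap(["example.i", "geometry.i"], "example.cpp")    # main module links the submodule init functions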
|
py | 1a3f126bc3295d878c13d5d0cc051117fa12ba65 | # Copyright (c) 2014 Dark Secret Software Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import datetime
import dateutil.parser
class Criteria(object):
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def should_fire(self, stream, last_event, debugger, now=None):
return False
class Inactive(Criteria):
def __init__(self, expiry_in_seconds):
super(Inactive, self).__init__()
self.expiry_in_seconds = expiry_in_seconds
def should_fire(self, stream, last_event, debugger, now=None):
if now is None:
now = datetime.datetime.utcnow()
secs = (now - stream.last_update).seconds
#print "Stream %s = %d seconds (%d)" % (stream.uuid, secs, self.expiry_in_seconds)
return debugger.check(
secs > self.expiry_in_seconds,
"no timeout")
class EventType(Criteria):
def __init__(self, event_type):
super(EventType, self).__init__()
self.event_type = event_type
def should_fire(self, stream, last_event, debugger, now=None):
if not last_event:
return debugger.criteria_mismatch('no last event')
return debugger.check(last_event['event_type'] == self.event_type,
"wrong event type")
class And(Criteria):
def __init__(self, criteria_list):
super(And, self).__init__()
self.criteria_list = criteria_list
def should_fire(self, stream, last_event, debugger, now=None):
should = [c.should_fire(stream, last_event, debugger, now)
for c in self.criteria_list]
return debugger.check(all(should), "AND failed")
class EndOfDayExists(Criteria):
def __init__(self, exists_name):
super(EndOfDayExists, self).__init__()
self.exists_name = exists_name
def _is_zero_hour(self, tyme):
return tyme.time() == datetime.time.min
def should_fire(self, stream, last_event, debugger, now=None):
if not last_event:
stream.load_events() # Ouch ... expensive.
if len(stream.events) == 0:
return debugger.criteria_mismatch("No events")
last_event = stream.events[-1]
if last_event['event_type'] != self.exists_name:
return debugger.criteria_mismatch("Wrong event type")
payload = last_event['payload']
audit_start = payload.get('audit_period_beginning')
audit_end = payload.get('audit_period_ending')
if None in [audit_start, audit_end]:
return debugger.criteria_mismatch("No audit beginning/end")
audit_start = dateutil.parser.parse(audit_start)
audit_end = dateutil.parser.parse(audit_end)
return debugger.check(self._is_zero_hour(audit_start) and
self._is_zero_hour(audit_end),
"time != 00:00:00.0 ")
|
py | 1a3f12951a1b5c335a54d1b498cdc7fc934b8a98 | from common.func_plots import get_plot_pca
from common.func_plots import get_plot_line
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.decomposition import PCA
from datetime import datetime as ddtime
from scipy import signal
import datetime as dtime
import pmdarima as pm
import pandas as pd
import numpy as np
import math
__author__ = "Jose Fernando Montoya Cardona"
__credits__ = ["Jose Fernando Montoya Cardona"]
__email__ = "[email protected]"
def round_down_to_even(f):
return math.floor(f / 2.) * 2
def transform_data(df_data, date_i, date_f, get_only_dem=False):
cols_demh = ['d' + str(i).zfill(2) for i in range(1, 25)]
cols_df_t = ['fecha', 'codsbm'] + cols_demh
dft = df_data[cols_df_t]
cols_dft = [str(i).zfill(2) + ':00:00' for i in range(0, 24)]
dft.columns = ['fecha', 'codsbm'] + cols_dft
date_f = (ddtime.strptime(date_f, '%Y-%m-%d')+dtime.timedelta(days=1)).strftime('%Y-%m-%d')
dft = dft[(dft.fecha >= ddtime.strptime(date_i, '%Y-%m-%d')) & (dft.fecha < ddtime.strptime(date_f, '%Y-%m-%d'))]
dft = pd.melt(dft, id_vars=['fecha', 'codsbm'], var_name='hora', value_vars=cols_dft, value_name='demanda')
dft['fechahora'] = dft['fecha'].dt.strftime('%Y-%m-%d') + ' ' + dft['hora']
dft.fechahora = pd.to_datetime(dft.fechahora)
dft_sbm = pd.pivot_table(data=dft, values='demanda', columns='codsbm', index=dft.fechahora)
if get_only_dem:
dft = dft.groupby(by='fechahora')['demanda'].sum()
dft = dft.reset_index()
return dft, dft_sbm
def normalize_min_max(arr):
'''
Applies min-max normalization to a 1-D array.
Input: ndarray of values (NaN entries are dropped before scaling)
Output: 1-D ndarray scaled to the [0, 1] range
'''
arr = arr[~np.isnan(arr)].reshape(-1, 1)
minmax_scaler = MinMaxScaler()
df_norm = minmax_scaler.fit_transform(arr)
# df_norm = pd.DataFrame(df_norm, columns=df.columns)
return df_norm.reshape(-1)
def standar_z(df):
'''
Applies Z-score standardization to the variables.
Input: DataFrame -- rows: samples, columns: features
Output: DataFrame standardized column-wise, i.e. per feature
'''
standar_scaler = StandardScaler()
df_stand = standar_scaler.fit_transform(df)
df_stand = pd.DataFrame(df_stand, columns=df.columns, index=df.index)
return df_stand
def get_matrix_pca(matrix_features, exp_variance=0.9, show_plot=False, dynamic_component=True, n_comp=40):
pca = PCA(n_components=matrix_features.shape[1], svd_solver='full')
pca.fit(matrix_features)
ev = pd.DataFrame({'Explained_variance': pca.explained_variance_ratio_,
'Cum_explained_variance': np.cumsum(pca.explained_variance_ratio_),
'n_components': list(range(1, matrix_features.shape[1] + 1))
})
if dynamic_component:
n_components = ev[ev['Cum_explained_variance'] <= exp_variance]['n_components'].values[-1]
print('Getting PCA')
print('Número de componentes que explican el ', '{:.1f}'.format(exp_variance * 100), '% de la varianza: ',
n_components)
else:
n_components = n_comp
exp_var = ev[ev['n_components'] == n_components]['Cum_explained_variance'].values[0]
print('Getting PCA')
print('Con ', n_components, ' componentes se explica el ', '{:.1f}'.format(exp_var * 100), '% de la varianza')
pca_int = PCA(n_components=n_components)
m_pca = pca_int.fit_transform(matrix_features)
m_pca = pd.DataFrame(m_pca, columns=['PC_' + str(pca).zfill(2) for pca in range(1, n_components + 1)],
index=matrix_features.index)
if show_plot:
get_plot_pca(ev)
return m_pca
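# Hedged example (not part of the original module; random features for illustration only):
# standardize a feature matrix and keep the principal components explaining ~90% of the variance.
def _example_get_matrix_pca():
    feats = pd.DataFrame(np.random.rand(50, 10),
                         columns=['f' + str(i) for i in range(10)],
                         index=['sbm' + str(i) for i in range(50)])
    m_pca = get_matrix_pca(standar_z(feats), exp_variance=0.9)
    return m_pca.shape  # (50, number of components explaining ~90% of the variance)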
def group_dem_users_cluster(dem_data, m_features_labels):
df_labels_sbm = m_features_labels.reset_index()[['codsbm', 'labels']]
df_group = pd.merge(dem_data, df_labels_sbm, how='left', left_on='codsbm', right_on='codsbm')
df_group_label = df_group.groupby(by=['fechahora', 'labels'])['demanda'].sum().reset_index()
df_train_labels = pd.pivot_table(data=df_group_label, values='demanda', columns='labels',
index=df_group_label.fechahora)
return df_train_labels
def log_transform(s_dem_data):
s_log_data = pd.Series(np.log(s_dem_data))
return s_log_data
def get_period_signal_num_k(data, n_coeff_fourier=4):
f, pxx_den = signal.periodogram(data)
m_f = round_down_to_even(round(1 / f[list(pxx_den).index(max(pxx_den))], 0))
if m_f < n_coeff_fourier * 2:
k_f = round_down_to_even(m_f / 2)
else:
k_f = n_coeff_fourier
return m_f, k_f, f, pxx_den
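# Hedged example (not part of the original module; synthetic series for illustration):
# the dominant period is read off the periodogram peak, so a 14-day hourly series with a
# daily cycle should give m close to 24 and k capped at n_coeff_fourier.
def _example_get_period_signal_num_k():
    t = np.arange(24 * 14)
    demand = 100 + 10 * np.sin(2 * np.pi * t / 24)
    m, k, f, pxx = get_period_signal_num_k(demand, n_coeff_fourier=4)
    return m, k  # expected: (24, 4)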
def conditional_seasonal(seasonal, num_forecast, m, gap_pred):
if gap_pred + num_forecast > m:
gap_seasonal = list(seasonal)[gap_pred:m]
new_n_forecast = gap_pred + num_forecast - m
ratio = new_n_forecast / m
ent, res = int(str(ratio).split('.')[0]), int(round((ratio - int(str(ratio).split('.')[0])) * m, 0))
pred_seasonal = np.array(gap_seasonal + list(seasonal)[0:m] * ent + list(seasonal)[0:res])
return pred_seasonal
elif gap_pred + num_forecast <= m:
pred_seasonal = np.array(list(seasonal)[gap_pred:num_forecast+gap_pred])
return pred_seasonal
def second_conditional_seasonal(seasonal, num_forecast, m):
if num_forecast > m:
ratio = num_forecast / m
ent, res = int(str(ratio).split('.')[0]), int(round((ratio - int(str(ratio).split('.')[0])) * m, 0))
pred_seasonal = np.array(list(seasonal)[0:m] * ent + list(seasonal)[0:res])
return pred_seasonal
elif num_forecast <= m:
# pred_seasonal = np.array(list(seasonal)[int(m/2):num_forecast+int(m/2)])
pred_seasonal = np.array(list(seasonal)[0:num_forecast])
return pred_seasonal
def get_seasonal(seasonal, num_forecast, m, gap_pred):
print('seasonal_shape: ', seasonal.shape, 'period: ', m)
if gap_pred/m <= 1:
print('condition_gap/m < 1: ', gap_pred/m)
# pred_seasonal = conditional_seasonal(seasonal, num_forecast, m, gap_pred)
pred_seasonal = second_conditional_seasonal(seasonal, num_forecast, m)
else:
ratio_gap = gap_pred/m
new_gap_pred = int(round((ratio_gap - int(str(ratio_gap).split('.')[0])) * m, 0))
# pred_seasonal = conditional_seasonal(seasonal, num_forecast, m, new_gap_pred)
pred_seasonal = second_conditional_seasonal(seasonal, num_forecast, m)
return pred_seasonal
def decompose_series_forecast_seasonal(series, m, forecast_seasonal, gap_pred=0, num_forecast=24, type_decompose='additive', filter_decompose=None):
s_decompose = pm.arima.decompose(series, type_=type_decompose, m=m, filter_=filter_decompose)
get_plot_line(pd.DataFrame(s_decompose.seasonal))
seasonal = s_decompose.seasonal
pred_seasonal = get_seasonal(seasonal, num_forecast, m, gap_pred)
if type_decompose == 'additive':
forecast_seasonal += pred_seasonal
elif type_decompose == 'multiplicative':
forecast_seasonal = forecast_seasonal * pred_seasonal
trend = np.array(s_decompose.trend)[~np.isnan(np.array(s_decompose.trend))]
residual = s_decompose.random[~np.isnan(s_decompose.random)]
trend_residual = trend + residual
return trend_residual, forecast_seasonal
def decompose_series_search_periods(data, type_decompose='additive', num_decompose=1, filter_decompose=None, num_forecast=24):
threshold_power = 6
gap = 0
periods_decompose = []
if type_decompose == 'additive':
forecast_seasonal = np.zeros(num_forecast)
elif type_decompose == 'multiplicative':
forecast_seasonal = np.ones(num_forecast)
else:
raise ValueError('invalid variable type decompose {}.'.format(type_decompose))
for i in range(1, num_decompose+1):
len_data = len(data)
val_period, _, f, pxx_den = get_period_signal_num_k(data)
if val_period < len_data:
m = val_period
else:
periods = 1 / f[np.where(pxx_den >= max(pxx_den) / threshold_power)]
powers = pxx_den[np.where(pxx_den >= max(pxx_den) / threshold_power)]
if len(periods) > 1:
new_periods = periods[1:]
new_powers = powers[1:]
m = round_down_to_even(round(new_periods[list(new_powers).index(max(new_powers))], 0))
else:
m = val_period
if m < len_data:
periods_decompose.append(str(m))
data, forecast_seasonal = decompose_series_forecast_seasonal(series=data, m=m
, forecast_seasonal=forecast_seasonal
, num_forecast=num_forecast
, type_decompose=type_decompose
, filter_decompose=filter_decompose
, gap_pred=gap)
gap += int(m / 2)
else:
print('max_num_decompose_possible: ', i-1)
return forecast_seasonal, data, periods_decompose
return forecast_seasonal, data, periods_decompose
def decompose_series_with_periods(data, list_periods, type_decompose='additive', filter_decompose=None, num_forecast=24):
gap = 0
if type_decompose == 'additive':
forecast_seasonal = np.zeros(num_forecast)
elif type_decompose == 'multiplicative':
forecast_seasonal = np.ones(num_forecast)
else:
raise ValueError('invalid variable type decompose {}.'.format(type_decompose))
for m in list_periods:
m = int(m)
len_data = len(data)
if m < len_data:
data, forecast_seasonal = decompose_series_forecast_seasonal(data, m, forecast_seasonal
, num_forecast=num_forecast
, type_decompose=type_decompose
, gap_pred=gap)
gap += int(m / 2)
else:
raise ValueError('invalid period {} to decompose because length of signal is {}.'.format(m, len_data))
return forecast_seasonal, data, gap
|
py | 1a3f12f0866b1bf4fd2c2d0727f1b701853a7b86 | # ======================================================================
# Handheld Halting
# Advent of Code 2020 Day 08 -- Eric Wastl -- https://adventofcode.com
#
# Python implementation by Dr. Dean Earl Wright III
# ======================================================================
# ======================================================================
# i n s t r u c t i o n . p y
# ======================================================================
"A single instruction for the Advent of Code 2020 Day 08 puzzle"
# ----------------------------------------------------------------------
# import
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
# constants
# ----------------------------------------------------------------------
INSTRUCTIONS = ['nop', 'acc', 'jmp']
# ======================================================================
# Instruction
# ======================================================================
class Instruction(object): # pylint: disable=R0902, R0205
"Single Instruction Object for Handheld Halting"
def __init__(self, text=None, part2=False):
# 1. Set the initial values
self.part2 = part2
self.text = text
self.operation = None
self.argument = 0
self.executed = 0
# 2. Process text (if any)
if text is not None and len(text) > 0:
parts = text.split(' ')
assert len(parts) == 2
assert parts[0] in INSTRUCTIONS
self.operation = parts[0]
self.argument = int(parts[1])
def execute(self, pc, acc, verbose=False, limit=0):
"Execute a single instruction, Returns T/F, pc, acc"
# 1. Don't execute instruction if limit has been reached
if self.executed >= limit:
return False, pc, acc
# 2. Execute the instruction
if self.operation == 'nop':
pc += 1
elif self.operation == 'acc':
acc += self.argument
pc += 1
else:
pc += self.argument
# 3. Record our passage
self.executed += 1
# 4. Return the results
return True, pc, acc
def reset(self):
"Reset the number of times the instruction is executed"
self.executed = 0
def repair(self):
"Repair jmp or nop instructions"
if self.operation == 'nop':
self.operation = 'jmp'
return True
if self.operation == 'jmp':
self.operation = 'nop'
return True
return False
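# Hedged example (not part of the original solution): shows the
# (executed, new pc, new acc) return convention of execute().
def _example_instruction():
    "Run a single 'acc +3' instruction once, with an execution limit of 1"
    inst = Instruction("acc +3")
    return inst.execute(pc=0, acc=0, limit=1)  # expected: (True, 1, 3)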
# ----------------------------------------------------------------------
# module initialization
# ----------------------------------------------------------------------
if __name__ == '__main__':
pass
# ======================================================================
# end i n s t r u c t i o n . p y end
# ======================================================================
|
py | 1a3f130290a7586b59dd0e6d720721260aa49e74 | from unittest import TestCase
import numpy as np
import pandas as pd
from scattertext.termscoring.BetaPosterior import BetaPosterior
from scattertext.test.test_termDocMatrixFactory import build_hamlet_jz_corpus
class TestBetaPosterior(TestCase):
def test_get_score_df(self):
corpus = build_hamlet_jz_corpus()
beta_posterior = BetaPosterior(corpus).set_categories('hamlet')
score_df = beta_posterior.get_score_df()
scores = beta_posterior.get_scores()
np.testing.assert_almost_equal(scores[:5], [-0.3194860824225506, 1.0294085051562822, 1.0294085051562822,
1.234664219528909, 1.0294085051562822])
def test_get_name(self):
corpus = build_hamlet_jz_corpus()
self.assertEqual(BetaPosterior(corpus).get_name(), 'Beta Posterior')
|
py | 1a3f133b3f6815ab348620996bfcadffb1a38546 | import os
files = os.listdir()
for f in files:
if (f[-8:] != "m_NN.csv"):
continue
infile = f.split('.')[0]
print(infile)
os.system("csvmidi \"{}.csv\" \"{}.midi\"".format(infile, infile))
|
py | 1a3f153b6496e72131bae8cb8f13d77c521410a0 | # -*- coding: utf-8 -*-
import os
import commands
import operator
from optparse import OptionParser
parser = OptionParser(usage="%prog -f FILE, FILE,... -o FILE -l FILE")
parser.add_option("-f", "--files", dest="files",help ="The classification files separated by commas")
parser.add_option("-o", "--out", dest="out",help ="The output file name")
parser.add_option("-i", "--fas", dest="fas",help ="The fasta file name")
parser.add_option("-a", "--attr", dest="attr",help ="The attibutes file PDB")
parser.add_option("-p", "--path", dest="path",help ="Path to programs")
(args, options) = parser.parse_args()
def main(files=args.files,output=args.out,fas=args.fas,attr=args.attr, pathToProg=args.path):
#We retrieve the names of the classification files
if ',' in files: files = files.split(',')
else: files = files.split()
diQueriesSeq, diNewFamily, param = {}, {}, []
diAttrib, diNumSeqAndIdSeq = getIdSeqNumSeqAndColrs(fas,attr)
fastaFileName = fas.replace('.fasta', '')
if os.path.exists(fastaFileName +'_rClassif/'): print commands.getoutput('rm -r '+ fastaFileName +'_rClassif/')
print commands.getoutput('mkdir '+ fastaFileName +'_rClassif/')
####################################################################################################
#We retrieve only the test sequences
for idSeq, comment in diAttrib.items():
if comment == 'black' :
diQueriesSeq[idSeq]=[]
for i in range(len(files)):
diQueriesSeq[idSeq].append([[], []])
#For each file we replace each space with a line break and then retrieve the parameters of the file
for ifile in files:
print commands.getoutput("cat "+ ifile +" | tr \' \' \'\n\' > "+ ifile +'bis')
print commands.getoutput("rm "+ ifile)
print commands.getoutput("mv "+ ifile +'bis '+ ifile)
#looking for the parameters
liste, index1 = [], 0
if "_" in ifile: liste = ifile.split("_")
elem = [ elt for elt in liste if "-classif" in elt ]
for elt in liste:
if "-classif" not in elt: index1 += len(elt) + 1
else: index2 = elt.find('-classif')
index2 += index1
param.append(ifile[index1:index2])
###################################################################################################
"""
If several classification files are submitted, we go through each file and collect the
information it provides. A sequence may be classified according to one classification file
and unclassified according to another, depending on the parameters used to build these files.
The parameters are those used since the alignment step (paloma).
"""
diFile_concepts, counter = {}, 0
for ifile in files:
fileName, diBlocks, diTriFile, diClassement = ifile, {}, {}, {}
xfile = open(ifile, 'r')
if "/" in fileName:
chemin = fileName.split('/')[1]
else:
chemin = os.getcwd()
lines = xfile.read().split('Answer:')[-1]
for iSeq in diQueriesSeq: diClassement[iSeq] = []
#=========================================================================================
if 'Optimization' in lines: lines = lines.split('Optimization')[0]; print 'Optimisation...'
elif 'Models' in lines: lines = lines.split('Models')[0]; print 'Models...'
#=========================================================================================
bestclassified = list(filter(lambda line: 'bestclassified' in line.strip().split('(') and ',' in line.strip().split('(')[1], lines.split()))
classified = list(filter(lambda line: 'classified' in line.strip().split('(') and ',' in line.strip().split('(')[1], lines.split()))
bestambiguous = list(filter(lambda line: 'bestambiguous' in line.strip().split('(') and ',' in line.strip().split('(')[1], lines.split()))
ambiguous = list(filter(lambda line: 'ambiguous' in line.strip().split('(') and ',' in line.strip().split('(')[1], lines.split()))
unclassified = list(filter(lambda line: 'unclassified' in line.strip().split('('), lines.split()))
new_family = list(filter(lambda line: 'support_new_family' in line, lines.split()))
#=========================================================================================
for line in bestclassified:
idSeq = (line.split(',')[0]).split('(')[1].strip('"')
if idSeq in diQueriesSeq:
diQueriesSeq[idSeq][counter][0].append(line.split(',')[1])
diQueriesSeq[idSeq][counter][1].append('best classified')
diClassement[idSeq].append(line.split(',')[1])
diTriFile[idSeq] = 6
for line in classified:
idSeq = (line.split(',')[0]).split('(')[1].strip('"')
if idSeq in diQueriesSeq:
diQueriesSeq[idSeq][counter][0].append(line.split(',')[1])
diQueriesSeq[idSeq][counter][1].append('classified')
diClassement[idSeq].append(line.split(',')[1])
diTriFile[idSeq] = 5
for line in bestambiguous:
idSeq = (line.split(',')[0]).split('(')[1].strip('"')
if idSeq in diQueriesSeq:
diQueriesSeq[idSeq][counter][0].append(line.split(',')[1])
diQueriesSeq[idSeq][counter][1].append('best ambiguous')
diClassement[idSeq].append(line.split(',')[1])
diTriFile[idSeq] = 3
for line in ambiguous:
idSeq = (line.split(',')[0]).split('(')[1].strip('"')
if idSeq in diQueriesSeq:
diQueriesSeq[idSeq][counter][0].append(line.split(',')[1])
diQueriesSeq[idSeq][counter][1].append('ambiguous')
diClassement[idSeq].append(line.split(',')[1])
diTriFile[idSeq] = 2
for line in unclassified:
idSeq = (line.split('("')[1]).strip('")')
if idSeq in diQueriesSeq:
diQueriesSeq[idSeq][counter][0].append('unclassified')
diQueriesSeq[idSeq][counter][1].append('')
diClassement[idSeq].append('unclassified')
diTriFile[idSeq] = 1
##################################################################################################
#Search for concepts, associated blocks & associated sequences
members_new = list(filter(lambda line: 'membernew(' in line, lines.split()))
blocks_new = list(filter(lambda line: 'blocknew(' in line, lines.split()))
test_quality = ['best classified', 'classified', 'best ambiguous', 'ambiguous', 'unclassified']
diConcept = {}
for line in new_family:
numConcept, iBlocks, iSeqs, infosConcept = (line.split('(')[1]).split(',')[0], [], [], []
#The member blocks of the concept, per file
blocks_of_concept = list(filter(lambda line: 'blocknew('+numConcept+',' in line,blocks_new))
for iline in blocks_of_concept:
numBlock = iline.split(',')[1].strip(')')
iBlocks.append(numBlock)
infosConcept.append(iBlocks)
#The member sequences of the concept, per file
members_new_concept = list(filter(lambda line: ','+ numConcept +')' in line, members_new))
for iline in members_new_concept:
idSeq = iline.split('(')[1].split(',')[0].strip('"')
#If the sequence is among the queries sequences
if idSeq in diQueriesSeq:
iSeqs.append(idSeq)
diQueriesSeq[idSeq][counter][0].append('new('+ numConcept +')')
if len(diQueriesSeq[idSeq][counter][1]) == 0:
diClassement[idSeq].append('new('+ numConcept +')')
diTriFile[idSeq] = 4
infosConcept.append(iSeqs)
diConcept[numConcept] = infosConcept
diFile_concepts['File_'+str(counter+1)] = diConcept
##################################################################################################
#Here we find the exception sequences ('except') if they exist.
for idSeq in diQueriesSeq:
if len(diQueriesSeq[idSeq][counter][0]) == 0:
diQueriesSeq[idSeq][counter][0].append('except')
diClassement[idSeq].append('except')
diTriFile[idSeq] = 0
#Sorting the dictionary in descending order
diTriFile = sorted(diTriFile.iteritems(), reverse=True, key=operator.itemgetter(1))
if "/" in fileName:
outPutFile=open(fastaFileName+'_rClassif/'+fileName.split('/')[2].replace('classif-out.lp','res')+'.csv','w')
else:
outPutFile=open(fastaFileName+'_rClassif/'+fileName.replace('classif-out.lp','res')+'.csv','w')
outPutFile.write('File: '+fastaFileName+', param: '+ param[counter]+'\n\n\n')
outPutFile.write('sequences , subfamily , quality \n\n'.upper())
#Writing results for each input classification file
for i in range(len(diTriFile)):
idSeq = diTriFile[i][0]
outPutFile.write(idSeq+ ',')
for Class in list(set(diClassement[idSeq])) : outPutFile.write(Class + ' ')
outPutFile.write(','+ str(diTriFile[i][1]))
outPutFile.write('\n')
xfileName = chemin+"/"+fastaFileName+"_"+param[counter]+"_plma.dot"
diBlocks = getBlocks(xfileName)
seqAndBlocks = getSeqAndInvolvedInBlocks(diNumSeqAndIdSeq,diBlocks)
#Writing blocks
outPutFile.write('\n\n new families \n\n\n'.upper())
if diConcept != {}:
outPutFile.write("Concepts ,Members,Number of sequences,Number of blocks, interesting blocks\n")
for numConcept, conceptInfos in diConcept.iteritems():
if conceptInfos[1] !=[]:
outPutFile.write(numConcept + ', ,'+ str(len(conceptInfos[1]))
+','+ str(len(conceptInfos[0])) +'\n')
for seq in list(set(conceptInfos[1])):
suite_of_block = ''
for numBlock in list(set(conceptInfos[0])):
if numBlock in seqAndBlocks[seq].keys():
suite_of_block += seqAndBlocks[seq][numBlock]+' '
outPutFile.write(","+ seq +',,,'+ suite_of_block+ "\n")
outPutFile.write('\n')
outPutFile.close()
#Part Coloring PLMA by Families
colorClassify(fas, attr, fileName, diQueriesSeq, diClassement, diConcept, param, counter, pathToProg)
counter += 1
xfile.close()
"""
Writing step for the .csv file of global results: each sequence is written to the file with its status, i.e.
classified, ambiguous, unclassified, etc. The subfamily field indicates the family (or families) in which it was classified.
"""
outPutFile = open(fastaFileName+'_rClassif/'+output[:len(output)-4]+'Global'+output[len(output)-4:], 'w')
outPutFile.write('File: '+fastaFileName+'\n\n\n')
outPutFile.write(' sequences , parameters , subfamily , quality \n\n'.upper())
for idSeq, infosSeq in diQueriesSeq.iteritems():
outPutFile.write(idSeq)
i = 0
for liste in infosSeq:
outPutFile.write(',' + param[i] + ',')
for Class in list(set(liste[0])) : outPutFile.write(Class + ' ')
if len(liste[1]) > 0:
outPutFile.write(',' + liste[1][0] + '\n')
else: outPutFile.write(', ' + '\n')
i +=1
outPutFile.write('\n')
#For the new family
outPutFile.write('\n\n new families \n\n\n'.upper())
for File, Concept in diFile_concepts.iteritems():
#=======================================================================================
numFile = File[File.find('_')+1:]
xfileName = chemin+"/"+fastaFileName+'_'+param[int(numFile)-1]+'_plma.dot'
diBlocks = getBlocks(xfileName)
seqAndBlocks = getSeqAndInvolvedInBlocks(diNumSeqAndIdSeq,diBlocks)
#=======================================================================================
if Concept != {}:
numFile = File[File.find('_')+1:]
outPutFile.write(File + ": param : " + param[int(numFile) - 1]
+ ",Concepts ,Members,Number of sequences,Number of blocks, interesting blocks\n")
for numConcept, conceptInfos in Concept.iteritems() :
if conceptInfos[1] !=[]:
outPutFile.write(','+ numConcept + ', ,'+ str(len(conceptInfos[1]))
+','+ str(len(conceptInfos[0])) +'\n')
for seq in conceptInfos[1]:
suite_of_block = ''
for numBlock in list(set(conceptInfos[0])):
if numBlock in seqAndBlocks[seq].keys():
suite_of_block +=seqAndBlocks[seq][numBlock]+' '
outPutFile.write(", ,"+ seq +',,,'+ suite_of_block+ "\n")
outPutFile.write('\n')
outPutFile.close()
#########################################################################################################
def getIdSeqNumSeqAndColrs(fas,attr):
"""
This function returns two dictionaries. In the first (diQueriesSeq) the keys are the sequence ids and the
values are the comments for each sequence. In the second (diNumSeqAndIdSeq) the keys are the sequence numbers
in the PLMA file and the values are the identifiers of the corresponding sequences.
"""
with open(fas, 'r') as fFile:
fastaFile=fFile.readlines()
fFile.close()
with open(attr, 'r') as aFile:
attrFile=aFile.readlines()
aFile.close()
diQueriesSeq, diNumSeqAndIdSeq, numSeq = {}, {}, 0
for fLine in fastaFile:
if fLine[0] == '>':
numSeq += 1
if '|' in fLine:
idSeq = fLine.split('|')[1].strip()
else:
idSeq = fLine[1:].strip()
diQueriesSeq[idSeq] = ''
diNumSeqAndIdSeq[str(numSeq)] = idSeq
for aLine in attrFile:
if 'range=' in aLine and 'comments=' in aLine:
borneInf = int(aLine.split('"')[1].split('-')[0])
borneSup = int(aLine.split('"')[1].split('-')[1])
if (borneInf <= numSeq and numSeq <= borneSup):
diQueriesSeq[idSeq] = aLine.split('"')[5]
return diQueriesSeq, diNumSeqAndIdSeq
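#Illustrative sketch (hypothetical file names and data, not from an actual run): given a FASTA header
#such as ">sp|P12345|QUERY_1" counted as sequence 1, together with an attribute line like
#   range="1-1" color="red" comments="familyA"
#a call of the form
#   diQueriesSeq, diNumSeqAndIdSeq = getIdSeqNumSeqAndColrs("queries.fasta", "queries.attr")
#would be expected to yield roughly
#   diQueriesSeq     == {'P12345': 'familyA'}
#   diNumSeqAndIdSeq == {'1': 'P12345'}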
#################################################################################################
def getBlocks(dotFile):
"""
This function returns a dictionary of all the PLMA blocks contained in a dot file
"""
with open(dotFile, 'r') as fd:
dotfile = fd.readlines()
subClustersDico = {}
concatDotFile = reduce(lambda line1, line2: line1.strip()+line2.strip(), dotfile)
subClusters = concatDotFile.split('subgraph cluster_')
for subCluster in subClusters[3:]:
subClusterTemp = subCluster.split('{')[1].split('"];')[:-1]
tmp = subClusterTemp[0].strip().split(';')[2]
subClusterTemp[0] = tmp
subClustersDico[subCluster.split('{')[0]] = subClusterTemp
lastSubCluster = subClusters[len(subClusters)-1:]
lastSubClusterTemp = lastSubCluster[0].split('{')[1].split('}')[0].split('"];')[:-1]
tmp = lastSubClusterTemp[0].strip().split(';')[2]
lastSubClusterTemp[0] = tmp
subClustersDico[lastSubCluster[0].split('{')[0]] = lastSubClusterTemp
return subClustersDico
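#Illustrative sketch (hypothetical dot excerpt): a PLMA cluster written as
#   subgraph cluster_7 { node [shape = record]; color = blue; "(3, 12, 4)" [label = "LIKE"]; }
#would be expected to produce an entry mapping the block number to its node declarations, roughly
#   {'7': ['"(3, 12, 4)" [label = "LIKE'], ...}
#so that the label of each block can later be recovered by splitting on 'label = "'.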
#################################################################################################
def getSeqAndInvolvedInBlocks(diNumSeq, diBlocks):
diSeqBlocks = {}
for numSeq, idSeq in diNumSeq.items():
dico = {}
for numblock, valueBlock in diBlocks.items():
for line in valueBlock:
if '"('+numSeq+', ' in line:
dico[numblock] = line.split('label = "')[1]
diSeqBlocks[idSeq] = dico
return diSeqBlocks
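#Illustrative sketch (hypothetical values, reusing the shapes returned by the two helpers above):
#   getSeqAndInvolvedInBlocks({'3': 'P12345'}, {'7': ['"(3, 12, 4)" [label = "LIKE']})
#would be expected to return {'P12345': {'7': 'LIKE'}}, i.e. for every sequence identifier the PLMA
#blocks it takes part in, together with the fragment it contributes to each block.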
##################################################################################################
def getNumSeqAndColrs(attribFile):
"""
This function will make it possible to recover the sequence numbers and the color of their families
"""
attributs = open(attribFile,'r')
dico = {}
for line in attributs.readlines():
if 'range=' in line:
ranger = line.split('"')[1]
borneInf, borneSup = int(ranger.split('-')[0]), int(ranger.split('-')[1])
color = line.split('"')[3]
if borneInf > borneSup:
error = "In the range section, the '-' has to find "
error += "between two numbers, and the first number "
error += "has to be smaller than the second one!"
printError(error)
elif borneInf == borneSup:
numSeq = borneInf
dico[str(numSeq)] = color
else:
for numSeq in range(borneInf, borneSup+1):
dico[str(numSeq)] = color
attributs.close()
return dico
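#Illustrative sketch (hypothetical attribute lines): a file containing
#   range="1-3" color="red" comments="familyA"
#   range="4-4" color="blue" comments="familyB"
#would be expected to yield {'1': 'red', '2': 'red', '3': 'red', '4': 'blue'}.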
#################################################################################################
def colorClassify(fas, attr, fileName, diQueriesSeq, diClassement, diConcept, param, counter, pathToProg):
    fastaFileName = fas.replace('.fasta', '')
plma_seq1, plma_seq2 = getIdSeqNumSeqAndColrs(fas, attr)
known_family = [family for family in list(set(plma_seq1.values())) if family != 'black']
plma_seq3 = getNumSeqAndColrs(attr)
colorNewFamily = "burlywood"
colorAmbiguous = "olive"
colorUnclassified = "black"
diColor_of_family ={}
for family in known_family:
colors = []
for numSeq in plma_seq3:
if plma_seq1[plma_seq2[numSeq]] == family.upper():
colors.append(plma_seq3[numSeq])
diColor_of_family[family] = list(set(colors))
colored_seq_by_family = {}
for numSeq in plma_seq3:
if plma_seq1[plma_seq2[numSeq]] != colorUnclassified:
colored_seq_by_family[numSeq] = []
colored_seq_by_family[numSeq].append(plma_seq3[numSeq])
    plma_seq2_temp = dict(plma_seq2) #copy of the numSeq -> idSeq mapping, kept before the inversion below
#Inverting a dictionary
invert_dict = dict([[v,k] for k,v in plma_seq2.items()])
plma_seq2 = invert_dict
for idSeq in plma_seq1:
if idSeq in diClassement:
numSeq = plma_seq2[idSeq]
colored_seq_by_family[numSeq] = []
for family, color_of_family in diColor_of_family.items():
if family.lower() in diClassement[idSeq]:
colored_seq_by_family[numSeq].append(color_of_family[0])
colored_seq_by_family_tmp = dict([[cle,val] for cle,val in colored_seq_by_family.items()])
#Give the color "colorNewFamily" for news families
for idSeq in diClassement:
for elem in diClassement[idSeq]:
if "new" in elem:
numSeq = plma_seq2[idSeq]
colored_seq_by_family[numSeq] = []
colored_seq_by_family[numSeq].append(colorNewFamily)
#Give the color "colorAmbiguous" for ambiguous
for numSeq, list_color in colored_seq_by_family.items():
if len(list_color) > 1:
colored_seq_by_family[numSeq] = []
colored_seq_by_family[numSeq].append(colorAmbiguous)
#pools of family
diFamily_by_colors = {}
list_tmp = [ elem[0] for elem in colored_seq_by_family.values() if elem != [] ]
if colorNewFamily in set(list_tmp):
diColor_of_family["new"] = [colorNewFamily]
#Reverse of the dictionary of families and their colors
invert_dict = dict([[v[0].lower(),k] for k,v in diColor_of_family.items()])
diColor_family = invert_dict
#A dictionary is created that contains the colors of the families and all the
#sequences belonging to families
for color_of_family in diColor_of_family.values():
NumSeqs = []
for numSeq, colorSeq in colored_seq_by_family.items():
if colorSeq != [] and colorSeq[0] == color_of_family[0]:
NumSeqs.append(numSeq)
diFamily_by_colors[color_of_family[0]] = NumSeqs
#Other unclassified sequences
unclassified_seqs, list_tmp2 = [], []
list_tmp1 = [ elem for elem in diFamily_by_colors.values()]
for liste in list_tmp1:
for elem in liste:
list_tmp2.append(elem)
list_tmp2 = list(set(list_tmp2))
for numSeq in plma_seq3:
if numSeq not in list_tmp2:
unclassified_seqs.append(numSeq)
#Looking for ambiguous sequences
ambiguous, reste_seqs, diClass = {}, {}, {}
for numSeq, tColor in colored_seq_by_family.items():
if numSeq in unclassified_seqs and tColor != []:
color = tColor[0]
ambiguous[numSeq] = color
elif numSeq in unclassified_seqs:
reste_seqs[numSeq] = colorUnclassified
for numSeq in unclassified_seqs:
color = colored_seq_by_family_tmp[numSeq]
if color != []: color = colored_seq_by_family_tmp[numSeq][0].lower()
else: color = ""
if color != "":
if numSeq in colored_seq_by_family_tmp:
classes = diColor_family[color]
for color in colored_seq_by_family_tmp[numSeq][1:]:
classes += ", " + diColor_family[color.lower()]
diClass[numSeq] = classes
#==================================================================================================================
#==================================================================================================================
dotInFile = "./"+fastaFileName+"_paloma/"+fastaFileName+"_"+param[counter]+"_plma.dot"
dotOutFile = "./"+fastaFileName+"_paloma/"+fastaFileName+"_"+param[counter]+"-col.dot"
#==================================================================================================================
#==================================================================================================================
dic_blocks = {}
lines = open(fileName, "r").readlines()
#Looking for the characteristic blocks for each family
for Class in diColor_of_family:
blocks_support = list(filter(lambda line: 'characteristic_block' in line and Class.lower() in line, lines))
blocks = []
for line in blocks_support:
block = line.split(",")[2].split(")")[0]
blocks.append(block)
dic_blocks[Class] = list(set(blocks))
diChar_blocks = {}
for Class, blocks in dic_blocks.items():
for block in blocks:
diChar_blocks[block] = Class
####################################################################################################################
#Creating of a dictionary that contains all the clusters of the plmadot
dotFile = open(dotInFile, "r").readlines()
subClustersDico, colorsSeq = {}, {}
concatDotFile = reduce(lambda line1, line2: line1.strip()+line2.strip(), dotFile)
subClusters = concatDotFile.split('subgraph cluster_')
for subCluster in subClusters[1:]:
subClusterTemp = subCluster.split('{')[1].split('"];')[:-1]
tmp = subClusterTemp[0].strip().split(';')[2]
subClusterTemp[0] = tmp
subClustersDico[subCluster.split('{')[0]] = subClusterTemp
lastSubCluster = subClusters[len(subClusters)-1:]
lastSubClusterTemp = lastSubCluster[0].split('{')[1].split('}')[0].split('"];')[:-1]
tmp = lastSubClusterTemp[0].strip().split(';')[2]
lastSubClusterTemp[0] = tmp
subClustersDico[lastSubCluster[0].split('{')[0]] = lastSubClusterTemp
infoSeqs = lastSubCluster[0].split('{')[1].split('}')[1].split('];')[:-1]
#===================================================================================================================
#===================================================================================================================
#The Input plmadot file
inputFile = open(dotInFile, "r")
#The output plmadot file
outputFile = open(dotOutFile, "w")
lines = inputFile.readlines()
for index, elem in enumerate(lines):
if "subgraph" in elem:
if elem.strip() == "subgraph cluster_1":
index1 = index
if elem.strip() == "subgraph cluster_2":
index2 = index
if elem.strip() == "subgraph cluster_3":
index3 = index
head = lines[:index1]
cluster1 = lines[index1:index2]
cluster2 = lines[index2:index3]
#The sequences numbers and their labels
diCluster1_tmp = {}
for line in cluster1:
if 'label' in line:
numSeq = line.split(",")[0].split('(')[1]
label = line.split(')"')[1]
diCluster1_tmp[numSeq] = label
diCluster2_tmp = {}
for line in cluster2:
if 'label' in line:
numSeq = line.split(",")[0].split('(')[1]
diCluster2_tmp[numSeq] = line
#===================================================================================================================
#===================================================================================================================
#The head of the dot is written
for line in head:
outputFile.write(line)
#===================================================================================================================
#===================================================================================================================
#Part for cluster 1
for line in cluster1:
if "cluster" in line:
outputFile.write(line)
outputFile.write("{\n")
elif "node" in line:
colorSeq = line.split('color =')[1].strip().split(',')[0]
line = line.replace(colorSeq.strip(), "black")
outputFile.write(line)
elif "style" in line:
style_of_cluster = line.split("style =")[1].split(";")[0]
line = line.replace(style_of_cluster.strip(), "filled")
outputFile.write(line)
#Writing for the sub-families (cluster 1)
i = 1
allNewBlocks = []
for color, NumSeqs in diFamily_by_colors.items():
if color != colorNewFamily:
outputFile.write("subgraph cluster_" + str(i) +"p1 \n")
outputFile.write("{\n")
outputFile.write("label = \"Family: "+ diColor_family[color.lower()] +"\nNumber: "+ str(i) +"\";\n")
outputFile.write("node [shape = record, color = black, fontcolor = black];\n")
for numSeq in NumSeqs:
if plma_seq2_temp[numSeq] in diQueriesSeq:
line = diCluster1_tmp[numSeq].replace("\"];", " [**]\"];")
outputFile.write('"('+numSeq+', 1, 0)"' + line)
else: outputFile.write('"('+numSeq+', 1, 0)"' + diCluster1_tmp[numSeq])
outputFile.write('}\n')
i += 1
#Case for pools of new families (if there are several)
else:
i = 1
for concept, infosConcept in diConcept.iteritems():
outputFile.write("subgraph cluster_new" + str(i) +" \n")
outputFile.write("{\n")
outputFile.write("label = \"Family: "+ diColor_family[color.lower()] + "\nNumber: "+ str(i)
+"\";\n")
outputFile.write("node [shape = record, color = black, fontcolor = black];\n")
for idSeq in infosConcept[1]:
numSeq = plma_seq2[idSeq]
if idSeq in diQueriesSeq:
line = diCluster1_tmp[numSeq].replace("\"];", " [**]\"];")
outputFile.write('"('+numSeq+', 1, 0)"' + line)
else: outputFile.write('"('+numSeq+', 1, 0)"' + diCluster1_tmp[numSeq])
outputFile.write('}\n')
allNewBlocks += list(set(infosConcept[0]))
i += 1
#We add the characteristic blocks of the new families
for bloc in allNewBlocks:
diChar_blocks[bloc] = "new"
#The rest of the sequences (cluster 1)
for line in cluster1:
if 'label' in line: numSeq = line.split(",")[0].split('(')[1]
if numSeq in unclassified_seqs:
color = colored_seq_by_family_tmp[numSeq]
if color != []:
color = colored_seq_by_family_tmp[numSeq][0].lower()
else: color = ""
if color != "":
if numSeq in colored_seq_by_family_tmp:
classes = diColor_family[color]
for color in colored_seq_by_family_tmp[numSeq][1:]:
classes += ", " + diColor_family[color.lower()]
line = line.replace(numSeq+ ':', "[" + classes.upper() +"] "+ numSeq+":")
if plma_seq2_temp[numSeq] in diQueriesSeq:
line = line.replace("\"];", " [**]\"];")
outputFile.write(line)
else:
if plma_seq2_temp[numSeq] in diQueriesSeq:
line = line.replace("\"];", " [**]\"];")
outputFile.write(line)
outputFile.write("}\n")
#=================================================================================================================
#=================================================================================================================
#Part for cluster2
for line in cluster2:
if "cluster" in line:
outputFile.write(line)
outputFile.write("{\n")
elif "node" in line:
colorSeq = line.split('color =')[1].strip().split(',')[0]
line = line.replace(colorSeq.strip(), "black")
outputFile.write(line)
elif "style" in line:
style_of_cluster = line.split("style =")[1].split(";")[0]
line = line.replace(style_of_cluster.strip(), "filled")
outputFile.write(line)
outputFile.write("fontcolor = gray;\n")
#Writing for the sub-families (cluster 2)
i = 1
for color, NumSeqs in diFamily_by_colors.items():
if color != colorNewFamily:
outputFile.write("subgraph cluster_" + str(i) +"p2 \n")
outputFile.write("{\n")
outputFile.write("node [shape = record,style = filled, color = "+color.lower()
+", fontcolor = black];\n")
outputFile.write("color = "+color.lower()+";\n")
for numSeq in NumSeqs:
outputFile.write(diCluster2_tmp[numSeq])
outputFile.write('}\n')
i += 1
else:
i = 1
for concept, infosConcept in diConcept.iteritems():
outputFile.write("subgraph cluster_new" + str(i) +"\n")
outputFile.write("{\n")
outputFile.write("node [shape = record,style = filled, color = "+color.lower()
+", fontcolor = black];\n")
outputFile.write("color = "+color.lower()+";\n")
for idSeq in infosConcept[1]:
numSeq = plma_seq2[idSeq]
outputFile.write(diCluster2_tmp[numSeq])
outputFile.write('}\n')
i += 1
#The rest of the sequences (cluster 2)
for line in cluster2:
if 'label' in line: numSeq = line.split(",")[0].split('(')[1]
if numSeq in unclassified_seqs: outputFile.write(line)
outputFile.write("}\n")
#=================================================================================================================
#=================================================================================================================
#Part for the rest of the clusters (PLMA blocks)
for numCluster, cluster in subClustersDico.items():
if numCluster in diChar_blocks:
outputFile.write("subgraph cluster_"+numCluster+"\n{\n")
outputFile.write("node [shape = record, style = filled, color = yellow, fontcolor = black];\n")
outputFile.write("color = "+diColor_of_family[diChar_blocks[numCluster]][0].lower()+";\n")
for line in cluster:
numSeq = line.split(",")[0].split("(")[1]
outputFile.write(line + "\"];\n")
outputFile.write("}\n")
elif numCluster not in ["1","2"]:
outputFile.write("subgraph cluster_"+numCluster+"\n{\n")
outputFile.write("node [shape = record, style = filled, color = yellow, fontcolor = black];\n")
outputFile.write("color = black;\n")
for line in cluster:
outputFile.write(line+"\"];\n")
outputFile.write("}\n")
#Part for arrows
for line in infoSeqs:
if '->' in line:
numSeqTemp, numSeq = line.split('label = ')[1], ''
if ':' in line:
numSeq = numSeqTemp.split(':')[0].strip('"')
else:
numSeq = numSeqTemp.split(',')[0]
colorSeq = line.split(', color =')[1].strip().split(',')[0]
if numSeq in ambiguous:
line = line.replace("fontsize = 8","fontsize = 15")
line = line.replace("label = " + numSeq+ ',', "label = "+ numSeq +"("+ diClass[numSeq].upper()+")\"")
line = line.replace(colorSeq.strip(), ambiguous[numSeq].lower())
elif numSeq in reste_seqs:
color = plma_seq3[numSeq].lower()
if color != colorUnclassified:
classe = diColor_family[color]
line = line.replace("label = "+ numSeq+ ',', "label = \""+ numSeq+"("+ classe.upper() +")\"")
line = line.replace("fontsize = 8","fontsize = 15")
line = line.replace(colorSeq.strip(), "black")
elif numSeq in colored_seq_by_family:
if numSeq in colored_seq_by_family_tmp and colored_seq_by_family_tmp[numSeq] != []:
color = plma_seq3[numSeq].lower()
line = line.replace("fontsize = 8","fontsize = 15")
if color != colorUnclassified:
classe = diColor_family[color]
line = line.replace("label = "+numSeq+ ',',"label = \""+ numSeq+" ("+ classe.upper() +")\"")
else:
line = line.replace("label = "+numSeq+ ',',"label = \""+ numSeq+" (?)\"")
elif colored_seq_by_family_tmp[numSeq] == []:
color = colored_seq_by_family[numSeq][0]
line = line.replace("fontsize = 8","fontsize = 15")
classe = diColor_family[color]
line = line.replace("label = "+numSeq+ ',',"label = \"" + numSeq+" (?)\"")
line = line.replace(colorSeq.strip(), colored_seq_by_family[numSeq][0].lower())
outputFile.write(line+"];\n")
outputFile.write("}\n")
inputFile.close()
outputFile.close()
#================================================================================================================
#================================================================================================================
#Converting the product dot file to pdf format
print commands.getoutput("python ./"+ pathToProg +"/plmadot2pdf.py -f ./"+fastaFileName+"_paloma/"+fastaFileName+"_"
+ param[counter] +"-col.dot")
print commands.getoutput("rm "+fastaFileName+"_paloma/"+fastaFileName+"_"+param[counter]+"-col.ps")
print commands.getoutput("mv ./"+fastaFileName+"_paloma/"+fastaFileName+"_"+param[counter]+"-col.pdf ./"
+fastaFileName+"_rClassif")
#main
if __name__ == '__main__':
main()
|
py | 1a3f155ad550ec5d577354f3af17976cd349bd09 | #!/usr/bin/env python
import os
import re
#Definitions
def run(files=None,verbose=True,overwrite=None,output=None,macros={},build='',compile_string=''):
l=create_file_objs(files,macros)
mod2fil=file_objs_to_mod_dict(file_objs=l)
depends=get_depends(fob=l,m2f=mod2fil)
if verbose:
for i in depends.keys():
print("\033[032m"+i+"\033[039m depends on :\033[034m")
for j in depends[i]: print("\t"+j)
print("\033[039m")
if output is None:
output = "makefile.dep"
tmp=write_depend(outfile=output,dep=depends,overwrite=overwrite,build=build,compile_string=compile_string)
return depends
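# Illustrative sketch (hypothetical file names, mirroring the CLI entry point below): run() can also
# be called directly from another build script, e.g.
#
#   deps = run(files=['main.f90', 'mod_a.f90'], verbose=False, overwrite=True,
#              output='makefile.dep', build='obj', compile_string='$(FC) $(FFLAGS) -c')
#
# which would write obj/-prefixed rules into makefile.dep and return the {source: [dependencies]} mapping.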
def write_depend(outfile="makefile.dep",dep=[],overwrite=False,build='',compile_string=''):
"Write the dependencies to outfile"
    #Warn before overwriting an existing output file
if os.path.exists(outfile):
if not(overwrite):
print("\033[031mWarning file exists.\033[039m")
opt=raw_input("Overwrite? Y... for yes.")
else:
opt="y"
if opt.lower().startswith("y"):
pass
else:
return
#Open file
f=open(outfile,'w')
f.write('# This file is generated automatically. DO NOT EDIT!\n')
for i in dep.keys():
fil,_=os.path.splitext(i)
# make each object file depend on it's source file
stri="\n"+os.path.join(build, fil+".o"+" : src/defines.inc src/third_party/mersenne_twister.o "+i)
for j in dep[i]:
fdep,_=os.path.splitext(j)
stri=stri+" \\\n\t"+os.path.join(build, fdep+".o")
stri=stri+"\n"
if compile_string:
stri=stri+"\t"+compile_string+" "+i
stri=stri+" -o "+os.path.join(build, fil+".o")
f.write(stri)
f.close()
return
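# Illustrative sketch (hypothetical sources and compile string): for mod_a.f90 using a module defined
# in mod_b.f90, with build='obj' and compile_string='$(FC) $(FFLAGS) -c', the stanza written to the
# dependency file would look roughly like
#
#   obj/mod_a.o : src/defines.inc src/third_party/mersenne_twister.o mod_a.f90 \
#   	obj/mod_b.o
#   	$(FC) $(FFLAGS) -c mod_a.f90 -o obj/mod_a.o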
def get_source(ext=[".f90",".F90"]):
"Return all files ending with any of ext"
tmp=os.listdir(".")
fil=[]
for i in ext:
fil.extend(filter(lambda x: x.endswith(i),tmp))
return fil
def create_file_objs(files=None, macros={}):
l=[]
if files is None:
files = get_source()
for i in files:
source_file = file_obj()
source_file.file_name = i
source_file.uses = get_uses(i,macros)
source_file.contains = get_contains(i)
l.append(source_file)
return l
def get_uses(infile=None, macros={}):
"Return which modules are used in infile after expanding macros"
p=re.compile("^\s*use\s*(?P<moduse>\w*)\s*(,)?\s*(only)?\s*(:)?.*?$",re.IGNORECASE).match
intrinsic = re.compile("^\s*use\s*(,)?\s*(only)?\s*,\s*intrinsic.*$",re.IGNORECASE).match
uses=[]
with open(infile,'r') as f:
t=f.readlines()
for i in t:
tmp=p(i)
if tmp and not intrinsic(i):
uses.append(tmp.group('moduse').strip())
# Remove duplicates
uniq_mods = list(set(uses))
for i, mod in enumerate(uniq_mods):
for k, v in macros.items():
if re.match(k, mod, re.IGNORECASE):
uniq_mods[i] = mod.replace(k,v)
return uniq_mods
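# Illustrative sketch (hypothetical Fortran lines): a statement such as
#   use mod_params, only: nbeads
# is matched by the pattern above and contributes 'mod_params', whereas
#   use, intrinsic :: iso_fortran_env
# is excluded by the 'intrinsic' pattern. A macros argument like {'mod_params': 'mod_params_mpi'}
# would rename the corresponding entry in the returned list.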
def get_contains(infile=None):
"Return all the modules that are in infile"
p=re.compile("^\s*module\s*(?P<modname>\w*)",re.IGNORECASE).match
contains=[]
with open(infile,'r') as f:
t=f.readlines()
for i in t:
tmp=p(i)
if tmp:
contains.append(tmp.group('modname').strip())
# Remove duplicates before returning
return list(set(contains))
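# Illustrative sketch (hypothetical source): a file declaring
#   module mod_params
# would be expected to contribute 'mod_params' to the returned list. Note that this simple pattern
# would also match a 'module procedure ...' line inside an interface block, so results may need
# filtering for such sources.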
def file_objs_to_mod_dict(file_objs=[]):
"Turn a list of file_objs in a dictionary, containing which modules depend on which files"
dic={}
for i in file_objs:
for j in i.contains:
dic[j.lower()]=i.file_name
return dic
def get_depends(fob=[],m2f=[]):
deps={}
for i in fob:
tmp=[]
for j in i.uses:
try:
tmp.append(m2f[j.lower()])
            except KeyError:
print("\033[031mWarning:\033[039m module \033[032m"+j+"\033[039m used in "+i.file_name+" not defined in any files. Skipping...")
deps[i.file_name]=tmp
return deps
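# Illustrative sketch (hypothetical objects): with m2f == {'mod_b': 'mod_b.f90'} and a file_obj for
# 'main.f90' whose uses == ['mod_b', 'mpi'], get_depends would be expected to return
#   {'main.f90': ['mod_b.f90']}
# after printing a warning that 'mpi' is not defined in any of the scanned files.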
class file_obj:
def __init__(self):
self.file_name=None
self.uses=None
self.contains=None
self.depends_on=None
#Script
if __name__ == "__main__":
import argparse
# Add command line arguments
parser = argparse.ArgumentParser(description='Generate Fortran dependencies')
parser.add_argument('-f','--files',nargs='+',help='Files to process')
parser.add_argument('-D',nargs='+',action='append',metavar='NAME=DESCRIPTION',
help="""The macro NAME is replaced by DEFINITION in 'use' statements""")
parser.add_argument('-b','--build',nargs=1,help='Build Directory (prepended to all files in output',
default='')
parser.add_argument('-o','--output',nargs=1,help='Output file')
parser.add_argument('-v','--verbose',action='store_true',help='explain what is done')
parser.add_argument('-w','--overwrite',action='store_true',help='Overwrite output file without warning')
parser.add_argument('-c','--compile-string',nargs=1,help='String to compile with')
# Parse the command line arguments
args = parser.parse_args()
# Assemble a dictionary out of the macro definitions
macros = {}
if args.D:
for arg in args.D:
for var in arg:
temp = var.split('=')
macros[temp[0]] = temp[1]
output = args.output[0] if args.output else None
build = args.build[0] if args.build else ''
compile_string = args.compile_string[0] if args.compile_string else ''
run(files=args.files, verbose=args.verbose, overwrite=args.overwrite,
macros=macros, output=output, build=build,
compile_string=compile_string)
|