ext (stringclasses, 9 values) | sha (stringlengths 40) | content (stringlengths 3–1.04M) |
---|---|---|
py | 7df808f80823e9a9117d18a574c0ce73cf534bd4 | import logging
from unittest import mock
from unittest.mock import MagicMock
from django.test import TestCase
from django.utils import timezone
from eth_account import Account
from gnosis.eth.ethereum_client import (
EthereumClient,
EthereumClientProvider,
EthereumNetwork,
)
from ...history.tests.utils import just_test_if_mainnet_node
from ...utils.redis import get_redis
from ..services import PriceService, PriceServiceProvider
from ..tasks import (
EthValueWithTimestamp,
calculate_token_eth_price_task,
fix_pool_tokens_task,
)
logger = logging.getLogger(__name__)
class TestTasks(TestCase):
def setUp(self) -> None:
PriceServiceProvider.del_singleton()
get_redis().flushall()
def tearDown(self) -> None:
get_redis().flushall()
@mock.patch(
"safe_transaction_service.tokens.tasks.get_ethereum_network",
return_value=EthereumNetwork.MAINNET,
)
def test_fix_pool_tokens_task(self, get_network_mock: MagicMock):
self.assertEqual(fix_pool_tokens_task.delay().result, 0)
get_network_mock.return_value = EthereumNetwork.RINKEBY
self.assertIsNone(fix_pool_tokens_task.delay().result)
@mock.patch.object(
PriceService, "get_token_eth_value", autospec=True, return_value=4815
)
@mock.patch.object(timezone, "now", return_value=timezone.now())
def test_calculate_token_eth_price_task(
self, timezone_now_mock: MagicMock, get_token_eth_value_mock: MagicMock
):
random_token_address = Account.create().address
random_redis_key = Account.create().address
expected = EthValueWithTimestamp(
get_token_eth_value_mock.return_value, timezone_now_mock.return_value
)
self.assertEqual(
calculate_token_eth_price_task.delay(
random_token_address, random_redis_key
).result,
expected,
)
# Check caching works even if we change the token_address
another_token_address = Account.create().address
self.assertEqual(
calculate_token_eth_price_task.delay(
another_token_address, random_redis_key
).result,
expected,
)
with self.settings(CELERY_ALWAYS_EAGER=False):
random_token_address = Account.create().address
random_redis_key = Account.create().address
calculate_token_eth_price_task.delay(random_token_address, random_redis_key)
def test_calculate_token_eth_price_task_without_mock(self):
mainnet_node_url = just_test_if_mainnet_node()
EthereumClientProvider.instance = EthereumClient(mainnet_node_url)
dai_address = "0x6B175474E89094C44Da98b954EedeAC495271d0F"
random_redis_key = Account.create().address
eth_value_with_timestamp = calculate_token_eth_price_task(
dai_address, random_redis_key
)
self.assertGreater(eth_value_with_timestamp.eth_value, 0.0)
pool_together_address = "0x334cBb5858417Aee161B53Ee0D5349cCF54514CF"
random_redis_key = Account.create().address
eth_value_with_timestamp = calculate_token_eth_price_task(
pool_together_address, random_redis_key
)
self.assertGreater(eth_value_with_timestamp.eth_value, 0.0)
random_token_address = Account.create().address
random_redis_key = Account.create().address
eth_value_with_timestamp = calculate_token_eth_price_task(
random_token_address, random_redis_key
)
self.assertEqual(eth_value_with_timestamp.eth_value, 0.0)
del EthereumClientProvider.instance
|
py | 7df80a1901d396aff9b92112d89eabfa45fca959 | # Copyright 2017 Palantir Technologies, Inc.
import logging
from pyls import hookimpl, uris
log = logging.getLogger(__name__)
@hookimpl
def pyls_definitions(config, document, position):
params = {k: v for k, v in config.plugin_settings('jedi_definition').items() if v is not None}
definitions = document.jedi_script(position).goto_assignments(**params)
definitions = [
d for d in definitions
if d.is_definition() and d.line is not None and d.column is not None and d.module_path is not None
]
return [{
'uri': uris.uri_with(document.uri, path=d.module_path),
'range': {
'start': {'line': d.line - 1, 'character': d.column},
'end': {'line': d.line - 1, 'character': d.column + len(d.name)}
}
} for d in definitions]
|
py | 7df80b5b0095e0178986a8bf5dc07a70ef4c832c | #!/usr/bin/env python3
import psutil
import datetime
import time
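# Watchdog loop: every 2 seconds, scan all processes and kill ssh/sshpass/sshd
# test helpers (or stray "sleep 1" commands) that have been alive for more than
# 6 seconds.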
while(1):
for p in psutil.process_iter():
try:
cmd = " ".join(p.cmdline())
# if("sudo -S" in cmd or "id -u" in cmd or "grep root" in cmd or "grep win" in cmd):
# if("nginx -c" in cmd or "nginx -s stop" in cmd):
if("ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p" in cmd or "sshpass -p" in cmd or
"sshd -f /etc/ssh/sshd_test_config" in cmd or "sleep 1" in cmd):
age = datetime.datetime.now() - datetime.datetime.fromtimestamp(p.create_time())
if(age.seconds > 6):
try:
print(cmd + " " + str(p.pid) + " " + str(age.seconds))
except UnicodeEncodeError:
continue
p.kill()
except psutil.NoSuchProcess:
continue
time.sleep(2)
|
py | 7df80b7751811a7a2c8133c67da585974e23be46 | from typing import List
import dgl
import torch
from PIL import Image
from dgl.nn.pytorch import GATConv
from torch import nn
from torchnlp.word_to_vector.glove import GloVe
from torchvision.models import resnet18
from torchvision.transforms import transforms
import constants
from constants import DATA_DIR, DEVICE, DUMP_DIR, GAT_HEAD, GAT_LAYER, GAT_INTER_DIM, ACT_FN, RESIDUAL
class QueryEncoderExpand(nn.Module):
"""
Weighted average for node initialization.
"""
EMPTY_IMAGE = torch.zeros(3, 64, 64)
transform = transforms.Compose([
transforms.CenterCrop(64),
transforms.ToTensor(),
transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))
])
def __init__(self, raw_data):
super().__init__()
self.raw_data = raw_data
self.vocab = self.raw_data.dialog_vocab
self.vocab_size = len(self.vocab)
self.images = raw_data.images
pretrained_embedding = self.get_pretrained_embedding()
self.word_embed = nn.Embedding(self.vocab_size, 300).from_pretrained(pretrained_embedding).to(DEVICE)
self.resnet = nn.Sequential(*list(resnet18(pretrained=True).children())[:-2]).to(DEVICE)
# self.image_fc = nn.Linear(2048, 300).to(DEVICE)
self.image_fc = nn.Linear(2048, 1024)
self.text_fc = nn.Linear(300, 1024)
self.sen_embed_weight = nn.Linear(300, 1)
self.utt_embed_weight = nn.Linear(300, 1)
self.dia_embed_weight = nn.Linear(300, 1)
self.apply(self._init_weights)
# The GATConv has its own initialization method.
self.gat = nn.ModuleList()
act_fn = {
"elu": torch.nn.ELU(),
"gelu": torch.nn.GELU(),
}[ACT_FN]
for i in range(GAT_LAYER):
if i == 0:
self.gat.append(GATConv(in_feats=1024, out_feats=GAT_INTER_DIM, num_heads=GAT_HEAD,
feat_drop=constants.GAT_FEAT_DROPOUT, attn_drop=constants.GAT_ATT_DROPOUT,
residual=RESIDUAL, activation=act_fn))
elif i < GAT_LAYER - 1:
self.gat.append(GATConv(in_feats=GAT_HEAD * GAT_INTER_DIM, out_feats=GAT_INTER_DIM,
feat_drop=constants.GAT_FEAT_DROPOUT, attn_drop=constants.GAT_ATT_DROPOUT,
num_heads=GAT_HEAD, residual=RESIDUAL, activation=act_fn))
else:
self.gat.append(GATConv(in_feats=GAT_HEAD * GAT_INTER_DIM, out_feats=512,
feat_drop=constants.GAT_FEAT_DROPOUT, attn_drop=constants.GAT_ATT_DROPOUT,
num_heads=1, residual=RESIDUAL, activation=None))
print(f"query encoder using DGL {self.__class__.__name__} is loaded.")
print(f"query encoder parameters: layer: {GAT_LAYER}\tinter_dim: {GAT_INTER_DIM}\thead_num: {GAT_HEAD}\t"
f"feature_dropout: {constants.GAT_FEAT_DROPOUT}\tatt_dropout: {constants.GAT_ATT_DROPOUT}\t"
f"act_fn: {ACT_FN}, residual: {RESIDUAL}")
def _init_weights(self, module):
""" Initialize the weights """
if isinstance(module, (nn.Linear, nn.Embedding)):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=0.02)
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
def get_pretrained_embedding(self):
pretrained_embedding = GloVe(is_include=lambda x: x in self.vocab, cache=str(DUMP_DIR / '.word_vectors_cache'))
embedding_weights = torch.zeros((self.vocab_size, 300))
for i, token in enumerate(self.vocab):
embedding_weights[i] = pretrained_embedding[token]
return embedding_weights
def forward(self, graph: dgl.DGLGraph, graph_word_num: List[int], graph_image_num: List[int],
all_words: torch.Tensor, all_images: List[int],
sentences: torch.Tensor, sentence_mask: torch.Tensor,
utterances: torch.Tensor, utterance_mask: torch.Tensor, session_ids: torch.Tensor):
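# Forward pass: embed all words and images, build attention-weighted sentence,
# utterance and dialog (session) representations, project them to the GAT input
# size, assemble the per-batch node feature matrix, run the GAT stack over the
# graph, and return the hidden state of each session node.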
all_words = all_words.to(DEVICE)
sentences = sentences.to(DEVICE)
sentence_mask = sentence_mask.to(DEVICE)
utterances = utterances.to(DEVICE)
utterance_mask = utterance_mask.to(DEVICE)
session_ids = session_ids.to(DEVICE)
graph = graph.to(DEVICE)
all_word_embedding = self.word_embed(all_words)
num_images = len(all_images)
image_embeddings = []
if num_images:
for image_id in all_images:
image_path = DATA_DIR / 'images' / self.images[image_id]
if image_id != 0 and image_path.is_file():
try:
image = Image.open(image_path).convert("RGB")
image = QueryEncoderExpand.transform(image)
except OSError:
image = QueryEncoderExpand.EMPTY_IMAGE
else:
image = QueryEncoderExpand.EMPTY_IMAGE
image_embeddings.append(image)
image_embeddings = torch.stack(image_embeddings).to(DEVICE)
image_embeddings = self.resnet(image_embeddings).view(num_images, -1)
image_embeddings = self.image_fc(image_embeddings)
batch, sent_num, word_num = sentences.size()
sentence_word_embed = self.word_embed(sentences.view(-1, word_num)).reshape(batch, sent_num, word_num, -1)
# sentence_word_embed = (sentence_word_embed * sentence_mask[:, :, :, None]).sum(dim=2)
valid_word_num = sentence_mask.sum(dim=2, keepdim=True) # (batch, sent_num, 1)
valid_sent_num = (valid_word_num.squeeze(-1) > 0).sum(dim=1) # (batch)
# valid_word_num[valid_word_num == 0] = 1.
# sentence_word_embed = sentence_word_embed / valid_word_num
sentence_word_weight = (self.sen_embed_weight(sentence_word_embed).squeeze(-1) + (1 - sentence_mask) * -10000.0).softmax(dim=-1)
sentence_word_embed = torch.einsum("bsw,bswh->bsh", sentence_word_weight, sentence_word_embed)
# (batch, utt_num=2, sent_num)
utt_num, utt_sent_num = utterances.size(1), utterances.size(2)
utterances = utterances.view(batch, utt_num * utt_sent_num)
utterance_index = utterances[:, :, None].expand(-1, -1, sentence_word_embed.size(-1))
utterance_embed = sentence_word_embed.gather(dim=1, index=utterance_index) # (batch, utt_num * utt_sent_num, h)
utterance_embed = utterance_embed.reshape(batch, utt_num, utt_sent_num, -1)
# utterance_embed = (utterance_embed * utterance_mask[:, :, :, None]).sum(dim=2) # (batch, utt_num=2, h)
# valid_utt_sent_num = utterance_mask.sum(dim=2, keepdim=True) # (batch, utt_num=2, 1)
# valid_utt_sent_num[valid_utt_sent_num == 0] = 1.
# utterance_embed = utterance_embed / valid_utt_sent_num
utterance_weight = (self.utt_embed_weight(utterance_embed).squeeze(-1) + (1 - utterance_mask) * -10000.0).softmax(dim=-1)
utterance_embed = torch.einsum("bsw,bswh->bsh", utterance_weight, utterance_embed)
# session_embed = utterance_embed.mean(dim=1, keepdim=True) # (batch, 1, h)
session_weight = self.dia_embed_weight(utterance_embed).squeeze(-1).softmax(dim=-1)
session_embed = torch.einsum("bs,bsh->bh", session_weight, utterance_embed).unsqueeze(1)
all_word_embedding = self.text_fc(all_word_embedding)
sentence_word_embed = self.text_fc(sentence_word_embed)
utterance_embed = self.text_fc(utterance_embed)
session_embed = self.text_fc(session_embed)
word_offset = 0
image_offset = 0
node_features = []
for batch_id in range(batch):
node_features.extend([
all_word_embedding[word_offset: word_offset + graph_word_num[batch_id]],
image_embeddings[image_offset: image_offset + graph_image_num[batch_id]],
sentence_word_embed[batch_id, :valid_sent_num[batch_id].item()],
utterance_embed[batch_id],
session_embed[batch_id]
])
word_offset = word_offset + graph_word_num[batch_id]
image_offset = image_offset + graph_image_num[batch_id]
# print(len(node_features))
node_features = torch.cat(node_features, dim=0)
num_nodes = node_features.size(0)
for layer_idx in range(GAT_LAYER):
node_features = self.gat[layer_idx](graph, node_features)
node_features = node_features.reshape(num_nodes, -1)
session_index = session_ids.unsqueeze(-1).expand(-1, node_features.size(-1))
session_hidden = node_features.gather(dim=0, index=session_index)
return session_hidden
|
py | 7df80df1f502c7a5b29d0331fcedc5fff9b7066e | # -*- coding: utf-8 -*-
"""
Created on Mon Jul 29 16:14:52 2019
@author: Phoenix
"""
import os
from .Stage import Stage
from .Map import Map
import Global
class WaterStage(Stage):
def __init__(self):
Stage.__init__(self)
self.active = True
self.id = "Test 2d Level"
self.images = ["image/Wasser_1.png",
"image/Wasser_2.png",
"image/Wasser_3.png",
"image/Wasser_4.png",
"image/Level1Final.png"]
self.textures = []
self.unordered_draw = True
for i in self.images:
index, width, height = Global.master.screen.loadImage(i)
vbo, vbo_count = Global.master.screen.create_image_vbo(width,height)
self.textures.append((index, width, height, vbo, vbo_count))
self.map = Map.createFromLevel(os.path.join(Global.master.dir,"image/level.bmp"))
self.do_draw_vbo = False
def update(self):
Stage.update(self)
def draw(self, screen):
if self.unordered_draw:
screen.disableImageDepth()
#screen.drawImageSimple(self.textures[4][0],0,0,5,5,-3)
#screen.drawImageSimple(self.textures[11][0],0,0,self.textures[11][1]*6/1920,self.textures[11][2]*6/1920,-4+constants.mue)#-2)
screen.draw_vbo_pos((0,0,0),self.textures[4][0],self.textures[4][3],self.textures[4][4])
#screen.draw_vbo_pos((0,0,-4+constants.mue),self.textures[11][0],self.textures[11][3],self.textures[11][4])
#self.drawVbo(screen)
if self.do_draw_vbo:
self.map.drawVbo(screen)
self.drawUnits(screen)
if self.player is not None:
self.player.draw(screen)
if self.unordered_draw:
screen.enableImageDepth() |
py | 7df80e0351d5ecfcf304afbc834e80d522668d11 | def FourierPower(db1Trace, inSampleRate, varargin):
'''
STILL NOT IMPLEMENTED IN PYTHON!!!
[DBWINLENSEC, INNSTEP, BLDOPLOT]) computes the spectral power of DB1TRACE
over time. DB1TRACE is divided into segments whose length is specified by
DBWINLENSEC and INSAMPLERATE. INNSTEP controls the overlap between
segments (e.g. if INNSTEP is 4, then segments are spaced with 1/4 of the
length of the window). Defautl for DBWINLENSEC is 0.5 sec. Default for
INNSTEP is 4. The power spectrum is returned in DB2POWER. In addition, the
indices of the center of each segment on DB1TRACE are returned in
IN1CNTRIDX.
The code is adapted from the help page 'power spectral density estimate
using fft' and is the same as DoFourierPowerSpectrum
2016-11-15 QP: Transcripted from DoFourierPowerSpectrum
'''
switch length(varargin)
case 0
dbWinLenSec = 0.5;
inNStep = 5;
blDoPlot = false;
case 1
dbWinLenSec = varargin{1};
inNStep = 5;
blDoPlot = false;
case 2
dbWinLenSec = varargin{1};
inNStep = varargin{2};
blDoPlot = false;
otherwise
dbWinLenSec = varargin{1};
inNStep = varargin{2};
blDoPlot = varargin{3};
end
%Checks that step divides the traces without remainder
inWinLen = round(dbWinLenSec * inSampleRate);
inStepLen = inWinLen / inNStep;
if mod(inStepLen, 1) ~= 0
error('The step taken does not divide the length into segments of equal length');
end
# Checks for the maximal number of chunks that can fit the trace and removes
# the points that might not be used
inNChunk = floor(length(db1Trace) / inStepLen);
db1Trace = db1Trace(1:inNChunk * inStepLen); %Removes unused points
inNChunk = inNChunk - (inNStep - 1);
# Substract the mean of the the trace in order to remove the zeros frequency
# componant
db1Trace = db1Trace - mean(db1Trace);
# Create a matrix, whose columns are samples of the signal whose length is
# windowLengthSec. Successive samples (columns) are overlapping and spaced
# by windowLength/step seconds. Each sample is then multiplied by a Hamming
# window.
in1ChunkBegIdx = ((1:inNChunk) * inStepLen) - (inStepLen - 1);
in1ChunkIdx = ((1:inWinLen) - 1)';
in2ChunkIdx = repmat(in1ChunkBegIdx, inWinLen, 1) + repmat(in1ChunkIdx, 1, inNChunk);
# Convolutes with a hamming taper
db1Taper = hamming(size(in2ChunkIdx, 1), 'periodic');
if size(in2ChunkIdx, 2) ~= 1
db2TaperTrace = db1Trace(in2ChunkIdx).*repmat(db1Taper , 1, size(in2ChunkIdx, 2));
else
db2TaperTrace = db1Trace(in2ChunkIdx)'.*repmat(db1Taper, 1, size(in2ChunkIdx, 2));
end
# Computes the FFT and calculate the spectrum as the modulus of the FFT
myFFT=fft(db2TaperTrace);
myFFT=myFFT(1:size(db2TaperTrace,1)/2+1,:);
# db2Power = (1/(inSampleRate*size(db2TaperTrace,1))).*abs(myFFT).^2;
db2Power = (1/(inSampleRate*sum(db1Taper))).*abs(myFFT).^2;
db2Power(2:end-1,:) = 2*db2Power(2:end-1,:);
# Gets the central index of each window
in1CntrIdx = ceil(median(in2ChunkIdx));
# Plots the result if needed
if blDoPlot
dbTraceLenSec = length(db1Trace)/inSampleRate;
inTopFreq = 120;
figure
imagesc(dbWinLenSec/2: dbWinLenSec/inNStep : dbTraceLenSec-(dbWinLenSec/2), 0:1/dbWinLenSec:inTopFreq, ...
10*log10(db2Power(1:inTopFreq*dbWinLenSec+1,:)));
xlabel('Time (s)')
ylabel('Frequency (Hz)')
set(gca, 'YDir', 'normal')
colorbar
end
return db2Power, in1CntrIdx |
py | 7df80ec01245a7fe820c79d5879458c4cd0a93cb | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Bring in all of the public TensorFlow interface into this module."""
from __future__ import absolute_import as _absolute_import
from __future__ import division as _division
from __future__ import print_function as _print_function
import os as _os
# pylint: disable=g-bad-import-order
from tensorflow.python import pywrap_tensorflow # pylint: disable=unused-import
from tensorflow.python.tools import component_api_helper as _component_api_helper
_component_api_helper.package_hook(
parent_package_str=__name__,
child_package_str=('tensorflow_estimator.python.estimator.api.estimator'))
# API IMPORTS PLACEHOLDER
from tensorflow.python.platform import flags # pylint: disable=g-import-not-at-top
app.flags = flags # pylint: disable=undefined-variable
|
py | 7df80fad1fe0523af3db0524bcd75243bbfab325 | """Methods to deal with IUPAC encode bases."""
IUPAC_BASES = (
"A", "C",
"T", "G",
"U",
"R", "Y",
"S", "W",
"K", "M",
"B", "D",
"H", "V",
"N")
def complement(base: str) -> (str):
"""
Complement the base with respect to the IUPAC code.
:param base: the base to complement
:return: the complemented base
"""
# assert base in IUPAC_BASES
if base == 'A':
return 'T'
elif base == 'T':
return 'A'
elif base == 'G':
return 'C'
elif base == 'C':
return 'G'
elif base == 'U':
return 'A'
elif base == 'N':
return 'N'
elif base == 'R':
return 'Y'
elif base == 'Y':
return 'R'
elif base == 'W':
return 'S'
elif base == 'S':
return 'W'
elif base == 'K':
return 'M'
elif base == 'M':
return 'K'
elif base == 'B': # CGT
return 'V' # GCT
elif base == 'D': # AGT
return 'H' # TCA
elif base == 'H': # ACT
return 'D' # TGA
elif base == 'V': # ACG
return 'B' # TGC
else:
raise ValueError(
"base {0} is not present in the IUPAC alphabet".format(
base))
def reverse_complement(seq: str) -> (str):
"""
Reverse and complement a sequence.
:param seq: the sequence to reverse complement
:return: the reverse complemented sequence
"""
retval = [complement(b) for b in seq]
retval.reverse()
return ''.join(retval)
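# For example: reverse_complement("ACGT") -> "ACGT", reverse_complement("RYN") -> "NRY"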
def to_regexp(seq: str) -> (str):
"""
Convert IUPAC to regular expresions.
Decodes a sequence which is IUPAC and convert
this to a regular expression friendly sequence.
:param seq: the sequence to encode
:return: the regular expression
"""
# convert IUPAC bases
seq = seq.replace('R', '[AG]')
seq = seq.replace('Y', '[CT]')
seq = seq.replace('S', '[GC]')
seq = seq.replace('W', '[AT]')
seq = seq.replace('K', '[GT]')
seq = seq.replace('M', '[AC]')
seq = seq.replace('B', '[CGT]')
seq = seq.replace('D', '[AGT]')
seq = seq.replace('H', '[ACT]')
seq = seq.replace('V', '[ACG]')
seq = seq.replace('N', '.')
# return the sequence
return seq
def expand_iupac(base: str, fill_n: bool=False) -> (str):
"""
Expand the IUPAC base
:param base: the IUPAC base to expand
:param fill_n: should we fill N or leave it empty
:return: a string with all the primary
bases that are encoded by the
IUPAC bases
"""
if base == "N":
if not fill_n:
return ""
else:
return "ACGT"
elif base == "V":
return "ACG"
elif base == "H":
return "ACT"
elif base == "D":
return "AGT"
elif base == "B":
return "CGT"
elif base == "M":
return "AC"
elif base == "K":
return "GT"
elif base == "S":
return "GC"
elif base == "W":
return "AT"
elif base == "R":
return "AG"
elif base == "Y":
return "CT"
else:
return base
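# For example: expand_iupac("R") -> "AG"; expand_iupac("N") -> "" (or "ACGT" with fill_n=True)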
def to_list(seq: str, fill_n: bool= False) -> (list):
"""Convert a sequence to a list."""
retval = []
for base in seq:
retval.append(expand_iupac(base, fill_n))
return retval
|
py | 7df80fd928104c53b70dae35d414475dfb821c58 | """
WSGI config for automated_survey project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
from whitenoise.django import DjangoWhiteNoise
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "twilio_sample_project.settings.production")
application = get_wsgi_application()
application = DjangoWhiteNoise(application)
|
py | 7df8109c5f21035cc8abbbb9e6126b80966945ea | from flask import Flask, render_template #fá render template
from markupsafe import escape
# List of news items for the news page
frettir = [
["0","Latur og kærulaus","Wim Kieft, fyrrverandi landsliðsmaður Hollands í knattspyrnu, skaut föstum skotum að Virgil van Dijk, varnarmanni Liverpool, í viðtali við hollenska miðilinn De Telegraaf á dögunum. Kieft er 57 ára gamall í dag en hann lék með liðum á borð við Ajax, Torino, PSV og Bordeaux á sínum ferli og þá lék hann 42 landsleiki fyrir Holland frá 1981 til 1993. Van Dijk virkar eins og hann sé með hausinn einhversstaðar allt annarsstaðar en hann á að vera,“ sagði Kieft í samtali við hollenska miðilinn.\nHann hagar sér eins og útbrunnin stjarna, bæði með Liverpool og hollenska landsliðinu og mér finnst hann ekki leggja nærri því jafn mikið á sig og liðsfélagar hans gera í leikjum. Hann er latur og kærulaus í þokkabót og hleypur oft á tíðum frá boltamanninum í stað þess að gefa sig allan í tæklingarnar.\nÞað tók miðvörðinn langan tíma að komast í fremstu röð en hann hefur verið einn sá besti undanfarin tvö tímabil. Hann má ekki hætta núna og þarf að halda áfram að vera gagnrýninn á sjálfan sig. Hann þarf að stíga upp og það þarf að hrista hann duglega því hann hefur verið langt frá sínu besta í fyrstu tveimur leikjum tímabilsins með Liverpool,“ bætti Kieft við.","Höfundur"],
["1","Helgi Björns tekur við Borginni","Þau stórtíðindi berast úr veitingageiranum að sjálfur Helgi Björns sé að taka við rekstri á veitingarými Hótels Borgar en þar er meðal annars að finna hinn fornfræga Gyllta sal. Helgi greinir frá þessu í helgarblaði Fréttablaðsins en þar segist hann ekki hafa geta skorast undan þegar tækifærið bauðst. Hann hyggst, að eigin sögn, hefja Borgina aftur til fyrri dýrðar ásamt Guðfinni Karlssyni veitingamanni. Það verði dansað á ný í Gyllta salnum og væntanlega boðið upp á ítalskan mat með.","Höfundur"],
["2","Safnplata með lögum Ragga Bjarna komin út","Í dag kom út safnplata sem inniheldur 45 lög í flutningi Ragnars Bjarnasonar frá glæsilegum 65 ára ferli hans. Platan ber titilinn Þannig týnist tíminn: Vinsælustu lög Ragga Bjarna. 22. september er fæðingardagur Ragnars en hann hefði orðið 86 ára í dag. Ragnar lést þann 25. febrúar á þessu ári 85 ára að aldri. Platan verður fyrst um sinn aðgengileg á Spotify og öðrum streymisveitum en þegar nær dregur jólum kemur hún út á vínyl og geisladiski.","Höfundur"],
["3","Karl kallar eftir Marshall-aðstoð við umhverfið","Karl Bretaprins kallar eftir því að heimurinn setji sig í stríðsstellingar til þess að takast á við loftslagsvána. Í ávarpi sínu á loftslagsviku New York-borgar sagði Karl að sú yfirvofandi hætta sem loftslagsváin skapaði, sem og tap líffræðilegs fjölbreytileika, myndi áhrif kórónuveirufaraldursins að engu gera.","Höfundur"]
]
app = Flask(__name__)
@app.route("/")
def home():
return render_template("index.html")
@app.route("/a-hluti")
def ahluti():
return render_template("kennitala.html")
@app.route("/b-hluti")
def bhluti():
return render_template("frettir.html", frettir=frettir)
@app.route("/frett/<int:id>")
def frett(id):
return render_template("frett.html",frett=frettir[id],nr=id)
@app.route("/ktala/<kt>") #seinni kt kemur fra kennitala template
def ktalan(kt): # setjum kt inn i þettana streng
summa = 0
for item in kt:
summa = summa + int(item)  # must convert to int because everything is still a string
return render_template("ktsum.html",kt=kt,summa=summa)  # returns both kt and the sum
@app.errorhandler(404)
def pagenotfound(error):
return render_template("pagenotfound.html"), 404
@app.errorhandler(500)
def servererror(error):
return render_template("servererror.html"), 500
if __name__ == '__main__':
app.run(debug=True, use_reloader=True)
|
py | 7df8112ef7ae37e8c0fe74403968b99d05eae439 | from datetime import date
maior = 0
menor = 0
ano = date.today().year
for c in range(1, 8):
dnasc = int(input('Em que ano nasceu a {}ª pessoa? - '.format(c)))
if dnasc + 18 >= ano:
maior += 1
else:
menor += 1
print('\nAo todo tivemos {} maiores de idade e {} menores de idade'.format(maior, menor))
|
py | 7df81181b92260e10cce0a9c26cbd321f1904733 | # -*- encoding: utf-8 -*-
#
# Copyright © 2020 Mergify SAS
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import httpx
import urllib3
from mergify_engine import logs
LOG = logs.getLogger(__name__)
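# urllib3 retry policy applied to requests made by the Client below: retry
# connection/read/status errors with exponential backoff, treat 5xx and 429
# responses as retryable, and do not raise on status so the caller sees the
# final response.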
RETRY = urllib3.Retry(
total=None,
redirect=3,
connect=5,
read=5,
status=5,
backoff_factor=0.2,
status_forcelist=list(range(500, 599)) + [429],
method_whitelist=[
"HEAD",
"TRACE",
"GET",
"PUT",
"OPTIONS",
"DELETE",
"POST",
"PATCH",
],
raise_on_status=False,
)
DEFAULT_CLIENT_OPTIONS = {
"headers": {
"Accept": "application/vnd.github.machine-man-preview+json",
"User-Agent": "Mergify/Python",
},
"trust_env": False,
}
class HTTPClientSideError(httpx.HTTPError):
@property
def message(self):
# TODO(sileht): do something with errors and documentation_url when present
# https://developer.github.com/v3/#client-errors
return self.response.json()["message"]
@property
def status_code(self):
return self.response.status_code
class HTTPNotFound(HTTPClientSideError):
pass
httpx.HTTPClientSideError = HTTPClientSideError
httpx.HTTPNotFound = HTTPNotFound
STATUS_CODE_TO_EXC = {404: HTTPNotFound}
class Client(httpx.Client):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# httpx doesn't support retries yet, but the sync client uses urllib3 like request
# https://github.com/encode/httpx/blob/master/httpx/_dispatch/urllib3.py#L105
real_urlopen = self.dispatch.pool.urlopen
def _mergify_patched_urlopen(*args, **kwargs):
kwargs["retries"] = RETRY
return real_urlopen(*args, **kwargs)
self.dispatch.pool.urlopen = _mergify_patched_urlopen
def request(self, method, url, *args, **kwargs):
LOG.debug("http request start", method=method, url=url)
try:
r = super().request(method, url, *args, **kwargs)
r.raise_for_status()
return r
except httpx.HTTPError as e:
if e.response and 400 <= e.response.status_code < 500:
exc_class = STATUS_CODE_TO_EXC.get(
e.response.status_code, HTTPClientSideError
)
message = e.args[0]
gh_message = e.response.json().get("message")
if gh_message:
message = f"{message}\nGitHub details: {gh_message}"
raise exc_class(
message, *e.args[1:], request=e.request, response=e.response,
)
raise
finally:
LOG.debug("http request end", method=method, url=url)
|
py | 7df8121f7f3480c7426c56ed10a2649420e712c2 | #!/usr/bin/python3
# -*-coding:utf-8-*-
# Input-related
# Python provides input(), which lets the user enter a string and stores it in a variable
name = input()
print(name)
name1 = input("请输入您的姓名:")
print(name1)
## Exercise: compute the area of a square
## Exercise: compute the area of a rectangle |
py | 7df8128b9d26dc81b30208fa240554c3c123e669 | # Copyright 2016-2020 Blue Marble Analytics LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from builtins import str
from collections import OrderedDict
from importlib import import_module
import os.path
import sys
import unittest
from tests.common_functions import create_abstract_model, \
add_components_and_load_data
from tests.project.operations.common_functions import \
get_project_operational_timepoints
TEST_DATA_DIRECTORY = \
os.path.join(os.path.dirname(__file__), "..", "..", "..", "test_data")
# Import prerequisite modules
PREREQUISITE_MODULE_NAMES = [
"temporal.operations.timepoints", "temporal.operations.horizons",
"temporal.investment.periods", "geography.load_zones",
"geography.spinning_reserves_balancing_areas", "project",
"project.capacity.capacity"]
NAME_OF_MODULE_BEING_TESTED = \
"project.operations.reserves.spinning_reserves"
IMPORTED_PREREQ_MODULES = list()
for mdl in PREREQUISITE_MODULE_NAMES:
try:
imported_module = import_module("." + str(mdl), package='gridpath')
IMPORTED_PREREQ_MODULES.append(imported_module)
except ImportError:
print("ERROR! Module " + str(mdl) + " not found.")
sys.exit(1)
# Import the module we'll test
try:
MODULE_BEING_TESTED = import_module("." + NAME_OF_MODULE_BEING_TESTED,
package='gridpath')
except ImportError:
print("ERROR! Couldn't import module " + NAME_OF_MODULE_BEING_TESTED +
" to test.")
class TestLFReservesUpProvision(unittest.TestCase):
"""
"""
def test_add_model_components(self):
"""
Test that there are no errors when adding model components
:return:
"""
create_abstract_model(prereq_modules=IMPORTED_PREREQ_MODULES,
module_to_test=MODULE_BEING_TESTED,
test_data_dir=TEST_DATA_DIRECTORY,
subproblem="",
stage=""
)
def test_load_model_data(self):
"""
Test that data are loaded with no errors
:return:
"""
add_components_and_load_data(prereq_modules=IMPORTED_PREREQ_MODULES,
module_to_test=MODULE_BEING_TESTED,
test_data_dir=TEST_DATA_DIRECTORY,
subproblem="",
stage=""
)
def test_data_loaded_correctly(self):
"""
Test that the data loaded are as expected
:return:
"""
m, data = add_components_and_load_data(
prereq_modules=IMPORTED_PREREQ_MODULES,
module_to_test=MODULE_BEING_TESTED,
test_data_dir=TEST_DATA_DIRECTORY,
subproblem="",
stage=""
)
instance = m.create_instance(data)
# Set: SPINNING_RESERVES_PROJECTS
expected_projects = sorted([
"Gas_CCGT", "Gas_CCGT_New", "Gas_CCGT_New_Binary", "Gas_CCGT_z2",
"Battery", "Battery_Binary", "Battery_Specified", "Hydro", "Hydro_NonCurtailable"
])
actual_projects = sorted([
prj for prj in instance.SPINNING_RESERVES_PROJECTS
])
self.assertListEqual(expected_projects, actual_projects)
# Param: spinning_reserves_zone
expected_reserves_zone = OrderedDict(sorted(
{"Gas_CCGT": "Zone1", "Gas_CCGT_New": "Zone1",
"Gas_CCGT_New_Binary": "Zone1",
"Gas_CCGT_z2": "Zone2", "Battery": "Zone1", "Battery_Binary": "Zone1",
"Battery_Specified": "Zone1", "Hydro": "Zone1",
"Hydro_NonCurtailable": "Zone1"}.items()
)
)
actual_reserves_zone = OrderedDict(sorted(
{prj: instance.spinning_reserves_zone[prj]
for prj in instance.SPINNING_RESERVES_PROJECTS}.items()
)
)
self.assertDictEqual(expected_reserves_zone, actual_reserves_zone)
# Set: SPINNING_RESERVES_PRJ_OPR_TMPS
expected_prj_op_tmps = sorted(
get_project_operational_timepoints(expected_projects)
)
actual_prj_op_tmps = sorted([
(prj, tmp) for (prj, tmp) in
instance.SPINNING_RESERVES_PRJ_OPR_TMPS
])
self.assertListEqual(expected_prj_op_tmps, actual_prj_op_tmps)
# Param: spinning_reserves_derate (defaults to 1 if not specified)
expected_derate = OrderedDict(sorted(
{"Battery": 1, "Battery_Binary": 1, "Battery_Specified": 0.5,
"Gas_CCGT": 1,
"Gas_CCGT_New": 1, "Gas_CCGT_New_Binary": 1,
"Gas_CCGT_z2": 1, "Hydro": 1, "Hydro_NonCurtailable": 1}.items()
)
)
actual_derate = OrderedDict(sorted(
{prj: instance.spinning_reserves_derate[prj]
for prj in instance.SPINNING_RESERVES_PROJECTS}.items()
)
)
self.assertDictEqual(expected_derate, actual_derate)
# Param: spinning_reserves_reserve_to_energy_adjustment
# (defaults to 0 if not specified)
expected_adjustment = OrderedDict(sorted(
{"Zone1": 0.1, "Zone2": 0}.items()
)
)
actual_adjustment = OrderedDict(sorted(
{z: instance.
spinning_reserves_reserve_to_energy_adjustment[z]
for z in instance.SPINNING_RESERVES_ZONES}.items()
)
)
self.assertDictEqual(expected_adjustment, actual_adjustment)
if __name__ == "__main__":
unittest.main()
|
py | 7df8132f20b85002920fb13ea28a0a3f8fc1b955 | #! /usr/bin/python2
#
# Copyright (c) 2017 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
# pylint: disable = missing-docstring
import os
import re
import commonl.testing
import tcfl.tc
import tcfl.tl
srcdir = os.path.dirname(__file__)
ttbd = commonl.testing.test_ttbd(config_files = [
os.path.join(srcdir, "conf_00_lib.py"),
os.path.join(srcdir, "conf_zephyr_tests.py"),
os.path.join(srcdir, "conf_zephyr_tests3.py"),
os.path.join(srcdir, "conf_07_zephyr.py"),
])
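# Register the Zephyr application builder with TCF so the targets exposed by the
# test ttbd instance above can build and deploy the Zephyr samples used below.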
tcfl.tc.target_c.extension_register(tcfl.app_zephyr.zephyr)
if not tcfl.app.driver_valid(tcfl.app_zephyr.app_zephyr.__name__):
tcfl.app.driver_add(tcfl.app_zephyr.app_zephyr)
@tcfl.tc.tags(**tcfl.tl.zephyr_tags())
@tcfl.tc.target(
ttbd.url_spec + " and zephyr_board",
app_zephyr = os.path.join(os.environ['ZEPHYR_BASE'],
"samples", "hello_world"))
class _01_simple(tcfl.tc.tc_c):
"""
Expect to find Hello World in the most simple way
"""
# app_zephyr provides start() methods start the targets
@staticmethod
def eval(target):
target.expect("Hello World! %s" % target.bsp,
# In the multiple BSP simulation we have, each
# BSP prints to a different console
console = target.bsp)
@tcfl.tc.tags(**tcfl.tl.zephyr_tags())
@tcfl.tc.target(
ttbd.url_spec + " and zephyr_board",
app_zephyr = os.path.join(os.environ['ZEPHYR_BASE'],
"samples", "hello_world"))
class _02_expecter_loop(tcfl.tc.tc_c):
"""
Expect to find Hello World but by setting hooks and running the
expecter loop, which shall return a pass exception when all the
expectations are met.
"""
@staticmethod
def setup(target):
target.on_console_rx("Hello World! %s" % target.bsp, 20,
console = target.kws.get('console', None))
# app_zephyr provides start() methods start the targets
def eval(self):
self.expecter.run()
@tcfl.tc.tags(**tcfl.tl.zephyr_tags())
@tcfl.tc.target(
ttbd.url_spec + " and zephyr_board",
app_zephyr = os.path.join(os.environ['ZEPHYR_BASE'],
"samples", "hello_world"))
@tcfl.tc.target(
ttbd.url_spec + " and zephyr_board",
app_zephyr = os.path.join(os.environ['ZEPHYR_BASE'],
"samples", "hello_world"))
class _03_multi_bsp_expecter_loop(tcfl.tc.tc_c):
"""
With a special multi-BSP target we defined in the configuration,
try to run multiple Hello Worlds on each BSP and ensure we find
them when running the expectation loop.
"""
@staticmethod
def setup(target, target1):
for t in target, target1:
t.on_console_rx("Hello World! %s" % t.bsp_model, 20,
console = t.kws.get('console', None))
# app_zephyr provides start() methods start the targets
def eval(self):
self.expecter.run()
@tcfl.tc.tags(**tcfl.tl.zephyr_tags())
@tcfl.tc.target(
ttbd.url_spec + " and zephyr_board",
app_zephyr = {
'arm': os.path.join(os.environ['ZEPHYR_BASE'],
"samples", "hello_world")
})
class _03_multi_bsp_only_one_bsp(tcfl.tc.tc_c):
"""
With a multi-BSP target, we run only in one and the rest get
stubbed automatically. We see nothing in the output of the others.
"""
@staticmethod
def setup(target):
for bsp in target.bsps_all:
if bsp == 'arm':
continue
# Any output on the other BSPs means the stub is not being silent
target.on_console_rx(re.compile(".+"),
result = 'fail', timeout = None,
# Each BSP has a console named after it
console = bsp)
@staticmethod
def eval(target):
target.expect("Hello World! arm")
@tcfl.tc.tags(**tcfl.tl.zephyr_tags())
@tcfl.tc.target(
ttbd.url_spec + " and zephyr_board",
app_zephyr = dict(
x86 = os.path.join(os.environ['ZEPHYR_BASE'],
"samples", "hello_world"),
arm = os.path.join(os.environ['ZEPHYR_BASE'],
"samples", "hello_world"),
nios2 = os.path.join(os.environ['ZEPHYR_BASE'],
"samples", "hello_world"),
)
)
class _04_multi_bsp_three_hello_world(tcfl.tc.tc_c):
"""
With a special three-BSP target we defined in the configuration,
try to run multiple Hello Worlds on each BSP and ensure we find
them when running the expectation loop.
"""
@staticmethod
def setup(target):
for bsp in target.bsps_all:
target.bsp_set(bsp)
target.on_console_rx("Hello World! %s" % target.bsp, 20,
console = bsp)
def eval(self):
self.expecter.run()
|
py | 7df813a8d5b6593beca8ca4c7d4ff5bcba421515 | from pymongo import MongoClient
from pymongo.errors import DuplicateKeyError
from app import bcrypt
import os
def main():
# Connect to the DB
connection = MongoClient(os.getenv("DB_URL"), 27017)
db = connection["drfms"]
collection = db['users']
# Ask for data to store
user = raw_input("Enter your username: ")
password = raw_input("Enter your password: ")
pass_hash = bcrypt.generate_password_hash(password)
# Insert the user in the DB
try:
collection.insert({"_id": user, "password": pass_hash})
print "User created."
except DuplicateKeyError:
print "User already present in DB."
if __name__ == '__main__':
main()
|
py | 7df813fa685bc7de2fcc80f25e96e9d3416d125b | import tkinter as tk
from .templates import templates
from .game.questions import questions
class GameScreen:
def __init__(self, mainframe, root):
self.mainframe = mainframe
self.start_card_loop()
self.main_menu = root
def start_card_loop(self):
templates.clear_frame(self.mainframe)
# TODO: function to generate a card loop without a timer and without looping
self.generate_question_card(
questions["1"]["enun"],
questions["1"]["alts"],
# questions["1"]["corr"]
)
# TODO: make a beautiful card generator
def generate_question_card(self, enun, alts):
self.quest_enun = templates.template_label(
self.mainframe,
enun,
(0, 0)
)
self.alt_a = templates.template_card_button(
self.mainframe,
alts["a"],
(0.0153, 0.2775)
)
self.alt_b = templates.template_card_button(
self.mainframe,
alts["b"],
(0.0153, 0.6565)
)
self.alt_c = templates.template_card_button(
self.mainframe,
alts["c"],
(0.71, 0.2775)
)
self.alt_a = templates.template_card_button(
self.mainframe,
alts["d"],
(0.71, 0.6565)
)
def exit_game_screen(self, event):
self.main_menu.config_main_menu()
|
py | 7df814f1d1c851a1a6d70b0d54fdb2438134a494 | # /*
#
# MIT License
#
# Copyright (c) 2021 AI4Finance
#
# Author: Berend Gort
#
# Year: 2021
#
# GitHub_link_author: https://github.com/Burntt
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import math
import threading
import time
from datetime import datetime, timedelta
import alpaca_trade_api as tradeapi
import numpy as np
import pandas as pd
import torch
from finrl_meta.data_processors.ccxt import CCXTProcessor
class AlpacaPaperTradingMultiCrypto():
def __init__(self, ticker_list, time_interval, drl_lib, agent, cwd, net_dim,
state_dim, action_dim, API_KEY, API_SECRET,
API_BASE_URL, tech_indicator_list,
max_stock=1e2, latency=None):
# load agent
self.drl_lib = drl_lib
if agent != 'ppo':
raise ValueError('Agent input is NOT supported yet.')
if drl_lib == 'elegantrl':
from elegantrl.agent import AgentPPO
# load agent
try:
agent = AgentPPO()
agent.init(net_dim, state_dim, action_dim)
agent.save_or_load_agent(cwd=cwd, if_save=False)
self.act = agent.act
self.device = agent.device
except:
raise ValueError('Fail to load agent!')
# connect to Alpaca trading API
try:
self.alpaca = tradeapi.REST(API_KEY, API_SECRET, API_BASE_URL, 'v2')
print('Connected to Alpaca API!')
except:
raise ValueError('Fail to connect Alpaca. Please check account info and internet connection.')
# CCXT uses different time_interval than Alpaca (confusing I know)
self.CCTX_time_interval = time_interval
# read trading time interval
if self.CCTX_time_interval == '1m':
self.time_interval = 60
elif self.CCTX_time_interval == '1h':
self.time_interval = 60 ** 2
elif self.CCTX_time_interval == '1d':
self.time_interval = 60 ** 2 * 24
else:
raise ValueError('Time interval input is NOT supported yet.')
# read trading settings
self.tech_indicator_list = tech_indicator_list
self.max_stock = max_stock
self.previous_candles = 250
self.lookback = 1
self.action_dim = action_dim
self.action_decimals = 2
# initialize account
self.stocks = np.asarray([0] * len(ticker_list)) # stocks holding
self.stocks_cd = np.zeros_like(self.stocks)
self.cash = None # cash record
self.stocks_df = pd.DataFrame(self.stocks, columns=['stocks'], index=ticker_list)
self.asset_list = []
self.price = np.asarray([0] * len(ticker_list))
stockUniverse = []
for stock in ticker_list:
stock = stock.replace("USDT", "USD")
stockUniverse.append(stock)
self.ticker_list = ticker_list
self.stockUniverse = stockUniverse
self.equities = []
def test_latency(self, test_times=10):
total_time = 0
for _ in range(test_times):
time0 = time.time()
self.get_state()
time1 = time.time()
temp_time = time1 - time0
total_time += temp_time
latency = total_time / test_times
print('latency for data processing: ', latency)
return latency
def run(self):
orders = self.alpaca.list_orders(status="open")
for order in orders:
self.alpaca.cancel_order(order.id)
while True:
print('\n' + '#################### NEW CANDLE ####################')
print('#################### NEW CANDLE ####################' + '\n')
trade = threading.Thread(target=self.trade)
trade.start()
trade.join()
last_equity = float(self.alpaca.get_account().last_equity)
cur_time = time.time()
self.equities.append([cur_time, last_equity])
time.sleep(self.time_interval)
def trade(self):
# Get state
state = self.get_state()
# Get action
if self.drl_lib != 'elegantrl':
raise ValueError('The DRL library input is NOT supported yet. Please check your input.')
with torch.no_grad():
s_tensor = torch.as_tensor((state,), device=self.device)
a_tensor = self.act(s_tensor)
action = a_tensor.detach().cpu().numpy()[0]
action = (action * self.max_stock).astype(float)
print('\n' + 'ACTION: ', action, '\n')
# Normalize action
action_norm_vector = []
for price in self.price:
print('PRICE: ', price)
x = math.floor(math.log(price, 10)) - 2
print('MAG: ', x)
action_norm_vector.append(1 / ((10) ** x))
print('NORM VEC: ', action_norm_vector)
for i in range(self.action_dim):
norm_vector_i = action_norm_vector[i]
action[i] = action[i] * norm_vector_i
print('\n' + 'NORMALIZED ACTION: ', action, '\n')
# Trade
self.stocks_cd += 1
min_action = 10 ** -(self.action_decimals) # stock_cd
for index in np.where(action < -min_action)[0]: # sell_index:
sell_num_shares = min(self.stocks[index], -action[index])
qty = abs(float(sell_num_shares))
qty = round(qty, self.action_decimals)
print('SELL, qty:', qty)
respSO = []
tSubmitOrder = threading.Thread(target=self.submitOrder(qty, self.stockUniverse[index], 'sell', respSO))
tSubmitOrder.start()
tSubmitOrder.join()
self.cash = float(self.alpaca.get_account().cash)
self.stocks_cd[index] = 0
for index in np.where(action > min_action)[0]: # buy_index:
tmp_cash = max(self.cash, 0)
print('current cash:', tmp_cash)
# Adjusted part to accept decimal places up to two
buy_num_shares = min(tmp_cash / self.price[index], abs(float(action[index])))
qty = abs(float(buy_num_shares))
qty = round(qty, self.action_decimals)
print('BUY, qty:', qty)
respSO = []
tSubmitOrder = threading.Thread(target=self.submitOrder(qty, self.stockUniverse[index], 'buy', respSO))
tSubmitOrder.start()
tSubmitOrder.join()
self.cash = float(self.alpaca.get_account().cash)
self.stocks_cd[index] = 0
print('Trade finished')
def get_state(self):
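# Build the RL state vector: download the latest candles through CCXT, compute
# technical indicators, read current cash and positions from Alpaca, then stack
# scaled cash, scaled holdings and normalized indicators into a single array.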
datetime_today = datetime.today()
if self.CCTX_time_interval == '1m':
start_date = (datetime_today - timedelta(minutes=self.previous_candles)).strftime("%Y%m%d %H:%M:%S")
end_date = datetime.today().strftime("%Y%m%d %H:%M:%S")
elif self.CCTX_time_interval == '1h':
start_date = (datetime_today - timedelta(hours=self.previous_candles)).strftime("%Y%m%d %H:%M:%S")
end_date = datetime.today().strftime("%Y%m%d %H:%M:%S")
elif self.CCTX_time_interval == '1d':
start_date = (datetime_today - timedelta(days=self.previous_candles)).strftime("%Y%m%d %H:%M:%S")
end_date = datetime.today().strftime("%Y%m%d %H:%M:%S")
print('fetching latest ' + str(self.previous_candles) + ' candles..')
CCXT_instance = CCXTProcessor()
CCXT_instance.download_data(self.ticker_list, start_date, end_date, self.CCTX_time_interval)
CCXT_instance.add_technical_indicators(self.ticker_list, self.tech_indicator_list)
price_array, tech_array, _ = CCXT_instance.df_to_ary(self.ticker_list, self.tech_indicator_list)
self.price_array = price_array
self.tech_array = tech_array
print('downloaded candles..')
positions = self.alpaca.list_positions()
stocks = [0] * len(self.stockUniverse)
for position in positions:
ind = self.stockUniverse.index(position.symbol)
stocks[ind] = (abs(int(float(position.qty))))
stocks = np.asarray(stocks, dtype=float)
cash = float(self.alpaca.get_account().cash)
self.cash = cash
self.stocks = stocks
# latest price and tech arrays
self.price = price_array[-1]
# Stack cash and stocks
state = np.hstack((self.cash * 2 ** -18, self.stocks * 2 ** -3))
for i in range(self.lookback):
tech_i = self.tech_array[-1 - i]
normalized_tech_i = tech_i * 2 ** -15
state = np.hstack((state, normalized_tech_i)).astype(np.float32)
print('\n' + 'STATE:')
print(state)
return state
def submitOrder(self, qty, stock, side, resp):
if (qty > 0):
try:
self.alpaca.submit_order(stock, qty, side, "market", "day")
print("Market order of | "
+ str(qty)
+ " "
+ stock
+ " " + side + " | completed.")
resp.append(True)
except Exception as e:
print('ALPACA API ERROR: ', e)
print("Order of | " + str(qty) + " " + stock + " " + side + " | did not go through.")
resp.append(False)
else:
print("Quantity is 0, order of | " + str(qty) + " " + stock + " " + side + " | not completed.")
resp.append(True)
@staticmethod
def sigmoid_sign(ary, thresh):
def sigmoid(x):
return 1 / (1 + np.exp(-x * np.e)) - 0.5
return sigmoid(ary / thresh) * thresh
|
py | 7df8153772ed3ef72f01930a8c1a02791e938b89 | #!/usr/bin/env python3
# === IMPORTS ===
import logging
import os
import redpipe
import unittest
from inovonics.cloud.datastore import InoRedis
# === GLOBALS ===
logging.basicConfig(level=logging.DEBUG)
# === FUNCTIONS ===
# === CLASSES ===
class TestCasesInoRedis(unittest.TestCase):
def __init__(self, *args, **kwargs):
self.logger = logging.getLogger(type(self).__name__)
super().__init__(*args, **kwargs)
def setUp(self):
self.redis_host = os.getenv('REDIS_HOST', 'localhost')
self.redis_port = os.getenv('REDIS_PORT', 6379)
self.redis_db = os.getenv('REDIS_DB', 0)
def test_connect_to_db(self):
# Connect to the database
dstore = InoRedis(host=self.redis_host, port=self.redis_port, db=self.redis_db)
# Flush the database
dstore.redis.flushdb()
# Write a value to a key
dstore.redis.set('TESTKEY', 'TESTVALUE')
# Read the value back from the key
test_value = dstore.redis.get('TESTKEY').decode('utf-8')
# Compare the values to make sure everything is happy
self.logger.debug("test_value: %s, type: %s", test_value, type(test_value))
self.assertEqual(test_value, 'TESTVALUE')
# Flush the database
dstore.redis.flushdb()
# Force deletion of the dstore object (to force garbage collection). This is due to an issue on the Travis-CI
# environment of instantiating the next test before the current test is garbage collected.
del dstore
def test_connect_redpipe(self):
# Connect to the database
dstore = InoRedis(host=self.redis_host, port=self.redis_port, db=self.redis_db)
# Flush the database
dstore.redis.flushdb()
# Add an item with a redpipe pipeline
with redpipe.autoexec() as pipe:
pipe.set('TESTKEY2', 'TESTVALUE2')
# Get the item with without a pipeline
test_value2 = None
with redpipe.autoexec() as pipe:
test_value2 = pipe.get('TESTKEY2')
test_value2 = test_value2.decode('utf-8')
# Compare the values to make sure everything is happy
self.logger.debug("test_value2: %s, type: %s", test_value2, type(test_value2))
self.assertEqual(test_value2, 'TESTVALUE2')
# Flush the database
dstore.redis.flushdb()
# Force deletion of the dstore object (to force garbage collection). This is due to an issue on the Travis-CI
# environment of instantiating the next test before the current test is garbage collected.
del dstore
def tearDown(self):
pass
# === MAIN ===
if __name__ == '__main__':
# THIS SCRIPT SHOULD NEVER BE CALLED DIRECTLY
# Call from the base directory of the package with the following command:
# python3 -m unittest tests.<name_of_module>.<name_of_class>
pass
|
py | 7df81558867659e7bff96f08eb2567151af00b3f | # https://github.com/hplgit/web4sciapps/blob/master/doc/src/web4sa/src-web4sa/apps/flask_apps/vib1/controller.py
from model import InputForm
from flask import Flask, render_template, request
from compute import compute
app = Flask(__name__)
@app.route('/', methods=['GET', 'POST'])
def index():
form = InputForm(request.form)
if request.method == 'POST' and form.validate():
result = compute(form.input_expression.data,
form.inference_rule.data,
form.feed.data,
form.output_expression.data)
else:
result = None
return render_template('view.html', form=form, result=result)
if __name__ == '__main__':
app.run(debug=True)
|
py | 7df8169fd2d73a04f3231c8cafb12541d6acc004 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2010 Cloud.com, Inc
# Copyright 2012 Cloudbase Solutions Srl / Pedro Navarro Perez
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Utility class for VM related operations on Hyper-V.
"""
import sys
import time
import uuid
if sys.platform == 'win32':
import wmi
from oslo.config import cfg
from nova import exception
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.virt.hyperv import constants
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
# TODO(alexpilotti): Move the exceptions to a separate module
# TODO(alexpilotti): Add more domain exceptions
class HyperVException(exception.NovaException):
def __init__(self, message=None):
super(HyperVException, self).__init__(message)
# TODO(alexpilotti): Add a storage exception base class
class VHDResizeException(HyperVException):
def __init__(self, message=None):
super(HyperVException, self).__init__(message)
class HyperVAuthorizationException(HyperVException):
def __init__(self, message=None):
super(HyperVException, self).__init__(message)
class UnsupportedConfigDriveFormatException(HyperVException):
def __init__(self, message=None):
super(HyperVException, self).__init__(message)
class VMUtils(object):
# These constants can be overridden by inherited classes
_PHYS_DISK_RES_SUB_TYPE = 'Microsoft Physical Disk Drive'
_DISK_RES_SUB_TYPE = 'Microsoft Synthetic Disk Drive'
_DVD_RES_SUB_TYPE = 'Microsoft Synthetic DVD Drive'
_IDE_DISK_RES_SUB_TYPE = 'Microsoft Virtual Hard Disk'
_IDE_DVD_RES_SUB_TYPE = 'Microsoft Virtual CD/DVD Disk'
_IDE_CTRL_RES_SUB_TYPE = 'Microsoft Emulated IDE Controller'
_SCSI_CTRL_RES_SUB_TYPE = 'Microsoft Synthetic SCSI Controller'
_SETTINGS_DEFINE_STATE_CLASS = 'Msvm_SettingsDefineState'
_VIRTUAL_SYSTEM_SETTING_DATA_CLASS = 'Msvm_VirtualSystemSettingData'
_RESOURCE_ALLOC_SETTING_DATA_CLASS = 'Msvm_ResourceAllocationSettingData'
_PROCESSOR_SETTING_DATA_CLASS = 'Msvm_ProcessorSettingData'
_MEMORY_SETTING_DATA_CLASS = 'Msvm_MemorySettingData'
_STORAGE_ALLOC_SETTING_DATA_CLASS = _RESOURCE_ALLOC_SETTING_DATA_CLASS
_SYNTHETIC_ETHERNET_PORT_SETTING_DATA_CLASS = \
'Msvm_SyntheticEthernetPortSettingData'
_AFFECTED_JOB_ELEMENT_CLASS = "Msvm_AffectedJobElement"
_vm_power_states_map = {constants.HYPERV_VM_STATE_ENABLED: 2,
constants.HYPERV_VM_STATE_DISABLED: 3,
constants.HYPERV_VM_STATE_REBOOT: 10,
constants.HYPERV_VM_STATE_PAUSED: 32768,
constants.HYPERV_VM_STATE_SUSPENDED: 32769}
def __init__(self, host='.'):
self._enabled_states_map = dict((v, k) for k, v in
self._vm_power_states_map.iteritems())
if sys.platform == 'win32':
self._init_hyperv_wmi_conn(host)
self._conn_cimv2 = wmi.WMI(moniker='//%s/root/cimv2' % host)
def _init_hyperv_wmi_conn(self, host):
self._conn = wmi.WMI(moniker='//%s/root/virtualization' % host)
def list_instances(self):
"""Return the names of all the instances known to Hyper-V."""
vm_names = [v.ElementName for v in
self._conn.Msvm_ComputerSystem(['ElementName'],
Caption="Virtual Machine")]
return vm_names
def get_vm_summary_info(self, vm_name):
vm = self._lookup_vm_check(vm_name)
vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
vmsettings = vm.associators(
wmi_association_class=self._SETTINGS_DEFINE_STATE_CLASS,
wmi_result_class=self._VIRTUAL_SYSTEM_SETTING_DATA_CLASS)
settings_paths = [v.path_() for v in vmsettings]
#See http://msdn.microsoft.com/en-us/library/cc160706%28VS.85%29.aspx
(ret_val, summary_info) = vs_man_svc.GetSummaryInformation(
[constants.VM_SUMMARY_NUM_PROCS,
constants.VM_SUMMARY_ENABLED_STATE,
constants.VM_SUMMARY_MEMORY_USAGE,
constants.VM_SUMMARY_UPTIME],
settings_paths)
if ret_val:
raise HyperVException(_('Cannot get VM summary data for: %s')
% vm_name)
si = summary_info[0]
memory_usage = None
if si.MemoryUsage is not None:
memory_usage = long(si.MemoryUsage)
up_time = None
if si.UpTime is not None:
up_time = long(si.UpTime)
enabled_state = self._enabled_states_map[si.EnabledState]
summary_info_dict = {'NumberOfProcessors': si.NumberOfProcessors,
'EnabledState': enabled_state,
'MemoryUsage': memory_usage,
'UpTime': up_time}
return summary_info_dict
def _lookup_vm_check(self, vm_name):
vm = self._lookup_vm(vm_name)
if not vm:
raise exception.NotFound(_('VM not found: %s') % vm_name)
return vm
def _lookup_vm(self, vm_name):
vms = self._conn.Msvm_ComputerSystem(ElementName=vm_name)
n = len(vms)
if n == 0:
return None
elif n > 1:
raise HyperVException(_('Duplicate VM name found: %s') % vm_name)
else:
return vms[0]
def vm_exists(self, vm_name):
return self._lookup_vm(vm_name) is not None
def _get_vm_setting_data(self, vm):
vmsettings = vm.associators(
wmi_result_class=self._VIRTUAL_SYSTEM_SETTING_DATA_CLASS)
# Avoid snapshots
return [s for s in vmsettings if s.SettingType == 3][0]
def _set_vm_memory(self, vm, vmsetting, memory_mb, dynamic_memory_ratio):
mem_settings = vmsetting.associators(
wmi_result_class=self._MEMORY_SETTING_DATA_CLASS)[0]
max_mem = long(memory_mb)
mem_settings.Limit = max_mem
if dynamic_memory_ratio > 1:
mem_settings.DynamicMemoryEnabled = True
# Must be a multiple of 2
reserved_mem = min(
long(max_mem / dynamic_memory_ratio) >> 1 << 1,
max_mem)
else:
mem_settings.DynamicMemoryEnabled = False
reserved_mem = max_mem
mem_settings.Reservation = reserved_mem
# Start with the minimum memory
mem_settings.VirtualQuantity = reserved_mem
self._modify_virt_resource(mem_settings, vm.path_())
def _set_vm_vcpus(self, vm, vmsetting, vcpus_num, limit_cpu_features):
procsetting = vmsetting.associators(
wmi_result_class=self._PROCESSOR_SETTING_DATA_CLASS)[0]
vcpus = long(vcpus_num)
procsetting.VirtualQuantity = vcpus
procsetting.Reservation = vcpus
procsetting.Limit = 100000 # static assignment to 100%
procsetting.LimitProcessorFeatures = limit_cpu_features
self._modify_virt_resource(procsetting, vm.path_())
def update_vm(self, vm_name, memory_mb, vcpus_num, limit_cpu_features,
dynamic_memory_ratio):
vm = self._lookup_vm_check(vm_name)
vmsetting = self._get_vm_setting_data(vm)
self._set_vm_memory(vm, vmsetting, memory_mb, dynamic_memory_ratio)
self._set_vm_vcpus(vm, vmsetting, vcpus_num, limit_cpu_features)
def check_admin_permissions(self):
if not self._conn.Msvm_VirtualSystemManagementService():
msg = _("The Windows account running nova-compute on this Hyper-V"
" host doesn't have the required permissions to create or"
" operate the virtual machine.")
raise HyperVAuthorizationException(msg)
def create_vm(self, vm_name, memory_mb, vcpus_num, limit_cpu_features,
dynamic_memory_ratio):
"""Creates a VM."""
vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
LOG.debug(_('Creating VM %s'), vm_name)
vm = self._create_vm_obj(vs_man_svc, vm_name)
vmsetting = self._get_vm_setting_data(vm)
LOG.debug(_('Setting memory for vm %s'), vm_name)
self._set_vm_memory(vm, vmsetting, memory_mb, dynamic_memory_ratio)
LOG.debug(_('Set vCPUs for vm %s'), vm_name)
self._set_vm_vcpus(vm, vmsetting, vcpus_num, limit_cpu_features)
def _create_vm_obj(self, vs_man_svc, vm_name):
vs_gs_data = self._conn.Msvm_VirtualSystemGlobalSettingData.new()
vs_gs_data.ElementName = vm_name
(job_path,
ret_val) = vs_man_svc.DefineVirtualSystem([], None,
vs_gs_data.GetText_(1))[1:]
self.check_ret_val(ret_val, job_path)
return self._lookup_vm_check(vm_name)
def get_vm_scsi_controller(self, vm_name):
vm = self._lookup_vm_check(vm_name)
vmsettings = vm.associators(
wmi_result_class=self._VIRTUAL_SYSTEM_SETTING_DATA_CLASS)
rasds = vmsettings[0].associators(
wmi_result_class=self._RESOURCE_ALLOC_SETTING_DATA_CLASS)
res = [r for r in rasds
if r.ResourceSubType == self._SCSI_CTRL_RES_SUB_TYPE][0]
return res.path_()
def _get_vm_ide_controller(self, vm, ctrller_addr):
vmsettings = vm.associators(
wmi_result_class=self._VIRTUAL_SYSTEM_SETTING_DATA_CLASS)
rasds = vmsettings[0].associators(
wmi_result_class=self._RESOURCE_ALLOC_SETTING_DATA_CLASS)
return [r for r in rasds
if r.ResourceSubType == self._IDE_CTRL_RES_SUB_TYPE
and r.Address == str(ctrller_addr)][0].path_()
def get_vm_ide_controller(self, vm_name, ctrller_addr):
vm = self._lookup_vm_check(vm_name)
return self._get_vm_ide_controller(vm, ctrller_addr)
def get_attached_disks_count(self, scsi_controller_path):
volumes = self._conn.query("SELECT * FROM %(class_name)s "
"WHERE ResourceSubType = "
"'%(res_sub_type)s' AND "
"Parent = '%(parent)s'" %
{"class_name":
self._RESOURCE_ALLOC_SETTING_DATA_CLASS,
'res_sub_type':
self._PHYS_DISK_RES_SUB_TYPE,
'parent':
scsi_controller_path.replace("'", "''")})
return len(volumes)
def _get_new_setting_data(self, class_name):
return self._conn.query("SELECT * FROM %s WHERE InstanceID "
"LIKE '%%\\Default'" % class_name)[0]
def _get_new_resource_setting_data(self, resource_sub_type,
class_name=None):
if class_name is None:
class_name = self._RESOURCE_ALLOC_SETTING_DATA_CLASS
return self._conn.query("SELECT * FROM %(class_name)s "
"WHERE ResourceSubType = "
"'%(res_sub_type)s' AND "
"InstanceID LIKE '%%\\Default'" %
{"class_name": class_name,
"res_sub_type": resource_sub_type})[0]
def attach_ide_drive(self, vm_name, path, ctrller_addr, drive_addr,
drive_type=constants.IDE_DISK):
"""Create an IDE drive and attach it to the vm."""
vm = self._lookup_vm_check(vm_name)
ctrller_path = self._get_vm_ide_controller(vm, ctrller_addr)
if drive_type == constants.IDE_DISK:
res_sub_type = self._DISK_RES_SUB_TYPE
elif drive_type == constants.IDE_DVD:
res_sub_type = self._DVD_RES_SUB_TYPE
drive = self._get_new_resource_setting_data(res_sub_type)
#Set the IDE ctrller as parent.
drive.Parent = ctrller_path
drive.Address = drive_addr
#Add the cloned disk drive object to the vm.
new_resources = self._add_virt_resource(drive, vm.path_())
drive_path = new_resources[0]
if drive_type == constants.IDE_DISK:
res_sub_type = self._IDE_DISK_RES_SUB_TYPE
elif drive_type == constants.IDE_DVD:
res_sub_type = self._IDE_DVD_RES_SUB_TYPE
res = self._get_new_resource_setting_data(res_sub_type)
#Set the new drive as the parent.
res.Parent = drive_path
res.Connection = [path]
#Add the new vhd object as a virtual hard disk to the vm.
self._add_virt_resource(res, vm.path_())
def create_scsi_controller(self, vm_name):
"""Create an iscsi controller ready to mount volumes."""
vm = self._lookup_vm_check(vm_name)
scsicontrl = self._get_new_resource_setting_data(
self._SCSI_CTRL_RES_SUB_TYPE)
scsicontrl.VirtualSystemIdentifiers = ['{' + str(uuid.uuid4()) + '}']
self._add_virt_resource(scsicontrl, vm.path_())
def attach_volume_to_controller(self, vm_name, controller_path, address,
mounted_disk_path):
"""Attach a volume to a controller."""
vm = self._lookup_vm_check(vm_name)
diskdrive = self._get_new_resource_setting_data(
self._PHYS_DISK_RES_SUB_TYPE)
diskdrive.Address = address
diskdrive.Parent = controller_path
diskdrive.HostResource = [mounted_disk_path]
self._add_virt_resource(diskdrive, vm.path_())
def set_nic_connection(self, vm_name, nic_name, vswitch_conn_data):
nic_data = self._get_nic_data_by_name(nic_name)
nic_data.Connection = [vswitch_conn_data]
vm = self._lookup_vm_check(vm_name)
self._modify_virt_resource(nic_data, vm.path_())
def _get_nic_data_by_name(self, name):
return self._conn.Msvm_SyntheticEthernetPortSettingData(
ElementName=name)[0]
def create_nic(self, vm_name, nic_name, mac_address):
"""Create a (synthetic) nic and attach it to the vm."""
#Create a new nic
new_nic_data = self._get_new_setting_data(
self._SYNTHETIC_ETHERNET_PORT_SETTING_DATA_CLASS)
#Configure the nic
new_nic_data.ElementName = nic_name
new_nic_data.Address = mac_address.replace(':', '')
new_nic_data.StaticMacAddress = 'True'
new_nic_data.VirtualSystemIdentifiers = ['{' + str(uuid.uuid4()) + '}']
#Add the new nic to the vm
vm = self._lookup_vm_check(vm_name)
self._add_virt_resource(new_nic_data, vm.path_())
def set_vm_state(self, vm_name, req_state):
"""Set the desired state of the VM."""
vm = self._lookup_vm_check(vm_name)
(job_path,
ret_val) = vm.RequestStateChange(self._vm_power_states_map[req_state])
#Invalid state for current operation (32775) typically means that
#the VM is already in the state requested
self.check_ret_val(ret_val, job_path, [0, 32775])
LOG.debug(_("Successfully changed vm state of %(vm_name)s "
"to %(req_state)s"),
{'vm_name': vm_name, 'req_state': req_state})
def _get_disk_resource_disk_path(self, disk_resource):
return disk_resource.Connection
def get_vm_storage_paths(self, vm_name):
vm = self._lookup_vm_check(vm_name)
(disk_resources, volume_resources) = self._get_vm_disks(vm)
volume_drives = []
for volume_resource in volume_resources:
drive_path = volume_resource.HostResource[0]
volume_drives.append(drive_path)
disk_files = []
for disk_resource in disk_resources:
disk_files.extend(
[c for c in self._get_disk_resource_disk_path(disk_resource)])
return (disk_files, volume_drives)
def _get_vm_disks(self, vm):
vmsettings = vm.associators(
wmi_result_class=self._VIRTUAL_SYSTEM_SETTING_DATA_CLASS)
rasds = vmsettings[0].associators(
wmi_result_class=self._STORAGE_ALLOC_SETTING_DATA_CLASS)
disk_resources = [r for r in rasds if
r.ResourceSubType in
[self._IDE_DISK_RES_SUB_TYPE,
self._IDE_DVD_RES_SUB_TYPE]]
volume_resources = [r for r in rasds if
r.ResourceSubType == self._PHYS_DISK_RES_SUB_TYPE]
return (disk_resources, volume_resources)
def destroy_vm(self, vm_name):
vm = self._lookup_vm_check(vm_name)
vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
#Remove the VM. Does not destroy disks.
(job_path, ret_val) = vs_man_svc.DestroyVirtualSystem(vm.path_())
self.check_ret_val(ret_val, job_path)
def check_ret_val(self, ret_val, job_path, success_values=[0]):
if ret_val == constants.WMI_JOB_STATUS_STARTED:
return self._wait_for_job(job_path)
elif ret_val not in success_values:
raise HyperVException(_('Operation failed with return value: %s')
% ret_val)
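        # Note: a return value of WMI_JOB_STATUS_STARTED means the operation was
        # started as an asynchronous WMI job, so completion is awaited in
        # _wait_for_job below; any other value outside success_values raises above.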
def _wait_for_job(self, job_path):
"""Poll WMI job state and wait for completion."""
job = self._get_wmi_obj(job_path)
while job.JobState == constants.WMI_JOB_STATE_RUNNING:
time.sleep(0.1)
job = self._get_wmi_obj(job_path)
if job.JobState != constants.WMI_JOB_STATE_COMPLETED:
job_state = job.JobState
if job.path().Class == "Msvm_ConcreteJob":
err_sum_desc = job.ErrorSummaryDescription
err_desc = job.ErrorDescription
err_code = job.ErrorCode
raise HyperVException(_("WMI job failed with status "
"%(job_state)d. Error details: "
"%(err_sum_desc)s - %(err_desc)s - "
"Error code: %(err_code)d") %
{'job_state': job_state,
'err_sum_desc': err_sum_desc,
'err_desc': err_desc,
'err_code': err_code})
else:
(error, ret_val) = job.GetError()
if not ret_val and error:
raise HyperVException(_("WMI job failed with status "
"%(job_state)d. Error details: "
"%(error)s") %
{'job_state': job_state,
'error': error})
else:
raise HyperVException(_("WMI job failed with status "
"%d. No error "
"description available") %
job_state)
desc = job.Description
elap = job.ElapsedTime
LOG.debug(_("WMI job succeeded: %(desc)s, Elapsed=%(elap)s"),
{'desc': desc, 'elap': elap})
return job
def _get_wmi_obj(self, path):
return wmi.WMI(moniker=path.replace('\\', '/'))
def _clone_wmi_obj(self, wmi_class, wmi_obj):
"""Clone a WMI object."""
cl = getattr(self._conn, wmi_class) # get the class
newinst = cl.new()
#Copy the properties from the original.
for prop in wmi_obj._properties:
if prop == "VirtualSystemIdentifiers":
strguid = []
strguid.append(str(uuid.uuid4()))
newinst.Properties_.Item(prop).Value = strguid
else:
prop_value = wmi_obj.Properties_.Item(prop).Value
newinst.Properties_.Item(prop).Value = prop_value
return newinst
def _add_virt_resource(self, res_setting_data, vm_path):
"""Adds a new resource to the VM."""
vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
res_xml = [res_setting_data.GetText_(1)]
(job_path,
new_resources,
ret_val) = vs_man_svc.AddVirtualSystemResources(res_xml, vm_path)
self.check_ret_val(ret_val, job_path)
return new_resources
def _modify_virt_resource(self, res_setting_data, vm_path):
"""Updates a VM resource."""
vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
(job_path, ret_val) = vs_man_svc.ModifyVirtualSystemResources(
ResourceSettingData=[res_setting_data.GetText_(1)],
ComputerSystem=vm_path)
self.check_ret_val(ret_val, job_path)
def _remove_virt_resource(self, res_setting_data, vm_path):
"""Removes a VM resource."""
vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
res_path = [res_setting_data.path_()]
(job_path, ret_val) = vs_man_svc.RemoveVirtualSystemResources(res_path,
vm_path)
self.check_ret_val(ret_val, job_path)
def take_vm_snapshot(self, vm_name):
vm = self._lookup_vm_check(vm_name)
vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
(job_path, ret_val,
snp_setting_data) = vs_man_svc.CreateVirtualSystemSnapshot(vm.path_())
self.check_ret_val(ret_val, job_path)
job_wmi_path = job_path.replace('\\', '/')
job = wmi.WMI(moniker=job_wmi_path)
snp_setting_data = job.associators(
wmi_result_class=self._VIRTUAL_SYSTEM_SETTING_DATA_CLASS)[0]
return snp_setting_data.path_()
def remove_vm_snapshot(self, snapshot_path):
vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
(job_path, ret_val) = vs_man_svc.RemoveVirtualSystemSnapshot(
snapshot_path)
self.check_ret_val(ret_val, job_path)
def detach_vm_disk(self, vm_name, disk_path):
vm = self._lookup_vm_check(vm_name)
physical_disk = self._get_mounted_disk_resource_from_path(disk_path)
if physical_disk:
self._remove_virt_resource(physical_disk, vm.path_())
def _get_mounted_disk_resource_from_path(self, disk_path):
physical_disks = self._conn.query("SELECT * FROM %(class_name)s "
"WHERE ResourceSubType = '%(res_sub_type)s'" %
{"class_name":
self._RESOURCE_ALLOC_SETTING_DATA_CLASS,
'res_sub_type':
self._PHYS_DISK_RES_SUB_TYPE})
for physical_disk in physical_disks:
if physical_disk.HostResource:
if physical_disk.HostResource[0].lower() == disk_path.lower():
return physical_disk
def get_mounted_disk_by_drive_number(self, device_number):
mounted_disks = self._conn.query("SELECT * FROM Msvm_DiskDrive "
"WHERE DriveNumber=" +
str(device_number))
if len(mounted_disks):
return mounted_disks[0].path_()
def get_controller_volume_paths(self, controller_path):
disks = self._conn.query("SELECT * FROM %(class_name)s "
"WHERE ResourceSubType = '%(res_sub_type)s' "
"AND Parent='%(parent)s'" %
{"class_name":
self._RESOURCE_ALLOC_SETTING_DATA_CLASS,
"res_sub_type":
self._PHYS_DISK_RES_SUB_TYPE,
"parent":
controller_path})
disk_data = {}
for disk in disks:
if disk.HostResource:
disk_data[disk.path().RelPath] = disk.HostResource[0]
return disk_data
def enable_vm_metrics_collection(self, vm_name):
raise NotImplementedError(_("Metrics collection is not supported on "
"this version of Hyper-V"))
|
py | 7df81739c4f14759efd6d7c85a7e8b4020f6de7d | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# pyre-strict
from typing import Any
import libcst as cst
from libcst import parse_statement
from libcst._nodes.tests.base import CSTNodeTest
from libcst.helpers import ensure_type
from libcst.metadata import CodeRange
from libcst.testing.utils import data_provider
class GlobalConstructionTest(CSTNodeTest):
@data_provider(
(
# Single global statement
{"node": cst.Global((cst.NameItem(cst.Name("a")),)), "code": "global a"},
# Multiple entries in global statement
{
"node": cst.Global(
(cst.NameItem(cst.Name("a")), cst.NameItem(cst.Name("b")))
),
"code": "global a, b",
},
# Whitespace rendering test
{
"node": cst.Global(
(
cst.NameItem(
cst.Name("a"),
comma=cst.Comma(
whitespace_before=cst.SimpleWhitespace(" "),
whitespace_after=cst.SimpleWhitespace(" "),
),
),
cst.NameItem(cst.Name("b")),
),
whitespace_after_global=cst.SimpleWhitespace(" "),
),
"code": "global a , b",
"expected_position": CodeRange((1, 0), (1, 15)),
},
)
)
def test_valid(self, **kwargs: Any) -> None:
self.validate_node(**kwargs)
@data_provider(
(
# Validate construction
{
"get_node": lambda: cst.Global(()),
"expected_re": "A Global statement must have at least one NameItem",
},
# Validate whitespace handling
{
"get_node": lambda: cst.Global(
(cst.NameItem(cst.Name("a")),),
whitespace_after_global=cst.SimpleWhitespace(""),
),
"expected_re": "Must have at least one space after 'global' keyword",
},
# Validate comma handling
{
"get_node": lambda: cst.Global(
(cst.NameItem(cst.Name("a"), comma=cst.Comma()),)
),
"expected_re": "The last NameItem in a Global cannot have a trailing comma",
},
# Validate paren handling
{
"get_node": lambda: cst.Global(
(
cst.NameItem(
cst.Name(
"a", lpar=(cst.LeftParen(),), rpar=(cst.RightParen(),)
)
),
)
),
"expected_re": "Cannot have parens around names in NameItem",
},
)
)
def test_invalid(self, **kwargs: Any) -> None:
self.assert_invalid(**kwargs)
class GlobalParsingTest(CSTNodeTest):
@data_provider(
(
# Single global statement
{"node": cst.Global((cst.NameItem(cst.Name("a")),)), "code": "global a"},
# Multiple entries in global statement
{
"node": cst.Global(
(
cst.NameItem(
cst.Name("a"),
comma=cst.Comma(whitespace_after=cst.SimpleWhitespace(" ")),
),
cst.NameItem(cst.Name("b")),
)
),
"code": "global a, b",
},
# Whitespace rendering test
{
"node": cst.Global(
(
cst.NameItem(
cst.Name("a"),
comma=cst.Comma(
whitespace_before=cst.SimpleWhitespace(" "),
whitespace_after=cst.SimpleWhitespace(" "),
),
),
cst.NameItem(cst.Name("b")),
),
whitespace_after_global=cst.SimpleWhitespace(" "),
),
"code": "global a , b",
},
)
)
def test_valid(self, **kwargs: Any) -> None:
self.validate_node(
parser=lambda code: ensure_type(
parse_statement(code), cst.SimpleStatementLine
).body[0],
**kwargs,
)
|
py | 7df817ce2491199a5dc90dcfdd47f252ee8d46e0 | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
import os
import sys
import django
if os.getenv("READTHEDOCS", default=False) == "True":
sys.path.insert(0, os.path.abspath(".."))
os.environ["DJANGO_READ_DOT_ENV_FILE"] = "True"
os.environ["USE_DOCKER"] = "no"
else:
sys.path.insert(0, os.path.abspath(".."))
os.environ["DATABASE_URL"] = "sqlite:///readthedocs.db"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.local")
django.setup()
# -- Project information -----------------------------------------------------
project = "FinalProject"
copyright = """2022, Daniel Roy Greenfeld"""
author = "Daniel Roy Greenfeld"
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.napoleon",
]
# Add any paths that contain templates here, relative to this directory.
# templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "alabaster"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ["_static"]
|
py | 7df8187163aa4888aea8d2019c043b3881631dae | #!/usr/bin/python
#
# The MIT License (MIT)
#
# Copyright (c) 2015 Piotr Minkina <projects[i.am.spammer]@piotrminkina.pl>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import
from os import path
from types import FunctionType
class FilterModule(object):
""" Common pathname manipulations """
def filters(self):
get = path.__dict__.get
return {'path_' + name: get(name)
for name in dir(path)
if isinstance(get(name), FunctionType)}
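# Illustrative use from a playbook or template (filter names follow the
# 'path_' + function-name scheme built above; the example path is hypothetical):
#   {{ "/etc/ansible/hosts" | path_basename }}   -> "hosts"
#   {{ "/etc/ansible/hosts" | path_dirname }}    -> "/etc/ansible"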
|
py | 7df818ab48d7e589a2f667f5369251d813965b49 | """
Calculating the loss with Categorical Cross Entropy
Associated with YT NNFS tutorial: https://www.youtube.com/watch?v=dEXPMQXoiLc
"""
import math
softmax_output = [0.7, 0.1, 0.2]
target_output = [1, 0, 0]
loss = -(math.log(softmax_output[0]) * target_output[0] +
math.log(softmax_output[1]) * target_output[1] +
math.log(softmax_output[2]) * target_output[2])
print(loss)
print(-math.log(0.7))
print(-math.log(0.5))
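# A minimal vectorized sketch of the same loss using NumPy (NumPy is an
# assumption here -- it is not imported by the original snippet):
import numpy as np
softmax_np = np.array(softmax_output)
target_np = np.array(target_output)
# with a one-hot target this reduces to -log of the predicted confidence
# at the hot index, i.e. -log(0.7) as printed above
vectorized_loss = -np.sum(np.log(softmax_np) * target_np)
print(vectorized_loss)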
|
py | 7df8192736e12829b8c28c9f260858d36e667227 | import pathlib
import subprocess
import click
class ContextObj():
def __init__(self, pkgname, release, architecture):
self._base = pathlib.Path.cwd()
self.pkgname = pkgname or self._base.name
self.release = release
self.architecture = architecture
def setup(self):
if self.release is None:
err = ('Undefined release.\n\nSet the release either with the '
'"--release" flag or by setting the environment variable '
'WOCK.\n\nExample:\nexport WOCK=el6\n')
raise click.ClickException(err)
mockdir = pathlib.Path('/etc/mock')
pattern = '{}?{}.cfg'.format(self.release, self.architecture)
configs = list(mockdir.glob(pattern))
if len(configs) == 0:
raise click.ClickException('no matching mock configs found')
if len(configs) > 1:
err = 'multiple matching mock configs found'
for config in configs:
err += '\n'
err += config.as_posix()
raise click.ClickException(err)
self._mockcfg = configs[0]
self.mockcfg = self._mockcfg.as_posix()
self.root = self._mockcfg.stem
def build_setup(self):
self._sources = self._base / 'SOURCES'
if not self._sources.is_dir():
self._sources.mkdir()
self.sources = self._sources.as_posix()
self._results = self._base / 'MOCK' / self.root
if not self._results.is_dir():
self._results.mkdir(parents=True)
self.results = self._results.as_posix()
self._spec = self._base / 'SPECS' / (self.pkgname + '.spec')
if not self._spec.exists():
err = 'spec file {} does not exist'.format(self.spec)
raise click.ClickException(err)
self.spec = self._spec.as_posix()
def get_sources(self):
command = ['spectool',
'--directory', self.sources,
'--get-files', self.spec]
click.secho(' '.join(command), fg='cyan')
self._run(command)
@property
def srpm(self):
command = ['rpm',
'--define', 'dist .{}'.format(self.release),
'--query',
'--queryformat', '%{name}-%{version}-%{release}\n',
'--specfile', self.spec]
output = subprocess.check_output(command,
universal_newlines=True,
stderr=subprocess.DEVNULL)
srpm_name = output.split('\n')[0] + '.src.rpm'
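        # e.g. "mypkg-1.2-3.el6.src.rpm" when release is "el6" (hypothetical package)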
self._srpm = self._results / srpm_name
if self._srpm.exists():
return self._srpm.as_posix()
else:
err = 'srpm {} does not exist'.format(self._srpm)
raise click.ClickException(err)
def _run(self, command):
with subprocess.Popen(command,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
bufsize=1,
universal_newlines=True) as process:
for line in process.stdout:
print(line, end='')
def init(self):
self.setup()
command = ['mock', '--root', self.root, '--init']
click.secho(' '.join(command), fg='cyan')
self._run(command)
def clean(self):
self.setup()
command = ['mock', '--root', self.root, '--clean']
click.secho(' '.join(command), fg='cyan')
self._run(command)
def install(self, packages):
self.setup()
command = ['mock', '--root', self.root, '--install']
command.extend(packages)
click.secho(' '.join(command), fg='cyan')
self._run(command)
def shell(self, task):
self.setup()
command = ['mock', '--root', self.root, '--shell', task]
click.secho(' '.join(command), fg='cyan')
self._run(command)
def build(self, just_srpm):
self.setup()
self.build_setup()
self.get_sources()
command = ['mock',
'--root', self.root,
'--define', 'dist .{}'.format(self.release),
'--buildsrpm',
'--spec', self.spec,
'--sources', self.sources,
'--resultdir', self.results,
'--no-clean',
'--no-cleanup-after']
click.secho(' '.join(command), fg='cyan')
self._run(command)
if not just_srpm:
command = ['mock',
'--root', self.root,
'--define', 'dist .{}'.format(self.release),
'--rebuild', self.srpm,
'--spec', self.spec,
'--sources', self.sources,
'--resultdir', self.results,
'--no-clean',
'--no-cleanup-after']
click.secho(' '.join(command), fg='cyan')
self._run(command)
|
py | 7df81a2516c57cf3626d641bbd190830fda97127 | # -*- coding: utf-8 -*-
import json
import time
from threading import RLock
from uuid import uuid4
try:
from queue import Queue, Empty
except ImportError:
from Queue import Queue, Empty
from ws4py.client.threadedclient import WebSocketClient
from pywebostv.discovery import discover
SIGNATURE = ("eyJhbGdvcml0aG0iOiJSU0EtU0hBMjU2Iiwia2V5SWQiOiJ0ZXN0LXNpZ25pbm" +
"ctY2VydCIsInNpZ25hdHVyZVZlcnNpb24iOjF9.hrVRgjCwXVvE2OOSpDZ58hR" +
"+59aFNwYDyjQgKk3auukd7pcegmE2CzPCa0bJ0ZsRAcKkCTJrWo5iDzNhMBWRy" +
"aMOv5zWSrthlf7G128qvIlpMT0YNY+n/FaOHE73uLrS/g7swl3/qH/BGFG2Hu4" +
"RlL48eb3lLKqTt2xKHdCs6Cd4RMfJPYnzgvI4BNrFUKsjkcu+WD4OO2A27Pq1n" +
"50cMchmcaXadJhGrOqH5YmHdOCj5NSHzJYrsW0HPlpuAx/ECMeIZYDh6RMqaFM" +
"2DXzdKX9NmmyqzJ3o/0lkk/N97gfVRLW5hA29yeAwaCViZNCP8iC9aO0q9fQoj" +
"oa7NQnAtw==")
REGISTRATION_PAYLOAD = {
"forcePairing": False,
"manifest": {
"appVersion": "1.1",
"manifestVersion": 1,
"permissions": [
"LAUNCH",
"LAUNCH_WEBAPP",
"APP_TO_APP",
"CLOSE",
"TEST_OPEN",
"TEST_PROTECTED",
"CONTROL_AUDIO",
"CONTROL_DISPLAY",
"CONTROL_INPUT_JOYSTICK",
"CONTROL_INPUT_MEDIA_RECORDING",
"CONTROL_INPUT_MEDIA_PLAYBACK",
"CONTROL_INPUT_TV",
"CONTROL_POWER",
"READ_APP_STATUS",
"READ_CURRENT_CHANNEL",
"READ_INPUT_DEVICE_LIST",
"READ_NETWORK_STATE",
"READ_RUNNING_APPS",
"READ_TV_CHANNEL_LIST",
"WRITE_NOTIFICATION_TOAST",
"READ_POWER_STATE",
"READ_COUNTRY_INFO"
],
"signatures": [
{
"signature": SIGNATURE,
"signatureVersion": 1
}
],
"signed": {
"appId": "com.lge.test",
"created": "20140509",
"localizedAppNames": {
"": "LG Remote App",
"ko-KR": u"리모컨 앱",
"zxx-XX": u"ЛГ Rэмotэ AПП"
},
"localizedVendorNames": {
"": "LG Electronics"
},
"permissions": [
"TEST_SECURE",
"CONTROL_INPUT_TEXT",
"CONTROL_MOUSE_AND_KEYBOARD",
"READ_INSTALLED_APPS",
"READ_LGE_SDX",
"READ_NOTIFICATIONS",
"SEARCH",
"WRITE_SETTINGS",
"WRITE_NOTIFICATION_ALERT",
"CONTROL_POWER",
"READ_CURRENT_CHANNEL",
"READ_RUNNING_APPS",
"READ_UPDATE_INFO",
"UPDATE_FROM_REMOTE_APP",
"READ_LGE_TV_INPUT_EVENTS",
"READ_TV_CURRENT_TIME"
],
"serial": "2f930e2d2cfe083771f68e4fe7bb07",
"vendorId": "com.lge"
}
},
"pairingType": "PROMPT"
}
class WebOSWebSocketClient(WebSocketClient):
@property
def handshake_headers(self):
headers = super(WebOSWebSocketClient, self).handshake_headers
return [(k, v) for k, v in headers if k.lower() != 'origin']
class WebOSClient(WebOSWebSocketClient):
PROMPTED = 1
REGISTERED = 2
def __init__(self, host):
ws_url = "ws://{}:3000/".format(host)
super(WebOSClient, self).__init__(ws_url)
self.waiters = {}
self.waiter_lock = RLock()
self.subscribers = {}
self.subscriber_lock = RLock()
self.send_lock = RLock()
@staticmethod
def discover():
res = discover("urn:schemas-upnp-org:device:MediaRenderer:1",
keyword="LG", hosts=True, retries=3)
return [WebOSClient(x) for x in res]
def register(self, store, timeout=60):
if "client_key" in store:
REGISTRATION_PAYLOAD["client-key"] = store["client_key"]
queue = self.send_message('register', None, REGISTRATION_PAYLOAD,
get_queue=True)
while True:
try:
item = queue.get(block=True, timeout=timeout)
except Empty:
raise Exception("Timeout.")
if item.get("payload", {}).get("pairingType") == "PROMPT":
yield WebOSClient.PROMPTED
elif item["type"] == "registered":
store["client_key"] = item["payload"]["client-key"]
yield WebOSClient.REGISTERED
break
else:
# TODO: Better exception.
raise Exception("Failed to register.")
def send_message(self, request_type, uri, payload, unique_id=None,
get_queue=False, callback=None, cur_time=time.time):
if unique_id is None:
unique_id = str(uuid4())
if get_queue:
wait_queue = Queue()
callback = wait_queue.put
if callback is not None:
with self.waiter_lock:
self.waiters[unique_id] = (callback, cur_time())
obj = {"type": request_type, "id": unique_id}
if uri is not None:
obj["uri"] = uri
if payload is not None:
obj["payload"] = payload
with self.send_lock:
self.send(json.dumps(obj))
if get_queue:
return wait_queue
def subscribe(self, uri, unique_id, callback, payload=None):
def func(obj):
callback(obj.get("payload"))
with self.subscriber_lock:
self.subscribers[unique_id] = uri
self.send_message('subscribe', uri, payload, unique_id=unique_id,
callback=func, cur_time=lambda: None)
return unique_id
def unsubscribe(self, unique_id):
with self.subscriber_lock:
uri = self.subscribers.pop(unique_id, None)
if not uri:
raise ValueError("Subscription not found: {}".format(unique_id))
with self.waiter_lock:
self.waiters.pop(unique_id)
self.send_message('unsubscribe', uri, payload=None)
def received_message(self, msg):
obj = json.loads(str(msg))
with self.waiter_lock:
self.clear_old_waiters()
if "id" in obj and obj["id"] in self.waiters:
callback, created_time = self.waiters[obj["id"]]
callback(obj)
def clear_old_waiters(self, delta=60):
to_clear = []
cur_time = time.time()
for key, value in self.waiters.items():
callback, created_time = value
if created_time and created_time + delta < cur_time:
to_clear.append(key)
for key in to_clear:
self.waiters.pop(key)
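if __name__ == "__main__":
    # Rough usage sketch (illustrative only; the host address and print
    # handling are assumptions, not part of this module):
    store = {}
    client = WebOSClient("192.168.1.50")
    client.connect()
    for status in client.register(store):
        if status == WebOSClient.PROMPTED:
            print("Please accept the pairing prompt on the TV.")
        elif status == WebOSClient.REGISTERED:
            print("Registered; client key:", store["client_key"])
    client.close()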
|
py | 7df81a735f73272c5be8deefb23be782e05839d8 | def hail_caesar(enc_str):
return enc_str.decode('rot13')
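# str.decode('rot13') only exists on Python 2; a rough Python 3 equivalent
# (hypothetical helper, not part of the original) goes through codecs:
import codecs
def hail_caesar_py3(enc_str):
    # the rot_13 text transform works on str objects in Python 3
    return codecs.decode(enc_str, 'rot13')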
|
py | 7df81a908931bbd220e6e99e9d1df279d02db961 | # -*- coding: utf-8 -*-
"""
Part3.py
a little programme to stop using educated guess
Created on Fri Nov 20 15:28:43 2020
@author: Sebastian Tamon Hascilowicz
"""
import os
import csv
import matplotlib.pyplot as plt
def Holzer(imax, omega, Ks, Is, tq, ang):
"""
Holzer method rotating system calculator using the equation provided
    in Marine Engineering Lecture 11 slides.
Parameters
----------
imax : int
        Number of stations (values of i). Should be an integer.
omega : float
Natural Frequency
Ks : list of floats
List of torsional stiffnecess.
Is : list of floats
List of moments of inertia.
tq : float
Initial torque value.
ang : float
Initial angle displacement.
Returns
-------
angs : List
List of obtained angular displacements
tqs : List
List of obtained torques
"""
angs = []
tqs = []
for i in range(imax):
angout = ang + (tq / Ks[i])
tqout = -1 * omega ** 2 * Is[i] * ang + \
(1 - (omega ** 2) * Is[i] / Ks[i]) * tq
angs.append(angout)
tqs.append(tqout)
ang = angout
tq = tqout
return angs, tqs
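# Example call pattern (mirrors how main() below drives this function; the
# numeric values are hypothetical):
#   Is = [1200.0, 1800.0, 950.0]      # inertias in kg m^2
#   Ks = [1, 35.0e6, 42.0e6]          # Ks[0] is a placeholder, as in main()
#   angs, tqs = Holzer(3, 25.0, Ks, Is, 0, 1)
# A natural frequency lies where the residual torque tqs[-1] changes sign.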
def iterator(sta, end, d, dep, imax, Ks, Is, tq, ang):
"""
Recursive function that speeds up the calculation. It is not reccomentded
to go any further than 14 as it really takes some time to calculate and
our computer really does not like this...
"""
if d == dep:
out = [round(sta, dep)]
angs, tqs = Holzer(imax, sta, Ks, Is, tq, ang)
out.append(angs)
out.append(tqs)
return out
else:
inc = 10 ** (-1 * d)
i = sta
while i <= end:
angs1, tqs1 = Holzer(imax, i, Ks, Is, tq, ang)
angs2, tqs2 = Holzer(imax, i+inc, Ks, Is, tq, ang)
if tqs1[imax-1] * tqs2[imax-1] <= 0:
return iterator(i, i+inc, d+1, dep, imax, Ks, Is, tq, ang)
i = i + inc
def exporter(data):
"""
    This function prints the results as a table, plots the deflections and can save both to file
"""
plt.clf()
topstring = ' f(rad/s)'
fields = ['Freq(rad/s)']
for i in range(len(data[0][1])):
fields.append('theta'+str(i+1))
topstring = topstring + ' | theta' + str(i+1)
for j in range(len(data[0][1])):
fields.append('torque'+str(j+1))
topstring = topstring + ' | torque' + str(j+1)
print(topstring)
# make line
line = '-' * len(topstring)
print(line + '-')
# makes data
outer = []
d = ''
for k in range(len(data)):
y = data[k][1]
d = ' '
        if len(str(data[k][0])) < 7: # corrects the length
spacenum = 7 - len(str(data[k][0]))
spaces = ' ' * spacenum
num = spaces + str(data[k][0])
else:
num = str(data[k][0])[:7]
d = d + ' ' + num
inner = [data[k][0]]
x = []
y = data[k][1]
for ii in range(len(data[k][1])):
x.append(ii)
            if len(str(data[k][1][ii])) < 7: # corrects the length
spacenum = 7 - len(str(data[k][1][ii]))
spaces = ' ' * spacenum
num = spaces + str(data[k][1][ii])
else:
num = str(data[k][1][ii])[:7]
d = d + ' | ' + num
inner.append(data[k][1][ii])
for iii in range(len(data[k][2])):
            if len(str(data[k][2][iii])) < 7: # corrects the length
spacenum = 7 - len(str(data[k][2][iii]))
spaces = ' ' * spacenum
num = spaces + str(data[k][2][iii])
else:
num = str(data[k][2][iii])[:7]
d = d + ' | ' + num
inner.append(data[k][2][iii])
print(d)
outer.append(inner)
plt.plot(x, y, label="Omega="+str(data[k][0])[:5])
plt.style.use('bmh')
plt.grid(b=True, axis='both')
plt.legend(loc='best')
plt.ylabel('Deflection(rad)')
# if saving is required
option = input('Save Results? (y/n) : ')
if option == 'y':
directory = os.getcwd()
name = input('Enter filename : ')
form = input('plot type (png/pdf/svg) : ')
filename = name + '.csv'
plotname = name + '.' + form
with open(filename, 'w') as f:
write = csv.writer(f)
write.writerow(fields)
write.writerows(outer)
print(filename + ' saved at ' + directory)
plt.savefig(plotname, format=form)
print(plotname + ' saved at ' + directory)
def main():
"""
Main function
Returns
-------
result : List
Returns the result for part 3.
"""
    # this gets the maximum value for i
print('This Programme gets natural frequencies of Shafts using lumped')
print('parameter method iterating through frequencies.')
print('')
print('== Input Parameters ==========================================')
imax = int(input('Enter max value for i : '))
# collects the data
Is = []
for i in range(imax):
In = float(input('Enter I' + str(i+1) + '(kgm^2) : '))
Is.append(In)
Ks = [1]
for j in range(imax-1):
K = float(input('Enter k' + str(j+2) + ' (MNm/rad) : ')) * 10 ** 6
Ks.append(K)
tq = 0
ang = 1
# analysis setup
print('')
print('== Analysis Setup ============================================')
startk = int(input('Enter start freq (rad/sec): '))
endk = int(input('Enter end freq (rad/sec): '))
    decis = int(input('Enter number of decimal places (up to 14) : '))
# first loop finds roughly how many zero crossings there are.
    # Then it hands off to the iterator which gets a more exact value
result = []
k = startk
while k <= endk:
angs1, tqs1 = Holzer(imax, k, Ks, Is, tq, ang)
angs2, tqs2 = Holzer(imax, k+1, Ks, Is, tq, ang)
if tqs1[imax-1] * tqs2[imax-1] <= 0:
result.append(iterator(k, k+1, 1, decis, imax, Ks, Is, tq, ang))
k = k + 1
# data format and export
print('')
print('== Exporter ==================================================')
exporter(result)
main()
|
py | 7df81bb4df4818c11109e2497926fde422691b53 | class Link:
def __init__(self, caIndex):
self.caIndex = caIndex |
py | 7df81bc4534c2a8a5ecb420ec890969ccb8f5593 | from django.shortcuts import render, get_object_or_404
from django.utils import timezone
from django import forms
from django.contrib.auth.forms import UserCreationForm
from django.http import HttpResponseRedirect
from django.contrib.auth import authenticate, login
from django.contrib.auth.models import User
def register(request):
if request.method == 'POST':
form = UserCreationForm(request.POST)
if form.is_valid():
new_user = form.save()
# Log the user in automatically in next two lines
new_user = authenticate(username=form.cleaned_data['username'],
password=form.cleaned_data['password1'],
)
login(request, new_user)
            # TODO Redirect should be updated to use the request's '?next=' parameter
return HttpResponseRedirect("/story/")
else:
form = UserCreationForm()
return render(request, "registration/signup.html", {
'form': form,
})
def user(request):
user = User.objects.all()
return render(request, 'users/signin.html', {'user': user})
|
py | 7df81c6a2b76bb8fa9f2f177708a2606eded49ae | import requests
from requests.auth import HTTPBasicAuth
api_key = "" # Your API key here, required
url = "https://api-us.musiio.com/v1/catalog/info"
response = requests.get(url, auth=HTTPBasicAuth(api_key, ''))
print(response.text.encode('utf8'))
|
py | 7df81c8b4c01f3b1c5d957b4bb9fcfad2139f923 | import pytest
from bottery.message import Message
from bottery.telegram import reply
from bottery.telegram.engine import TelegramChat, TelegramEngine, TelegramUser
@pytest.fixture
def engine():
return TelegramEngine
@pytest.fixture
def user():
return TelegramUser
@pytest.fixture
def chat():
return TelegramChat
@pytest.fixture()
def message():
return Message(
id=1,
platform='telegram',
text='',
user=user,
chat=chat,
timestamp='',
raw='',
)
@pytest.fixture
def message_data():
return {
'message': {
'chat': {
'first_name': 'John',
'id': 12345678,
'last_name': 'Snow',
'type': 'private',
'username': 'johnsnow'
},
'date': 1516787847,
'from': {
'first_name': 'John',
'id': 12345678,
'is_bot': False,
'language_code': 'en-US',
'last_name': 'Snow',
'username': 'johnsnow'
},
'message_id': 2,
'text': 'Hi bot, how are you?'
},
'update_id': 987456321
}
@pytest.fixture
def edited_message_data(message_data):
return {'edited_message': message_data['message']}
@pytest.mark.parametrize('chat_type,id_expected', [
('group', 456),
('private', 123),
])
def test_platform_telegram_engine_get_chat_id(chat_type,
id_expected, engine, message):
setattr(message.chat, 'id', id_expected)
setattr(message.chat, 'type', chat_type)
setattr(message.user, 'id', id_expected)
assert engine.get_chat_id(engine, message) == id_expected
@pytest.mark.parametrize('message_input,message_key,message_edited', [
(pytest.lazy_fixture('message_data'), 'message', False),
(pytest.lazy_fixture('edited_message_data'), 'edited_message', True)
])
def test_build_message(engine, message_input, message_key, message_edited):
message = engine.build_message(engine, message_input)
assert message.id == message_input[message_key]['message_id']
assert message.text == message_input[message_key]['text']
assert message.timestamp == message_input[message_key]['date']
assert message.raw == message_input
assert message.edited == message_edited
def test_build_message_without_text(message_data, engine):
'''
Telegram can send a message without text.
For example, when a bot is added to a group.
'''
message_data_without_text = message_data
del message_data_without_text['message']['text']
message = engine.build_message(engine, message_data_without_text)
assert message.id == message_data_without_text['message']['message_id']
assert message.text is not None
assert message.text == ''
assert message.timestamp == message_data_without_text['message']['date']
assert message.raw == message_data
def test_reply_decorator(message):
@reply()
def view(message):
return ''
view(message)
assert message._request_payload['reply_to_message_id'] == message.id
def test_reply_decorator_to_previous_message(message):
@reply(to=lambda message: message.id - 2)
def view(message):
return ''
view(message)
assert message._request_payload['reply_to_message_id'] == message.id - 2
|
py | 7df81e9c3aab49f00a490e883bee7056bd1bcf61 | from selenium import webdriver
from selenium.webdriver.chrome.options import Options
def driver():
from webdriver_manager.chrome import ChromeDriverManager
chrome_options = Options()
chrome_options.add_argument("--log-level=3")
chrome_options.add_argument("--headless")
return webdriver.Chrome(ChromeDriverManager().install(),options=chrome_options)
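# Example use (illustrative; the URL is arbitrary):
#   browser = driver()
#   browser.get("https://example.com")
#   browser.quit()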
|
py | 7df81f15c8d42c039994e9aa0d96ca75d587d8d0 | from mlf_core.common.suggest_similar_commands import SIMILARITY_SUGGEST_FACTOR, SIMILARITY_USE_FACTOR
def levensthein_dist(input_command: str, candidate: str) -> int:
"""
Implement the Levenshtein distance algorithm to determine, in case of a non-existing handle,
if theres a very similar command to suggest.
:param input_command: The non-existing handle the user gave as input
:param candidate: The (possible similar) alternative command
:return: The similarity between the two strings measured by the levensthein distance
"""
if not input_command or not candidate:
return max(len(input_command), len(candidate)) # at least one string is empty
dp_table = [[0 for col in range(len(input_command) + 1)] for row in range(len(candidate) + 1)]
dp_table[0] = list(range(0, len(input_command) + 1))
for i in range(1, len(candidate) + 1):
dp_table[i][0] = i
# now choose minimum levensthein distance from the three option delete/replace/insert
    # if chars are the same -> Levenshtein distance is the same as for the substrings without these chars of input_command
# and candidate
for i in range(1, len(candidate) + 1):
for j in range(1, len(input_command) + 1):
# choose minimum edit distance from delete, replace or insert at current substring
if input_command[j - 1] == candidate[i - 1]:
dp_table[i][j] = dp_table[i - 1][j - 1]
else:
dp_table[i][j] = min(min(dp_table[i][j - 1], dp_table[i - 1][j - 1]), dp_table[i - 1][j]) + 1
return dp_table[len(candidate)][len(input_command)]
def most_similar_command(command: str, command_list: set) -> (list, str):
"""
    Determine whether it's possible to suggest a similar command.
    The similarity is measured by the Levenshtein distance; a factor (currently 1/3) sets the
    limit below which a similar command can be used automatically. If the difference diff is 1/3 < diff <= 2/3, one
    or more similar commands could be suggested, but not used automatically.
    :param command_list: The commands that are available for the user's specific action
:param command: The command given by the user
:return: A list of similar command(s) or the empty string if there's none and a string that indicates the action to be taken
"""
    min_use = 999999  # some random large integer -> handles will never be longer than 1000 characters
min_suggest = 999999
sim_command_use = []
sim_command_suggest = []
    # for each valid handle calculate the Levenshtein distance and, if one is found that is a new minimal distance,
# replace it and take this handle as the most similar command.
for handle in command_list:
dist = levensthein_dist(command, handle)
# the more restrict condition for automatic use
lim_use = int(len(command) * SIMILARITY_USE_FACTOR)
# the weaker condition for command suggestion
lim_suggest = int(len(command) * SIMILARITY_SUGGEST_FACTOR)
# check if the command is close to the inputted command so it can be automatically used
if lim_use >= dist:
if min_use > dist: # and min >= dist:
min_use = dist
sim_command_use = [handle]
elif min_use == dist:
sim_command_use.append(handle)
# the input is not very close to any command, but maybe a similar one can be suggested?
elif lim_use < dist <= lim_suggest:
if min_suggest > dist: # and min >= dist:
min_suggest = dist
sim_command_suggest = [handle]
elif min_suggest == dist:
sim_command_suggest.append(handle)
# return the use list, as those are closer, but if its empty, return the list of suggested commands (or if that is empty too, an empty list)
return (sim_command_use, 'use') if sim_command_use else (sim_command_suggest, 'suggest') if sim_command_suggest else ([], '')
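if __name__ == "__main__":
    # Quick illustrative check (an addition for demonstration, not part of mlf-core itself):
    # "kitten" -> "sitting" takes two substitutions and one insertion.
    print(levensthein_dist("kitten", "sitting"))  # expected: 3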
|
py | 7df81fe66a1037044b70e15a097234a309bed9bb | #! /usr/bin/env python
# Copyright (c) 2019 Uber Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import logging
from collections import OrderedDict
import numpy as np
from sklearn import metrics
from sklearn.metrics import confusion_matrix
logger = logging.getLogger(__name__)
class ConfusionMatrix:
def __init__(self, conditions, predictions, labels=None, sample_weight=None):
# assert (len(predictions) == len(conditions))
min_length = min(len(predictions), len(conditions))
self.predictions = predictions[:min_length]
self.conditions = conditions[:min_length]
if labels is not None:
self.label2idx = {label: idx for idx, label in enumerate(labels)}
self.idx2label = {idx: label for idx, label in enumerate(labels)}
labels = list(range(len(labels)))
else:
self.label2idx = {
str(label): idx for idx, label in enumerate(np.unique([self.predictions, self.conditions]))
}
self.idx2label = {
idx: str(label) for idx, label in enumerate(np.unique([self.predictions, self.conditions]))
}
self.cm = confusion_matrix(self.conditions, self.predictions, labels=labels, sample_weight=sample_weight)
# if labels is not None:
# self.labels_dict = {label: idx for idx, label in enumerate(labels)}
# else:
# if conditions.dtype.char == 'S': # it's an array of strings
# self.labels_dict = {str(label): idx for idx, label in
# enumerate(np.unique([predictions, conditions]))}
# else: # numerical
# max_label = np.concatenate([predictions, conditions]).max()
# self.labels_dict = {str(i): i for i in range(max_label + 1)}
# labels = [str(i) for i in range(max_label + 1)]
# self.cm = confusion_matrix(conditions, predictions, labels, sample_weight)
self.sum_predictions = np.sum(self.cm, axis=0)
self.sum_conditions = np.sum(self.cm, axis=1)
self.all = np.sum(self.cm)
def label_to_idx(self, label):
return self.label2idx[label]
def true_positives(self, idx):
return self.cm[idx, idx]
def true_negatives(self, idx):
return self.all - self.sum_predictions[idx] - self.sum_conditions[idx] + self.true_positives(idx)
def false_positives(self, idx):
return self.sum_predictions[idx] - self.true_positives(idx)
def false_negatives(self, idx):
return self.sum_conditions[idx] - self.true_positives(idx)
def true_positive_rate(self, idx):
nom = self.true_positives(idx)
den = self.sum_conditions[idx]
if den == 0 or den == np.nan:
return 0
else:
return nom / den
def true_negative_rate(self, idx):
nom = tn = self.true_negatives(idx)
den = tn + self.false_positives(idx)
if den == 0 or den == np.nan:
return 0
else:
return nom / den
def positive_predictive_value(self, idx):
nom = self.true_positives(idx)
den = self.sum_predictions[idx]
if den == 0 or den == np.nan:
return 0
else:
return nom / den
def negative_predictive_value(self, idx):
nom = tn = self.true_negatives(idx)
den = tn + self.false_negatives(idx)
if den == 0 or den == np.nan:
return 0
else:
return nom / den
def false_negative_rate(self, idx):
return 1.0 - self.true_positive_rate(idx)
def false_positive_rate(self, idx):
return 1.0 - self.true_negative_rate(idx)
def false_discovery_rate(self, idx):
return 1.0 - self.positive_predictive_value(idx)
def false_omission_rate(self, idx):
return 1.0 - self.negative_predictive_value(idx)
def accuracy(self, idx):
nom = self.true_positives(idx) + self.true_negatives(idx)
den = self.all
if den == 0 or den == np.nan:
return 0
else:
return nom / den
def precision(self, idx):
return self.positive_predictive_value(idx)
def recall(self, idx):
return self.true_positive_rate(idx)
def fbeta_score(self, beta, idx):
beta_2 = np.power(beta, 2)
precision = self.precision(idx)
recall = self.recall(idx)
nom = (1 + beta_2) * precision * recall
den = (beta_2 * precision) + recall
if den == 0 or den == np.nan:
return 0
else:
return nom / den
def f1_score(self, idx):
return self.fbeta_score(1, idx)
def sensitivity(self, idx):
return self.true_positive_rate(idx)
def specificity(self, idx):
return self.true_negative_rate(idx)
def hit_rate(self, idx):
return self.true_positive_rate(idx)
def miss_rate(self, idx):
return self.false_negative_rate(idx)
def fall_out(self, idx):
return self.false_positive_rate(idx)
def matthews_correlation_coefficient(self, idx):
tp = self.true_positives(idx)
tn = self.true_negatives(idx)
fp = self.false_positives(idx)
fn = self.false_negatives(idx)
nom = tp * tn - fp * fn
den = np.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
if den == 0 or den == np.nan:
return 0
else:
return nom / den
def informedness(self, idx):
return self.true_positive_rate(idx) + self.true_negative_rate(idx) - 1
def markedness(self, idx):
return self.positive_predictive_value(idx) + self.negative_predictive_value(idx) - 1
def token_accuracy(self):
return metrics.accuracy_score(self.conditions, self.predictions)
def avg_precision(self, average="macro"):
return metrics.precision_score(self.conditions, self.predictions, average=average)
def avg_recall(self, average="macro"):
return metrics.recall_score(self.conditions, self.predictions, average=average)
def avg_f1_score(self, average="macro"):
return metrics.f1_score(self.conditions, self.predictions, average=average)
def avg_fbeta_score(self, beta, average="macro"):
return metrics.fbeta_score(self.conditions, self.predictions, beta=beta, average=average)
def kappa_score(self):
return metrics.cohen_kappa_score(self.conditions, self.predictions)
def class_stats(self, idx):
return {
"true_positives": self.true_positives(idx),
"true_negatives": self.true_negatives(idx),
"false_positives": self.false_positives(idx),
"false_negatives": self.false_negatives(idx),
"true_positive_rate": self.true_positive_rate(idx),
"true_negative_rate": self.true_negative_rate(idx),
"positive_predictive_value": self.positive_predictive_value(idx),
"negative_predictive_value": self.negative_predictive_value(idx),
"false_negative_rate": self.false_negative_rate(idx),
"false_positive_rate": self.false_positive_rate(idx),
"false_discovery_rate": self.false_discovery_rate(idx),
"false_omission_rate": self.false_omission_rate(idx),
"accuracy": self.accuracy(idx),
"precision": self.precision(idx),
"recall": self.recall(idx),
"f1_score": self.f1_score(idx),
"sensitivity": self.sensitivity(idx),
"specificity": self.specificity(idx),
"hit_rate": self.hit_rate(idx),
"miss_rate": self.miss_rate(idx),
"fall_out": self.fall_out(idx),
"matthews_correlation_coefficient": self.matthews_correlation_coefficient(idx),
"informedness": self.informedness(idx),
"markedness": self.markedness(idx),
}
def per_class_stats(self):
stats = OrderedDict()
for idx in sorted(self.idx2label.keys()):
stats[self.idx2label[idx]] = self.class_stats(idx)
return stats
def stats(self):
return {
"token_accuracy": self.token_accuracy(),
"avg_precision_macro": self.avg_precision(average="macro"),
"avg_recall_macro": self.avg_recall(average="macro"),
"avg_f1_score_macro": self.avg_f1_score(average="macro"),
"avg_precision_micro": self.avg_precision(average="micro"),
"avg_recall_micro": self.avg_recall(average="micro"),
"avg_f1_score_micro": self.avg_f1_score(average="micro"),
"avg_precision_weighted": self.avg_precision(average="micro"),
"avg_recall_weighted": self.avg_recall(average="micro"),
"avg_f1_score_weighted": self.avg_f1_score(average="weighted"),
"kappa_score": self.kappa_score(),
}
def roc_curve(conditions, prediction_scores, pos_label=None, sample_weight=None):
return metrics.roc_curve(conditions, prediction_scores, pos_label=pos_label, sample_weight=sample_weight)
def roc_auc_score(conditions, prediction_scores, average="micro", sample_weight=None):
try:
return metrics.roc_auc_score(conditions, prediction_scores, average=average, sample_weight=sample_weight)
except ValueError as ve:
logger.info(ve)
def precision_recall_curve(conditions, prediction_scores, pos_label=None, sample_weight=None):
return metrics.precision_recall_curve(
conditions, prediction_scores, pos_label=pos_label, sample_weight=sample_weight
)
def average_precision_score(conditions, prediction_scores, average="micro", sample_weight=None):
    # average == [micro, macro, samples, weighted]
return metrics.average_precision_score(conditions, prediction_scores, average=average, sample_weight=sample_weight)
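if __name__ == "__main__":
    # Minimal usage sketch (illustrative addition; the labels below are made up).
    conditions = np.array([0, 1, 1, 0, 1])
    predictions = np.array([0, 1, 0, 0, 1])
    cm = ConfusionMatrix(conditions, predictions)
    print(cm.stats()["token_accuracy"])          # 4 of 5 correct -> 0.8
    print(cm.per_class_stats()["1"]["recall"])   # 2 of 3 positives -> ~0.667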
|
py | 7df82039c3255b3fab2af242e9de984189f82f7a | from fastai.vision import *
from fastai.metrics import error_rate
from fastai.callbacks import *
from torch.utils.data.sampler import WeightedRandomSampler
__all__ = ['OverSamplingCallback']
class OverSamplingCallback(LearnerCallback):
def __init__(self,learn:Learner,weights:torch.Tensor=None):
super().__init__(learn)
self.weights = weights
def on_train_begin(self, **kwargs):
ds,dl = self.data.train_ds,self.data.train_dl
self.labels = ds.y.items
assert np.issubdtype(self.labels.dtype, np.integer), "Can only oversample integer values"
_,self.label_counts = np.unique(self.labels,return_counts=True)
if self.weights is None: self.weights = torch.DoubleTensor((1/self.label_counts)[self.labels])
self.total_len_oversample = int(self.data.c*np.max(self.label_counts))
sampler = WeightedRandomSampler(self.weights, self.total_len_oversample)
self.data.train_dl = dl.new(shuffle=False, sampler=sampler)
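# Worked example of the weighting above (hypothetical counts): with two classes
# counted [900, 100], the per-sample weights are 1/900 and 1/100, so minority
# samples are drawn ~9x as often, over c * max(counts) = 2 * 900 = 1800 draws.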
# note: trainfor() below relies on the global learn and bs objects created in __main__
def trainfor(num_epochs,size):
np.random.seed(33)
torch.random.manual_seed(33)
data : ImageDataBunch= ImageDataBunch.from_folder("./image_data",train='.',valid_pct=0.2, ds_tfms=get_transforms(flip_vert=False), size=size, bs=bs,num_workers=4).normalize(imagenet_stats)
    # beware of num_workers: many errors are caused by it
learn.data = data
learn.lr_find()
learn.recorder.plot(suggestion=True)
try:
min_grad_lr = learn.recorder.min_grad_lr
    except Exception:
min_grad_lr = 3e-5
learn.fit_one_cycle(num_epochs, min_grad_lr,callbacks=[SaveModelCallback(learn, every='epoch', monitor='error_rate')])
if __name__ == "__main__":
bs = 24
data : ImageDataBunch= ImageDataBunch.from_folder("./image_data",train='.',valid_pct=0.2, ds_tfms=get_transforms(flip_vert=False), size=50, bs=bs,num_workers=4).normalize(imagenet_stats)
learn = cnn_learner(data, models.resnet50, metrics=[accuracy,Precision(),Recall(),FBeta()] , callback_fns=[OverSamplingCallback])
print('******************FROZEN*********************')
learn.path=Path("./learners/endgame/frozen")
trainfor(8,128)
print('******************FROZEN BIGGER *********************')
learn.path=Path("./learners/endgame/frozen-bigger")
trainfor(6,256)
learn.unfreeze()
print('******************UNFROZEN *********************')
learn.path=Path("./learners/endgame/unfrozen")
trainfor(6,256)
print('******************UNFROZEN BIGGER*********************')
learn.path=Path("./learners/endgame/unfrozen-bigger")
trainfor(4,512)
trainfor(4,512)
learn.export()
learn.path=Path("./learners/endgame/unfrozen-bigger2")
trainfor(1,1024)
learn.export() |
py | 7df820aecac77f5f66d27d049f511db7ccc50544 | import argparse
from . import PersonalDataCommand
class ConvertCmd(PersonalDataCommand):
name = 'convert'
def exec(self, command_args):
parser = argparse.ArgumentParser(self.name)
parser.add_argument('--path', type=str, required=True, help='Path to a serialized database')
parser.add_argument('--converted_path', type=str, required=True, help='Path to save a converted database')
args = parser.parse_args(command_args)
self.convert(args.path, args.converted_path)
def convert(self, db_path, converted_db_path):
storage = self.create_storage()
storage.load_from_file(db_path)
storage.save_to_file(converted_db_path)
print("Conversion finished")
|
py | 7df8216419533b687781d794ee8181fca2038def | # Language: Python 3
i = 4
d = 4.0
s = 'HackerRank '
int2 = int(input())
double2 = float(input())
string2 = input()
int2_i = i + int2
double2_d = d + double2
string2_s = s + string2
print(int2_i, double2_d, string2_s, sep="\n")
|
py | 7df82176b3efd8b5c15fb78bc4b750bc74be771f | """
Copyright 2017-2018 Fizyr (https://fizyr.com)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from libs.EfficientDet.generators.common import Generator
import os
import numpy as np
from pycocotools.coco import COCO
import cv2
class CocoGenerator(Generator):
"""
Generate data from the COCO dataset.
See https://github.com/cocodataset/cocoapi/tree/master/PythonAPI for more information.
"""
def __init__(self, data_dir, set_name, **kwargs):
"""
Initialize a COCO data generator.
Args
data_dir: Path to where the COCO dataset is stored.
set_name: Name of the set to parse.
"""
self.data_dir = data_dir
self.set_name = set_name
if set_name in ['train2017', 'val2017']:
self.coco = COCO(os.path.join(data_dir, 'annotations', 'instances_' + set_name + '.json'))
else:
self.coco = COCO(os.path.join(data_dir, 'annotations', 'image_info_' + set_name + '.json'))
self.image_ids = self.coco.getImgIds()
self.load_classes()
super(CocoGenerator, self).__init__(**kwargs)
def load_classes(self):
"""
Loads the class to label mapping (and inverse) for COCO.
"""
# load class names (name -> label)
categories = self.coco.loadCats(self.coco.getCatIds())
categories.sort(key=lambda x: x['id'])
self.classes = {}
self.coco_labels = {}
self.coco_labels_inverse = {}
for c in categories:
self.coco_labels[len(self.classes)] = c['id']
self.coco_labels_inverse[c['id']] = len(self.classes)
self.classes[c['name']] = len(self.classes)
# also load the reverse (label -> name)
self.labels = {}
for key, value in self.classes.items():
self.labels[value] = key
def size(self):
""" Size of the COCO dataset.
"""
return len(self.image_ids)
def num_classes(self):
""" Number of classes in the dataset. For COCO this is 80.
"""
return 90
def has_label(self, label):
""" Return True if label is a known label.
"""
return label in self.labels
def has_name(self, name):
""" Returns True if name is a known class.
"""
return name in self.classes
def name_to_label(self, name):
""" Map name to label.
"""
return self.classes[name]
def label_to_name(self, label):
""" Map label to name.
"""
return self.labels[label]
def coco_label_to_label(self, coco_label):
""" Map COCO label to the label as used in the network.
COCO has some gaps in the order of labels. The highest label is 90, but there are 80 classes.
"""
return self.coco_labels_inverse[coco_label]
def coco_label_to_name(self, coco_label):
""" Map COCO label to name.
"""
return self.label_to_name(self.coco_label_to_label(coco_label))
def label_to_coco_label(self, label):
""" Map label as used by the network to labels as used by COCO.
"""
return self.coco_labels[label]
def image_aspect_ratio(self, image_index):
""" Compute the aspect ratio for an image with image_index.
"""
image = self.coco.loadImgs(self.image_ids[image_index])[0]
return float(image['width']) / float(image['height'])
def load_image(self, image_index):
"""
Load an image at the image_index.
"""
# {'license': 2, 'file_name': '000000259765.jpg', 'coco_url': 'http://images.cocodataset.org/test2017/000000259765.jpg', 'height': 480, 'width': 640, 'date_captured': '2013-11-21 04:02:31', 'id': 259765}
image_info = self.coco.loadImgs(self.image_ids[image_index])[0]
path = os.path.join(self.data_dir, 'images', self.set_name, image_info['file_name'])
image = cv2.imread(path)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
return image
def load_annotations(self, image_index):
""" Load annotations for an image_index.
"""
# get ground truth annotations
annotations_ids = self.coco.getAnnIds(imgIds=self.image_ids[image_index], iscrowd=False)
annotations = {'labels': np.empty((0,), dtype=np.float32), 'bboxes': np.empty((0, 4), dtype=np.float32)}
# some images appear to miss annotations (like image with id 257034)
if len(annotations_ids) == 0:
return annotations
# parse annotations
coco_annotations = self.coco.loadAnns(annotations_ids)
for idx, a in enumerate(coco_annotations):
# some annotations have basically no width / height, skip them
if a['bbox'][2] < 1 or a['bbox'][3] < 1:
continue
annotations['labels'] = np.concatenate(
[annotations['labels'], [a['category_id'] - 1]], axis=0)
annotations['bboxes'] = np.concatenate([annotations['bboxes'], [[
a['bbox'][0],
a['bbox'][1],
a['bbox'][0] + a['bbox'][2],
a['bbox'][1] + a['bbox'][3],
]]], axis=0)
return annotations
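# Illustrative usage sketch (added; not part of the original source). The path
# is hypothetical and any keyword arguments beyond data_dir/set_name are
# handled by the Generator base class, so defaults are assumed here:
#
#   generator = CocoGenerator('/path/to/coco', 'val2017')
#   print(generator.size(), 'images,', generator.num_classes(), 'label slots')
#   image = generator.load_image(0)              # HxWx3 RGB numpy array
#   annotations = generator.load_annotations(0)  # {'labels': ..., 'bboxes': ...}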
|
py | 7df8219c0f7ed04f31d6a6f42ad1ac90cd9e6c8c | # coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 5
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import isi_sdk_8_1_0
from isi_sdk_8_1_0.models.auth_log_level_level import AuthLogLevelLevel # noqa: E501
from isi_sdk_8_1_0.rest import ApiException
class TestAuthLogLevelLevel(unittest.TestCase):
"""AuthLogLevelLevel unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testAuthLogLevelLevel(self):
"""Test AuthLogLevelLevel"""
# FIXME: construct object with mandatory attributes with example values
# model = isi_sdk_8_1_0.models.auth_log_level_level.AuthLogLevelLevel() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
py | 7df822d07b018f040612f195edb18f7f76ff6975 | from interval import Interval
from numlike import Numlike
from nplike import Nplike
from npinterval import NpInterval
from theanointerval import TheanoInterval
from utils import assert_numlike
__all__ = ['Interval', 'Numlike', 'Nplike', 'TheanoInterval',
'NpInterval', 'assert_numlike']
|
py | 7df822df1f370b6e09109e1f62f405a791721e0b | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Dict, List, Mapping, Optional, Tuple, Union
from .. import _utilities, _tables
__all__ = ['Instance']
class Instance(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
availability_zone: Optional[pulumi.Input[str]] = None,
blueprint_id: Optional[pulumi.Input[str]] = None,
bundle_id: Optional[pulumi.Input[str]] = None,
key_pair_name: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
user_data: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Provides a Lightsail Instance. Amazon Lightsail is a service to provide easy virtual private servers
        with custom software already set up. See [What is Amazon Lightsail?](https://lightsail.aws.amazon.com/ls/docs/getting-started/article/what-is-amazon-lightsail)
for more information.
> **Note:** Lightsail is currently only supported in a limited number of AWS Regions, please see ["Regions and Availability Zones in Amazon Lightsail"](https://lightsail.aws.amazon.com/ls/docs/overview/article/understanding-regions-and-availability-zones-in-amazon-lightsail) for more details
## Example Usage
```python
import pulumi
import pulumi_aws as aws
# Create a new GitLab Lightsail Instance
gitlab_test = aws.lightsail.Instance("gitlabTest",
availability_zone="us-east-1b",
blueprint_id="string",
bundle_id="string",
key_pair_name="some_key_name",
tags={
"foo": "bar",
})
```
## Availability Zones
Lightsail currently supports the following Availability Zones (e.g. `us-east-1a`):
- `ap-northeast-1{a,c,d}`
- `ap-northeast-2{a,c}`
- `ap-south-1{a,b}`
- `ap-southeast-1{a,b,c}`
- `ap-southeast-2{a,b,c}`
- `ca-central-1{a,b}`
- `eu-central-1{a,b,c}`
- `eu-west-1{a,b,c}`
- `eu-west-2{a,b,c}`
- `eu-west-3{a,b,c}`
- `us-east-1{a,b,c,d,e,f}`
- `us-east-2{a,b,c}`
- `us-west-2{a,b,c}`
## Blueprints
Lightsail currently supports the following Blueprint IDs:
### OS Only
- `amazon_linux_2018_03_0_2`
- `centos_7_1901_01`
- `debian_8_7`
- `debian_9_5`
- `freebsd_11_1`
- `opensuse_42_2`
- `ubuntu_16_04_2`
- `ubuntu_18_04`
### Apps and OS
- `drupal_8_5_6`
- `gitlab_11_1_4_1`
- `joomla_3_8_11`
- `lamp_5_6_37_2`
- `lamp_7_1_20_1`
- `magento_2_2_5`
- `mean_4_0_1`
- `nginx_1_14_0_1`
- `nodejs_10_8_0`
- `plesk_ubuntu_17_8_11_1`
- `redmine_3_4_6`
- `wordpress_4_9_8`
- `wordpress_multisite_4_9_8`
## Bundles
Lightsail currently supports the following Bundle IDs (e.g. an instance in `ap-northeast-1` would use `small_2_0`):
### Prefix
A Bundle ID starts with one of the below size prefixes:
- `nano_`
- `micro_`
- `small_`
- `medium_`
- `large_`
- `xlarge_`
- `2xlarge_`
### Suffix
A Bundle ID ends with one of the following suffixes depending on Availability Zone:
- ap-northeast-1: `2_0`
- ap-northeast-2: `2_0`
- ap-south-1: `2_1`
- ap-southeast-1: `2_0`
- ap-southeast-2: `2_2`
- ca-central-1: `2_0`
- eu-central-1: `2_0`
- eu-west-1: `2_0`
- eu-west-2: `2_0`
- eu-west-3: `2_0`
- us-east-1: `2_0`
- us-east-2: `2_0`
- us-west-2: `2_0`
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] availability_zone: The Availability Zone in which to create your
instance (see list below)
:param pulumi.Input[str] blueprint_id: The ID for a virtual private server image
(see list below)
:param pulumi.Input[str] bundle_id: The bundle of specification information (see list below)
:param pulumi.Input[str] key_pair_name: The name of your key pair. Created in the
Lightsail console (cannot use `ec2.KeyPair` at this time)
        :param pulumi.Input[str] name: The name of the Lightsail Instance. Names must be unique within each AWS Region in your Lightsail account.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A map of tags to assign to the resource.
:param pulumi.Input[str] user_data: launch script to configure server with additional user data
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
if availability_zone is None:
raise TypeError("Missing required property 'availability_zone'")
__props__['availability_zone'] = availability_zone
if blueprint_id is None:
raise TypeError("Missing required property 'blueprint_id'")
__props__['blueprint_id'] = blueprint_id
if bundle_id is None:
raise TypeError("Missing required property 'bundle_id'")
__props__['bundle_id'] = bundle_id
__props__['key_pair_name'] = key_pair_name
__props__['name'] = name
__props__['tags'] = tags
__props__['user_data'] = user_data
__props__['arn'] = None
__props__['cpu_count'] = None
__props__['created_at'] = None
__props__['ipv6_address'] = None
__props__['is_static_ip'] = None
__props__['private_ip_address'] = None
__props__['public_ip_address'] = None
__props__['ram_size'] = None
__props__['username'] = None
super(Instance, __self__).__init__(
'aws:lightsail/instance:Instance',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
arn: Optional[pulumi.Input[str]] = None,
availability_zone: Optional[pulumi.Input[str]] = None,
blueprint_id: Optional[pulumi.Input[str]] = None,
bundle_id: Optional[pulumi.Input[str]] = None,
cpu_count: Optional[pulumi.Input[float]] = None,
created_at: Optional[pulumi.Input[str]] = None,
ipv6_address: Optional[pulumi.Input[str]] = None,
is_static_ip: Optional[pulumi.Input[bool]] = None,
key_pair_name: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
private_ip_address: Optional[pulumi.Input[str]] = None,
public_ip_address: Optional[pulumi.Input[str]] = None,
ram_size: Optional[pulumi.Input[float]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
user_data: Optional[pulumi.Input[str]] = None,
username: Optional[pulumi.Input[str]] = None) -> 'Instance':
"""
Get an existing Instance resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] arn: The ARN of the Lightsail instance (matches `id`).
:param pulumi.Input[str] availability_zone: The Availability Zone in which to create your
instance (see list below)
:param pulumi.Input[str] blueprint_id: The ID for a virtual private server image
(see list below)
:param pulumi.Input[str] bundle_id: The bundle of specification information (see list below)
:param pulumi.Input[str] created_at: The timestamp when the instance was created.
* `availability_zone`
* `blueprint_id`
* `bundle_id`
* `key_pair_name`
* `user_data`
:param pulumi.Input[str] key_pair_name: The name of your key pair. Created in the
Lightsail console (cannot use `ec2.KeyPair` at this time)
        :param pulumi.Input[str] name: The name of the Lightsail Instance. Names must be unique within each AWS Region in your Lightsail account.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A map of tags to assign to the resource.
:param pulumi.Input[str] user_data: launch script to configure server with additional user data
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["arn"] = arn
__props__["availability_zone"] = availability_zone
__props__["blueprint_id"] = blueprint_id
__props__["bundle_id"] = bundle_id
__props__["cpu_count"] = cpu_count
__props__["created_at"] = created_at
__props__["ipv6_address"] = ipv6_address
__props__["is_static_ip"] = is_static_ip
__props__["key_pair_name"] = key_pair_name
__props__["name"] = name
__props__["private_ip_address"] = private_ip_address
__props__["public_ip_address"] = public_ip_address
__props__["ram_size"] = ram_size
__props__["tags"] = tags
__props__["user_data"] = user_data
__props__["username"] = username
return Instance(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def arn(self) -> pulumi.Output[str]:
"""
The ARN of the Lightsail instance (matches `id`).
"""
return pulumi.get(self, "arn")
@property
@pulumi.getter(name="availabilityZone")
def availability_zone(self) -> pulumi.Output[str]:
"""
The Availability Zone in which to create your
instance (see list below)
"""
return pulumi.get(self, "availability_zone")
@property
@pulumi.getter(name="blueprintId")
def blueprint_id(self) -> pulumi.Output[str]:
"""
The ID for a virtual private server image
(see list below)
"""
return pulumi.get(self, "blueprint_id")
@property
@pulumi.getter(name="bundleId")
def bundle_id(self) -> pulumi.Output[str]:
"""
The bundle of specification information (see list below)
"""
return pulumi.get(self, "bundle_id")
@property
@pulumi.getter(name="cpuCount")
def cpu_count(self) -> pulumi.Output[float]:
return pulumi.get(self, "cpu_count")
@property
@pulumi.getter(name="createdAt")
def created_at(self) -> pulumi.Output[str]:
"""
The timestamp when the instance was created.
* `availability_zone`
* `blueprint_id`
* `bundle_id`
* `key_pair_name`
* `user_data`
"""
return pulumi.get(self, "created_at")
@property
@pulumi.getter(name="ipv6Address")
def ipv6_address(self) -> pulumi.Output[str]:
return pulumi.get(self, "ipv6_address")
@property
@pulumi.getter(name="isStaticIp")
def is_static_ip(self) -> pulumi.Output[bool]:
return pulumi.get(self, "is_static_ip")
@property
@pulumi.getter(name="keyPairName")
def key_pair_name(self) -> pulumi.Output[Optional[str]]:
"""
The name of your key pair. Created in the
Lightsail console (cannot use `ec2.KeyPair` at this time)
"""
return pulumi.get(self, "key_pair_name")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
        The name of the Lightsail Instance. Names must be unique within each AWS Region in your Lightsail account.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="privateIpAddress")
def private_ip_address(self) -> pulumi.Output[str]:
return pulumi.get(self, "private_ip_address")
@property
@pulumi.getter(name="publicIpAddress")
def public_ip_address(self) -> pulumi.Output[str]:
return pulumi.get(self, "public_ip_address")
@property
@pulumi.getter(name="ramSize")
def ram_size(self) -> pulumi.Output[float]:
return pulumi.get(self, "ram_size")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
A map of tags to assign to the resource.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter(name="userData")
def user_data(self) -> pulumi.Output[Optional[str]]:
"""
launch script to configure server with additional user data
"""
return pulumi.get(self, "user_data")
@property
@pulumi.getter
def username(self) -> pulumi.Output[str]:
return pulumi.get(self, "username")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
|
py | 7df823c987187d2171707cd929f367eaa1b5f336 | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from contextlib import contextmanager
import sys
import asyncio.log
import unittest
import contextlib
import re
import types
import asyncio
import inspect
import warnings
import testslide.mock_callable
import testslide.mock_constructor
import testslide.matchers
import testslide.patch_attribute
from testslide.strict_mock import StrictMock # noqa
if sys.version_info < (3, 6):
raise RuntimeError("Python >=3.6 required.")
def _importer(target):
components = target.split(".")
import_path = components.pop(0)
thing = __import__(import_path)
def dot_lookup(thing, comp, import_path):
try:
return getattr(thing, comp)
except AttributeError:
__import__(import_path)
return getattr(thing, comp)
for comp in components:
import_path += ".%s" % comp
thing = dot_lookup(thing, comp, import_path)
return thing
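# Illustrative note (added): _importer resolves a dotted path, importing
# intermediate modules as needed, e.g. (standard-library targets shown only
# as examples):
#
#   os_path_join = _importer("os.path.join")   # -> the os.path.join function
#   json_module = _importer("json")            # -> the json module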
class _ContextData(object):
"""
To be used as a repository of context specific data, used during each
example execution.
"""
def _init_sub_example(self):
self._sub_examples_agg_ex = AggregatedExceptions()
def real_assert_sub_examples(self):
if self._sub_examples_agg_ex.exceptions:
self._sub_examples_agg_ex.raise_correct_exception()
if self._example.is_async:
async def assert_sub_examples(self):
real_assert_sub_examples(self)
else:
def assert_sub_examples(self):
real_assert_sub_examples(self)
self.after(assert_sub_examples)
def _init_mocks(self):
self.mock_callable = testslide.mock_callable.mock_callable
self.mock_async_callable = testslide.mock_callable.mock_async_callable
self.mock_constructor = testslide.mock_constructor.mock_constructor
self.patch_attribute = testslide.patch_attribute.patch_attribute
self._mock_callable_after_functions = []
def register_assertion(assertion):
if self._example.is_async:
async def f(_):
assertion()
else:
f = lambda _: assertion()
self._mock_callable_after_functions.append(f)
testslide.mock_callable.register_assertion = register_assertion
def __init__(self, example):
self._example = example
self._context = example.context
self._after_functions = []
self._test_case = unittest.TestCase()
self._init_sub_example()
self._init_mocks()
@staticmethod
def _not_callable(self):
raise BaseException("This function should not be called outside test code.")
@property
def _all_methods(self):
return self._context.all_context_data_methods
@property
def _all_attributes(self):
return self._context.all_context_data_memoizable_attributes
def __getattr__(self, name):
if name in self._all_methods.keys():
def static(*args, **kwargs):
return self._all_methods[name](self, *args, **kwargs)
self.__dict__[name] = static
if name in self._all_attributes.keys():
attribute_code = self._all_attributes[name]
if self._example.is_async and inspect.iscoroutinefunction(attribute_code):
raise ValueError(
f"Function can not be a coroutine function: {repr(attribute_code)}"
)
self.__dict__[name] = attribute_code(self)
try:
return self.__dict__[name]
except KeyError:
# Forward assert* methods to unittest.TestCase
if re.match("^assert", name) and hasattr(self._test_case, name):
return getattr(self._test_case, name)
raise AttributeError(
"Context '{}' has no attribute '{}'".format(self._context, name)
)
def after(self, after_code):
"""
Use this to decorate a function to be registered to be executed after
the example code.
"""
self._after_functions.append(after_code)
return self._not_callable
@contextmanager
def sub_example(self, name=None):
"""
Use this as a context manager many times inside the same
example. Failures in the code inside the context manager
will be aggregated, and reported individually at the end.
"""
with self._sub_examples_agg_ex.catch():
yield
class AggregatedExceptions(Exception):
"""
Aggregate example execution exceptions.
"""
def __init__(self):
super(AggregatedExceptions, self).__init__()
self.exceptions = []
def append_exception(self, exception):
if isinstance(exception, AggregatedExceptions):
self.exceptions.extend(exception.exceptions)
else:
self.exceptions.append(exception)
@contextmanager
def catch(self):
try:
yield
except BaseException as exception:
self.append_exception(exception)
def __str__(self):
return "{} failures.\n".format(len(self.exceptions)) + "\n".join(
f"{type(e)}: {str(e)}" for e in self.exceptions
)
def raise_correct_exception(self):
if not self.exceptions:
return
ex_types = {type(ex) for ex in self.exceptions}
if Skip in ex_types or unittest.SkipTest in ex_types:
raise Skip()
elif len(self.exceptions) == 1:
raise self.exceptions[0]
else:
raise self
class Skip(Exception):
"""
Raised by an example when it is skipped
"""
pass
class UnexpectedSuccess(Exception):
"""
Raised by an example when it unexpectedly succeeded
"""
class SlowCallback(Exception):
"""
Raised by TestSlide when an asyncio slow callback warning is detected
"""
class _ExampleRunner:
def __init__(self, example):
self.example = example
@staticmethod
async def _fail_if_not_coroutine_function(func, *args, **kwargs):
if not inspect.iscoroutinefunction(func):
raise ValueError(f"Function must be a coroutine function: {repr(func)}")
return await func(*args, **kwargs)
async def _real_async_run_all_hooks_and_example(
self, context_data, around_functions=None
):
"""
***********************************************************************
***********************************************************************
WARNING
***********************************************************************
***********************************************************************
        This function **MUST** keep the exact same execution flow as
        _sync_run_all_hooks_and_example()!!!
"""
if around_functions is None:
around_functions = list(reversed(self.example.context.all_around_functions))
if not around_functions:
aggregated_exceptions = AggregatedExceptions()
with aggregated_exceptions.catch():
for before_code in self.example.context.all_before_functions:
await self._fail_if_not_coroutine_function(
before_code, context_data
)
await self._fail_if_not_coroutine_function(
self.example.code, context_data
)
after_functions = []
after_functions.extend(context_data._mock_callable_after_functions)
after_functions.extend(self.example.context.all_after_functions)
after_functions.extend(context_data._after_functions)
for after_code in reversed(after_functions):
with aggregated_exceptions.catch():
await self._fail_if_not_coroutine_function(after_code, context_data)
aggregated_exceptions.raise_correct_exception()
return
around_code = around_functions.pop()
wrapped_called = []
async def async_wrapped():
wrapped_called.append(True)
await self._real_async_run_all_hooks_and_example(
context_data, around_functions
)
await self._fail_if_not_coroutine_function(
around_code, context_data, async_wrapped
)
if not wrapped_called:
raise RuntimeError(
"Around hook "
+ repr(around_code.__name__)
+ " did not execute example code!"
)
@contextlib.contextmanager
def _raise_if_asyncio_warnings(self, context_data):
if sys.version_info < (3, 7):
yield
return
original_showwarning = warnings.showwarning
caught_failures = []
def showwarning(message, category, filename, lineno, file=None, line=None):
failure_warning_messages = {
RuntimeWarning: "^coroutine '.+' was never awaited"
}
warning_class = type(message)
pattern = failure_warning_messages.get(warning_class, None)
if pattern and re.compile(pattern).match(str(message)):
caught_failures.append(message)
else:
original_showwarning(message, category, filename, lineno, file, line)
warnings.showwarning = showwarning
original_logger_warning = asyncio.log.logger.warning
def logger_warning(msg, *args, **kwargs):
if re.compile("^Executing .+ took .+ seconds$").match(str(msg)):
msg = (
f"{msg}\n"
"During the execution of the async test a slow callback "
"that blocked the event loop was detected.\n"
"Tip: you can customize the detection threshold with:\n"
" asyncio.get_running_loop().slow_callback_duration = seconds"
)
caught_failures.append(SlowCallback(msg % args))
else:
original_logger_warning(msg, *args, **kwargs)
asyncio.log.logger.warning = logger_warning
aggregated_exceptions = AggregatedExceptions()
try:
with aggregated_exceptions.catch():
yield
finally:
warnings.showwarning = original_showwarning
asyncio.log.logger.warning = original_logger_warning
for failure in caught_failures:
with aggregated_exceptions.catch():
raise failure
aggregated_exceptions.raise_correct_exception()
def _async_run_all_hooks_and_example(self, context_data):
coro = self._real_async_run_all_hooks_and_example(context_data)
with self._raise_if_asyncio_warnings(context_data):
if sys.version_info < (3, 7):
loop = asyncio.events.new_event_loop()
try:
loop.set_debug(True)
loop.run_until_complete(coro)
finally:
try:
loop.run_until_complete(loop.shutdown_asyncgens())
finally:
loop.close()
else:
asyncio.run(coro, debug=True)
@staticmethod
def _fail_if_coroutine_function(func, *args, **kwargs):
if inspect.iscoroutinefunction(func):
raise ValueError(f"Function can not be a coroutine function: {repr(func)}")
return func(*args, **kwargs)
def _sync_run_all_hooks_and_example(self, context_data, around_functions=None):
"""
***********************************************************************
***********************************************************************
WARNING
***********************************************************************
***********************************************************************
        This function **MUST** keep the exact same execution flow as
        _real_async_run_all_hooks_and_example()!!!
"""
if around_functions is None:
around_functions = list(reversed(self.example.context.all_around_functions))
if not around_functions:
aggregated_exceptions = AggregatedExceptions()
with aggregated_exceptions.catch():
for before_code in self.example.context.all_before_functions:
self._fail_if_coroutine_function(before_code, context_data)
self._fail_if_coroutine_function(self.example.code, context_data)
after_functions = []
after_functions.extend(context_data._mock_callable_after_functions)
after_functions.extend(self.example.context.all_after_functions)
after_functions.extend(context_data._after_functions)
for after_code in reversed(after_functions):
with aggregated_exceptions.catch():
self._fail_if_coroutine_function(after_code, context_data)
aggregated_exceptions.raise_correct_exception()
return
around_code = around_functions.pop()
wrapped_called = []
def wrapped():
wrapped_called.append(True)
self._sync_run_all_hooks_and_example(context_data, around_functions)
self._fail_if_coroutine_function(around_code, context_data, wrapped)
if not wrapped_called:
raise RuntimeError(
"Around hook "
+ repr(around_code.__name__)
+ " did not execute example code!"
)
def run(self):
try:
if self.example.skip:
raise Skip()
context_data = _ContextData(self.example)
if self.example.is_async:
self._async_run_all_hooks_and_example(context_data)
else:
self._sync_run_all_hooks_and_example(context_data)
finally:
sys.stdout.flush()
sys.stderr.flush()
testslide.mock_callable.unpatch_all_callable_mocks()
testslide.mock_constructor.unpatch_all_constructor_mocks()
testslide.patch_attribute.unpatch_all_mocked_attributes()
class Example(object):
"""
Individual example.
"""
def __init__(self, name, code, context, skip=False, focus=False):
self.name = name
self.code = code
self.is_async = inspect.iscoroutinefunction(self.code)
self.context = context
self.__dict__["skip"] = skip
self.__dict__["focus"] = focus
@property
def full_name(self):
return "{context_full_name}: {example_name}".format(
context_full_name=self.context.full_name, example_name=self.name
)
@property
def skip(self):
"""
        True if the example or its context is marked to be skipped.
"""
return any([self.context.skip, self.__dict__["skip"]])
@property
def focus(self):
"""
        True if the example or its context is marked to be focused.
"""
return any([self.context.focus, self.__dict__["focus"]])
def __call__(self):
"""
Run the example, including all around, before and after hooks.
"""
_ExampleRunner(self).run()
def __str__(self):
return self.name
class _TestSlideTestResult(unittest.TestResult):
"""
    Concrete unittest.TestResult to allow unittest.TestCase integration, by
    aggregating failures in an AggregatedExceptions instance.
"""
def __init__(self):
super(_TestSlideTestResult, self).__init__()
self.aggregated_exceptions = AggregatedExceptions()
def _add_exception(self, err):
exc_type, exc_value, exc_traceback = err
self.aggregated_exceptions.append_exception(exc_value)
def addError(self, test, err):
"""Called when an error has occurred. 'err' is a tuple of values as
returned by sys.exc_info().
"""
super(_TestSlideTestResult, self).addError(test, err)
self._add_exception(err)
def addFailure(self, test, err):
"""Called when an error has occurred. 'err' is a tuple of values as
returned by sys.exc_info()."""
super(_TestSlideTestResult, self).addFailure(test, err)
self._add_exception(err)
def addSkip(self, test, reason):
"""Called when the test case test is skipped. reason is the reason
the test gave for skipping."""
super(_TestSlideTestResult, self).addSkip(test, reason)
self._add_exception((type(Skip), Skip(), None))
def addUnexpectedSuccess(self, test):
"""Called when the test case test was marked with the expectedFailure()
decorator, but succeeded."""
super(_TestSlideTestResult, self).addUnexpectedSuccess(test)
self._add_exception((type(UnexpectedSuccess), UnexpectedSuccess(), None))
def addSubTest(self, test, subtest, err):
"""Called at the end of a subtest.
'err' is None if the subtest ended successfully, otherwise it's a
tuple of values as returned by sys.exc_info().
"""
super(_TestSlideTestResult, self).addSubTest(test, subtest, err)
if err:
self._add_exception(err)
class Context(object):
"""
Container for example contexts.
"""
_SAME_CONTEXT_NAME_ERROR = "A context with the same name is already defined"
# List of all top level contexts created
all_top_level_contexts = []
# Constructor
def __init__(
self, name, parent_context=None, shared=False, skip=False, focus=False
):
"""
Creates a new context.
"""
# Validate context name
if parent_context:
current_level_contexts = parent_context.children_contexts
else:
current_level_contexts = self.all_top_level_contexts
if name in [context.name for context in current_level_contexts]:
raise RuntimeError(self._SAME_CONTEXT_NAME_ERROR)
self.name = name
self.parent_context = parent_context
self.shared = shared
self.__dict__["skip"] = skip
self.__dict__["focus"] = focus
self.children_contexts = []
self.examples = []
self.before_functions = []
self.after_functions = []
self.around_functions = []
self.context_data_methods = {}
self.context_data_memoizable_attributes = {}
self.shared_contexts = {}
self._runtime_attributes = []
if not self.parent_context and not self.shared:
self.all_top_level_contexts.append(self)
# Properties
@property
def parent_contexts(self):
"""
Returns a list of all parent contexts, from bottom to top.
"""
final_list = []
parent = self.parent_context
while parent:
final_list.append(parent)
parent = parent.parent_context
return final_list
@property
def depth(self):
"""
Number of parent contexts this context has.
"""
return len(self.parent_contexts)
def _all_parents_as_dict(original): # noqa: B902
"""
Use as a decorator for empty functions named all_attribute_name, to make
them return a dict with self.parent_context.all_attribute_name and
self.attribute_name.
"""
def get_all(self):
final_dict = {}
if self.parent_context:
final_dict.update(getattr(self.parent_context, original.__name__))
final_dict.update(getattr(self, original.__name__.split("all_")[1]))
return final_dict
return get_all
def _all_parents_as_list(original): # noqa: B902
"""
Use as a decorator for empty functions named all_attribute_name, to make
them return a list with self.parent_context.all_attribute_name and
self.attribute_name.
"""
def get_all(self):
final_list = []
if self.parent_context:
final_list.extend(getattr(self.parent_context, original.__name__))
final_list.extend(getattr(self, original.__name__.split("all_")[1]))
return final_list
return get_all
@property # type: ignore
@_all_parents_as_dict
def all_context_data_methods(self):
"""
Returns a combined dict of all context_data_methods, including from
parent contexts.
"""
pass
@property # type: ignore
@_all_parents_as_dict
def all_context_data_memoizable_attributes(self):
"""
Returns a combined dict of all context_data_memoizable_attributes,
including from parent contexts.
"""
pass
@property # type: ignore
@_all_parents_as_list
def all_around_functions(self):
"""
Return a list of all around_functions, including from parent contexts.
"""
pass
@property # type: ignore
@_all_parents_as_list
def all_before_functions(self):
"""
Return a list of all before_functions, including from parent contexts.
"""
pass
@property # type: ignore
@_all_parents_as_list
def all_after_functions(self):
"""
Return a list of all after_functions, including from parent contexts.
"""
pass
@property # type: ignore
@_all_parents_as_dict
def all_shared_contexts(self):
"""
Returns a combined dict of all shared_contexts, including from parent
contexts.
"""
pass
@property
def all_examples(self):
"""
        List of all examples in this context and nested contexts.
"""
final_list = []
final_list.extend(self.examples)
for child_context in self.children_contexts:
final_list.extend(child_context.all_examples)
return final_list
@property
def hierarchy(self):
"""
Returns a list of all contexts in this hierarchy.
"""
        return list(reversed(self.parent_contexts)) + [self]
@property
def full_name(self):
"""
Full context name, including parent contexts.
"""
return ", ".join(str(context) for context in self.hierarchy)
@property
def skip(self):
"""
        True if this context or any parent context is tagged to be skipped.
"""
return any(context.__dict__["skip"] for context in self.hierarchy)
@property
def focus(self):
"""
        True if this context or any parent context is tagged to be focused.
"""
return any(context.__dict__["focus"] for context in self.hierarchy)
def __str__(self):
return self.name
def add_child_context(self, name, skip=False, focus=False):
"""
Creates a nested context below self.
"""
if name in [context.name for context in self.children_contexts]:
raise RuntimeError(self._SAME_CONTEXT_NAME_ERROR)
child_context = Context(name, parent_context=self, skip=skip, focus=focus)
self.children_contexts.append(child_context)
return child_context
def add_example(self, name, example_code, skip=False, focus=False):
"""
Add an example to this context.
"""
if name in [example.name for example in self.examples]:
raise RuntimeError("An example with the same name is already defined")
self.examples.append(
Example(name, code=example_code, context=self, skip=skip, focus=focus)
)
return self.examples[-1]
def has_attribute(self, name):
return any(
[
name in self.context_data_methods.keys(),
name in self.context_data_memoizable_attributes.keys(),
name in self._runtime_attributes,
]
)
def add_function(self, name, function_code):
"""
Add given function to example execution scope.
"""
if self.has_attribute(name):
raise AttributeError(
'Attribute "{}" already set for context "{}"'.format(name, self)
)
self.context_data_methods[name] = function_code
def register_runtime_attribute(self, name):
"""
Register name as a new runtime attribute, that can not be registered
again.
"""
if name in self._runtime_attributes:
raise AttributeError(
'Attribute "{}" already set for context "{}"'.format(name, self)
)
self._runtime_attributes.append(name)
def add_memoized_attribute(self, name, memoizable_code):
"""
Add given attribute name to execution scope, by lazily memoizing the return
value of memoizable_code().
"""
if self.has_attribute(name):
raise AttributeError(
'Attribute "{}" already set for context "{}"'.format(name, self)
)
self.context_data_memoizable_attributes[name] = memoizable_code
def add_shared_context(self, name, shared_context_code):
"""
Create a shared context.
"""
if name in self.shared_contexts:
raise RuntimeError("A shared context with the same name is already defined")
self.shared_contexts[name] = shared_context_code
def add_test_case(self, test_case, attr_name):
"""
Add around hooks to context from given unittest.TestCase class. Only
hooks such as setUp or tearDown will be called, no tests will be
included.
"""
def wrap_test_case(self, example):
def test_test_slide(_):
example()
def exec_body(ns):
ns.update({"test_test_slide": test_test_slide})
# Build a child class of given TestCase, with a defined test that
# will run TestSlide example.
test_slide_test_case = types.new_class(
"TestSlideTestCase", bases=(test_case,), exec_body=exec_body
)
# This suite will only contain TestSlide's example test.
test_suite = unittest.TestLoader().loadTestsFromName(
"test_test_slide", test_slide_test_case
)
setattr(self, attr_name, list(test_suite)[0])
result = _TestSlideTestResult()
test_suite(result=result)
if not result.wasSuccessful():
result.aggregated_exceptions.raise_correct_exception()
self.around_functions.append(wrap_test_case)
def reset():
"""
Clear all defined contexts and hooks.
"""
Context.all_top_level_contexts.clear()
class TestCase(unittest.TestCase):
"""
A subclass of unittest.TestCase that adds TestSlide's features.
"""
def setUp(self):
testslide.mock_callable.register_assertion = lambda assertion: self.addCleanup(
assertion
)
self.addCleanup(testslide.mock_callable.unpatch_all_callable_mocks)
self.addCleanup(testslide.mock_constructor.unpatch_all_constructor_mocks)
self.addCleanup(testslide.patch_attribute.unpatch_all_mocked_attributes)
super(TestCase, self).setUp()
@staticmethod
def mock_callable(*args, **kwargs):
return testslide.mock_callable.mock_callable(*args, **kwargs)
@staticmethod
def mock_async_callable(*args, **kwargs):
return testslide.mock_callable.mock_async_callable(*args, **kwargs)
@staticmethod
def mock_constructor(*args, **kwargs):
return testslide.mock_constructor.mock_constructor(*args, **kwargs)
@staticmethod
def patch_attribute(*args, **kwargs):
return testslide.patch_attribute.patch_attribute(*args, **kwargs)
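# Illustrative sketch (added; `backend` and its `fetch` function are
# hypothetical). Subclassing testslide.TestCase gives plain unittest tests
# access to mock_callable and friends, with mock assertions registered through
# addCleanup in setUp above:
#
#   class FetchTest(TestCase):
#       def test_fetch_is_called(self):
#           self.mock_callable(backend, "fetch") \
#               .for_call("users") \
#               .to_return_value([]) \
#               .and_assert_called_once()
#           self.assertEqual(backend.fetch("users"), [])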
|
py | 7df823f27620d87e789022b705206ddc6195bdca | import glob
import os
import re
from importlib import import_module
from unwind.models.meta import MigrateMeta
from malibu.util import log
from malibu.util.decorators import function_marker, function_registrator
modules = glob.glob(os.path.dirname(__file__) + "/*.py")
__all__ = [os.path.basename(f)[:-3] for f in modules
if not os.path.basename(f).startswith('_') and
not f.endswith('__init__.py') and os.path.isfile(f)]
__UPGRADE__ = []
__DOWNGRADE__ = []
MIGRATION_ORDERING_RGX = re.compile(r"^(?P<order>[\d]+)(?:.*)$")
""" upgrade and downgrade are function decorators that designate a migration
function as a database upgrader or a database downgrader. A migration
function should take one argument, which would be an instance of
peewee's playhouse.migrate.SchemaMigrator.
"""
__upgrade_reg = function_registrator(__UPGRADE__)
__downgrade_reg = function_registrator(__DOWNGRADE__)
__upgrade_mark = function_marker("mig_type", "upgrade")
__downgrade_mark = function_marker("mig_type", "downgrade")
def upgrade(func):
__upgrade_reg(func)
__upgrade_mark(func)
return func
def downgrade(func):
__downgrade_reg(func)
__downgrade_mark(func)
return func
def load_migrations():
""" Loads all migration modules.
Migration functions should be defined with @upgrade and @downgrade.
Only labelled functions will be executed with the specified migration
case!
Migration functions will be loaded into __UPGRADE__ or __DOWNGRADE__
depending on how they are marked. Because of the way migrations are
marked, stored, and loaded, this function doesn't return a list of
migration functions, but returns a list of migration modules.
"""
LOG = log.LoggingDriver.find_logger()
migrations = []
for migration in __all__:
LOG.info("Checking for migrations in {}.{}".format(
__package__, migration))
module = import_module("{}.{}".format(__package__, migration))
order_match = MIGRATION_ORDERING_RGX.match(migration)
if not order_match:
LOG.warning("No leading migration number found in '%s', skipping"
% (migration))
continue
mig_num = order_match.groups(
order_match.groupdict({"order": None})["order"])[0]
if not mig_num:
LOG.warning("No leading migration number found in '%s', skipping"
% (migration))
continue
migrations.append(module)
mig_num = int(mig_num)
for obj_n in dir(module):
obj = getattr(module, obj_n)
if hasattr(obj, "mig_type"):
# This function is a migration. Tag it!
obj.mig_order = mig_num
LOG.info("%s migration (%s) loaded from %s (order: %s)"
% (obj.mig_type, obj.__name__, module.__name__,
mig_num))
else:
continue
return migrations
def run_migrations(migrator, migrations, delete_meta=False):
LOG = log.LoggingDriver.find_logger()
ordered_migrations = sorted(
migrations,
key=lambda item: item.mig_order)
for mig in ordered_migrations:
try:
mig_meta = MigrateMeta.get(
MigrateMeta.upgrade_number == mig.mig_order)
except:
mig_meta = None
if delete_meta and mig_meta:
try:
mig_meta.delete()
except:
LOG.info("Could not delete migration meta: %s %s"
% (mig.__name__, mig.mig_order))
elif not delete_meta and not mig_meta:
mig_meta = MigrateMeta.create(
upgrade_number=mig.mig_order)
mig_meta.save()
elif not delete_meta and mig_meta:
LOG.info("Can't perform migration, meta for migration %s "
"already exists (migration has already been applied)"
% (mig.mig_order))
continue
elif delete_meta and not mig_meta:
LOG.info("Not going to perform downgrade migration, migration "
"meta does not exist: %s" % (mig.mig_order))
continue
try:
mig(migrator)
LOG.info("Finished migration: %s (num: %s)"
% (mig.__name__, mig.mig_order))
except Exception:
LOG.error("An error occured while running a migration (order: %s)"
% (mig.mig_order))
raise
def migrate_single(migrator, migrate_action, migrate_num):
""" Runs a single migration specified by migrate_num.
migrate_action should be a string, either "upgrade" or
"downgrade".
"""
LOG = log.LoggingDriver.find_logger()
mig_list = []
if migrate_action == "upgrade":
mig_list = __UPGRADE__
elif migrate_action == "downgrade":
mig_list = __DOWNGRADE__
else:
LOG.error("Invalid migration action! Only 'upgrade' and 'downgrade' "
"are allowed.")
return False
    # Use a concrete list (not a lazy filter object) so len() and the loop
    # below behave the same on Python 2 and Python 3.
    migrations = [mig for mig in mig_list
                  if mig.mig_order == migrate_num]
if len(migrations) == 0:
LOG.error("No migrations found for migration number %s"
% (migrate_num))
return False
try:
mig_meta = MigrateMeta.get(
MigrateMeta.upgrade_number == migrate_num)
except:
mig_meta = None
if migrate_action == "downgrade" and mig_meta:
try:
mig_meta.delete()
except:
LOG.info("Could not delete migration meta: %s %s"
                     % (mig_meta, migrate_num))
elif migrate_action == "upgrade" and not mig_meta:
mig_meta = MigrateMeta.create(
upgrade_number=migrate_num)
mig_meta.save()
elif migrate_action == "upgrade" and mig_meta:
LOG.info("Can't perform migration, meta for migration %s "
"already exists (migration has already been applied)"
% (migrate_num))
return False
elif migrate_action == "downgrade" and not mig_meta:
LOG.info("Not going to perform downgrade migration, migration "
"meta does not exist: %s" % (migrate_num))
return False
for mig in migrations:
try:
mig(migrator)
LOG.info("Finished migration: %s (num: %s)"
% (mig.__name__, mig.mig_order))
except Exception:
LOG.error("An error occured while running a migration (order: %s)"
% (mig.mig_order))
raise
return True
migrate_upgrades = lambda migrator: run_migrations(
migrator, __UPGRADE__, delete_meta=False)
migrate_downgrades = lambda migrator: run_migrations(
migrator, __DOWNGRADE__, delete_meta=True)
|
py | 7df824522cddccb09c82b7c9b5b124aff9b6c0c2 | #!/usr/bin/python
##############################################################################
# Copyright Gimpel Software LLC 2017-2019. All rights reserved.
# Confidential and proprietary. No part of this file may be redistributed
# without express written permission of Gimpel Software LLC.
#
# This file is provided by Gimpel Software LLC (https://www.gimpel.com) for
# use exclusively with PC-lint Plus. Redistribution to and use by licensed
# users is permitted. Any such redistribution must preserve this notice and,
# if the redistributed file has been modified, provide notice that the file
# has been modified from the original.
##############################################################################
from __future__ import print_function
import regex
import yaml
import os
import sys
import subprocess
import argparse
import string
import stat
import tempfile
import ntpath
from datetime import datetime
__version__ = "1.3.0"
def emit_note(text):
sys.stderr.write("Note: " + text)
def emit_warning(text):
sys.stderr.write("Warning: " + text)
def emit_error(text):
sys.stderr.write("Error: " + text)
sys.exit(1)
def makeHeaderGuardName(fname):
prefix = "GS_PCLP_"
fname = ntpath.basename(fname)
fname = regex.sub("[.-]", "_", fname)
fname = regex.sub("[^[:alnum:]_]", "", fname)
return prefix + fname.upper()
def processConfig(config_file):
try:
config = yaml.load(open(config_file), Loader=yaml.Loader);
return config
except yaml.YAMLError as exc:
emit_error("unable to parse configuration file '" + config_file + "': " + str(exc) + "\n")
except IOError as exc:
if not os.path.isabs(config_file):
# If we didn't find a config file specified with
# a relative path in the working directory, try
# again in this script's own directory.
script_dir = os.path.dirname(os.path.abspath(__file__))
abs_config = os.path.join(script_dir, config_file)
try:
config = yaml.load(open(abs_config), Loader=yaml.Loader);
return config
except yaml.YAMLError as exc_abs:
emit_error("unable to parse configuration file '" + abs_config + "': " + str(exc_abs) + "\n")
except IOError as exc_abs:
emit_error("unable to open configuration file '" + config_file + "' in the working directory, '" +
os.getcwd() + "', nor in the script directory, '" + script_dir + "': " + str(exc) + ", " + str(exc_abs) + "\n")
emit_error("unable to open configuration file '" + config_file + "': " + str(exc) + "\n")
def runCommand(command, prog_input=None):
# Run a command and return the collected stdout, stderr, and return value
# Command should be list containing the executable and any arguments
# prog_input, if provided, is sent to the stdin stream of the program.
try:
if prog_input is None:
child_process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
std_output, std_error = child_process.communicate()
exit_code = child_process.returncode
return std_output.decode('utf-8'), std_error.decode('utf-8'), exit_code
else:
child_process = subprocess.Popen(command, stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE)
std_output, std_error = child_process.communicate(input=prog_input.encode())
exit_code = child_process.returncode
return std_output.decode('utf-8'), std_error.decode('utf-8'), exit_code
except OSError as exc:
emit_error("unable to execute command '" + " ".join(command) + "': " + str(exc) + "\n")
def listSupportedCompilers(config):
# Dump the supported compilers and their descriptions to stdout.
print("{:20}{}".format("Compiler Family", "Description"))
print("{:20}{}".format("---------------", "-----------"))
if 'compilers' in config:
for compiler in sorted(config['compilers']):
if 'description' in config['compilers'][compiler]:
#print(compiler, "-", config['compilers'][compiler]['description'])
print("{:20}{}".format(compiler, config['compilers'][compiler]['description']))
def getCompilerVersion(config, compiler, exe):
# Run the compiler and extract the version information from it
version_instructions = config.get('compilers', {}).get(compiler, {}).get('version', {})
if version_instructions is None:
emit_warning("don't know how to extract compiler version for this compiler\n")
return None
version = None
if 'command' in version_instructions:
if exe is not None:
# We need to launch the compiler to extract the version number
command = version_instructions['command']
command.insert(0, exe)
std_out, std_err, ret_val = runCommand(command)
if 'match_expr' in version_instructions:
result = regex.search(version_instructions['match_expr'], std_out if version_instructions.get('channel', '') == 'stdout' else std_err, regex.MULTILINE)
if result is None:
emit_warning("unable to extract compiler version\n")
return None
return result.group('version')
else:
emit_warning("need to specify compiler location with --compiler-bin to extract version information\n")
return None
return None
def createTemporaryFileWithContents(contents):
t = tempfile.NamedTemporaryFile(mode='w', delete=False)
t.file.write(contents)
return t.name
def generateSizeOptions(config, args):
# Find the instructions for generating PCLP size options for compiler
# from the configuration database and execute the instructions returning
# the result.
compiler = args.compiler
base_options = []
if args.compiler_options:
base_options = args.compiler_options.split()
exe = args.compiler_bin
size_instructions = config.get('compilers', {}).get(compiler, {}).get('size_options')
if size_instructions is None:
emit_note("size options for this compiler cannot be determined automatically, please set size options manually in the generated .lnt file\n")
return None
# The presence of 'command' means we should try to extract the information by
# invoking the compiler.
if 'command' in size_instructions and exe is not None:
# We need to launch the compiler to extract the size options
# The options we pass to the compiler (if any) are stored in 'command'
# The input sent to the compiler's stdin stream, if any, is in 'input'
command = [exe]
if base_options:
command = command + base_options
if size_instructions['command']:
command = command + size_instructions['command']
tempfilename = None
if 'tempfile' in size_instructions:
tempfilename = createTemporaryFileWithContents(size_instructions['tempfile'])
command.append(tempfilename)
compiler_input = size_instructions.get('input')
std_out, std_err, ret_val = runCommand(command, compiler_input)
if tempfilename:
os.remove(tempfilename)
# The 'channel' determines where in the output we should look for the size options
result_channel = size_instructions.get('channel')
result_text = std_err if result_channel == 'stderr' else std_out
# 'match_expr' is the pattern to use to extract the size options from
# the output channel. If this doesn't exist, we'll just return it all.
match_expr = size_instructions.get('match_expr')
if match_expr is None:
return result_text
match_result = regex.search(match_expr, result_text, regex.MULTILINE | regex.DOTALL)
if match_result is not None:
if 'size_options' in match_result.groupdict():
# The size options should be in a named capture group called 'size_options'
matched_portion = match_result.group('size_options')
if matched_portion is not None:
return matched_portion
if {'size_name', 'size_value'} <= set(match_result.groupdict()):
matched_names = match_result.captures('size_name')
matched_values = match_result.captures('size_value')
if len(matched_names) == len(matched_values):
size_option_list = zip(matched_names, matched_values)
size_options = []
for size_name, size_value in size_option_list:
size_options.append('-' + size_name + size_value)
return " ".join(size_options)
if 'fallback_values' in size_instructions:
return size_instructions.get('fallback_values')
if 'command' in size_instructions and exe is None:
emit_warning("size options could not be extracted because the compiler binary was not provided, please set size options manually in the generated .lnt file or rerun with the --compiler-bin option\n")
return None
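# Illustrative sketch (added) of the YAML shape generateSizeOptions consumes.
# The key names mirror the lookups in the function above, but the compiler
# name, flag, and values below are hypothetical and are not taken from a
# shipped pclp_config.yaml:
#
#   compilers:
#     mycc:
#       size_options:
#         command: ['--show-sizes']
#         channel: stdout
#         match_expr: '(?P<size_options>-s.*)'
#         fallback_values: '-si4 -sl4 -sp8'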
def generateIncludeOptions(config, args):
# Find the built-in compiler include paths, typically used to find standard
# and system headers. These can reside in 'cpp_include_paths', 'c_include_paths',
    # and 'include_paths'. We'll process all we find, dedupe them, and put them in
# the order listed above.
compiler = args.compiler
base_options = args.compiler_options.split()
exe = args.compiler_bin
include_paths = config.get('compilers', {}).get(compiler, {}).get('include_paths')
c_include_paths = config.get('compilers', {}).get(compiler, {}).get('c_include_paths')
cpp_include_paths = config.get('compilers', {}).get(compiler, {}).get('cpp_include_paths')
all_include_paths = []
if cpp_include_paths is not None:
all_include_paths.append(cpp_include_paths)
if c_include_paths is not None:
all_include_paths.append(c_include_paths)
if include_paths is not None:
all_include_paths.append(include_paths)
if len(all_include_paths) == 0:
return None
found_paths = []
for include_path in all_include_paths:
result_text = ''
if 'command' in include_path:
if not exe:
emit_warning("unable to extract include path information from compiler, use --compiler-bin to specify compiler locations\n")
return None
# We'll be invoking the compiler to paths
command = [exe]
if base_options:
command.extend(base_options)
command.extend(include_path['command'])
compiler_input = include_path.get('input')
std_out, std_err, ret_val = runCommand(command, compiler_input)
# The 'channel' determines where in the output we should look for the include paths
result_channel = include_path.get('channel')
result_text = std_err if result_channel == 'stderr' else std_out
elif 'env_var' in include_path:
result_text = os.environ.get(include_path['env_var'], '')
# 'match_expr' is the pattern to use to extract the include paths from
# the output channel. If this doesn't exist, we'll just continue.
match_expr = include_path.get('match_expr')
if match_expr is None:
continue
match_result = regex.search(match_expr, result_text, regex.MULTILINE | regex.DOTALL)
if match_result is not None:
# The paths should be in one or more named capture groups called 'include_dir'
matched_portions = match_result.captures('include_dir')
if matched_portions is not None:
for matched_portion in matched_portions:
matched_portion = matched_portion.strip()
matched_portion = regex.sub(r'\s+', " ", matched_portion)
if matched_portion not in found_paths:
found_paths.append(matched_portion)
return found_paths
def defaultIgnoredMacroNames():
ignored_names = ( '_Pragma', '__BASE_FILE__', '__COUNTER__', '__DATE__',
'__FILE__', '__INCLUDE_LEVEL__', '__LINE__', '__TIMESTAMP__',
'__TIME__', '__VA_ARGS__', '__cplusplus', '__has_attribute', '__has_builtin',
'__has_extension', '__has_feature', '__has_include',
'__has_include_next', '__has_warning', '__is_identifier', 'and',
'and_eq', 'bitand', 'bitor', 'compl', 'define', 'defined', 'not',
'not_eq', 'or', 'or_eq', 'xor', 'xor_eq')
return ignored_names
def extractPotentialMacros(filename):
"Extract and return a uniq list of potential macro names from file."
ignored_names = defaultIgnoredMacroNames()
names = set()
try:
with open(filename, "rb") as f:
s = ""
            # Decode so iteration yields one character at a time on Python 3
            # as well as Python 2; latin-1 never raises and identifier
            # characters are ASCII anyway.
            for c in f.read().decode("latin-1"):
                if (c in string.ascii_letters + '_') or (s and c in string.digits):
s += c
continue
elif s:
if s not in ignored_names:
names.add(s)
s = ""
except IOError as exc:
emit_error("unable to scavenge macros from file '" + filename + "': " + str(exc) + "\n")
return names
def createScavengeData(names):
# Produce scavenger data for preprocessing
scavenge_data = ''
for name in names:
scavenge_data += ("#ifdef %s\n-d%s{%s}\n#endif\n" % (name, name, name))
return scavenge_data
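# Illustrative note (added): for a scavenged name like FOO (hypothetical),
# createScavengeData emits a guarded probe and extractScavengeResults later
# turns the lines that survive preprocessing back into #define directives:
#
#   #ifdef FOO
#   -dFOO{FOO}
#   #endif
#
# After preprocessing, a surviving line such as "-dFOO{1}" becomes
# "#define FOO 1".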
def extractScavengeResults(output):
# Remove anything except -d options from preprocessed scavenger output.
good_lines = []
good_pattern = regex.compile("^\s*-d")
for line in output.split('\n'):
if good_pattern.match(line):
# Convert to #define
line = regex.sub("^\s*-d(.*?)\{(.*)\}$", "#define \\1 \\2", line)
good_lines.append(line)
return good_lines
def generateMacroDefinitions(instructions, args, base_options):
# There are several ways macro definitions may be generated:
# 'command' - via compiler invocation and pattern matching
# 'definitions' - explicitly specified
# 'scavenge' - using the macro scavenger method
#
# For 'command', the macro definitions are expected to appear in the
# output of the invocation, either in 'stdout' or 'stderr' as indicated
# by 'channel'. 'match_expr' will be used to match the definitions, if
# present, otherwise the output will be used as-is.
#
# For 'definitions', a list of lists is expected where the first item
# in each list is the definition name, including parameter list for
# function-like macros, and the second item is the definition, or null
# for no definition. E.g.:
#
# [['A', 1], ['B', ''], ['C', null], ['f(a,b)', '(a + b)']]
#
# results in the definitions:
#
# #define A 1
# #define B
# #define C
# #define f(a,b) (a + b)
#
# For 'scavenge', 'scavenge_files' and/or 'scavenge_dirs', provided
# on the command line, are used to search for possible macros.
# A scavenger file is built and passed to the compiler using the
# preprocessor to expand defined macros and the output is exhumed
# to dig out and build a macro list.
#
# In all cases, a list of macro definitions, one per line, is returned.
exe = args.compiler_bin
scavenging = args.scavenge_files or args.scavenge_dirs
if scavenging and 'scavenge_command' in instructions:
scavenge_files = set()
if args.scavenge_files:
scavenge_files.update(args.scavenge_files)
if args.scavenge_dirs:
for scavenge_dir in args.scavenge_dirs:
for folder, subs, files in os.walk(scavenge_dir):
for filename in files:
if args.scavenge_pattern:
if not regex.match(args.scavenge_pattern, filename):
continue
full_path = os.path.join(folder, filename)
stat_info = os.stat(full_path)
if stat.S_ISREG(stat_info.st_mode):
scavenge_files.add(full_path)
potential_macros = set()
for scavenge_file in scavenge_files:
potential_macros.update(extractPotentialMacros(scavenge_file))
scavenge_data = createScavengeData(potential_macros)
command = [exe] + instructions['scavenge_command']
compiler_input = scavenge_data
std_out, std_err, ret_val = runCommand(command, compiler_input)
result_channel = instructions.get('channel')
result_text = std_err if result_channel == 'stderr' else std_out
return "\n".join(extractScavengeResults(result_text)) + "\n"
if 'command' in instructions:
if exe is None:
emit_warning("unable to extract macro definitions from compiler, use --compiler-bin to specify compiler locations\n")
else:
command = [exe]
if base_options:
command.extend(base_options)
command.extend(instructions['command'])
tempfilename = None
if 'tempfile' in instructions:
tempfilename = createTemporaryFileWithContents(instructions['tempfile'])
command.append(tempfilename)
compiler_input = instructions.get('input')
std_out, std_err, ret_val = runCommand(command, compiler_input)
if tempfilename:
os.remove(tempfilename)
result_channel = instructions.get('channel')
result_text = std_err if result_channel == 'stderr' else std_out
# 'match_expr' is the pattern to use to extract the macros from
# the output channel.
match_expr = instructions.get('match_expr')
if match_expr is None:
return result_text
match_result = regex.search(match_expr, result_text, regex.MULTILINE|regex.DOTALL)
if match_result:
ignored_names = defaultIgnoredMacroNames()
macros = match_result.group('macros')
macros_to_keep = []
for md in macros.splitlines():
m = regex.match(r'\s*#define\s+(\w+)', md)
if not m or m.group(1) in ignored_names:
continue
macros_to_keep.append(md)
return "\n".join(macros_to_keep) + "\n"
if 'definitions' in instructions:
        macro_defs_str = ''
        for definition in instructions['definitions']:
            macro_name, macro_def = definition
            if macro_def is None:
                # A null definition (e.g. ['C', null]) yields a bare "#define C".
                macro_defs_str += '#define ' + macro_name + "\n"
            else:
                macro_defs_str += '#define ' + macro_name + ' ' + str(macro_def) + "\n"
return macro_defs_str
return None
def generateDecls(config, args):
# Find the built-in compiler decls. We can have 'c_decls', 'cpp_decls',
# and 'decls'.
compiler = args.compiler
base_options = args.compiler_options.split()
exe = args.compiler_bin
decls = config.get('compilers', {}).get(compiler, {}).get('decls', {}).get('definitions')
c_decls = config.get('compilers', {}).get(compiler, {}).get('c_decls', {}).get('definitions')
cpp_decls = config.get('compilers', {}).get(compiler, {}).get('cpp_decls', {}).get('definitions')
all_decls = ''
if decls:
all_decls += "\n".join(decls)
if c_decls:
all_decls += '#ifndef __cplusplus\n' + "\n".join(c_decls) + '\n#endif\n'
if cpp_decls:
all_decls += '#ifdef __cplusplus\n' + "\n".join(cpp_decls) + '\n#endif\n'
return all_decls
def generateMacros(config, args):
# Find the built-in compiler macros. We can have 'c_macros', 'cpp_macros',
# and 'macros'.
compiler = args.compiler
base_options = args.compiler_options.split()
exe = args.compiler_bin
macros = config.get('compilers', {}).get(compiler, {}).get('macros')
c_macros = config.get('compilers', {}).get(compiler, {}).get('c_macros')
cpp_macros = config.get('compilers', {}).get(compiler, {}).get('cpp_macros')
all_macro_definitions = ''
if macros:
generic_macro_defs = generateMacroDefinitions(macros, args, args.compiler_options.split())
if generic_macro_defs:
all_macro_definitions += generic_macro_defs
if c_macros:
c_macro_defs = generateMacroDefinitions(c_macros, args, args.compiler_options.split() + args.compiler_c_options.split())
if c_macro_defs:
all_macro_definitions += '#ifndef __cplusplus\n' + c_macro_defs + '#endif\n'
if cpp_macros:
cpp_macro_defs = generateMacroDefinitions(cpp_macros, args, args.compiler_options.split() + args.compiler_cpp_options.split())
if cpp_macro_defs:
all_macro_definitions += '#ifdef __cplusplus\n' + cpp_macro_defs + '#endif\n'
return all_macro_definitions
def generateBaseOptions(config, args):
compiler = args.compiler
base_config = config.get('compilers', {}).get(compiler, {}).get('base_config', {})
base_options = ""
for key in sorted(base_config.keys()):
base_options += "// " + key.title() + "\n"
for option, annotation in base_config[key]:
base_options += option
if annotation:
base_options += " // " + annotation
base_options += "\n"
base_options += "\n"
return base_options
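# For illustration (hypothetical database entry, not shipped with pclp_config):
# a base_config such as
#   {"language": [["-std=c99", "assume C99"], ["+fdi", None]]}
# would be rendered by generateBaseOptions() roughly as:
#   // Language
#   -std=c99 // assume C99
#   +fdi
# i.e. one commented section per key and one option per line, each followed by
# its annotation when one is present.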
def generateCompilerConfig(config, args):
    if 'compilers' not in config:
emit_error("compiler database doesn't contain any compiler data\n")
compiler = args.compiler
compile_commands = args.compiler_options.split()
exe = args.compiler_bin
if compiler is None:
emit_error("no --compiler specified\n")
    if compiler not in config['compilers']:
emit_error("'" + compiler + "' is not recognized for automatic configuration; use the option '--list-compilers' to list compilers in the database (refer to the 'System Configuration' section in chapter 2 of the manual to configure an unknown compiler manually)\n")
compiler_entry = config['compilers'][compiler]
compiler_version = getCompilerVersion(config, compiler, exe)
intro_string = "/* Compiler configuration for %s %s.\n Using the options: %s \n Generated on %s with pclp_config version %s.\n */\n\n" % (compiler, compiler_version, " ".join(compile_commands), datetime.now().strftime('%Y-%m-%d %H:%M:%S'), __version__)
# Base configuration
base_string = ""
base_options = generateBaseOptions(config, args)
if base_options is None:
base_string = "// Unable to generate base compiler options.\n\n"
else:
base_string = "// Base Options\n" + base_options + "\n\n"
# Size Options
size_string = ""
size_options = generateSizeOptions(config, args)
if size_options is None:
emit_warning("unable to determine size options, these will need to be set manually in the generated .lnt file\n")
size_string = "// Unable to determine size options. \n\n"
else:
size_string = "// Size Options\n" + size_options + "\n\n"
# Built-in include directories
includes_string = ""
include_directories = generateIncludeOptions(config, args)
if include_directories is None:
emit_warning("unable to determine built-in include directories, these will need to be set manually in the generated .lnt file\n")
includes_string = "// Failed to extract include paths. \n"
else:
includes_string = "// Include Options\n"
for include_dir in include_directories:
includes_string += "--i\"" + include_dir.strip() + "\"\n"
includes_string += "\n"
# Built-in macros
builtin_macros = generateMacros(config, args)
builtin_decls = generateDecls(config, args)
# Custom compile commands
custom_options_string = "// Transformed compiler options\n"
while compile_commands:
transformations, options_consumed = handleCompilerOption(config, compiler, compile_commands)
if transformations:
custom_options_string += " ".join(transformations) + " // From compiler option(s): " + " ".join(compile_commands[:options_consumed]) + "\n"
compile_commands = compile_commands[options_consumed:]
custom_options_string += "\n"
if args.config_output_lnt_file:
with open(args.config_output_lnt_file, 'w') as f:
f.write(intro_string)
f.write(base_string)
f.write(custom_options_string)
f.write(size_string)
f.write(includes_string)
if args.config_output_header_file:
header_guard_macro = makeHeaderGuardName(args.config_output_header_file)
with open(args.config_output_header_file, 'w') as h:
h.write('#ifndef ' + header_guard_macro + "\n")
h.write('#define ' + header_guard_macro + "\n")
h.write(builtin_macros)
h.write(builtin_decls)
h.write('\n#endif /* ' + header_guard_macro + " */\n")
header_path = args.config_output_header_file
if args.header_option_use_enclosing_directory:
header_path = "%ENCLOSING_DIRECTORY%/" + header_path
header_string = '+libh(' + header_path + ')\n' + '-header(' + header_path + ')\n'
f.write(header_string)
else:
emit_warning("no --config-output-header-file specified\n")
else:
emit_warning("no --config-output-lnt-file specified\n")
def generateProjectConfig(config, args):
compiler = args.compiler
source_pattern = args.source_pattern
imposter_file = args.imposter_file
if not imposter_file:
emit_error("An imposter input file must be specified when using --generate-project-config, " +
"use --imposter-file to specify the imposter file\n")
imposter_contents = ''
with open(imposter_file) as f:
imposter_contents = '[' + ",".join(f.readlines()) + ']'
compile_commands = yaml.load(imposter_contents, Loader=yaml.Loader)
output_file = open(args.config_output_lnt_file, 'w')
for compile_command in compile_commands:
source_files = []
options_str = ''
while compile_command:
sf_match = regex.match(source_pattern, compile_command[0])
if sf_match:
source_files.append(compile_command[0])
compile_command.pop(0)
continue
transformations, options_consumed = handleCompilerOption(config, compiler, compile_command)
if transformations:
options_str += "\n".join(transformations) + "\n"
compile_command = compile_command[options_consumed:]
if source_files:
output_file.write("-env_push\n")
output_file.write(options_str)
for source_file in source_files:
output_file.write('"' + source_file + '"\n')
output_file.write("-env_pop\n\n")
output_file.close()
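# Illustrative note (hypothetical log contents): each line of the imposter file is
# expected to hold one logged compiler invocation as a YAML flow sequence, e.g.
#   [gcc, -I/usr/include, -DNDEBUG, src/main.c]
# generateProjectConfig() joins the lines into a single list of invocations, routes
# arguments matching --source-pattern to the source-file list, and transforms the
# remaining options through handleCompilerOption() below.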
def handleCompilerOption(config, compiler, option_list):
# Given a configuration and a compiler, attempt to decode the provided
# compiler option, mapping it to the corresponding PC-lint option if
# an appropriate transformation exists. Returns the transformation
# (or None) and the number of options consumed.
option_map = config.get('compilers', {}).get(compiler, {}).get('options')
if option_map is None or not option_list:
return (None, 1)
option_str = option_list[0]
# Find the longest matching option
best_match = None
best_match_size = 0
for candidate_option in option_map:
if option_str.startswith(candidate_option):
if len(candidate_option) > best_match_size:
best_match = candidate_option
best_match_size = len(candidate_option)
if best_match is None:
# We didn't recognize the option
return (None, 1)
found_option_map = option_map[best_match]
if found_option_map is None:
return (None, 1)
if 'transform' in found_option_map:
return ([found_option_map['transform']], 1)
if 'transforms' in found_option_map:
options_consumed = 1
while True:
replacements = []
for transform_pair in found_option_map['transforms']:
match_pattern, repl_pattern = transform_pair
replaced_option_str = option_str
if 'pre_transforms_replacements' in found_option_map:
for replacement_pair in found_option_map['pre_transforms_replacements']:
replaced_option_str = replaced_option_str.replace(replacement_pair[0], replacement_pair[1])
match_result = regex.match(match_pattern, replaced_option_str)
if match_result:
replacement = regex.sub(match_pattern, repl_pattern, replaced_option_str)
replacements.append(replacement)
if replacements:
return (replacements, options_consumed)
# Didn't match any of the transformation patterns. Add the next
# option to the option string to see if we can match with an arg.
if len(option_list) > options_consumed:
option_str += " " + option_list[options_consumed]
options_consumed += 1
else:
return (None, 1)
# Found an option but no transformations exist
return (None, 1)
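# Minimal sketch (hypothetical compiler-database entry, not shipped with
# pclp_config) showing how handleCompilerOption() maps a compiler flag to PC-lint
# options via the 'transform' and 'transforms' mechanisms handled above.
def _example_handle_compiler_option():
    example_config = {
        'compilers': {
            'examplecc': {
                'options': {
                    '-ansi': {'transform': '+fas'},                      # fixed one-to-one mapping
                    '-I': {'transforms': [[r'-I\s*(.*)', r'--i"\1"']]},  # pattern rewrite
                }
            }
        }
    }
    # Longest-prefix matching selects '-I', the transform rewrites the option,
    # and exactly one option from the list is consumed:
    return handleCompilerOption(example_config, 'examplecc', ['-I/usr/include', 'foo.c'])
    # -> (['--i"/usr/include"'], 1)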
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--list-compilers',
help='list the supported compilers',
default=False,
action='store_true')
parser.add_argument('--generate-compiler-config',
help='generate a customized compiler configuration',
default=False,
action='store_true')
parser.add_argument('--generate-project-config',
help='generate a customized project configuration',
default=False,
action='store_true')
parser.add_argument('--compiler',
help='the compiler that will be used to generate configurations',
type=str)
parser.add_argument('--compiler-bin',
help='the location of the compiler executable',
type=str)
parser.add_argument('--compiler-options',
help='base compiler options',
default='')
parser.add_argument('--compiler-c-options',
help='base C-specific compiler options',
default='')
parser.add_argument('--compiler-cpp-options',
help='base C++-specific compiler options',
default='')
parser.add_argument('--ignore-options',
help='compiler options that should be ignored (not transformed)',
nargs='+')
parser.add_argument('--compiler-database',
help='the name of the compiler database file',
default='compilers.yaml')
parser.add_argument('--repl',
help="enter compiler options and see the transformations that would be made",
default=False,
action='store_true')
parser.add_argument('--compiler-version',
help="show the version of the compiler being configured",
default=False,
action='store_true')
parser.add_argument('--source-pattern',
help="the pattern used to match project source files in compiler invocations",
                        default=r'.*\.(c|cpp)$',
type=str)
parser.add_argument('--imposter-file',
help="the file containing compiler invocations logged by the imposter program",
type=str)
parser.add_argument('--config-output-lnt-file',
help="the file to write the configuration to",
type=str)
parser.add_argument('--config-output-header-file',
help="the file to write supplemental configuration data to (macro definitions, etc)",
type=str)
parser.add_argument('--scavenge-files',
help="the list of files to attempt to extract macro information from",
action='append')
parser.add_argument('--scavenge-dirs',
help="the list of directories to recursively process files from to extract macro information from",
action='append')
parser.add_argument('--scavenge-pattern',
help="the regular expression pattern used to match filenames, excluding path, for macro extraction",
type=str)
parser.add_argument('--header-option-use-enclosing-directory',
help="use the built-in %%ENCLOSING_DIRECTORY%% environment variable to provide an 'absolute' path for the compiler configuration -header option",
default=False,
action='store_true')
args = parser.parse_args()
handled_task = False
if args.compiler_version:
config = processConfig(args.compiler_database)
print(getCompilerVersion(config, args.compiler, args.compiler_bin))
handled_task = True
if args.list_compilers:
config = processConfig(args.compiler_database)
listSupportedCompilers(config)
handled_task = True
if args.generate_compiler_config:
config = processConfig(args.compiler_database)
generateCompilerConfig(config, args)
handled_task = True
if args.generate_project_config:
config = processConfig(args.compiler_database)
generateProjectConfig(config, args)
handled_task = True
if args.repl:
config = processConfig(args.compiler_database)
handled_task = True
while True:
line = sys.stdin.readline()
if not line:
break
print(handleCompilerOption(config, args.compiler, line.strip().split()))
if not handled_task:
if args.imposter_file:
emit_warning("No work done as no task was requested, did you forget the --generate-project-config option?\n")
elif args.config_output_lnt_file or args.config_output_header_file:
emit_warning("No work done as no task was requested, did you forget the --generate-compiler-config option?\n")
else:
emit_warning("No work done as no task was requested, use --help for usage.\n")
if __name__ == '__main__':
try:
main()
except Exception as e:
emit_error(str(e) + "\n")
|
py | 7df82500d93407fdfb0b0b0ecb7b055d06b4cb13 | from django.db import models
class Emotion(models.Model):
"""An emotion that the user is capable of feeling at any time.
This is application-wide and not user-specific."""
name = models.CharField(max_length=50)
description = models.CharField(max_length=500, blank=True)
class Meta:
ordering = ["name"]
db_table = "emotion"
def __str__(self) -> str:
return self.name
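# Usage sketch (illustrative, assumes the app's migrations have been applied):
#   Emotion.objects.create(name="Joy", description="A feeling of great pleasure.")
#   Emotion.objects.all()   # returned in name order because of Meta.ordering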
|
py | 7df8267616b4e49b78bf72aecefc1aee44c93d9a | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'chengzhi'
import asyncio
import time
from datetime import datetime
from asyncio import gather
from inspect import isfunction
from typing import Optional, Union, Callable
from tqsdk import utils
from tqsdk.api import TqApi
from tqsdk.backtest import TqBacktest
from tqsdk.channel import TqChan
from tqsdk.datetime import _is_in_trading_time, _timestamp_nano_to_str
from tqsdk.diff import _get_obj
from tqsdk.lib.utils import _check_volume_limit, _check_direction, _check_offset, _check_volume, _check_price, \
_check_offset_priority
from tqsdk.tradeable import TqAccount, TqKq, TqSim
class TargetPosTaskSingleton(type):
"""
TargetPosTask 需要保证在每个账户下每个合约只有一个 TargetPosTask 实例。
当用户多次调用时,应该保证对于同一账户同一合约使用相同的参数构造,否则抛错。
在修改 TargetPosTask 构造参数时,同时应该修改 TargetPosTaskSingleton.__call__ 方法的参数,要确保其个数、名称、默认值和文档描述一致,\
这些参数才是实际传给 TargetPosTask.__init__ 方法的参数。
同时应该在 TargetPosTask 实例运行结束时释放相应的资源,_instances 需要情况对应的引用。
"""
# key 为 id(account) + '#' + symbol, 值为 TargetPosTask 实例。
_instances = {}
def __call__(cls, api, symbol, price="ACTIVE", offset_priority="今昨,开", min_volume=None, max_volume=None,
trade_chan=None, trade_objs_chan=None, account: Optional[Union[TqAccount, TqKq, TqSim]]=None, *args, **kwargs):
target_account = api._account._check_valid(account)
if target_account is None:
raise Exception(f"多账户模式下, 需要指定账户实例 account")
key = api._account._get_account_key(target_account) + "#" + symbol
if key not in TargetPosTaskSingleton._instances:
TargetPosTaskSingleton._instances[key] = super(TargetPosTaskSingleton, cls).__call__(api, symbol, price,
offset_priority,
min_volume, max_volume,
trade_chan,
trade_objs_chan,
target_account,
*args, **kwargs)
else:
instance = TargetPosTaskSingleton._instances[key]
if instance._offset_priority != offset_priority:
raise Exception("您试图用不同的 offset_priority 参数创建两个 %s 调仓任务, offset_priority参数原为 %s, 现为 %s" % (
symbol, instance._offset_priority, offset_priority))
if instance._price != price:
raise Exception("您试图用不同的 price 参数创建两个 %s 调仓任务, price参数原为 %s, 现为 %s" % (symbol, instance._price, price))
if instance._min_volume != min_volume:
raise Exception(f"您试图用不同的 min_volume 参数创建两个 {symbol} 调仓任务, min_volume 参数原为 {instance._min_volume}, 现为 {min_volume}")
if instance._max_volume != max_volume:
raise Exception(f"您试图用不同的 max_volume 参数创建两个 {symbol} 调仓任务, max_volume 参数原为 {instance._max_volume}, 现为 {max_volume}")
return TargetPosTaskSingleton._instances[key]
class TargetPosTask(object, metaclass=TargetPosTaskSingleton):
"""目标持仓 task, 该 task 可以将指定合约调整到目标头寸"""
def __init__(self, api: TqApi, symbol: str, price: Union[str, Callable[[str], Union[float, int]]] = "ACTIVE",
offset_priority: str = "今昨,开", min_volume: Optional[int] = None, max_volume: Optional[int] = None,
trade_chan: Optional[TqChan] = None, trade_objs_chan: Optional[TqChan] = None,
account: Optional[Union[TqAccount, TqKq, TqSim]] = None) -> None:
"""
创建目标持仓task实例,负责调整归属于该task的持仓 **(默认为整个账户的该合约净持仓)**.
**注意:**
1. TargetPosTask 在 set_target_volume 时并不下单或撤单, 它的下单和撤单动作, 是在之后的每次 wait_update 时执行的. 因此, **需保证 set_target_volume 后还会继续调用wait_update()** 。
2. 请勿在使用 TargetPosTask 的同时使用 insert_order() 函数, 否则将导致 TargetPosTask 报错或错误下单。
3. TargetPosTask 如果同时设置 min_volume(每笔最小下单手数),max_volume(每笔最大下单的手数)两个参数,表示采用 **大单拆分模式** 下单。
在 **大单拆分模式** 下,每次下单的手数为随机生成的正整数,值介于 min_volume、max_volume 之间。
具体说明:调用 set_target_volume 后,首先会根据目标持仓手数、开平仓顺序计算出,需要平今、平昨、开仓的目标下单手数及顺序。
+ 如果在调整持仓的目标下单手数小于 max_volume,则直接以目标下单手数下单。
+ 如果在调整持仓的目标下单手数大于等于 max_volume,则会以 min_volume、max_volume 之间的随机手数下一笔委托单,手数全部成交后,会接着处理剩余的手数;\
继续以随机手数下一笔委托单,全部成交后,继续处理剩余的手数,直至剩余手数小于 max_volume 时,直接以剩余手数下单。
当使用大单拆分模式下单时,必须同时填写 min_volume、max_volume,且需要满足 max_volume >= min_volume > 0。
Args:
api (TqApi): TqApi实例,该task依托于指定api下单/撤单
symbol (str): 负责调整的合约代码
price (str / Callable): [可选]下单方式, 默认为 "ACTIVE"。
* "ACTIVE":对价下单,在持仓调整过程中,若下单方向为买,对价为卖一价;若下单方向为卖,对价为买一价。
* "PASSIVE":排队价下单,在持仓调整过程中,若下单方向为买,对价为买一价;若下单方向为卖,对价为卖一价。
* Callable[[str], Union[float, int]]: 函数参数为下单方向,函数返回值是下单价格。如果返回 nan,程序会抛错。
offset_priority (str): [可选]开平仓顺序,昨=平昨仓,今=平今仓,开=开仓,逗号=等待之前操作完成
对于下单指令区分平今/昨的交易所(如上期所),按照今/昨仓的数量计算是否能平今/昨仓
                对于下单指令不区分平今/昨的交易所(如中金所),按照“先平当日新开仓,再平历史仓”的规则计算是否能平今/昨仓,如果这些交易所设置为"昨开"在有当日新开仓和历史仓的情况下,会自动跳过平昨仓进入到下一步
* "今昨,开" 表示先平今仓,再平昨仓,等待平仓完成后开仓,对于没有单向大边的品种避免了开仓保证金不足
* "今昨开" 表示先平今仓,再平昨仓,并开仓,所有指令同时发出,适合有单向大边的品种
* "昨开" 表示先平昨仓,再开仓,禁止平今仓,适合股指这样平今手续费较高的品种
* "开" 表示只开仓,不平仓,适合需要进行锁仓操作的品种
min_volume (int): [可选] **大单拆分模式下** 每笔最小下单的手数,默认不启用 **大单拆分模式**
max_volume (int): [可选] **大单拆分模式下** 每笔最大下单的手数,默认不启用 **大单拆分模式**
trade_chan (TqChan): [可选]成交通知channel, 当有成交发生时会将成交手数(多头为正数,空头为负数)发到该channel上
trade_objs_chan (TqChan): [可选]成交对象通知channel, 当有成交发生时会将成交对象发送到该channel上
account (TqAccount/TqKq/TqSim): [可选]指定发送下单指令的账户实例, 多账户模式下,该参数必须指定
**注意**
当 price 参数为函数类型时,该函数应该返回一个有效的价格值,应该避免返回 nan。以下为 price 参数是函数类型时的示例。
Example1::
# ... 用户代码 ...
quote = api.get_quote("SHFE.cu2012")
def get_price(direction):
# 在 BUY 时使用买一价加一档价格,SELL 时使用卖一价减一档价格
if direction == "BUY":
price = quote.bid_price1 + quote.price_tick
else:
price = quote.ask_price1 - quote.price_tick
# 如果 price 价格是 nan,使用最新价报单
if price != price:
price = quote.last_price
return price
target_pos = TargetPosTask(api, "SHFE.cu2012", price=get_price)
# ... 用户代码 ...
Example2::
# ... 用户代码 ...
quote_list = api.get_quote_list(["SHFE.cu2012","SHFE.au2012"])
def get_price_by_quote(quote):
def get_price(direction):
# 在 BUY 时使用买一价加一档价格,SELL 时使用卖一价减一档价格
if direction == "BUY":
price = quote["upper_limit"]
else:
price = quote.lower_limit
# 如果 price 价格是 nan,使用最新价报单
if price != price:
price = quote.last_price
return price
return get_price
for quote in quote_list:
target_pos_active_dict[quote.instrument_id] = TargetPosTask(api, quote.instrument_id, price=get_price_by_quote(quote))
# ... 用户代码 ...
Example3::
# 大单拆分模式用法示例
from tqsdk import TqApi, TqAuth, TargetPosTask
api = TqApi(auth=TqAuth("信易账户", "账户密码"))
position = api.get_position('SHFE.rb2106')
# 同时设置 min_volume、max_volume 两个参数,表示使用大单拆分模式
t = TargetPosTask(api, 'SHFE.rb2106', min_volume=2, max_volume=10)
t.set_target_volume(50)
while True:
api.wait_update()
if position.pos_long == 50:
break
api.close()
# 说明:
# 以上代码使用 TqSim 交易,开始时用户没有 SHFE.cu2012 合约的任何持仓,那么在 t.set_target_volume(50) 之后应该开多仓 50 手
# 根据用户参数,下单使用大单拆分模式,每次下单手数在 2~10 之间,打印出的成交通知可能是这样的:
# 2021-03-15 11:29:48 - INFO - 模拟交易成交记录
# 2021-03-15 11:29:48 - INFO - 时间: 2021-03-15 11:29:47.516138, 合约: SHFE.rb2106, 开平: OPEN, 方向: BUY, 手数: 7, 价格: 4687.000,手续费: 32.94
# 2021-03-15 11:29:48 - INFO - 时间: 2021-03-15 11:29:47.519699, 合约: SHFE.rb2106, 开平: OPEN, 方向: BUY, 手数: 8, 价格: 4687.000,手续费: 37.64
# 2021-03-15 11:29:48 - INFO - 时间: 2021-03-15 11:29:47.522848, 合约: SHFE.rb2106, 开平: OPEN, 方向: BUY, 手数: 10, 价格: 4687.000,手续费: 47.05
# 2021-03-15 11:29:48 - INFO - 时间: 2021-03-15 11:29:47.525617, 合约: SHFE.rb2106, 开平: OPEN, 方向: BUY, 手数: 8, 价格: 4687.000,手续费: 37.64
# 2021-03-15 11:29:48 - INFO - 时间: 2021-03-15 11:29:47.528151, 合约: SHFE.rb2106, 开平: OPEN, 方向: BUY, 手数: 7, 价格: 4687.000,手续费: 32.94
# 2021-03-15 11:29:48 - INFO - 时间: 2021-03-15 11:29:47.530930, 合约: SHFE.rb2106, 开平: OPEN, 方向: BUY, 手数: 7, 价格: 4687.000,手续费: 32.94
# 2021-03-15 11:29:48 - INFO - 时间: 2021-03-15 11:29:47.533515, 合约: SHFE.rb2106, 开平: OPEN, 方向: BUY, 手数: 3, 价格: 4687.000,手续费: 14.12
"""
if symbol.startswith("CZCE.CJ"):
raise Exception("红枣期货不支持创建 targetpostask、twap、vwap 任务,交易所规定该品种最小开仓手数为大于等于 4 手,这些函数还未支持该规则!")
if symbol.startswith("CZCE.ZC"):
raise Exception("动力煤期货不支持创建 targetpostask、twap、vwap 任务,交易所规定该品种最小开仓手数为大于等于 4 手,这些函数还未支持该规则!")
if symbol.startswith("CZCE.WH"):
raise Exception("强麦期货不支持创建 targetpostask、twap、vwap 任务,交易所规定该品种最小开仓手数为大于等于 10 手,这些函数还未支持该规则!")
super(TargetPosTask, self).__init__()
self._api = api
self._account = api._account._check_valid(account)
self._symbol = symbol
self._exchange = symbol.split(".")[0]
self._offset_priority = _check_offset_priority(offset_priority)
self._min_volume, self._max_volume = _check_volume_limit(min_volume, max_volume)
self._price = _check_price(price)
self._pos = self._account.get_position(self._symbol)
self._pos_chan = TqChan(self._api, last_only=True)
self._trade_chan = trade_chan
self._trade_objs_chan = trade_objs_chan
self._task = self._api.create_task(self._target_pos_task())
self._time_update_task = self._api.create_task(self._update_time_from_md()) # 监听行情更新并记录当时本地时间的task
self._local_time_record = time.time() - 0.005 # 更新最新行情时间时的本地时间
self._local_time_record_update_chan = TqChan(self._api, last_only=True) # 监听 self._local_time_record 更新
def set_target_volume(self, volume: int) -> None:
"""
设置目标持仓手数
Args:
volume (int): 目标持仓手数,正数表示多头,负数表示空头,0表示空仓
Example1::
# 设置 rb1810 持仓为多头5手
from tqsdk import TqApi, TqAuth, TargetPosTask
api = TqApi(auth=TqAuth("信易账户", "账户密码"))
target_pos = TargetPosTask(api, "SHFE.rb1810")
target_pos.set_target_volume(5)
while True:
# 需在 set_target_volume 后调用wait_update()以发出指令
api.wait_update()
Example2::
# 多账户模式下使用 TargetPosTask
from tqsdk import TqApi, TqMultiAccount, TqAuth, TargetPosTask
account1 = TqAccount("H海通期货", "123456", "123456")
account2 = TqAccount("H宏源期货", "654321", "123456")
api = TqApi(TqMultiAccount([account1, account2]), auth=TqAuth("信易账户", "账户密码"))
symbol1 = "DCE.m2105"
symbol2 = "DCE.i2101"
# 多账户模式下, 调仓工具需要指定账户实例
target_pos1 = TargetPosTask(api, symbol1, account=account1)
target_pos2 = TargetPosTask(api, symbol2, account=account2)
target_pos1.set_target_volume(30)
target_pos2.set_target_volume(80)
while True:
api.wait_update()
api.close()
"""
if self._task.done():
raise Exception("已经结束的 TargetPosTask 实例不可以再设置手数。")
self._pos_chan.send_nowait(int(volume))
def _get_order(self, offset, vol, pending_frozen):
"""
根据指定的offset和预期下单手数vol, 返回符合要求的委托单最大报单手数
:param offset: "昨" / "今" / "开"
:param vol: int, <0表示SELL, >0表示BUY
:return: order_offset: "CLOSE"/"CLOSETODAY"/"OPEN"; order_dir: "BUY"/"SELL"; "order_volume": >=0, 报单手数
"""
if vol > 0: # 买单(增加净持仓)
order_dir = "BUY"
pos_all = self._pos.pos_short
else: # 卖单
order_dir = "SELL"
pos_all = self._pos.pos_long
if offset == "昨":
order_offset = "CLOSE"
if self._exchange == "SHFE" or self._exchange == "INE":
if vol > 0:
pos_all = self._pos.pos_short_his
else:
pos_all = self._pos.pos_long_his
frozen_volume = sum([order.volume_left for order in self._pos.orders.values() if
not order.is_dead and order.offset == order_offset and order.direction == order_dir])
else:
frozen_volume = pending_frozen + sum([order.volume_left for order in self._pos.orders.values() if
not order.is_dead and order.offset != "OPEN" and order.direction == order_dir])
# 判断是否有未冻结的今仓手数: 若有则不平昨仓
if (self._pos.pos_short_today if vol > 0 else self._pos.pos_long_today) - frozen_volume > 0:
pos_all = frozen_volume
order_volume = min(abs(vol), max(0, pos_all - frozen_volume))
elif offset == "今":
if self._exchange == "SHFE" or self._exchange == "INE":
order_offset = "CLOSETODAY"
if vol > 0:
pos_all = self._pos.pos_short_today
else:
pos_all = self._pos.pos_long_today
frozen_volume = sum([order.volume_left for order in self._pos.orders.values() if
not order.is_dead and order.offset == order_offset and order.direction == order_dir])
else:
order_offset = "CLOSE"
frozen_volume = pending_frozen + sum([order.volume_left for order in self._pos.orders.values() if
not order.is_dead and order.offset != "OPEN" and order.direction == order_dir])
pos_all = self._pos.pos_short_today if vol > 0 else self._pos.pos_long_today
order_volume = min(abs(vol), max(0, pos_all - frozen_volume))
elif offset == "开":
order_offset = "OPEN"
order_volume = abs(vol)
else:
order_offset = ""
order_volume = 0
return order_offset, order_dir, order_volume
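    # Illustrative example (hypothetical position state, SHFE contract, no live
    # close orders): with pos_long_today == 3 and pos_long_his == 2, reducing the
    # net position by 5 splits as _get_order("今", -5, 0) -> ("CLOSETODAY", "SELL", 3)
    # and _get_order("昨", -2, 0) -> ("CLOSE", "SELL", 2); the "开" step then covers
    # whatever volume remains after the closes.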
async def _update_time_from_md(self):
"""监听行情更新并记录当时本地时间的task"""
try:
chan = TqChan(self._api, last_only=True)
self._quote = await self._api.get_quote(self._symbol)
self._api.register_update_notify(self._quote, chan) # quote有更新时: 更新记录的时间
if isinstance(self._api._backtest, TqBacktest):
# 回测情况下,在收到回测时间有更新的时候,也需要更新记录的时间
self._api.register_update_notify(_get_obj(self._api._data, ["_tqsdk_backtest"]), chan)
async for _ in chan:
self._local_time_record = time.time() - 0.005 # 更新最新行情时间时的本地时间
self._local_time_record_update_chan.send_nowait(True) # 通知记录的时间有更新
finally:
await chan.close()
async def _target_pos_task(self):
"""负责调整目标持仓的task"""
all_tasks = []
try:
self._quote = await self._api.get_quote(self._symbol)
async for target_pos in self._pos_chan:
# lib 中对于时间判断的方案:
# 如果当前时间(模拟交易所时间)不在交易时间段内,则:等待直到行情更新
# 行情更新(即下一交易时段开始)后:获取target_pos最新的目标仓位, 开始调整仓位
# 如果不在可交易时间段内(回测时用 backtest 下发的时间判断,实盘使用 quote 行情判断): 等待更新
while True:
if isinstance(self._api._backtest, TqBacktest):
cur_timestamp = self._api._data.get("_tqsdk_backtest", {}).get("current_dt", float("nan"))
cur_dt = _timestamp_nano_to_str(cur_timestamp)
time_record = float("nan")
else:
cur_dt = self._quote["datetime"]
time_record = self._local_time_record
if _is_in_trading_time(self._quote, cur_dt, time_record):
break
await self._local_time_record_update_chan.recv()
target_pos = self._pos_chan.recv_latest(target_pos) # 获取最后一个target_pos目标仓位
# 确定调仓增减方向
delta_volume = target_pos - self._pos.pos
                pending_frozen = 0
for each_priority in self._offset_priority + ",": # 按不同模式的优先级顺序报出不同的offset单,股指(“昨开”)平昨优先从不平今就先报平昨,原油平今优先("今昨开")就报平今
if each_priority == ",":
await gather(*[each._task for each in all_tasks])
                        pending_frozen = 0
all_tasks = []
continue
                    order_offset, order_dir, order_volume = self._get_order(each_priority, delta_volume, pending_frozen)
if order_volume == 0: # 如果没有则直接到下一种offset
continue
elif order_offset != "OPEN":
                        pending_frozen += order_volume
order_task = InsertOrderUntilAllTradedTask(self._api, self._symbol, order_dir, offset=order_offset,
volume=order_volume, min_volume=self._min_volume,
max_volume=self._max_volume, price=self._price,
trade_chan=self._trade_chan,
trade_objs_chan=self._trade_objs_chan,
account=self._account)
all_tasks.append(order_task)
delta_volume -= order_volume if order_dir == "BUY" else -order_volume
finally:
# 执行 task.cancel() 时, 删除掉该 symbol 对应的 TargetPosTask 实例
# self._account 类型为 TqSim/TqKq/TqAccount,都包括 _account_key 变量
TargetPosTaskSingleton._instances.pop(self._account._account_key + "#" + self._symbol, None)
await self._pos_chan.close()
self._time_update_task.cancel()
await asyncio.gather(*([t._task for t in all_tasks] + [self._time_update_task]), return_exceptions=True)
def cancel(self):
"""
取消当前 TargetPosTask 实例,会将该实例已经发出但还是未成交的委托单撤单,并且如果后续调用此实例的 set_target_volume 函数会报错。
任何时刻,每个账户下一个合约只能有一个 TargetPosTask 实例,并且其构造参数不能修改。
如果对于同一个合约要构造不同参数的 TargetPosTask 实例,需要调用 cancel 方法销毁,才能创建新的 TargetPosTask 实例
Example1::
from datetime import datetime, time
from tqsdk import TqApi, TargetPosTask
api = TqApi(auth=TqAuth("信易账户", "账户密码"))
quote = api.get_quote("SHFE.rb2110")
target_pos_passive = TargetPosTask(api, "SHFE.rb2110", price="PASSIVE")
while datetime.strptime(quote.datetime, "%Y-%m-%d %H:%M:%S.%f").time() < time(14, 50):
api.wait_update()
# ... 策略代码 ...
# 取消 TargetPosTask 实例
target_pos_passive.cancel()
while not target_pos_passive.is_finished(): # 此循环等待 target_pos_passive 处理 cancel 结束
api.wait_update() # 调用wait_update(),会对已经发出但还是未成交的委托单撤单
# 创建新的 TargetPosTask 实例
target_pos_active = TargetPosTask(api, "SHFE.rb2110", price="ACTIVE")
target_pos_active.set_target_volume(0) # 平所有仓位
while True:
api.wait_update()
# ... 策略代码 ...
api.close()
"""
self._task.cancel()
def is_finished(self) -> bool:
"""
返回当前 TargetPosTask 实例是否已经结束。即如果后续调用此实例的 set_target_volume 函数会报错,此实例不会再下单或者撤单。
Returns:
bool: 当前 TargetPosTask 实例是否已经结束
"""
return self._task.done()
class InsertOrderUntilAllTradedTask(object):
"""追价下单task, 该task会在行情变化后自动撤单重下,直到全部成交
(注:此类主要在tqsdk内部使用,并非简单用法,不建议用户使用)"""
def __init__(self, api, symbol, direction, offset, volume, min_volume: Optional[int] = None,
max_volume: Optional[int] = None, price: Union[str, Callable[[str], Union[float, int]]] = "ACTIVE",
trade_chan=None, trade_objs_chan=None, account: Optional[Union[TqAccount, TqKq, TqSim]] = None):
"""
创建追价下单task实例
Args:
api (TqApi): TqApi实例,该task依托于指定api下单/撤单
symbol (str): 拟下单的合约symbol, 格式为 交易所代码.合约代码, 例如 "SHFE.cu1801"
direction (str): "BUY" 或 "SELL"
offset (str): "OPEN", "CLOSE" 或 "CLOSETODAY"
volume (int): 需要下单的手数
min_volume (int): [可选] **大单拆分模式下** 每笔最小下单的手数,默认不启用 **大单拆分模式**
max_volume (int): [可选] **大单拆分模式下** 每笔最大下单的手数,默认不启用 **大单拆分模式**
price (str / Callable): [可选]下单方式, 默认为 "ACTIVE"。
* "ACTIVE":对价下单,在持仓调整过程中,若下单方向为买,对价为卖一价;若下单方向为卖,对价为买一价。
* "PASSIVE":对价下单,在持仓调整过程中,若下单方向为买,对价为买一价;若下单方向为卖,对价为卖一价。
* Callable[[str], Union[float, int]]: 函数参数为下单方向,函数返回值是下单价格。如果返回 nan,程序会抛错。
trade_chan (TqChan): [可选]成交通知channel, 当有成交发生时会将成交手数(多头为正数,空头为负数)发到该channel上
trade_objs_chan (TqChan): [可选]成交对象通知channel, 当有成交发生时会将成交对象发送到该channel上
account (TqAccount/TqKq/TqSim): [可选]指定发送下单指令的账户实例, 多账户模式下,该参数必须指定
"""
self._api = api
self._account = account
self._symbol = symbol
self._direction = _check_direction(direction)
self._offset = _check_offset(offset)
self._volume = _check_volume(volume)
self._min_volume, self._max_volume = _check_volume_limit(min_volume, max_volume)
self._price = _check_price(price)
self._trade_chan = trade_chan
self._trade_objs_chan = trade_objs_chan
self._task = self._api.create_task(self._run())
async def _run(self):
"""负责追价下单的task"""
self._quote = await self._api.get_quote(self._symbol)
while self._volume != 0:
limit_price = self._get_price(self._direction)
if limit_price != limit_price:
raise Exception("设置价格函数返回 nan,无法处理。请检查后重试。")
# 当前下单手数
if self._min_volume and self._max_volume and self._volume >= self._max_volume:
this_volume = utils.RD.randint(self._min_volume, self._max_volume)
else:
this_volume = self._volume
insert_order_task = InsertOrderTask(self._api, self._symbol, self._direction, self._offset,
this_volume, limit_price=limit_price, trade_chan=self._trade_chan,
trade_objs_chan=self._trade_objs_chan, account=self._account)
order = await insert_order_task._order_chan.recv()
check_chan = TqChan(self._api, last_only=True)
check_task = self._api.create_task(self._check_price(check_chan, limit_price, order['order_id']))
try:
# 当父 task 被 cancel,子 task 如果正在执行,也会捕获 CancelError
# 添加 asyncio.shield 后,如果父 task 被 cancel,asyncio.shield 也会被 cancel,但是子 task 不会收到 CancelError
# 这里需要 asyncio.shield,是因为 insert_order_task._task 预期不会被 cancel, 应该等待到 order 状态是 FINISHED 才返回
await asyncio.shield(insert_order_task._task)
order = insert_order_task._order_chan.recv_latest(order)
self._volume -= (this_volume - order['volume_left'])
if order['volume_left'] != 0 and not check_task.done():
raise Exception("遇到错单: %s %s %s %d手 %f %s" % (
self._symbol, self._direction, self._offset, this_volume, limit_price, order['last_msg']))
finally:
if self._api.get_order(order['order_id'], account=self._account).status == "ALIVE":
# 当 task 被 cancel 时,主动撤掉未成交的挂单
self._api.cancel_order(order['order_id'], account=self._account)
await check_chan.close()
await check_task
# 在每次退出时,都等到 insert_order_task 执行完,此时 order 状态一定是 FINISHED;self._trade_chan 也一定会收到全部的成交手数
try:
# 当用户调用 api.close(), 会主动 cancel 所有由 api 创建的 task,包括 TargetPosTask._target_pos_task,
# 此时,insert_order_task._task 如果有未完成委托单,会永远等待下去(因为网络连接已经断开),所以这里增加超时机制。
await asyncio.wait_for(insert_order_task._task, timeout=30)
except asyncio.TimeoutError:
raise Exception(f"InsertOrderTask 执行超时,30s 内报单未执行完。此错误产生可能的原因:"
f"可能是用户调用了 api.close() 之后,已经创建的 InsertOrderTask 无法正常结束。")
def _get_price(self, direction):
"""根据最新行情和下单方式计算出最优的下单价格"""
if self._price not in ('ACTIVE', 'PASSIVE'):
assert isfunction(self._price)
return self._price(direction)
# 主动买的价格序列(优先判断卖价,如果没有则用买价)
price_list = [self._quote.ask_price1, self._quote.bid_price1]
if direction == "SELL":
price_list.reverse()
if self._price == "PASSIVE":
price_list.reverse()
limit_price = price_list[0]
if limit_price != limit_price:
limit_price = price_list[1]
if limit_price != limit_price:
limit_price = self._quote.last_price
if limit_price != limit_price:
limit_price = self._quote.pre_close
return limit_price
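    # Illustration of the fallback chain: with price="ACTIVE" and direction="BUY",
    # ask_price1 is used first; if it is nan (e.g. an empty ask book at limit-up),
    # the code falls back to bid_price1, then last_price, then pre_close. "PASSIVE"
    # simply swaps the ask/bid preference, and a callable price is invoked directly
    # with the direction.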
async def _check_price(self, update_chan, order_price, order_id):
"""判断价格是否变化的task"""
async with self._api.register_update_notify(chan=update_chan):
async for _ in update_chan:
new_price = self._get_price(self._direction)
if (self._direction == "BUY" and new_price > order_price) or (
self._direction == "SELL" and new_price < order_price):
self._api.cancel_order(order_id, account=self._account)
break
class InsertOrderTask(object):
"""下单task (注:此类主要在tqsdk内部使用,并非简单用法,不建议用户使用)"""
def __init__(self, api, symbol, direction, offset, volume, limit_price=None, order_chan=None, trade_chan=None,
trade_objs_chan=None, account: Optional[Union[TqAccount, TqKq, TqSim]] = None):
"""
创建下单task实例
Args:
api (TqApi): TqApi实例,该task依托于指定api下单/撤单
symbol (str): 拟下单的合约symbol, 格式为 交易所代码.合约代码, 例如 "SHFE.cu1801"
direction (str): "BUY" 或 "SELL"
offset (str): "OPEN", "CLOSE" 或 "CLOSETODAY"
volume (int): 需要下单的手数
limit_price (float): [可选]下单价格, 默认市价单
order_chan (TqChan): [可选]委托单通知channel, 当委托单状态发生时会将委托单信息发到该channel上
trade_chan (TqChan): [可选]成交通知channel, 当有成交发生时会将成交手数(多头为正数,空头为负数)发到该channel上
trade_objs_chan (TqChan): [可选]成交对象通知channel, 当有成交发生时会将成交对象发送到该channel上
account (TqAccount/TqKq/TqSim): [可选]指定发送下单指令的账户实例, 多账户模式下,该参数必须指定
"""
self._api = api
self._account = account
self._symbol = symbol
self._direction = _check_direction(direction)
        self._offset = _check_offset(offset)
        self._volume = _check_volume(volume)
self._limit_price = float(limit_price) if limit_price is not None else None
self._order_chan = order_chan if order_chan is not None else TqChan(self._api)
self._trade_chan = trade_chan
self._trade_objs_chan = trade_objs_chan
self._task = self._api.create_task(self._run())
async def _run(self):
"""负责下单的task"""
order_id = utils._generate_uuid("PYSDK_target")
order = self._api.insert_order(self._symbol, self._direction, self._offset, self._volume, self._limit_price,
order_id=order_id, account=self._account)
last_order = order.copy() # 保存当前 order 的状态
last_left = self._volume
all_trades_id = set() # 记录所有的 trade_id
async with self._api.register_update_notify() as update_chan:
await self._order_chan.send({k: v for k, v in last_order.items() if not k.startswith("_")}) # 将副本的数据及所有权转移
while order.status != "FINISHED" or (order.volume_orign - order.volume_left) != sum(
[trade.volume for trade in order.trade_records.values()]):
await update_chan.recv()
if order.volume_left != last_left:
vol = last_left - order.volume_left
last_left = order.volume_left
if self._trade_chan:
await self._trade_chan.send(vol if order.direction == "BUY" else -vol)
if self._trade_objs_chan:
# 当前用户需要接受 trade_obj,才会运行以下代码
rest_trades_id = set(order.trade_records) - all_trades_id
for trade_id in rest_trades_id:
# 新收到的 trade 发送到 self._trade_objs_chan
await self._trade_objs_chan.send({k: v for k, v in order.trade_records[trade_id].items() if not k.startswith("_")})
all_trades_id.add(trade_id)
if order != last_order:
last_order = order.copy()
await self._order_chan.send({k: v for k, v in last_order.items() if not k.startswith("_")})
|
py | 7df82896e7487f707d264aab4410a6e9ff5b0d0a | # MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
from typing import List, Any, Union
class FcoeClientFlogiRange(Base):
"""The configuration parameters for a range of FLOGI VN_Ports.
The FcoeClientFlogiRange class encapsulates a required fcoeClientFlogiRange resource which will be retrieved from the server every time the property is accessed.
"""
__slots__ = ()
_SDM_NAME = 'fcoeClientFlogiRange'
_SDM_ATT_MAP = {
'Count': 'count',
'Enabled': 'enabled',
'FipAddressingMode': 'fipAddressingMode',
'FipDestinationMacAddress': 'fipDestinationMacAddress',
'FipEnabled': 'fipEnabled',
'FipOptionSet': 'fipOptionSet',
'FipOptionSetName': 'fipOptionSetName',
'FipSolicitTimeout': 'fipSolicitTimeout',
'FipVendorId': 'fipVendorId',
'FipVlanDiscovery': 'fipVlanDiscovery',
'FipVlanDiscoveryUntagged': 'fipVlanDiscoveryUntagged',
'Name': 'name',
'NameServerCommands': 'nameServerCommands',
'NameServerQuery': 'nameServerQuery',
'NameServerQueryCommand': 'nameServerQueryCommand',
'NameServerQueryParameterType': 'nameServerQueryParameterType',
'NameServerQueryParameterValue': 'nameServerQueryParameterValue',
'NameServerRegistration': 'nameServerRegistration',
'NodeWwnIncrement': 'nodeWwnIncrement',
'NodeWwnStart': 'nodeWwnStart',
'ObjectId': 'objectId',
'PlogiDestId': 'plogiDestId',
'PlogiEnabled': 'plogiEnabled',
'PlogiMeshMode': 'plogiMeshMode',
'PlogiTargetName': 'plogiTargetName',
'PortWwnIncrement': 'portWwnIncrement',
'PortWwnStart': 'portWwnStart',
'PrliEnabled': 'prliEnabled',
'SourceOui': 'sourceOui',
'SourceOuiIncrement': 'sourceOuiIncrement',
'StateChangeRegistration': 'stateChangeRegistration',
'StateChangeRegistrationOption': 'stateChangeRegistrationOption',
'UnicastFipSolicit': 'unicastFipSolicit',
}
_SDM_ENUM_MAP = {
}
def __init__(self, parent, list_op=False):
super(FcoeClientFlogiRange, self).__init__(parent, list_op)
@property
def Count(self):
# type: () -> int
"""
Returns
-------
- number: The number of FLOGI VN_Ports to be created by this VN_Port range.
"""
return self._get_attribute(self._SDM_ATT_MAP['Count'])
@Count.setter
def Count(self, value):
# type: (int) -> None
self._set_attribute(self._SDM_ATT_MAP['Count'], value)
@property
def Enabled(self):
# type: () -> bool
"""
Returns
-------
- bool: Disabled ranges won't be configured nor validated.
"""
return self._get_attribute(self._SDM_ATT_MAP['Enabled'])
@Enabled.setter
def Enabled(self, value):
# type: (bool) -> None
self._set_attribute(self._SDM_ATT_MAP['Enabled'], value)
@property
def FipAddressingMode(self):
# type: () -> str
"""
Returns
-------
- str: The addressing mode specified by FLOGI/FDISC requests.
"""
return self._get_attribute(self._SDM_ATT_MAP['FipAddressingMode'])
@FipAddressingMode.setter
def FipAddressingMode(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['FipAddressingMode'], value)
@property
def FipDestinationMacAddress(self):
# type: () -> str
"""
Returns
-------
- str: Represents the address to which FIP Solicit Unicast is sent.
"""
return self._get_attribute(self._SDM_ATT_MAP['FipDestinationMacAddress'])
@FipDestinationMacAddress.setter
def FipDestinationMacAddress(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['FipDestinationMacAddress'], value)
@property
def FipEnabled(self):
# type: () -> bool
"""
Returns
-------
- bool: Enable FCoE Initialization Protocol (FIP).
"""
return self._get_attribute(self._SDM_ATT_MAP['FipEnabled'])
@FipEnabled.setter
def FipEnabled(self, value):
# type: (bool) -> None
self._set_attribute(self._SDM_ATT_MAP['FipEnabled'], value)
@property
def FipOptionSet(self):
# type: () -> str
"""
Returns
-------
- str(None | /api/v1/sessions/1/ixnetwork/globals/.../fcoeClientOptionSet): List of TLV options used with FIP.
"""
return self._get_attribute(self._SDM_ATT_MAP['FipOptionSet'])
@FipOptionSet.setter
def FipOptionSet(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['FipOptionSet'], value)
@property
def FipOptionSetName(self):
# type: () -> str
"""
Returns
-------
- str: The name of the Option Set used by this range.
"""
return self._get_attribute(self._SDM_ATT_MAP['FipOptionSetName'])
@FipOptionSetName.setter
def FipOptionSetName(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['FipOptionSetName'], value)
@property
def FipSolicitTimeout(self):
# type: () -> int
"""
Returns
-------
- number: Period of time (in seconds) that the client waits for a message of type FIP Discovery Advertisement Multicast before failing.
"""
return self._get_attribute(self._SDM_ATT_MAP['FipSolicitTimeout'])
@FipSolicitTimeout.setter
def FipSolicitTimeout(self, value):
# type: (int) -> None
self._set_attribute(self._SDM_ATT_MAP['FipSolicitTimeout'], value)
@property
def FipVendorId(self):
# type: () -> str
"""
Returns
-------
- str: The Vendor Identifier value (8-byte).
"""
return self._get_attribute(self._SDM_ATT_MAP['FipVendorId'])
@FipVendorId.setter
def FipVendorId(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['FipVendorId'], value)
@property
def FipVlanDiscovery(self):
# type: () -> bool
"""
Returns
-------
- bool: Enable FIP VLAN Discovery.
"""
return self._get_attribute(self._SDM_ATT_MAP['FipVlanDiscovery'])
@FipVlanDiscovery.setter
def FipVlanDiscovery(self, value):
# type: (bool) -> None
self._set_attribute(self._SDM_ATT_MAP['FipVlanDiscovery'], value)
@property
def FipVlanDiscoveryUntagged(self):
# type: () -> bool
"""
Returns
-------
- bool: If enabled, VLAN Discovery will be performed using plain Ethernet frames, i.e. without any VLAN tags.
"""
return self._get_attribute(self._SDM_ATT_MAP['FipVlanDiscoveryUntagged'])
@FipVlanDiscoveryUntagged.setter
def FipVlanDiscoveryUntagged(self, value):
# type: (bool) -> None
self._set_attribute(self._SDM_ATT_MAP['FipVlanDiscoveryUntagged'], value)
@property
def Name(self):
# type: () -> str
"""
Returns
-------
- str: Name of range
"""
return self._get_attribute(self._SDM_ATT_MAP['Name'])
@Name.setter
def Name(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['Name'], value)
@property
def NameServerCommands(self):
# type: () -> List[int]
"""
Returns
-------
- list(number): Signifies the Name Server Commands that will be sent by the client.
"""
return self._get_attribute(self._SDM_ATT_MAP['NameServerCommands'])
@NameServerCommands.setter
def NameServerCommands(self, value):
# type: (List[int]) -> None
self._set_attribute(self._SDM_ATT_MAP['NameServerCommands'], value)
@property
def NameServerQuery(self):
# type: () -> bool
"""
Returns
-------
- bool: If set, the N_Port will attempt to query a Name Server.
"""
return self._get_attribute(self._SDM_ATT_MAP['NameServerQuery'])
@NameServerQuery.setter
def NameServerQuery(self, value):
# type: (bool) -> None
self._set_attribute(self._SDM_ATT_MAP['NameServerQuery'], value)
@property
def NameServerQueryCommand(self):
# type: () -> str
"""
Returns
-------
- str: Name Server request command codes.
"""
return self._get_attribute(self._SDM_ATT_MAP['NameServerQueryCommand'])
@NameServerQueryCommand.setter
def NameServerQueryCommand(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['NameServerQueryCommand'], value)
@property
def NameServerQueryParameterType(self):
# type: () -> str
"""
Returns
-------
- str: State Change Registration Parameter Type.
"""
return self._get_attribute(self._SDM_ATT_MAP['NameServerQueryParameterType'])
@NameServerQueryParameterType.setter
def NameServerQueryParameterType(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['NameServerQueryParameterType'], value)
@property
def NameServerQueryParameterValue(self):
# type: () -> str
"""
Returns
-------
- str: State Change Registration Parameter Value.
"""
return self._get_attribute(self._SDM_ATT_MAP['NameServerQueryParameterValue'])
@NameServerQueryParameterValue.setter
def NameServerQueryParameterValue(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['NameServerQueryParameterValue'], value)
@property
def NameServerRegistration(self):
# type: () -> bool
"""
Returns
-------
- bool: If set, the N_Port will attempt to register to a Name Server.
"""
return self._get_attribute(self._SDM_ATT_MAP['NameServerRegistration'])
@NameServerRegistration.setter
def NameServerRegistration(self, value):
# type: (bool) -> None
self._set_attribute(self._SDM_ATT_MAP['NameServerRegistration'], value)
@property
def NodeWwnIncrement(self):
# type: () -> str
"""
Returns
-------
- str: The Node Name incrementing value for this N_Port range.
"""
return self._get_attribute(self._SDM_ATT_MAP['NodeWwnIncrement'])
@NodeWwnIncrement.setter
def NodeWwnIncrement(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['NodeWwnIncrement'], value)
@property
def NodeWwnStart(self):
# type: () -> str
"""
Returns
-------
- str: The Node Name starting value for this N_Port range.
"""
return self._get_attribute(self._SDM_ATT_MAP['NodeWwnStart'])
@NodeWwnStart.setter
def NodeWwnStart(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['NodeWwnStart'], value)
@property
def ObjectId(self):
# type: () -> str
"""
Returns
-------
- str: Unique identifier for this object
"""
return self._get_attribute(self._SDM_ATT_MAP['ObjectId'])
@property
def PlogiDestId(self):
# type: () -> str
"""
Returns
-------
- str: Indicates FCIDs and WWNs that can be used as destination for PLOGI requests.
"""
return self._get_attribute(self._SDM_ATT_MAP['PlogiDestId'])
@PlogiDestId.setter
def PlogiDestId(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['PlogiDestId'], value)
@property
def PlogiEnabled(self):
# type: () -> bool
"""
Returns
-------
- bool: Enables these N_Ports to attempt a PLOGI connection with specified destinations.
"""
return self._get_attribute(self._SDM_ATT_MAP['PlogiEnabled'])
@PlogiEnabled.setter
def PlogiEnabled(self, value):
# type: (bool) -> None
self._set_attribute(self._SDM_ATT_MAP['PlogiEnabled'], value)
@property
def PlogiMeshMode(self):
# type: () -> str
"""
Returns
-------
- str: The association mode between PLOGI initiators and targets.
"""
return self._get_attribute(self._SDM_ATT_MAP['PlogiMeshMode'])
@PlogiMeshMode.setter
def PlogiMeshMode(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['PlogiMeshMode'], value)
@property
def PlogiTargetName(self):
# type: () -> str
"""
Returns
-------
- str: Indicates the N_Port range used as destination for PLOGI requests.
"""
return self._get_attribute(self._SDM_ATT_MAP['PlogiTargetName'])
@PlogiTargetName.setter
def PlogiTargetName(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['PlogiTargetName'], value)
@property
def PortWwnIncrement(self):
# type: () -> str
"""
Returns
-------
- str: The Port Name incrementing value for this N_Port range.
"""
return self._get_attribute(self._SDM_ATT_MAP['PortWwnIncrement'])
@PortWwnIncrement.setter
def PortWwnIncrement(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['PortWwnIncrement'], value)
@property
def PortWwnStart(self):
# type: () -> str
"""
Returns
-------
- str: The Port Name starting value for this N_Port range.
"""
return self._get_attribute(self._SDM_ATT_MAP['PortWwnStart'])
@PortWwnStart.setter
def PortWwnStart(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['PortWwnStart'], value)
@property
def PrliEnabled(self):
# type: () -> bool
"""
Returns
-------
- bool: Process login.
"""
return self._get_attribute(self._SDM_ATT_MAP['PrliEnabled'])
@PrliEnabled.setter
def PrliEnabled(self, value):
# type: (bool) -> None
self._set_attribute(self._SDM_ATT_MAP['PrliEnabled'], value)
@property
def SourceOui(self):
# type: () -> str
"""
Returns
-------
- str: The OUI ID (3-byte) associated to all N_Ports in this range.
"""
return self._get_attribute(self._SDM_ATT_MAP['SourceOui'])
@SourceOui.setter
def SourceOui(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['SourceOui'], value)
@property
def SourceOuiIncrement(self):
# type: () -> str
"""
Returns
-------
- str: The OUI ID incrementing value for this N_Port range.
"""
return self._get_attribute(self._SDM_ATT_MAP['SourceOuiIncrement'])
@SourceOuiIncrement.setter
def SourceOuiIncrement(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['SourceOuiIncrement'], value)
@property
def StateChangeRegistration(self):
# type: () -> bool
"""
Returns
-------
- bool: If set, the N_Port will attempt to subscribe to State Change events.
"""
return self._get_attribute(self._SDM_ATT_MAP['StateChangeRegistration'])
@StateChangeRegistration.setter
def StateChangeRegistration(self, value):
# type: (bool) -> None
self._set_attribute(self._SDM_ATT_MAP['StateChangeRegistration'], value)
@property
def StateChangeRegistrationOption(self):
# type: () -> str
"""
Returns
-------
- str: State Change Registration option.
"""
return self._get_attribute(self._SDM_ATT_MAP['StateChangeRegistrationOption'])
@StateChangeRegistrationOption.setter
def StateChangeRegistrationOption(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['StateChangeRegistrationOption'], value)
@property
def UnicastFipSolicit(self):
# type: () -> str
"""
Returns
-------
- str: None - Fip Solicit is made as multicast. Listen/Learn FIFO - The client waits a message of type FIP Discovery Advertisement Multicast and replies with FIP Solicit Unicast to the server. Listen/Learn PB - In this mode the FCoE Client listens for Unsolicited Advertisements from FCFs; when the waiting period is over, the FCF with the highest priority is chosen and a Discovery Solicitation unicast is sent to it. Explicit - The client sends directly a FIP Solicit Unicast to the address from FIP Destination MAC Address field.
"""
return self._get_attribute(self._SDM_ATT_MAP['UnicastFipSolicit'])
@UnicastFipSolicit.setter
def UnicastFipSolicit(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['UnicastFipSolicit'], value)
def update(self, Count=None, Enabled=None, FipAddressingMode=None, FipDestinationMacAddress=None, FipEnabled=None, FipOptionSet=None, FipOptionSetName=None, FipSolicitTimeout=None, FipVendorId=None, FipVlanDiscovery=None, FipVlanDiscoveryUntagged=None, Name=None, NameServerCommands=None, NameServerQuery=None, NameServerQueryCommand=None, NameServerQueryParameterType=None, NameServerQueryParameterValue=None, NameServerRegistration=None, NodeWwnIncrement=None, NodeWwnStart=None, PlogiDestId=None, PlogiEnabled=None, PlogiMeshMode=None, PlogiTargetName=None, PortWwnIncrement=None, PortWwnStart=None, PrliEnabled=None, SourceOui=None, SourceOuiIncrement=None, StateChangeRegistration=None, StateChangeRegistrationOption=None, UnicastFipSolicit=None):
# type: (int, bool, str, str, bool, str, str, int, str, bool, bool, str, List[int], bool, str, str, str, bool, str, str, str, bool, str, str, str, str, bool, str, str, bool, str, str) -> FcoeClientFlogiRange
"""Updates fcoeClientFlogiRange resource on the server.
Args
----
- Count (number): The number of FLOGI VN_Ports to be created by this VN_Port range.
- Enabled (bool): Disabled ranges won't be configured nor validated.
- FipAddressingMode (str): The addressing mode specified by FLOGI/FDISC requests.
- FipDestinationMacAddress (str): Represents the address to which FIP Solicit Unicast is sent.
- FipEnabled (bool): Enable FCoE Initialization Protocol (FIP).
- FipOptionSet (str(None | /api/v1/sessions/1/ixnetwork/globals/.../fcoeClientOptionSet)): List of TLV options used with FIP.
- FipOptionSetName (str): The name of the Option Set used by this range.
- FipSolicitTimeout (number): Period of time (in seconds) that the client waits for a message of type FIP Discovery Advertisement Multicast before failing.
- FipVendorId (str): The Vendor Identifier value (8-byte).
- FipVlanDiscovery (bool): Enable FIP VLAN Discovery.
- FipVlanDiscoveryUntagged (bool): If enabled, VLAN Discovery will be performed using plain Ethernet frames, i.e. without any VLAN tags.
- Name (str): Name of range
- NameServerCommands (list(number)): Signifies the Name Server Commands that will be sent by the client.
- NameServerQuery (bool): If set, the N_Port will attempt to query a Name Server.
- NameServerQueryCommand (str): Name Server request command codes.
- NameServerQueryParameterType (str): State Change Registration Parameter Type.
- NameServerQueryParameterValue (str): State Change Registration Parameter Value.
- NameServerRegistration (bool): If set, the N_Port will attempt to register to a Name Server.
- NodeWwnIncrement (str): The Node Name incrementing value for this N_Port range.
- NodeWwnStart (str): The Node Name starting value for this N_Port range.
- PlogiDestId (str): Indicates FCIDs and WWNs that can be used as destinations for PLOGI requests.
- PlogiEnabled (bool): Enables these N_Ports to attempt a PLOGI connection with specified destinations.
- PlogiMeshMode (str): The association mode between PLOGI initiators and targets.
- PlogiTargetName (str): Indicates the N_Port range used as destination for PLOGI requests.
- PortWwnIncrement (str): The Port Name incrementing value for this N_Port range.
- PortWwnStart (str): The Port Name starting value for this N_Port range.
- PrliEnabled (bool): Process login.
- SourceOui (str): The OUI ID (3-byte) associated to all N_Ports in this range.
- SourceOuiIncrement (str): The OUI ID incrementing value for this N_Port range.
- StateChangeRegistration (bool): If set, the N_Port will attempt to subscribe to State Change events.
- StateChangeRegistrationOption (str): State Change Registration option.
- UnicastFipSolicit (str): None - Fip Solicit is made as multicast. Listen/Learn FIFO - The client waits for a message of type FIP Discovery Advertisement Multicast and replies with FIP Solicit Unicast to the server. Listen/Learn PB - In this mode the FCoE Client listens for Unsolicited Advertisements from FCFs; when the waiting period is over, the FCF with the highest priority is chosen and a Discovery Solicitation unicast is sent to it. Explicit - The client directly sends a FIP Solicit Unicast to the address from the FIP Destination MAC Address field.
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))
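# Usage sketch (illustrative): update() maps its keyword arguments onto the SDM
# attributes and pushes them to the server in one call, which is preferable to
# setting many properties individually. `flogi_range` is the assumed object
# from the sketch above; the attribute values are illustrative only.
#
#   flogi_range.update(Count=16,
#                      FipEnabled=True,
#                      NodeWwnStart='20:00:00:00:00:00:00:01',
#                      PortWwnStart='21:00:00:00:00:00:00:01')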
def CustomProtocolStack(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the customProtocolStack operation on the server.
Create custom protocol stack under /vport/protocolStack
customProtocolStack(Arg2=list, Arg3=enum, async_operation=bool)
---------------------------------------------------------------
- Arg2 (list(str)): List of plugin types to be added in the new custom stack
- Arg3 (str(kAppend | kMerge | kOverwrite)): Append, merge or overwrite existing protocol stack
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('customProtocolStack', payload=payload, response_object=None)
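# Usage sketch (illustrative): CustomProtocolStack builds a new custom stack
# from a list of plugin type names and a merge mode (kAppend | kMerge |
# kOverwrite). The plugin names below are assumptions for illustration only.
#
#   flogi_range.CustomProtocolStack(['ethernet', 'fcoeClientEndpoint'], 'kOverwrite')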
def DisableProtocolStack(self, *args, **kwargs):
# type: (*Any, **Any) -> Union[str, None]
"""Executes the disableProtocolStack operation on the server.
Disable a protocol under protocolStack using the class name
disableProtocolStack(Arg2=string, async_operation=bool)string
-------------------------------------------------------------
- Arg2 (str): Protocol class name to disable
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
- Returns str: Status of the exec
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('disableProtocolStack', payload=payload, response_object=None)
def EnableProtocolStack(self, *args, **kwargs):
# type: (*Any, **Any) -> Union[str, None]
"""Executes the enableProtocolStack operation on the server.
Enable a protocol under protocolStack using the class name
enableProtocolStack(Arg2=string, async_operation=bool)string
------------------------------------------------------------
- Arg2 (str): Protocol class name to enable
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
- Returns str: Status of the exec
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('enableProtocolStack', payload=payload, response_object=None)
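# Usage sketch (illustrative): DisableProtocolStack/EnableProtocolStack take a
# protocol class name as Arg2 and return the exec status string. The class
# name below is an assumption for illustration.
#
#   status = flogi_range.DisableProtocolStack('fcoeClientFlogiRange')
#   status = flogi_range.EnableProtocolStack('fcoeClientFlogiRange')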
def FcoeClientFlogi(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the fcoeClientFlogi operation on the server.
Enable VN_Port (transmits FLOGI, if necessary).
The IxNetwork model allows for multiple method signatures with the same name while Python does not.
fcoeClientFlogi(async_operation=bool)
-------------------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
fcoeClientFlogi(Arg2=enum, async_operation=bool)
------------------------------------------------
- Arg2 (str(async | sync)): kArray[kObjref=/vport/protocolStack/ethernet/fcoeClientEndpoint,/vport/protocolStack/ethernet/fcoeClientEndpoint/range,/vport/protocolStack/ethernet/fcoeClientEndpoint/range,/vport/protocolStack/ethernet/fcoeClientEndpoint/range/fcoeClientFlogiRange]
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('fcoeClientFlogi', payload=payload, response_object=None)
def FcoeClientFlogo(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the fcoeClientFlogo operation on the server.
Disable VN_Port (transmits FLOGO, if necessary).
The IxNetwork model allows for multiple method signatures with the same name while Python does not.
fcoeClientFlogo(async_operation=bool)
-------------------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
fcoeClientFlogo(Arg2=enum, async_operation=bool)
------------------------------------------------
- Arg2 (str(async | sync)): kArray[kObjref=/vport/protocolStack/ethernet/fcoeClientEndpoint,/vport/protocolStack/ethernet/fcoeClientEndpoint/range,/vport/protocolStack/ethernet/fcoeClientEndpoint/range,/vport/protocolStack/ethernet/fcoeClientEndpoint/range/fcoeClientFlogiRange]
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('fcoeClientFlogo', payload=payload, response_object=None)
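# Usage sketch (illustrative): FcoeClientFlogi/FcoeClientFlogo toggle the
# VN_Port login state for this range; the optional Arg2 ('async' | 'sync')
# selects how the server executes the operation.
#
#   flogi_range.FcoeClientFlogi()          # log the VN_Ports in (FLOGI)
#   flogi_range.FcoeClientFlogo('sync')    # log them out again (FLOGO)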
def FcoeClientPause(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the fcoeClientPause operation on the server.
Pause negotiation of FCoE sessions for selected plugins and ranges
fcoeClientPause(async_operation=bool)
-------------------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('fcoeClientPause', payload=payload, response_object=None)
def FcoeClientPlogi(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the fcoeClientPlogi operation on the server.
Enable PLOGI (transmits PLOGI to the PLOGI Destination(s) if configured; an error message is displayed if none is configured).
The IxNetwork model allows for multiple method signatures with the same name while Python does not.
fcoeClientPlogi(async_operation=bool)
-------------------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
fcoeClientPlogi(Arg2=enum, async_operation=bool)
------------------------------------------------
- Arg2 (str(async | sync)): kArray[kObjref=/vport/protocolStack/ethernet/fcoeClientEndpoint,/vport/protocolStack/ethernet/fcoeClientEndpoint/range,/vport/protocolStack/ethernet/fcoeClientEndpoint/range,/vport/protocolStack/ethernet/fcoeClientEndpoint/range/fcoeClientFdiscRange,/vport/protocolStack/ethernet/fcoeClientEndpoint/range/fcoeClientFlogiRange]
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('fcoeClientPlogi', payload=payload, response_object=None)
def FcoeClientPlogo(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the fcoeClientPlogo operation on the server.
Disable PLOGI (transmits PLOGO if already logged in to the destination VN_Port(s)).
The IxNetwork model allows for multiple method signatures with the same name while Python does not.
fcoeClientPlogo(async_operation=bool)
-------------------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
fcoeClientPlogo(Arg2=enum, async_operation=bool)
------------------------------------------------
- Arg2 (str(async | sync)): kArray[kObjref=/vport/protocolStack/ethernet/fcoeClientEndpoint,/vport/protocolStack/ethernet/fcoeClientEndpoint/range,/vport/protocolStack/ethernet/fcoeClientEndpoint/range,/vport/protocolStack/ethernet/fcoeClientEndpoint/range/fcoeClientFdiscRange,/vport/protocolStack/ethernet/fcoeClientEndpoint/range/fcoeClientFlogiRange]
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('fcoeClientPlogo', payload=payload, response_object=None)
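# Usage sketch (illustrative): PLOGI only makes sense once PlogiEnabled is set
# and a destination is configured (PlogiDestId / PlogiTargetName); otherwise
# the server reports an error. The destination value is left as a placeholder.
#
#   flogi_range.update(PlogiEnabled=True, PlogiDestId='...')
#   flogi_range.FcoeClientPlogi()
#   flogi_range.FcoeClientPlogo()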
def FcoeClientResume(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the fcoeClientResume operation on the server.
Resume previously paused negotiation for selected plugins and ranges
fcoeClientResume(async_operation=bool)
--------------------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('fcoeClientResume', payload=payload, response_object=None)
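# Usage sketch (illustrative): pause/resume act on FCoE session negotiation
# already in progress for the selected ranges.
#
#   flogi_range.FcoeClientPause()
#   flogi_range.FcoeClientResume(async_operation=True)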
def FcoeClientStart(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the fcoeClientStart operation on the server.
Negotiate FCoE sessions for selected ranges
The IxNetwork model allows for multiple method signatures with the same name while Python does not.
fcoeClientStart(async_operation=bool)
-------------------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
fcoeClientStart(Arg2=enum, async_operation=bool)
------------------------------------------------
- Arg2 (str(async | sync)): kArray[kObjref=/vport/protocolStack/ethernet/fcoeClientEndpoint,/vport/protocolStack/ethernet/fcoeClientEndpoint/range,/vport/protocolStack/ethernet/fcoeClientEndpoint/range,/vport/protocolStack/ethernet/fcoeClientEndpoint/range/fcoeClientFdiscRange,/vport/protocolStack/ethernet/fcoeClientEndpoint/range/fcoeClientFlogiRange]
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('fcoeClientStart', payload=payload, response_object=None)
def FcoeClientStop(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the fcoeClientStop operation on the server.
Teardown FCoE sessions for selected ranges
The IxNetwork model allows for multiple method signatures with the same name while Python does not.
fcoeClientStop(async_operation=bool)
------------------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
fcoeClientStop(Arg2=enum, async_operation=bool)
-----------------------------------------------
- Arg2 (str(async | sync)): kArray[kObjref=/vport/protocolStack/ethernet/fcoeClientEndpoint,/vport/protocolStack/ethernet/fcoeClientEndpoint/range,/vport/protocolStack/ethernet/fcoeClientEndpoint/range,/vport/protocolStack/ethernet/fcoeClientEndpoint/range/fcoeClientFdiscRange,/vport/protocolStack/ethernet/fcoeClientEndpoint/range/fcoeClientFlogiRange]
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('fcoeClientStop', payload=payload, response_object=None)
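# Usage sketch (illustrative): FcoeClientStart negotiates the FCoE sessions for
# the selected ranges and FcoeClientStop tears them down; both accept the same
# optional Arg2 ('async' | 'sync') as the other exec methods above.
#
#   flogi_range.FcoeClientStart()
#   # ... run traffic or validation here ...
#   flogi_range.FcoeClientStop('sync')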
def Start(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the start operation on the server.
Negotiate sessions for all protocols on all ranges belonging to selected plugins
The IxNetwork model allows for multiple method signatures with the same name while Python does not.
start(async_operation=bool)
---------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
start(Arg2=enum, async_operation=bool)
--------------------------------------
- Arg2 (str(async | sync)): kArray[kObjref=/vport/protocolStack/atm,/vport/protocolStack/atm/dhcpEndpoint,/vport/protocolStack/atm/dhcpEndpoint/ancp,/vport/protocolStack/atm/dhcpEndpoint/range,/vport/protocolStack/atm/dhcpEndpoint/range/ancpRange,/vport/protocolStack/atm/dhcpServerEndpoint,/vport/protocolStack/atm/dhcpServerEndpoint/range,/vport/protocolStack/atm/emulatedRouter,/vport/protocolStack/atm/emulatedRouter/dhcpEndpoint,/vport/protocolStack/atm/emulatedRouter/dhcpEndpoint/ancp,/vport/protocolStack/atm/emulatedRouter/dhcpEndpoint/range,/vport/protocolStack/atm/emulatedRouter/dhcpEndpoint/range/amtRange,/vport/protocolStack/atm/emulatedRouter/dhcpEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/dhcpServerEndpoint,/vport/protocolStack/atm/emulatedRouter/dhcpServerEndpoint/range,/vport/protocolStack/atm/emulatedRouter/dhcpServerEndpoint/range/amtRange,/vport/protocolStack/atm/emulatedRouter/ip,/vport/protocolStack/atm/emulatedRouter/ip/ancp,/vport/protocolStack/atm/emulatedRouter/ip/egtpPcrfEndpoint,/vport/protocolStack/atm/emulatedRouter/ip/egtpPcrfEndpoint/range/amtRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpPcrfEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpPcrfEndpoint/range/twampControlRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpPcrfS5S8SgwEndpoint,/vport/protocolStack/atm/emulatedRouter/ip/egtpPcrfS5S8SgwEndpoint/range/amtRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpPcrfS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpPcrfS5S8SgwEndpoint/range/twampControlRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpS5S8PgwEndpoint,/vport/protocolStack/atm/emulatedRouter/ip/egtpS5S8PgwEndpoint/range/amtRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpS5S8PgwEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpS5S8PgwEndpoint/range/twampControlRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpS5S8SgwEndpoint,/vport/protocolStack/atm/emulatedRouter/ip/egtpS5S8SgwEndpoint/range/amtRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpS5S8SgwEndpoint/range/twampControlRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpSgwEndpoint,/vport/protocolStack/atm/emulatedRouter/ip/egtpSgwEndpoint/range,/vport/protocolStack/atm/emulatedRouter/ip/egtpSgwEndpoint/range/amtRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpSgwEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpSgwEndpoint/range/twampControlRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpUeS5S8SgwEndpoint,/vport/protocolStack/atm/emulatedRouter/ip/egtpUeS5S8SgwEndpoint/range/amtRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpUeS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpUeS5S8SgwEndpoint/range/twampControlRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tp,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLacEndpoint,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/amtRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/twampCon
trolRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/amtRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/twampControlRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tpEndpoint,/vport/protocolStack/atm/emulatedRouter/ip/l2tpEndpoint/range,/vport/protocolStack/atm/emulatedRouter/ip/l2tpEndpoint/range/amtRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tpEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tpEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tpEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tpEndpoint/range/twampControlRange,/vport/protocolStack/atm/emulatedRouter/ip/smDnsEndpoint,/vport/protocolStack/atm/emulatedRouter/ip/smDnsEndpoint/range/amtRange,/vport/protocolStack/atm/emulatedRouter/ip/smDnsEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/smDnsEndpoint/range/twampControlRange,/vport/protocolStack/atm/emulatedRouter/ip/twampClient,/vport/protocolStack/atm/emulatedRouter/ip/twampServer,/vport/protocolStack/atm/emulatedRouter/ipEndpoint,/vport/protocolStack/atm/emulatedRouter/ipEndpoint/ancp,/vport/protocolStack/atm/emulatedRouter/ipEndpoint/range/amtRange,/vport/protocolStack/atm/emulatedRouter/ipEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ipEndpoint/range/twampControlRange,/vport/protocolStack/atm/emulatedRouter/ipEndpoint/twampClient,/vport/protocolStack/atm/emulatedRouter/ipEndpoint/twampServer,/vport/protocolStack/atm/emulatedRouterEndpoint,/vport/protocolStack/atm/emulatedRouterEndpoint/range/amtRange,/vport/protocolStack/atm/ip,/vport/protocolStack/atm/ip/ancp,/vport/protocolStack/atm/ip/egtpPcrfEndpoint,/vport/protocolStack/atm/ip/egtpPcrfEndpoint/range/amtRange,/vport/protocolStack/atm/ip/egtpPcrfEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/egtpPcrfEndpoint/range/twampControlRange,/vport/protocolStack/atm/ip/egtpPcrfS5S8SgwEndpoint,/vport/protocolStack/atm/ip/egtpPcrfS5S8SgwEndpoint/range/amtRange,/vport/protocolStack/atm/ip/egtpPcrfS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/egtpPcrfS5S8SgwEndpoint/range/twampControlRange,/vport/protocolStack/atm/ip/egtpS5S8PgwEndpoint,/vport/protocolStack/atm/ip/egtpS5S8PgwEndpoint/range/amtRange,/vport/protocolStack/atm/ip/egtpS5S8PgwEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/egtpS5S8PgwEndpoint/range/twampControlRange,/vport/protocolStack/atm/ip/egtpS5S8SgwEndpoint,/vport/protocolStack/atm/ip/egtpS5S8SgwEndpoint/range/amtRange,/vport/protocolStack/atm/ip/egtpS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/egtpS5S8SgwEndpoint/range/twampControlRange,/vport/protocolStack/atm/ip/egtpSgwEndpoint,/vport/protocolStack/atm/ip/egtpSgwEndpoint/range,/vport/protocolStack/atm/ip/egtpSgwEndpoint/range/amtRange,/vport/protocolStack/atm/ip/egtpSgwEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/egtpSgwEndpoint/range/twampControlRange,/vport/protocolStack/atm/ip/egtpUeS5S8SgwEndpoint,/vport/protocolStack/atm/ip/egtpUeS5S8SgwEndpoint/range/amtRange,/vport/prot
ocolStack/atm/ip/egtpUeS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/egtpUeS5S8SgwEndpoint/range/twampControlRange,/vport/protocolStack/atm/ip/l2tp,/vport/protocolStack/atm/ip/l2tp/dhcpoLacEndpoint,/vport/protocolStack/atm/ip/l2tp/dhcpoLacEndpoint/range,/vport/protocolStack/atm/ip/l2tp/dhcpoLacEndpoint/range,/vport/protocolStack/atm/ip/l2tp/dhcpoLacEndpoint/range/amtRange,/vport/protocolStack/atm/ip/l2tp/dhcpoLacEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/l2tp/dhcpoLacEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/atm/ip/l2tp/dhcpoLacEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/atm/ip/l2tp/dhcpoLacEndpoint/range/twampControlRange,/vport/protocolStack/atm/ip/l2tp/dhcpoLnsEndpoint,/vport/protocolStack/atm/ip/l2tp/dhcpoLnsEndpoint/range,/vport/protocolStack/atm/ip/l2tp/dhcpoLnsEndpoint/range,/vport/protocolStack/atm/ip/l2tp/dhcpoLnsEndpoint/range/amtRange,/vport/protocolStack/atm/ip/l2tp/dhcpoLnsEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/l2tp/dhcpoLnsEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/atm/ip/l2tp/dhcpoLnsEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/atm/ip/l2tp/dhcpoLnsEndpoint/range/twampControlRange,/vport/protocolStack/atm/ip/l2tpEndpoint,/vport/protocolStack/atm/ip/l2tpEndpoint/range,/vport/protocolStack/atm/ip/l2tpEndpoint/range/amtRange,/vport/protocolStack/atm/ip/l2tpEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/l2tpEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/atm/ip/l2tpEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/atm/ip/l2tpEndpoint/range/twampControlRange,/vport/protocolStack/atm/ip/smDnsEndpoint,/vport/protocolStack/atm/ip/smDnsEndpoint/range/amtRange,/vport/protocolStack/atm/ip/smDnsEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/smDnsEndpoint/range/twampControlRange,/vport/protocolStack/atm/ip/twampClient,/vport/protocolStack/atm/ip/twampServer,/vport/protocolStack/atm/ipEndpoint,/vport/protocolStack/atm/ipEndpoint/ancp,/vport/protocolStack/atm/ipEndpoint/range/amtRange,/vport/protocolStack/atm/ipEndpoint/range/ancpRange,/vport/protocolStack/atm/ipEndpoint/range/twampControlRange,/vport/protocolStack/atm/ipEndpoint/twampClient,/vport/protocolStack/atm/ipEndpoint/twampServer,/vport/protocolStack/atm/pppox,/vport/protocolStack/atm/pppox/ancp,/vport/protocolStack/atm/pppox/dhcpoPppClientEndpoint,/vport/protocolStack/atm/pppox/dhcpoPppClientEndpoint/range,/vport/protocolStack/atm/pppox/dhcpoPppClientEndpoint/range,/vport/protocolStack/atm/pppox/dhcpoPppClientEndpoint/range/ancpRange,/vport/protocolStack/atm/pppox/dhcpoPppClientEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/atm/pppox/dhcpoPppClientEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/atm/pppox/dhcpoPppServerEndpoint,/vport/protocolStack/atm/pppox/dhcpoPppServerEndpoint/range,/vport/protocolStack/atm/pppox/dhcpoPppServerEndpoint/range,/vport/protocolStack/atm/pppox/dhcpoPppServerEndpoint/range/ancpRange,/vport/protocolStack/atm/pppox/dhcpoPppServerEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/atm/pppox/dhcpoPppServerEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/atm/pppoxEndpoint,/vport/protocolStack/atm/pppoxEndpoint/ancp,/vport/protocolStack/atm/pppoxEndpoint/range,/vport/protocolStack/atm/pppoxEndpoint/range/ancpRange,/vport/protocolStack/atm/pppoxEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/atm/pppoxEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/ethernet,/vport/protocolStack/ethernet/dcbxEndpoint,/vport/protocolStack/ethernet/dcbxEndpoint/range,/vport/protocolSta
ck/ethernet/dhcpEndpoint,/vport/protocolStack/ethernet/dhcpEndpoint/ancp,/vport/protocolStack/ethernet/dhcpEndpoint/range,/vport/protocolStack/ethernet/dhcpEndpoint/range/ancpRange,/vport/protocolStack/ethernet/dhcpServerEndpoint,/vport/protocolStack/ethernet/dhcpServerEndpoint/range,/vport/protocolStack/ethernet/emulatedRouter,/vport/protocolStack/ethernet/emulatedRouter/dhcpEndpoint,/vport/protocolStack/ethernet/emulatedRouter/dhcpEndpoint/ancp,/vport/protocolStack/ethernet/emulatedRouter/dhcpEndpoint/range,/vport/protocolStack/ethernet/emulatedRouter/dhcpEndpoint/range/amtRange,/vport/protocolStack/ethernet/emulatedRouter/dhcpEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/dhcpServerEndpoint,/vport/protocolStack/ethernet/emulatedRouter/dhcpServerEndpoint/range,/vport/protocolStack/ethernet/emulatedRouter/dhcpServerEndpoint/range/amtRange,/vport/protocolStack/ethernet/emulatedRouter/ip,/vport/protocolStack/ethernet/emulatedRouter/ip/ancp,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpPcrfEndpoint,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpPcrfEndpoint/range/amtRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpPcrfEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpPcrfEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpPcrfS5S8SgwEndpoint,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpPcrfS5S8SgwEndpoint/range/amtRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpPcrfS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpPcrfS5S8SgwEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpS5S8PgwEndpoint,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpS5S8PgwEndpoint/range/amtRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpS5S8PgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpS5S8PgwEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpS5S8SgwEndpoint,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpS5S8SgwEndpoint/range/amtRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpS5S8SgwEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpSgwEndpoint,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpSgwEndpoint/range,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpSgwEndpoint/range/amtRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpSgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpSgwEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpUeS5S8SgwEndpoint,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpUeS5S8SgwEndpoint/range/amtRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpUeS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpUeS5S8SgwEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLacEndpoint,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/amtRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/dhcpv6PdClientRange,/vport/pr
otocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/amtRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tpEndpoint,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tpEndpoint/range,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tpEndpoint/range/amtRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tpEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tpEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tpEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tpEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/emulatedRouter/ip/smDnsEndpoint,/vport/protocolStack/ethernet/emulatedRouter/ip/smDnsEndpoint/range/amtRange,/vport/protocolStack/ethernet/emulatedRouter/ip/smDnsEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/smDnsEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/emulatedRouter/ip/twampClient,/vport/protocolStack/ethernet/emulatedRouter/ip/twampServer,/vport/protocolStack/ethernet/emulatedRouter/ipEndpoint,/vport/protocolStack/ethernet/emulatedRouter/ipEndpoint/ancp,/vport/protocolStack/ethernet/emulatedRouter/ipEndpoint/range/amtRange,/vport/protocolStack/ethernet/emulatedRouter/ipEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ipEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/emulatedRouter/ipEndpoint/twampClient,/vport/protocolStack/ethernet/emulatedRouter/ipEndpoint/twampServer,/vport/protocolStack/ethernet/emulatedRouterEndpoint,/vport/protocolStack/ethernet/emulatedRouterEndpoint/range/amtRange,/vport/protocolStack/ethernet/esmc,/vport/protocolStack/ethernet/fcoeClientEndpoint,/vport/protocolStack/ethernet/fcoeClientEndpoint/range,/vport/protocolStack/ethernet/fcoeClientEndpoint/range,/vport/protocolStack/ethernet/fcoeClientEndpoint/range/fcoeClientFdiscRange,/vport/protocolStack/ethernet/fcoeClientEndpoint/range/fcoeClientFlogiRange,/vport/protocolStack/ethernet/fcoeFwdEndpoint,/vport/protocolStack/ethernet/fcoeFwdEndpoint/range,/vport/protocolStack/ethernet/fcoeFwdEndpoint/secondaryRange,/vport/protocolStack/ethernet/ip,/vport/protocolStack/ethernet/ip/ancp,/vport/protocolStack/ethernet/ip/egtpPcrfEndpoint,/vport/protocolStack/ethernet/ip/egtpPcrfEndpoint/range/amtRange,/vport/protocolStack/ethernet/ip/egtpPcrfEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpPcrfEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/ip/egtpPcrfS5S8SgwEndpoint,/vport/protocolStack/ethernet/ip/egtpPcrfS5S8SgwEndpoint/range/amtRange,/vport/protocolStack/ethernet/ip/egtpPcrfS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpPcrfS5S8SgwEndpoint/range/twampControlRange,/vport/protocolStack/ethern
et/ip/egtpS5S8PgwEndpoint,/vport/protocolStack/ethernet/ip/egtpS5S8PgwEndpoint/range/amtRange,/vport/protocolStack/ethernet/ip/egtpS5S8PgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpS5S8PgwEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/ip/egtpS5S8SgwEndpoint,/vport/protocolStack/ethernet/ip/egtpS5S8SgwEndpoint/range/amtRange,/vport/protocolStack/ethernet/ip/egtpS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpS5S8SgwEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/ip/egtpSgwEndpoint,/vport/protocolStack/ethernet/ip/egtpSgwEndpoint/range,/vport/protocolStack/ethernet/ip/egtpSgwEndpoint/range/amtRange,/vport/protocolStack/ethernet/ip/egtpSgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpSgwEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/ip/egtpUeS5S8SgwEndpoint,/vport/protocolStack/ethernet/ip/egtpUeS5S8SgwEndpoint/range/amtRange,/vport/protocolStack/ethernet/ip/egtpUeS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpUeS5S8SgwEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/ip/l2tp,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLacEndpoint,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLacEndpoint/range,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLacEndpoint/range,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLacEndpoint/range/amtRange,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLacEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLacEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLacEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLacEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLnsEndpoint,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLnsEndpoint/range,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLnsEndpoint/range,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLnsEndpoint/range/amtRange,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLnsEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLnsEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLnsEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLnsEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/ip/l2tpEndpoint,/vport/protocolStack/ethernet/ip/l2tpEndpoint/range,/vport/protocolStack/ethernet/ip/l2tpEndpoint/range/amtRange,/vport/protocolStack/ethernet/ip/l2tpEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/l2tpEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/ethernet/ip/l2tpEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/ethernet/ip/l2tpEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/ip/smDnsEndpoint,/vport/protocolStack/ethernet/ip/smDnsEndpoint/range/amtRange,/vport/protocolStack/ethernet/ip/smDnsEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/smDnsEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/ip/twampClient,/vport/protocolStack/ethernet/ip/twampServer,/vport/protocolStack/ethernet/ipEndpoint,/vport/protocolStack/ethernet/ipEndpoint/ancp,/vport/protocolStack/ethernet/ipEndpoint/range/amtRange,/vport/protocolStack/ethernet/ipEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ipEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/ipEndpoint/twampClient,/vport/protocolStack/ethernet/ipEndpoint/twampServer,/vport/protocolStack/ethernet/pppox,/vport/protocolStack/ethernet/pppox/ancp,/vport/protocolStack/ethernet/pppox/dhcpoPppClientEndpoint,/vport/protocolStack/ethernet/pppox/dhcpoP
ppClientEndpoint/range,/vport/protocolStack/ethernet/pppox/dhcpoPppClientEndpoint/range,/vport/protocolStack/ethernet/pppox/dhcpoPppClientEndpoint/range/ancpRange,/vport/protocolStack/ethernet/pppox/dhcpoPppClientEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/ethernet/pppox/dhcpoPppClientEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/ethernet/pppox/dhcpoPppServerEndpoint,/vport/protocolStack/ethernet/pppox/dhcpoPppServerEndpoint/range,/vport/protocolStack/ethernet/pppox/dhcpoPppServerEndpoint/range,/vport/protocolStack/ethernet/pppox/dhcpoPppServerEndpoint/range/ancpRange,/vport/protocolStack/ethernet/pppox/dhcpoPppServerEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/ethernet/pppox/dhcpoPppServerEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/ethernet/pppoxEndpoint,/vport/protocolStack/ethernet/pppoxEndpoint/ancp,/vport/protocolStack/ethernet/pppoxEndpoint/range,/vport/protocolStack/ethernet/pppoxEndpoint/range/ancpRange,/vport/protocolStack/ethernet/pppoxEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/ethernet/pppoxEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/ethernet/vepaEndpoint,/vport/protocolStack/ethernet/vepaEndpoint/range,/vport/protocolStack/ethernetEndpoint,/vport/protocolStack/ethernetEndpoint/esmc,/vport/protocolStack/fcClientEndpoint,/vport/protocolStack/fcClientEndpoint/range,/vport/protocolStack/fcClientEndpoint/range,/vport/protocolStack/fcClientEndpoint/range/fcClientFdiscRange,/vport/protocolStack/fcClientEndpoint/range/fcClientFlogiRange,/vport/protocolStack/fcFportFwdEndpoint,/vport/protocolStack/fcFportFwdEndpoint/range,/vport/protocolStack/fcFportFwdEndpoint/secondaryRange]
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('start', payload=payload, response_object=None)
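# Usage sketch (illustrative): unlike FcoeClientStart, the generic Start
# operation brings up every protocol on every range owned by the selected
# plugins, not just this FLOGI range.
#
#   flogi_range.Start(async_operation=True)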
def Stop(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the stop operation on the server.
Teardown sessions for all protocols on all ranges belonging to selected plugins
The IxNetwork model allows for multiple method signatures with the same name while Python does not.
stop(async_operation=bool)
--------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
stop(Arg2=enum, async_operation=bool)
-------------------------------------
- Arg2 (str(async | sync)): kArray[kObjref=/vport/protocolStack/atm,/vport/protocolStack/atm/dhcpEndpoint,/vport/protocolStack/atm/dhcpEndpoint/ancp,/vport/protocolStack/atm/dhcpEndpoint/range,/vport/protocolStack/atm/dhcpEndpoint/range/ancpRange,/vport/protocolStack/atm/dhcpServerEndpoint,/vport/protocolStack/atm/dhcpServerEndpoint/range,/vport/protocolStack/atm/emulatedRouter,/vport/protocolStack/atm/emulatedRouter/dhcpEndpoint,/vport/protocolStack/atm/emulatedRouter/dhcpEndpoint/ancp,/vport/protocolStack/atm/emulatedRouter/dhcpEndpoint/range,/vport/protocolStack/atm/emulatedRouter/dhcpEndpoint/range/amtRange,/vport/protocolStack/atm/emulatedRouter/dhcpEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/dhcpServerEndpoint,/vport/protocolStack/atm/emulatedRouter/dhcpServerEndpoint/range,/vport/protocolStack/atm/emulatedRouter/dhcpServerEndpoint/range/amtRange,/vport/protocolStack/atm/emulatedRouter/ip,/vport/protocolStack/atm/emulatedRouter/ip/ancp,/vport/protocolStack/atm/emulatedRouter/ip/egtpPcrfEndpoint,/vport/protocolStack/atm/emulatedRouter/ip/egtpPcrfEndpoint/range/amtRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpPcrfEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpPcrfEndpoint/range/twampControlRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpPcrfS5S8SgwEndpoint,/vport/protocolStack/atm/emulatedRouter/ip/egtpPcrfS5S8SgwEndpoint/range/amtRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpPcrfS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpPcrfS5S8SgwEndpoint/range/twampControlRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpS5S8PgwEndpoint,/vport/protocolStack/atm/emulatedRouter/ip/egtpS5S8PgwEndpoint/range/amtRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpS5S8PgwEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpS5S8PgwEndpoint/range/twampControlRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpS5S8SgwEndpoint,/vport/protocolStack/atm/emulatedRouter/ip/egtpS5S8SgwEndpoint/range/amtRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpS5S8SgwEndpoint/range/twampControlRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpSgwEndpoint,/vport/protocolStack/atm/emulatedRouter/ip/egtpSgwEndpoint/range,/vport/protocolStack/atm/emulatedRouter/ip/egtpSgwEndpoint/range/amtRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpSgwEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpSgwEndpoint/range/twampControlRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpUeS5S8SgwEndpoint,/vport/protocolStack/atm/emulatedRouter/ip/egtpUeS5S8SgwEndpoint/range/amtRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpUeS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpUeS5S8SgwEndpoint/range/twampControlRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tp,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLacEndpoint,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/amtRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/twampCon
trolRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/amtRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/twampControlRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tpEndpoint,/vport/protocolStack/atm/emulatedRouter/ip/l2tpEndpoint/range,/vport/protocolStack/atm/emulatedRouter/ip/l2tpEndpoint/range/amtRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tpEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tpEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tpEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tpEndpoint/range/twampControlRange,/vport/protocolStack/atm/emulatedRouter/ip/smDnsEndpoint,/vport/protocolStack/atm/emulatedRouter/ip/smDnsEndpoint/range/amtRange,/vport/protocolStack/atm/emulatedRouter/ip/smDnsEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/smDnsEndpoint/range/twampControlRange,/vport/protocolStack/atm/emulatedRouter/ip/twampClient,/vport/protocolStack/atm/emulatedRouter/ip/twampServer,/vport/protocolStack/atm/emulatedRouter/ipEndpoint,/vport/protocolStack/atm/emulatedRouter/ipEndpoint/ancp,/vport/protocolStack/atm/emulatedRouter/ipEndpoint/range/amtRange,/vport/protocolStack/atm/emulatedRouter/ipEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ipEndpoint/range/twampControlRange,/vport/protocolStack/atm/emulatedRouter/ipEndpoint/twampClient,/vport/protocolStack/atm/emulatedRouter/ipEndpoint/twampServer,/vport/protocolStack/atm/emulatedRouterEndpoint,/vport/protocolStack/atm/emulatedRouterEndpoint/range/amtRange,/vport/protocolStack/atm/ip,/vport/protocolStack/atm/ip/ancp,/vport/protocolStack/atm/ip/egtpPcrfEndpoint,/vport/protocolStack/atm/ip/egtpPcrfEndpoint/range/amtRange,/vport/protocolStack/atm/ip/egtpPcrfEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/egtpPcrfEndpoint/range/twampControlRange,/vport/protocolStack/atm/ip/egtpPcrfS5S8SgwEndpoint,/vport/protocolStack/atm/ip/egtpPcrfS5S8SgwEndpoint/range/amtRange,/vport/protocolStack/atm/ip/egtpPcrfS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/egtpPcrfS5S8SgwEndpoint/range/twampControlRange,/vport/protocolStack/atm/ip/egtpS5S8PgwEndpoint,/vport/protocolStack/atm/ip/egtpS5S8PgwEndpoint/range/amtRange,/vport/protocolStack/atm/ip/egtpS5S8PgwEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/egtpS5S8PgwEndpoint/range/twampControlRange,/vport/protocolStack/atm/ip/egtpS5S8SgwEndpoint,/vport/protocolStack/atm/ip/egtpS5S8SgwEndpoint/range/amtRange,/vport/protocolStack/atm/ip/egtpS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/egtpS5S8SgwEndpoint/range/twampControlRange,/vport/protocolStack/atm/ip/egtpSgwEndpoint,/vport/protocolStack/atm/ip/egtpSgwEndpoint/range,/vport/protocolStack/atm/ip/egtpSgwEndpoint/range/amtRange,/vport/protocolStack/atm/ip/egtpSgwEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/egtpSgwEndpoint/range/twampControlRange,/vport/protocolStack/atm/ip/egtpUeS5S8SgwEndpoint,/vport/protocolStack/atm/ip/egtpUeS5S8SgwEndpoint/range/amtRange,/vport/prot
ocolStack/atm/ip/egtpUeS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/egtpUeS5S8SgwEndpoint/range/twampControlRange,/vport/protocolStack/atm/ip/l2tp,/vport/protocolStack/atm/ip/l2tp/dhcpoLacEndpoint,/vport/protocolStack/atm/ip/l2tp/dhcpoLacEndpoint/range,/vport/protocolStack/atm/ip/l2tp/dhcpoLacEndpoint/range,/vport/protocolStack/atm/ip/l2tp/dhcpoLacEndpoint/range/amtRange,/vport/protocolStack/atm/ip/l2tp/dhcpoLacEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/l2tp/dhcpoLacEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/atm/ip/l2tp/dhcpoLacEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/atm/ip/l2tp/dhcpoLacEndpoint/range/twampControlRange,/vport/protocolStack/atm/ip/l2tp/dhcpoLnsEndpoint,/vport/protocolStack/atm/ip/l2tp/dhcpoLnsEndpoint/range,/vport/protocolStack/atm/ip/l2tp/dhcpoLnsEndpoint/range,/vport/protocolStack/atm/ip/l2tp/dhcpoLnsEndpoint/range/amtRange,/vport/protocolStack/atm/ip/l2tp/dhcpoLnsEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/l2tp/dhcpoLnsEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/atm/ip/l2tp/dhcpoLnsEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/atm/ip/l2tp/dhcpoLnsEndpoint/range/twampControlRange,/vport/protocolStack/atm/ip/l2tpEndpoint,/vport/protocolStack/atm/ip/l2tpEndpoint/range,/vport/protocolStack/atm/ip/l2tpEndpoint/range/amtRange,/vport/protocolStack/atm/ip/l2tpEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/l2tpEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/atm/ip/l2tpEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/atm/ip/l2tpEndpoint/range/twampControlRange,/vport/protocolStack/atm/ip/smDnsEndpoint,/vport/protocolStack/atm/ip/smDnsEndpoint/range/amtRange,/vport/protocolStack/atm/ip/smDnsEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/smDnsEndpoint/range/twampControlRange,/vport/protocolStack/atm/ip/twampClient,/vport/protocolStack/atm/ip/twampServer,/vport/protocolStack/atm/ipEndpoint,/vport/protocolStack/atm/ipEndpoint/ancp,/vport/protocolStack/atm/ipEndpoint/range/amtRange,/vport/protocolStack/atm/ipEndpoint/range/ancpRange,/vport/protocolStack/atm/ipEndpoint/range/twampControlRange,/vport/protocolStack/atm/ipEndpoint/twampClient,/vport/protocolStack/atm/ipEndpoint/twampServer,/vport/protocolStack/atm/pppox,/vport/protocolStack/atm/pppox/ancp,/vport/protocolStack/atm/pppox/dhcpoPppClientEndpoint,/vport/protocolStack/atm/pppox/dhcpoPppClientEndpoint/range,/vport/protocolStack/atm/pppox/dhcpoPppClientEndpoint/range,/vport/protocolStack/atm/pppox/dhcpoPppClientEndpoint/range/ancpRange,/vport/protocolStack/atm/pppox/dhcpoPppClientEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/atm/pppox/dhcpoPppClientEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/atm/pppox/dhcpoPppServerEndpoint,/vport/protocolStack/atm/pppox/dhcpoPppServerEndpoint/range,/vport/protocolStack/atm/pppox/dhcpoPppServerEndpoint/range,/vport/protocolStack/atm/pppox/dhcpoPppServerEndpoint/range/ancpRange,/vport/protocolStack/atm/pppox/dhcpoPppServerEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/atm/pppox/dhcpoPppServerEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/atm/pppoxEndpoint,/vport/protocolStack/atm/pppoxEndpoint/ancp,/vport/protocolStack/atm/pppoxEndpoint/range,/vport/protocolStack/atm/pppoxEndpoint/range/ancpRange,/vport/protocolStack/atm/pppoxEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/atm/pppoxEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/ethernet,/vport/protocolStack/ethernet/dcbxEndpoint,/vport/protocolStack/ethernet/dcbxEndpoint/range,/vport/protocolSta
ck/ethernet/dhcpEndpoint,/vport/protocolStack/ethernet/dhcpEndpoint/ancp,/vport/protocolStack/ethernet/dhcpEndpoint/range,/vport/protocolStack/ethernet/dhcpEndpoint/range/ancpRange,/vport/protocolStack/ethernet/dhcpServerEndpoint,/vport/protocolStack/ethernet/dhcpServerEndpoint/range,/vport/protocolStack/ethernet/emulatedRouter,/vport/protocolStack/ethernet/emulatedRouter/dhcpEndpoint,/vport/protocolStack/ethernet/emulatedRouter/dhcpEndpoint/ancp,/vport/protocolStack/ethernet/emulatedRouter/dhcpEndpoint/range,/vport/protocolStack/ethernet/emulatedRouter/dhcpEndpoint/range/amtRange,/vport/protocolStack/ethernet/emulatedRouter/dhcpEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/dhcpServerEndpoint,/vport/protocolStack/ethernet/emulatedRouter/dhcpServerEndpoint/range,/vport/protocolStack/ethernet/emulatedRouter/dhcpServerEndpoint/range/amtRange,/vport/protocolStack/ethernet/emulatedRouter/ip,/vport/protocolStack/ethernet/emulatedRouter/ip/ancp,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpPcrfEndpoint,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpPcrfEndpoint/range/amtRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpPcrfEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpPcrfEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpPcrfS5S8SgwEndpoint,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpPcrfS5S8SgwEndpoint/range/amtRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpPcrfS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpPcrfS5S8SgwEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpS5S8PgwEndpoint,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpS5S8PgwEndpoint/range/amtRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpS5S8PgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpS5S8PgwEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpS5S8SgwEndpoint,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpS5S8SgwEndpoint/range/amtRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpS5S8SgwEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpSgwEndpoint,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpSgwEndpoint/range,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpSgwEndpoint/range/amtRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpSgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpSgwEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpUeS5S8SgwEndpoint,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpUeS5S8SgwEndpoint/range/amtRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpUeS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpUeS5S8SgwEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLacEndpoint,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/amtRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/dhcpv6PdClientRange,/vport/pr
otocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/amtRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tpEndpoint,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tpEndpoint/range,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tpEndpoint/range/amtRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tpEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tpEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tpEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tpEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/emulatedRouter/ip/smDnsEndpoint,/vport/protocolStack/ethernet/emulatedRouter/ip/smDnsEndpoint/range/amtRange,/vport/protocolStack/ethernet/emulatedRouter/ip/smDnsEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/smDnsEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/emulatedRouter/ip/twampClient,/vport/protocolStack/ethernet/emulatedRouter/ip/twampServer,/vport/protocolStack/ethernet/emulatedRouter/ipEndpoint,/vport/protocolStack/ethernet/emulatedRouter/ipEndpoint/ancp,/vport/protocolStack/ethernet/emulatedRouter/ipEndpoint/range/amtRange,/vport/protocolStack/ethernet/emulatedRouter/ipEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ipEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/emulatedRouter/ipEndpoint/twampClient,/vport/protocolStack/ethernet/emulatedRouter/ipEndpoint/twampServer,/vport/protocolStack/ethernet/emulatedRouterEndpoint,/vport/protocolStack/ethernet/emulatedRouterEndpoint/range/amtRange,/vport/protocolStack/ethernet/esmc,/vport/protocolStack/ethernet/fcoeClientEndpoint,/vport/protocolStack/ethernet/fcoeClientEndpoint/range,/vport/protocolStack/ethernet/fcoeClientEndpoint/range,/vport/protocolStack/ethernet/fcoeClientEndpoint/range/fcoeClientFdiscRange,/vport/protocolStack/ethernet/fcoeClientEndpoint/range/fcoeClientFlogiRange,/vport/protocolStack/ethernet/fcoeFwdEndpoint,/vport/protocolStack/ethernet/fcoeFwdEndpoint/range,/vport/protocolStack/ethernet/fcoeFwdEndpoint/secondaryRange,/vport/protocolStack/ethernet/ip,/vport/protocolStack/ethernet/ip/ancp,/vport/protocolStack/ethernet/ip/egtpPcrfEndpoint,/vport/protocolStack/ethernet/ip/egtpPcrfEndpoint/range/amtRange,/vport/protocolStack/ethernet/ip/egtpPcrfEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpPcrfEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/ip/egtpPcrfS5S8SgwEndpoint,/vport/protocolStack/ethernet/ip/egtpPcrfS5S8SgwEndpoint/range/amtRange,/vport/protocolStack/ethernet/ip/egtpPcrfS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpPcrfS5S8SgwEndpoint/range/twampControlRange,/vport/protocolStack/ethern
et/ip/egtpS5S8PgwEndpoint,/vport/protocolStack/ethernet/ip/egtpS5S8PgwEndpoint/range/amtRange,/vport/protocolStack/ethernet/ip/egtpS5S8PgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpS5S8PgwEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/ip/egtpS5S8SgwEndpoint,/vport/protocolStack/ethernet/ip/egtpS5S8SgwEndpoint/range/amtRange,/vport/protocolStack/ethernet/ip/egtpS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpS5S8SgwEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/ip/egtpSgwEndpoint,/vport/protocolStack/ethernet/ip/egtpSgwEndpoint/range,/vport/protocolStack/ethernet/ip/egtpSgwEndpoint/range/amtRange,/vport/protocolStack/ethernet/ip/egtpSgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpSgwEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/ip/egtpUeS5S8SgwEndpoint,/vport/protocolStack/ethernet/ip/egtpUeS5S8SgwEndpoint/range/amtRange,/vport/protocolStack/ethernet/ip/egtpUeS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpUeS5S8SgwEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/ip/l2tp,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLacEndpoint,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLacEndpoint/range,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLacEndpoint/range,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLacEndpoint/range/amtRange,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLacEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLacEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLacEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLacEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLnsEndpoint,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLnsEndpoint/range,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLnsEndpoint/range,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLnsEndpoint/range/amtRange,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLnsEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLnsEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLnsEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLnsEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/ip/l2tpEndpoint,/vport/protocolStack/ethernet/ip/l2tpEndpoint/range,/vport/protocolStack/ethernet/ip/l2tpEndpoint/range/amtRange,/vport/protocolStack/ethernet/ip/l2tpEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/l2tpEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/ethernet/ip/l2tpEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/ethernet/ip/l2tpEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/ip/smDnsEndpoint,/vport/protocolStack/ethernet/ip/smDnsEndpoint/range/amtRange,/vport/protocolStack/ethernet/ip/smDnsEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/smDnsEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/ip/twampClient,/vport/protocolStack/ethernet/ip/twampServer,/vport/protocolStack/ethernet/ipEndpoint,/vport/protocolStack/ethernet/ipEndpoint/ancp,/vport/protocolStack/ethernet/ipEndpoint/range/amtRange,/vport/protocolStack/ethernet/ipEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ipEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/ipEndpoint/twampClient,/vport/protocolStack/ethernet/ipEndpoint/twampServer,/vport/protocolStack/ethernet/pppox,/vport/protocolStack/ethernet/pppox/ancp,/vport/protocolStack/ethernet/pppox/dhcpoPppClientEndpoint,/vport/protocolStack/ethernet/pppox/dhcpoP
ppClientEndpoint/range,/vport/protocolStack/ethernet/pppox/dhcpoPppClientEndpoint/range,/vport/protocolStack/ethernet/pppox/dhcpoPppClientEndpoint/range/ancpRange,/vport/protocolStack/ethernet/pppox/dhcpoPppClientEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/ethernet/pppox/dhcpoPppClientEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/ethernet/pppox/dhcpoPppServerEndpoint,/vport/protocolStack/ethernet/pppox/dhcpoPppServerEndpoint/range,/vport/protocolStack/ethernet/pppox/dhcpoPppServerEndpoint/range,/vport/protocolStack/ethernet/pppox/dhcpoPppServerEndpoint/range/ancpRange,/vport/protocolStack/ethernet/pppox/dhcpoPppServerEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/ethernet/pppox/dhcpoPppServerEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/ethernet/pppoxEndpoint,/vport/protocolStack/ethernet/pppoxEndpoint/ancp,/vport/protocolStack/ethernet/pppoxEndpoint/range,/vport/protocolStack/ethernet/pppoxEndpoint/range/ancpRange,/vport/protocolStack/ethernet/pppoxEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/ethernet/pppoxEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/ethernet/vepaEndpoint,/vport/protocolStack/ethernet/vepaEndpoint/range,/vport/protocolStack/ethernetEndpoint,/vport/protocolStack/ethernetEndpoint/esmc,/vport/protocolStack/fcClientEndpoint,/vport/protocolStack/fcClientEndpoint/range,/vport/protocolStack/fcClientEndpoint/range,/vport/protocolStack/fcClientEndpoint/range/fcClientFdiscRange,/vport/protocolStack/fcClientEndpoint/range/fcClientFlogiRange,/vport/protocolStack/fcFportFwdEndpoint,/vport/protocolStack/fcFportFwdEndpoint/range,/vport/protocolStack/fcFportFwdEndpoint/secondaryRange]
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('stop', payload=payload, response_object=None)
|
py | 7df828bd8fabe0a15d9e436cc748b5fb7428daf9 | from __future__ import division
import matplotlib.pyplot as plt
import numpy as np
import lmfit
from scipy.optimize import curve_fit
def model_func(x,x0,a):
return np.sin(a*x+x0)
def objective_func(pars,x,obs,err):
parvals = pars.valuesdict()
x0 = parvals['x0']
a = parvals['a']
ex = model_func(x,x0,a)
out = (obs-ex)/err
return out
def fit_function(params, x=None, dat1=None, dat2=None):
''' an example of how to write an objective function to fit multiple data sets with shared parameters '''
model1 = params['offset'] + x * params['slope1']
model2 = params['offset'] + x * params['slope2']
resid1 = dat1 - model1
resid2 = dat2 - model2
    return np.concatenate((resid1, resid2))
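# A minimal sketch (not executed here) of how fit_function above could be driven
# with lmfit to fit two hypothetical data sets y1 and y2 that share 'offset':
#   shared = lmfit.Parameters()
#   shared.add('offset', value=0.0)
#   shared.add('slope1', value=1.0)
#   shared.add('slope2', value=2.0)
#   result12 = lmfit.minimize(fit_function, shared, args=(x,), kws={'dat1': y1, 'dat2': y2})
# Because the residuals of both data sets are concatenated, the shared 'offset'
# is constrained by all points of both sets simultaneously.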
N = 10
x = np.linspace(0,2*np.pi,N)
y = np.sin(x)+(np.random.random(N)-0.5)/5
errs = np.random.random(N)*0.1
plt.errorbar(x,y,errs,ls='')
########## curve fit #############
res = curve_fit(model_func,x,y,sigma=errs,absolute_sigma=True)
print(res[0])
print(np.diag(res[1])**.5)
xfit = np.linspace(0,2*np.pi,1000)
fit = model_func(xfit,*res[0])
plt.plot(xfit,fit)
########### lmfit ############
p = lmfit.Parameters()
# (Name, Value, Vary, Min, Max, Expr)
p.add_many(('x0', 0, True, None, None, None),
('a', 1, True, None, None, None))
minner = lmfit.Minimizer(objective_func,p,(x,y,errs))
result = minner.minimize()
# calculate final result
a = result.params['a'].value
x0 = result.params['x0'].value
fit = model_func(xfit,x0,a)
plt.plot(xfit,fit)
# write error report
lmfit.report_fit(result)
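# The 1-sigma uncertainty estimates (when available) can also be read directly,
# e.g. result.params['a'].stderr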
########### plotting ############
plt.xlim(-1,7)
plt.ylim(-1.5,1.5)
plt.show()
|
py | 7df82b57a0df04675e7bf2a30487fe3092708400 | import _plotly_utils.basevalidators
class BordercolorValidator(_plotly_utils.basevalidators.ColorValidator):
def __init__(
self, plotly_name="bordercolor", parent_name="layout.slider", **kwargs
):
super(BordercolorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "arraydraw"),
**kwargs,
)
|
py | 7df82bf2a8d1de99fb133a9d35105b3d75630c6a | # GENERATED BY KOMAND SDK - DO NOT EDIT
from setuptools import setup, find_packages
setup(name='domaintools-rapid7-plugin',
version='1.0.1',
description='Domain name search tool that allows a wildcard search, monitoring of WHOIS record changes and history caching, as well as Reverse IP queries',
author='rapid7',
author_email='',
url='',
packages=find_packages(),
install_requires=['komand'], # Add third-party dependencies to requirements.txt, not here!
scripts=['bin/komand_domaintools']
)
|
py | 7df82c34a2dcfb14681087cbf11cd9cdb4916d6a | # -*- coding: utf-8 -*-
# Copyright European Organization for Nuclear Research (CERN) since 2012
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division
import datetime
import json
try:
from json.decoder import JSONDecodeError
except ImportError:
JSONDecodeError = ValueError
import functools
import logging
import time
import traceback
try:
from urlparse import urlparse # py2
except ImportError:
from urllib.parse import urlparse # py3
import uuid
import requests
from configparser import NoOptionError, NoSectionError
from json import loads
from requests.adapters import ReadTimeout
from requests.packages.urllib3 import disable_warnings # pylint: disable=import-error
from dogpile.cache.api import NoValue
from rucio.common.cache import make_region_memcached
from rucio.common.config import config_get, config_get_bool
from rucio.common.constants import FTS_JOB_TYPE, FTS_STATE, FTS_COMPLETE_STATE
from rucio.common.exception import TransferToolTimeout, TransferToolWrongAnswer, DuplicateFileTransferSubmission
from rucio.common.utils import APIEncoder, chunks, set_checksum_value
from rucio.core.request import get_source_rse, get_transfer_error
from rucio.core.rse import get_rse_supported_checksums_from_attributes
from rucio.core.oidc import get_token_for_account_operation
from rucio.core.monitor import record_counter, record_timer, MultiCounter
from rucio.transfertool.transfertool import Transfertool, TransferToolBuilder, TransferStatusReport
from rucio.db.sqla.constants import RequestState
logging.getLogger("requests").setLevel(logging.CRITICAL)
disable_warnings()
REGION_SHORT = make_region_memcached(expiration_time=900)
SUBMISSION_COUNTER = MultiCounter(prom='rucio_transfertool_fts3_submission', statsd='transfertool.fts3.{host}.submission.{state}',
documentation='Number of transfers submitted', labelnames=('state', 'host'))
CANCEL_COUNTER = MultiCounter(prom='rucio_transfertool_fts3_cancel', statsd='transfertool.fts3.{host}.cancel.{state}',
documentation='Number of cancelled transfers', labelnames=('state', 'host'))
UPDATE_PRIORITY_COUNTER = MultiCounter(prom='rucio_transfertool_fts3_update_priority', statsd='transfertool.fts3.{host}.update_priority.{state}',
documentation='Number of priority updates', labelnames=('state', 'host'))
QUERY_COUNTER = MultiCounter(prom='rucio_transfertool_fts3_query', statsd='transfertool.fts3.{host}.query.{state}',
documentation='Number of queried transfers', labelnames=('state', 'host'))
WHOAMI_COUNTER = MultiCounter(prom='rucio_transfertool_fts3_whoami', statsd='transfertool.fts3.{host}.whoami.{state}',
documentation='Number of whoami requests', labelnames=('state', 'host'))
VERSION_COUNTER = MultiCounter(prom='rucio_transfertool_fts3_version', statsd='transfertool.fts3.{host}.version.{state}',
documentation='Number of version requests', labelnames=('state', 'host'))
BULK_QUERY_COUNTER = MultiCounter(prom='rucio_transfertool_fts3_bulk_query', statsd='transfertool.fts3.{host}.bulk_query.{state}',
documentation='Number of bulk queries', labelnames=('state', 'host'))
QUERY_DETAILS_COUNTER = MultiCounter(prom='rucio_transfertool_fts3_query_details', statsd='transfertool.fts3.{host}.query_details.{state}',
documentation='Number of detailed status queries', labelnames=('state', 'host'))
ALLOW_USER_OIDC_TOKENS = config_get_bool('conveyor', 'allow_user_oidc_tokens', False, False)
REQUEST_OIDC_SCOPE = config_get('conveyor', 'request_oidc_scope', False, 'fts:submit-transfer')
REQUEST_OIDC_AUDIENCE = config_get('conveyor', 'request_oidc_audience', False, 'fts:example')
# https://fts3-docs.web.cern.ch/fts3-docs/docs/state_machine.html
FINAL_FTS_JOB_STATES = (FTS_STATE.FAILED, FTS_STATE.CANCELED, FTS_STATE.FINISHED, FTS_STATE.FINISHEDDIRTY)
FINAL_FTS_FILE_STATES = (FTS_STATE.FAILED, FTS_STATE.CANCELED, FTS_STATE.FINISHED, FTS_STATE.NOT_USED)
def oidc_supported(transfer_hop) -> bool:
"""
    Check OIDC AuthN/Z support for the destination and the source RSEs;
for oidc_support to be activated, all sources and the destination must explicitly support it
"""
# assumes use of boolean 'oidc_support' RSE attribute
if not transfer_hop.dst.rse.attributes.get('oidc_support', False):
return False
for source in transfer_hop.sources:
if not source.rse.attributes.get('oidc_support', False):
return False
return True
def checksum_validation_strategy(src_attributes, dst_attributes, logger):
"""
Compute the checksum validation strategy (none, source, destination or both) and the
supported checksums from the attributes of the source and destination RSE.
"""
source_supported_checksums = get_rse_supported_checksums_from_attributes(src_attributes)
dest_supported_checksums = get_rse_supported_checksums_from_attributes(dst_attributes)
common_checksum_names = set(source_supported_checksums).intersection(dest_supported_checksums)
verify_checksum = 'both'
if not dst_attributes.get('verify_checksum', True):
if not src_attributes.get('verify_checksum', True):
verify_checksum = 'none'
else:
verify_checksum = 'source'
else:
if not src_attributes.get('verify_checksum', True):
verify_checksum = 'destination'
else:
verify_checksum = 'both'
if len(common_checksum_names) == 0:
logger(logging.INFO, 'No common checksum method. Verifying destination only.')
verify_checksum = 'destination'
if source_supported_checksums == ['none']:
if dest_supported_checksums == ['none']:
# both endpoints support none
verify_checksum = 'none'
else:
            # src supports none but dst supports some
verify_checksum = 'destination'
else:
if dest_supported_checksums == ['none']:
# source supports some but destination does not
verify_checksum = 'source'
else:
if len(common_checksum_names) == 0:
                # source and dst support some but none in common (dst priority)
verify_checksum = 'destination'
else:
# Don't override the value in the file_metadata
pass
checksums_to_use = ['none']
if verify_checksum == 'both':
checksums_to_use = common_checksum_names
elif verify_checksum == 'source':
checksums_to_use = source_supported_checksums
elif verify_checksum == 'destination':
checksums_to_use = dest_supported_checksums
return verify_checksum, checksums_to_use
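# Illustrative outcomes of the strategy above (assuming verify_checksum is left
# enabled on both RSEs):
#   src ['adler32'], dst ['adler32'] -> 'both'        (common checksum available)
#   src ['adler32'], dst ['md5']     -> 'destination' (no common checksum)
#   src ['none'],    dst ['adler32'] -> 'destination'
#   src ['none'],    dst ['none']    -> 'none'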
def job_params_for_fts_transfer(transfer, bring_online, default_lifetime, archive_timeout_override, max_time_in_queue, logger, multihop=False):
"""
Prepare the job parameters which will be passed to FTS transfertool
"""
overwrite, bring_online_local = True, None
if transfer.src.rse.is_tape_or_staging_required():
bring_online_local = bring_online
if transfer.dst.rse.is_tape():
overwrite = False
# Get dest space token
dest_protocol = transfer.protocol_factory.protocol(transfer.dst.rse, transfer.dst.scheme, transfer.operation_dest)
dest_spacetoken = None
if dest_protocol.attributes and 'extended_attributes' in dest_protocol.attributes and \
dest_protocol.attributes['extended_attributes'] and 'space_token' in dest_protocol.attributes['extended_attributes']:
dest_spacetoken = dest_protocol.attributes['extended_attributes']['space_token']
src_spacetoken = None
strict_copy = transfer.dst.rse.attributes.get('strict_copy', False)
archive_timeout = transfer.dst.rse.attributes.get('archive_timeout', None)
verify_checksum, checksums_to_use = checksum_validation_strategy(transfer.src.rse.attributes, transfer.dst.rse.attributes, logger=logger)
transfer['checksums_to_use'] = checksums_to_use
job_params = {'account': transfer.rws.account,
'verify_checksum': verify_checksum,
'copy_pin_lifetime': transfer.rws.attributes.get('lifetime', default_lifetime),
'bring_online': bring_online_local,
'job_metadata': {
'issuer': 'rucio',
'multi_sources': True if len(transfer.legacy_sources) > 1 else False,
},
'overwrite': transfer.rws.attributes.get('overwrite', overwrite),
'priority': transfer.rws.priority}
if multihop:
job_params['multihop'] = True
job_params['job_metadata']['multihop'] = True
if strict_copy:
job_params['strict_copy'] = strict_copy
if dest_spacetoken:
job_params['spacetoken'] = dest_spacetoken
if src_spacetoken:
job_params['source_spacetoken'] = src_spacetoken
if transfer.use_ipv4:
job_params['ipv4'] = True
job_params['ipv6'] = False
if archive_timeout and transfer.dst.rse.is_tape():
try:
archive_timeout = int(archive_timeout)
if archive_timeout_override is None:
job_params['archive_timeout'] = archive_timeout
elif archive_timeout_override != 0:
job_params['archive_timeout'] = archive_timeout_override
# FTS only supports dst_file metadata if archive_timeout is set
job_params['dst_file_report'] = True
logger(logging.DEBUG, 'Added archive timeout to transfer.')
except ValueError:
logger(logging.WARNING, 'Could not set archive_timeout for %s. Must be integer.', transfer)
pass
if max_time_in_queue:
if transfer.rws.activity in max_time_in_queue:
job_params['max_time_in_queue'] = max_time_in_queue[transfer.rws.activity]
elif 'default' in max_time_in_queue:
job_params['max_time_in_queue'] = max_time_in_queue['default']
return job_params
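# For orientation: a plain single-source, disk-to-disk transfer would yield a
# job_params dict roughly like (values here are purely illustrative)
#   {'account': <account>, 'verify_checksum': 'both', 'copy_pin_lifetime': <default_lifetime>,
#    'bring_online': None, 'job_metadata': {'issuer': 'rucio', 'multi_sources': False},
#    'overwrite': True, 'priority': <rws.priority>}
# with the optional keys (multihop, strict_copy, spacetoken, source_spacetoken,
# ipv4/ipv6, archive_timeout, max_time_in_queue) only added when the
# corresponding attributes are set.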
def bulk_group_transfers(transfer_paths, policy='rule', group_bulk=200, source_strategy=None, max_time_in_queue=None,
logger=logging.log, archive_timeout_override=None, bring_online=None, default_lifetime=None):
"""
    Group transfers in bulk based on certain criteria
:param transfer_paths: List of transfer paths to group. Each path is a list of single-hop transfers.
:param policy: Policy to use to group.
:param group_bulk: Bulk sizes.
:param source_strategy: Strategy to group sources
:param max_time_in_queue: Maximum time in queue
:param archive_timeout_override: Override the archive_timeout parameter for any transfers with it set (0 to unset)
:param logger: Optional decorated logger that can be passed from the calling daemons or servers.
:return: List of grouped transfers.
"""
grouped_transfers = {}
grouped_jobs = []
try:
default_source_strategy = config_get(section='conveyor', option='default-source-strategy')
except (NoOptionError, NoSectionError, RuntimeError):
default_source_strategy = 'orderly'
try:
activity_source_strategy = config_get(section='conveyor', option='activity-source-strategy')
activity_source_strategy = loads(activity_source_strategy)
except (NoOptionError, NoSectionError, RuntimeError):
activity_source_strategy = {}
except ValueError:
logger(logging.WARNING, 'activity_source_strategy not properly defined')
activity_source_strategy = {}
for transfer_path in transfer_paths:
for i, transfer in enumerate(transfer_path):
transfer['selection_strategy'] = source_strategy if source_strategy else activity_source_strategy.get(str(transfer.rws.activity), default_source_strategy)
_build_job_params = functools.partial(job_params_for_fts_transfer,
bring_online=bring_online,
default_lifetime=default_lifetime,
archive_timeout_override=archive_timeout_override,
max_time_in_queue=max_time_in_queue,
logger=logger)
for transfer_path in transfer_paths:
if len(transfer_path) > 1:
# for multihop transfers, all the path is submitted as a separate job
job_params = _build_job_params(transfer_path[-1], multihop=True)
for transfer in transfer_path[:-1]:
hop_params = _build_job_params(transfer, multihop=True)
# Only allow overwrite if all transfers in multihop allow it
job_params['overwrite'] = hop_params['overwrite'] and job_params['overwrite']
# Activate bring_online if it was requested by first hop (it is a multihop starting at a tape)
# We don't allow multihop via a tape, so bring_online should not be set on any other hop
if transfer is transfer_path[0] and hop_params['bring_online']:
job_params['bring_online'] = hop_params['bring_online']
group_key = 'multihop_%s' % transfer_path[-1].rws.request_id
grouped_transfers[group_key] = {'transfers': transfer_path[0:group_bulk], 'job_params': job_params}
elif len(transfer_path[0].legacy_sources) > 1:
# for multi-source transfers, no bulk submission.
transfer = transfer_path[0]
grouped_jobs.append({'transfers': [transfer], 'job_params': _build_job_params(transfer)})
else:
# it's a single-hop, single-source, transfer. Hence, a candidate for bulk submission.
transfer = transfer_path[0]
job_params = _build_job_params(transfer)
# we cannot group transfers together if their job_key differ
job_key = '%s,%s,%s,%s,%s,%s,%s,%s' % (job_params['verify_checksum'], job_params.get('spacetoken', None),
job_params['copy_pin_lifetime'],
job_params['bring_online'], job_params['job_metadata'],
job_params.get('source_spacetoken', None),
job_params['overwrite'], job_params['priority'])
if 'max_time_in_queue' in job_params:
job_key = job_key + ',%s' % job_params['max_time_in_queue']
# Additionally, we don't want to group transfers together if their policy_key differ
policy_key = ''
if policy == 'rule':
policy_key = '%s' % transfer.rws.rule_id
if policy == 'dest':
policy_key = '%s' % transfer.dst.rse.name
if policy == 'src_dest':
policy_key = '%s,%s' % (transfer.src.rse.name, transfer.dst.rse.name)
if policy == 'rule_src_dest':
policy_key = '%s,%s,%s' % (transfer.rws.rule_id, transfer.src.rse.name, transfer.dst.rse.name)
if policy == 'activity_dest':
policy_key = '%s %s' % (transfer.rws.activity, transfer.dst.rse.name)
policy_key = "_".join(policy_key.split(' '))
if policy == 'activity_src_dest':
policy_key = '%s %s %s' % (transfer.rws.activity, transfer.src.rse.name, transfer.dst.rse.name)
policy_key = "_".join(policy_key.split(' '))
# maybe here we need to hash the key if it's too long
group_key = "%s_%s" % (job_key, policy_key)
if group_key not in grouped_transfers:
grouped_transfers[group_key] = {'transfers': [], 'job_params': job_params}
grouped_transfers[group_key]['transfers'].append(transfer)
# split transfer groups to have at most group_bulk elements in each one
for group in grouped_transfers.values():
job_params = group['job_params']
for transfer_paths in chunks(group['transfers'], group_bulk):
grouped_jobs.append({'transfers': transfer_paths, 'job_params': job_params})
return grouped_jobs
class Fts3TransferStatusReport(TransferStatusReport):
supported_db_fields = [
'state',
'external_id',
'started_at',
'transferred_at',
'staging_started_at',
'staging_finished_at',
'source_rse_id',
'err_msg',
'attributes',
]
def __init__(self, external_host, request_id, request=None):
super().__init__(request_id, request=request)
self.external_host = external_host
# Initialized in child class constructors:
self._transfer_id = None
self._file_metadata = {}
self._multi_sources = None
self._src_url = None
self._dst_url = None
# Initialized in child class initialize():
self._reason = None
self._src_rse = None
        # Supported db fields below:
self.state = None
self.external_id = None
self.started_at = None
self.transferred_at = None
self.staging_started_at = None
self.staging_finished_at = None
self.source_rse_id = None
self.err_msg = None
self.attributes = None
def __str__(self):
return f'Transfer {self._transfer_id} of {self._file_metadata["scope"]}:{self._file_metadata["name"]} ' \
f'{self._file_metadata["src_rse"]} --({self._file_metadata["request_id"]})-> {self._file_metadata["dst_rse"]}'
def initialize(self, session, logger=logging.log):
raise NotImplementedError(f"{self.__class__.__name__} is abstract and shouldn't be used directly")
def get_monitor_msg_fields(self, session, logger=logging.log):
self.ensure_initialized(session, logger)
fields = {
'transfer_link': self._transfer_link(),
'reason': self._reason,
'src-type': self._file_metadata.get('src_type'),
'src-rse': self._src_rse,
'src-url': self._src_url,
            'dst-type': self._file_metadata.get('dst_type'),
'dst-rse': self._file_metadata.get('dst_rse'),
'dst-url': self._dst_url,
'started_at': self.started_at,
'transferred_at': self.transferred_at,
}
return fields
def _transfer_link(self):
return '%s/fts3/ftsmon/#/job/%s' % (self.external_host.replace('8446', '8449'), self._transfer_id)
def _find_attribute_updates(self, request, new_state, reason, overwrite_corrupted_files):
attributes = None
if new_state == RequestState.FAILED and 'Destination file exists and overwrite is not enabled' in (reason or ''):
dst_file = self._file_metadata.get('dst_file', {})
if self._dst_file_set_and_file_corrupted(request, dst_file):
if overwrite_corrupted_files:
attributes = request['attributes']
attributes['overwrite'] = True
return attributes
def _find_used_source_rse(self, session, logger):
"""
For multi-source transfers, FTS has a choice between multiple sources.
Find which of the possible sources FTS actually used for the transfer.
"""
meta_rse_name = self._file_metadata.get('src_rse', None)
meta_rse_id = self._file_metadata.get('src_rse_id', None)
request_id = self._file_metadata.get('request_id', None)
if self._multi_sources and self._src_url:
rse_name, rse_id = get_source_rse(request_id, self._src_url, session=session)
if rse_name and rse_name != meta_rse_name:
logger(logging.DEBUG, 'Correct RSE: %s for source surl: %s' % (rse_name, self._src_url))
return rse_name, rse_id
return meta_rse_name, meta_rse_id
@staticmethod
def _dst_file_set_and_file_corrupted(request, dst_file):
"""
Returns True if the `dst_file` dict returned by fts was filled and its content allows to
affirm that the file is corrupted.
"""
if (request and dst_file and (
dst_file.get('file_size') is not None and dst_file['file_size'] != request.get('bytes')
or dst_file.get('checksum_type', '').lower() == 'adler32' and dst_file.get('checksum_value') != request.get('adler32')
or dst_file.get('checksum_type', '').lower() == 'md5' and dst_file.get('checksum_value') != request.get('md5'))):
return True
return False
@staticmethod
def _dst_file_set_and_file_correct(request, dst_file):
"""
Returns True if the `dst_file` dict returned by fts was filled and its content allows to
affirm that the file is correct.
"""
if (request and dst_file
and dst_file.get('file_size')
and dst_file.get('file_size') == request.get('bytes')
and (dst_file.get('checksum_type', '').lower() == 'adler32' and dst_file.get('checksum_value') == request.get('adler32')
or dst_file.get('checksum_type', '').lower() == 'md5' and dst_file.get('checksum_value') == request.get('md5'))):
return True
return False
@classmethod
def _is_recoverable_fts_overwrite_error(cls, request, reason, file_metadata):
"""
Verify the special case when FTS cannot copy a file because destination exists and overwrite is disabled,
but the destination file is actually correct.
This can happen when some transitory error happened during a previous submission attempt.
Hence, the transfer is correctly executed by FTS, but rucio doesn't know about it.
Returns true when the request must be marked as successful even if it was reported failed by FTS.
"""
if not request or not file_metadata:
return False
dst_file = file_metadata.get('dst_file', {})
dst_type = file_metadata.get('dst_type', None)
if 'Destination file exists and overwrite is not enabled' in (reason or ''):
if cls._dst_file_set_and_file_correct(request, dst_file):
if dst_file.get('file_on_tape'):
return True
elif dst_type == 'DISK':
return True
return False
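    # Typical (hypothetical) scenario covered above: a submission timed out on the
    # Rucio side, FTS nevertheless completed the copy, and the automatic resubmission
    # then fails with "Destination file exists and overwrite is not enabled" while the
    # dst_file size/checksum match the request -> the request is marked DONE anyway.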
class FTS3CompletionMessageTransferStatusReport(Fts3TransferStatusReport):
"""
Parses FTS Completion messages received via the message queue
"""
def __init__(self, external_host, request_id, fts_message):
super().__init__(external_host=external_host, request_id=request_id)
self.fts_message = fts_message
self._transfer_id = fts_message.get('tr_id').split("__")[-1]
self._file_metadata = fts_message['file_metadata']
self._multi_sources = str(fts_message.get('job_metadata', {}).get('multi_sources', '')).lower() == str('true')
self._src_url = fts_message.get('src_url', None)
self._dst_url = fts_message.get('dst_url', None)
def initialize(self, session, logger=logging.log):
fts_message = self.fts_message
request_id = self.request_id
reason = fts_message.get('t__error_message', None)
# job_state = fts_message.get('t_final_transfer_state', None)
new_state = None
if str(fts_message['t_final_transfer_state']) == FTS_COMPLETE_STATE.OK: # pylint:disable=no-member
new_state = RequestState.DONE
elif str(fts_message['t_final_transfer_state']) == FTS_COMPLETE_STATE.ERROR:
request = self.request(session)
if self._is_recoverable_fts_overwrite_error(request, reason, self._file_metadata): # pylint:disable=no-member
new_state = RequestState.DONE
else:
new_state = RequestState.FAILED
transfer_id = self._transfer_id
if new_state:
request = self.request(session)
if request['external_id'] == transfer_id and request['state'] != new_state:
src_rse_name, src_rse_id = self._find_used_source_rse(session, logger)
self._reason = reason
self._src_rse = src_rse_name
self.state = new_state
self.external_id = transfer_id
self.started_at = datetime.datetime.utcfromtimestamp(float(fts_message.get('tr_timestamp_start', 0)) / 1000)
self.transferred_at = datetime.datetime.utcfromtimestamp(float(fts_message.get('tr_timestamp_complete', 0)) / 1000)
self.staging_started_at = None
self.staging_finished_at = None
self.source_rse_id = src_rse_id
self.err_msg = get_transfer_error(self.state, reason)
if self.err_msg and self._file_metadata.get('src_type') == "TAPE":
self.err_msg = '[TAPE SOURCE] ' + self.err_msg
self.attributes = self._find_attribute_updates(
request=request,
new_state=new_state,
reason=reason,
overwrite_corrupted_files=config_get_bool('transfers', 'overwrite_corrupted_files', default=False, session=session),
)
elif request['external_id'] != transfer_id:
logger(logging.WARNING, "Response %s with transfer id %s is different from the request transfer id %s, will not update" % (request_id, transfer_id, request['external_id']))
else:
logger(logging.DEBUG, "Request %s is already in %s state, will not update" % (request_id, new_state))
else:
logger(logging.DEBUG, "No state change computed for %s. Skipping request update." % request_id)
class FTS3ApiTransferStatusReport(Fts3TransferStatusReport):
"""
Parses FTS api response
"""
def __init__(self, external_host, request_id, job_response, file_response, request=None):
super().__init__(external_host=external_host, request_id=request_id, request=request)
self.job_response = job_response
self.file_response = file_response
self._transfer_id = job_response.get('job_id')
self._file_metadata = file_response['file_metadata']
self._multi_sources = str(job_response['job_metadata'].get('multi_sources', '')).lower() == str('true')
self._src_url = file_response.get('source_surl', None)
self._dst_url = file_response.get('dest_surl', None)
def initialize(self, session, logger=logging.log):
job_response = self.job_response
file_response = self.file_response
request_id = self.request_id
file_state = file_response['file_state']
reason = file_response.get('reason', None)
new_state = None
job_state = job_response.get('job_state', None)
multi_hop = job_response.get('job_type') == FTS_JOB_TYPE.MULTI_HOP
job_state_is_final = job_state in FINAL_FTS_JOB_STATES
file_state_is_final = file_state in FINAL_FTS_FILE_STATES
if file_state_is_final:
if file_state == FTS_STATE.FINISHED:
new_state = RequestState.DONE
elif job_state_is_final and file_state == FTS_STATE.FAILED \
and self._is_recoverable_fts_overwrite_error(self.request(session), reason, self._file_metadata):
new_state = RequestState.DONE
elif job_state_is_final and file_state in (FTS_STATE.FAILED, FTS_STATE.CANCELED):
new_state = RequestState.FAILED
elif job_state_is_final and file_state == FTS_STATE.NOT_USED:
if job_state == FTS_STATE.FINISHED:
# it is a multi-source transfer. This source wasn't used, but another one was successful
new_state = RequestState.DONE
else:
# failed multi-source or multi-hop (you cannot have unused sources in a successful multi-hop)
new_state = RequestState.FAILED
if not reason and multi_hop:
reason = 'Unused hop in multi-hop'
transfer_id = self._transfer_id
if new_state:
request = self.request(session)
if request['external_id'] == transfer_id and request['state'] != new_state:
src_rse_name, src_rse_id = self._find_used_source_rse(session, logger)
self._reason = reason
self._src_rse = src_rse_name
self.state = new_state
self.external_id = transfer_id
self.started_at = datetime.datetime.strptime(file_response['start_time'], '%Y-%m-%dT%H:%M:%S') if file_response['start_time'] else None
self.transferred_at = datetime.datetime.strptime(file_response['finish_time'], '%Y-%m-%dT%H:%M:%S') if file_response['finish_time'] else None
self.staging_started_at = datetime.datetime.strptime(file_response['staging_start'], '%Y-%m-%dT%H:%M:%S') if file_response['staging_start'] else None
self.staging_finished_at = datetime.datetime.strptime(file_response['staging_finished'], '%Y-%m-%dT%H:%M:%S') if file_response['staging_finished'] else None
self.source_rse_id = src_rse_id
self.err_msg = get_transfer_error(self.state, reason)
if self.err_msg and self._file_metadata.get('src_type') == "TAPE":
self.err_msg = '[TAPE SOURCE] ' + self.err_msg
self.attributes = self._find_attribute_updates(
request=request,
new_state=new_state,
reason=reason,
overwrite_corrupted_files=config_get_bool('transfers', 'overwrite_corrupted_files', default=False, session=session),
)
elif request['external_id'] != transfer_id:
logger(logging.WARNING, "Response %s with transfer id %s is different from the request transfer id %s, will not update" % (request_id, transfer_id, request['external_id']))
else:
logger(logging.DEBUG, "Request %s is already in %s state, will not update" % (request_id, new_state))
class FTS3Transfertool(Transfertool):
"""
FTS3 implementation of a Rucio transfertool
"""
external_name = 'fts3'
def __init__(self, external_host, oidc_account=None, vo=None, group_bulk=1, group_policy='rule', source_strategy=None,
max_time_in_queue=None, bring_online=43200, default_lifetime=172800, archive_timeout_override=None,
logger=logging.log):
"""
Initializes the transfertool
:param external_host: The external host where the transfertool API is running
:param oidc_account: optional oidc account to use for submission
"""
super().__init__(external_host, logger)
self.group_policy = group_policy
self.group_bulk = group_bulk
self.source_strategy = source_strategy
self.max_time_in_queue = max_time_in_queue or {}
self.bring_online = bring_online
self.default_lifetime = default_lifetime
self.archive_timeout_override = archive_timeout_override
usercert = config_get('conveyor', 'usercert', False, None)
if vo:
usercert = config_get('vo_certs', vo, False, usercert)
# token for OAuth 2.0 OIDC authorization scheme (working only with dCache + davs/https protocols as of Sep 2019)
self.token = None
if oidc_account:
getadmintoken = False
if ALLOW_USER_OIDC_TOKENS is False:
getadmintoken = True
self.logger(logging.DEBUG, 'Attempting to get a token for account %s. Admin token option set to %s' % (oidc_account, getadmintoken))
# find the appropriate OIDC token and exchange it (for user accounts) if necessary
token_dict = get_token_for_account_operation(oidc_account, req_audience=REQUEST_OIDC_AUDIENCE, req_scope=REQUEST_OIDC_SCOPE, admin=getadmintoken)
if token_dict is not None:
self.logger(logging.DEBUG, 'Access token has been granted.')
if 'token' in token_dict:
self.logger(logging.DEBUG, 'Access token used as transfer token.')
self.token = token_dict['token']
self.deterministic_id = config_get_bool('conveyor', 'use_deterministic_id', False, False)
self.headers = {'Content-Type': 'application/json'}
if self.external_host.startswith('https://'):
if self.token:
self.cert = None
self.verify = False
self.headers['Authorization'] = 'Bearer ' + self.token
else:
self.cert = (usercert, usercert)
self.verify = False
else:
self.cert = None
self.verify = True # True is the default setting of a requests.* method
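        # In short: an https endpoint with an OIDC token uses bearer-token auth and no
        # client certificate; https without a token falls back to the X.509 usercert
        # (server verification disabled in both cases); any other scheme sends no client
        # credentials and keeps the requests default TLS verification.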
@classmethod
def submission_builder_for_path(cls, transfer_path, logger=logging.log):
vo = None
if config_get_bool('common', 'multi_vo', False, None):
vo = transfer_path[-1].rws.scope.vo
sub_path = []
fts_hosts = []
for hop in transfer_path:
src_and_dst_have_fts_attribute = hop.src.rse.attributes.get('fts', None) is not None and hop.dst.rse.attributes.get('fts', None) is not None
hosts = hop.dst.rse.attributes.get('fts', None)
if hop.src.rse.attributes.get('sign_url', None) == 'gcs':
hosts = hop.src.rse.attributes.get('fts', None)
hosts = hosts.split(",") if hosts else []
if src_and_dst_have_fts_attribute and hosts:
fts_hosts = hosts
sub_path.append(hop)
else:
break
if len(sub_path) < len(transfer_path):
logger(logging.INFO, 'FTS3Transfertool can only submit {} hops from {}'.format(len(sub_path), [str(hop) for hop in transfer_path]))
if sub_path:
oidc_account = None
if all(oidc_supported(t) for t in sub_path):
logger(logging.DEBUG, 'OAuth2/OIDC available for transfer {}'.format([str(hop) for hop in sub_path]))
oidc_account = transfer_path[-1].rws.account
return sub_path, TransferToolBuilder(cls, external_host=fts_hosts[0], oidc_account=oidc_account, vo=vo)
else:
return [], None
def group_into_submit_jobs(self, transfer_paths):
jobs = bulk_group_transfers(
transfer_paths,
policy=self.group_policy,
group_bulk=self.group_bulk,
source_strategy=self.source_strategy,
max_time_in_queue=self.max_time_in_queue,
bring_online=self.bring_online,
default_lifetime=self.default_lifetime,
archive_timeout_override=self.archive_timeout_override,
)
return jobs
@classmethod
def __file_from_transfer(cls, transfer, job_params):
rws = transfer.rws
t_file = {
'sources': [s[1] for s in transfer.legacy_sources],
'destinations': [transfer.dest_url],
'metadata': {
'request_id': rws.request_id,
'scope': rws.scope,
'name': rws.name,
'activity': rws.activity,
'request_type': rws.request_type,
'src_type': "TAPE" if transfer.src.rse.is_tape_or_staging_required() else 'DISK',
'dst_type': "TAPE" if transfer.dst.rse.is_tape() else 'DISK',
'src_rse': transfer.src.rse.name,
'dst_rse': transfer.dst.rse.name,
'src_rse_id': transfer.src.rse.id,
'dest_rse_id': transfer.dst.rse.id,
'filesize': rws.byte_count,
'md5': rws.md5,
'adler32': rws.adler32
},
'filesize': rws.byte_count,
'checksum': None,
'verify_checksum': job_params['verify_checksum'],
'selection_strategy': transfer['selection_strategy'],
'request_type': rws.request_type,
'activity': rws.activity
}
if t_file['verify_checksum'] != 'none':
set_checksum_value(t_file, transfer['checksums_to_use'])
return t_file
def submit(self, transfers, job_params, timeout=None):
"""
Submit transfers to FTS3 via JSON.
:param files: List of dictionaries describing the file transfers.
:param job_params: Dictionary containing key/value pairs, for all transfers.
:param timeout: Timeout in seconds.
:returns: FTS transfer identifier.
"""
start_time = time.time()
files = []
for transfer in transfers:
if isinstance(transfer, dict):
                # Compatibility with scripts from /tools which directly use transfertools and pass a dict instead of transfer definitions
# TODO: ensure that those scripts are still used and get rid of this compatibility otherwise
files.append(transfer)
continue
files.append(self.__file_from_transfer(transfer, job_params))
# FTS3 expects 'davs' as the scheme identifier instead of https
for transfer_file in files:
if not transfer_file['sources'] or transfer_file['sources'] == []:
raise Exception('No sources defined')
new_src_urls = []
new_dst_urls = []
for url in transfer_file['sources']:
if url.startswith('https'):
new_src_urls.append(':'.join(['davs'] + url.split(':')[1:]))
else:
new_src_urls.append(url)
for url in transfer_file['destinations']:
if url.startswith('https'):
new_dst_urls.append(':'.join(['davs'] + url.split(':')[1:]))
else:
new_dst_urls.append(url)
transfer_file['sources'] = new_src_urls
transfer_file['destinations'] = new_dst_urls
transfer_id = None
expected_transfer_id = None
if self.deterministic_id:
job_params = job_params.copy()
job_params["id_generator"] = "deterministic"
job_params["sid"] = files[0]['metadata']['request_id']
expected_transfer_id = self.__get_deterministic_id(job_params["sid"])
self.logger(logging.DEBUG, "Submit bulk transfers in deterministic mode, sid %s, expected transfer id: %s", job_params["sid"], expected_transfer_id)
# bulk submission
params_dict = {'files': files, 'params': job_params}
params_str = json.dumps(params_dict, cls=APIEncoder)
post_result = None
try:
start_time = time.time()
post_result = requests.post('%s/jobs' % self.external_host,
verify=self.verify,
cert=self.cert,
data=params_str,
headers=self.headers,
timeout=timeout)
labels = {'host': self.__extract_host(self.external_host)}
record_timer('transfertool.fts3.submit_transfer.{host}', (time.time() - start_time) * 1000 / len(files), labels=labels)
except ReadTimeout as error:
raise TransferToolTimeout(error)
except JSONDecodeError as error:
raise TransferToolWrongAnswer(error)
except Exception as error:
self.logger(logging.WARNING, 'Could not submit transfer to %s - %s' % (self.external_host, str(error)))
if post_result and post_result.status_code == 200:
SUBMISSION_COUNTER.labels(state='success', host=self.__extract_host(self.external_host)).inc(len(files))
transfer_id = str(post_result.json()['job_id'])
elif post_result and post_result.status_code == 409:
SUBMISSION_COUNTER.labels(state='failure', host=self.__extract_host(self.external_host)).inc(len(files))
raise DuplicateFileTransferSubmission()
else:
if expected_transfer_id:
transfer_id = expected_transfer_id
self.logger(logging.WARNING, "Failed to submit transfer to %s, will use expected transfer id %s, error: %s", self.external_host, transfer_id, post_result.text if post_result is not None else post_result)
else:
self.logger(logging.WARNING, "Failed to submit transfer to %s, error: %s", self.external_host, post_result.text if post_result is not None else post_result)
SUBMISSION_COUNTER.labels(state='failure', host=self.__extract_host(self.external_host)).inc(len(files))
if not transfer_id:
raise TransferToolWrongAnswer('No transfer id returned by %s' % self.external_host)
record_timer('core.request.submit_transfers_fts3', (time.time() - start_time) * 1000 / len(transfers))
return transfer_id
def cancel(self, transfer_ids, timeout=None):
"""
Cancel transfers that have been submitted to FTS3.
:param transfer_ids: FTS transfer identifiers as list of strings.
:param timeout: Timeout in seconds.
:returns: True if cancellation was successful.
"""
if len(transfer_ids) > 1:
raise NotImplementedError('Bulk cancelling not implemented')
transfer_id = transfer_ids[0]
job = None
job = requests.delete('%s/jobs/%s' % (self.external_host, transfer_id),
verify=self.verify,
cert=self.cert,
headers=self.headers,
timeout=timeout)
if job and job.status_code == 200:
CANCEL_COUNTER.labels(state='success', host=self.__extract_host(self.external_host)).inc()
return job.json()
CANCEL_COUNTER.labels(state='failure', host=self.__extract_host(self.external_host)).inc()
raise Exception('Could not cancel transfer: %s', job.content)
def update_priority(self, transfer_id, priority, timeout=None):
"""
Update the priority of a transfer that has been submitted to FTS via JSON.
:param transfer_id: FTS transfer identifier as a string.
:param priority: FTS job priority as an integer from 1 to 5.
:param timeout: Timeout in seconds.
:returns: True if update was successful.
"""
job = None
params_dict = {"params": {"priority": priority}}
params_str = json.dumps(params_dict, cls=APIEncoder)
job = requests.post('%s/jobs/%s' % (self.external_host, transfer_id),
verify=self.verify,
data=params_str,
cert=self.cert,
headers=self.headers,
timeout=timeout) # TODO set to 3 in conveyor
if job and job.status_code == 200:
UPDATE_PRIORITY_COUNTER.labels(state='success', host=self.__extract_host(self.external_host)).inc()
return job.json()
UPDATE_PRIORITY_COUNTER.labels(state='failure', host=self.__extract_host(self.external_host)).inc()
raise Exception('Could not update priority of transfer: %s', job.content)
def query(self, transfer_ids, details=False, timeout=None):
"""
Query the status of a transfer in FTS3 via JSON.
:param transfer_ids: FTS transfer identifiers as list of strings.
:param details: Switch if detailed information should be listed.
:param timeout: Timeout in seconds.
:returns: Transfer status information as a list of dictionaries.
"""
if len(transfer_ids) > 1:
raise NotImplementedError('FTS3 transfertool query not bulk ready')
transfer_id = transfer_ids[0]
if details:
return self.__query_details(transfer_id=transfer_id)
job = None
job = requests.get('%s/jobs/%s' % (self.external_host, transfer_id),
verify=self.verify,
cert=self.cert,
headers=self.headers,
timeout=timeout) # TODO Set to 5 in conveyor
if job and job.status_code == 200:
QUERY_COUNTER.labels(state='success', host=self.__extract_host(self.external_host)).inc()
return [job.json()]
QUERY_COUNTER.labels(state='failure', host=self.__extract_host(self.external_host)).inc()
raise Exception('Could not retrieve transfer information: %s', job.content)
# Public methods, not part of the common interface specification (FTS3 specific)
def whoami(self):
"""
Returns credential information from the FTS3 server.
:returns: Credentials as stored by the FTS3 server as a dictionary.
"""
get_result = None
get_result = requests.get('%s/whoami' % self.external_host,
verify=self.verify,
cert=self.cert,
headers=self.headers)
if get_result and get_result.status_code == 200:
WHOAMI_COUNTER.labels(state='success', host=self.__extract_host(self.external_host)).inc()
return get_result.json()
WHOAMI_COUNTER.labels(state='failure', host=self.__extract_host(self.external_host)).inc()
raise Exception('Could not retrieve credentials: %s', get_result.content)
def version(self):
"""
Returns FTS3 server information.
:returns: FTS3 server information as a dictionary.
"""
get_result = None
get_result = requests.get('%s/' % self.external_host,
verify=self.verify,
cert=self.cert,
headers=self.headers)
if get_result and get_result.status_code == 200:
VERSION_COUNTER.labels(state='success', host=self.__extract_host(self.external_host)).inc()
return get_result.json()
VERSION_COUNTER.labels(state='failure', host=self.__extract_host(self.external_host)).inc()
raise Exception('Could not retrieve version: %s', get_result.content)
def bulk_query(self, requests_by_eid, timeout=None):
"""
Query the status of a bulk of transfers in FTS3 via JSON.
:param requests_by_eid: dictionary {external_id1: {request_id1: request1, ...}, ...} of request to be queried
:returns: Transfer status information as a dictionary.
"""
responses = {}
fts_session = requests.Session()
xfer_ids = ','.join(requests_by_eid)
jobs = fts_session.get('%s/jobs/%s?files=file_state,dest_surl,finish_time,start_time,staging_start,staging_finished,reason,source_surl,file_metadata' % (self.external_host, xfer_ids),
verify=self.verify,
cert=self.cert,
headers=self.headers,
timeout=timeout)
if jobs is None:
record_counter('transfertool.fts3.{host}.bulk_query.failure', labels={'host': self.__extract_host(self.external_host)})
for transfer_id in requests_by_eid:
responses[transfer_id] = Exception('Transfer information returns None: %s' % jobs)
elif jobs.status_code in (200, 207, 404):
try:
BULK_QUERY_COUNTER.labels(state='success', host=self.__extract_host(self.external_host)).inc()
jobs_response = jobs.json()
responses = self.__bulk_query_responses(jobs_response, requests_by_eid)
except ReadTimeout as error:
raise TransferToolTimeout(error)
except JSONDecodeError as error:
raise TransferToolWrongAnswer(error)
except Exception as error:
raise Exception("Failed to parse the job response: %s, error: %s" % (str(jobs), str(error)))
else:
BULK_QUERY_COUNTER.labels(state='failure', host=self.__extract_host(self.external_host)).inc()
for transfer_id in requests_by_eid:
responses[transfer_id] = Exception('Could not retrieve transfer information: %s', jobs.content)
return responses
def list_se_status(self):
"""
Get the list of banned Storage Elements.
        :returns: Detailed dictionary of banned Storage Elements.
"""
try:
result = requests.get('%s/ban/se' % self.external_host,
verify=self.verify,
cert=self.cert,
headers=self.headers,
timeout=None)
except Exception as error:
raise Exception('Could not retrieve transfer information: %s', error)
if result and result.status_code == 200:
return result.json()
raise Exception('Could not retrieve transfer information: %s', result.content)
def get_se_config(self, storage_element):
"""
Get the Json response for the configuration of a storage element.
:returns: a Json result for the configuration of a storage element.
:param storage_element: the storage element you want the configuration for.
"""
try:
result = requests.get('%s/config/se' % (self.external_host),
verify=self.verify,
cert=self.cert,
headers=self.headers,
timeout=None)
except Exception:
self.logger(logging.WARNING, 'Could not get config of %s on %s - %s', storage_element, self.external_host, str(traceback.format_exc()))
if result and result.status_code == 200:
C = result.json()
config_se = C[storage_element]
return config_se
raise Exception('Could not get the configuration of %s , status code returned : %s', (storage_element, result.status_code if result else None))
def set_se_config(self, storage_element, inbound_max_active=None, outbound_max_active=None, inbound_max_throughput=None, outbound_max_throughput=None, staging=None):
"""
Set the configuration for a storage element. Used for alleviating transfer failures due to timeout.
:param storage_element: The storage element to be configured
:param inbound_max_active: the integer to set the inbound_max_active for the SE.
:param outbound_max_active: the integer to set the outbound_max_active for the SE.
:param inbound_max_throughput: the float to set the inbound_max_throughput for the SE.
:param outbound_max_throughput: the float to set the outbound_max_throughput for the SE.
:param staging: the integer to set the staging for the operation of a SE.
:returns: JSON post response in case of success, otherwise raise Exception.
"""
params_dict = {storage_element: {'operations': {}, 'se_info': {}}}
if staging is not None:
try:
policy = config_get('policy', 'permission')
except Exception:
self.logger(logging.WARNING, 'Could not get policy from config')
params_dict[storage_element]['operations'] = {policy: {'staging': staging}}
# A lot of try-excepts to avoid dictionary overwrite's,
# see https://stackoverflow.com/questions/27118687/updating-nested-dictionaries-when-data-has-existing-key/27118776
if inbound_max_active is not None:
try:
params_dict[storage_element]['se_info']['inbound_max_active'] = inbound_max_active
except KeyError:
params_dict[storage_element]['se_info'] = {'inbound_max_active': inbound_max_active}
if outbound_max_active is not None:
try:
params_dict[storage_element]['se_info']['outbound_max_active'] = outbound_max_active
except KeyError:
params_dict[storage_element]['se_info'] = {'outbound_max_active': outbound_max_active}
if inbound_max_throughput is not None:
try:
params_dict[storage_element]['se_info']['inbound_max_throughput'] = inbound_max_throughput
except KeyError:
params_dict[storage_element]['se_info'] = {'inbound_max_throughput': inbound_max_throughput}
if outbound_max_throughput is not None:
try:
params_dict[storage_element]['se_info']['outbound_max_throughput'] = outbound_max_throughput
except KeyError:
params_dict[storage_element]['se_info'] = {'outbound_max_throughput': outbound_max_throughput}
params_str = json.dumps(params_dict, cls=APIEncoder)
try:
result = requests.post('%s/config/se' % (self.external_host),
verify=self.verify,
cert=self.cert,
data=params_str,
headers=self.headers,
timeout=None)
except Exception:
self.logger(logging.WARNING, 'Could not set the config of %s on %s - %s', storage_element, self.external_host, str(traceback.format_exc()))
if result and result.status_code == 200:
configSe = result.json()
return configSe
raise Exception('Could not set the configuration of %s , status code returned : %s', (storage_element, result.status_code if result else None))
def set_se_status(self, storage_element, message, ban=True, timeout=None):
"""
Ban a Storage Element. Used when a site is in downtime.
        One can use a timeout in seconds. In that case the jobs will wait before being cancelled.
If no timeout is specified, the jobs are canceled immediately
:param storage_element: The Storage Element that will be banned.
:param message: The reason of the ban.
:param ban: Boolean. If set to True, ban the SE, if set to False unban the SE.
:param timeout: if None, send to FTS status 'cancel' else 'waiting' + the corresponding timeout.
:returns: 0 in case of success, otherwise raise Exception
"""
params_dict = {'storage': storage_element, 'message': message}
status = 'CANCEL'
if timeout:
params_dict['timeout'] = timeout
status = 'WAIT'
params_dict['status'] = status
params_str = json.dumps(params_dict, cls=APIEncoder)
result = None
if ban:
try:
result = requests.post('%s/ban/se' % self.external_host,
verify=self.verify,
cert=self.cert,
data=params_str,
headers=self.headers,
timeout=None)
except Exception:
self.logger(logging.WARNING, 'Could not ban %s on %s - %s', storage_element, self.external_host, str(traceback.format_exc()))
if result and result.status_code == 200:
return 0
raise Exception('Could not ban the storage %s , status code returned : %s', (storage_element, result.status_code if result else None))
else:
try:
result = requests.delete('%s/ban/se?storage=%s' % (self.external_host, storage_element),
verify=self.verify,
cert=self.cert,
data=params_str,
headers=self.headers,
timeout=None)
except Exception:
self.logger(logging.WARNING, 'Could not unban %s on %s - %s', storage_element, self.external_host, str(traceback.format_exc()))
if result and result.status_code == 204:
return 0
raise Exception('Could not unban the storage %s , status code returned : %s', (storage_element, result.status_code if result else None))
# Private methods unique to the FTS3 Transfertool
@staticmethod
def __extract_host(external_host):
# graphite does not like the dots in the FQDN
return urlparse(external_host).hostname.replace('.', '_')
def __get_transfer_baseid_voname(self):
"""
        Get the transfer base id and VO name from the external host.
        :returns: base id as a string and VO name as a string.
"""
result = (None, None)
try:
key = 'voname: %s' % self.external_host
result = REGION_SHORT.get(key)
if isinstance(result, NoValue):
self.logger(logging.DEBUG, "Refresh transfer baseid and voname for %s", self.external_host)
get_result = None
try:
get_result = requests.get('%s/whoami' % self.external_host,
verify=self.verify,
cert=self.cert,
headers=self.headers,
timeout=5)
except ReadTimeout as error:
raise TransferToolTimeout(error)
except JSONDecodeError as error:
raise TransferToolWrongAnswer(error)
except Exception as error:
self.logger(logging.WARNING, 'Could not get baseid and voname from %s - %s' % (self.external_host, str(error)))
if get_result and get_result.status_code == 200:
baseid = str(get_result.json()['base_id'])
voname = str(get_result.json()['vos'][0])
result = (baseid, voname)
REGION_SHORT.set(key, result)
self.logger(logging.DEBUG, "Get baseid %s and voname %s from %s", baseid, voname, self.external_host)
else:
self.logger(logging.WARNING, "Failed to get baseid and voname from %s, error: %s", self.external_host, get_result.text if get_result is not None else get_result)
result = (None, None)
except Exception as error:
self.logger(logging.WARNING, "Failed to get baseid and voname from %s: %s" % (self.external_host, str(error)))
result = (None, None)
return result
def __get_deterministic_id(self, sid):
"""
Get deterministic FTS job id.
:param sid: FTS seed id.
:returns: FTS transfer identifier.
"""
baseid, voname = self.__get_transfer_baseid_voname()
if baseid is None or voname is None:
return None
root = uuid.UUID(baseid)
atlas = uuid.uuid5(root, voname)
jobid = uuid.uuid5(atlas, sid)
return str(jobid)
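        # The id is a chain of name-based (version 5) UUIDs:
        #   base_id (from the FTS server) -> VO name -> sid (the request id of the first file),
        # so resubmitting the same request to the same FTS host reproduces the same job id,
        # which is how duplicate submissions end up as the HTTP 409 conflicts handled in submit().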
def __bulk_query_responses(self, jobs_response, requests_by_eid):
if not isinstance(jobs_response, list):
jobs_response = [jobs_response]
responses = {}
for job_response in jobs_response:
transfer_id = job_response['job_id']
if job_response['http_status'] == '200 Ok':
files_response = job_response['files']
multi_sources = job_response['job_metadata'].get('multi_sources', False)
if multi_sources and job_response['job_state'] not in [FTS_STATE.FAILED,
FTS_STATE.FINISHEDDIRTY,
FTS_STATE.CANCELED,
FTS_STATE.FINISHED]:
                    # multi-source replica job is still running, should wait
responses[transfer_id] = {}
continue
resps = {}
for file_resp in files_response:
file_state = file_resp['file_state']
                    # For multiple source replicas jobs, the file_metadata (request_id) will be the same.
                    # Each newly used file overwrites the previous one, so only the last used file is returned.
if multi_sources and file_state == FTS_STATE.NOT_USED:
continue
request_id = file_resp['file_metadata']['request_id']
request = requests_by_eid.get(transfer_id, {}).get(request_id)
if request is not None:
resps[request_id] = FTS3ApiTransferStatusReport(self.external_host, request_id=request_id, request=request,
job_response=job_response, file_response=file_resp)
# multiple source replicas jobs and we found the successful one, it's the final state.
if multi_sources and file_state == FTS_STATE.FINISHED:
break
responses[transfer_id] = resps
elif job_response['http_status'] == '404 Not Found':
# Lost transfer
responses[transfer_id] = None
else:
                responses[transfer_id] = Exception('Could not retrieve transfer information (http_status: %s, http_message: %s)'
                                                   % (job_response['http_status'], job_response.get('http_message')))
return responses
def __query_details(self, transfer_id):
"""
Query the detailed status of a transfer in FTS3 via JSON.
:param transfer_id: FTS transfer identifier as a string.
:returns: Detailed transfer status information as a dictionary.
"""
files = None
files = requests.get('%s/jobs/%s/files' % (self.external_host, transfer_id),
verify=self.verify,
cert=self.cert,
headers=self.headers,
timeout=5)
if files and (files.status_code == 200 or files.status_code == 207):
QUERY_DETAILS_COUNTER.labels(state='success', host=self.__extract_host(self.external_host)).inc()
return files.json()
QUERY_DETAILS_COUNTER.labels(state='failure', host=self.__extract_host(self.external_host)).inc()
return
|
py | 7df82c9e8e6265fcc25c2c9783a2ffc6631dc352 | # This module contains a legacy API, an early approach to plans that
# was deprecated in v0.10.0. It will be removed in a future release. It should
# not be used. To build 'reusable' plans we now recommend `functools.partial`.
from bluesky import utils
from collections import defaultdict
# The code below adds no new logic, but it wraps the generators above in
# classes for an alternative interface that is more stateful.
from bluesky import preprocessors as bpp
from bluesky.plans import (count, list_scan, rel_list_scan, log_scan,
rel_scan, adaptive_scan, rel_adaptive_scan,
scan_nd, inner_product_scan, relative_inner_product_scan,
grid_scan, scan, tweak, spiral, spiral_fermat,
rel_spiral_fermat, rel_spiral, rel_log_scan,
rel_grid_scan)
class Plan(utils.Struct):
"""
This is a base class for wrapping plan generators in a stateful class.
    To create a new subclass you need to override two things:
- an ``__init__`` method *or* a class level ``_fields`` attribute which is
used to construct the init signature via meta-class magic
- a ``_gen`` method, which should return a generator of Msg objects
The class provides:
- state stored in attributes that are used to re-generate a plan generator
with the same parameters
- a hook for adding "flyable" objects to a plan
- attributes for adding subscriptions and subscription factory functions
"""
subs = utils.Subs({})
sub_factories = utils.Subs({})
def __iter__(self):
"""
Return an iterable of messages.
"""
return self()
def __call__(self, **kwargs):
"""
Return an iterable of messages.
Any keyword arguments override present settings.
"""
subs = defaultdict(list)
utils.update_sub_lists(subs, self.subs)
utils.update_sub_lists(
subs, utils.apply_sub_factories(self.sub_factories, self))
flyers = getattr(self, 'flyers', [])
def cls_plan():
current_settings = {}
for key, val in kwargs.items():
current_settings[key] = getattr(self, key)
setattr(self, key, val)
try:
plan = self._gen()
plan = bpp.subs_wrapper(plan, subs)
plan = bpp.stage_wrapper(plan, flyers)
plan = bpp.fly_during_wrapper(plan, flyers)
return (yield from plan)
finally:
for key, val in current_settings.items():
setattr(self, key, val)
cls_plan.__name__ = self.__class__.__name__
return cls_plan()
def _gen(self):
"Subclasses override this to provide the main plan content."
yield from utils.censure_generator([])
PlanBase = Plan # back-compat
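# Illustrative usage sketch (not part of the original API; `det` and `RE` are
# assumed to be a readable detector and a bluesky RunEngine from the session):
#   plan = Count([det], num=5, delay=0.1)
#   RE(plan())      # calling the Plan builds a fresh message generator
#   plan.num = 10   # parameters are stored on the instance and reused
#   RE(plan)        # Plan instances are also directly iterable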
class Count(Plan):
_fields = ['detectors', 'num', 'delay']
__doc__ = count.__doc__
def __init__(self, detectors, num=1, delay=0, *, md=None):
self.detectors = detectors
self.num = num
self.delay = delay
self.flyers = []
self.md = md
def _gen(self):
return count(self.detectors, self.num, self.delay, md=self.md)
class ListScan(Plan):
_fields = ['detectors', 'motor', 'steps']
__doc__ = list_scan.__doc__
def _gen(self):
return list_scan(self.detectors, self.motor, self.steps,
md=self.md)
AbsListScanPlan = ListScan # back-compat
class RelativeListScan(Plan):
_fields = ['detectors', 'motor', 'steps']
__doc__ = rel_list_scan.__doc__
def _gen(self):
return rel_list_scan(self.detectors, self.motor, self.steps,
md=self.md)
DeltaListScanPlan = RelativeListScan # back-compat
class Scan(Plan):
_fields = ['detectors', 'motor', 'start', 'stop', 'num']
__doc__ = scan.__doc__
def _gen(self):
return scan(self.detectors, self.motor, self.start, self.stop,
self.num, md=self.md)
AbsScanPlan = Scan # back-compat
class LogScan(Plan):
_fields = ['detectors', 'motor', 'start', 'stop', 'num']
__doc__ = log_scan.__doc__
def _gen(self):
return log_scan(self.detectors, self.motor, self.start, self.stop,
self.num, md=self.md)
LogAbsScanPlan = LogScan # back-compat
class RelativeScan(Plan):
_fields = ['detectors', 'motor', 'start', 'stop', 'num']
__doc__ = rel_scan.__doc__
def _gen(self):
return rel_scan(self.detectors, self.motor, self.start, self.stop,
self.num, md=self.md)
DeltaScanPlan = RelativeScan # back-compat
class RelativeLogScan(Plan):
_fields = ['detectors', 'motor', 'start', 'stop', 'num']
__doc__ = rel_log_scan.__doc__
def _gen(self):
return rel_log_scan(self.detectors, self.motor, self.start,
self.stop, self.num, md=self.md)
LogDeltaScanPlan = RelativeLogScan # back-compat
class AdaptiveScan(Plan):
_fields = ['detectors', 'target_field', 'motor', 'start', 'stop',
'min_step', 'max_step', 'target_delta', 'backstep',
'threshold']
__doc__ = adaptive_scan.__doc__
def __init__(self, detectors, target_field, motor, start, stop,
min_step, max_step, target_delta, backstep,
threshold=0.8, *, md=None):
self.detectors = detectors
self.target_field = target_field
self.motor = motor
self.start = start
self.stop = stop
self.min_step = min_step
self.max_step = max_step
self.target_delta = target_delta
self.backstep = backstep
self.threshold = threshold
self.flyers = []
self.md = md
def _gen(self):
return adaptive_scan(self.detectors, self.target_field, self.motor,
self.start, self.stop, self.min_step,
self.max_step, self.target_delta,
self.backstep, self.threshold, md=self.md)
AdaptiveAbsScanPlan = AdaptiveScan # back-compat
class RelativeAdaptiveScan(AdaptiveAbsScanPlan):
__doc__ = rel_adaptive_scan.__doc__
def _gen(self):
return rel_adaptive_scan(self.detectors, self.target_field,
self.motor, self.start, self.stop,
self.min_step, self.max_step,
self.target_delta, self.backstep,
self.threshold, md=self.md)
AdaptiveDeltaScanPlan = RelativeAdaptiveScan # back-compat
class ScanND(PlanBase):
_fields = ['detectors', 'cycler']
__doc__ = scan_nd.__doc__
def _gen(self):
return scan_nd(self.detectors, self.cycler, md=self.md)
PlanND = ScanND # back-compat
class InnerProductScan(Plan):
__doc__ = inner_product_scan.__doc__
def __init__(self, detectors, num, *args, md=None):
self.detectors = detectors
self.num = num
self.args = args
self.flyers = []
self.md = md
def _gen(self):
return inner_product_scan(self.detectors, self.num, *self.args,
md=self.md)
InnerProductAbsScanPlan = InnerProductScan # back-compat
class RelativeInnerProductScan(InnerProductScan):
__doc__ = relative_inner_product_scan.__doc__
def _gen(self):
return relative_inner_product_scan(self.detectors, self.num,
*self.args, md=self.md)
InnerProductDeltaScanPlan = RelativeInnerProductScan # back-compat
class OuterProductScan(Plan):
__doc__ = grid_scan.__doc__
def __init__(self, detectors, *args, md=None):
self.detectors = detectors
self.args = args
self.flyers = []
self.md = md
def _gen(self):
return grid_scan(self.detectors, *self.args, md=self.md)
OuterProductAbsScanPlan = OuterProductScan # back-compat
class RelativeOuterProductScan(OuterProductScan):
__doc__ = rel_grid_scan.__doc__
def _gen(self):
return rel_grid_scan(self.detectors, *self.args,
md=self.md)
OuterProductDeltaScanPlan = RelativeOuterProductScan # back-compat
class Tweak(Plan):
_fields = ['detector', 'target_field', 'motor', 'step']
__doc__ = tweak.__doc__
def _gen(self):
return tweak(self.detector, self.target_field, self.motor, self.step,
md=self.md)
class SpiralScan(Plan):
_fields = ['detectors', 'x_motor', 'y_motor', 'x_start', 'y_start',
'x_range', 'y_range', 'dr', 'nth', 'tilt']
__doc__ = spiral.__doc__
def _gen(self):
return spiral(self.detectors, self.x_motor, self.y_motor, self.x_start,
self.y_start, self.x_range, self.y_range, self.dr,
self.nth, tilt=self.tilt, md=self.md)
class SpiralFermatScan(Plan):
_fields = ['detectors', 'x_motor', 'y_motor', 'x_start', 'y_start',
'x_range', 'y_range', 'dr', 'factor', 'tilt']
__doc__ = spiral_fermat.__doc__
def _gen(self):
return spiral_fermat(self.detectors, self.x_motor, self.y_motor,
self.x_start, self.y_start, self.x_range,
self.y_range, self.dr, self.factor,
tilt=self.tilt, md=self.md)
class RelativeSpiralScan(Plan):
_fields = ['detectors', 'x_motor', 'y_motor', 'x_range', 'y_range', 'dr',
'nth', 'tilt']
__doc__ = rel_spiral.__doc__
def _gen(self):
return rel_spiral(self.detectors, self.x_motor, self.y_motor,
self.x_range, self.y_range, self.dr, self.nth,
tilt=self.tilt, md=self.md)
class RelativeSpiralFermatScan(Plan):
_fields = ['detectors', 'x_motor', 'y_motor', 'x_range', 'y_range', 'dr',
'factor', 'tilt']
__doc__ = rel_spiral_fermat.__doc__
def _gen(self):
return rel_spiral_fermat(self.detectors, self.x_motor,
self.y_motor, self.x_range, self.y_range,
self.dr, self.factor, tilt=self.tilt,
md=self.md)
|
py | 7df82fcc21dd45d9eb5d1a4617e63f1f8154d8c4 | from fm_solver.translator.translator import Translator
from fm_solver.translator.mini_zinc_arithmetic_translator import (
MiniZincArithmeticTranslator,
)
from fm_solver.translator.xcsp3_arithmetic_translator import XCSP3ArithmeticTranslator
from fm_solver.translator.cnf_translator import CNFTranslator
__all__ = [
"Translator",
"MiniZincArithmeticTranslator",
"XCSP3ArithmeticTranslator",
"CNFTranslator",
]
|
py | 7df82fdaae1951c93bbe1760eb69fd54678a543f | from django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin
from django.shortcuts import render
from django.urls import reverse_lazy
from django.views.generic import (
CreateView,
DeleteView,
DetailView,
ListView,
UpdateView,
)
from .forms import ExpenditureFilterForm, ExpenditureForm
from .models import Category, Expenditure
def categories_list_view(request):
categories_with_related_count = Category.objects.add_related_count(
Category.objects.all(), Expenditure, "category", "o_count", cumulative=True
)
return render(
request, "budget/categories.html", {"categories": categories_with_related_count}
)
class ExpenditureList(LoginRequiredMixin, ListView):
model = Expenditure
paginate_by = 20
def get_queryset(self):
return Expenditure.objects.filter(user=self.request.user).order_by("-spent_at")
class ExpenditureDetailView(DetailView):
model = Expenditure
class ExpenditureCreateView(LoginRequiredMixin, CreateView):
model = Expenditure
form_class = ExpenditureForm
def form_valid(self, form):
form.instance.user = self.request.user
return super().form_valid(form)
class ExpenditureUpdateView(LoginRequiredMixin, UserPassesTestMixin, UpdateView):
model = Expenditure
fields = ["value", "spent_at", "comment", "category"]
def get_form_class(self):
return ExpenditureForm
def form_valid(self, form):
form.instance.user = self.request.user
return super().form_valid(form)
def test_func(self):
obj = self.get_object()
if self.request.user == obj.user:
return True
return False
class ExpenditureDeleteView(LoginRequiredMixin, UserPassesTestMixin, DeleteView):
model = Expenditure
success_url = reverse_lazy("budget:expenditure_list_view")
def test_func(self):
obj = self.get_object()
if self.request.user == obj.user:
return True
return False
class ExpenditureDetailedList(LoginRequiredMixin, ListView):
model = Expenditure
paginate_by = 20
template_name = "budget/expenditure_list_detailed.html"
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["form"] = ExpenditureFilterForm(
initial={"category": list(self.request.GET.getlist("category", []))}
)
return context
def get_queryset(self, *args, **kwargs):
qs = Expenditure.objects.filter(user=self.request.user).order_by("-spent_at")
get_data = self.request.GET
if "category" in get_data:
qs = qs.filter(category__in=get_data.getlist("category", []))
return qs.all()
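    # Illustrative request (hypothetical URL and ids): a query string such as
    # ?category=3&category=7 restricts the list to those categories; without a
    # `category` parameter the full per-user queryset is returned.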
|
py | 7df8305d83201c3894f74ef1ba2d4cd7929c0678 | # @Author: Manuel Rodriguez <valle>
# @Date: 10-Jun-2018
# @Email: [email protected]
# @Last modified by: valle
# @Last modified time: 10-Jun-2018
# @License: Apache license version 2.0
|
py | 7df830bd121bb3802d904902b3580aada9cb4d06 | import os
import argparse
import logging
import compreffor
from compreffor.pyCompressor import human_size
from fontTools.ttLib import TTFont
from fontTools.misc.loggingTools import configLogger
def parse_arguments(args=None):
parser = argparse.ArgumentParser(
prog='compreffor',
description="FontTools Compreffor will take a CFF-flavored OpenType font "
"and automatically detect repeated routines and generate "
"subroutines to minimize the disk space needed to represent "
"a font.")
parser.add_argument("infile", metavar="INPUT",
help="path to the input font file")
parser.add_argument("outfile", nargs="?", metavar="OUTPUT",
help="path to the compressed file (default: "
"*.compressed.otf)")
parser.add_argument('-v', '--verbose', action='count', default=0,
help="print more messages to stdout; use it multiple "
"times to increase the level of verbosity")
parser.add_argument("-c", "--check", action='store_true',
help="verify that the outputted font is valid and "
"functionally equivalent to the input")
parser.add_argument("-d", "--decompress", action="store_true",
help="decompress source before compressing (necessary if "
"there are subroutines in the source)")
parser.add_argument("-n", "--nrounds", type=int,
help="the number of iterations to run the algorithm"
" (default: 4)")
parser.add_argument("-m", "--max-subrs", type=int,
help="limit to the number of subroutines per INDEX "
" (default: 65533)")
parser.add_argument('--generate-cff', action='store_true',
help="Also save binary CFF table data as {INPUT}.cff")
parser.add_argument('--py', dest="method_python", action='store_true',
help="Use pure Python method, instead of C++ extension")
py_meth_group = parser.add_argument_group("options for pure Python method")
py_meth_group.add_argument("--chunk-ratio", type=float,
help="specify the percentage size of the "
"job chunks used for parallel processing "
"(0 < float <= 1; default: 0.1)")
py_meth_group.add_argument("-p", "--processes", type=int,
help="specify number of concurrent processes to "
"run. Use value 1 to perform operation serially "
"(default: 12)")
options = parser.parse_args(args)
kwargs = vars(options)
if options.method_python:
if options.processes is not None and options.processes < 1:
parser.error('argument --processes expects positive integer > 0')
if (options.chunk_ratio is not None
and not (0 < options.chunk_ratio <= 1)):
parser.error('argument --chunk-ratio expects float number 0 < n <= 1')
else:
for attr in ('chunk_ratio', 'processes'):
if getattr(options, attr):
opt = attr.replace('_', '-')
parser.error('argument --%s can only be used with --py (pure '
'Python) method' % opt)
else:
del kwargs[attr]
if options.outfile is None:
options.outfile = "%s.compressed%s" % os.path.splitext(options.infile)
return kwargs
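# Example invocations (illustrative; font file names are hypothetical):
#   compreffor -v --check MyFont.otf            # output defaults to MyFont.compressed.otf
#   compreffor --py --processes 4 MyFont.otf MyFont.small.otf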
def main(args=None):
log = compreffor.log
timer = compreffor.timer
timer.reset()
options = parse_arguments(args)
# consume kwargs that are not passed on to 'compress' function
infile = options.pop('infile')
outfile = options.pop('outfile')
decompress = options.pop('decompress')
generate_cff = options.pop('generate_cff')
check = options.pop('check')
verbose = options.pop('verbose')
if verbose == 1:
level = logging.INFO
elif verbose > 1:
level = logging.DEBUG
else:
level = logging.WARNING
configLogger(logger=log, level=level)
orig_size = os.path.getsize(infile)
font = TTFont(infile)
if decompress:
log.info("Decompressing font with FontTools Subsetter")
with timer("decompress the font"):
compreffor.decompress(font)
log.info("Compressing font through %s Compreffor",
"pure-Python" if options['method_python'] else "C++")
compreffor.compress(font, **options)
with timer("compile and save compressed font"):
font.save(outfile)
if generate_cff:
cff_file = os.path.splitext(outfile)[0] + ".cff"
with open(cff_file, 'wb') as f:
font['CFF '].cff.compile(f, None)
log.info("Saved CFF data to '%s'" % os.path.basename(cff_file))
if check:
log.info("Checking compression integrity and call depth")
passed = compreffor.check(infile, outfile)
if not passed:
return 1
comp_size = os.path.getsize(outfile)
log.info("Compressed to '%s' -- saved %s" %
(os.path.basename(outfile), human_size(orig_size - comp_size)))
log.debug("Total time: %gs", timer.time())
if __name__ == "__main__":
main()
|
py | 7df832ddd7038928398be71aaa03238b7f419fb7 | import sys
import os
import csv
import json
import pika
import boto3
from botocore.exceptions import ClientError
CHECKSUM_HEADERS = ['object_id', 'type', 'checksum']
CONTENTS_HEADERS = ['object_id', 'id', 'name', 'drs_uri', 'type']
OBJECT_HEADERS = ['id','name','description','self_uri','size','created_time','updated_time','version','mime_type','aliases']
OBJECT_HEADERS_V2 = ['id','name','type','description','self_uri','size','created_time','updated_time','version','mime_type','aliases','bundle','dataset']
URL_HEADERS = ['object_id', 'type', 'access_url', 'region', 'headers', 'access_id']
csv.field_size_limit(sys.maxsize)
def write_or_append_csv(filename, DATA, headers):
"""Write DATA as CSV in filename"""
print(DATA)
with open(filename, mode='a', encoding='utf-8-sig') as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=headers)
if (csvfile.tell()==0):
print("Writing headers")
writer.writeheader()
writer.writerow(DATA)
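# Illustrative call (hypothetical record; header fields missing from the record
# are simply left blank by csv.DictWriter):
#   write_or_append_csv('/data/checksums.csv',
#                       {'object_id': 'drs-123', 'type': 'md5',
#                        'checksum': 'd41d8cd98f00b204e9800998ecf8427e'},
#                       CHECKSUM_HEADERS)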
def pull_queue(channel, rabbitmq_queue, file, headers):
while True:
method_frame, header_frame, body = channel.basic_get(queue=rabbitmq_queue)
if method_frame:
try:
data_dict = json.loads(body)
write_or_append_csv(file,data_dict,headers)
channel.basic_ack(method_frame.delivery_tag)
except Exception as err:
print('Handling run-time error:', err)
channel.basic_nack(method_frame.delivery_tag)
raise err
else:
print("Message not found")
break
def create_s3_client():
session = boto3.session.Session()
s3_client = session.client(
service_name='s3',
aws_access_key_id=os.environ.get('AWS_ACCESS_KEY'),
aws_secret_access_key=os.environ.get('AWS_SECRET_KEY'),
endpoint_url=os.environ.get('S3_ENDPOINT')
)
return s3_client
def upload_file(file_name, bucket, s3_client, object_name=None):
"""Upload a file to an S3 bucket
:param file_name: File to upload
:param bucket: Bucket to upload to
:param s3_client: boto3 s3 client
:param object_name: S3 object name. If not specified then file_name is used
:return: True if file was uploaded, else False
"""
# If S3 object_name was not specified, use file_name
if object_name is None:
object_name = file_name
# Upload the file
try:
print(file_name)
print(bucket)
print(object_name)
s3_client.upload_file(file_name, bucket, object_name)
except ClientError as e:
print(e)
raise e
def main():
rabbitmq_url = os.environ.get('BROKER_URL')
object_queue = os.environ.get('OBJECT_QUEUE','object_queue')
checksums_queue = os.environ.get('CHECKSUMS_QUEUE','checksums_queue')
contents_queue = os.environ.get('CONTENTS_QUEUE','contents_queue')
access_methods_queue = os.environ.get('ACCESS_METHODS_QUEUE','access_methods_queue')
# Pulling each queue and forming csv with records
connection = pika.BlockingConnection(pika.URLParameters(rabbitmq_url))
channel = connection.channel()
indexer_version = os.environ.get('INDEXER_VERSION')
if (indexer_version == 'v2'):
pull_queue(channel, object_queue , '/data/objects.csv', OBJECT_HEADERS_V2)
else:
pull_queue(channel, object_queue , '/data/objects.csv', OBJECT_HEADERS)
pull_queue(channel, checksums_queue , '/data/checksums.csv', CHECKSUM_HEADERS)
pull_queue(channel, contents_queue , '/data/contents.csv', CONTENTS_HEADERS)
pull_queue(channel, access_methods_queue , '/data/access_methods.csv', URL_HEADERS)
connection.close()
# Pushing to object store for ingestion
s3_bucket = os.environ.get('S3_OUTPUT_BUCKET','rdsds-indexing')
s3_filepath = os.environ.get('S3_OUTPUT_FILEPATH','indexed_items/')
s3_client = create_s3_client()
upload_file(file_name='/data/objects.csv', bucket=s3_bucket, s3_client=s3_client,object_name=s3_filepath+'objects.csv')
upload_file(file_name='/data/checksums.csv', bucket=s3_bucket, s3_client=s3_client,object_name=s3_filepath+'checksums.csv')
upload_file(file_name='/data/contents.csv', bucket=s3_bucket, s3_client=s3_client,object_name=s3_filepath+'contents.csv')
upload_file(file_name='/data/access_methods.csv', bucket=s3_bucket, s3_client=s3_client,object_name=s3_filepath+'access_methods.csv')
if __name__ == "__main__":
main() |
py | 7df832fc41586f09ad8199baed7e2d2652d76d30 | from django.apps import AppConfig
class DefaultConfig(AppConfig):
name = '{{cookiecutter.project_name}}.{{cookiecutter.django_app}}'
|
py | 7df833d64933d7df1ecee442cae75aecfb655fb1 |
import snoop
def type_watch(source, value):
return 'type({})'.format(source), type(value)
snoop.install(
out='logs/snoop.log',
color = False,
prefix = 'snoop: ',
columns = ['time','file','function'],
watch_extras=[type_watch],
# builtins=False,
)
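# Illustrative usage elsewhere in the project (assumes this module is imported
# first so that snoop.install() has already run):
#   @snoop
#   def add(a, b):
#       return a + b
# Calls to add() are then traced to logs/snoop.log, including the extra
# "type(...)" watch values registered above.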
|
py | 7df834de4711a12496f9125631ec8537f619af41 | #from setuptools import setup
from distutils.core import setup
setup(
name = 'caterapp',
packages=['caterapp'],
    version = '1.1', # Ideally should be same as your GitHub release tag version
description='A TCP based file sharing application',
author='Ravi Prakash',
author_email='[email protected]',
url='https://github.com/ravi-prakash1907/CaterApp', # github source url
download_url = 'https://github.com/ravi-prakash1907/CaterApp/archive/refs/tags/v0.1-alpha.tar.gz',
    long_description = open('README.md').read(),
keywords = ['v0.1-alpha', 'caterapp'],
    install_requires = ['cater', 'shareBox'],
classifiers=[
'Development Status :: 3 - Alpha', # "3 - Alpha", "4 - Beta" or "5 - Production/Stable" ---- the current state of this package
'License :: OSI Approved :: MIT License', # picking a license
'Programming Language :: Python :: 3', #Specify which pyhton versions that you want to support
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.8',
],
)
|
py | 7df8353536d31bfda235cfefe2d73597e7c47630 | #!/usr/bin/env python
"""
Usage:
process scripts/process.py
Assumptions
1) Always run from poetics directory
2) virtual env for frozen-pie is in the same folder as frozen pie
3) paths are hardcoded
"""
poetics_path = "/Users/harsha/yuti/poetics"
frozen_pie_path = "/Users/harsha/yuti/frozen-pie-latest"
poems_path = "/Users/harsha/yuti/poems"
errors_path = poems_path + "/errors"
github_url = "[email protected]:Facjure/poetics.git"
# get latest poems if any
import os
cmd = "cd " + poems_path + "; git pull origin master"
os.system(cmd)
os.system("rm except.log.md")
log = open("except.log.md", "a")
import sys
import re
import yaml
import StringIO
from glob import glob
from pipes import quote
from codecs import open
from datetime import datetime
from dateutil import tz
import cleaners
def split_file(poem_text):
match = re.split(u"\n---[ ]*\n", poem_text, flags=re.U | re.S)
yaml_text = match[0]
poem = match[1]
return yaml_text, poem
print """Processing poems"""
for txtfile in glob(poems_path + os.sep + "*.txt"):
try:
txtfile_name = os.path.basename(txtfile)
if not cleaners.is_clean_name(txtfile_name):
raise Exception("Filenames should have hyphens only. \"<authorLastName>-<first-five-words-of-title>.txt\". Use - for all special characters.")
text = open(txtfile, "r", "utf-8").read()
yaml_text, poem = split_file(text)
if len(poem) < 10:
raise Exception("Fault in process.py or Poem is too small")
yaml.load(StringIO.StringIO(yaml_text))
except Exception, error:
log.write("#### Error in \"" + txtfile + "\"\n" + str(error) + "\n\n")
cmd = "mv " + quote(txtfile) + " " + quote(errors_path)
print " " + cmd
os.system(cmd)
continue
print "Done"
log.close()
log = open("except.log.md", "r")
if len(log.readlines()) > 2:
log.close()
utc = datetime.utcnow()
from_zone = tz.gettz('UTC')
to_zone = tz.gettz('America/New_York')
utc = utc.replace(tzinfo=from_zone)
boston_time = utc.astimezone(to_zone)
print str(boston_time)
cmd = "mv except.log.md " + quote(errors_path + os.sep + "except.log.BostonTime." + str(boston_time) + ".md")
print " " + cmd
os.system(cmd)
os.chdir(poems_path)
os.system("git add -A; git commit -m 'BuildBot " + str(boston_time) + "'")
os.system("git push origin master")
os.chdir(frozen_pie_path)
os.system("./env/bin/python bake.py --config " + poetics_path + os.sep + "config.yml")
os.chdir(poetics_path)
os.system("mkdir deploy")
os.system("mv .build/index.html deploy/")
os.system("rm -rf .build")
os.system("git clone -b gh-pages " + github_url + " .build")
os.system("cp deploy/index.html .build/")
os.system("cd .build; git add index.html; git commit -m 'new deploy " + str(boston_time) + "'; git push --force origin gh-pages")
|
py | 7df8355244f2100aafae9020518a79cacd9b8c9c | # Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Csiszar f-Divergence and helpers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
# Dependency imports
import numpy as np
import tensorflow.compat.v2 as tf
from tensorflow_probability.python import monte_carlo
from tensorflow_probability.python.internal import dtype_util
from tensorflow_probability.python.internal import nest_util
from tensorflow_probability.python.internal.reparameterization import FULLY_REPARAMETERIZED
from tensorflow_probability.python.stats.leave_one_out import log_soomean_exp
__all__ = [
'amari_alpha',
'arithmetic_geometric',
'chi_square',
'csiszar_vimco',
'dual_csiszar_function',
'jeffreys',
'jensen_shannon',
'kl_forward',
'kl_reverse',
'log1p_abs',
'modified_gan',
'monte_carlo_variational_loss',
'pearson',
'squared_hellinger',
'symmetrized_csiszar_function',
't_power',
'total_variation',
'triangular',
]
def amari_alpha(logu, alpha=1., self_normalized=False, name=None):
"""The Amari-alpha Csiszar-function in log-space.
A Csiszar-function is a member of,
```none
F = { f:R_+ to R : f convex }.
```
When `self_normalized = True`, the Amari-alpha Csiszar-function is:
```none
f(u) = { -log(u) + (u - 1), alpha = 0
{ u log(u) - (u - 1), alpha = 1
{ [(u**alpha - 1) - alpha (u - 1)] / (alpha (alpha - 1)), otherwise
```
When `self_normalized = False` the `(u - 1)` terms are omitted.
Warning: when `alpha != 0` and/or `self_normalized = True` this function makes
non-log-space calculations and may therefore be numerically unstable for
`|logu| >> 0`.
For more information, see:
A. Cichocki and S. Amari. "Families of Alpha-Beta-and GammaDivergences:
Flexible and Robust Measures of Similarities." Entropy, vol. 12, no. 6, pp.
1532-1568, 2010.
Args:
logu: `float`-like `Tensor` representing `log(u)` from above.
alpha: `float`-like Python scalar. (See Mathematical Details for meaning.)
self_normalized: Python `bool` indicating whether `f'(u=1)=0`. When
`f'(u=1)=0` the implied Csiszar f-Divergence remains non-negative even
when `p, q` are unnormalized measures.
name: Python `str` name prefixed to Ops created by this function.
Returns:
amari_alpha_of_u: `float`-like `Tensor` of the Csiszar-function evaluated
at `u = exp(logu)`.
Raises:
TypeError: if `alpha` is `None` or a `Tensor`.
TypeError: if `self_normalized` is `None` or a `Tensor`.
"""
with tf.name_scope(name or 'amari_alpha'):
if tf.get_static_value(alpha) is None:
raise TypeError('Argument `alpha` cannot be `None` or `Tensor` type.')
if tf.get_static_value(self_normalized) is None:
raise TypeError(
'Argument `self_normalized` cannot be `None` or `Tensor` type.')
logu = tf.convert_to_tensor(logu, name='logu')
if alpha == 0.:
f = -logu
elif alpha == 1.:
f = tf.exp(logu) * logu
else:
f = tf.math.expm1(alpha * logu) / (alpha * (alpha - 1.))
if not self_normalized:
return f
if alpha == 0.:
return f + tf.math.expm1(logu)
elif alpha == 1.:
return f - tf.math.expm1(logu)
else:
return f - tf.math.expm1(logu) / (alpha - 1.)
def kl_reverse(logu, self_normalized=False, name=None):
"""The reverse Kullback-Leibler Csiszar-function in log-space.
A Csiszar-function is a member of,
```none
F = { f:R_+ to R : f convex }.
```
When `self_normalized = True`, the KL-reverse Csiszar-function is:
```none
f(u) = -log(u) + (u - 1)
```
When `self_normalized = False` the `(u - 1)` term is omitted.
Observe that as an f-Divergence, this Csiszar-function implies:
```none
D_f[p, q] = KL[q, p]
```
The KL is "reverse" because in maximum likelihood we think of minimizing `q`
as in `KL[p, q]`.
  Warning: when `self_normalized = True` this function makes non-log-space
calculations and may therefore be numerically unstable for `|logu| >> 0`.
Args:
logu: `float`-like `Tensor` representing `log(u)` from above.
self_normalized: Python `bool` indicating whether `f'(u=1)=0`. When
`f'(u=1)=0` the implied Csiszar f-Divergence remains non-negative even
when `p, q` are unnormalized measures.
name: Python `str` name prefixed to Ops created by this function.
Returns:
kl_reverse_of_u: `float`-like `Tensor` of the Csiszar-function evaluated at
`u = exp(logu)`.
Raises:
TypeError: if `self_normalized` is `None` or a `Tensor`.
"""
with tf.name_scope(name or 'kl_reverse'):
return amari_alpha(logu, alpha=0., self_normalized=self_normalized)
def kl_forward(logu, self_normalized=False, name=None):
"""The forward Kullback-Leibler Csiszar-function in log-space.
A Csiszar-function is a member of,
```none
F = { f:R_+ to R : f convex }.
```
When `self_normalized = True`, the KL-forward Csiszar-function is:
```none
f(u) = u log(u) - (u - 1)
```
When `self_normalized = False` the `(u - 1)` term is omitted.
Observe that as an f-Divergence, this Csiszar-function implies:
```none
D_f[p, q] = KL[p, q]
```
The KL is "forward" because in maximum likelihood we think of minimizing `q`
as in `KL[p, q]`.
Warning: this function makes non-log-space calculations and may therefore be
numerically unstable for `|logu| >> 0`.
Args:
logu: `float`-like `Tensor` representing `log(u)` from above.
self_normalized: Python `bool` indicating whether `f'(u=1)=0`. When
`f'(u=1)=0` the implied Csiszar f-Divergence remains non-negative even
when `p, q` are unnormalized measures.
name: Python `str` name prefixed to Ops created by this function.
Returns:
kl_forward_of_u: `float`-like `Tensor` of the Csiszar-function evaluated at
`u = exp(logu)`.
Raises:
TypeError: if `self_normalized` is `None` or a `Tensor`.
"""
with tf.name_scope(name or 'kl_forward'):
return amari_alpha(logu, alpha=1., self_normalized=self_normalized)
def jensen_shannon(logu, self_normalized=False, name=None):
"""The Jensen-Shannon Csiszar-function in log-space.
A Csiszar-function is a member of,
```none
F = { f:R_+ to R : f convex }.
```
When `self_normalized = True`, the Jensen-Shannon Csiszar-function is:
```none
f(u) = u log(u) - (1 + u) log(1 + u) + (u + 1) log(2)
```
When `self_normalized = False` the `(u + 1) log(2)` term is omitted.
Observe that as an f-Divergence, this Csiszar-function implies:
```none
D_f[p, q] = KL[p, m] + KL[q, m]
m(x) = 0.5 p(x) + 0.5 q(x)
```
In a sense, this divergence is the "reverse" of the Arithmetic-Geometric
f-Divergence.
This Csiszar-function induces a symmetric f-Divergence, i.e.,
`D_f[p, q] = D_f[q, p]`.
Warning: this function makes non-log-space calculations and may therefore be
numerically unstable for `|logu| >> 0`.
For more information, see:
Lin, J. "Divergence measures based on the Shannon entropy." IEEE Trans.
Inf. Th., 37, 145-151, 1991.
Args:
logu: `float`-like `Tensor` representing `log(u)` from above.
self_normalized: Python `bool` indicating whether `f'(u=1)=0`. When
`f'(u=1)=0` the implied Csiszar f-Divergence remains non-negative even
when `p, q` are unnormalized measures.
name: Python `str` name prefixed to Ops created by this function.
Returns:
jensen_shannon_of_u: `float`-like `Tensor` of the Csiszar-function
evaluated at `u = exp(logu)`.
"""
with tf.name_scope(name or 'jensen_shannon'):
logu = tf.convert_to_tensor(logu, name='logu')
y = tf.nn.softplus(logu)
if self_normalized:
y -= np.log(2.)
# TODO(jvdillon): Maybe leverage the fact that:
# (x-sp(x))*exp(x) approx= expm1(-1.1x + 0.5) for x>12?
# Basically, take advantage of x approx= softplus(x) for x>>0.
return (logu - y) * tf.exp(logu) - y
def arithmetic_geometric(logu, self_normalized=False, name=None):
"""The Arithmetic-Geometric Csiszar-function in log-space.
A Csiszar-function is a member of,
```none
F = { f:R_+ to R : f convex }.
```
When `self_normalized = True` the Arithmetic-Geometric Csiszar-function is:
```none
f(u) = (1 + u) log( (1 + u) / sqrt(u) ) - (1 + u) log(2)
```
When `self_normalized = False` the `(1 + u) log(2)` term is omitted.
Observe that as an f-Divergence, this Csiszar-function implies:
```none
D_f[p, q] = KL[m, p] + KL[m, q]
m(x) = 0.5 p(x) + 0.5 q(x)
```
In a sense, this divergence is the "reverse" of the Jensen-Shannon
f-Divergence.
This Csiszar-function induces a symmetric f-Divergence, i.e.,
`D_f[p, q] = D_f[q, p]`.
Warning: this function makes non-log-space calculations and may therefore be
numerically unstable for `|logu| >> 0`.
Args:
logu: `float`-like `Tensor` representing `log(u)` from above.
self_normalized: Python `bool` indicating whether `f'(u=1)=0`. When
`f'(u=1)=0` the implied Csiszar f-Divergence remains non-negative even
when `p, q` are unnormalized measures.
name: Python `str` name prefixed to Ops created by this function.
Returns:
arithmetic_geometric_of_u: `float`-like `Tensor` of the
Csiszar-function evaluated at `u = exp(logu)`.
"""
with tf.name_scope(name or 'arithmetic_geometric'):
logu = tf.convert_to_tensor(logu, name='logu')
y = tf.nn.softplus(logu) - 0.5 * logu
if self_normalized:
y -= np.log(2.)
return (1. + tf.exp(logu)) * y
def total_variation(logu, name=None):
"""The Total Variation Csiszar-function in log-space.
A Csiszar-function is a member of,
```none
F = { f:R_+ to R : f convex }.
```
The Total-Variation Csiszar-function is:
```none
f(u) = 0.5 |u - 1|
```
Warning: this function makes non-log-space calculations and may therefore be
numerically unstable for `|logu| >> 0`.
Args:
logu: `float`-like `Tensor` representing `log(u)` from above.
name: Python `str` name prefixed to Ops created by this function.
Returns:
total_variation_of_u: `float`-like `Tensor` of the Csiszar-function
evaluated at `u = exp(logu)`.
"""
with tf.name_scope(name or 'total_variation'):
logu = tf.convert_to_tensor(logu, name='logu')
return 0.5 * tf.abs(tf.math.expm1(logu))
def pearson(logu, name=None):
"""The Pearson Csiszar-function in log-space.
A Csiszar-function is a member of,
```none
F = { f:R_+ to R : f convex }.
```
The Pearson Csiszar-function is:
```none
f(u) = (u - 1)**2
```
Warning: this function makes non-log-space calculations and may therefore be
numerically unstable for `|logu| >> 0`.
Args:
logu: `float`-like `Tensor` representing `log(u)` from above.
name: Python `str` name prefixed to Ops created by this function.
Returns:
pearson_of_u: `float`-like `Tensor` of the Csiszar-function evaluated at
`u = exp(logu)`.
"""
with tf.name_scope(name or 'pearson'):
logu = tf.convert_to_tensor(logu, name='logu')
return tf.square(tf.math.expm1(logu))
def squared_hellinger(logu, name=None):
"""The Squared-Hellinger Csiszar-function in log-space.
A Csiszar-function is a member of,
```none
F = { f:R_+ to R : f convex }.
```
The Squared-Hellinger Csiszar-function is:
```none
f(u) = (sqrt(u) - 1)**2
```
This Csiszar-function induces a symmetric f-Divergence, i.e.,
`D_f[p, q] = D_f[q, p]`.
Warning: this function makes non-log-space calculations and may therefore be
numerically unstable for `|logu| >> 0`.
Args:
logu: `float`-like `Tensor` representing `log(u)` from above.
name: Python `str` name prefixed to Ops created by this function.
Returns:
squared_hellinger_of_u: `float`-like `Tensor` of the Csiszar-function
evaluated at `u = exp(logu)`.
"""
with tf.name_scope(name or 'squared_hellinger'):
logu = tf.convert_to_tensor(logu, name='logu')
return pearson(0.5 * logu)
def triangular(logu, name=None):
"""The Triangular Csiszar-function in log-space.
A Csiszar-function is a member of,
```none
F = { f:R_+ to R : f convex }.
```
The Triangular Csiszar-function is:
```none
f(u) = (u - 1)**2 / (1 + u)
```
This Csiszar-function induces a symmetric f-Divergence, i.e.,
`D_f[p, q] = D_f[q, p]`.
Warning: this function makes non-log-space calculations and may therefore be
numerically unstable for `|logu| >> 0`.
Args:
logu: `float`-like `Tensor` representing `log(u)` from above.
name: Python `str` name prefixed to Ops created by this function.
Returns:
triangular_of_u: `float`-like `Tensor` of the Csiszar-function evaluated
at `u = exp(logu)`.
"""
with tf.name_scope(name or 'triangular'):
logu = tf.convert_to_tensor(logu, name='logu')
return pearson(logu) / (1. + tf.exp(logu))
def t_power(logu, t, self_normalized=False, name=None):
"""The T-Power Csiszar-function in log-space.
A Csiszar-function is a member of,
```none
F = { f:R_+ to R : f convex }.
```
When `self_normalized = True` the T-Power Csiszar-function is:
```none
f(u) = s [ u**t - 1 - t(u - 1) ]
s = { -1 0 < t < 1
{ +1 otherwise
```
When `self_normalized = False` the `- t(u - 1)` term is omitted.
This is similar to the `amari_alpha` Csiszar-function, with the associated
divergence being the same up to factors depending only on `t`.
Args:
logu: `float`-like `Tensor` representing `log(u)` from above.
t: `Tensor` of same `dtype` as `logu` and broadcastable shape.
self_normalized: Python `bool` indicating whether `f'(u=1)=0`.
name: Python `str` name prefixed to Ops created by this function.
Returns:
t_power_of_u: `float`-like `Tensor` of the Csiszar-function evaluated
at `u = exp(logu)`.
"""
with tf.name_scope(name or 't_power'):
logu = tf.convert_to_tensor(logu, name='logu')
t = tf.convert_to_tensor(
t, dtype=dtype_util.base_dtype(logu.dtype), name='t')
fu = tf.math.expm1(t * logu)
if self_normalized:
fu = fu - t * tf.math.expm1(logu)
return tf.where((0 < t) & (t < 1), -fu, fu)
def log1p_abs(logu, name=None):
"""The log1p-abs Csiszar-function in log-space.
A Csiszar-function is a member of,
```none
F = { f:R_+ to R : f convex }.
```
The Log1p-Abs Csiszar-function is:
```none
f(u) = u**(sign(u-1)) - 1
```
This function is so-named because it was invented from the following recipe.
Choose a convex function g such that g(0)=0 and solve for f:
```none
log(1 + f(u)) = g(log(u)).
<=>
f(u) = exp(g(log(u))) - 1
```
That is, the graph is identically `g` when y-axis is `log1p`-domain and x-axis
is `log`-domain.
Warning: this function makes non-log-space calculations and may therefore be
numerically unstable for `|logu| >> 0`.
Args:
logu: `float`-like `Tensor` representing `log(u)` from above.
name: Python `str` name prefixed to Ops created by this function.
Returns:
log1p_abs_of_u: `float`-like `Tensor` of the Csiszar-function evaluated
at `u = exp(logu)`.
"""
with tf.name_scope(name or 'log1p_abs'):
logu = tf.convert_to_tensor(logu, name='logu')
return tf.math.expm1(tf.abs(logu))
def jeffreys(logu, name=None):
"""The Jeffreys Csiszar-function in log-space.
A Csiszar-function is a member of,
```none
F = { f:R_+ to R : f convex }.
```
The Jeffreys Csiszar-function is:
```none
f(u) = 0.5 ( u log(u) - log(u) )
= 0.5 kl_forward + 0.5 kl_reverse
= symmetrized_csiszar_function(kl_reverse)
= symmetrized_csiszar_function(kl_forward)
```
This Csiszar-function induces a symmetric f-Divergence, i.e.,
`D_f[p, q] = D_f[q, p]`.
Warning: this function makes non-log-space calculations and may therefore be
numerically unstable for `|logu| >> 0`.
Args:
logu: `float`-like `Tensor` representing `log(u)` from above.
name: Python `str` name prefixed to Ops created by this function.
Returns:
jeffreys_of_u: `float`-like `Tensor` of the Csiszar-function evaluated
at `u = exp(logu)`.
"""
with tf.name_scope(name or 'jeffreys'):
logu = tf.convert_to_tensor(logu, name='logu')
return 0.5 * tf.math.expm1(logu) * logu
def chi_square(logu, name=None):
"""The chi-Square Csiszar-function in log-space.
A Csiszar-function is a member of,
```none
F = { f:R_+ to R : f convex }.
```
The Chi-square Csiszar-function is:
```none
f(u) = u**2 - 1
```
Warning: this function makes non-log-space calculations and may therefore be
numerically unstable for `|logu| >> 0`.
Args:
logu: `float`-like `Tensor` representing `log(u)` from above.
name: Python `str` name prefixed to Ops created by this function.
Returns:
chi_square_of_u: `float`-like `Tensor` of the Csiszar-function evaluated
at `u = exp(logu)`.
"""
with tf.name_scope(name or 'chi_square'):
logu = tf.convert_to_tensor(logu, name='logu')
return tf.math.expm1(2. * logu)
def modified_gan(logu, self_normalized=False, name=None):
"""The Modified-GAN Csiszar-function in log-space.
A Csiszar-function is a member of,
```none
F = { f:R_+ to R : f convex }.
```
When `self_normalized = True` the modified-GAN (Generative/Adversarial
Network) Csiszar-function is:
```none
f(u) = log(1 + u) - log(u) + 0.5 (u - 1)
```
  When `self_normalized = False` the `0.5 (u - 1)` term is omitted.
The unmodified GAN Csiszar-function is identical to Jensen-Shannon (with
`self_normalized = False`).
Warning: this function makes non-log-space calculations and may therefore be
numerically unstable for `|logu| >> 0`.
Args:
logu: `float`-like `Tensor` representing `log(u)` from above.
self_normalized: Python `bool` indicating whether `f'(u=1)=0`. When
`f'(u=1)=0` the implied Csiszar f-Divergence remains non-negative even
when `p, q` are unnormalized measures.
name: Python `str` name prefixed to Ops created by this function.
Returns:
    modified_gan_of_u: `float`-like `Tensor` of the Csiszar-function evaluated
      at `u = exp(logu)`.
"""
  with tf.name_scope(name or 'modified_gan'):
logu = tf.convert_to_tensor(logu, name='logu')
y = tf.nn.softplus(logu) - logu
if self_normalized:
y += 0.5 * tf.math.expm1(logu)
return y
def dual_csiszar_function(logu, csiszar_function, name=None):
"""Calculates the dual Csiszar-function in log-space.
A Csiszar-function is a member of,
```none
F = { f:R_+ to R : f convex }.
```
The Csiszar-dual is defined as:
```none
f^*(u) = u f(1 / u)
```
where `f` is some other Csiszar-function.
For example, the dual of `kl_reverse` is `kl_forward`, i.e.,
```none
f(u) = -log(u)
f^*(u) = u f(1 / u) = -u log(1 / u) = u log(u)
```
The dual of the dual is the original function:
```none
f^**(u) = {u f(1/u)}^*(u) = u (1/u) f(1/(1/u)) = f(u)
```
Warning: this function makes non-log-space calculations and may therefore be
numerically unstable for `|logu| >> 0`.
Args:
logu: `float`-like `Tensor` representing `log(u)` from above.
csiszar_function: Python `callable` representing a Csiszar-function over
log-domain.
name: Python `str` name prefixed to Ops created by this function.
Returns:
dual_f_of_u: `float`-like `Tensor` of the result of calculating the dual of
`f` at `u = exp(logu)`.
"""
with tf.name_scope(name or 'dual_csiszar_function'):
return tf.exp(logu) * csiszar_function(-logu)
def symmetrized_csiszar_function(logu, csiszar_function, name=None):
"""Symmetrizes a Csiszar-function in log-space.
A Csiszar-function is a member of,
```none
F = { f:R_+ to R : f convex }.
```
The symmetrized Csiszar-function is defined as:
```none
f_g(u) = 0.5 g(u) + 0.5 u g (1 / u)
```
where `g` is some other Csiszar-function.
We say the function is "symmetrized" because:
```none
D_{f_g}[p, q] = D_{f_g}[q, p]
```
for all `p << >> q` (i.e., `support(p) = support(q)`).
There exists alternatives for symmetrizing a Csiszar-function. For example,
```none
f_g(u) = max(f(u), f^*(u)),
```
where `f^*` is the dual Csiszar-function, also implies a symmetric
f-Divergence.
Example:
When either of the following functions are symmetrized, we obtain the
Jensen-Shannon Csiszar-function, i.e.,
```none
g(u) = -log(u) - (1 + u) log((1 + u) / 2) + u - 1
h(u) = log(4) + 2 u log(u / (1 + u))
```
implies,
```none
f_g(u) = f_h(u) = u log(u) - (1 + u) log((1 + u) / 2)
= jensen_shannon(log(u)).
```
Warning: this function makes non-log-space calculations and may therefore be
numerically unstable for `|logu| >> 0`.
Args:
logu: `float`-like `Tensor` representing `log(u)` from above.
csiszar_function: Python `callable` representing a Csiszar-function over
log-domain.
name: Python `str` name prefixed to Ops created by this function.
Returns:
symmetrized_g_of_u: `float`-like `Tensor` of the result of applying the
symmetrization of `g` evaluated at `u = exp(logu)`.
"""
with tf.name_scope(name or 'symmetrized_csiszar_function'):
logu = tf.convert_to_tensor(logu, name='logu')
return 0.5 * (csiszar_function(logu) +
dual_csiszar_function(logu, csiszar_function))
def monte_carlo_variational_loss(target_log_prob_fn,
surrogate_posterior,
sample_size=1,
discrepancy_fn=kl_reverse,
use_reparameterization=None,
seed=None,
name=None):
"""Monte-Carlo approximation of an f-Divergence variational loss.
Variational losses measure the divergence between an unnormalized target
distribution `p` (provided via `target_log_prob_fn`) and a surrogate
distribution `q` (provided as `surrogate_posterior`). When the
target distribution is an unnormalized posterior from conditioning a model on
data, minimizing the loss with respect to the parameters of
`surrogate_posterior` performs approximate posterior inference.
This function defines divergences of the form
`E_q[discrepancy_fn(log p(z) - log q(z))]`, sometimes known as f-divergences
[1, 2]. In the special case `discrepancy_fn(logu) == -logu` (the default
`tfp.vi.kl_reverse`), this is the reverse Kullback-Liebler divergence
`KL[q||p]`, whose negation applied to an unnormalized `p` is the widely-used
evidence lower bound (ELBO) [3]. Other cases of interest available under
`tfp.vi` include the forward `KL[p||q]` (given by `tfp.vi.kl_forward(logu)
== exp(logu) * logu`), total variation distance, Amari
alpha-divergences, and [more](https://en.wikipedia.org/wiki/F-divergence).
Args:
target_log_prob_fn: Python callable that takes a set of `Tensor` arguments
and returns a `Tensor` log-density. Given
`q_sample = surrogate_posterior.sample(sample_size)`, this
will be called as `target_log_prob_fn(*q_sample)` if `q_sample` is a list
or a tuple, `target_log_prob_fn(**q_sample)` if `q_sample` is a
dictionary, or `target_log_prob_fn(q_sample)` if `q_sample` is a `Tensor`.
It should support batched evaluation, i.e., should return a result of
shape `[sample_size]`.
surrogate_posterior: A `tfp.distributions.Distribution`
instance defining a variational posterior (could be a
`tfd.JointDistribution`). Crucially, the distribution's `log_prob` and
(if reparameterizeable) `sample` methods must directly invoke all ops
that generate gradients to the underlying variables. One way to ensure
this is to use `tfp.util.TransformedVariable` and/or
`tfp.util.DeferredTensor` to represent any parameters defined as
transformations of unconstrained variables, so that the transformations
execute at runtime instead of at distribution creation.
sample_size: Integer scalar number of Monte Carlo samples used to
approximate the variational divergence. Larger values may stabilize
the optimization, but at higher cost per step in time and memory.
Default value: `1`.
discrepancy_fn: Python `callable` representing a Csiszar `f` function in
in log-space. That is, `discrepancy_fn(log(u)) = f(u)`, where `f` is
convex in `u`.
Default value: `tfp.vi.kl_reverse`.
use_reparameterization: Python `bool`. When `None` (the default),
automatically set to:
`surrogate_posterior.reparameterization_type ==
tfd.FULLY_REPARAMETERIZED`. When `True` uses the standard Monte-Carlo
average. When `False` uses the score-gradient trick. (See above for
details.) When `False`, consider using `csiszar_vimco`.
seed: Python `int` seed for `surrogate_posterior.sample`.
name: Python `str` name prefixed to Ops created by this function.
Returns:
monte_carlo_variational_loss: `float`-like `Tensor` Monte Carlo
approximation of the Csiszar f-Divergence.
Raises:
ValueError: if `surrogate_posterior` is not a reparameterized
distribution and `use_reparameterization = True`. A distribution is said
to be "reparameterized" when its samples are generated by transforming the
samples of another distribution that does not depend on the first
distribution's parameters. This property ensures the gradient with respect
to parameters is valid.
TypeError: if `target_log_prob_fn` is not a Python `callable`.
#### Csiszar f-divergences
A Csiszar function `f` is a convex function from `R^+` (the positive reals)
to `R`. The Csiszar f-Divergence is given by:
```none
D_f[p(X), q(X)] := E_{q(X)}[ f( p(X) / q(X) ) ]
~= m**-1 sum_j^m f( p(x_j) / q(x_j) ),
where x_j ~iid q(X)
```
For example, `f = lambda u: -log(u)` recovers `KL[q||p]`, while `f =
lambda u: u * log(u)` recovers the forward `KL[p||q]`. These and other
functions are available in `tfp.vi`.
#### Tricks: Reparameterization and Score-Gradient
When q is "reparameterized", i.e., a diffeomorphic transformation of a
parameterless distribution (e.g.,
`Normal(Y; m, s) <=> Y = sX + m, X ~ Normal(0,1)`), we can swap gradient and
expectation, i.e.,
`grad[Avg{ s_i : i=1...n }] = Avg{ grad[s_i] : i=1...n }` where `S_n=Avg{s_i}`
and `s_i = f(x_i), x_i ~iid q(X)`.
However, if q is not reparameterized, TensorFlow's gradient will be incorrect
since the chain-rule stops at samples of unreparameterized distributions. In
this circumstance using the Score-Gradient trick results in an unbiased
gradient, i.e.,
```none
grad[ E_q[f(X)] ]
= grad[ int dx q(x) f(x) ]
= int dx grad[ q(x) f(x) ]
= int dx [ q'(x) f(x) + q(x) f'(x) ]
= int dx q(x) [q'(x) / q(x) f(x) + f'(x) ]
= int dx q(x) grad[ f(x) q(x) / stop_grad[q(x)] ]
= E_q[ grad[ f(x) q(x) / stop_grad[q(x)] ] ]
```
Unless `q.reparameterization_type != tfd.FULLY_REPARAMETERIZED` it is
usually preferable to set `use_reparameterization = True`.
#### Example Application:
The Csiszar f-Divergence is a useful framework for variational inference.
I.e., observe that,
```none
f(p(x)) = f( E_{q(Z | x)}[ p(x, Z) / q(Z | x) ] )
<= E_{q(Z | x)}[ f( p(x, Z) / q(Z | x) ) ]
:= D_f[p(x, Z), q(Z | x)]
```
The inequality follows from the fact that the "perspective" of `f`, i.e.,
`(s, t) |-> t f(s / t))`, is convex in `(s, t)` when `s/t in domain(f)` and
`t` is a real. Since the above framework includes the popular Evidence Lower
BOund (ELBO) as a special case, i.e., `f(u) = -log(u)`, we call this framework
"Evidence Divergence Bound Optimization" (EDBO).
#### References:
[1]: https://en.wikipedia.org/wiki/F-divergence
[2]: Ali, Syed Mumtaz, and Samuel D. Silvey. "A general class of coefficients
of divergence of one distribution from another." Journal of the Royal
Statistical Society: Series B (Methodological) 28.1 (1966): 131-142.
[3]: Bishop, Christopher M. Pattern Recognition and Machine Learning.
Springer, 2006.
"""
with tf.name_scope(name or 'monte_carlo_variational_loss'):
reparameterization_types = tf.nest.flatten(
surrogate_posterior.reparameterization_type)
if use_reparameterization is None:
use_reparameterization = all(
reparameterization_type == FULLY_REPARAMETERIZED
for reparameterization_type in reparameterization_types)
elif (use_reparameterization and
any(reparameterization_type != FULLY_REPARAMETERIZED
for reparameterization_type in reparameterization_types)):
# TODO(jvdillon): Consider only raising an exception if the gradient is
# requested.
raise ValueError(
'Distribution `surrogate_posterior` must be reparameterized, i.e.,'
'a diffeomorphic transformation of a parameterless distribution. '
'(Otherwise this function has a biased gradient.)')
if not callable(target_log_prob_fn):
raise TypeError('`target_log_prob_fn` must be a Python `callable`'
'function.')
def divergence_fn(q_samples, q_lp=None):
target_log_prob = nest_util.call_fn(target_log_prob_fn, q_samples)
if q_lp is None:
q_lp = surrogate_posterior.log_prob(q_samples)
return discrepancy_fn(target_log_prob - q_lp)
if use_reparameterization:
# Attempt to avoid bijector inverses by computing the surrogate log prob
# during the forward sampling pass.
q_samples, q_lp = surrogate_posterior.experimental_sample_and_log_prob(
sample_size, seed=seed)
divergence_fn = functools.partial(divergence_fn, q_lp=q_lp)
else:
# Score fn objective requires explicit gradients of `log_prob`.
q_samples = surrogate_posterior.sample(sample_size, seed=seed)
return monte_carlo.expectation(
f=divergence_fn,
samples=q_samples,
# Log-prob is only used if use_reparameterization=False.
log_prob=surrogate_posterior.log_prob,
use_reparameterization=use_reparameterization)
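# Illustrative usage sketch (not part of the original module; assumes
# `tfd = tfp.distributions` and that `q` carries trainable variables):
#   target = tfd.Normal(loc=1., scale=2.)
#   q = tfd.Normal(loc=tf.Variable(0.), scale=1.)
#   loss = monte_carlo_variational_loss(target.log_prob, q, sample_size=10)
# With the default `discrepancy_fn=kl_reverse`, this is a Monte Carlo estimate
# of `KL[q || target]`, i.e. the usual reverse-KL variational objective.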
def csiszar_vimco(f,
p_log_prob,
q,
num_draws,
num_batch_draws=1,
seed=None,
name=None):
"""Use VIMCO to lower the variance of gradient[csiszar_function(Avg(logu))].
This function generalizes VIMCO [(Mnih and Rezende, 2016)][1] to Csiszar
f-Divergences.
Note: if `q.reparameterization_type = tfd.FULLY_REPARAMETERIZED`,
consider using `monte_carlo_csiszar_f_divergence`.
The VIMCO loss is:
```none
vimco = f(Avg{logu[i] : i=0,...,m-1})
where,
logu[i] = log( p(x, h[i]) / q(h[i] | x) )
h[i] iid~ q(H | x)
```
Interestingly, the VIMCO gradient is not the naive gradient of `vimco`.
Rather, it is characterized by:
```none
grad[vimco] - variance_reducing_term
where,
variance_reducing_term = Sum{ grad[log q(h[i] | x)] *
(vimco - f(log Avg{h[j;i] : j=0,...,m-1}))
: i=0, ..., m-1 }
h[j;i] = { u[j] j!=i
{ GeometricAverage{ u[k] : k!=i} j==i
```
(We omitted `stop_gradient` for brevity. See implementation for more details.)
The `Avg{h[j;i] : j}` term is a kind of "swap-out average" where the `i`-th
element has been replaced by the leave-`i`-out Geometric-average.
This implementation prefers numerical precision over efficiency, i.e.,
`O(num_draws * num_batch_draws * prod(batch_shape) * prod(event_shape))`.
(The constant may be fairly large, perhaps around 12.)
Args:
f: Python `callable` representing a Csiszar-function in log-space.
p_log_prob: Python `callable` representing the natural-log of the
probability under distribution `p`. (In variational inference `p` is the
joint distribution.)
q: `tf.Distribution`-like instance; must implement: `sample(n, seed)`, and
`log_prob(x)`. (In variational inference `q` is the approximate posterior
distribution.)
num_draws: Integer scalar number of draws used to approximate the
f-Divergence expectation.
num_batch_draws: Integer scalar number of draws used to approximate the
f-Divergence expectation.
seed: Python `int` seed for `q.sample`.
name: Python `str` name prefixed to Ops created by this function.
Returns:
vimco: The Csiszar f-Divergence generalized VIMCO objective.
Raises:
ValueError: if `num_draws < 2`.
#### References
[1]: Andriy Mnih and Danilo Rezende. Variational Inference for Monte Carlo
objectives. In _International Conference on Machine Learning_, 2016.
https://arxiv.org/abs/1602.06725
"""
with tf.name_scope(name or 'csiszar_vimco'):
if num_draws < 2:
raise ValueError('Must specify num_draws > 1.')
stop = tf.stop_gradient # For readability.
q_sample = q.sample(sample_shape=[num_draws, num_batch_draws], seed=seed)
x = tf.nest.map_structure(stop, q_sample)
logqx = q.log_prob(x)
logu = nest_util.call_fn(p_log_prob, x) - logqx
f_log_sooavg_u, f_log_avg_u = map(f, log_soomean_exp(logu, axis=0))
dotprod = tf.reduce_sum(
logqx * stop(f_log_avg_u - f_log_sooavg_u),
axis=0) # Sum over iid samples.
# We now rewrite f_log_avg_u so that:
# `grad[f_log_avg_u] := grad[f_log_avg_u + dotprod]`.
# To achieve this, we use a trick that
# `f(x) - stop(f(x)) == zeros_like(f(x))`
# but its gradient is grad[f(x)].
# Note that IEEE754 specifies that `x - x == 0.` and `x + 0. == x`, hence
# this trick loses no precision. For more discussion regarding the relevant
# portions of the IEEE754 standard, see the StackOverflow question,
# "Is there a floating point value of x, for which x-x == 0 is false?"
# http://stackoverflow.com/q/2686644
    # The following is the same as adding zeros_like(dotprod).
f_log_avg_u = f_log_avg_u + dotprod - stop(dotprod)
return tf.reduce_mean(f_log_avg_u, axis=0) # Avg over batches.
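# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the original
# module). It assumes `tensorflow_probability` is importable and that a toy
# Normal/Normal pair is an acceptable stand-in; `lambda logu: -logu` plays the
# role of the reverse-KL Csiszar-function in log-space.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
  import tensorflow_probability as tfp_demo

  tfd_demo = tfp_demo.distributions
  target = tfd_demo.Normal(loc=1., scale=1.)     # stands in for p(x, h)
  surrogate = tfd_demo.Normal(loc=0., scale=2.)  # stands in for q(h | x)
  vimco_loss = csiszar_vimco(
      f=lambda logu: -logu,        # reverse KL as a Csiszar-function in log-space
      p_log_prob=target.log_prob,  # natural log-prob under the target
      q=surrogate,
      num_draws=10,
      seed=17)
  print(vimco_loss)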
|
py | 7df835959c63b88f998fd74ef3cc0d8795f98bd1 | while input("y or n?") != "n":
for i in range(1, 6):
print(i)
|
py | 7df835d7c50beaa2fc6630ab7d49b2c976a0d811 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2018-03-03 00:50
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('gdb', '0001_initial'),
]
operations = [
migrations.RenameField(
model_name='occurrence',
old_name='specimen_number',
new_name='catalog_number',
),
]
|
py | 7df83649d0a1cb3262030d7520306fc43fc038d7 | ## @file
# Trim files preprocessed by compiler
#
# Copyright (c) 2007 - 2014, Intel Corporation. All rights reserved.<BR>
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
##
# Import Modules
#
import Common.LongFilePathOs as os
import sys
import re
from optparse import OptionParser
from optparse import make_option
from Common.BuildToolError import *
from Common.Misc import *
from Common.BuildVersion import gBUILD_VERSION
import Common.EdkLogger as EdkLogger
from Common.LongFilePathSupport import OpenLongFilePath as open
# Version and Copyright
__version_number__ = ("0.10" + " " + gBUILD_VERSION)
__version__ = "%prog Version " + __version_number__
__copyright__ = "Copyright (c) 2007-2010, Intel Corporation. All rights reserved."
## Regular expression for matching Line Control directive like "#line xxx"
gLineControlDirective = re.compile('^\s*#(?:line)?\s+([0-9]+)\s+"*([^"]*)"')
## Regular expression for matching "typedef struct"
gTypedefPattern = re.compile("^\s*typedef\s+struct(\s+\w+)?\s*[{]*$", re.MULTILINE)
## Regular expression for matching "#pragma pack"
gPragmaPattern = re.compile("^\s*#pragma\s+pack", re.MULTILINE)
#
# The following number patterns will only match if the following criteria are met:
# there is a leading non-(alphanumeric or _) character, and no trailing alphanumeric or _
# character; as the patterns match greedily, it is OK for gDecNumberPattern or gHexNumberPattern to grab the maximum match
#
## Regular expression for matching HEX number
gHexNumberPattern = re.compile("(?<=[^a-zA-Z0-9_])(0[xX])([0-9a-fA-F]+)(U(?=$|[^a-zA-Z0-9_]))?")
## Regular expression for matching decimal number with 'U' postfix
gDecNumberPattern = re.compile("(?<=[^a-zA-Z0-9_])([0-9]+)U(?=$|[^a-zA-Z0-9_])")
## Regular expression for matching constant with 'ULL' 'LL' postfix
gLongNumberPattern = re.compile("(?<=[^a-zA-Z0-9_])(0[xX][0-9a-fA-F]+|[0-9]+)U?LL(?=$|[^a-zA-Z0-9_])")
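# Illustrative examples (added): with ConvertHex enabled, " 0x1FU" becomes " 01Fh";
# with TrimLong, " 0x10ULL" becomes " 0x10"; a decimal " 1234U" becomes " 1234".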
## Regular expression for matching "Include ()" in asl file
gAslIncludePattern = re.compile("^(\s*)[iI]nclude\s*\(\"?([^\"\(\)]+)\"\)", re.MULTILINE)
## Regular expression for matching C style #include "XXX.asl" in asl file
gAslCIncludePattern = re.compile(r'^(\s*)#include\s*[<"]\s*([-\\/\w.]+)\s*([>"])', re.MULTILINE)
## Patterns used to convert EDK conventions to EDK2 ECP conventions
gImportCodePatterns = [
[
re.compile('^(\s*)\(\*\*PeiServices\)\.PciCfg\s*=\s*([^;\s]+);', re.MULTILINE),
'''\\1{
\\1 STATIC EFI_PEI_PPI_DESCRIPTOR gEcpPeiPciCfgPpiList = {
\\1 (EFI_PEI_PPI_DESCRIPTOR_PPI | EFI_PEI_PPI_DESCRIPTOR_TERMINATE_LIST),
\\1 &gEcpPeiPciCfgPpiGuid,
\\1 \\2
\\1 };
\\1 (**PeiServices).InstallPpi (PeiServices, &gEcpPeiPciCfgPpiList);
\\1}'''
],
[
re.compile('^(\s*)\(\*PeiServices\)->PciCfg\s*=\s*([^;\s]+);', re.MULTILINE),
'''\\1{
\\1 STATIC EFI_PEI_PPI_DESCRIPTOR gEcpPeiPciCfgPpiList = {
\\1 (EFI_PEI_PPI_DESCRIPTOR_PPI | EFI_PEI_PPI_DESCRIPTOR_TERMINATE_LIST),
\\1 &gEcpPeiPciCfgPpiGuid,
\\1 \\2
\\1 };
\\1 (**PeiServices).InstallPpi (PeiServices, &gEcpPeiPciCfgPpiList);
\\1}'''
],
[
re.compile("(\s*).+->Modify[\s\n]*\(", re.MULTILINE),
'\\1PeiLibPciCfgModify ('
],
[
re.compile("(\W*)gRT->ReportStatusCode[\s\n]*\(", re.MULTILINE),
'\\1EfiLibReportStatusCode ('
],
[
re.compile('#include\s+EFI_GUID_DEFINITION\s*\(FirmwareFileSystem\)', re.MULTILINE),
'#include EFI_GUID_DEFINITION (FirmwareFileSystem)\n#include EFI_GUID_DEFINITION (FirmwareFileSystem2)'
],
[
re.compile('gEfiFirmwareFileSystemGuid', re.MULTILINE),
'gEfiFirmwareFileSystem2Guid'
],
[
re.compile('EFI_FVH_REVISION', re.MULTILINE),
'EFI_FVH_PI_REVISION'
],
[
re.compile("(\s*)\S*CreateEvent\s*\([\s\n]*EFI_EVENT_SIGNAL_READY_TO_BOOT[^,]*,((?:[^;]+\n)+)(\s*\));", re.MULTILINE),
'\\1EfiCreateEventReadyToBoot (\\2\\3;'
],
[
re.compile("(\s*)\S*CreateEvent\s*\([\s\n]*EFI_EVENT_SIGNAL_LEGACY_BOOT[^,]*,((?:[^;]+\n)+)(\s*\));", re.MULTILINE),
'\\1EfiCreateEventLegacyBoot (\\2\\3;'
],
# [
# re.compile("(\W)(PEI_PCI_CFG_PPI)(\W)", re.MULTILINE),
# '\\1ECP_\\2\\3'
# ]
]
## file cache to avoid circular include in ASL file
gIncludedAslFile = []
## Trim preprocessed source code
#
# Remove extra content made by preprocessor. The preprocessor must enable the
# line number generation option when preprocessing.
#
# @param Source File to be trimmed
# @param Target File to store the trimmed content
# @param ConvertHex If True, convert standard HEX format to MASM format
# @param TrimLong   If True, remove 'U'/'LL' postfixes from integer constants
#
def TrimPreprocessedFile(Source, Target, ConvertHex, TrimLong):
CreateDirectory(os.path.dirname(Target))
try:
f = open (Source, 'r')
except:
EdkLogger.error("Trim", FILE_OPEN_FAILURE, ExtraData=Source)
# read whole file
Lines = f.readlines()
f.close()
PreprocessedFile = ""
InjectedFile = ""
LineIndexOfOriginalFile = None
NewLines = []
LineControlDirectiveFound = False
for Index in range(len(Lines)):
Line = Lines[Index]
#
# Find out the name of files injected by preprocessor from the lines
# with Line Control directive
#
MatchList = gLineControlDirective.findall(Line)
if MatchList != []:
MatchList = MatchList[0]
if len(MatchList) == 2:
LineNumber = int(MatchList[0], 0)
InjectedFile = MatchList[1]
                # The first injected file must be the preprocessed file itself
if PreprocessedFile == "":
PreprocessedFile = InjectedFile
LineControlDirectiveFound = True
continue
elif PreprocessedFile == "" or InjectedFile != PreprocessedFile:
continue
if LineIndexOfOriginalFile == None:
#
# Any non-empty lines must be from original preprocessed file.
# And this must be the first one.
#
LineIndexOfOriginalFile = Index
EdkLogger.verbose("Found original file content starting from line %d"
% (LineIndexOfOriginalFile + 1))
# convert HEX number format if indicated
if ConvertHex:
Line = gHexNumberPattern.sub(r"0\2h", Line)
else:
Line = gHexNumberPattern.sub(r"\1\2", Line)
if TrimLong:
Line = gLongNumberPattern.sub(r"\1", Line)
# convert Decimal number format
Line = gDecNumberPattern.sub(r"\1", Line)
if LineNumber != None:
EdkLogger.verbose("Got line directive: line=%d" % LineNumber)
# in case preprocessor removed some lines, like blank or comment lines
if LineNumber <= len(NewLines):
# possible?
NewLines[LineNumber - 1] = Line
else:
if LineNumber > (len(NewLines) + 1):
for LineIndex in range(len(NewLines), LineNumber-1):
NewLines.append(os.linesep)
NewLines.append(Line)
LineNumber = None
EdkLogger.verbose("Now we have lines: %d" % len(NewLines))
else:
NewLines.append(Line)
# in case there's no line directive or linemarker found
if (not LineControlDirectiveFound) and NewLines == []:
NewLines = Lines
# save to file
try:
f = open (Target, 'wb')
except:
EdkLogger.error("Trim", FILE_OPEN_FAILURE, ExtraData=Target)
f.writelines(NewLines)
f.close()
## Trim preprocessed VFR file
#
# Remove extra content made by preprocessor. The preprocessor doesn't need to
# enable line number generation option when preprocessing.
#
# @param Source File to be trimmed
# @param Target File to store the trimmed content
#
def TrimPreprocessedVfr(Source, Target):
CreateDirectory(os.path.dirname(Target))
try:
f = open (Source,'r')
except:
EdkLogger.error("Trim", FILE_OPEN_FAILURE, ExtraData=Source)
# read whole file
Lines = f.readlines()
f.close()
FoundTypedef = False
Brace = 0
TypedefStart = 0
TypedefEnd = 0
for Index in range(len(Lines)):
Line = Lines[Index]
# don't trim the lines from "formset" definition to the end of file
if Line.strip() == 'formset':
break
if FoundTypedef == False and (Line.find('#line') == 0 or Line.find('# ') == 0):
            # empty the line number directive if it's not among "typedef struct"
Lines[Index] = "\n"
continue
if FoundTypedef == False and gTypedefPattern.search(Line) == None:
# keep "#pragram pack" directive
if gPragmaPattern.search(Line) == None:
Lines[Index] = "\n"
continue
elif FoundTypedef == False:
# found "typedef struct", keept its position and set a flag
FoundTypedef = True
TypedefStart = Index
# match { and } to find the end of typedef definition
if Line.find("{") >= 0:
Brace += 1
elif Line.find("}") >= 0:
Brace -= 1
# "typedef struct" must end with a ";"
if Brace == 0 and Line.find(";") >= 0:
FoundTypedef = False
TypedefEnd = Index
# keep all "typedef struct" except to GUID, EFI_PLABEL and PAL_CALL_RETURN
if Line.strip("} ;\r\n") in ["GUID", "EFI_PLABEL", "PAL_CALL_RETURN"]:
for i in range(TypedefStart, TypedefEnd+1):
Lines[i] = "\n"
# save all lines trimmed
try:
f = open (Target,'w')
except:
EdkLogger.error("Trim", FILE_OPEN_FAILURE, ExtraData=Target)
f.writelines(Lines)
f.close()
## Read the content of an ASL file, including included ASL files, recursively
#
# @param Source File to be read
# @param Indent Spaces before the Include() statement
# @param IncludePathList The list of external include file
# @param LocalSearchPath If LocalSearchPath is specified, this path will be searched
# first for the included file; otherwise, only the path specified
# in the IncludePathList will be searched.
#
def DoInclude(Source, Indent='', IncludePathList=[], LocalSearchPath=None):
NewFileContent = []
try:
#
# Search LocalSearchPath first if it is specified.
#
if LocalSearchPath:
SearchPathList = [LocalSearchPath] + IncludePathList
else:
SearchPathList = IncludePathList
for IncludePath in SearchPathList:
IncludeFile = os.path.join(IncludePath, Source)
if os.path.isfile(IncludeFile):
F = open(IncludeFile, "r")
break
else:
EdkLogger.error("Trim", "Failed to find include file %s" % Source)
except:
EdkLogger.error("Trim", FILE_OPEN_FAILURE, ExtraData=Source)
# avoid A "include" B and B "include" A
IncludeFile = os.path.abspath(os.path.normpath(IncludeFile))
if IncludeFile in gIncludedAslFile:
EdkLogger.warn("Trim", "Circular include",
ExtraData= "%s -> %s" % (" -> ".join(gIncludedAslFile), IncludeFile))
return []
gIncludedAslFile.append(IncludeFile)
for Line in F:
LocalSearchPath = None
Result = gAslIncludePattern.findall(Line)
if len(Result) == 0:
Result = gAslCIncludePattern.findall(Line)
if len(Result) == 0 or os.path.splitext(Result[0][1])[1].lower() not in [".asl", ".asi"]:
NewFileContent.append("%s%s" % (Indent, Line))
continue
#
        # We should first search the local directory if the current file uses the pattern #include "XXX"
#
if Result[0][2] == '"':
LocalSearchPath = os.path.dirname(IncludeFile)
CurrentIndent = Indent + Result[0][0]
IncludedFile = Result[0][1]
NewFileContent.extend(DoInclude(IncludedFile, CurrentIndent, IncludePathList, LocalSearchPath))
NewFileContent.append("\n")
gIncludedAslFile.pop()
F.close()
return NewFileContent
## Trim ASL file
#
# Replace ASL include statements with the content of the included files
#
# @param Source File to be trimmed
# @param Target File to store the trimmed content
# @param IncludePathFile The file to log the external include path
#
def TrimAslFile(Source, Target, IncludePathFile):
CreateDirectory(os.path.dirname(Target))
SourceDir = os.path.dirname(Source)
if SourceDir == '':
SourceDir = '.'
#
# Add source directory as the first search directory
#
IncludePathList = [SourceDir]
#
# If additional include path file is specified, append them all
# to the search directory list.
#
if IncludePathFile:
try:
LineNum = 0
for Line in open(IncludePathFile,'r'):
LineNum += 1
if Line.startswith("/I") or Line.startswith ("-I"):
IncludePathList.append(Line[2:].strip())
else:
EdkLogger.warn("Trim", "Invalid include line in include list file.", IncludePathFile, LineNum)
except:
EdkLogger.error("Trim", FILE_OPEN_FAILURE, ExtraData=IncludePathFile)
Lines = DoInclude(Source, '', IncludePathList)
#
# Undef MIN and MAX to avoid collision in ASL source code
#
Lines.insert(0, "#undef MIN\n#undef MAX\n")
# save all lines trimmed
try:
f = open (Target,'w')
except:
EdkLogger.error("Trim", FILE_OPEN_FAILURE, ExtraData=Target)
f.writelines(Lines)
f.close()
## Trim EDK source code file(s)
#
#
# @param Source File or directory to be trimmed
# @param Target File or directory to store the trimmed content
#
def TrimEdkSources(Source, Target):
if os.path.isdir(Source):
for CurrentDir, Dirs, Files in os.walk(Source):
if '.svn' in Dirs:
Dirs.remove('.svn')
elif "CVS" in Dirs:
Dirs.remove("CVS")
for FileName in Files:
Dummy, Ext = os.path.splitext(FileName)
if Ext.upper() not in ['.C', '.H']: continue
if Target == None or Target == '':
TrimEdkSourceCode(
os.path.join(CurrentDir, FileName),
os.path.join(CurrentDir, FileName)
)
else:
TrimEdkSourceCode(
os.path.join(CurrentDir, FileName),
os.path.join(Target, CurrentDir[len(Source)+1:], FileName)
)
else:
TrimEdkSourceCode(Source, Target)
## Trim one EDK source code file
#
# Do the following replacements:
#
# (**PeiServices\).PciCfg = <*>;
# => {
# STATIC EFI_PEI_PPI_DESCRIPTOR gEcpPeiPciCfgPpiList = {
# (EFI_PEI_PPI_DESCRIPTOR_PPI | EFI_PEI_PPI_DESCRIPTOR_TERMINATE_LIST),
# &gEcpPeiPciCfgPpiGuid,
# <*>
# };
# (**PeiServices).InstallPpi (PeiServices, &gEcpPeiPciCfgPpiList);
#
# <*>Modify(<*>)
# => PeiLibPciCfgModify (<*>)
#
# gRT->ReportStatusCode (<*>)
# => EfiLibReportStatusCode (<*>)
#
# #include <LoadFile\.h>
# => #include <FvLoadFile.h>
#
# CreateEvent (EFI_EVENT_SIGNAL_READY_TO_BOOT, <*>)
# => EfiCreateEventReadyToBoot (<*>)
#
# CreateEvent (EFI_EVENT_SIGNAL_LEGACY_BOOT, <*>)
# => EfiCreateEventLegacyBoot (<*>)
#
# @param Source File to be trimmed
# @param Target File to store the trimmed content
#
def TrimEdkSourceCode(Source, Target):
EdkLogger.verbose("\t%s -> %s" % (Source, Target))
CreateDirectory(os.path.dirname(Target))
try:
f = open (Source,'rb')
except:
EdkLogger.error("Trim", FILE_OPEN_FAILURE, ExtraData=Source)
# read whole file
Lines = f.read()
f.close()
NewLines = None
for Re,Repl in gImportCodePatterns:
if NewLines == None:
NewLines = Re.sub(Repl, Lines)
else:
NewLines = Re.sub(Repl, NewLines)
# save all lines if trimmed
if Source == Target and NewLines == Lines:
return
try:
f = open (Target,'wb')
except:
EdkLogger.error("Trim", FILE_OPEN_FAILURE, ExtraData=Target)
f.write(NewLines)
f.close()
## Parse command line options
#
# Using the standard Python module optparse to parse the command line options of this tool.
#
# @retval Options   An optparse.Values object containing the parsed options
# @retval InputFile Path of the file to be trimmed
#
def Options():
OptionList = [
make_option("-s", "--source-code", dest="FileType", const="SourceCode", action="store_const",
help="The input file is preprocessed source code, including C or assembly code"),
make_option("-r", "--vfr-file", dest="FileType", const="Vfr", action="store_const",
help="The input file is preprocessed VFR file"),
make_option("-a", "--asl-file", dest="FileType", const="Asl", action="store_const",
help="The input file is ASL file"),
make_option("-8", "--Edk-source-code", dest="FileType", const="EdkSourceCode", action="store_const",
help="The input file is source code for Edk to be trimmed for ECP"),
make_option("-c", "--convert-hex", dest="ConvertHex", action="store_true",
help="Convert standard hex format (0xabcd) to MASM format (abcdh)"),
make_option("-l", "--trim-long", dest="TrimLong", action="store_true",
help="Remove postfix of long number"),
make_option("-i", "--include-path-file", dest="IncludePathFile",
help="The input file is include path list to search for ASL include file"),
make_option("-o", "--output", dest="OutputFile",
help="File to store the trimmed content"),
make_option("-v", "--verbose", dest="LogLevel", action="store_const", const=EdkLogger.VERBOSE,
help="Run verbosely"),
make_option("-d", "--debug", dest="LogLevel", type="int",
help="Run with debug information"),
make_option("-q", "--quiet", dest="LogLevel", action="store_const", const=EdkLogger.QUIET,
help="Run quietly"),
make_option("-?", action="help", help="show this help message and exit"),
]
# use clearer usage to override default usage message
UsageString = "%prog [-s|-r|-a] [-c] [-v|-d <debug_level>|-q] [-i <include_path_file>] [-o <output_file>] <input_file>"
Parser = OptionParser(description=__copyright__, version=__version__, option_list=OptionList, usage=UsageString)
Parser.set_defaults(FileType="Vfr")
Parser.set_defaults(ConvertHex=False)
Parser.set_defaults(LogLevel=EdkLogger.INFO)
Options, Args = Parser.parse_args()
# error check
if len(Args) == 0:
EdkLogger.error("Trim", OPTION_MISSING, ExtraData=Parser.get_usage())
if len(Args) > 1:
EdkLogger.error("Trim", OPTION_NOT_SUPPORTED, ExtraData=Parser.get_usage())
InputFile = Args[0]
return Options, InputFile
## Entrance method
#
# This method mainly dispatches to specific methods according to the command line options.
# If no error is found, it returns zero so the caller of this tool can know
# whether it executed successfully or not.
#
# @retval 0 Tool was successful
# @retval 1 Tool failed
#
def Main():
try:
EdkLogger.Initialize()
CommandOptions, InputFile = Options()
if CommandOptions.LogLevel < EdkLogger.DEBUG_9:
EdkLogger.SetLevel(CommandOptions.LogLevel + 1)
else:
EdkLogger.SetLevel(CommandOptions.LogLevel)
except FatalError, X:
return 1
try:
if CommandOptions.FileType == "Vfr":
if CommandOptions.OutputFile == None:
CommandOptions.OutputFile = os.path.splitext(InputFile)[0] + '.iii'
TrimPreprocessedVfr(InputFile, CommandOptions.OutputFile)
elif CommandOptions.FileType == "Asl":
if CommandOptions.OutputFile == None:
CommandOptions.OutputFile = os.path.splitext(InputFile)[0] + '.iii'
TrimAslFile(InputFile, CommandOptions.OutputFile, CommandOptions.IncludePathFile)
elif CommandOptions.FileType == "EdkSourceCode":
TrimEdkSources(InputFile, CommandOptions.OutputFile)
else :
if CommandOptions.OutputFile == None:
CommandOptions.OutputFile = os.path.splitext(InputFile)[0] + '.iii'
TrimPreprocessedFile(InputFile, CommandOptions.OutputFile, CommandOptions.ConvertHex, CommandOptions.TrimLong)
except FatalError, X:
import platform
import traceback
if CommandOptions != None and CommandOptions.LogLevel <= EdkLogger.DEBUG_9:
EdkLogger.quiet("(Python %s on %s) " % (platform.python_version(), sys.platform) + traceback.format_exc())
return 1
except:
import traceback
import platform
EdkLogger.error(
"\nTrim",
CODE_ERROR,
"Unknown fatal error when trimming [%s]" % InputFile,
ExtraData="\n(Please send email to [email protected] for help, attaching following call stack trace!)\n",
RaiseError=False
)
EdkLogger.quiet("(Python %s on %s) " % (platform.python_version(), sys.platform) + traceback.format_exc())
return 1
return 0
if __name__ == '__main__':
r = Main()
## 0-127 is a safe return range, and 1 is a standard default error
if r < 0 or r > 127: r = 1
sys.exit(r)
|
py | 7df83671c104e22bdc7abda9e3e87af2d3059fc1 | # -*- coding: utf-8 -*-
"""
Tests for the CreateEvaluateWorkChain.
"""
# pylint: disable=unused-argument,redefined-outer-name
from aiida import orm
from aiida.plugins import WorkflowFactory
from aiida.engine.launch import run_get_node
from sample_processes import echo_process # pylint: disable=import-error,useless-suppression, unused-import
def test_create_evaluate_basic(configure_with_daemon, echo_process):
"""
Test the CreateEvaluateWorkChain by chaining two basic processes.
"""
CreateEvaluateWorkChain = WorkflowFactory('optimize.wrappers.create_evaluate') # pylint: disable=invalid-name
res, node = run_get_node(
CreateEvaluateWorkChain,
create_process=echo_process,
evaluate_process=echo_process,
create={'x': orm.Float(1)},
output_input_mapping=orm.Dict(dict={'result': 'x'})
)
assert node.is_finished_ok
assert 'create' in res
assert 'evaluate' in res
assert 'result' in res['create']
assert 'result' in res['evaluate']
assert res['create']['result'].value == 1
assert res['evaluate']['result'].value == 1
|
py | 7df836f314d9b30e75826e425afaefcad5409da2 |
import websockets
import asyncio
import requests
import json
from threading import Thread
from lib.interface import OrderBook, Tier, InstrumentDetails
from lib.callback_utils import assert_param_counts
import logging
import sys
import time
from collections import defaultdict
from typing import DefaultDict
from operator import itemgetter
URL_WS = 'wss://ftx.com/ws/'
URL_REST = 'https://ftx.com/api'
class FtxManager:
def __init__(self, symbol: {str}, name='FTX'):
# support string or set of string
if isinstance(symbol, str):
self._symbol = {symbol}
elif isinstance(symbol, set):
self._symbol = symbol
else:
raise ValueError('symbol must either be str or {str}')
# web socket connection
self._conn = None
self._ws = None
# create an empty dictionary to store static data
self._instrument_static = {}
# create an empty dictionary to store latest order books
self._order_books = {}
# create a new async event loop to run async tasks
self._loop = asyncio.new_event_loop()
# create a dedicated thread to run the event loop
self._loop_thread = Thread(target=self._run_async_tasks, daemon=True, name=name)
# order book processor, one per symbol
self._book_processors = {}
for sym in self._symbol:
self._book_processors[sym] = OrderBookProcessor(sym)
# order book callback
self._book_callback = None
# start connection to FTX
def connect(self):
logging.info('Initializing connection')
# get instrument static data
self._get_static()
self._loop.run_until_complete(self._reconnect_ws())
logging.info("starting event loop thread")
self._loop_thread.start()
# make a REST call to /markets and store the price and size increments for all given symbols in the response
def _get_static(self):
logging.info('REST - Requesting static data')
resp = requests.get(URL_REST + '/markets')
message = resp.json()
for data in message['result']:
contract_name = data.get('name')
instrument_static = InstrumentDetails(contract_name=contract_name,
tick_size=float(data.get('priceIncrement')),
quantity_size=float(data.get('sizeIncrement')))
self._instrument_static[contract_name] = instrument_static
# reconnect websocket session to FTX
async def _reconnect_ws(self):
# ws connect
await self._connect_ws()
# subscribe to channels
await self._subscribe_all()
async def _connect_ws(self):
logging.info('WebSocket - Connecting')
self._conn = websockets.connect(URL_WS)
try:
# connecting
self._ws = await self._conn.__aenter__()
# connected
logging.info('WebSocket - connection established')
except Exception as e:
logging.error(e)
async def _send_ws(self, request: str):
logging.info(request)
await self._ws.send(request)
# subscribe to orderbook channel
async def _subscribe_all(self):
if not self._ws:
logging.error("websocket disconnected, unable to subscribe")
return
logging.info('WS - Subscribing to order book')
for sym in self._symbol:
request_msg = \
{
"op": "subscribe",
"channel": "orderbook",
"market": sym
}
await self._send_ws(json.dumps(request_msg))
def _run_async_tasks(self):
""" Run the following tasks concurrently in the current thread """
self._loop.create_task(self._listen_forever())
self._loop.create_task(self._keep_alive_websocket())
self._loop.run_forever()
async def _keep_alive_websocket(self):
while True:
await asyncio.sleep(15)
logging.info("Sending ping to WebSocket server")
await self._ws.send('{"op": "ping"}')
async def _listen_forever(self):
""" This is the main callback that process incoming WebSocket messages from server """
logging.info("start listening on incoming messages")
while True:
if not self._ws:
logging.error("websocket disconnected, reconnecting")
await self._reconnect_ws()
# wait for incoming message
try:
# wait for incoming message, with a maximum wait time
response = await asyncio.wait_for(self._ws.recv(), timeout=10)
# process the message
self._process_websocket_message(response)
except (asyncio.TimeoutError, websockets.exceptions.ConnectionClosed) as e:
logging.error('connection issue, resetting ws: %s' % e)
self._ws = None
except Exception as e:
logging.error('encountered issue, resetting ws: %s' % e)
self._ws = None
# parse and process incoming websocket message
def _process_websocket_message(self, message: str):
data = json.loads(message)
if 'type' in data and data['type'] == 'subscribed':
logging.info('Received subscription response: ' + message)
elif 'channel' in data and data['channel'] == 'orderbook':
market = data['market']
processor = self._book_processors.get(market)
processor.handle(data)
order_book = processor.get_orderbook()
self._order_books[market] = order_book
if self._book_callback:
self._book_callback(market, order_book)
else:
logging.info(message)
""" ----------------------------------- """
""" Interface """
""" ----------------------------------- """
# To get latest order book
def get_ticker(self, contract_name: str) -> OrderBook:
return self._order_books.get(contract_name)
# To get tick size
def get_tick_size(self, contract_name: str):
if contract_name in self._instrument_static:
instrument_static = self._instrument_static.get(contract_name)
return instrument_static.tick_size
else:
return None
# To get quantity size
def get_quantity_size(self, contract_name: str):
if contract_name in self._instrument_static:
instrument_static = self._instrument_static.get(contract_name)
return instrument_static.quantity_size
else:
return 0
def register_depth_callback(self, callback):
""" a depth callback function takes two argument: (contract_name:str, book: OrderBook) """
assert_param_counts(callback, 2)
self._book_callback = callback
# A helper class to process order book message
class OrderBookProcessor:
def __init__(self, symbol: str, depth=5):
self._symbol = symbol
self._depth = depth
# a map that contain bids and asks sides
self._orderbooks: DefaultDict[str, DefaultDict[float, float]] = defaultdict(lambda: defaultdict(float))
self._timestamp = 0
def handle(self, message: {}):
market = message['market']
if market != self._symbol:
raise ValueError("Received message for market {} but this processor is for {}".format(market, self._symbol))
data = message['data']
if data['action'] == 'partial':
self._reset()
for side in {'bids', 'asks'}:
book = self._orderbooks[side]
for price, size in data[side]:
if size:
book[price] = size
else:
del book[price]
self._timestamp = data['time']
def get_orderbook(self) -> OrderBook:
if self._timestamp == 0:
            return None  # no snapshot received yet
sorted_orderbooks = {side: sorted(
[(price, quantity) for price, quantity in list(self._orderbooks[side].items()) if quantity],
key=itemgetter(0),
reverse=(True if side == 'bids' else False)
)
for side in {'bids', 'asks'}}
return OrderBook(timestamp=self._timestamp,
bids=[Tier(price=p, size=s) for (p, s) in sorted_orderbooks['bids'][:self._depth]],
asks=[Tier(price=p, size=s) for (p, s) in sorted_orderbooks['asks'][:self._depth]])
def _reset(self) -> None:
if 'bids' in self._orderbooks:
del self._orderbooks['bids']
if 'asks' in self._orderbooks:
del self._orderbooks['asks']
self._timestamp = 0
if __name__ == '__main__':
# logging stuff
root = logging.getLogger()
root.setLevel(logging.INFO)
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.INFO)
logFormatter = logging.Formatter("%(asctime)s [%(threadName)-12.12s] [%(levelname)-5.5s] %(message)s")
handler.setFormatter(logFormatter)
root.addHandler(handler)
# connect to FTX
logging.info("main program starts")
contract = 'BTC-PERP'
ftx = FtxManager(contract)
ftx.connect()
# print tick and quantity size
logging.info('Contract[{}]: tick size = {}, quantity size = {}'.format(
contract, ftx.get_tick_size(contract), ftx.get_quantity_size(contract)))
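    # Hedged addition (illustrative, not in the original script): besides the
    # polling loop below, updates can also be pushed through the depth callback;
    # the (contract_name, book) signature follows register_depth_callback above.
    def on_depth(contract_name, book):
        logging.info('Depth update for %s: %s' % (contract_name, book))
    ftx.register_depth_callback(on_depth)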
# print order book once a second
while True:
time.sleep(1)
logging.info('Order book: %s' % ftx.get_ticker(contract))
|
py | 7df8371b6cee6a700825207323d275462602f09f | # pylint: disable=line-too-long, invalid-name, missing-docstring
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
with open("requirements.txt", "r") as fh:
requirements = fh.readlines()
setuptools.setup(
name="resnet_models",
version="1.1.3",
author="Eugene Ilyushin",
author_email="[email protected]",
description="The package contains ResNet architectures which were developed on TensorFlow",
long_description="The package contains ResNet architectures which were developed on TensorFlow",
long_description_content_type="text/markdown",
url="https://github.com/Ilyushin/resnet",
packages=setuptools.find_packages(),
package_dir={
'resnet_models': 'resnet_models',
},
entry_points={
'console_scripts': [
'resnet_models=resnet_models.main:main',
],
},
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
install_requires=requirements
)
|
py | 7df83721a1f2c34c103b6d92460985184075ba66 | #
# mac_payload: fhdr(7..23) fport(1) frm_payload(0..N)
#
from MalformedPacketException import MalformedPacketException
from FHDR import FHDR
from MHDR import MHDR
from JoinRequestPayload import JoinRequestPayload
from JoinAcceptPayload import JoinAcceptPayload
from DataPayload import DataPayload
class MacPayload:
def read(self, mtype, mac_payload):
if len(mac_payload) < 1:
raise MalformedPacketException("Invalid mac payload")
self.fhdr = FHDR()
self.fhdr.read(mac_payload)
self.fport = mac_payload[self.fhdr.length()]
self.frm_payload = None
if mtype == MHDR.JOIN_REQUEST:
self.frm_payload = JoinRequestPayload()
self.frm_payload.read(mac_payload[self.fhdr.length() + 1:])
if mtype == MHDR.JOIN_ACCEPT:
self.frm_payload = JoinAcceptPayload()
self.frm_payload.read(mac_payload[self.fhdr.length() + 1:])
if mtype == MHDR.UNCONF_DATA_UP or mtype == MHDR.UNCONF_DATA_DOWN or\
mtype == MHDR.CONF_DATA_UP or mtype == MHDR.CONF_DATA_DOWN:
self.frm_payload = DataPayload()
self.frm_payload.read(self, mac_payload[self.fhdr.length() + 1:])
def create(self, mtype, key, args):
self.fhdr = FHDR()
self.fhdr.create(mtype, args)
#changed by C. Pham
self.fport = 0x01
self.frm_payload = None
if mtype == MHDR.JOIN_REQUEST:
self.frm_payload = JoinRequestPayload()
self.frm_payload.create(args)
if mtype == MHDR.JOIN_ACCEPT:
self.frm_payload = JoinAcceptPayload()
self.frm_payload.create(args)
if mtype == MHDR.UNCONF_DATA_UP or mtype == MHDR.UNCONF_DATA_DOWN or\
mtype == MHDR.CONF_DATA_UP or mtype == MHDR.CONF_DATA_DOWN:
self.frm_payload = DataPayload()
self.frm_payload.create(self, key, args)
def length(self):
return len(self.to_raw())
def to_raw(self):
mac_payload = []
if self.fhdr.get_devaddr() != [0x00, 0x00, 0x00, 0x00]:
mac_payload += self.fhdr.to_raw()
if self.frm_payload != None:
if self.fhdr.get_devaddr() != [0x00, 0x00, 0x00, 0x00]:
mac_payload += [self.fport]
mac_payload += self.frm_payload.to_raw()
return mac_payload
def get_fhdr(self):
return self.fhdr
def set_fhdr(self, fhdr):
self.fhdr = fhdr
def get_fport(self):
return self.fport
def set_fport(self, fport):
self.fport = fport
def get_frm_payload(self):
return self.frm_payload
def set_frm_payload(self, frm_payload):
self.frm_payload = frm_payload
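# Hedged usage sketch (comments only; the byte values and surrounding framing
# are assumptions based on the methods above, not taken from the LoRaWAN spec):
#
#   payload = MacPayload()
#   payload.read(MHDR.UNCONF_DATA_UP, raw_bytes)   # raw_bytes: list of MACPayload bytes
#   print(payload.get_fhdr().get_devaddr(), payload.get_fport())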
|
py | 7df8381116e0fcd25f1552f2e90822a34086d16d | """Unit test package for kune."""
|
py | 7df838473dce9781a10e87399aaa556113bff5fe | # Copyright (c) SenseTime. All Rights Reserved.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import logging
import os
import time
import math
import json
import random
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from tensorboardX import SummaryWriter
from torch.nn.utils import clip_grad_norm_
import torch.nn.functional as F
from torch.utils.data.distributed import DistributedSampler
from pysot.utils.lr_scheduler import build_lr_scheduler
from pysot.utils.log_helper import init_log, print_speed, add_file_handler
from pysot.utils.distributed import dist_init, DistModule, reduce_gradients,\
average_reduce, get_rank, get_world_size
from pysot.utils.model_load import load_pretrain, restore_from
from pysot.utils.average_meter import AverageMeter
from pysot.utils.misc import describe, commit
from pysot.models.model_builder import ModelBuilder,Modified_ModelBuilder
from pysot.datasets.dataset import modified_TrkDataset
from pysot.core.config import cfg
#initialize logger...
logger = logging.getLogger('global')
#get input...
parser = argparse.ArgumentParser(description='siamrpn tracking')
parser.add_argument('--cfg', type=str, default='config.yaml',
help='configuration of tracking')
parser.add_argument('--seed', type=int, default=123456,
help='random seed')
parser.add_argument('--local_rank', type=int, default=0,
help='compulsory for pytorch launcer')
args = parser.parse_args()
#initialize random seed...
def seed_torch(seed=0):
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
#initialize dataloader...
def build_data_loader():
logger.info("build train dataset")
# train_dataset
train_dataset = modified_TrkDataset()
logger.info("build dataset done")
train_sampler = None
train_loader = DataLoader(train_dataset,
batch_size=cfg.TRAIN.BATCH_SIZE,
num_workers=cfg.TRAIN.NUM_WORKERS,
pin_memory=True,
sampler=train_sampler)
return train_loader
#initialize optimizer..
def build_opt_lr(model, current_epoch=0):
for param in model.backbone.parameters():
param.requires_grad = False
for m in model.backbone.modules():
if isinstance(m, nn.BatchNorm2d):
m.eval()
if current_epoch >= cfg.BACKBONE.TRAIN_EPOCH:
for layer in cfg.BACKBONE.TRAIN_LAYERS:
for param in getattr(model.backbone, layer).parameters():
param.requires_grad = True
for m in getattr(model.backbone, layer).modules():
if isinstance(m, nn.BatchNorm2d):
m.train()
trainable_params = []
trainable_params+=[{
'params':model.forgetting_factor_layer3,
'lr':cfg.TRAIN.BASE_LR
}]
trainable_params += [{
'params': model.forgetting_factor_layer4,
'lr': cfg.TRAIN.BASE_LR
}]
trainable_params += [{
'params': model.forgetting_factor_layer5,
'lr': cfg.TRAIN.BASE_LR
}]
trainable_params += [{'params': filter(lambda x: x.requires_grad,
model.backbone.parameters()),
'lr': cfg.BACKBONE.LAYERS_LR * cfg.TRAIN.BASE_LR}]
if cfg.ADJUST.ADJUST:
trainable_params += [{'params': model.neck.parameters(),
'lr': cfg.TRAIN.BASE_LR}]
trainable_params += [{'params': model.rpn_head.parameters(),
'lr': cfg.TRAIN.BASE_LR}]
if cfg.MASK.MASK:
trainable_params += [{'params': model.mask_head.parameters(),
'lr': cfg.TRAIN.BASE_LR}]
if cfg.REFINE.REFINE:
trainable_params += [{'params': model.refine_head.parameters(),
'lr': cfg.TRAIN.LR.BASE_LR}]
optimizer = torch.optim.SGD(trainable_params,
momentum=cfg.TRAIN.MOMENTUM,
weight_decay=cfg.TRAIN.WEIGHT_DECAY)
lr_scheduler = build_lr_scheduler(optimizer, epochs=cfg.TRAIN.EPOCH)
lr_scheduler.step(cfg.TRAIN.START_EPOCH)
return optimizer, lr_scheduler
def train(train_loader, model, optimizer, lr_scheduler, tb_writer):
cur_lr = lr_scheduler.get_cur_lr()
rank = get_rank()
average_meter = AverageMeter()
def is_valid_number(x):
return not(math.isnan(x) or math.isinf(x) or x > 1e4)
world_size = get_world_size()
num_per_epoch = len(train_loader.dataset) // \
cfg.TRAIN.EPOCH // (cfg.TRAIN.BATCH_SIZE * world_size)
start_epoch = cfg.TRAIN.START_EPOCH
epoch = start_epoch
if not os.path.exists(cfg.TRAIN.SNAPSHOT_DIR) and \
get_rank() == 0:
os.makedirs(cfg.TRAIN.SNAPSHOT_DIR)
logger.info("model\n{}".format(describe(model.module))) # the describtion of the module
end = time.time()
for idx, data in enumerate(train_loader):
if epoch != idx // num_per_epoch + start_epoch:
epoch = idx // num_per_epoch + start_epoch
if get_rank() == 0:
torch.save(
{'epoch': epoch,
'state_dict': model.module.state_dict(),
'optimizer': optimizer.state_dict()},
cfg.TRAIN.SNAPSHOT_DIR+'/checkpoint_e%d.pth' % (epoch))
if epoch == cfg.TRAIN.EPOCH:
return
if cfg.BACKBONE.TRAIN_EPOCH == epoch:
logger.info('start training backbone.')
optimizer, lr_scheduler = build_opt_lr(model.module, epoch)
logger.info("model\n{}".format(describe(model.module)))
lr_scheduler.step(epoch)
cur_lr = lr_scheduler.get_cur_lr()
logger.info('epoch: {}'.format(epoch+1))
tb_idx = idx
if idx % num_per_epoch == 0 and idx != 0:
for idx, pg in enumerate(optimizer.param_groups):
logger.info('epoch {} lr {}'.format(epoch+1, pg['lr']))
if rank == 0:
tb_writer.add_scalar('lr/group{}'.format(idx+1),
pg['lr'], tb_idx)
data_time = average_reduce(time.time() - end)
if rank == 0:
tb_writer.add_scalar('time/data', data_time, tb_idx)
outputs = model(data)
loss = outputs['total_loss']
if is_valid_number(loss.data.item()):
optimizer.zero_grad()
loss.backward()
reduce_gradients(model)
if rank == 0 and cfg.TRAIN.LOG_GRADS:
log_grads(model.module, tb_writer, tb_idx)
# clip gradient
clip_grad_norm_(model.parameters(), cfg.TRAIN.GRAD_CLIP)
optimizer.step()
batch_time = time.time() - end
batch_info = {}
batch_info['batch_time'] = average_reduce(batch_time)
batch_info['data_time'] = average_reduce(data_time)
for k, v in sorted(outputs.items()):
batch_info[k] = average_reduce(v.data.item())
average_meter.update(**batch_info)
if rank == 0:
for k, v in batch_info.items():
tb_writer.add_scalar(k, v, tb_idx)
if (idx+1) % cfg.TRAIN.PRINT_FREQ == 0:
for name, para in model.named_parameters():
if name == 'module.forgetting_factor_layer3':
print("forgetting_factor_layer3: ", F.softmax(para,0))
if name == 'module.forgetting_factor_layer4':
print("forgetting_factor_layer4: ", F.softmax(para,0))
if name == 'module.forgetting_factor_layer5':
print("forgetting_factor_layer5: ", F.softmax(para,0))
info = "Epoch: [{}][{}/{}] lr: {:.6f}\n".format(
epoch+1, (idx+1) % num_per_epoch,
num_per_epoch, cur_lr)
for cc, (k, v) in enumerate(batch_info.items()):
if cc % 2 == 0:
info += ("\t{:s}\t").format(
getattr(average_meter, k))
else:
info += ("{:s}\n").format(
getattr(average_meter, k))
logger.info(info)
print_speed(idx+1+start_epoch*num_per_epoch,
average_meter.batch_time.avg,
cfg.TRAIN.EPOCH * num_per_epoch)
end = time.time()
def main():
rank, world_size = dist_init()
logger.info("init done")
# load cfg
cfg.merge_from_file(args.cfg)
    if rank == 0:  # in distributed training, rank 0 is the master node
        if not os.path.exists(cfg.TRAIN.LOG_DIR):  # create the log dir (e.g. ./logs) if it does not exist
os.makedirs(cfg.TRAIN.LOG_DIR)
init_log('global', logging.INFO)
        if cfg.TRAIN.LOG_DIR:  # also write the log to a file under the log dir
add_file_handler('global',
os.path.join(cfg.TRAIN.LOG_DIR, 'logs.txt'),
logging.INFO)
logger.info("Version Information: \n{}\n".format(commit()))
logger.info("config \n{}".format(json.dumps(cfg, indent=4)))
# create model
model = Modified_ModelBuilder().cuda().train()
# for name, param in model.named_parameters():
# print(name)
# load pretrained backbone weights
if cfg.BACKBONE.PRETRAINED:
cur_path = os.path.dirname(os.path.realpath(__file__))
backbone_path = os.path.join(cur_path, '../', cfg.BACKBONE.PRETRAINED)
load_pretrain(model.backbone, backbone_path)
# create tensorboard writer
if rank == 0 and cfg.TRAIN.LOG_DIR:
tb_writer = SummaryWriter(cfg.TRAIN.LOG_DIR)
else:
tb_writer = None
# build dataset loader
train_loader = build_data_loader()
# build optimizer and lr_scheduler
optimizer, lr_scheduler = build_opt_lr(model,
cfg.TRAIN.START_EPOCH)
# resume training
if cfg.TRAIN.RESUME:
logger.info("resume from {}".format(cfg.TRAIN.RESUME))
assert os.path.isfile(cfg.TRAIN.RESUME), \
'{} is not a valid file.'.format(cfg.TRAIN.RESUME)
model, optimizer, cfg.TRAIN.START_EPOCH = \
restore_from(model, optimizer, cfg.TRAIN.RESUME)
# load pretrain
elif cfg.TRAIN.PRETRAINED:
load_pretrain(model, cfg.TRAIN.PRETRAINED)
dist_model = DistModule(model)
logger.info(lr_scheduler)
logger.info("model prepare done")
# start training
train(train_loader, dist_model, optimizer, lr_scheduler, tb_writer)
if __name__ == '__main__':
seed_torch(args.seed)
main()
"""
the following code is just for testing dataloder module...
create by zhangbo Nov 24 15:00
"""
# from PIL import Image
# import matplotlib.pyplot as plt
# from torchvision import transforms
# unloader = transforms.ToPILImage()
# def imshow(tensor, title=None):
# image = tensor.cpu().clone() # we clone the tensor to not do changes on it
# #image = image.squeeze(0) # remove the fake batch dimension
# image = unloader(image)
# plt.imshow(image)
# if title is not None:
# plt.title(title)
# plt.pause(20) # pause a bit so that plots are updated
# train_loader=build_data_loader()
# data=next(iter(train_loader))
# searchs=data['searchs']
# search=[]
# for i in range(2,8):
# search.append(searchs[:,3*(i-1):3*i,:,:])
# imshow(data['template'][0].to(torch.float32))
# print(len(search),search[0][0].shape)
# for idx, data in enumerate(train_loader):
# print("idx is ",idx)
# searchs=data['searchs']
# search=[]
# for i in range(2,8):
# search.append(searchs[:,3*(i-1):3*i,:,:])
# imshow(search[0][0].to(torch.float32))
# print(len(search),search[0][0].shape)
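# Launch note (illustrative, not taken from the original repository): since this
# script calls dist_init() and accepts --local_rank, it is typically started
# through a distributed launcher, for example:
#   python -m torch.distributed.launch --nproc_per_node=1 train.py --cfg config.yaml
# The exact command depends on the surrounding pysot setup.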
|
py | 7df8386438e91d4159da564cf99deaeb45dd0dcb | # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack.package import *
class Petaca(CMakePackage):
"""A collection of modern Fortran modules.
Modules include:
- Parameter lists
- map_any_type
- fortran_dynamic_loader
- timer_tree_type
- yajl_fort
- json
"""
homepage = "https://petaca.readthedocs.io/en/master"
git = "https://github.com/nncarlson/petaca.git"
url = "https://github.com/nncarlson/petaca/archive/refs/tags/v22.03.tar.gz"
maintainers = ['pbrady']
version('develop', branch="master")
version('22.03', sha256='e6559e928c7cca6017ef0582c204eee775f6bb3f927f1c224c515c2ad574cc32')
version('21.03', commit='f17df95193ca1a3879687a59a91a123be25e3efa')
depends_on('[email protected]:', type='build')
depends_on('[email protected]:')
# override RelWithDebugInfo since those flags aren't set in petaca
variant('build_type', default="Release",
            description='The build type to build',
values=('Debug', 'Release'))
variant('shared', default=False, description='build shared libraries')
variant('std_name', default=False, description='enables std_mod_proc_name with intel')
# copied from openmpi/package.py to ensure fortran support
@run_before('cmake')
def die_without_fortran(self):
if (self.compiler.f77 is None) or (self.compiler.fc is None):
raise InstallError(
'petaca requires both C and Fortran compilers!'
)
def cmake_args(self):
return [
self.define('ENABLE_TESTS', self.run_tests),
self.define_from_variant("BUILD_SHARED_LIBS", "shared"),
self.define_from_variant("ENABLE_STD_MOD_PROC_NAME", "std_name")
]
|
py | 7df838fb26236c9330e95f2f79d637765a115e38 | # Copyright (C) 2017 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
from lib import meta
class MetaTestDecorator(meta.RequireDocs, meta.DecorateFlakyTests):
"""Composition of multiple metaclasses"""
|
py | 7df83a285ba8b4ec6cf0467930740a0ea02e498c | import argparse
import sys
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
'-f', '--faidx',
help='Path to the .fa.idx file.'
)
parser.add_argument(
'-o', '--out',
help='Path to the output length map.'
)
args = parser.parse_args()
return args
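# For reference (illustrative, assuming the input follows the samtools ".fai"
# layout): each index line has five tab-separated columns -- NAME, LENGTH,
# OFFSET, LINEBASES, LINEWIDTH -- and this script keeps only the first two,
# e.g. "chr1<TAB>248956422".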
def gen_length_map(args):
f = open(args.faidx, 'r')
if not args.out:
f_out = sys.stdout
else:
f_out = open(args.out, 'w')
for line in f:
line = line.split()
print(f'{line[0]}\t{line[1]}', file=f_out)
if __name__ == '__main__':
args = parse_args()
gen_length_map(args)
|
py | 7df83a3b8bcd4273dcc52fbf82efb814524f38f1 | # Import dependencies
import cv2
# Read Images
img = cv2.imread(r'C:\Users\harrizazham98\Desktop\OpenCVForPython\resources\Day 1\image\KL_view.jpg')
# Display Image
cv2.imshow('Original Image',img)
# make a copy of the original image
imageRectangle = img.copy()
# define the starting and end points of the rectangle
start_point =(200,115)
end_point =(300,225)
# draw the rectangle
cv2.rectangle(imageRectangle, start_point, end_point, (0, 0, 255), thickness= 3, lineType=cv2.LINE_8)
# display the output
cv2.imshow('imageRectangle', imageRectangle)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
py | 7df83aec88c23a55f7be023a6a1e6888b3303ea1 | # -*- coding: utf-8 -*-
'''
- - - - - -- - - - - - - - - - - - - - - - - - - - - - -
Name - - CNN - Convolutional Neural Network For Photo Recognizing
Goal - - Recognize Handwritten Word Photos
Detail: Total 5 layers neural network
        * Convolution layer
        * Pooling layer
        * Input layer of BP
        * Hidden layer of BP
        * Output layer of BP
Author: Stephen Lee
Github: [email protected]
Date: 2017.9.20
- - - - - -- - - - - - - - - - - - - - - - - - - - - - -
'''
from __future__ import print_function
import pickle
import matplotlib.pyplot as plt
import numpy as np
class CNN():
def __init__(self, conv1_get, size_p1, bp_num1, bp_num2, bp_num3, rate_w=0.2, rate_t=0.2):
'''
        :param conv1_get: [size, number, step] of the convolution kernel
:param size_p1: pooling size
:param bp_num1: units number of flatten layer
:param bp_num2: units number of hidden layer
:param bp_num3: units number of output layer
:param rate_w: rate of weight learning
:param rate_t: rate of threshold learning
'''
self.num_bp1 = bp_num1
self.num_bp2 = bp_num2
self.num_bp3 = bp_num3
self.conv1 = conv1_get[:2]
self.step_conv1 = conv1_get[2]
self.size_pooling1 = size_p1
self.rate_weight = rate_w
self.rate_thre = rate_t
self.w_conv1 = [np.mat(-1 * np.random.rand(self.conv1[0], self.conv1[0]) + 0.5) for i in range(self.conv1[1])]
self.wkj = np.mat(-1 * np.random.rand(self.num_bp3, self.num_bp2) + 0.5)
self.vji = np.mat(-1 * np.random.rand(self.num_bp2, self.num_bp1) + 0.5)
self.thre_conv1 = -2 * np.random.rand(self.conv1[1]) + 1
self.thre_bp2 = -2 * np.random.rand(self.num_bp2) + 1
self.thre_bp3 = -2 * np.random.rand(self.num_bp3) + 1
def save_model(self, save_path):
# save model dict with pickle
model_dic = {'num_bp1': self.num_bp1,
'num_bp2': self.num_bp2,
'num_bp3': self.num_bp3,
'conv1': self.conv1,
'step_conv1': self.step_conv1,
'size_pooling1': self.size_pooling1,
'rate_weight': self.rate_weight,
'rate_thre': self.rate_thre,
'w_conv1': self.w_conv1,
'wkj': self.wkj,
'vji': self.vji,
'thre_conv1': self.thre_conv1,
'thre_bp2': self.thre_bp2,
'thre_bp3': self.thre_bp3}
with open(save_path, 'wb') as f:
pickle.dump(model_dic, f)
print('Model saved: %s' % save_path)
@classmethod
def ReadModel(cls, model_path):
# read saved model
with open(model_path, 'rb') as f:
model_dic = pickle.load(f)
conv_get = model_dic.get('conv1')
conv_get.append(model_dic.get('step_conv1'))
size_p1 = model_dic.get('size_pooling1')
bp1 = model_dic.get('num_bp1')
bp2 = model_dic.get('num_bp2')
bp3 = model_dic.get('num_bp3')
r_w = model_dic.get('rate_weight')
r_t = model_dic.get('rate_thre')
# create model instance
conv_ins = CNN(conv_get, size_p1, bp1, bp2, bp3, r_w, r_t)
# modify model parameter
conv_ins.w_conv1 = model_dic.get('w_conv1')
conv_ins.wkj = model_dic.get('wkj')
conv_ins.vji = model_dic.get('vji')
conv_ins.thre_conv1 = model_dic.get('thre_conv1')
conv_ins.thre_bp2 = model_dic.get('thre_bp2')
conv_ins.thre_bp3 = model_dic.get('thre_bp3')
return conv_ins
def sig(self, x):
return 1 / (1 + np.exp(-1 * x))
def do_round(self, x):
return round(x, 3)
def convolute(self, data, convs, w_convs, thre_convs, conv_step):
# convolution process
size_conv = convs[0]
num_conv = convs[1]
size_data = np.shape(data)[0]
# get the data slice of original image data, data_focus
data_focus = []
for i_focus in range(0, size_data - size_conv + 1, conv_step):
for j_focus in range(0, size_data - size_conv + 1, conv_step):
focus = data[i_focus:i_focus + size_conv, j_focus:j_focus + size_conv]
data_focus.append(focus)
        # calculate the feature map of every single kernel, and save it as a list of matrices
data_featuremap = []
Size_FeatureMap = int((size_data - size_conv) / conv_step + 1)
for i_map in range(num_conv):
featuremap = []
for i_focus in range(len(data_focus)):
net_focus = np.sum(np.multiply(data_focus[i_focus], w_convs[i_map])) - thre_convs[i_map]
featuremap.append(self.sig(net_focus))
featuremap = np.asmatrix(featuremap).reshape(Size_FeatureMap, Size_FeatureMap)
data_featuremap.append(featuremap)
        # expand the data slices to one dimension
focus1_list = []
for each_focus in data_focus:
            focus1_list.extend(self._expand_mat(each_focus))
focus_list = np.asarray(focus1_list)
return focus_list, data_featuremap
def pooling(self, featuremaps, size_pooling, type='average_pool'):
# pooling process
size_map = len(featuremaps[0])
size_pooled = int(size_map / size_pooling)
featuremap_pooled = []
for i_map in range(len(featuremaps)):
map = featuremaps[i_map]
map_pooled = []
for i_focus in range(0, size_map, size_pooling):
for j_focus in range(0, size_map, size_pooling):
focus = map[i_focus:i_focus + size_pooling, j_focus:j_focus + size_pooling]
if type == 'average_pool':
# average pooling
map_pooled.append(np.average(focus))
elif type == 'max_pooling':
# max pooling
map_pooled.append(np.max(focus))
map_pooled = np.asmatrix(map_pooled).reshape(size_pooled, size_pooled)
featuremap_pooled.append(map_pooled)
return featuremap_pooled
def _expand(self, datas):
# expanding three dimension data to one dimension list
data_expanded = []
for i in range(len(datas)):
shapes = np.shape(datas[i])
data_listed = datas[i].reshape(1, shapes[0] * shapes[1])
data_listed = data_listed.getA().tolist()[0]
data_expanded.extend(data_listed)
data_expanded = np.asarray(data_expanded)
return data_expanded
def _expand_mat(self, data_mat):
# expanding matrix to one dimension list
data_mat = np.asarray(data_mat)
shapes = np.shape(data_mat)
data_expanded = data_mat.reshape(1, shapes[0] * shapes[1])
return data_expanded
def _calculate_gradient_from_pool(self, out_map, pd_pool, num_map, size_map, size_pooling):
'''
        calculate the gradient from the data slice of the pool layer
pd_pool: list of matrix
out_map: the shape of data slice(size_map*size_map)
return: pd_all: list of matrix, [num, size_map, size_map]
'''
pd_all = []
i_pool = 0
for i_map in range(num_map):
pd_conv1 = np.ones((size_map, size_map))
for i in range(0, size_map, size_pooling):
for j in range(0, size_map, size_pooling):
pd_conv1[i:i + size_pooling, j:j + size_pooling] = pd_pool[i_pool]
i_pool = i_pool + 1
pd_conv2 = np.multiply(pd_conv1, np.multiply(out_map[i_map], (1 - out_map[i_map])))
pd_all.append(pd_conv2)
return pd_all
def train(self, patterns, datas_train, datas_teach, n_repeat, error_accuracy, draw_e=bool):
        # model training
print('----------------------Start Training-------------------------')
print((' - - Shape: Train_Data ', np.shape(datas_train)))
print((' - - Shape: Teach_Data ', np.shape(datas_teach)))
rp = 0
all_mse = []
mse = 10000
while rp < n_repeat and mse >= error_accuracy:
alle = 0
print('-------------Learning Time %d--------------' % rp)
for p in range(len(datas_train)):
# print('------------Learning Image: %d--------------'%p)
data_train = np.asmatrix(datas_train[p])
data_teach = np.asarray(datas_teach[p])
data_focus1, data_conved1 = self.convolute(data_train, self.conv1, self.w_conv1,
self.thre_conv1, conv_step=self.step_conv1)
data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
shape_featuremap1 = np.shape(data_conved1)
'''
print(' -----original shape ', np.shape(data_train))
print(' ---- after convolution ',np.shape(data_conv1))
print(' -----after pooling ',np.shape(data_pooled1))
'''
data_bp_input = self._expand(data_pooled1)
bp_out1 = data_bp_input
bp_net_j = np.dot(bp_out1, self.vji.T) - self.thre_bp2
bp_out2 = self.sig(bp_net_j)
bp_net_k = np.dot(bp_out2, self.wkj.T) - self.thre_bp3
bp_out3 = self.sig(bp_net_k)
# --------------Model Leaning ------------------------
            # calculate error and gradient---------------
pd_k_all = np.multiply((data_teach - bp_out3), np.multiply(bp_out3, (1 - bp_out3)))
pd_j_all = np.multiply(np.dot(pd_k_all, self.wkj), np.multiply(bp_out2, (1 - bp_out2)))
pd_i_all = np.dot(pd_j_all, self.vji)
pd_conv1_pooled = pd_i_all / (self.size_pooling1 * self.size_pooling1)
pd_conv1_pooled = pd_conv1_pooled.T.getA().tolist()
pd_conv1_all = self._calculate_gradient_from_pool(data_conved1, pd_conv1_pooled, shape_featuremap1[0],
shape_featuremap1[1], self.size_pooling1)
# weight and threshold learning process---------
# convolution layer
for k_conv in range(self.conv1[1]):
pd_conv_list = self._expand_mat(pd_conv1_all[k_conv])
delta_w = self.rate_weight * np.dot(pd_conv_list, data_focus1)
self.w_conv1[k_conv] = self.w_conv1[k_conv] + delta_w.reshape((self.conv1[0], self.conv1[0]))
self.thre_conv1[k_conv] = self.thre_conv1[k_conv] - np.sum(pd_conv1_all[k_conv]) * self.rate_thre
# all connected layer
self.wkj = self.wkj + pd_k_all.T * bp_out2 * self.rate_weight
self.vji = self.vji + pd_j_all.T * bp_out1 * self.rate_weight
self.thre_bp3 = self.thre_bp3 - pd_k_all * self.rate_thre
self.thre_bp2 = self.thre_bp2 - pd_j_all * self.rate_thre
# calculate the sum error of all single image
errors = np.sum(abs((data_teach - bp_out3)))
alle = alle + errors
# print(' ----Teach ',data_teach)
# print(' ----BP_output ',bp_out3)
rp = rp + 1
mse = alle / patterns
all_mse.append(mse)
def draw_error():
yplot = [error_accuracy for i in range(int(n_repeat * 1.2))]
plt.plot(all_mse, '+-')
plt.plot(yplot, 'r--')
plt.xlabel('Learning Times')
plt.ylabel('All_mse')
plt.grid(True, alpha=0.5)
plt.show()
        print('------------------Training Completed---------------------')
print((' - - Training epoch: ', rp, ' - - Mse: %.6f' % mse))
if draw_e:
draw_error()
return mse
def predict(self, datas_test):
# model predict
produce_out = []
print('-------------------Start Testing-------------------------')
print((' - - Shape: Test_Data ', np.shape(datas_test)))
for p in range(len(datas_test)):
data_test = np.asmatrix(datas_test[p])
data_focus1, data_conved1 = self.convolute(data_test, self.conv1, self.w_conv1,
self.thre_conv1, conv_step=self.step_conv1)
data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
data_bp_input = self._expand(data_pooled1)
bp_out1 = data_bp_input
bp_net_j = bp_out1 * self.vji.T - self.thre_bp2
bp_out2 = self.sig(bp_net_j)
bp_net_k = bp_out2 * self.wkj.T - self.thre_bp3
bp_out3 = self.sig(bp_net_k)
produce_out.extend(bp_out3.getA().tolist())
res = [list(map(self.do_round, each)) for each in produce_out]
return np.asarray(res)
def convolution(self, data):
# return the data of image after convoluting process so we can check it out
data_test = np.asmatrix(data)
data_focus1, data_conved1 = self.convolute(data_test, self.conv1, self.w_conv1,
self.thre_conv1, conv_step=self.step_conv1)
data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
return data_conved1, data_pooled1
if __name__ == '__main__':
pass
'''
I will put the example on other file
'''
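    # ------------------------------------------------------------------
    # Hedged usage sketch (added for illustration; the author's real example
    # lives in another file). All shapes and hyper-parameters below are
    # assumptions: two random 12x12 "images" with 4-class one-hot targets.
    # With a 3x3 kernel, step 1 and 2x2 average pooling, each of the 2
    # feature maps flattens to 5*5 values, so bp_num1 = 2 * 5 * 5 = 50.
    # ------------------------------------------------------------------
    cnn = CNN(conv1_get=[3, 2, 1], size_p1=2, bp_num1=50, bp_num2=20,
              bp_num3=4, rate_w=0.2, rate_t=0.2)
    train_x = [np.random.rand(12, 12) for _ in range(2)]
    train_y = [[1, 0, 0, 0], [0, 1, 0, 0]]
    cnn.train(patterns=2, datas_train=train_x, datas_teach=train_y,
              n_repeat=10, error_accuracy=0.05, draw_e=False)
    print(cnn.predict(train_x))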
|
py | 7df83b02faa1abf427bfaeb7d4b65591583fc8c2 | import rclpy
from sensor_thermometer import thermometer_data_publisher_node as node
def main(args=None):
rclpy.init(args=args)
# Construct the publisher
thermometer_data_publisher = node.ThermometerDataPublisher()
# Reading and publishing data at defined rate (2 seconds)
rclpy.spin(thermometer_data_publisher)
# Clean up when script is stopped
thermometer_data_publisher.destroy_node()
rclpy.shutdown()
if __name__ == '__main__':
main() |
py | 7df83b04bd4f53d5b96eac7d9f25efc95a462fd3 | # -*- coding: utf-8 -*-
import logging
logging.getLogger(__name__).addHandler(logging.NullHandler())
|
py | 7df83b1d537c92a52dd58962ebcf5dce643c32d5 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Caffe(CMakePackage):
"""Caffe is a deep learning framework made with expression, speed, and
modularity in mind. It is developed by the Berkeley Vision and Learning
Center (BVLC) and by community contributors."""
homepage = "http://caffe.berkeleyvision.org"
url = "https://github.com/BVLC/caffe/archive/1.0.tar.gz"
version('1.0', sha256='71d3c9eb8a183150f965a465824d01fe82826c22505f7aa314f700ace03fa77f')
version('rc5', sha256='06592aa8f5254335df3e244dafacc15765e2c60479b4bf2e7c887e8e023802fb')
version('rc4', sha256='018792411d75ee34b6107216550cca2a1d668d45cb366033ba3c647e6a3018df')
version('rc3', sha256='0884207bfba0fbc8b263b87d30f9304f7094eec3a48f975177d142f8c72b6e3b')
version('rc2', sha256='55c9c20870b30ce398e19e4f1a62ade1eff08fce51e28fa5604035b711978eec')
variant('cuda', default=False,
description='Builds with support for GPUs via CUDA and cuDNN')
variant('opencv', default=True,
description='Build with OpenCV support')
variant('leveldb', default=True,
description="Build with levelDB")
variant('lmdb', default=True,
description="Build with lmdb")
variant('python', default=False,
description='Build python wrapper and caffe python layer')
variant('matlab', default=False,
description='Build Matlab wrapper')
depends_on('boost')
depends_on('boost +python', when='+python')
depends_on('cuda', when='+cuda')
depends_on('blas')
depends_on('protobuf')
depends_on('glog')
depends_on('gflags')
depends_on('hdf5 +hl +cxx')
# Optional dependencies
depends_on('[email protected]+core+highgui+imgproc', when='+opencv')
depends_on('leveldb', when='+leveldb')
depends_on('lmdb', when='+lmdb')
depends_on('[email protected]:', when='+python')
depends_on('[email protected]:', when='+python', type=('build', 'run'))
depends_on('matlab', when='+matlab')
extends('python', when='+python')
def cmake_args(self):
spec = self.spec
args = ['-DBLAS={0}'.format('open' if spec['blas'].name == 'openblas'
else spec['blas'].name),
'-DCPU_ONLY=%s' % ('~cuda' in spec),
'-DUSE_CUDNN=%s' % ('+cuda' in spec),
'-DBUILD_python=%s' % ('+python' in spec),
'-DBUILD_python_layer=%s' % ('+python' in spec),
'-DBUILD_matlab=%s' % ('+matlab' in spec),
'-DUSE_OPENCV=%s' % ('+opencv' in spec),
'-DUSE_LEVELDB=%s' % ('+leveldb' in spec),
'-DUSE_LMDB=%s' % ('+lmdb' in spec),
'-DGFLAGS_ROOT_DIR=%s' % spec['gflags'].prefix,
'-DGLOG_ROOT_DIR=%s' % spec['glog'].prefix,
]
if spec.satisfies('^openblas'):
env['OpenBLAS_HOME'] = spec['openblas'].prefix
if spec.satisfies('+lmdb'):
env['LMDB_DIR'] = spec['lmdb'].prefix
if spec.satisfies('+leveldb'):
env['LEVELDB_ROOT'] = spec['leveldb'].prefix
if spec.satisfies('+python'):
version = spec['python'].version.up_to(1)
args.append('-Dpython_version=%s' % version)
return args
|
py | 7df83b2f92d4836f5b16f91f675a326fbea55026 | from django.forms import ModelForm
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
from django import forms
from .models import *
class CustomerForm(ModelForm):
class Meta:
model = Customer
fields = '__all__'
exclude = ['user']
class OrderForm(ModelForm):
class Meta:
model = Order
fields = '__all__'
class CreateUserForm(UserCreationForm):
class Meta:
model = User
fields = ['username', 'email', 'password1', 'password2']
|
py | 7df83b60124353901487b6f5634c819781ff24c5 | # -*- coding: utf-8 -*-
"""
@author: Taar
"""
# conversion of https://github.com/openwebos/qt/tree/master/examples/tutorials/modelview/6_treeview
import sys
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtCore import Qt as qt
ROWS = 2
COLS = 3
class MainWindow(QtWidgets.QMainWindow):
def __init__(self, parent=None):
super(MainWindow, self).__init__(parent)
treeView = QtWidgets.QTreeView(self)
self.setCentralWidget(treeView)
standardModel = QtGui.QStandardItemModel()
rootNode = standardModel.invisibleRootItem()
# defining a couple of items
americaItem = QtGui.QStandardItem("America")
canadaItem = QtGui.QStandardItem("Canada")
usaItem = QtGui.QStandardItem("USA")
bostonItem = QtGui.QStandardItem("Boston")
europeItem = QtGui.QStandardItem("Europe")
italyItem = QtGui.QStandardItem("Italy")
romeItem = QtGui.QStandardItem("Rome")
veronaItem = QtGui.QStandardItem("Verona")
# building up the hierarchy
rootNode.appendRow(americaItem)
rootNode.appendRow(europeItem)
americaItem.appendRow(canadaItem)
americaItem.appendRow(usaItem)
usaItem.appendRow(bostonItem)
europeItem.appendRow(italyItem)
italyItem.appendRow(romeItem)
italyItem.appendRow(veronaItem)
# register the model
treeView.setModel(standardModel)
treeView.expandAll()
# selection changes shall trigger a slot
selectionModel = treeView.selectionModel()
selectionModel.selectionChanged.connect(self.selectionChangedSlot)
self.treeView = treeView
@QtCore.pyqtSlot(QtCore.QItemSelection, QtCore.QItemSelection) # decorator has same signature as the signal
def selectionChangedSlot(self, newSelection, oldSelection):
# get the text of the selected item
index = self.treeView.selectionModel().currentIndex()
selectedText = index.data(qt.DisplayRole)
# find out the hierarchy level of the selected item
hierarchyLevel = 1
seekRoot = index
invalid = QtCore.QModelIndex()
while seekRoot.parent() != invalid:
seekRoot = seekRoot.parent()
hierarchyLevel += 1
showString = '{}, Level {}'.format(selectedText, hierarchyLevel)
self.setWindowTitle(showString)
if __name__ == '__main__':
app = QtWidgets.QApplication.instance()
if app is None:
app = QtWidgets.QApplication(sys.argv)
w = MainWindow(None)
w.show()
app.exec_()
|
py | 7df83b6c3abe997b00420eba36326f805b709b28 | # -*- coding: utf-8 -*-
"""
Created on Wed Apr 24 21:30:14 2019
@author: wmy
"""
import tensorflow as tf
from keras import backend as K
from keras.optimizers import Adam
from tqdm import tqdm
class AdamWithWeightsNormalization(Adam):
def get_updates(self, loss, params):
grads = self.get_gradients(loss, params)
self.updates = [K.update_add(self.iterations, 1)]
lr = self.lr
if self.initial_decay > 0:
lr *= (1. / (1. + self.decay * K.cast(self.iterations, K.floatx())))
pass
t = K.cast(self.iterations + 1, K.floatx())
lr_t = lr * K.sqrt(1. - K.pow(self.beta_2, t)) / (1. - K.pow(self.beta_1, t))
shapes = [K.get_variable_shape(p) for p in params]
ms = [K.zeros(shape) for shape in shapes]
vs = [K.zeros(shape) for shape in shapes]
self.weights = [self.iterations] + ms + vs
for p, g, m, v in zip(params, grads, ms, vs):
# if a weight tensor (len > 1) use weight normalized parameterization
# this is the only part changed w.r.t. keras.optimizers.Adam
ps = K.get_variable_shape(p)
if len(ps)>1:
# get weight normalization parameters
V, V_norm, V_scaler, g_param, grad_g, grad_V = get_weightnorm_params_and_grads(p, g)
# Adam containers for the 'g' parameter
V_scaler_shape = K.get_variable_shape(V_scaler)
m_g = K.zeros(V_scaler_shape)
v_g = K.zeros(V_scaler_shape)
# update g parameters
m_g_t = (self.beta_1 * m_g) + (1. - self.beta_1) * grad_g
v_g_t = (self.beta_2 * v_g) + (1. - self.beta_2) * K.square(grad_g)
new_g_param = g_param - lr_t * m_g_t / (K.sqrt(v_g_t) + self.epsilon)
self.updates.append(K.update(m_g, m_g_t))
self.updates.append(K.update(v_g, v_g_t))
# update V parameters
m_t = (self.beta_1 * m) + (1. - self.beta_1) * grad_V
v_t = (self.beta_2 * v) + (1. - self.beta_2) * K.square(grad_V)
new_V_param = V - lr_t * m_t / (K.sqrt(v_t) + self.epsilon)
self.updates.append(K.update(m, m_t))
self.updates.append(K.update(v, v_t))
# if there are constraints we apply them to V, not W
if getattr(p, 'constraint', None) is not None:
new_V_param = p.constraint(new_V_param)
pass
# wn param updates --> W updates
add_weightnorm_param_updates(self.updates, new_V_param, new_g_param, p, V_scaler)
pass
else: # do optimization normally
m_t = (self.beta_1 * m) + (1. - self.beta_1) * g
v_t = (self.beta_2 * v) + (1. - self.beta_2) * K.square(g)
p_t = p - lr_t * m_t / (K.sqrt(v_t) + self.epsilon)
self.updates.append(K.update(m, m_t))
self.updates.append(K.update(v, v_t))
new_p = p_t
# apply constraints
if getattr(p, 'constraint', None) is not None:
new_p = p.constraint(new_p)
pass
self.updates.append(K.update(p, new_p))
pass
pass
return self.updates
pass
def get_weightnorm_params_and_grads(p, g):
ps = K.get_variable_shape(p)
# construct weight scaler: V_scaler = g/||V||
V_scaler_shape = (ps[-1],) # assumes we're using tensorflow!
V_scaler = K.ones(V_scaler_shape) # init to ones, so effective parameters don't change
# get V parameters = ||V||/g * W
norm_axes = [i for i in range(len(ps) - 1)]
V = p / tf.reshape(V_scaler, [1] * len(norm_axes) + [-1])
# split V_scaler into ||V|| and g parameters
V_norm = tf.sqrt(tf.reduce_sum(tf.square(V), norm_axes))
g_param = V_scaler * V_norm
# get grad in V,g parameters
grad_g = tf.reduce_sum(g * V, norm_axes) / V_norm
grad_V = tf.reshape(V_scaler, [1] * len(norm_axes) + [-1]) * \
(g - tf.reshape(grad_g / V_norm, [1] * len(norm_axes) + [-1]) * V)
return V, V_norm, V_scaler, g_param, grad_g, grad_V
def add_weightnorm_param_updates(updates, new_V_param, new_g_param, W, V_scaler):
ps = K.get_variable_shape(new_V_param)
norm_axes = [i for i in range(len(ps) - 1)]
# update W and V_scaler
new_V_norm = tf.sqrt(tf.reduce_sum(tf.square(new_V_param), norm_axes))
new_V_scaler = new_g_param / new_V_norm
new_W = tf.reshape(new_V_scaler, [1] * len(norm_axes) + [-1]) * new_V_param
updates.append(K.update(W, new_W))
updates.append(K.update(V_scaler, new_V_scaler))
pass
def data_based_init(model, input):
# input can be dict, numpy array, or list of numpy arrays
if type(input) is dict:
feed_dict = input
pass
elif type(input) is list:
feed_dict = {tf_inp: np_inp for tf_inp,np_inp in zip(model.inputs,input)}
pass
else:
feed_dict = {model.inputs[0]: input}
pass
# add learning phase if required
if model.uses_learning_phase and K.learning_phase() not in feed_dict:
feed_dict.update({K.learning_phase(): 1})
pass
# get all layer name, output, weight, bias tuples
layer_output_weight_bias = []
for l in model.layers:
trainable_weights = l.trainable_weights
if len(trainable_weights) == 2:
assert(l.built)
W,b = trainable_weights
layer_output_weight_bias.append((l.name,l.get_output_at(0),W,b)) # if more than one node, only use the first
pass
pass
# iterate over our list and do data dependent init
sess = K.get_session()
pbar = tqdm(layer_output_weight_bias)
for l,o,W,b in pbar:
pbar.set_description(f"Init layer {l}")
m,v = tf.nn.moments(o, [i for i in range(len(o.get_shape())-1)])
s = tf.sqrt(v + 1e-10)
W_updated = W/tf.reshape(s,[1]*(len(W.get_shape())-1)+[-1])
updates = tf.group(W.assign(W_updated), b.assign((b-m)/s))
sess.run(updates, feed_dict)
pass
pass
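# --- Illustrative sketch (not part of the original module) ---
# Weight normalization reparameterizes each weight tensor as W = g * V / ||V||,
# which is what get_weightnorm_params_and_grads() and add_weightnorm_param_updates()
# implement above with Keras/TensorFlow ops. The NumPy check below only demonstrates
# that identity; the array values are made up for the example.
if __name__ == "__main__":
    import numpy as np
    V = np.array([[3.0, 1.0],
                  [4.0, 1.0]])              # direction parameters (in_dim x out_dim)
    g = np.array([10.0, 2.0])               # one scale per output unit
    V_norm = np.sqrt((V ** 2).sum(axis=0))  # ||V|| per output unit -> [5.0, 1.41...]
    W = g * V / V_norm                      # effective weights; first column is [6.0, 8.0]
    print(W)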
|
py | 7df83d505a3d8f8dc4651c2627619581bac18e31 | from flask import Flask, render_template, request, escape, session, copy_current_request_context
from annotation import search4letters
from DBcm import UseDatabase, ConnectionError, CredentialsError, SQLError
from checker import check_logged_in
from threading import Thread
from time import sleep
app = Flask(__name__)
app.secret_key = 'SuperSecretKeyByRaman'
app.config['dbConfig'] = {'host': '127.0.0.1',
'user': 'vsearch',
'password': 'tmm0gqgB1!',
'database': 'vsearchlogDB', }
# def check_logged_in() -> bool:
# if 'logged_in' in session:
# return True
# return False
@app.route('/search4', methods=['POST'])
def do_search() -> 'html':
phrase = request.form['phrase']
letters = request.form['letters']
results = str(search4letters(phrase, letters))
title = 'Here are your results'
@copy_current_request_context
def log_request(req: 'flask_request', res: str) -> None:
sleep(15)
with UseDatabase(app.config['dbConfig']) as cursor:
_SQL = """insert into log
(phrase, letters, ip, browser_string, results)
values
(%s, %s, %s, %s, %s)"""
cursor.execute(_SQL, (req.form['phrase'],
req.form['letters'],
req.remote_addr,
req.user_agent.browser,
res,))
try:
# log_request(request, results)
t = Thread(target=log_request, args=(request, results))
t.start()
except Exception as err:
print('***** Logging failed with this error:', str(err))
return render_template('results.html',
the_title=title,
the_phrase=phrase,
the_letters=letters,
the_results=results
)
@app.route('/')
@app.route('/entry')
def entry_page() -> 'html':
return render_template('entry.html', the_title='Welcome to test page')
@app.route('/viewlog')
def view_the_log() -> 'html':
try:
"""DIsplay the contents of the log file as a HTML table."""
with UseDatabase(app.config['dbConfig']) as cursor:
_SQL = """select phrase, letters, ip, browser_string, results from log"""
cursor.execute(_SQL)
contents = cursor.fetchall()
titles = ('Phrase', 'Letters', 'Remote_addr', 'User_agent', 'Results')
return render_template('viewlog.html',
the_title='View Log',
the_row_titles=titles,
the_data=contents, )
except ConnectionError as err:
print('Is your database switched on? Errors:', str(err))
except CredentialsError as err:
print('User-id/Password issues. Errors:', str(err))
except SQLError as err:
print('Is your query correct? Errors:', str(err))
except Exception as err:
print('Something went wrong:', str(err))
return 'Error'
@app.route('/setuser/<user>')
def setuser(user: str) -> str:
session['user'] = user
return 'User value set to: ' + session['user']
@app.route('/getuser')
def getuser() -> str:
return 'User value currently is set to: ' + session['user']
@app.route('/login')
def login():
session['logged_in'] = True
return 'You are now logged in.'
@app.route('/logout')
def logout():
session.pop('logged_in')
return 'You are now logged out.'
@app.route('/page1')
@check_logged_in
def page1() -> str:
return 'This is page 1'
@app.route('/page2')
@check_logged_in
def page2() -> str:
return 'This is page 2'
@app.route('/page3')
@check_logged_in
def page3() -> str:
return 'This is page 3'
# def execute_slowly(glacial, plogging, leaden):
# return
#
#
# t = Thread(target=execute_slowly, args=(glacial, plogging, leaden))
if __name__ == '__main__':
app.run(debug=True)
|
py | 7df83e0f6179432555c3a8a788eacce6ca6c617f | # -*- coding:utf-8 -*-
import time,random,re,sys,json
from util import *
from driver import *
import requests,json,sys
from pyquery import PyQuery as pq
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.remote.command import Command
driver = getChromeWebDriver()
# print('Opening the Dianping mobile homepage...')
driver.get('https://m.dianping.com/')
# time.sleep(1)
# driver.find_element_by_css_selector('#index-kw').send_keys(u'大众点评')
# driver.find_element_by_css_selector('#index-bn').click()
# time.sleep(3)
# driver.find_elements_by_partial_link_text(u'北京美食_生活_团购_旅游_电影_优惠券 - 大众点评网官网')[0].click()
time.sleep(1)
driver.find_element_by_css_selector('body > div.J_header > header > div.search.J_search_trigger').click()
time.sleep(1)
driver.find_element_by_css_selector('body > div.J_search_container.search_container > form > div.head_cnt > div > input.J_search_input').send_keys(u'千岛湖')
print('Entering "Qiandao Lake"...')
driver.find_element_by_css_selector('body > div.J_search_container.search_container > form > div.head_cnt > div > input.J_search_input').send_keys(Keys.ENTER)
print('Searching for "Qiandao Lake"...')
driver.find_element_by_css_selector('#app > div > div.J_searchList > nav > div > a:nth-child(2)').click()
scrollbar = driver.find_element_by_css_selector('#app > div > div.J_searchList > nav > section:nth-child(3) > div.menu.main > div.iScrollVerticalScrollbar.iScrollLoneScrollbar > div')
around = driver.find_element_by_css_selector('#app > div > div.J_searchList > nav > section:nth-child(3) > div.menu.main > div:nth-child(1) > div:nth-child(9)')
ActionChains(driver).click_and_hold(scrollbar).move_by_offset(0,-100).click(scrollbar).perform()
driver.find_element_by_css_selector('#app > div > div.J_searchList > nav > section:nth-child(3) > div.menu.main > div:nth-child(1) > div:nth-child(9)').click()
# print('Selecting "nearby trips"...')
driver.find_element_by_link_text(u'景点').click()
# print('Selecting "attractions"...')
while(True):
try:
driver.find_element_by_css_selector('#app > div > div.J_footer')
break
except NoSuchElementException as e:
# print(e)
# print('Scrolling down the shop listing page...')
DropDown(driver)
getShopInfo(driver)
driver.quit() |
py | 7df83e18950448c0acadad299520db8e18b96b5a | import Bio.Data.CodonTable
class RunnerFilterCodonStop(object):
def __init__(self, variant_read_count_df):
"""Carries out a chimera analysis"""
self.variant_read_count_df = variant_read_count_df
self.genetic_code = None
def get_variant_read_count_delete_df(
self,
variant_df,
genetic_code,
skip_filter_codon_stop):
variant_read_count_delete_df = self.variant_read_count_df.copy()
variant_read_count_delete_df['filter_delete'] = False
if not skip_filter_codon_stop:
variant_has_stop_codon_df = self.annotate_stop_codon_count(
variant_df, genetic_code)
variants_with_stop_codons_list = variant_has_stop_codon_df.index[variant_has_stop_codon_df['has_stop_codon'] == 1].tolist(
)
variant_read_count_delete_df = self.variant_read_count_df.copy()
variant_read_count_delete_df['filter_delete'] = False
variant_read_count_delete_df.loc[variant_read_count_delete_df.variant_id.isin(
variants_with_stop_codons_list), 'filter_delete'] = True
return variant_read_count_delete_df
def annotate_stop_codon_count(self, variant_df, genetic_code):
"""Takes a stats_df of variants and add the number of stop codons for each open reading frame
Returns
-------
pandas DF
VariantReadCountLikeModel are id, sequence, codon_stop_nb_frame1, codon_stop_nb_frame2, codon_stop_nb_frame3"""
# For safety, convert to upper
variant_df['sequence'] = variant_df['sequence'].str.upper()
variant_has_stop_codon_df = variant_df.copy()
variant_has_stop_codon_df['has_stop_codon'] = 0
for row in variant_df.iterrows():
id = row[0]
sequence = row[1].sequence
#
variant_has_stop_codon_frame1 = self.seq_has_codon_stop(sequence=sequence, frame=1, genetic_code=genetic_code)
variant_has_stop_codon_frame2 = self.seq_has_codon_stop(sequence=sequence, frame=2, genetic_code=genetic_code)
variant_has_stop_codon_frame3 = self.seq_has_codon_stop(sequence=sequence, frame=3, genetic_code=genetic_code)
# Check if all frames have stop codons
if variant_has_stop_codon_frame1 and variant_has_stop_codon_frame2 and variant_has_stop_codon_frame3:
variant_has_stop_codon_df.loc[variant_has_stop_codon_df.index ==
id, 'has_stop_codon'] = 1
return variant_has_stop_codon_df
def seq_has_codon_stop(self, sequence, frame, genetic_code):
"""Takes one sequence and returns whether it has a stop codon or not
Parameters
----------
sequence: str
DNA sequence in upper case
frame : int
Open reading frame index, 1,2,3
Returns
-------
bool
Has stop codon
"""
if self.count_sequence_codon_stops(sequence, frame, genetic_code) > 0:
return True
else:
return False
def count_sequence_codon_stops(self, sequence, frame, genetic_code):
"""Takes one sequence and counts the number of stop codons for a given open reading frame and genetic genetic_code
Parameters
----------
sequence: str
DNA sequence in upper case
frame : int
Open reading frame index, 1,2,3
genetic_code : int
NCBI genetic_codes: https://www.ncbi.nlm.nih.gov/Taxonomy/Utils/wprintgc.cgi
Returns
-------
int
Number of stop codons in the given open reading frame
"""
# genetic table number 5: 'stop_codons': ['TAA', 'UAA', 'TAG', 'UAG']
stop_codon_list = Bio.Data.CodonTable.generic_by_id[genetic_code].__dict__[
'stop_codons']
codon_list = [sequence[i:i + 3]
for i in range(frame - 1, len(sequence), 3)]
codon_stop_count = sum([codon_list.count(stop_codon)
for stop_codon in stop_codon_list])
return codon_stop_count
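# --- Hedged usage sketch (not part of the original module) ---
# Calls count_sequence_codon_stops() directly; the empty read-count DataFrame and
# the genetic code id (5, whose stop codons are listed in the comment above) are
# example values, not taken from the real pipeline.
if __name__ == "__main__":
    import pandas
    runner = RunnerFilterCodonStop(variant_read_count_df=pandas.DataFrame())
    # 'TAA' is the second codon of frame 1, so exactly one stop codon is counted
    print(runner.count_sequence_codon_stops("ATGTAAGGG", frame=1, genetic_code=5))  # -> 1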
|
py | 7df83e5cfa0a2a8330c2a8cbe04b259117c3faec | from crispy_forms.helper import FormHelper
from crispy_forms_foundation.layout import (
ButtonHolder,
Column,
Fieldset,
Layout,
Row,
Submit,
)
from dal import autocomplete
from django import forms
from django.core.exceptions import ValidationError
from django.forms import RadioSelect
from . import const
from .models.uczelnia import RaportSlotowUczelnia
from bpp.const import PBN_MAX_ROK, PBN_MIN_ROK
from bpp.models import Autor, Uczelnia
from bpp.util import formdefaults_html_after, formdefaults_html_before, year_last_month
OUTPUT_FORMATS = [
("html", "wyświetl w przeglądarce"),
("xlsx", "Microsoft Excel (XLSX)"),
]
OUTPUT_FORMATS_WITH_PDF = OUTPUT_FORMATS + [
("pdf", "Portable Document Format (PDF)"),
]
class AutorRaportSlotowForm(forms.Form):
obiekt = forms.ModelChoiceField(
label="Autor",
queryset=Autor.objects.all(),
widget=autocomplete.ModelSelect2(url="bpp:public-autor-autocomplete"),
)
od_roku = forms.IntegerField(initial=year_last_month, min_value=2016)
do_roku = forms.IntegerField(initial=Uczelnia.objects.do_roku_default)
minimalny_pk = forms.IntegerField(label="Minimalna wartość PK pracy", initial=0)
dzialanie = forms.ChoiceField(
label="Wygeneruj",
choices=(
(
const.DZIALANIE_WSZYSTKO,
"prace autora z punktacją dla dyscyplin za dany okres",
),
(const.DZIALANIE_SLOT, "zbierz najlepsze prace do zadanej wielkości slotu"),
),
initial="wszystko",
widget=forms.RadioSelect,
)
slot = forms.DecimalField(
label="Zadana wielkość slotu",
required=False,
max_digits=8,
decimal_places=4,
max_value=20,
)
_export = forms.ChoiceField(
label="Format wyjściowy", choices=OUTPUT_FORMATS_WITH_PDF, required=True
)
def clean(self):
if "od_roku" in self.cleaned_data and "do_roku" in self.cleaned_data:
if self.cleaned_data["od_roku"] > self.cleaned_data["do_roku"]:
raise ValidationError(
{
"od_roku": ValidationError(
'Pole musi być większe lub równe jak pole "Do roku".',
code="od_do_zle",
)
}
)
if (
self.cleaned_data["dzialanie"] == const.DZIALANIE_WSZYSTKO
and "slot" in self.cleaned_data
and self.cleaned_data["slot"] is not None
):
raise ValidationError(
{
"slot": ValidationError(
"Gdy chcesz wygenerować wszystkie prace tego autora, pozostaw pole 'Slot' puste. ",
code="nie_podawaj_gdy_wszystko",
)
}
)
if self.cleaned_data["dzialanie"] == const.DZIALANIE_SLOT and (
"slot" not in self.cleaned_data
or ("slot" in self.cleaned_data and self.cleaned_data["slot"] is None)
or (
"slot" in self.cleaned_data
and self.cleaned_data["slot"] is not None
and self.cleaned_data["slot"] <= 0
)
):
raise ValidationError(
{
"slot": ValidationError(
"Podaj wartość slota do którego chcesz zbierać prace. Wartość musi być większa od zera. ",
code="podawaj_gdy_slot",
)
}
)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.form_class = "custom"
self.helper.form_action = "."
self.helper.layout = Layout(
Fieldset(
"Wybierz parametry",
formdefaults_html_before(self),
Row(Column("obiekt", css_class="large-12 small-12")),
Row(Column("dzialanie", css_class="large-12 small-12")),
Row(Column("slot", css_class="large-12 small-12")),
Row(
Column("od_roku", css_class="large-6 small-6"),
Column("do_roku", css_class="large-6 small-6"),
),
Row(Column("minimalny_pk")),
Row(Column("_export")),
formdefaults_html_after(self),
),
ButtonHolder(
Submit(
"submit",
"Pobierz raport",
css_id="id_submit",
css_class="submit button",
),
),
)
class UtworzRaportSlotowUczelniaForm(forms.ModelForm):
class Meta:
model = RaportSlotowUczelnia
fields = [
"od_roku",
"do_roku",
"akcja",
"slot",
"minimalny_pk",
"dziel_na_jednostki_i_wydzialy",
"pokazuj_zerowych",
]
def __init__(self, *args, **kwargs):
self.helper = FormHelper()
self.helper.form_class = "custom"
self.helper.form_action = "."
self.helper.layout = Layout(
Fieldset(
"Wybierz parametry",
formdefaults_html_before(self),
Row(
Column("od_roku", css_class="large-6 small-6"),
Column("do_roku", css_class="large-6 small-6"),
),
Row(Column("akcja", css_class="large-12 small-12")),
Row(Column("slot", css_class="large-12 small-12")),
Row(Column("minimalny_pk", css_class="large-12 small-12")),
Row(Column("dziel_na_jednostki_i_wydzialy")),
Row(Column("pokazuj_zerowych")),
formdefaults_html_after(self),
),
ButtonHolder(
Submit(
"submit",
"Utwórz raport",
css_id="id_submit",
css_class="submit button",
),
),
)
super().__init__(*args, **kwargs)
class ParametryRaportSlotowEwaluacjaForm(forms.Form):
od_roku = forms.IntegerField(
initial=Uczelnia.objects.do_roku_default, min_value=PBN_MIN_ROK
)
do_roku = forms.IntegerField(
initial=Uczelnia.objects.do_roku_default,
min_value=PBN_MIN_ROK,
max_value=PBN_MAX_ROK,
)
_export = forms.ChoiceField(
label="Format wyjściowy",
choices=OUTPUT_FORMATS,
required=True,
widget=RadioSelect,
initial="html",
)
upowaznienie_pbn = forms.NullBooleanField(
required=False,
# widget=RadioSelect,
)
def clean(self):
if "od_roku" in self.cleaned_data and "do_roku" in self.cleaned_data:
if self.cleaned_data["od_roku"] > self.cleaned_data["do_roku"]:
raise ValidationError(
{
"od_roku": ValidationError(
'Pole musi być większe lub równe jak pole "Do roku".',
code="od_do_zle",
)
}
)
def __init__(self, *args, **kwargs):
self.helper = FormHelper()
self.helper.form_class = "custom"
self.helper.form_action = "."
self.helper.layout = Layout(
Fieldset(
"Wybierz parametry",
formdefaults_html_before(self),
Row(
Column("od_roku", css_class="large-6 small-6"),
Column("do_roku", css_class="large-6 small-6"),
),
Row(Column("upowaznienie_pbn", css_class="large-12 small-12")),
Row(Column("_export")),
formdefaults_html_after(self),
),
ButtonHolder(
Submit(
"submit",
"Pobierz raport",
css_id="id_submit",
css_class="submit button",
),
),
)
super().__init__(*args, **kwargs)
|
py | 7df83eff3ad081476ffd0d6a6e6be57a00733172 | import dash
import dash_bootstrap_components as dbc
import dash_html_components as html
from dash.dependencies import Input, Output, State
# Minimal Dash app (assumed, not shown in the original snippet) so the
# @app.callback registrations below have an `app` to attach to.
app = dash.Dash(external_stylesheets=[dbc.themes.BOOTSTRAP])
collapses = html.Div(
[
dbc.Button(
"Toggle left", color="primary", id="left", className="mr-1"
),
dbc.Button(
"Toggle right", color="primary", id="right", className="mr-1"
),
dbc.Button("Toggle both", color="primary", id="both"),
dbc.Row(
[
dbc.Col(
dbc.Collapse(
dbc.Card("This is the left card.", body=True),
id="left-collapse",
)
),
dbc.Col(
dbc.Collapse(
dbc.Card("This is the right card!", body=True),
id="right-collapse",
)
),
],
className="mt-3",
),
]
)
@app.callback(
Output("left-collapse", "is_open"),
[Input("left", "n_clicks"), Input("both", "n_clicks")],
[State("left-collapse", "is_open")],
)
def toggle_left(n_left, n_both, is_open):
if n_left or n_both:
return not is_open
return is_open
@app.callback(
Output("right-collapse", "is_open"),
[Input("right", "n_clicks"), Input("both", "n_clicks")],
[State("right-collapse", "is_open")],
)
def toggle_right(n_right, n_both, is_open):
if n_right or n_both:
return not is_open
return is_open
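# --- Hedged usage sketch (not part of the original snippet) ---
# Minimal wiring to serve the layout defined above with the `app` the callbacks
# are registered against.
if __name__ == "__main__":
    app.layout = collapses
    app.run_server(debug=True)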
|
py | 7df83f0c319c05614e77c46081881e861230500b | from demo.models.function.non_null_count import NotNullCount # noqa
|
py | 7df83f6f28602e4f090729175b7904185424831e | from __future__ import print_function
import argparse
from math import log10
import os
import torch
import torch.nn as nn
import torch.optim as optim
import torch.backends.cudnn as cudnn
from torch.autograd import Variable
from torch.utils.data import DataLoader
from eval import testing_data_loader
from tishkovets_cnn import Net as TISHKOVETS_NET
from dbpn_v1 import Net as DBPNLL
from dbpns import Net as DBPNS
from dbpn_iterative import Net as DBPNITER
from data import get_training_set
import pdb
import socket
import time
# Training settings
parser = argparse.ArgumentParser(description='PyTorch Super Res Example')
parser.add_argument('--upscale_factor', type=int, default=8, help="super resolution upscale factor")
parser.add_argument('--batchSize', type=int, default=1, help='training batch size')
parser.add_argument('--nEpochs', type=int, default=2000, help='number of epochs to train for')
parser.add_argument('--snapshots', type=int, default=50, help='Snapshots')
parser.add_argument('--start_iter', type=int, default=1, help='Starting Epoch')
parser.add_argument('--lr', type=float, default=1e-4, help='Learning Rate. Default=1e-4')
parser.add_argument('--gpu_mode', type=bool, default=False)
parser.add_argument('--threads', type=int, default=1, help='number of threads for data loader to use')
parser.add_argument('--seed', type=int, default=123, help='random seed to use. Default=123')
parser.add_argument('--gpus', default=1, type=int, help='number of gpu')
parser.add_argument('--data_dir', type=str, default='')
parser.add_argument('--data_augmentation', type=bool, default=True)
parser.add_argument('--hr_train_dataset', type=str, default='div2k/DIV2K_train_HR')
parser.add_argument('--model_type', type=str, default='DBPN')
parser.add_argument('--residual', type=bool, default=True)
parser.add_argument('--patch_size', type=int, default=40, help='Size of cropped HR image')
parser.add_argument('--pretrained_sr', default='MIX2K_LR_aug_x4dl10DBPNITERtpami_epoch_399.pth', help='sr pretrained base model')
parser.add_argument('--pretrained', type=bool, default=False)
parser.add_argument('--save_folder', default='weights/', help='Location to save checkpoint models')
parser.add_argument('--prefix', default='tpami_residual_filter8', help='Location to save checkpoint models')
opt = parser.parse_args()
gpus_list = range(opt.gpus)
hostname = str(socket.gethostname())
cudnn.benchmark = True
print(opt)
def train(epoch):
epoch_loss = 0
model.train()
for iteration, batch in enumerate(training_data_loader, 1):
input, target, bicubic = Variable(batch[0]), Variable(batch[1]), Variable(batch[2])
if cuda:
input = input.cuda(gpus_list[0])
target = target.cuda(gpus_list[0])
bicubic = bicubic.cuda(gpus_list[0])
optimizer.zero_grad()
t0 = time.time()
prediction = model(input)
if opt.residual:
prediction = prediction + bicubic
loss = criterion(prediction, target)
t1 = time.time()
epoch_loss += loss.data
loss.backward()
optimizer.step()
print("===> Epoch[{}]({}/{}): Loss: {:.4f} || Timer: {:.4f} sec.".format(epoch, iteration, len(training_data_loader), loss.data, (t1 - t0)))
print("===> Epoch {} Complete: Avg. Loss: {:.4f}".format(epoch, epoch_loss / len(training_data_loader)))
def test():
avg_psnr = 0
for batch in testing_data_loader:
input, target = Variable(batch[0]), Variable(batch[1])
if cuda:
input = input.cuda(gpus_list[0])
target = target.cuda(gpus_list[0])
prediction = model(input)
mse = criterion(prediction, target)
psnr = 10 * log10(1 / mse.data[0])
avg_psnr += psnr
print("===> Avg. PSNR: {:.4f} dB".format(avg_psnr / len(testing_data_loader)))
def print_network(net):
num_params = 0
for param in net.parameters():
num_params += param.numel()
print(net)
print('Total number of parameters: %d' % num_params)
def checkpoint(epoch):
model_out_path = opt.save_folder+opt.hr_train_dataset+hostname+opt.model_type+opt.prefix+"_epoch_{}.pth".format(epoch)
torch.save(model.state_dict(), model_out_path)
print("Checkpoint saved to {}".format(model_out_path))
cuda = opt.gpu_mode
if cuda and not torch.cuda.is_available():
raise Exception("No GPU found, please run without --cuda")
torch.manual_seed(opt.seed)
if cuda:
torch.cuda.manual_seed(opt.seed)
print('===> Loading datasets')
train_set = get_training_set(opt.data_dir, opt.hr_train_dataset, opt.upscale_factor, opt.patch_size, opt.data_augmentation)
training_data_loader = DataLoader(dataset=train_set, num_workers=opt.threads, batch_size=opt.batchSize, shuffle=True)
print('===> Building model ', opt.model_type)
if opt.model_type == 'DBPNLL':
model = DBPNLL(num_channels=3, base_filter=64, feat = 256, num_stages=10, scale_factor=opt.upscale_factor)
elif opt.model_type == 'DBPN-RES-MR64-3':
model = DBPNITER(num_channels=3, base_filter=64, feat = 256, num_stages=3, scale_factor=opt.upscale_factor)
else:
model = TISHKOVETS_NET(num_channels=3, base_filter=64, feat = 256)
model = torch.nn.DataParallel(model, device_ids=gpus_list)
criterion = nn.L1Loss()
print('---------- Networks architecture -------------')
print_network(model)
print('----------------------------------------------')
if opt.pretrained:
model_name = os.path.join(opt.save_folder + opt.pretrained_sr)
if os.path.exists(model_name):
#model= torch.load(model_name, map_location=lambda storage, loc: storage)
model.load_state_dict(torch.load(model_name, map_location=lambda storage, loc: storage))
print('Pre-trained SR model is loaded.')
if cuda:
model = model.cuda(gpus_list[0])
criterion = criterion.cuda(gpus_list[0])
optimizer = optim.Adam(model.parameters(), lr=opt.lr, betas=(0.9, 0.999), eps=1e-8)
for epoch in range(opt.start_iter, opt.nEpochs + 1):
train(epoch)  # run one training epoch before evaluating
test()
# learning rate is decayed by a factor of 10 every half of total epochs
if (epoch+1) % (opt.nEpochs/2) == 0:
for param_group in optimizer.param_groups:
param_group['lr'] /= 10.0
print('Learning rate decay: lr={}'.format(optimizer.param_groups[0]['lr']))
if (epoch+1) % (opt.snapshots) == 0:
checkpoint(epoch)
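# --- Hedged usage note (not part of the original script) ---
# Example invocation using only the arguments defined above; the script name and
# paths are placeholders.
#   python train.py --data_dir ./datasets --hr_train_dataset div2k/DIV2K_train_HR \
#       --upscale_factor 4 --batchSize 4 --nEpochs 400 --model_type DBPN-RES-MR64-3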
|
py | 7df83f80235bed1bfe338ba68cefbb476e431004 | from typing import TYPE_CHECKING, List
from django.conf import settings
from django.core.checks import Error, register
from django.utils.module_loading import import_string
if TYPE_CHECKING:
from .base_plugin import BasePlugin
@register()
def check_extensions(app_configs, **kwargs):
"""Confirm a correct import of plugins and manager."""
errors = []
check_manager(errors)
plugins = settings.PLUGINS or []
for plugin_path in plugins:
check_single_plugin(plugin_path, errors)
return errors
def check_manager(errors: List[Error]):
if not hasattr(settings, "EXTENSIONS_MANAGER") or not settings.EXTENSIONS_MANAGER:
errors.append(Error("Settings should contain EXTENSIONS_MANAGER env"))
return
try:
import_string(settings.EXTENSIONS_MANAGER)
except ImportError:
errors.append(
Error(
"Extension Manager path: %s doesn't exist" % settings.EXTENSIONS_MANAGER
)
)
def check_single_plugin(plugin_path: str, errors: List[Error]):
if not plugin_path:
errors.append(Error("Wrong plugin_path %s" % plugin_path))
return
try:
plugin_class = import_string(plugin_path)
except ImportError:
errors.append(Error("Plugin with path: %s doesn't exist" % plugin_path))
if not errors:
check_plugin_name(plugin_class, errors)
def check_plugin_name(plugin_class: "BasePlugin", errors: List[Error]):
if not getattr(plugin_class, "PLUGIN_NAME", None):
errors.append(
Error(
"Missing field PLUGIN_NAME for plugin - %s"
% plugin_class.__name__ # type: ignore
)
)
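# --- Hedged usage sketch (not part of the original module) ---
# A plugin passes check_plugin_name() as long as its class defines a non-empty
# PLUGIN_NAME; the class and dotted paths below are made-up examples.
#
#   class SamplePlugin(BasePlugin):
#       PLUGIN_NAME = "Sample plugin"
#
#   # settings (example values):
#   # PLUGINS = ["myproject.plugins.sample.SamplePlugin"]
#   # EXTENSIONS_MANAGER = "myproject.extensions.manager.ExtensionsManager"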
|
py | 7df84008af4a5c3297d4107e13fe7c3047732b4e | import unittest
from transformers_keras.datapipe.sc_dataset import DatasetForSequenceClassification
class DatasetTest(unittest.TestCase):
"""Dataset test."""
def test_sequence_classification_dataset_examples(self):
print()
print("====from_jsonl_files")
d = DatasetForSequenceClassification.from_jsonl_files(
"testdata/sequence_classify.jsonl", vocab_file="testdata/vocab.bert.txt", batch_size=2
)
print(next(iter(d)))
print("====jsonl_to_examples")
examples = DatasetForSequenceClassification.jsonl_to_examples(
"testdata/sequence_classify.jsonl", vocab_file="testdata/vocab.bert.txt"
)
for i in range(2):
print(examples[i])
print("====from_examples")
d = DatasetForSequenceClassification.from_examples(examples, batch_size=2)
print(next(iter(d)))
print("====examples_to_tfrecord")
DatasetForSequenceClassification.examples_to_tfrecord(
examples, output_files=["testdata/sequence_classify.tfrecord"]
)
print("====from_tfrecord_files")
d = DatasetForSequenceClassification.from_tfrecord_files("testdata/sequence_classify.tfrecord", batch_size=2)
print(next(iter(d)))
if __name__ == "__main__":
unittest.main()
|
py | 7df8403798269852e7f2c082bc7667fdc4d306ee | # -*- encoding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
{
'name': 'Bootswatch Theme',
'description': 'Bootswatch themes',
'category': 'Theme',
'sequence': 900,
'version': '1.0',
'depends': ['website', 'website_theme_install'],
'data': [
'views/theme_bootswatch_templates.xml',
],
'images': [
'static/description/bootswatch.png',
'static/description/bootswatch_screenshot.jpg',
],
'application': False,
}
|
py | 7df84148d1f00a91bce2d9c8cd23f19356224a15 | # This module is automatically generated by autogen.sh. DO NOT EDIT.
from . import _OnPrem
class _Ci(_OnPrem):
_type = "ci"
_icon_dir = "resources/onprem/ci"
class Circleci(_Ci):
_icon = "circleci.png"
class Concourseci(_Ci):
_icon = "concourseci.png"
class Gitlabci(_Ci):
_icon = "gitlabci.png"
class Jenkins(_Ci):
_icon = "jenkins.png"
class Teamcity(_Ci):
_icon = "teamcity.png"
class Travisci(_Ci):
_icon = "travisci.png"
class Zuulci(_Ci):
_icon = "zuulci.png"
# Aliases
CircleCI = Circleci
ConcourseCI = Concourseci
GitlabCI = Gitlabci
TravisCI = Travisci
TC = Teamcity
ZuulCI = Zuulci
|
py | 7df842ed005e29dc22094895579288f65f349c2b | """Test runway.core.providers.aws.s3._bucket."""
# pylint: disable=no-self-use
# pyright: basic
from __future__ import annotations
import logging
from http import HTTPStatus
from typing import TYPE_CHECKING
from mock import MagicMock
from runway.core.providers.aws import BaseResponse
from runway.core.providers.aws.s3 import Bucket
if TYPE_CHECKING:
from pytest import LogCaptureFixture
from pytest_mock import MockerFixture
from .....factories import MockRunwayContext
MODULE = "runway.core.providers.aws.s3._bucket"
class TestBucket:
"""Test runway.core.providers.aws.s3._bucket.Bucket."""
def test_client(self) -> None:
"""Test client."""
mock_ctx = MagicMock()
mock_session = MagicMock()
mock_client = MagicMock()
mock_ctx.get_session.return_value = mock_session
mock_session.client.return_value = mock_client
bucket = Bucket(mock_ctx, "test-bucket", region="us-west-2")
assert bucket.client == mock_client
mock_ctx.get_session.assert_called_once_with(region="us-west-2")
mock_session.client.assert_called_once_with("s3")
def test_create(self, runway_context: MockRunwayContext) -> None:
"""Test create."""
stubber = runway_context.add_stubber("s3")
bucket = Bucket(runway_context, "test-bucket")
stubber.add_client_error(
"head_bucket",
"NoSuchBucket",
"Not Found",
404,
expected_params={"Bucket": "test-bucket"},
)
stubber.add_response(
"create_bucket",
{"Location": "us-east-1"},
{"ACL": "private", "Bucket": "test-bucket"},
)
with stubber:
assert bucket.create(ACL="private")
stubber.assert_no_pending_responses()
def test_create_exists(
self, caplog: LogCaptureFixture, runway_context: MockRunwayContext
) -> None:
"""Test create with exists=True."""
caplog.set_level(logging.DEBUG, logger="runway.core.providers.aws.s3.bucket")
stubber = runway_context.add_stubber("s3", region="us-west-2")
bucket = Bucket(runway_context, "test-bucket", region="us-west-2")
stubber.add_response(
"head_bucket",
{"ResponseMetadata": {"HostId": "test", "HTTPStatusCode": 200}},
{"Bucket": "test-bucket"},
)
with stubber:
assert not bucket.create()
stubber.assert_no_pending_responses()
assert "bucket already exists" in "\n".join(caplog.messages)
def test_create_forbidden(
self, caplog: LogCaptureFixture, runway_context: MockRunwayContext
) -> None:
"""Test create with forbidden=True."""
caplog.set_level(logging.DEBUG, logger="runway.core.providers.aws.s3.bucket")
stubber = runway_context.add_stubber("s3", region="us-west-2")
bucket = Bucket(runway_context, "test-bucket", region="us-west-2")
stubber.add_client_error(
"head_bucket",
"AccessDenied",
"Forbidden",
403,
expected_params={"Bucket": "test-bucket"},
)
with stubber:
assert not bucket.create()
stubber.assert_no_pending_responses()
assert "access denied" in "\n".join(caplog.messages)
def test_create_us_west_2(self, runway_context: MockRunwayContext) -> None:
"""Test create with region=us-west-2."""
stubber = runway_context.add_stubber("s3", region="us-west-2")
bucket = Bucket(runway_context, "test-bucket", region="us-west-2")
stubber.add_client_error(
"head_bucket",
"NoSuchBucket",
"The specified bucket does not exist.",
404,
expected_params={"Bucket": "test-bucket"},
)
stubber.add_response(
"create_bucket",
{"Location": "us-east-1"},
{
"Bucket": "test-bucket",
"CreateBucketConfiguration": {"LocationConstraint": "us-west-2"},
},
)
with stubber:
assert bucket.create()
stubber.assert_no_pending_responses()
def test_enable_versioning(self, runway_context: MockRunwayContext) -> None:
"""Test enable_versioning."""
stubber = runway_context.add_stubber("s3")
bucket = Bucket(runway_context, "test-bucket")
stubber.add_response(
"get_bucket_versioning",
{"Status": "Suspended", "MFADelete": "Enabled"},
{"Bucket": "test-bucket"},
)
stubber.add_response(
"put_bucket_versioning",
{},
{
"Bucket": "test-bucket",
"VersioningConfiguration": {
"Status": "Enabled",
"MFADelete": "Enabled",
},
},
)
with stubber:
bucket.enable_versioning()
stubber.assert_no_pending_responses()
def test_enable_versioning_skipped(
self, caplog: LogCaptureFixture, runway_context: MockRunwayContext
) -> None:
"""Test enable_versioning with Status=Enabled."""
caplog.set_level(logging.DEBUG, logger="runway.core.providers.aws.s3.bucket")
stubber = runway_context.add_stubber("s3")
bucket = Bucket(runway_context, "test-bucket")
stubber.add_response(
"get_bucket_versioning", {"Status": "Enabled"}, {"Bucket": "test-bucket"}
)
with stubber:
bucket.enable_versioning()
stubber.assert_no_pending_responses()
assert (
'did not modify versioning policy for bucket "test-bucket"; already enabled'
) in caplog.messages
def test_exists(
self, mocker: MockerFixture, runway_context: MockRunwayContext
) -> None:
"""Test not_found."""
mock_head = mocker.patch.object(Bucket, "head", spec=BaseResponse())
bucket = Bucket(runway_context, "test-bucket")
mock_head.metadata.not_found = True
assert not bucket.exists # initial value
mock_head.metadata.not_found = False
assert not bucket.exists # cached value
del bucket.not_found
assert bucket.exists # updated value
def test_forbidden(
self, mocker: MockerFixture, runway_context: MockRunwayContext
) -> None:
"""Test forbidden."""
mock_head = mocker.patch.object(Bucket, "head", spec=BaseResponse())
bucket = Bucket(runway_context, "test-bucket")
mock_head.metadata.forbidden = True
assert bucket.forbidden # initial value
mock_head.metadata.forbidden = False
assert bucket.forbidden # cached value
del bucket.forbidden
assert not bucket.forbidden # updated value
def test_format_bucket_path_uri(self) -> None:
"""Test format_bucket_path_uri."""
uri = "s3://test-bucket"
bucket = Bucket(MagicMock(), uri[5:])
assert bucket.format_bucket_path_uri() == uri
assert bucket.format_bucket_path_uri(key="test.txt") == f"{uri}/test.txt"
assert (
bucket.format_bucket_path_uri(key="test.txt", prefix="prefix")
== f"{uri}/prefix/test.txt"
)
assert bucket.format_bucket_path_uri(prefix="prefix") == f"{uri}/prefix"
def test_get_versioning(self, runway_context: MockRunwayContext) -> None:
"""Test get_versioning."""
stubber = runway_context.add_stubber("s3")
bucket = Bucket(runway_context, "test-bucket")
response = {"Status": "Enabled", "MFADelete": "Enabled"}
stubber.add_response(
"get_bucket_versioning", response, {"Bucket": "test-bucket"}
)
with stubber:
assert bucket.get_versioning() == response
stubber.assert_no_pending_responses()
def test_head(self, runway_context: MockRunwayContext) -> None:
"""Test head."""
stubber = runway_context.add_stubber("s3")
bucket = Bucket(runway_context, "test-bucket")
stubber.add_response(
"head_bucket",
{"ResponseMetadata": {"HostId": "test", "HTTPStatusCode": 200}},
{"Bucket": "test-bucket"},
)
with stubber:
assert bucket.head.metadata.host_id == "test"
assert bucket.head.metadata.http_status_code == HTTPStatus.OK
stubber.assert_no_pending_responses()
def test_head_clienterror(
self, caplog: LogCaptureFixture, runway_context: MockRunwayContext
) -> None:
"""Test head with ClientError."""
caplog.set_level(logging.DEBUG, logger="runway.core.providers.aws.s3.bucket")
stubber = runway_context.add_stubber("s3")
bucket = Bucket(runway_context, "test-bucket")
stubber.add_client_error(
"head_bucket",
"AccessDenied",
"Forbidden",
403,
expected_params={"Bucket": "test-bucket"},
)
with stubber:
assert bucket.head.metadata.http_status_code == HTTPStatus.FORBIDDEN
stubber.assert_no_pending_responses()
assert "received an error from AWS S3" in "\n".join(caplog.messages)
def test_not_found(
self, mocker: MockerFixture, runway_context: MockRunwayContext
) -> None:
"""Test not_found."""
mock_head = mocker.patch.object(Bucket, "head", spec=BaseResponse())
bucket = Bucket(runway_context, "test-bucket")
mock_head.metadata.not_found = True
assert bucket.not_found # initial value
mock_head.metadata.not_found = False
assert bucket.not_found # cached value
del bucket.not_found
assert not bucket.not_found # updated value
def test_sync_from_local(
self, mocker: MockerFixture, runway_context: MockRunwayContext
) -> None:
"""Test sync_from_local."""
mock_handler = MagicMock()
mock_handler_class = mocker.patch(
f"{MODULE}.S3SyncHandler", return_value=mock_handler
)
runway_context.add_stubber("s3")
src_directory = "/test/"
obj = Bucket(runway_context, "test-bucket")
assert not obj.sync_from_local(
src_directory, delete=True, exclude=["something"], prefix="prefix"
)
mock_handler_class.assert_called_once_with(
context=runway_context,
delete=True,
dest="s3://test-bucket/prefix",
exclude=["something"],
follow_symlinks=False,
include=None,
session=obj.session,
src=src_directory,
)
mock_handler.run.assert_called_once_with()
def test_sync_to_local(
self, mocker: MockerFixture, runway_context: MockRunwayContext
) -> None:
"""Test sync_to_local."""
mock_handler = MagicMock()
mock_handler_class = mocker.patch(
f"{MODULE}.S3SyncHandler", return_value=mock_handler
)
runway_context.add_stubber("s3")
dest_directory = "/test/"
obj = Bucket(runway_context, "test-bucket")
assert not obj.sync_to_local(
dest_directory, follow_symlinks=True, include=["something"]
)
mock_handler_class.assert_called_once_with(
context=runway_context,
delete=False,
dest=dest_directory,
exclude=None,
follow_symlinks=True,
include=["something"],
session=obj.session,
src="s3://test-bucket",
)
mock_handler.run.assert_called_once_with()
|
py | 7df8438af14e474e5aa8ccc4ffe0477974390add | import unittest
from unittest.mock import AsyncMock, patch
from pathlib import Path
from sporepedia.client import SporepediaClient
from sporepedia.api.client import APIClient
class SporepediaClientTest(unittest.IsolatedAsyncioTestCase):
@patch.object(APIClient, "request")
async def test__search(self, mock_request: AsyncMock):
async with SporepediaClient() as client:
with open(Path("./tests/testdata/dwr_search_testdata.js")) as fp:
mock_request.return_value.text.return_value = fp.read()
await client.search(text="Spore")
async def test__close_exception(self):
with self.assertRaises(ValueError):
await SporepediaClient().close()
async def test__create_and_close(self):
client = SporepediaClient()
await client.create()
self.assertIsNotNone(client._api)
await client.close()
with self.assertRaises(ValueError):
await SporepediaClient().close()
async with SporepediaClient() as client:
self.assertIsInstance(client, SporepediaClient)
self.assertIsNotNone(client._api)
|
py | 7df843c9746f48651b6c585447f0ebc8ee7b509e | """
FitGLF.py
Author: Jordan Mirocha
Affiliation: University of Colorado at Boulder
Created on: Fri Oct 23 14:34:01 PDT 2015
Description:
"""
import gc, os
import numpy as np
from ..util import read_lit
from ..util.Pickling import write_pickle_file
from ..util.ParameterFile import par_info
from ..util.Stats import symmetrize_errors
from ..populations import GalaxyCohort, GalaxyEnsemble
from .ModelFit import LogLikelihood, FitBase, def_kwargs
try:
# this runs with no issues in python 2 but raises error in python 3
basestring
except:
# this try/except allows for python 2/3 compatible string type checking
basestring = str
try:
from mpi4py import MPI
rank = MPI.COMM_WORLD.rank
size = MPI.COMM_WORLD.size
except ImportError:
rank = 0
size = 1
twopi = 2. * np.pi
_b14 = read_lit('bouwens2014')
hst_shallow = _b14.filt_shallow
hst_deep = _b14.filt_deep
class loglikelihood(LogLikelihood):
@property
def redshifts(self):
return self._redshifts
@redshifts.setter
def redshifts(self, value):
self._redshifts = value
@property
def metadata(self):
return self._metadata
@metadata.setter
def metadata(self, value):
self._metadata = value
@property
def units(self):
return self._units
@units.setter
def units(self, value):
self._units = value
@property
def zmap(self):
if not hasattr(self, '_zmap'):
self._zmap = {}
return self._zmap
@zmap.setter
def zmap(self, value):
self._zmap = value
@property
def mask(self):
if not hasattr(self, '_mask'):
if type(self.xdata) is np.ma.core.MaskedArray:
self._mask = self.xdata.mask
else:
self._mask = np.zeros(len(self.xdata))
return self._mask
@property
def include(self):
if not hasattr(self, '_include'):
assert self.metadata is not None
self._include = []
for item in self.metadata:
if item in self._include:
continue
self._include.append(item)
return self._include
@property
def monotonic_beta(self):
if not hasattr(self, '_monotonic_beta'):
self._monotonic_beta = False
return self._monotonic_beta
@monotonic_beta.setter
def monotonic_beta(self, value):
self._monotonic_beta = bool(value)
def __call__(self, sim):
"""
Compute log-likelihood for model generated via input parameters.
Returns
-------
Tuple: (log likelihood, blobs)
"""
# Figure out if `sim` is a population object or not.
# OK if it's a simulation, will loop over LF-bearing populations.
if not (isinstance(sim, GalaxyCohort.GalaxyCohort) \
or isinstance(sim, GalaxyEnsemble.GalaxyEnsemble)):
pops = []
for pop in sim.pops:
if not hasattr(pop, 'LuminosityFunction'):
continue
pops.append(pop)
else:
pops = [sim]
if len(self.ydata) == 0:
raise ValueError("Problem: data is empty.")
if len(pops) > 1:
raise NotImplementedError('careful! need to think about this.')
# Loop over all data points individually.
#try:
phi = np.zeros_like(self.ydata)
for i, quantity in enumerate(self.metadata):
if self.mask[i]:
#print('masked:', rank, self.redshifts[i], self.xdata[i])
continue
xdat = self.xdata[i]
z = self.redshifts[i]
if quantity in self.zmap:
zmod = self.zmap[quantity][z]
else:
zmod = z
for j, pop in enumerate(pops):
# Generate model LF
if quantity == 'lf':
# New convention: LuminosityFunction always in terms of
# observed magnitudes.
# Compute LF
p = pop.LuminosityFunction(z=zmod, x=xdat, mags=True)
if not np.isfinite(p):
print('LF is inf or nan!', zmod, xdat)
raise ValueError('LF is inf or nan!', zmod, xdat)
elif quantity == 'smf':
M = np.log10(xdat)
p = pop.StellarMassFunction(zmod, M)
elif quantity == 'beta':
zstr = int(round(zmod))
if zstr >= 7:
filt_hst = hst_deep
else:
filt_hst = hst_shallow
M = xdat
p = pop.Beta(zmod, MUV=M, presets='hst', dlam=20.,
return_binned=True, rest_wave=None)
if not np.isfinite(p):
print('beta is inf or nan!', z, M)
return -np.inf
#raise ValueError('beta is inf or nan!', z, M)
else:
raise ValueError('Unrecognized quantity: {!s}'.format(\
quantity))
# If UVLF or SMF, could do multi-pop in which case we'd
# increment here.
phi[i] = p
##
# Apply restrictions to beta
if self.monotonic_beta:
if type(self.monotonic_beta) in [int, float, np.float64]:
Mlim = self.monotonic_beta
else:
# Don't let beta turn-over in the range of magnitudes that
# overlap with UVLF constraints, or 2 extra mags if no UVLF
# fitting happening (rare).
xmod = []
ymod = []
zmod = []
xlf = []
for i, quantity in enumerate(self.metadata):
if quantity == 'lf':
xlf.append(self.xdata[i])
z = self.redshifts[i]
if quantity in self.zmap:
_zmod = self.zmap[quantity][z]
else:
_zmod = z
xmod.append(self.xdata[i])
ymod.append(phi[i])
zmod.append(_zmod)
i_lo = np.argmin(xmod)
M_lo = xmod[i_lo]
b_lo = ymod[i_lo]
if 'lf' in self.metadata:
Mlim = np.nanmin(xlf) - 1.
else:
Mlim = M_lo - 2.
b_hi = {}
for i, quantity in enumerate(self.metadata):
if quantity != 'beta':
continue
if zmod[i] not in b_hi:
b_hi[zmod[i]] = pop.Beta(zmod[i], MUV=Mlim, presets='hst',
dlam=20., return_binned=True, rest_wave=None)
if not (np.isfinite(Mlim) or np.isfinite(b_hi[zmod[i]])):
raise ValueError("Mlim={}, beta_hi={:.2f}".format(Mlim, b_hi))
# Bit overkill to check every magnitude, but will *really*
# enforce monotonic behavior.
if b_hi[zmod[i]] < ymod[i]:
print('beta is not monotonic!', zmod[i],
Mlim, b_hi[zmod[i]], xmod[i], ymod[i])
return -np.inf
#else:
# print("beta monotonic at z={}: beta(MUV={})={}, beta(MUV={})={}".format(zmod[i],
# Mlim, b_hi[zmod[i]], xmod[i], ymod[i]))
#except:
# return -np.inf, self.blank_blob
#phi = np.ma.array(_phi, mask=self.mask)
#del sim, pops
lnL = -0.5 * np.ma.sum((phi - self.ydata)**2 / self.error**2)
return lnL + self.const_term
class FitGalaxyPopulation(FitBase):
@property
def loglikelihood(self):
if not hasattr(self, '_loglikelihood'):
self._loglikelihood = loglikelihood(self.xdata_flat,
self.ydata_flat, self.error_flat)
self._loglikelihood.redshifts = self.redshifts_flat
self._loglikelihood.metadata = self.metadata_flat
self._loglikelihood.zmap = self.zmap
self._loglikelihood.monotonic_beta = self.monotonic_beta
self.info
return self._loglikelihood
@property
def monotonic_beta(self):
if not hasattr(self, '_monotonic_beta'):
self._monotonic_beta = False
return self._monotonic_beta
@monotonic_beta.setter
def monotonic_beta(self, value):
self._monotonic_beta = bool(value)
@property
def zmap(self):
if not hasattr(self, '_zmap'):
self._zmap = {}
return self._zmap
@zmap.setter
def zmap(self, value):
self._zmap = value
@property
def redshift_bounds(self):
if not hasattr(self, '_redshift_bounds'):
raise ValueError('Set by hand or include in litdata.')
return self._redshift_bounds
@redshift_bounds.setter
def redshift_bounds(self, value):
assert len(value) == 2
self._redshift_bounds = tuple(value)
@property
def redshifts(self):
if not hasattr(self, '_redshifts'):
raise ValueError('Set by hand or include in litdata.')
return self._redshifts
@redshifts.setter
def redshifts(self, value):
# This can be used to override the redshifts in the dataset and only
# use some subset of them
# Need to be ready for 'lf' or 'smf' designation.
if len(self.include) > 1:
assert type(value) is dict
if type(value) in [int, float]:
value = [value]
self._redshifts = value
@property
def ztol(self):
if not hasattr(self, '_ztol'):
self._ztol = 0.
return self._ztol
@ztol.setter
def ztol(self, value):
self._ztol = value
@property
def data(self):
if not hasattr(self, '_data'):
raise AttributeError('Must set data by hand!')
return self._data
@data.setter
def data(self, value):
"""
Set the data (duh).
The structure is as follows. The highest level division is between
different quantities (e.g., 'lf' vs. 'smf'). Each of these quantities
is an element of the returned dictionary. For each, there is a list
of dictionaries, one per redshift. Each redshift dictionary contains
the magnitudes (or masses) along with number density measurements
and error-bars.
"""
if isinstance(value, basestring):
value = [value]
if type(value) in [list, tuple]:
self._data = {quantity:[] for quantity in self.include}
self._units = {quantity:[] for quantity in self.include}
z_by_range = hasattr(self, '_redshift_bounds')
z_by_hand = hasattr(self, '_redshifts')
if not z_by_hand:
self._redshifts = {quantity:[] for quantity in self.include}
# Loop over data sources
for src in value:
# Grab the data
litdata = read_lit(src)
# Loop over LF, SMF, etc.
for quantity in self.include:
if quantity not in litdata.data.keys():
continue
# Short hand
data = litdata.data[quantity]
redshifts = litdata.redshifts
# This is always just a number or str, i.e.,
# no need to breakdown by redshift so just do it now
self._units[quantity].append(litdata.units[quantity])
# Now, be careful about what redshifts to include.
if not (z_by_range or z_by_hand):
srcdata = data
srczarr = redshifts
print('not by hand', srczarr)
else:
srczarr = []
srcdata = {}
for z in redshifts:
if z_by_range:
zb = self.redshift_bounds
if (zb[0] <= z <= zb[1]):
srczarr.append(z)
srcdata[z] = data[z]
continue
# z by hand from here down.
# Find closest redshift to those requested,
# see if it meets our tolerance.
zreq = np.array(self._redshifts[quantity])
iz = np.argmin(np.abs(z - zreq))
# Does this redshift match any we've requested?
if abs(z - zreq[iz]) > self.ztol:
continue
srczarr.append(z)
srcdata[z] = data[z]
self._data[quantity].append(srcdata)
if not z_by_hand:
self._redshifts[quantity].extend(srczarr)
# Check to make sure we find requested measurements.
for quantity in self.include:
zlit = []
for element in self._data[quantity]:
zlit.extend(list(element.keys()))
zlit = np.array(zlit).ravel()
zreq = self._redshifts[quantity]
# Problems straight away if we don't have enough redshifts
if len(zlit) != len(zreq):
s = "Found {} suitable redshifts for {}.".format(len(zlit),
quantity)
s += " Requested {}.".format(len(zreq))
s += "z_requested={}, z_found={}.".format(zreq, zlit)
s += " Perhaps rounding issue? Toggle `ztol` attribute"
s += " to be more lenient in finding match with measurements."
raise ValueError(s)
# Need to loop over all sources. When we're done, should be
# able to account for all requested redshifts.
for j, z in enumerate(zreq):
if z != zlit[j]:
s = "# Will fit to {} at z={}".format(quantity, zlit[j])
s += " as it lies within ztol={} of requested z={}".format(
self.ztol, z)
if rank == 0:
print(s)
else:
raise NotImplementedError('help!')
@property
def include(self):
if not hasattr(self, '_include'):
self._include = ['lf']
return self._include
@include.setter
def include(self, value):
self._include = value
@property
def xdata_flat(self):
if not hasattr(self, '_xdata_flat'):
self._mask = []
self._xdata_flat = []; self._ydata_flat = []
self._error_flat = []; self._redshifts_flat = []
self._metadata_flat = []
for quantity in self.include:
# Sorted by sources
for i, dataset in enumerate(self.data[quantity]):
for j, redshift in enumerate(self.data[quantity][i]):
M = self.data[quantity][i][redshift]['M']
# These could still be in log10 units
if quantity == 'beta':
phi = self.data[quantity][i][redshift]['beta']
else:
phi = self.data[quantity][i][redshift]['phi']
err = self.data[quantity][i][redshift]['err']
if hasattr(M, 'mask'):
self._mask.extend(M.mask)
self._xdata_flat.extend(M.data)
else:
self._mask.extend(np.zeros_like(M))
self._xdata_flat.extend(M)
if self.units[quantity][i] == 'log10':
_phi = 10**phi
else:
_phi = phi
if hasattr(M, 'mask'):
self._ydata_flat.extend(_phi.data)
else:
self._ydata_flat.extend(_phi)
                        # Kludge for asymmetric errors
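                        # (Sketch of the intent, hypothetical numbers: for linear
                        # units an asymmetric (lower, upper) pair such as (0.1, 0.3)
                        # is collapsed to its mean, 0.2; for log10 units it is first
                        # symmetrized by symmetrize_errors below.)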
for k, _err in enumerate(err):
if self.units[quantity][i] == 'log10':
_err_ = symmetrize_errors(phi[k], _err,
operation='min')
else:
_err_ = _err
if type(_err_) in [tuple, list]:
self._error_flat.append(np.mean(_err_))
else:
self._error_flat.append(_err_)
zlist = [redshift] * len(M)
self._redshifts_flat.extend(zlist)
self._metadata_flat.extend([quantity] * len(M))
self._mask = np.array(self._mask)
self._xdata_flat = np.ma.array(self._xdata_flat, mask=self._mask)
self._ydata_flat = np.ma.array(self._ydata_flat, mask=self._mask)
self._error_flat = np.ma.array(self._error_flat, mask=self._mask)
return self._xdata_flat
@property
def ydata_flat(self):
if not hasattr(self, '_ydata_flat'):
xdata_flat = self.xdata_flat
return self._ydata_flat
@property
def error_flat(self):
if not hasattr(self, '_error_flat'):
xdata_flat = self.xdata_flat
return self._error_flat
@property
def redshifts_flat(self):
if not hasattr(self, '_redshifts_flat'):
xdata_flat = self.xdata_flat
return self._redshifts_flat
@property
def metadata_flat(self):
if not hasattr(self, '_metadata_flat'):
xdata_flat = self.xdata_flat
return self._metadata_flat
@property
def units(self):
if not hasattr(self, '_units'):
xdata_flat = self.xdata_flat
return self._units
@property
def xdata(self):
if not hasattr(self, '_xdata'):
if hasattr(self, '_data'):
self._xdata = []; self._ydata = []; self._error = []
#for i, dataset in enumerate(self.redshifts):
for h, quantity in enumerate(self.include):
for i, dataset in enumerate(self.data[quantity]):
                        for j, redshift in enumerate(dataset):
self._xdata.append(dataset[redshift]['M'])
if quantity == 'beta':
self._ydata.append(dataset[redshift]['beta'])
else:
self._ydata.append(dataset[redshift]['phi'])
self._error.append(dataset[redshift]['err'])
return self._xdata
@xdata.setter
def xdata(self, value):
self._xdata = value
@property
def ydata(self):
if not hasattr(self, '_ydata'):
if hasattr(self, '_data'):
xdata = self.xdata
return self._ydata
@ydata.setter
def ydata(self, value):
self._ydata = value
@property
def error(self):
if not hasattr(self, '_error'):
if hasattr(self, '_data'):
xdata = self.xdata
return self._error
@error.setter
def error(self, value):
self._error = value
@property
def guess_override(self):
if not hasattr(self, '_guess_override_'):
self._guess_override_ = {}
return self._guess_override_
@guess_override.setter
def guess_override(self, kwargs):
if not hasattr(self, '_guess_override_'):
self._guess_override_ = {}
self._guess_override_.update(kwargs)
def save_data(self, prefix, clobber=False):
if rank > 0:
return
fn = '{!s}.data.pkl'.format(prefix)
if os.path.exists(fn) and (not clobber):
print("{!s} exists! Set clobber=True to overwrite.".format(fn))
return
write_pickle_file((self.xdata, self.ydata, self.redshifts,\
self.error), fn, ndumps=1, open_mode='w', safe_mode=False,\
verbose=False)
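        # The saved tuple can presumably be restored with the standard pickle
        # module (assuming write_pickle_file wraps an ordinary pickle dump), e.g.:
        #     import pickle
        #     with open(fn, 'rb') as f:
        #         xdata, ydata, redshifts, error = pickle.load(f)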
|
py | 7df84435559fedc2113734d3a7de0dea14516e61 | #!/usr/bin/env python3
# Copyright (c) 2016-2017 The DigiByte Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Create a blockchain cache.
Creating a cache of the blockchain speeds up test execution when running
multiple functional tests. This helper script is executed by test_runner when multiple
tests are being run in parallel.
"""
from test_framework.test_framework import DigiByteTestFramework
class CreateCache(DigiByteTestFramework):
# Test network and test nodes are not required:
def set_test_params(self):
self.num_nodes = 0
self.supports_cli = True
def setup_network(self):
pass
def run_test(self):
pass
if __name__ == '__main__':
CreateCache().main()
|
py | 7df844eb666e860791bd2d9fcb655b54ba21f7ef | # IMPORTS
import pygetwindow
from helping_scripts import json_handler
from data_models.account import Account
from data_models.alien_bot import AlienBot
from program_logic.bot_runnable import BotRunnable
import helping_scripts.chrome_driver_handler as cd_handler
# ProgramInitializer CLASS
# Read all accounts from json config file
# Create separate chrome instances for each account found
# Create one instance of BotRunnable for each account found, containing both account & chrome data
# Return a list of all BotRunnables to main.py
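# A minimal usage sketch from main.py (hypothetical; the real main.py and the
# BotRunnable entry point may differ):
#     initializer = ProgramInitializer()
#     initializer.initialize()
#     for bot_runnable in initializer.get_bot_runnables():
#         bot_runnable.start()  # assumes BotRunnable exposes a start()-style method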
class ProgramInitializer:
def __init__(self):
self.bot_runnables_list = [] # [ BotRunnableInstance, BotRunnableInstance ]
self.alien_bots_list = [] # [ AlienBotModel, AlienBotModel, AlienBotModel ]
self.accounts_list = []
self.previous_chrome_windows = [] # USE CASE: helps keep track when new Chrome Windows are created
self.new_chrome_port = 8001
def initialize(self):
self.read_accounts()
cd_handler.remove_all_existing_instances() # Delete previous chrome instances
self.previous_chrome_windows = pygetwindow.getWindowsWithTitle(
            'Chrome')  # fill start value for previous_chrome_windows
self.create_chrome_instances()
self.initialize_bot_runnables()
print("---\nProgram Initialization Successful...")
def read_accounts(self):
print("---\nACCOUNTS_CONFIGS_LIST")
json_list = json_handler.read_json_file('account_configs/account_configs.json')
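        # Assumed layout of account_configs.json (placeholder values), based on
        # the keys read below:
        #     [
        #         {"username": "user1", "password": "pass1"},
        #         {"username": "user2", "password": "pass2"}
        #     ]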
for item in json_list:
self.accounts_list.append(Account(item['username'], item['password']))
print(item)
def create_chrome_instances(self):
print("---\nBeginning creating chrome instances...\n")
for account in self.accounts_list:
driver = cd_handler.start_chrome(self.new_chrome_port)
chrome_window = self.get_new_chrome_window()
# print(type(driver))
self.alien_bots_list.append(AlienBot(driver, driver.current_window_handle, chrome_window, account))
self.new_chrome_port += 1 # Increment port to prepare creation of next Chrome Window
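            # (e.g., with two accounts in the config, the Chrome instances would be
            # started on ports 8001 and 8002, given the initial value set in __init__.)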
print("\nSuccessfully created chrome_instances & created AlienBot data models")
def get_new_chrome_window(self):
new_chrome_window = None
current_chrome_ids = pygetwindow.getWindowsWithTitle('Chrome')
for chrome_id in current_chrome_ids:
if chrome_id not in self.previous_chrome_windows:
new_chrome_window = chrome_id
# Before returning the Chrome Window, update previous_chrome_windows list with the newly created window
self.previous_chrome_windows.append(new_chrome_window)
print("New Chrome Window Found: " + str(new_chrome_window))
return new_chrome_window
def initialize_bot_runnables(self):
# Create new BotRunnable instances from AlienBots and append to bot_runnables_list
for alien_bot_model in self.alien_bots_list:
self.bot_runnables_list.append(BotRunnable(alien_bot_model))
# This function will be called by main.py
def get_bot_runnables(self):
return self.bot_runnables_list
|